# ---- tests/__init__.py ----
__author__ = 'Robbert Harms'
__date__ = "2014-05-21"
__license__ = "LGPL v3"
__maintainer__ = "Robbert Harms"
__email__ = "robbert.harms@maastrichtuniversity.nl"


# ---- mdt/log_handlers.py ----
"""Implements multiple handlers that hook into the Python logging module.

These handlers can for example echo the log entry to the terminal, write it to a file or dispatch it to another
class. They are typically configured in the MDT configuration file.
"""
import codecs
from logging import StreamHandler
import os
import sys

__author__ = 'Robbert Harms'
__date__ = "2015-08-19"
__maintainer__ = "Robbert Harms"
__email__ = "robbert.harms@maastrichtuniversity.nl"


class ModelOutputLogHandler(StreamHandler):

    __instances__ = set()

    def __init__(self, mode='a', encoding=None):
        """This logger logs information about a model optimization to the folder of the model being optimized.

        It is by default (see the MDT configuration) already constructed and added to the logging module.

        To set a new file, or to disable this logger, set the file using the :attr:`output_file` property.
        """
        super(ModelOutputLogHandler, self).__init__()
        self.__class__.__instances__.add(self)

        if codecs is None:
            encoding = None

        self._output_file = None
        self.mode = mode
        self.encoding = encoding
        self.stream = None

    @property
    def output_file(self):
        return self._output_file

    @output_file.setter
    def output_file(self, output_file):
        self.close()
        self._output_file = output_file

        if self._output_file:
            if not os.path.isdir(os.path.dirname(self._output_file)):
                os.makedirs(os.path.dirname(self._output_file))
            self._open()

    def emit(self, record):
        if self._output_file and self.stream:
            super(ModelOutputLogHandler, self).emit(record)

    def close(self):
        if self._output_file:
            self.acquire()
            try:
                if self.stream:
                    self.flush()
                    if hasattr(self.stream, "close"):
                        self.stream.close()
                    self.stream = None
                super(ModelOutputLogHandler, self).close()
            finally:
                self.release()

    def _open(self):
        """Open the current base file with the (original) mode and encoding and set the resulting stream."""
        if self._output_file:
            if self.encoding is None:
                self.stream = open(self._output_file, self.mode)
            else:
                self.stream = codecs.open(self._output_file, self.mode, self.encoding)


class StdOutHandler(StreamHandler):

    def __init__(self, stream=None):
        """A redirect for stdout. Emits all log entries to stdout.

        Args:
            stream: the IO stream to which to emit the log entries. If not given we use sys.stdout.
        """
        stream = stream or sys.stdout
        super(StdOutHandler, self).__init__(stream=stream)

    def emit(self, record):
        if self.stream:
            super(StdOutHandler, self).emit(record)


class LogDispatchHandler(StreamHandler):

    _listeners = []

    def __init__(self, *args, **kwargs):
        """This class is able to dispatch messages to all the attached log listeners.

        You can add listeners by adding them to the list of listeners. This list is a class variable and as such is
        available to all instances and subclasses. The listeners should be instances of LogListenerInterface.

        This enables for example the GUI to hook a log listener indirectly into the logging module.

        In general only one copy of this class should be used.
        """
        super(LogDispatchHandler, self).__init__(*args, **kwargs)

    def emit(self, record):
        for listener in self._listeners:
            listener.emit(record, self.format(record))

    @staticmethod
    def add_listener(listener):
        """Add a listener to the dispatch handler.

        Args:
            listener (LogListenerInterface): listener that implements the log listener interface.

        Returns:
            int: the listener id number.
                You can use this to remove the listener again.
        """
        listener_id = len(LogDispatchHandler._listeners)
        LogDispatchHandler._listeners.append(listener)
        return listener_id

    @staticmethod
    def remove_listener(listener_id):
        """Remove a listener from the log dispatcher.

        Args:
            listener_id (int): the id of the listener to remove
        """
        del LogDispatchHandler._listeners[listener_id]


class LogListenerInterface(object):
    """Interface for listeners to work in conjunction with :class:`LogDispatchHandler`"""

    def emit(self, record, formatted_message):
        pass


# ---- mdt/components_loader.py ----
"""The general components loader.

This module consists of two main items, component sources and component loaders. The component loaders have a list
of sources from which they load the available components.
"""
import inspect
import os
import imp  # todo in P3.4 replace imp calls with importlib.SourceFileLoader(name, path).load_module(name)
import collections
from six import with_metaclass
from mdt.exceptions import NonUniqueComponent
from mot.model_building.cl_functions.base import ModelFunction, LibraryFunction
import mot.model_building.cl_functions.library_functions
import mot.model_building.cl_functions.model_functions

__author__ = 'Robbert Harms'
__date__ = "2015-06-21"
__maintainer__ = "Robbert Harms"
__email__ = "robbert.harms@maastrichtuniversity.nl"


def get_model(model_name, **kwargs):
    """Load one of the available models.

    Args:
        model_name (str): One of the models from get_list_of_composite_models() or get_list_of_cascade_models()
        **kwargs: Extra keyword arguments used for the initialization of the model

    Returns:
        Either a cascade model or a composite model. In any case, a model that can be given to the fit_model function.
    """
    cml = CascadeModelsLoader()
    sml = CompositeModelsLoader()
    try:
        return cml.load(model_name, **kwargs)
    except ImportError:
        try:
            return sml.load(model_name, **kwargs)
        except ImportError:
            raise ValueError('The model with the name "{}" could not be found.'.format(model_name))


def get_meta_info(model_name):
    """Get the meta information of a particular model.

    Args:
        model_name (str): One of the models from get_list_of_composite_models() or get_list_of_cascade_models()

    Returns:
        dict: the meta information of the named cascade or composite model.
    """
    cml = CascadeModelsLoader()
    sml = CompositeModelsLoader()
    try:
        return cml.get_meta_info(model_name)
    except ImportError:
        try:
            return sml.get_meta_info(model_name)
        except ImportError:
            raise ValueError('The model with the name "{}" could not be found.'.format(model_name))


class ComponentBuilder(object):

    def __init__(self):
        """The base class for component builders.

        Component builders, together with ComponentConfig, allow you to define components using class attributes.
        The idea is that the ComponentConfig contains class attributes defining the component and that the
        ComponentBuilder is able to create a class of the right type from the information in that component config.
        """

    def create_class(self, template):
        """Create a class of the right type given the information in the template.

        Args:
            template (ComponentConfig): the information as a component config

        Returns:
            class: the class of the right type
        """


def bind_function(func):
    """This decorator is for methods in ComponentConfigs that we would like to bind to the constructed component.
Example suppose you want to inherit or overwrite a function in the constructed model, then in your template/config you should define the function and add @bind_function to it as a decorator, like this: .. code-block:: python # the class we want to create class MyGoal(object): def test(self): print('test') # the template class from which we want to construct a new MyGoal, note the @bind_function class MyConfig(ComponentConfig): @bind_function def test(self): super(MyGoal, self).test() print('test2') The component builder takes care to actually bind the new method to the final object. What this will do essentially is that it will add the property bind to the function. This should act as a flag indicating that that function should be bound. Args: func (python function): the function to bind to the build object """ func._bind = True return func def method_binding_meta(template, *bases): """Adds all bound functions from the ComponentConfig to the class being constructed. This returns a metaclass similar to the with_metaclass of the six library. Args: template (ComponentConfig): the component config with the bound_methods attribute which we will all add to the attributes of the to creating class. """ class ApplyMethodBinding(type): def __new__(mcs, name, bases, attributes): attributes.update(template.bound_methods) return super(ApplyMethodBinding, mcs).__new__(mcs, name, bases, attributes) return with_metaclass(ApplyMethodBinding, *bases) class ComponentConfigMeta(type): def __new__(mcs, name, bases, attributes): """A pre-processor for the components. On the moment this meta class does two things, first it adds all functions with the '_bind' property to the bound_methods list for binding them later to the constructed class. Second, it sets the 'name' attribute of the component to the class name if there is no name attribute defined. """ result = super(ComponentConfigMeta, mcs).__new__(mcs, name, bases, attributes) bound_methods = {value.__name__: value for value in attributes.values() if hasattr(value, '_bind')} for base in bases: if hasattr(base, 'bound_methods'): for key, value in base.bound_methods.items(): if key not in bound_methods: bound_methods.update({key: value}) result.bound_methods = bound_methods if 'name' not in attributes: result.name = name return result class ComponentConfig(with_metaclass(ComponentConfigMeta, object)): """The component configuration. By overriding the class attributes you can define complex configurations. The actual class distilled from these configurations are loaded by the ComponentBuilder """ name = '' description = '' @classmethod def meta_info(cls): return {'name': cls.name, 'description': cls.description} class ComponentsLoader(object): def __init__(self, sources): """The base class for loading and displaying components. Args: sources (:class:`list`): the list of sources to use for loading the components """ self._sources = sources self._check_unique_names() def list_all(self): """List the names of all the available components.""" components = [] for source in self._sources: components.extend(source.list()) return list(sorted(set(components))) def has_component(self, name): """Check if this loader has a component with the given name. Args: name (str): the name of the component Returns: boolean: true if this loader has the given component, false otherwise """ try: self._get_preferred_source(name) return True except ImportError: return False def get_all_meta_info(self): """Get the meta information of all loadable components. 
Returns: dict: the keys are the names of the objects, as returned by list_all() and the values are the meta-information dicts. """ return {k: self.get_meta_info(k) for k in self.list_all()} def get_meta_info(self, name): """Get the meta information of a component of the given name. Args: name (str): The name of the component we want to use Returns: dict: a dictionary with meta information for this component. Standard meta information is: - name (str): the name of the component - description (str): the description of the component """ source = self._get_preferred_source(name) return source.get_meta_info(name) def get_class(self, name): """Get the class to the component of the given name. Args: name (str): The name of the component we want to use Returns: class or cb function to construct the given component """ source = self._get_preferred_source(name) return source.get_class(name) def load(self, name, *args, **kwargs): """Load the component with the given name Args: name (str): The name of the component we want to use *args: passed to the component **kwargs: passed to the component Returns: the loaded module """ c = self.get_class(name) return c(*args, **kwargs) def _get_preferred_source(self, name): """Try to get the preferred source for the component with the given name. The order of the sources matter, the first source takes precedence over the latter ones and so forth. """ for source in self._sources: try: source.get_class(name) return source except ImportError: pass raise ImportError("No component found with the name {}".format(name)) def _check_unique_names(self): """Check if all the elements in the sources are unique.""" elements = [] for source in self._sources: elements.extend(source.list()) non_unique = list(item for item, count in collections.Counter(elements).items() if count > 1) if len(non_unique): raise NonUniqueComponent('Non-unique components detected: {}, please rename them.'.format(non_unique)) class ComponentsSource(object): def __init__(self): """Defines a source for components. This has functions for listing the available components as well as getting the class and meta information. """ def list(self): """Get the names of all the available components from this source. Returns: list or str: list of the names of all the components loadable from this source. """ return [] def get_class(self, name): """Get the class for the component by the given name Args: name (str): The name of the component we want to use Returns: the construction function """ raise ImportError def get_meta_info(self, name): """Get the meta information of a component of the given name. Args: name (str): The name of the component we want to use Returns: dict: a dictionary with meta information for this component. Standard meta information is: - name (str): the name of the component - description (str): the description of the component """ return {} class UserComponentsSourceSingle(ComponentsSource): def __init__(self, user_type, component_type): """Load the user components of the type *single*. This expects that the available python files contain a class with the same name as the file in which the class resides. For example, if we have a python file named "MySingleComponent.py" we should at least have the class named "MySingleComponent" in that python file. Additionally, the file can contain a dictionary named 'meta_info' which contains meta information about that module. Args: user_type (str): either 'standard' or 'user' component_type (str): the type of component we wish to use. 
                This should be named exactly after one of the directories available in mdt/data/components/.
        """
        super(UserComponentsSourceSingle, self).__init__()
        self.path = _get_components_path(user_type, component_type)
        self._class_filenames = {}

    def list(self):
        items = []
        if os.path.isdir(self.path):
            for dir_name, sub_dirs, files in os.walk(self.path):
                for file in files:
                    if file.endswith('.py') and not file.startswith('__'):
                        items.append(file[0:-3])
                        self._class_filenames[file[0:-3]] = os.path.join(dir_name, file)
        return items

    def get_class(self, name):
        if name in self._class_filenames:
            module = imp.load_source(name, self._class_filenames[name])
            return getattr(module, name)
        raise ImportError

    def get_meta_info(self, name):
        path = os.path.join(self.path, name + '.py')
        if os.path.exists(path):
            module = imp.load_source(name, path)
            try:
                return getattr(module, 'meta_info')
            except AttributeError:
                try:
                    cls = self.get_class(name)
                    return cls.meta_info()
                except AttributeError:
                    return {}
        return {}


class AutoUserComponentsSourceSingle(UserComponentsSourceSingle):

    def __init__(self, user_type, component_type, component_builder):
        """This class extends the default single components source loader by also supporting components defined
        using the ComponentConfig method.

        This means that the components are defined as subclasses of ComponentConfig and we need a ComponentBuilder
        to actually create the components.

        Args:
            user_type (str): either 'standard' or 'user'
            component_type (str): the type of component we wish to use. This should be named exactly after one of
                the directories available in mdt/data/components/
            component_builder (ComponentBuilder): the component creator that can create components using
                ComponentConfig classes
        """
        self.component_builder = component_builder
        super(AutoUserComponentsSourceSingle, self).__init__(user_type, component_type)

    def get_class(self, name):
        cls = super(AutoUserComponentsSourceSingle, self).get_class(name)
        if issubclass(cls, ComponentConfig):
            return self.component_builder.create_class(cls)
        return cls


class ComponentInfo(object):

    def get_name(self):
        """Get the name of this component.

        Returns:
            str: the name of this component
        """
        return NotImplemented

    def get_meta_info(self):
        """Get the additional meta info of this component.

        Returns:
            dict: the meta info
        """
        return NotImplemented

    def get_component_class(self):
        """Get the component class.

        Returns:
            class: the class of the component
        """
        return NotImplemented


class UserComponentsSourceMulti(ComponentsSource):

    loaded_modules_cache = {}

    def __init__(self, user_type, component_type):
        """Base class for component sources in which there are multiple components per file.

        Args:
            user_type (str): either 'user' or 'standard'. This defines from which dir to use the components
            component_type (str): from which dir in 'user' or 'standard' to use the components

        Attributes:
            loaded_modules_cache (dict): A cache for loaded components. If we do not do this, we run into TypeError
                problems when a class is reloaded while there is already an instantiated object. This dict is indexed
                per directory name and contains for each loaded python file a tuple with the module and the loaded
                components.
""" super(UserComponentsSourceMulti, self).__init__() self._user_type = user_type self._component_type = component_type if self._user_type not in self.loaded_modules_cache: self.loaded_modules_cache[self._user_type] = {} if self._component_type not in self.loaded_modules_cache[self._user_type]: self.loaded_modules_cache[self._user_type][self._component_type] = {} self.path = _get_components_path(user_type, component_type) self._check_path() self._components = self._load_all_components() def list(self): return self._components.keys() def get_class(self, name): if name not in self._components: raise ImportError return self._components[name].get_component_class() def get_meta_info(self, name): return self._components[name].get_meta_info() def _load_all_components(self): self._update_modules_cache() all_components = [] for module, components in self.loaded_modules_cache[self._user_type][self._component_type].values(): all_components.extend(components) return {component.get_name(): component for component in all_components} def _update_modules_cache(self): """Fill the modules cache with the components. This loops through all the python files present in the dir name and tries to use them as modules if they are not yet loaded. """ for dir_name, sub_dirs, files in os.walk(self.path): for file in files: if file.endswith('.py') and not file.startswith('__'): path = os.path.join(dir_name, file) if path not in self.loaded_modules_cache[self._user_type][self._component_type]: module_name = self._user_type + '/' + \ self._component_type + '/' + \ dir_name[len(self.path) + 1:] + '/' + \ os.path.splitext(os.path.basename(path))[0] module = imp.load_source(module_name, path) self.loaded_modules_cache[self._user_type][self._component_type][path] = \ (module, self._get_components_from_module(module)) def _get_components_from_module(self, module): """Return a list of all the available components in the given module. Args: module (module): the module from which to use the components. Returns: list: list of ComponentInfo objects """ loaded_items = [] if hasattr(module, 'get_components_list'): loaded_items.extend(module.get_components_list()) return loaded_items def _check_path(self): if not os.path.isdir(self.path): raise RuntimeError('The components folder "{0}" could not be found. ' 'Please check the path to the components in your configuration file.'.format(self.path)) class AutoUserComponentsSourceMulti(UserComponentsSourceMulti): def __init__(self, user_type, component_type, component_class, component_builder): """Create a component source that can create components using multiple types of definitions. This will use either objects of the class defined by component_class or it will use objects of type ComponentConfig using the builder defined by component_creator or it will use the objects from the get_components_list function. Args: user_type (str): either 'user' or 'standard'. This defines from which dir to use the components component_type (str): from which dir in 'user' or 'standard' to use the components component_class (class): the class to auto use component_builder (ComponentBuilder): the component creator to use for components defined as a ComponentConfig. 
""" self._component_class = component_class self.component_builder = component_builder super(AutoUserComponentsSourceMulti, self).__init__(user_type, component_type) def get_class(self, name): if name not in self._components: raise ImportError base = self._components[name].get_component_class() if inspect.isclass(base) and issubclass(base, ComponentConfig): return self.component_builder.create_class(base) return super(AutoUserComponentsSourceMulti, self).get_class(name) def _get_components_from_module(self, module): """Return a list of all the available components in the given module. Args: module (module): the module from which to use the components. Returns: list: list of components loaded from this module """ class DynamicInfo(ComponentInfo): def __init__(self, component): self._component = component def get_name(self): return self._component.name def get_meta_info(self): return self._component.meta_info() def get_component_class(self): return self._component loaded_items = super(AutoUserComponentsSourceMulti, self)._get_components_from_module(module) items = inspect.getmembers(module, _get_class_predicate(module, self._component_class)) loaded_items.extend(DynamicInfo(item[1]) for item in items) items = inspect.getmembers(module, _get_class_predicate(module, ComponentConfig)) loaded_items.extend(DynamicInfo(item[1]) for item in items) return loaded_items class ParametersSource(AutoUserComponentsSourceMulti): def __init__(self, user_type): """Source for the items in the 'parameters' dir in the components folder.""" from mot.model_building.cl_functions.parameters import CLFunctionParameter from mdt.models.parameters import ParameterBuilder super(ParametersSource, self).__init__(user_type, 'parameters', CLFunctionParameter, ParameterBuilder()) class CompositeModelSource(AutoUserComponentsSourceMulti): def __init__(self, user_type): """Source for the items in the 'composite_models' dir in the components folder.""" from mdt.models.composite import DMRICompositeModel, DMRICompositeModelBuilder super(CompositeModelSource, self).__init__(user_type, 'composite_models', DMRICompositeModel, DMRICompositeModelBuilder()) class CascadeSource(AutoUserComponentsSourceMulti): def __init__(self, user_type): """Source for the items in the 'cascade_models' dir in the components folder.""" from mdt.models.cascade import DMRICascadeModelInterface, CascadeBuilder super(CascadeSource, self).__init__(user_type, 'cascade_models', DMRICascadeModelInterface, CascadeBuilder()) class MOTSourceSingle(ComponentsSource): def get_meta_info(self, name): return {} class MOTLibraryFunctionSource(MOTSourceSingle): def get_class(self, name): return getattr(mot.model_building.cl_functions.library_functions, name) def list(self): module = mot.model_building.cl_functions.library_functions items = inspect.getmembers(module, _get_class_predicate(module, LibraryFunction)) return [x[0] for x in items if x[0] != 'LibraryFunction'] class MOTCompartmentModelsSource(MOTSourceSingle): def get_class(self, name): return getattr(mot.model_building.cl_functions.model_functions, name) def list(self): module = mot.model_building.cl_functions.model_functions items = inspect.getmembers(module, _get_class_predicate(module, ModelFunction)) return [x[0] for x in items if x[0] != 'ModelFunction'] class BatchProfilesLoader(ComponentsLoader): def __init__(self): super(BatchProfilesLoader, self).__init__([UserComponentsSourceSingle('user', 'batch_profiles'), UserComponentsSourceSingle('standard', 'batch_profiles')]) class 
ProcessingStrategiesLoader(ComponentsLoader): def __init__(self): super(ProcessingStrategiesLoader, self).__init__( [UserComponentsSourceSingle('user', 'processing_strategies'), UserComponentsSourceSingle('standard', 'processing_strategies')]) class NoiseSTDCalculatorsLoader(ComponentsLoader): def __init__(self): super(NoiseSTDCalculatorsLoader, self).__init__( [UserComponentsSourceSingle('user', 'noise_std_estimators'), UserComponentsSourceSingle('standard', 'noise_std_estimators')]) class CompartmentModelsLoader(ComponentsLoader): def __init__(self): from mdt.models.compartments import CompartmentBuilder super(CompartmentModelsLoader, self).__init__( [AutoUserComponentsSourceSingle('user', 'compartment_models', CompartmentBuilder()), AutoUserComponentsSourceSingle('standard', 'compartment_models', CompartmentBuilder()), MOTCompartmentModelsSource()]) class LibraryFunctionsLoader(ComponentsLoader): def __init__(self): super(LibraryFunctionsLoader, self).__init__([UserComponentsSourceSingle('user', 'library_functions'), UserComponentsSourceSingle('standard', 'library_functions'), MOTLibraryFunctionSource()]) class CompositeModelsLoader(ComponentsLoader): def __init__(self): super(CompositeModelsLoader, self).__init__([CompositeModelSource('user'), CompositeModelSource('standard')]) class ParametersLoader(ComponentsLoader): def __init__(self): super(ParametersLoader, self).__init__([ParametersSource('user'), ParametersSource('standard')]) class CascadeModelsLoader(ComponentsLoader): def __init__(self): super(CascadeModelsLoader, self).__init__([CascadeSource('user'), CascadeSource('standard')]) def get_component_class(component_type, component_name): """Return the class of the given component. Args: component_type (str): the type of component, for example 'batch_profiles' or 'parameters' component_name (str): the name of the component to use Returns: the class of the given component """ if component_type == 'batch_profiles': return BatchProfilesLoader().get_class(component_name) if component_type == 'cascade_models': return CascadeModelsLoader().get_class(component_name) if component_type == 'compartment_models': return CompartmentModelsLoader().get_class(component_name) if component_type == 'library_functions': return LibraryFunctionsLoader().get_class(component_name) if component_type == 'noise_std_estimators': return NoiseSTDCalculatorsLoader().get_class(component_name) if component_type == 'parameters': return ParametersLoader().get_class(component_name) if component_type == 'processing_strategies': return ProcessingStrategiesLoader().get_class(component_name) if component_type == 'composite_models': return CompositeModelsLoader().get_class(component_name) raise ValueError('Could not find the given component type {}'.format(component_type)) def load_component(component_type, component_name, *args, **kwargs): """Load the class indicated by the given component type and name. 
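
    A minimal usage sketch (assuming the standard 'Stick' compartment model is available in the components folder):

    .. code-block:: python

        from mdt.components_loader import load_component

        stick = load_component('compartment_models', 'Stick')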
    Args:
        component_type (str): the type of component, for example 'batch_profiles' or 'parameters'
        component_name (str): the name of the component to use
        *args: passed to the component
        **kwargs: passed to the component

    Returns:
        the loaded component
    """
    component = get_component_class(component_type, component_name)
    return component(*args, **kwargs)


def _get_class_predicate(module, class_type):
    """A predicate to be used in the function inspect.getmembers.

    This predicate checks if the module of the item we inspect matches the given module and checks if the item
    is a subclass of the given class type.

    Args:
        module (module): the module to check against the module of the item
        class_type (class): the class to check against the class type of the item

    Returns:
        function: a function to be used as a predicate in inspect.getmembers
    """
    def defined_in_module(item):
        return item.__module__ == module.__name__

    def complete_predicate(item):
        return inspect.isclass(item) and defined_in_module(item) and issubclass(item, class_type)

    return complete_predicate


def _get_components_path(user_type, component_type):
    """
    Args:
        user_type (str): either 'standard' or 'user'
        component_type (str): one of the dir names in standard and user
    """
    from mdt.configuration import get_config_dir
    return os.path.join(get_config_dir(), 'components', user_type, component_type)


# ---- mdt/model_fitting.py ----
import collections
import glob
import logging
import os
import time
import timeit
from contextlib import contextmanager
from six import string_types
from mdt.__version__ import __version__
from mdt.nifti import get_all_image_data
from mdt.batch_utils import batch_profile_factory, AllSubjects
from mdt.components_loader import get_model
from mdt.configuration import get_processing_strategy, get_optimizer_for_model
from mdt.models.cascade import DMRICascadeModelInterface
from mdt.protocols import write_protocol
from mdt.utils import create_roi, get_cl_devices, model_output_exists, \
    per_model_logging_context, get_temporary_results_dir
from mdt.processing_strategies import SimpleModelProcessingWorkerGenerator, FittingProcessingWorker
from mdt.exceptions import InsufficientProtocolError
from mot.load_balance_strategies import EvenDistribution
import mot.configuration
from mot.configuration import RuntimeConfigurationAction

__author__ = 'Robbert Harms'
__date__ = "2015-05-01"
__maintainer__ = "Robbert Harms"
__email__ = "robbert.harms@maastrichtuniversity.nl"


class BatchFitting(object):

    def __init__(self, data_folder, batch_profile=None, subjects_selection=None, recalculate=False,
                 models_to_fit=None, cascade_subdir=False, cl_device_ind=None, double_precision=False,
                 tmp_results_dir=True):
        """This class is meant to make running computations as simple as possible.

        The idea is that a single folder is enough to run the computations. One can optionally give it the
        batch_profile to use for the fitting. If not given, this class will attempt to use the batch_profile that
        fits the data folder best.

        Setting the ``cl_device_ind`` has the side effect that it changes the current runtime cl_device settings
        in the MOT toolkit for the duration of this function.

        Args:
            data_folder (str): the main directory to look for items to process.
            batch_profile (:class:`~mdt.batch_utils.BatchProfile` or str): the batch profile to use or the name of
                a batch profile to use from the users folder.
            subjects_selection (:class:`~mdt.batch_utils.BatchSubjectSelection`): the subjects to use for processing.
If None all subjects are processed. recalculate (boolean): If we want to recalculate the results if they are already present. cascade_subdir (boolean): if we want to create a subdirectory for every cascade model. Per default we output the maps of cascaded results in the same directory, this allows reusing cascaded results for other cascades (for example, if you cascade BallStick -> Noddi you can use the BallStick results also for BallStick -> Charmed). This flag disables that behaviour and instead outputs the results of a cascade model to a subdirectory for that cascade. This does not apply recursive. models_to_fit (list of str): A list of models to fit to the data. This overrides the models in the batch config. cl_device_ind (int): the index of the CL device to use. The index is from the list from the function get_cl_devices(). double_precision (boolean): if we would like to do the calculations in double precision tmp_results_dir (str, True or None): The temporary dir for the calculations. Set to a string to use that path directly, set to True to use the config value, set to None to disable. """ self._logger = logging.getLogger(__name__) self._batch_profile = batch_profile_factory(batch_profile, data_folder) self._subjects_selection = subjects_selection or AllSubjects() self._tmp_results_dir = tmp_results_dir if models_to_fit: self._models_to_fit = models_to_fit else: self._models_to_fit = self._batch_profile.get_models_to_fit() self._cl_device_ind = cl_device_ind self._recalculate = recalculate self._double_precision = double_precision self._cascade_subdir = cascade_subdir if self._batch_profile is None: raise RuntimeError('No suitable batch profile could be ' 'found for the directory {0}'.format(os.path.abspath(data_folder))) self._logger.info('Using MDT version {}'.format(__version__)) self._logger.info('Using batch profile: {0}'.format(self._batch_profile)) self._subjects = self._subjects_selection.get_selection(self._batch_profile.get_subjects()) self._logger.info('Subjects found: {0}'.format(self._batch_profile.get_subjects_count())) self._logger.info('Subjects to process: {0}'.format(len(self._subjects))) if self._cl_device_ind is not None: if not isinstance(self._cl_device_ind, collections.Iterable): self._cl_device_ind = [self._cl_device_ind] devices = get_cl_devices() mot.configuration.set_cl_environments([devices[ind] for ind in self._cl_device_ind]) def get_all_subjects_info(self): """Get a dictionary with the info about all the found subjects. This will return information about all the subjects found and will disregard the current ``subjects`` setting that limits the amount of subjects we will run. Returns: list of :class:`~mdt.batch_utils.SubjectInfo`: information about all available subjects """ return self._batch_profile.get_subjects() def get_subjects_info(self): """Get a dictionary with the info of the subject we will run computations on. This will return information about the subjects that we will use in the batch fitting. Returns: list of :class:`~mdt.batch_utils.SubjectInfo`: information about all subjects we will actually use """ return self._subjects def run(self): """Run the computations on the current dir with all the configured options. 
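
        A minimal usage sketch (the data path is hypothetical):

        .. code-block:: python

            from mdt.model_fitting import BatchFitting

            batch_fitting = BatchFitting('/data/my_study')
            batch_fitting.run()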
""" self._logger.info('Running computations on {0} subjects'.format(len(self._subjects))) run_func = _BatchFitRunner(self._models_to_fit, self._recalculate, self._cascade_subdir, self._cl_device_ind, self._double_precision, self._tmp_results_dir) for ind, subject in enumerate(self._subjects): self._logger.info('Going to process subject {}, ({} of {}, we are at {:.2%})'.format( subject.subject_id, ind + 1, len(self._subjects), ind / len(self._subjects))) run_func(subject) return self._subjects class _BatchFitRunner(object): def __init__(self, models_to_fit, recalculate, cascade_subdir, cl_device_ind, double_precision, tmp_results_dir): self._models_to_fit = models_to_fit self._recalculate = recalculate self._cascade_subdir = cascade_subdir self._cl_device_ind = cl_device_ind self._double_precision = double_precision self._logger = logging.getLogger(__name__) self._tmp_results_dir = tmp_results_dir def __call__(self, subject_info): """Run the batch fitting on the given subject. This is a module level function to allow for python multiprocessing to work. Args: subject_info (SubjectInfo): the subject information """ output_dir = subject_info.output_dir if all(model_output_exists(model, output_dir) for model in self._models_to_fit) and not self._recalculate: self._logger.info('Skipping subject {0}, output exists'.format(subject_info.subject_id)) return self._logger.info('Loading the data (DWI, mask and protocol) of subject {0}'.format(subject_info.subject_id)) problem_data = subject_info.get_problem_data() with self._timer(subject_info.subject_id): for model in self._models_to_fit: self._logger.info('Going to fit model {0} on subject {1}'.format(model, subject_info.subject_id)) try: model_fit = ModelFit(model, problem_data, output_dir, recalculate=self._recalculate, only_recalculate_last=True, cascade_subdir=self._cascade_subdir, cl_device_ind=self._cl_device_ind, double_precision=self._double_precision, tmp_results_dir=self._tmp_results_dir) model_fit.run() except InsufficientProtocolError as ex: self._logger.info('Could not fit model {0} on subject {1} ' 'due to protocol problems. {2}'.format(model, subject_info.subject_id, ex)) else: self._logger.info('Done fitting model {0} on subject {1}'.format(model, subject_info.subject_id)) @contextmanager def _timer(self, subject_id): start_time = timeit.default_timer() yield self._logger.info('Fitted all models on subject {0} in time {1} (h:m:s)'.format( subject_id, time.strftime('%H:%M:%S', time.gmtime(timeit.default_timer() - start_time)))) class ModelFit(object): def __init__(self, model, problem_data, output_folder, optimizer=None, recalculate=False, only_recalculate_last=False, cascade_subdir=False, cl_device_ind=None, double_precision=False, tmp_results_dir=True): """Setup model fitting for the given input model and data. To actually fit the model call run(). Args: model (:class:`~mdt.models.composite.DMRICompositeModel` or :class:`~mdt.models.cascade.DMRICascadeModelInterface`): the model we want to optimize. problem_data (:class:`~mdt.utils.DMRIProblemData`): the problem data object which contains the dwi image, the dwi header, the brain_mask and the protocol to use. output_folder (string): The full path to the folder where to place the output optimizer (:class:`mot.cl_routines.optimizing.base.AbstractOptimizer`): The optimization routine to use. If None, we create one using the configuration files. recalculate (boolean): If we want to recalculate the results if they are already present. 
            only_recalculate_last (boolean): This is only of importance when dealing with CascadeModels.
                If set to True we only recalculate the last element in the chain (if recalculate is set to True,
                that is). If set to False, we recalculate everything. This only holds for the first level of
                the cascade.
            cascade_subdir (boolean): if we want to create a subdirectory for the given model if it is a cascade
                model. Per default we output the maps of cascaded results in the same directory, this allows reusing
                cascaded results for other cascades (for example, if you cascade BallStick -> Noddi you can use the
                BallStick results also for BallStick -> Charmed). This flag disables that behaviour and instead
                outputs the results of a cascade model to a subdirectory for that cascade. This does not apply
                recursively.
            cl_device_ind (int): the index of the CL device to use. The index is from the list from the function
                get_cl_devices(). This can also be a list of device indices.
            double_precision (boolean): if we would like to do the calculations in double precision
            tmp_results_dir (str, True or None): The temporary dir for the calculations. Set to a string to use
                that path directly, set to True to use the config value, set to None to disable.
        """
        if isinstance(model, string_types):
            model = get_model(model)

        model.double_precision = double_precision

        self._model = model
        self._problem_data = problem_data
        self._output_folder = output_folder
        if cascade_subdir and isinstance(self._model, DMRICascadeModelInterface):
            self._output_folder += '/{}'.format(self._model.name)
        self._optimizer = optimizer
        self._recalculate = recalculate
        self._only_recalculate_last = only_recalculate_last
        self._logger = logging.getLogger(__name__)
        self._cl_device_indices = cl_device_ind
        self._model_names_list = []
        self._tmp_results_dir = get_temporary_results_dir(tmp_results_dir)

        if self._cl_device_indices is not None and not isinstance(self._cl_device_indices, collections.Iterable):
            self._cl_device_indices = [self._cl_device_indices]

        self._cl_envs = None
        self._load_balancer = None
        if self._cl_device_indices is not None:
            all_devices = get_cl_devices()
            self._cl_envs = [all_devices[ind] for ind in self._cl_device_indices]
            self._load_balancer = EvenDistribution()

        if not model.is_protocol_sufficient(self._problem_data.protocol):
            raise InsufficientProtocolError(
                'The provided protocol is insufficient for this model. '
                'The reported errors were: {}'.format(self._model.get_protocol_problems(
                    self._problem_data.protocol)))

    def run(self):
        """Run the model and return the resulting voxel estimates within the ROI.

        Returns:
            dict: The result maps for the current model or the last model in the cascade. This returns the results
                as 2d arrays with on the first dimension the voxels within the ROI and on the second axis the
                value(s) for all result maps.
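
        A minimal usage sketch (the output path is hypothetical and ``problem_data`` is assumed to be a
        :class:`~mdt.utils.DMRIProblemData` instance created elsewhere):

        .. code-block:: python

            model_fit = ModelFit('BallStick (Cascade)', problem_data, '/data/output')
            results = model_fit.run()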
""" return self._run(self._model, self._recalculate, self._only_recalculate_last) def _run(self, model, recalculate, only_recalculate_last): """Recursively calculate the (cascade) models Args: model: The model to fit, if cascade we recurse recalculate (boolean): if we recalculate only_recalculate_last: if we recalculate, if we only recalculate the last item in the first cascade """ self._model_names_list.append(model.name) if isinstance(model, DMRICascadeModelInterface): results = {} last_result = None while model.has_next(): sub_model = model.get_next(results) sub_recalculate = False if recalculate: if only_recalculate_last: if not model.has_next(): sub_recalculate = True else: sub_recalculate = True new_results = self._run(sub_model, sub_recalculate, recalculate) results.update({sub_model.name: new_results}) last_result = new_results self._model_names_list.pop() model.reset() return last_result return self._run_composite_model(model, recalculate, self._model_names_list) def _run_composite_model(self, model, recalculate, model_names): with mot.configuration.config_context(RuntimeConfigurationAction(cl_environments=self._cl_envs, load_balancer=self._load_balancer)): with per_model_logging_context(os.path.join(self._output_folder, model.name)): self._logger.info('Using MDT version {}'.format(__version__)) self._logger.info('Preparing for model {0}'.format(model.name)) self._logger.info('Current cascade: {0}'.format(model_names)) optimizer = self._optimizer or get_optimizer_for_model(model_names) if self._cl_device_indices is not None: all_devices = get_cl_devices() optimizer.cl_environments = [all_devices[ind] for ind in self._cl_device_indices] optimizer.load_balancer = EvenDistribution() processing_strategy = get_processing_strategy('optimization', model_names=model_names) processing_strategy.set_tmp_dir(self._tmp_results_dir) fitter = SingleModelFit(model, self._problem_data, self._output_folder, optimizer, processing_strategy, recalculate=recalculate) results = fitter.run() return results class SingleModelFit(object): def __init__(self, model, problem_data, output_folder, optimizer, processing_strategy, recalculate=False): """Fits a composite model. This does not accept cascade models. Please use the more general ModelFit class for all models, composite and cascade. Args: model (:class:`~mdt.models.composite.DMRICompositeModel`): An implementation of an composite model that contains the model we want to optimize. problem_data (:class:`~mdt.utils.DMRIProblemData`): The problem data object for the model output_folder (string): The full path to the folder where to place the output optimizer (:class:`mot.cl_routines.optimizing.base.AbstractOptimizer`): The optimization routine to use. processing_strategy (:class:`~mdt.processing_strategies.ModelProcessingStrategy`): the processing strategy to use recalculate (boolean): If we want to recalculate the results if they are already present. """ self.recalculate = recalculate self._model = model self._problem_data = problem_data self._output_folder = output_folder self._output_path = os.path.join(self._output_folder, self._model.name) self._optimizer = optimizer self._logger = logging.getLogger(__name__) self._processing_strategy = processing_strategy if not self._model.is_protocol_sufficient(problem_data.protocol): raise InsufficientProtocolError( 'The given protocol is insufficient for this model. 
' 'The reported errors where: {}'.format(self._model.get_protocol_problems(problem_data.protocol))) def run(self): """Fits the composite model.""" with per_model_logging_context(self._output_path): self._model.set_problem_data(self._problem_data) if self.recalculate: if os.path.exists(self._output_path): list(map(os.remove, glob.glob(os.path.join(self._output_path, '*.nii*')))) else: if model_output_exists(self._model, self._output_folder): maps = get_all_image_data(self._output_path) self._logger.info('Not recalculating {} model'.format(self._model.name)) return create_roi(maps, self._problem_data.mask) if not os.path.exists(self._output_path): os.makedirs(self._output_path) with self._logging(): results = self._processing_strategy.run( self._model, self._problem_data, self._output_path, self.recalculate, SimpleModelProcessingWorkerGenerator(lambda *args: FittingProcessingWorker(self._optimizer, *args))) self._write_protocol() return results def _write_protocol(self): write_protocol(self._problem_data.protocol, os.path.join(self._output_path, 'used_protocol.prtcl')) @contextmanager def _logging(self): """Adds logging information around the processing.""" minimize_start_time = timeit.default_timer() self._logger.info('Fitting {} model'.format(self._model.name)) yield run_time = timeit.default_timer() - minimize_start_time run_time_str = time.strftime('%H:%M:%S', time.gmtime(run_time)) self._logger.info('Fitted {0} model with runtime {1} (h:m:s).'.format(self._model.name, run_time_str)) PKjUpIQXn&EEmdt/masking.pyimport logging import os import numpy as np from scipy.ndimage import binary_dilation, generate_binary_structure from six import string_types from mdt.utils import load_brain_mask from mdt.protocols import load_protocol from mdt.nifti import load_nifti, write_nifti from mot.cl_routines.filters.median import MedianFilter import mot.configuration __author__ = 'Robbert Harms' __date__ = "2015-07-20" __maintainer__ = "Robbert Harms" __email__ = "robbert.harms@maastrichtuniversity.nl" def create_median_otsu_brain_mask(dwi_info, protocol, mask_threshold=0, **kwargs): """Create a brain mask using the given volume. Args: dwi_info (string or tuple or image): The information about the volume, either: - the filename of the input file - or a tuple with as first index a ndarray with the DWI and as second index the header - or only the image as an ndarray protocol (string or :class:`~mdt.protocols.Protocol`): The filename of the protocol file or a Protocol object mask_threshold (double): everything below this threshold is masked away **kwargs: the additional arguments for median_otsu. 
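
    A minimal usage sketch (the file names are hypothetical):

    .. code-block:: python

        from mdt.masking import create_median_otsu_brain_mask

        mask = create_median_otsu_brain_mask('dwi.nii.gz', 'protocol.prtcl', median_radius=4, numpass=4)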
Returns: ndarray: The created brain mask """ logger = logging.getLogger(__name__) logger.info('Starting calculating a brain mask') if isinstance(dwi_info, string_types): signal_img = load_nifti(dwi_info) dwi = signal_img.get_data() elif isinstance(dwi_info, (tuple, list)): dwi = dwi_info[0] else: dwi = dwi_info if isinstance(protocol, string_types): protocol = load_protocol(protocol) if len(dwi.shape) == 4: unweighted_ind = protocol.get_unweighted_indices() if len(unweighted_ind): unweighted = np.mean(dwi[..., unweighted_ind], axis=3) else: unweighted = np.mean(dwi, axis=3) else: unweighted = dwi.copy() brain_mask = median_otsu(unweighted, **kwargs) brain_mask = brain_mask > 0 if mask_threshold: brain_mask = np.mean(dwi[..., protocol.get_weighted_indices()], axis=3) * brain_mask > mask_threshold logger.info('Finished calculating a brain mask') return brain_mask def generate_simple_wm_mask(fa_fname, brain_mask_fname, out_fname, fa_threshold=0.3, median_radius=1, numpass=2): """Generate a simple white matter mask by thresholding the given FA map. Everything below the given FA threshold will be masked (not used). It also applies the regular brain mask to only retain values inside the brain. Args: fa_fname (str): the path to the FA file brain_mask_fname (str): the path to the general brain mask in use out_fname (str): where to write the outfile. fa_threshold (double): the FA threshold. Everything below this threshold is masked (set to 0). To be precise: where fa_data < fa_threshold set the value to 0. median_radius (int): the radius of the median filter numpass (int): the number of passes we apply the median filter """ logger = logging.getLogger(__name__) logger.info('Starting calculating a white matter mask using FA.') nib_container = load_nifti(fa_fname) fa_data = nib_container.get_data() fa_data[fa_data < fa_threshold] = 0 fa_data[fa_data > 0] = 1 mask = load_brain_mask(brain_mask_fname) median_filter = MedianFilter(median_radius) fa_data = median_filter.filter(fa_data, mask=mask, nmr_of_times=numpass) write_nifti(fa_data, nib_container.get_header(), out_fname) logger.info('Finished calculating a white matter mask.') def create_write_median_otsu_brain_mask(dwi_info, protocol, output_fname, **kwargs): """Write a brain mask using the given volume and output as the given volume. Args: dwi_info (string or tuple or ndarray): the filename of the input file or a tuple with as first index a ndarray with the DWI and as second index the header or only the image. protocol (string or :class:`~mdt.protocols.Protocol`): The filename of the protocol file or a Protocol object output_fname (string): the filename of the output file (the extracted brain mask) If None, no output is written. If ``dwi_info`` is an ndarray also no file is written (we don't have the header). Returns: ndarray: The created brain mask """ if not os.path.isdir(os.path.dirname(output_fname)): os.makedirs(os.path.dirname(output_fname)) if isinstance(dwi_info, string_types): signal_img = load_nifti(dwi_info) dwi = signal_img.get_data() header = signal_img.get_header() else: dwi = dwi_info[0] header = dwi_info[1] mask = create_median_otsu_brain_mask(dwi, protocol, **kwargs) write_nifti(mask, header, output_fname) return mask def median_otsu(unweighted_volume, median_radius=4, numpass=4, dilate=1): """ Simple brain extraction tool for dMRI data. This function is inspired from the ``median_otsu`` function from ``dipy`` and is copied here to remove a dependency. 
It uses a median filter smoothing of the ``unweighted_volume`` automatic histogram Otsu thresholding technique, hence the name *median_otsu*. This function is inspired from Mrtrix's bet which has default values ``median_radius=3``, ``numpass=2``. However, from tests on multiple 1.5T and 3T data. From GE, Philips, Siemens, the most robust choice is ``median_radius=4``, ``numpass=4``. Args: unweighted_volume (ndarray): ndarray of the unweighted volumes brain volumes median_radius (int): Radius (in voxels) of the applied median filter (default 4) numpass (int): Number of pass of the median filter (default 4) dilate (None or int): optional number of iterations for binary dilation Returns: ndarray: a 3D ndarray with the binary brain mask """ b0vol = unweighted_volume logger = logging.getLogger(__name__) logger.info('We will use a single precision float type for the calculations.'.format()) for env in mot.configuration.get_load_balancer().get_used_cl_environments(mot.configuration.get_cl_environments()): logger.info('Using device \'{}\'.'.format(str(env))) m = MedianFilter(median_radius) b0vol = m.filter(b0vol, nmr_of_times=numpass) thresh = _otsu(b0vol) mask = b0vol > thresh if dilate is not None: cross = generate_binary_structure(3, 1) mask = binary_dilation(mask, cross, iterations=dilate) return mask def _otsu(image, nbins=256): """ Return threshold value based on Otsu's method. Copied from scikit-image to remove dependency. Parameters ---------- image : array Input image. nbins : int Number of bins used to calculate histogram. This value is ignored for integer arrays. Returns ------- threshold : float Threshold value. """ hist, bin_centers = np.histogram(image, nbins) hist = hist.astype(np.float) # class probabilities for all possible thresholds weight1 = np.cumsum(hist) weight2 = np.cumsum(hist[::-1])[::-1] # class means for all possible thresholds mean1 = np.cumsum(hist * bin_centers[1:]) / weight1 mean2 = (np.cumsum((hist * bin_centers[1:])[::-1]) / weight2[::-1])[::-1] # Clip ends to align class 1 and class 2 variables: # The last value of `weight1`/`mean1` should pair with zero values in # `weight2`/`mean2`, which do not exist. variance12 = weight1[:-1] * weight2[1:] * (mean1[:-1] - mean2[1:])**2 idx = np.argmax(variance12) threshold = bin_centers[:-1][idx] return threshold PKUpI,i( ( mdt/model_sampling.pyfrom contextlib import contextmanager import logging import os import timeit import time from mdt.utils import model_output_exists, load_samples from mdt.processing_strategies import SimpleModelProcessingWorkerGenerator, SamplingProcessingWorker from mdt.exceptions import InsufficientProtocolError __author__ = 'Robbert Harms' __date__ = "2015-05-01" __maintainer__ = "Robbert Harms" __email__ = "robbert.harms@maastrichtuniversity.nl" def sample_composite_model(model, problem_data, output_folder, sampler, processing_strategy, recalculate=False, store_samples=True): """Sample a composite model. Args: model (:class:`~mdt.models.composite.DMRICompositeModel`): a composite model to sample problem_data (:class:`~mdt.utils.DMRIProblemData`): The problem data object with which the model is initialized before running output_folder (string): The full path to the folder where to place the output sampler (:class:`mot.cl_routines.sampling.base.AbstractSampler`): The sampling routine to use. processing_strategy (:class:`~mdt.processing_strategies.ModelProcessingStrategy`): the processing strategy recalculate (boolean): If we want to recalculate the results if they are already present. 
        store_samples (boolean): if set to False we will store none of the samples. Use this if you are only
            interested in the volume maps and not in the entire sample chain.
    """
    if not model.is_protocol_sufficient(problem_data.protocol):
        raise InsufficientProtocolError(
            'The provided protocol is insufficient for this model. '
            'The reported errors were: {}'.format(model.get_protocol_problems(problem_data.protocol)))

    logger = logging.getLogger(__name__)

    if not recalculate:
        if model_output_exists(model, output_folder + '/volume_maps/', append_model_name_to_path=False):
            logger.info('Not recalculating {} model'.format(model.name))
            return load_samples(output_folder)

    if not os.path.isdir(output_folder):
        os.makedirs(output_folder)

    model.set_problem_data(problem_data)

    with _log_info(logger, model.name):
        worker_generator = SimpleModelProcessingWorkerGenerator(
            lambda *args: SamplingProcessingWorker(sampler, store_samples, *args))
        return processing_strategy.run(model, problem_data, output_folder, recalculate, worker_generator)


@contextmanager
def _log_info(logger, model_name):
    minimize_start_time = timeit.default_timer()
    logger.info('Sampling {} model'.format(model_name))
    yield
    run_time = timeit.default_timer() - minimize_start_time
    run_time_str = time.strftime('%H:%M:%S', time.gmtime(run_time))
    logger.info('Sampled {0} model with runtime {1} (h:m:s).'.format(model_name, run_time_str))


# ---- mdt/deferred_mappings.py ----
import collections
import copy

__author__ = 'Robbert Harms'
__date__ = "2016-11-10"
__maintainer__ = "Robbert Harms"
__email__ = "robbert.harms@maastrichtuniversity.nl"


class DeferredActionDict(collections.MutableMapping):

    def __init__(self, func, items, memoize=True):
        """Applies the given function on the given items at the moment of request.

        The moment one of the keys of this dict class is requested, we apply the given function on the given items
        and return the result of that function. The advantage of this class is that it defers an expensive operation
        until it is needed.

        Args:
            func (function): the callback function to apply on the given items at request, with signature:

                .. code-block:: python

                    def callback(key, value)

            items (dict): the items on which we operate
            memoize (boolean): if True we memoize the function output internally. If False we apply the given
                function on every request.
        """
        self._func = func
        self._items = copy.copy(items)
        self._memoize = memoize
        self._memoized = {}

    def __delitem__(self, key):
        del self._items[key]
        if key in self._memoized:
            del self._memoized[key]

    def __getitem__(self, key):
        if not self._memoize:
            return self._func(key, self._items[key])
        if key not in self._memoized:
            self._memoized[key] = self._func(key, self._items[key])
        return self._memoized[key]

    def __contains__(self, key):
        try:
            self._items[key]
        except KeyError:
            return False
        else:
            return True

    def __iter__(self):
        for key in self._items.keys():
            yield key

    def __len__(self):
        return len(self._items)

    def __setitem__(self, key, value):
        self._memoized[key] = value


class DeferredFunctionDict(collections.MutableMapping):

    def __init__(self, items, memoize=True):
        """The items should contain a dictionary of functions that we apply at the moment of request.

        The moment one of the keys of this dict class is requested, we apply the function stored in the items dict
        for that key and return the result of that function. The advantage of this class is that it defers an
        expensive operation until it is needed.
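
        A minimal illustrative sketch of the deferred evaluation:

        .. code-block:: python

            from mdt.deferred_mappings import DeferredFunctionDict

            items = {'expensive': lambda: sum(range(10**6))}
            deferred = DeferredFunctionDict(items)
            value = deferred['expensive']  # the function is only evaluated here (and its result memoized)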
Args: items (dict): the items on which we operate, each value should contain a function with no parameters that we run to return the results. memoize (boolean): if true we memorize the function output internally. If False we apply the item's function on every request. """ self._items = copy.copy(items) self._memoize = memoize self._memoized = {} def __delitem__(self, key): del self._items[key] if key in self._memoized: del self._memoized[key] def __getitem__(self, key): if not self._memoize: return self._items[key]() if key not in self._memoized: self._memoized[key] = self._items[key]() return self._memoized[key] def __contains__(self, key): try: self._items[key] except KeyError: return False else: return True def __iter__(self): for key in self._items.keys(): yield key def __len__(self): return len(self._items) def __setitem__(self, key, value): self._memoized[key] = value class DeferredActionTuple(collections.Sequence): def __init__(self, func, items, memoize=True): """Applies the given function on the given items at moment of request. On the moment one of the elements is requested we apply the given function on the given items and return the result of that function. The advantage of this class is that it defers an expensive operation until it is needed. Args: func (function): the callback function to apply on the given items at request, with signature: .. code-block: python def callback(index, value) items (list, tuple): the items on which we operate memoize (boolean): if true we memorize the function output internally. If False we apply the given function on every request. """ self._func = func self._items = copy.copy(items) self._memoize = memoize self._memoized = {} def __getitem__(self, index): if not self._memoize: return self._func(index, self._items[index]) if index not in self._memoized: self._memoized[index] = self._func(index, self._items[index]) return self._memoized[index] def __len__(self): return len(self._items) PKjUpI mdt/user_script_info.pyimport datetime import hashlib import os from six import string_types __author__ = 'Robbert Harms' __date__ = "2016-08-04" __maintainer__ = "Robbert Harms" __email__ = "robbert.harms@maastrichtuniversity.nl" class SaveUserScriptInfo(object): def __init__(self): """Base class for writing the user info to a file. """ def write(self, output_file): """Write the information about the script the user is executing to a output information file. Args: output_file (str): the file we will write the output to """ raise NotImplementedError def easy_save_user_script_info(save_user_script_info, output_file, file_path_from_stack): """Handy routine for saving the user script info from multiple sources. Args: save_user_script_info (boolean, str or SaveUserScriptInfo): The info we need to save about the script the user is currently executing. If True (default) we use the stack to lookup the script the user is executing and save that using a SaveFromScript saver. If a string is given we use that filename again for the SaveFromScript saver. If False or None, we do not write any information. If a SaveUserScriptInfo is given we use that directly. 
output_file (str): the output folder for the output file file_path_from_stack (str): the file path from the stack inspection """ if save_user_script_info: if isinstance(save_user_script_info, SaveUserScriptInfo): save_user_script_info.write(output_file) elif isinstance(save_user_script_info, string_types): SaveFromScript(save_user_script_info).write(output_file) elif save_user_script_info is True: SaveFromScript(file_path_from_stack).write(output_file) class SaveFromScript(SaveUserScriptInfo): def __init__(self, user_script_path): super(SaveFromScript, self).__init__() self._user_script_path = user_script_path def write(self, output_file): """Write the information about the script the user is executing to a output information file. This function relies on the caller of the script to provide the filename of the script he user is currently executing. You can do this using for example a stack lookup: .. code-block:: python stack()[1][0].f_globals.get('__file__') Args: output_file (str): the file we will write the output to """ output_file_existed = os.path.exists(output_file) utc_time = datetime.datetime.utcnow() class_hash_name = hashlib.sha1() class_hash_name.update(str(utc_time).encode('utf-8')) with open(output_file, 'a') as output_file: if not output_file_existed: output_file.write('from mdt.user_script_info import UserScriptInfo') output_file.write('\n\n\nclass Script_{}(UserScriptInfo):\n\n'.format(class_hash_name.hexdigest())) output_file.write(' '*4 + 'date = "{}"\n'.format(utc_time)) output_file.write(' '*4 + 'filename = "{}"\n\n'.format(self._user_script_path)) output_file.write(' ' * 4 + '@staticmethod\n'.format(self._user_script_path)) output_file.write(' ' * 4 + 'def body():\n'.format(self._user_script_path)) with open(self._user_script_path, 'r') as input_file: for line in input_file: output_file.write(' '*8 + line) class UserScriptInfo(object): pass PKjUpIc mdt/shell_utils.pyimport argparse import os import textwrap import argcomplete import sys __author__ = 'Robbert Harms' __date__ = "2015-10-16" __maintainer__ = "Robbert Harms" __email__ = "robbert.harms@maastrichtuniversity.nl" def get_argparse_extension_checker(choices, dir_allowed=False): """Get an :class:`argparge.Action` class that can check for correct extensions. Returns: argparse.Action: a class (not an instance) of an argparse action. """ class Act(argparse.Action): def __call__(self, parser, namespace, fname, option_string=None): is_valid = any(map(lambda choice: fname[-len(choice):] == choice, choices)) if not is_valid and dir_allowed and os.path.isdir(fname): is_valid = True if is_valid: setattr(namespace, self.dest, fname) else: option_string = '({})'.format(option_string) if option_string else '' parser.error("File doesn't end with one of {}{}".format(choices, option_string)) return Act class BasicShellApplication(object): @classmethod def console_script(cls): """Method used to start the command when launched from a distutils console script.""" cls().start(sys.argv[1:]) def start(self, run_args=None): """ Starts a command and registers single handlers. Args: run_args (:class:`list`): the list of run arguments. If None we use sys.argv[1:]. """ if run_args is None: run_args = sys.argv[1:] parser = self._get_arg_parser() argcomplete.autocomplete(parser) args = parser.parse_args(run_args) self.run(args) def run(self, args): """Run the application with the given arguments. Args: args: the arguments from the argparser. """ def _get_arg_parser(self): """Create the auto parser. 
This should be implemented by the implementing class. To enable autocomplete in your shell please execute activate-global-python-argcomplete in your shell. """ description = textwrap.dedent(""" Basic parser introduction here. Can be multiline. """) epilog = textwrap.dedent(""" Examples of use: mdt-model-fit "BallStick (Cascade)" data.nii.gz data.prtcl roi_mask_0_50.nii.gz """) parser = argparse.ArgumentParser(description=description, epilog=epilog, formatter_class=argparse.RawTextHelpFormatter) return parser def _get_citation_message(self): return get_citation_message() def get_citation_message(): """The citation message used in the shell scripts. Returns: str: the citation message for use in the description of every shell script """ return textwrap.dedent(""" If you use any of the scripts/functions/tools from MDT in your research, please cite the following paper: """) PKjUpIlW >3>3mdt/simulations.py"""This module contains some functions that allow for generating simulated data. The simulated data is on the level of diffusion MRI models, not on the level of simulated physical molecule interaction as found in for example Camino. """ import numbers import os import numpy as np import mdt from mdt.nifti import write_nifti from mdt.components_loader import NoiseSTDCalculatorsLoader from mdt.utils import MockDMRIProblemData from mot.cl_routines.mapping.calculate_model_estimates import CalculateModelEstimates __author__ = 'Robbert Harms' __date__ = "2016-03-17" __maintainer__ = "Robbert Harms" __email__ = "robbert.harms@maastrichtuniversity.nl" def create_parameters_cube(primary_parameter_index, randomize_parameter_indices, grid_size, default_values, lower_bounds, upper_bounds, dtype=np.float32, seed=None): """Create a simple 3d parameters cube. On the first dimension we put a linearly spaced primary parameter and on the second dimension we randomly change the other indicated parameters. The 3d dimension holds the parameter realizations for the other dimensions. Args: primary_parameter_index (int): the index of the primary parameter for the values on the first axis randomize_parameter_indices (list of int): the indices of the parameter we sample randomly using a uniform distribution on the half open interval between the [lower, upper) bounds. See np.random.uniform. grid_size (tuple of int): the size of the generated grid, the first value refers to the first dimension, the second to the second dimension. default_values (list of float): the default values for each of the parameters in the model lower_bounds (list of float): the lower bounds used for the generation of the grid upper_bounds (list of float): the upper bounds used for the generation of the grid dtype (dtype): the numpy data type for this grid seed (int): if given the seed for the random number generator, this makes the random parameters predictable. Returns: ndarray: a three dimensional cube for the parameters """ grid = permutate_parameters([primary_parameter_index], default_values, lower_bounds, upper_bounds, grid_size[0], dtype=dtype) grid = np.reshape(grid, (grid.shape[0], 1, grid.shape[1])) grid = np.repeat(grid, grid_size[1], axis=1) random_state = np.random.RandomState(seed) for param_ind in randomize_parameter_indices: grid[:, :, param_ind] = random_state.uniform(lower_bounds[param_ind], upper_bounds[param_ind], size=grid_size) return grid def add_noise_realizations(signal_cube, nmr_noise_realizations, noise_sigma, seed=None): """Add noise realizations to a signal cube. 
The given signal cube should be a 3d matrix that contains on the last axis the signals per protocol line. All the other axis normally represent the variations of parameter values that generated the last dimension. This function inserts a new 3th dimension to that matrix, one to contain the variation over noise realisations. It will then make the signals rician distributed per index on that new 3th dimension. Args: signal_cube (ndarray): the 3d matrix with the signals nmr_noise_realizations (int): the number of noise realizations to use on the new third axis noise_sigma (float): the noise level, given by: noise_level = unweighted_signal_height / SNR seed (int): if given, the seed for the random number generation Returns: ndarray: a 4d cube with on the newly added third dimension the variating noise realizations """ signals = np.reshape(signal_cube, signal_cube.shape[0: 2] + (1, signal_cube.shape[2])) signals = np.repeat(signals, nmr_noise_realizations, axis=2) return make_rician_distributed(signals, noise_sigma, seed=seed) def simulate_signals_param_cube(model_name, protocol, parameters_cube): """Generate the signal for the given model for a generated parameters cube. Args: model_name (str): the name of the model we want to generate the values for protocol (Protocol): the protocol object we use for generating the signals parameters_cube (ndarray): the 3d matrix with the parameters for every problem instance Returns: signal estimates as a cube """ parameters = np.reshape(parameters_cube, (-1, parameters_cube.shape[-1])) simulated_signals = simulate_signals(model_name, protocol, parameters) return np.reshape(simulated_signals, parameters_cube.shape[0:2] + (simulated_signals.shape[-1], )) def permutate_parameters(var_params_ind, default_values, lower_bounds, upper_bounds, grid_size, dtype=np.float32): """Generate the combination of parameters for a simulation. This is useful if you want to generate a list of different parameter combinations. You do not need to use this if you want to simulate only one parameter. This generates for each of the parameters of interest a linearly indexed range of parameter values starting with the lower bounds and ending at the upper bound (both inclusive). The length of the list is determined by the grid size per parameter. Next we create a matrix with the cartesian product of each of these parameters of interest and with all the other parameters set to their default value. Args: var_params_ind (list of int): the list of indices into the parameters. This indices the parameters we want to vary. default_values (list of float): the default values for each of the parameters in the model lower_bounds (list of float): the lower bounds used for the generation of the grid upper_bounds (list of float): the upper bounds used for the generation of the grid grid_size (int or list of int): the size of the grid. If a single int is given we assume a grid equal in all dimensions. If a list is given it should match the number of variable parameter indices and should contain a grid size for each parameter. dtype (dtype): the data type of the result matrix Returns: ndarray: the matrix with all combinations of the parameters of interest and with all other parameters set to the given default value. 
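As a small hypothetical example, varying the first two of three parameters on a 3 x 2 grid:

.. code-block:: python

    grid = permutate_parameters([0, 1],
                                default_values=[0.1, 1.0, 2.5],
                                lower_bounds=[0.0, 0.5, 0.0],
                                upper_bounds=[1.0, 1.5, 5.0],
                                grid_size=[3, 2])
    # grid has shape (6, 3); the third parameter stays at its default of 2.5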
""" if isinstance(grid_size, numbers.Number): grid_size = [int(grid_size)] * len(var_params_ind) result = np.reshape(default_values, [len(lower_bounds), 1]).astype(dtype) repeat_mult = 1 for linear_ind, params_ind in enumerate(var_params_ind): result = np.tile(result, grid_size[linear_ind]) result[params_ind] = np.repeat(np.linspace(lower_bounds[params_ind], upper_bounds[params_ind], grid_size[linear_ind]), repeat_mult) repeat_mult *= grid_size[linear_ind] return np.transpose(result) def get_permuted_indices(nmr_var_params, grid_size): """Get for every parameter of interest the locations per parameter value. This is useful if you want to generate a list of different parameter combinations. You do not need to use this if you want to simulate only one parameter. Suppose you have three variable parameters and you generate all permutations using permutate_parameters(), then you might want to know for any given parameter and for any value of that parameter at which indices that parameter occurs. This function tells you where. Note, we could have taken the nmr_var_params from the grid size, but the grid size can be a single scalar for all params. Args: nmr_var_params (int): the number of variable parameters grid_size (int or list of int): the grid size for all or per parameter Returns: ndarray: per permutation the value index indexing the parameter value """ indices = np.zeros((nmr_var_params, 1), dtype=np.int64) repeat_mult = 1 for ind in range(nmr_var_params): indices = np.tile(indices, grid_size[ind]) indices[ind, :] = np.repeat(np.arange(0, grid_size[ind]), repeat_mult) repeat_mult *= grid_size[ind] return np.transpose(indices) def simulate_signals(model_name, protocol, parameters): """Generate the signal for the given model for each of the parameters. This function only accepts a 2d list of parameters. For a generated parameters cube use function simulate_signals_param_cube. Args: model_name (str): the name of the model we want to generate the values for protocol (Protocol): the protocol object we use for generating the signals parameters (ndarray): the 2d matrix with the parameters for every problem instance Returns: signal estimates """ problem_data = MockDMRIProblemData(protocol, None, None, None) model = mdt.get_model(model_name) model.set_problem_data(problem_data) signal_evaluate = CalculateModelEstimates() return signal_evaluate.calculate(model, parameters) def make_rician_distributed(signals, noise_level, seed=None): """Make the given signal Rician distributed. To calculate the noise level divide the signal of the unweighted volumes by the SNR you want. For example, for a unweighted signal b0=1e4 and a desired SNR of 20, you need an noise level of 1e4/20 = 500. Args: signals: the signals to make Rician distributed noise_level: the level of noise to add. The actual Rician stdev depends on the signal. See ricestat in the mathworks library. The noise level can be calculated using b0/SNR. seed (int): if given, the seed for the random number generation Returns: ndarray: Rician distributed signals. """ random_state = np.random.RandomState(seed) x = noise_level * random_state.normal(size=signals.shape) + signals y = noise_level * random_state.normal(size=signals.shape) return np.sqrt(np.power(x, 2), np.power(y, 2)).astype(signals.dtype) def list_2d_to_4d(item_list): """Convert a 2d signal/parameter list to a 4d volume. This appends two singleton volumes to the signal list to make it 4d. 
Args: item_list (2d ndarray): the list with on the first dimension every problem and on the second the signals per protocol line. Returns: ndarray: 4d ndarray of size (1, 1, n, p) where n is the number of problems and p the length of the protocol. """ return np.reshape(item_list, (1, 1) + item_list.shape) def save_data_volume(file_name, data): """Save the 3d/4d volume to the given file. Args: file_name (str): the output file name. If the directory does not exist we create one. data (ndarray): the 4d array to save. """ if not os.path.isdir(os.path.dirname(file_name)): os.makedirs(os.path.dirname(file_name)) write_nifti(data, None, file_name, affine=np.eye(4)) def save_2d_list_as_4d_volume(file_name, data): """Save the given 2d list with values as a 4d volume. This is a convenience function that calls list_2d_to_4d and volume4d_to_file after each other. Args: file_name (str): the output file name. If the directory does not exist we create one. data (ndarray): the 2d array to save """ save_data_volume(file_name, list_2d_to_4d(data)) def get_unweighted_volumes(signals, protocol): """Get the signals and protocol for only the unweighted signals. Args: signals (ndarray): the matrix with for every problem (first dimension) the volumes (second dimension) protocol (Protocol): the protocol object Returns: tuple: unweighted signals and the protocol for only the unweighted indices. """ unweighted_indices = protocol.get_unweighted_indices() unweighted_signals = signals[:, unweighted_indices] unweighted_protocol = protocol.get_new_protocol_with_indices(unweighted_indices) return unweighted_signals, unweighted_protocol def estimate_noise_std(simulated_noisy_signals, protocol, noise_estimator_name='AllUnweightedVolumes'): """Estimate the noise on the noisy simulated dataset. This routine tries to estimate the noise level of the added noise. It first fits an S0 model to the data with a noise std of 1. It then removes this estimated S0 from the given signal and tries to estimate the noise std on the result. Args: simulated_noisy_signals (ndarray): the list with per problem the noisy simulated signal protocol (Protocol): the protocol object noise_estimator_name (str): the name of the noise estimator to use Returns: float: the noise standard deviation """ mask = np.ones(simulated_noisy_signals.shape[0:3]) loader = NoiseSTDCalculatorsLoader() cls = loader.get_class(noise_estimator_name) calculator = cls(mdt.load_problem_data(simulated_noisy_signals, protocol, mask)) return calculator.estimate() PK5rIMByymdt/exceptions.py__author__ = 'Robbert Harms' __date__ = "2016-06-25" __maintainer__ = "Robbert Harms" __email__ = "robbert.harms@maastrichtuniversity.nl" class ProtocolIOError(Exception): """Custom exception class for protocol input output errors. This can be raised if a protocol is inconsistent or incomplete. It should not be raised for general IO errors, use the IO exception for that. """ class InsufficientProtocolError(Exception): """Indicates that the protocol constains insufficient information for fitting a specific model. This can be raised if a model misses a column it needs in the protocol, or if there are not enough shells, etc. """ class NoiseStdEstimationNotPossible(Exception): """An exception that can be raised by any ComplexNoiseStdEstimator. This indicates that the noise std can not be estimated by the estimation routine. """ class NonUniqueComponent(Exception): """Raised when there are two components of the same type with the same name in the dynamically loadable components. 
If this is raised, please double check your components for items with non-unique names. """ PKd\I$oggmdt/batch_utils.pyimport glob import logging import os import shutil import six from six import string_types from mdt.components_loader import BatchProfilesLoader, get_model from mdt.data_loaders.protocol import ProtocolLoader from mdt.masking import create_write_median_otsu_brain_mask from mdt.models.cascade import DMRICascadeModelInterface from mdt.protocols import load_protocol, auto_load_protocol from mdt.utils import split_image_path, AutoDict, load_problem_data from mdt.nifti import load_nifti __author__ = 'Robbert Harms' __date__ = "2015-08-21" __maintainer__ = "Robbert Harms" __email__ = "robbert.harms@maastrichtuniversity.nl" class BatchProfile(object): def __init__(self): """Batch profiles encapsulate information about the subjects and modelling settings. Suppose you have a directory full of subjects that you want to analyze with a few models. One way to do that is to write some scripts yourself that walk through the directory and fit the models to the subjects. The other way would be to implement a :class:`BatchProfile` that contains details about your directory structure and let :func:`mdt.batch_fit` fetch all the subjects for you. Batch profiles contain a list with subject information (see :class:`SubjectInfo`) and a list of models we wish to apply to these subjects. Furthermore each profile should support some functionality that checks if this profile is suitable for a given directory. Using those functions the :func:`mdt.batch_fit` can try to auto-recognize the batch profile to use based on the profile that is suitable and returns the most subjects. """ self._root_dir = '' def set_root_dir(self, root_dir): """Set the root dir. That is, the directory we search for subjects. This is function level dependency injection. Args: root_dir (str): the root dir to use """ self._root_dir = root_dir def get_root_dir(self): """Get the root dir this profile uses. Returns: str: the root dir this batch profile uses. """ return self._root_dir def get_models_to_fit(self): """Get the list of models we want to fit to every found subject. The models can either be real model objects, or strings with the model names. Returns: :class:`list`: the list of models we want to fit to the subjects """ def get_subjects(self): """Get the information about all the subjects in the current folder. Returns: list of :class`SubjectInfo`: the information about the found subjects """ def profile_suitable(self): """Check if this directory can be used to use subjects from using this batch fitting profile. This is used for auto detecting the best batch fitting profile to use for loading subjects from the given root dir. Returns: boolean: true if this batch fitting profile can use the subjects in the current root directory, false otherwise. """ def get_subjects_count(self): """Get the number of subjects this batch fitting profile can use from the current root directory. Returns: int: the number of subjects this batch fitting profile can use from the current root directory. """ class SimpleBatchProfile(BatchProfile): def __init__(self): """A base class for quickly implementing a batch profile. Implementing classes need only implement the method :meth:`_get_subjects`, then this class will handle the rest. 
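A minimal sketch of an implementing class (the directory layout and file names are assumptions made for this example):

.. code-block:: python

    import glob
    import os

    class ExampleProfile(SimpleBatchProfile):

        def _get_subjects(self):
            subjects = []
            for subject_dir in sorted(glob.glob(os.path.join(self._root_dir, '*'))):
                subject_id = os.path.basename(subject_dir)
                dwi_fname = os.path.join(subject_dir, 'data.nii.gz')
                if not os.path.isfile(dwi_fname):
                    continue
                protocol_loader = self._autoload_protocol(
                    subject_dir, bvecs_to_try=('bvecs',), bvals_to_try=('bvals',))
                mask_fname = self._get_first_existing_nifti(('mask',), prepend_path=subject_dir)
                output_dir = self._get_subject_output_dir(subject_id, mask_fname)
                subjects.append(SimpleSubjectInfo(subject_id, dwi_fname, protocol_loader,
                                                  mask_fname, output_dir))
            return subjects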
""" super(SimpleBatchProfile, self).__init__() self._subjects_found = None self._output_base_dir = 'output' self._output_sub_dir = None self._append_mask_name_to_output_sub_dir = True self.models_to_fit = ('BallStick_r3 (Cascade)', 'Tensor (Cascade)', 'NODDI (Cascade)', 'CHARMED_r1 (Cascade)', 'CHARMED_r2 (Cascade)', 'CHARMED_r3 (Cascade)') @property def output_base_dir(self): return self._output_base_dir @output_base_dir.setter def output_base_dir(self, output_base_dir): self._output_base_dir = output_base_dir self._subjects_found = None @property def append_mask_name_to_output_sub_dir(self): return self._append_mask_name_to_output_sub_dir @append_mask_name_to_output_sub_dir.setter def append_mask_name_to_output_sub_dir(self, append_mask_name_to_output_sub_dir): self._append_mask_name_to_output_sub_dir = append_mask_name_to_output_sub_dir @property def output_sub_dir(self): return self._output_sub_dir @output_sub_dir.setter def output_sub_dir(self, output_sub_dir): self._output_sub_dir = output_sub_dir self._subjects_found = None def get_models_to_fit(self): return self.models_to_fit def get_subjects(self): if not self._subjects_found: self._subjects_found = self._get_subjects() return self._subjects_found def profile_suitable(self): if not self._subjects_found: self._subjects_found = self._get_subjects() return len(self._subjects_found) > 0 def get_subjects_count(self): if not self._subjects_found: self._subjects_found = self._get_subjects() return len(self._subjects_found) def _autoload_noise_std(self, subject_id, file_path=None): """Try to autoload the noise standard deviation from a noise_std file. Args: subject_id (str): the subject for which to use the noise std. file_path (str): optionally provide the exact file to use. Returns: float or None: a float if a float could be loaded from a file noise_std, else nothing. """ file_path = file_path or os.path.join(self._root_dir, subject_id, 'noise_std') noise_std_files = glob.glob(file_path + '*') if len(noise_std_files): with open(noise_std_files[0], 'r') as f: return float(f.read()) return None def _get_subjects(self): """Get the matching subjects from the given root dir. This is the only function that should be implemented to get up and running. Returns: list of SubjectInfo: the information about the found subjects """ return [] def _get_subject_output_dir(self, subject_id, mask_fname, subject_base_dir=None): """Helper function for generating the output directory for a subject. Args: subject_id (str): the id of the subject to use mask_fname (str): the name of the mask we are using for this subject subject_base_dir (str): the base directory for this subject, defaults to self._root_dir / subject_id / self.output_base_dir Returns: str: the path for the output directory """ output_dir = subject_base_dir or os.path.join(self._root_dir, subject_id, self.output_base_dir) if self.output_sub_dir: output_dir = os.path.join(output_dir, self.output_sub_dir) if self._append_mask_name_to_output_sub_dir and mask_fname: output_dir = os.path.join(output_dir, split_image_path(mask_fname)[1]) return output_dir def _get_first_existing_file(self, filenames, default=None, prepend_path=None): """Tries a list of filenames and returns the first filename in the list that exists. 
Args: filenames (iterator): the list of filenames to search for existence default (str): the default value returned if none of the filenames existed prepend_path (str): the path to optionally prepend to every file before checking existence Returns: str: the filename of the first existing file, if prepend path is set it is included. """ for fname in filenames: if fname: if prepend_path: fname = os.path.join(prepend_path, fname) if os.path.isfile(fname): return fname return default def _get_first_existing_nifti(self, filenames, default=None, prepend_path=None): """Tries a list of filenames and returns the first filename in the list that exists. Additional to the method :meth:`_get_first_existing_file`, this additionally tries to see for every filename if a file with extension '.nii' or with '.nii.gz' exists (in that order) for that filename. If so, the path with the added extension is returned. Args: filenames (iterator): the list of filenames to search for existence, does additional extension lookup per filename default (str): the default value returned if none of the filenames existed prepend_path (str): the path to optionally prepend to every file before checking existence Returns: str: the filename of the first existing file, can contain an extra extension for the returned filename. """ for fname in filenames: resolve_extension = self._get_first_existing_file([fname, fname + '.nii', fname + '.nii.gz'], prepend_path=prepend_path) if resolve_extension: return resolve_extension return default def _autoload_protocol(self, path, protocols_to_try=(), bvecs_to_try=(), bvals_to_try=(), protocol_columns=None): prtcl_fname = self._get_first_existing_file(protocols_to_try, prepend_path=path) bval_fname = self._get_first_existing_file(bvals_to_try, prepend_path=path) bvec_fname = self._get_first_existing_file(bvecs_to_try, prepend_path=path) return BatchFitProtocolLoader( path, protocol_fname=prtcl_fname, bvec_fname=bvec_fname, bval_fname=bval_fname, protocol_columns=protocol_columns) class SubjectInfo(object): @property def subject_id(self): """Get the ID of this subject. Returns: str: the id of this subject """ return '' @property def output_dir(self): """Get the output folder for this subject. Returns: str: the output folder """ return '' def get_problem_data(self): """Get the DMRIProblemData for this subject. This is the data we will use during model fitting. Returns: :class:`~mdt.utils.DMRIProblemData`: the problem data to use during model fitting """ def get_mask_filename(self): """Get the filename of the mask to use. Returns: str: the filename of the mask to use """ class SimpleSubjectInfo(SubjectInfo): def __init__(self, subject_id, dwi_fname, protocol_loader, mask_fname, output_dir, gradient_deviations=None, use_gradient_deviations=True, noise_std=None): """This class contains all the information about found subjects during batch fitting. It is returned by the method get_subjects() from the class BatchProfile. Args: subject_id (str): the subject id dwi_fname (str): the filename with path to the dwi image protocol_loader (ProtocolLoader): the protocol loader that can use us the protocol mask_fname (str): the filename of the mask to use. If None a mask is auto generated. 
output_dir (str): the output directory gradient_deviations (str) if given, the path to the gradient deviations use_gradient_deviations (boolean): if we use the gradient deviations or not noise_std (float, ndarray, str): either None for automatic noise detection or a float with the noise STD to use during fitting or an ndarray with one value per voxel. """ self._subject_id = subject_id self._dwi_fname = dwi_fname self._protocol_loader = protocol_loader self._mask_fname = mask_fname self._output_dir = output_dir self._gradient_deviations = gradient_deviations self._use_gradient_deviations = use_gradient_deviations self._noise_std = noise_std if self._mask_fname is None: self._mask_fname = os.path.join(self.output_dir, 'auto_generated_mask.nii.gz') @property def subject_id(self): return self._subject_id @property def output_dir(self): return self._output_dir def get_problem_data(self): protocol = self._protocol_loader.get_protocol() brain_mask_fname = self.get_mask_filename() return load_problem_data(self._dwi_fname, protocol, brain_mask_fname, gradient_deviations=self._get_gradient_deviations(), noise_std=self._noise_std) def get_subject_id(self): return self.subject_id def get_mask_filename(self): if not os.path.isfile(self._mask_fname): logger = logging.getLogger(__name__) logger.info('Creating a brain mask for subject {0}'.format(self.subject_id)) protocol = self._protocol_loader.get_protocol() create_write_median_otsu_brain_mask(self._dwi_fname, protocol, self._mask_fname) return self._mask_fname def _get_gradient_deviations(self): if self._use_gradient_deviations and self._gradient_deviations is not None: return load_nifti(self._gradient_deviations).get_data() return None class BatchSubjectSelection(object): def get_selection(self, subjects): """Get the selection of subjects from the given list of subjects. Args: subjects (list of :class:`SubjectInfo`): the list of subjects from which we can choose which one to process Returns: list of :class:`SubjectInfo`: the given list or a subset of the given list with the subjects to process. """ pass class AllSubjects(BatchSubjectSelection): def __init__(self): """Selects all subjects for use in the processing""" super(AllSubjects, self).__init__() def get_selection(self, subjects): return subjects class SelectedSubjects(BatchSubjectSelection): def __init__(self, subject_ids=None, indices=None, start_from=None): """Only process the selected subjects. This method allows either a selection by index (unsafe for the order may change) or by subject name/ID (more safe in general). If ``start_from`` is given it additionally limits the list of selected subjects to include only those after that index. This essentially creates three different subsets of the given list of subjects and it will only process the subjects in the intersection of all those sets. Set any one of the options to None to ignore that option. Args: subject_ids (list of str): the list of names of subjects to process indices (list/tuple of int): the list of indices of subjects we wish to process start_from (list or int): the index of the name of the subject from which we want to start processing. 
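For example (the subject names are hypothetical):

.. code-block:: python

    # process only two named subjects
    selection = SelectedSubjects(subject_ids=['subject_01', 'subject_42'])

    # or process every subject from index 10 onwards
    selection = SelectedSubjects(start_from=10)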
""" self.subject_ids = subject_ids self.indices = indices self.start_from = start_from def get_selection(self, subjects): starting_pos = self._get_starting_pos(subjects) if self.indices is None and self.subject_ids is None: return subjects[starting_pos:] if self.indices: subjects = [subject for ind, subject in enumerate(subjects) if ind in self.indices and ind >= starting_pos] if self.subject_ids: subjects = list(filter(lambda subject: subject.subject_id in self.subject_ids, subjects)) return subjects def _get_starting_pos(self, subjects): if self.start_from is None: return 0 if isinstance(self.start_from, six.string_types): for ind, subject in enumerate(subjects): if subject.subject_id == self.start_from: return ind for ind, subject in enumerate(subjects): if ind == int(self.start_from): return ind class BatchFitProtocolLoader(ProtocolLoader): def __init__(self, base_dir, protocol_fname=None, protocol_columns=None, bvec_fname=None, bval_fname=None): """A simple protocol loader for loading a protocol from a protocol file or bvec/bval files. This either loads the protocol file if present, or autoloads the protocol using the auto_load_protocol from the protocol module. """ super(BatchFitProtocolLoader, self).__init__() self._base_dir = base_dir self._protocol_fname = protocol_fname self._bvec_fname = bvec_fname self._bval_fname = bval_fname self._protocol_columns = protocol_columns def get_protocol(self): super(BatchFitProtocolLoader, self).get_protocol() if self._protocol_fname and os.path.isfile(self._protocol_fname): return load_protocol(self._protocol_fname) return auto_load_protocol(self._base_dir, protocol_columns=self._protocol_columns, bvec_fname=self._bvec_fname, bval_fname=self._bval_fname) class BatchFitSubjectOutputInfo(object): def __init__(self, subject_info, output_path, model_name): """This class is used in conjunction with the function :func:`run_function_on_batch_fit_output`. Args: subject_info (SubjectInfo): the information about the subject before batch fitting output_path (str): the full path to the directory with the maps model_name (str): the name of the model (not a path) """ self.subject_info = subject_info self.output_path = output_path self.model_name = model_name @property def mask_name(self): return split_image_path(self.subject_info.get_mask_filename())[1] @property def subject_id(self): return self.subject_info.subject_id class BatchFitOutputInfo(object): def __init__(self, data_folder, batch_profile=None, subjects_selection=None): """Single point of information about batch fitting results. Args: data_folder (str): The data folder with the output files batch_profile (:class:`BatchProfile` or str): the batch profile to use, can also be the name of a batch profile to use. If not given it is auto detected. subjects_selection (BatchSubjectSelection): the subjects to use for processing. If None all subjects are processed. """ self._data_folder = data_folder self._batch_profile = batch_profile_factory(batch_profile, data_folder) self._subjects_selection = subjects_selection or AllSubjects() self._subjects = self._subjects_selection.get_selection(self._batch_profile.get_subjects()) self._mask_paths = {} def subject_output_info_generator(self): """Generates for every subject an output info object which contains all relevant information about the subject. 
Returns: generator: returns an BatchFitSubjectOutputInfo per subject """ model_names = self._get_composite_model_names(self._batch_profile.get_models_to_fit()) for subject_info in self._subjects: for model_name in model_names: output_path = os.path.join(subject_info.output_dir, model_name) if os.path.isdir(output_path): yield BatchFitSubjectOutputInfo(subject_info, output_path, model_name) @staticmethod def _get_composite_model_names(model_names): """Resolve the composite model names from the list of (possibly cascade) model names from the BatchProfile""" lookup_cache = {} def get_names(current_names): composite_model_names = [] for model_name in current_names: if model_name not in lookup_cache: model = get_model(model_name) if isinstance(model, DMRICascadeModelInterface): resolved_names = get_names(model.get_model_names()) lookup_cache[model_name] = resolved_names else: lookup_cache[model_name] = [model_name] composite_model_names.extend(lookup_cache[model_name]) return composite_model_names return list(set(get_names(model_names))) def run_function_on_batch_fit_output(data_folder, func, batch_profile=None, subjects_selection=None): """Run a function on the output of a batch fitting routine. This enables you to run a function on every model output from every subject. The callback python function should accept as single argument an instance of the class BatchFitSubjectOutputInfo. Args: data_folder (str): The data folder with the output files func (python function): the python function we should call for every map and model. This should accept as single parameter a BatchFitSubjectOutputInfo. batch_profile (BatchProfile class or str): the batch profile to use, can also be the name of a batch profile to use. If not given it is auto detected. subjects_selection (BatchSubjectSelection): the subjects to use for processing. If None all subjects are processed. Returns: dict: indexed by subject->model_name, values are the return values of the users function """ output_info = BatchFitOutputInfo(data_folder, batch_profile, subjects_selection=subjects_selection) results = AutoDict() for subject in output_info.subject_output_info_generator(): results[subject.subject_id][subject.model_name] = func(subject) return results.to_normal_dict() def batch_profile_factory(batch_profile, data_folder): """Wrapper function for getting a batch profile. Args: batch_profile (None, string or BatchProfile): indication of the batch profile to use. If a string is given it is loaded from the users home folder. Else the best matching profile is returned. data_folder (str): the data folder we want to use the batch profile on. Returns: If the given batch profile is None we return the output from get_best_batch_profile(). If batch profile is a string we use it from the batch profiles loader. Else we return the input. """ if batch_profile is None: batch_profile = get_best_batch_profile(data_folder) elif isinstance(batch_profile, string_types): batch_profile = BatchProfilesLoader().load(batch_profile) batch_profile.set_root_dir(data_folder) return batch_profile def get_best_batch_profile(data_folder): """Get the batch profile that best matches the given directory. Args: data_folder (str): the directory for which to get the best batch profile. Returns: BatchProfile: the best matching batch profile. 
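For example (the directory is hypothetical):

.. code-block:: python

    profile = get_best_batch_profile('/data/study')
    if profile is not None:
        nmr_subjects = profile.get_subjects_count()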
""" profile_loader = BatchProfilesLoader() crawlers = [profile_loader.load(c) for c in profile_loader.list_all()] best_crawler = None best_subjects_count = 0 for crawler in crawlers: crawler.set_root_dir(data_folder) if crawler.profile_suitable(): tmp_count = crawler.get_subjects_count() if tmp_count > best_subjects_count: best_crawler = crawler best_subjects_count = tmp_count return best_crawler def collect_batch_fit_output(data_folder, output_dir, batch_profile=None, subjects_selection=None, symlink=True, symlink_absolute=False, move=False): """Load from the given data folder all the output files and put them into the output directory. The results are placed in the output folder per subject. Example: ``//`` Args: data_folder (str): The data folder with the output files output_dir (str): The path to the output folder where all the files will be put. batch_profile (:class:`BatchProfile` or str): the batch profile to use, can also be the name of a batch profile to use. If not given it is auto detected. subjects_selection (BatchSubjectSelection): the subjects to use for processing. If None all subjects are processed. symlink (boolean): only available under Unix OS's. Creates a symlink instead of copying. This will create an absolute position symlink. symlink_absolute (boolean): if symlink is set to true, do you want an absolute symlink (True) or a relative one (False) move (boolean): instead of copying the files, move them to a new position. If set, this overrules the parameter symlink. """ def copy_function(subject_info): if not os.path.exists(os.path.join(output_dir, subject_info.subject_id)): os.makedirs(os.path.join(output_dir, subject_info.subject_id)) subject_out = os.path.join(output_dir, subject_info.subject_id, subject_info.model_name) if os.path.exists(subject_out) or os.path.islink(subject_out): if os.path.islink(subject_out): os.unlink(subject_out) else: shutil.rmtree(subject_out) if move: shutil.move(subject_info.output_path, subject_out) else: if symlink: if symlink_absolute: os.symlink(subject_info.output_path, subject_out) else: os.symlink(os.path.relpath(subject_info.output_path, os.path.dirname(subject_out)), subject_out) else: shutil.copytree(subject_info.output_path, subject_out) run_function_on_batch_fit_output(data_folder, copy_function, batch_profile=batch_profile, subjects_selection=subjects_selection) PK1Imdt/__version__.pyVERSION = '0.9.5' _items = VERSION.split('-') VERSION_NUMBER_PARTS = tuple(int(i) for i in _items[0].split('.')) if len(_items) > 1: VERSION_STATUS = _items[1] else: VERSION_STATUS = '' __version__ = VERSION PKjUpI~;// mdt/nifti.pyimport glob import os import numpy as np import nibabel as nib from mdt.deferred_mappings import DeferredActionDict __author__ = 'Robbert Harms' __date__ = "2014-08-28" __license__ = "LGPL v3" __maintainer__ = "Robbert Harms" __email__ = "robbert.harms@maastrichtuniversity.nl" def load_nifti(nifti_volume): """Load and return a nifti file. This will apply path resolution if a filename without extension is given. See the function :func:`nifti_filepath_resolution` for details. Args: nifti_volume (string): The filename of the volume to use. Returns: :class:`nibabel.nifti1.Nifti1Image` """ path = nifti_filepath_resolution(nifti_volume) return nib.load(path) def load_all_niftis(directory, map_names=None): """Loads all niftis in the given directory. If map_names is given we will only load the given maps. Else, we load all .nii and .nii.gz files in the given directory. 
The map name is the filename of a nifti without the extension. Args: directory (str): the directory from which we want to load the niftis map_names (list of str): the names of the maps we want to use. If given, we only use and return these maps. Returns: dict: A dictionary with the loaded nibabel proxies (see :func:`load_nifti`). The keys of the dictionary are the filenames without the extension of the .nii(.gz) files in the given directory. """ maps_paths = {} for path, map_name, _ in yield_nifti_info(directory): if not map_names or map_name in map_names: maps_paths.update({map_name: path}) return {k: load_nifti(v) for k, v in maps_paths.items()} def get_all_image_data(directory, map_names=None, deferred=True): """Get the data of all the nifti volumes in the given directory. If map_names is given we will only load the given map names. Else, we load all .nii and .nii.gz files in the given directory. Args: directory (str): the directory from which we want to read a number of maps map_names (list of str): the names of the maps we want to use. If given, we only use and return these maps. deferred (boolean): if True we return an deferred loading dictionary instead of a dictionary with the values loaded as arrays. Returns: dict: A dictionary with the volumes. The keys of the dictionary are the filenames without the extension of the .nii(.gz) files in the given directory. """ proxies = load_all_niftis(directory, map_names=map_names) if deferred: return DeferredActionDict(lambda _, item: item.get_data(), proxies) else: return {k: v.get_data() for k, v in proxies.items()} def write_nifti(data, header, output_fname, affine=None, **kwargs): """Write data to a nifti file. Args: output_fname (str): the name of the resulting nifti file data (ndarray): the data to write to that nifti file header (nibabel header): the nibabel header to use as header for the nifti file affine (ndarray): the affine transformation matrix **kwargs: other arguments to Nifti1Image from NiBabel """ nib.Nifti1Image(data, affine, header, **kwargs).to_filename(output_fname) def write_all_as_nifti(volumes, directory, nifti_header, overwrite_volumes=True, gzip=True): """Write a number of volume maps to the specific directory. Args: volumes (dict): the volume maps (in 3d) with the results we want to write. The filenames are generated using the keys in the given volumes directory (str): the directory to write to nifti_header: the nifti header to use for each of the volumes overwrite_volumes (boolean): defaults to True, if we want to overwrite the volumes if they exists gzip (boolean): if True we write the files as .nii.gz, if False we write the files as .nii """ if not os.path.exists(directory): os.makedirs(directory) for key, volume in volumes.items(): extension = '.nii' if gzip: extension += '.gz' filename = key + extension full_filename = os.path.abspath(os.path.join(directory, filename)) if os.path.exists(full_filename): if overwrite_volumes: os.remove(full_filename) write_nifti(volume, nifti_header, full_filename) else: write_nifti(volume, nifti_header, full_filename) def nifti_filepath_resolution(file_path): """Tries to resolve the filename to a nifti based on only the filename. For example, this resolves the path: ``/tmp/mask`` to: - ``/tmp/mask`` if exists - ``/tmp/mask.nii`` if exist - ``/tmp/mask.nii.gz`` if exists Hence, the lookup order is: ``path``, ``path.nii``, ``path.nii.gz`` If a file with an extension is given we will do no further resolving and return the path as is. 
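For example, assuming only ``/tmp/mask.nii.gz`` exists on disk:

.. code-block:: python

    path = nifti_filepath_resolution('/tmp/mask')
    # path == '/tmp/mask.nii.gz'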
Args: file_path (str): the path to the nifti file, can be without extension. Returns: str: the file path we resolved to the final file. Raises: ValueError: if no nifti file could be found """ if file_path[-len('.nii'):] == '.nii' or file_path[-len('.nii.gz'):] == '.nii.gz': return file_path if os.path.isfile(file_path): return file_path elif os.path.isfile(file_path + '.nii'): return file_path + '.nii' elif os.path.isfile(file_path + '.nii.gz'): return file_path + '.nii.gz' raise ValueError('No nifti file could be found using the path {}.'.format(file_path)) def yield_nifti_info(directory): """Get information about the nifti volumes in the given directory. Args: directory (str): the directory to get the names of the available maps from Yields: tuple: (path, map_name, extension) for every map found """ for extension in ('.nii', '.nii.gz'): for f in glob.glob(os.path.join(directory, '*' + extension)): yield f, os.path.basename(f)[0:-len(extension)], extension class TrackMark(object): """TrackMark is a proprietary visualization tool written by Alard Roebroeck and can be used to visualize fibre\ directions. This class is meant to convert nifti files to TrackMark specific files. """ @staticmethod def write_tvl_direction_pairs(tvl_filename, tvl_header, direction_pairs): """Write the given directions to TVL. The direction pairs should be a list with lists containing the vector and value to write. For example: ((vec, val), (vec1, val1), ...) up to three pairs are allowed. Args: tvl_filename (str): the filename to write to tvl_header (:class:`list`): the header for the TVL file. This is a list of either 4 or 10 entries. 4 entries: [version, res, gap, offset] 10 entries: [version, x_res, x_gap, x_offset, y_res, y_gap, y_offset, z_res, z_gap, z_offset] direction_pairs (list of ndarrays): The list with direction pairs, only three are used. This is a list with (vector, magnitude) tuples in which the vectors are 4d volumes with for every voxel a 3d coordinate. """ direction_pairs = direction_pairs[0:3] dir_matrix = np.zeros(direction_pairs[0][0].shape[0:3] + (12,)) for ind, dirs in enumerate(direction_pairs): dir_matrix[..., ind*3:ind*3+3] = np.ascontiguousarray(np.squeeze(dirs[0])) dir_matrix[..., 9 + ind] = np.ascontiguousarray(np.squeeze(dirs[1])) TrackMark.write_tvl_matrix(tvl_filename, tvl_header, dir_matrix) @staticmethod def write_tvl_matrix(tvl_filename, tvl_header, directions_matrix): """Write the given directions matrix to TVL. Args: tvl_filename: the filename to write to tvl_header: the header for the TVL file. This is a list of either 4 or 10 entries. 4 entries: [version, res, gap, offset] 10 entries: [version, x_res, x_gap, x_offset, y_res, y_gap, y_offset, z_res, z_gap, z_offset] directions_matrix: a 4-dimensional matrix, of which the fourth dimension is of length 12.
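A minimal sketch of a call (the shape and header values are placeholders, not validated TrackMark settings):

.. code-block:: python

    import numpy as np

    directions = np.zeros((128, 128, 60, 12), dtype=np.float32)
    header = [1, 2.0, 0.0, 0.0]  # [version, res, gap, offset], assumed values
    TrackMark.write_tvl_matrix('/tmp/example.tvl', header, directions)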
""" if os.path.exists(tvl_filename): os.remove(tvl_filename) if not os.path.exists(os.path.dirname(tvl_filename)): os.makedirs(os.path.dirname(tvl_filename)) open(tvl_filename, 'a').close() with open(tvl_filename, 'rb+') as f: version = np.array(tvl_header[0]).astype(np.uint16) version.tofile(f, '') if len(tvl_header) == 4: for i in range(3): np.array(directions_matrix.shape[i]).astype(np.uint32).tofile(f, '') np.array(tvl_header[1]).astype(np.float64).tofile(f, '') np.array(tvl_header[2]).astype(np.float64).tofile(f, '') np.array(tvl_header[3]).astype(np.float64).tofile(f, '') else: for i in range(3): np.array(directions_matrix.shape[i]).astype(np.uint32).tofile(f, '') np.array(tvl_header[i * 3 + 1]).astype(np.float64).tofile(f, '') np.array(tvl_header[i * 3 + 2]).astype(np.float64).tofile(f, '') np.array(tvl_header[i * 3 + 3]).astype(np.float64).tofile(f, '') directions_matrix = np.transpose(directions_matrix, (3, 2, 1, 0)).astype(np.float32).flatten('F') directions_matrix.tofile(f, '') @staticmethod def write_rawmaps(directory, volumes, overwrite_volumes=True): """Write a dictionary with volumes to the given directory. Args: directory (str): the directory to write to volumes (dict): an dictionary with the volume maps (3d) with the results we want to write out The naming of the file is the key of the volume with .rawmap appended by this function. overwrite_volumes (boolean): if we want to overwrite already present volumes """ if not os.path.isdir(directory): os.makedirs(directory) for key, volume in volumes.items(): filename = key + '.rawmap' full_filename = os.path.abspath(os.path.join(directory, filename)) if os.path.exists(full_filename): if overwrite_volumes: os.remove(full_filename) TrackMark.write_rawmap(full_filename, volume) else: TrackMark.write_rawmap(full_filename, volume) @staticmethod def write_rawmap(rawmap_filename, volume): """Write a rawmap to the given file. Args: rawmap_filename (str): The filename to write to, if not exists, it is created (along with extra directories). This should end on .rawmap, it not it is added. volume (ndarray): the volume to write. 3d or 4d. If 4d and 4th dimension is larger than 1 additional maps are created. """ if rawmap_filename[-len('.rawmap'):] != '.rawmap': rawmap_filename += '.rawmap' if os.path.exists(rawmap_filename): os.remove(rawmap_filename) if not os.path.exists(os.path.dirname(rawmap_filename)): os.makedirs(os.path.dirname(rawmap_filename)) open(rawmap_filename, 'a').close() s = volume.shape if len(s) == 4: if s[3] == 1: volume = np.squeeze(volume, axis=(3,)) else: subnames = rawmap_filename[0:-len('.rawmap')] + '_' for ind in range(volume.shape[3]): TrackMark.write_rawmap(subnames + repr(ind) + '.rawmap', volume[..., ind]) return with open(rawmap_filename, 'rb+') as f: np.array(s[0]).astype(np.uint16).tofile(f, '') np.array(s[1]).astype(np.uint16).tofile(f, '') np.array(s[2]).astype(np.uint16).tofile(f, '') m = np.transpose(volume, [2, 1, 0]).astype(np.float32).flatten('F') m.tofile(f, '') PKjUpIgffmdt/model_protocol_problem.py__author__ = 'Robbert Harms' __date__ = "2015-08-25" __maintainer__ = "Robbert Harms" __email__ = "robbert.harms@maastrichtuniversity.nl" class ModelProtocolProblem(object): def __init__(self): """The base class for indicating problems with a protocol. These are meant to be returned from the function get_protocol_problems() from the ProtocolCheckInterface. Each of these problems is supposed to overwrite the function __str__() for reporting the problem. 
""" def __repr__(self): return self.__str__() class MissingColumns(ModelProtocolProblem): def __init__(self, missing_columns): super(MissingColumns, self).__init__() self.missing_columns = missing_columns def __str__(self): return 'Missing columns: ' + ', '.join(self.missing_columns) class InsufficientShells(ModelProtocolProblem): def __init__(self, required_nmr_shells, nmr_shells): super(InsufficientShells, self).__init__() self.required_nmr_shells = required_nmr_shells self.nmr_shells = nmr_shells def __str__(self): return 'Required number of shells is {}, this protocol has {}.'.format( self.required_nmr_shells, self.nmr_shells) class NamedProtocolProblem(ModelProtocolProblem): def __init__(self, model_protocol_problem, model_name): """This extends the given model protocol problem to also include the name of the model. Args: model_protocol_problem (ModelProtocolProblem): The name for the problem with the given model. model_name (str): the name of the model """ super(NamedProtocolProblem, self).__init__() self._model_protocol_problem = model_protocol_problem self._model_name = model_name def __str__(self): return "{0}: {1}".format(self._model_name, self._model_protocol_problem) PKeI'AI mdt/utils.pyimport collections import distutils.dir_util import glob import logging import logging.config as logging_config import os import re import shutil import tempfile from collections import defaultdict from contextlib import contextmanager import numpy as np import pkg_resources import six from numpy.lib.format import open_memmap from scipy.special import jnp_zeros from six import string_types import mot.utils from mdt.nifti import load_nifti, write_nifti, write_all_as_nifti, get_all_image_data from mdt.cl_routines.mapping.calculate_eigenvectors import CalculateEigenvectors from mdt.components_loader import get_model from mdt.configuration import get_config_dir from mdt.configuration import get_logging_configuration_dict, get_noise_std_estimators, get_tmp_results_dir from mdt.data_loaders.brain_mask import autodetect_brain_mask_loader from mdt.data_loaders.noise_std import autodetect_noise_std_loader from mdt.data_loaders.protocol import autodetect_protocol_loader from mdt.deferred_mappings import DeferredActionDict, DeferredActionTuple from mdt.exceptions import NoiseStdEstimationNotPossible from mdt.log_handlers import ModelOutputLogHandler from mdt.protocols import load_protocol, write_protocol from mot.cl_environments import CLEnvironmentFactory from mot.cl_routines.mapping.calculate_model_estimates import CalculateModelEstimates from mot.cl_routines.mapping.loglikelihood_calculator import LogLikelihoodCalculator from mot.model_building.evaluation_models import OffsetGaussianEvaluationModel from mot.model_building.problem_data import AbstractProblemData try: import codecs except ImportError: codecs = None __author__ = 'Robbert Harms' __date__ = "2014-02-05" __license__ = "LGPL v3" __maintainer__ = "Robbert Harms" __email__ = "robbert.harms@maastrichtuniversity.nl" class DMRIProblemData(AbstractProblemData): def __init__(self, protocol, dwi_volume, mask, volume_header, static_maps=None, gradient_deviations=None, noise_std=None): """An implementation of the problem data for diffusion MRI models. Args: protocol (Protocol): The protocol object used as input data to the model dwi_volume (ndarray): The DWI data (4d matrix) mask (ndarray): The mask used to create the observations list volume_header (nifti header): The header of the nifti file to use for writing the results. 
static_maps (Dict[str, ndarray]): the static maps used as values for the static map parameters gradient_deviations (ndarray): the gradient deviations containing per voxel 9 values that constitute the gradient non-linearities. Of the 4d matrix the first 3 dimensions are supposed to be the voxel index and the 4th should contain the grad dev data. noise_std (number or ndarray): either None for automatic detection, or a scalar, or an 3d matrix with one value per voxel. Attributes: dwi_volume (ndarray): The DWI volume volume_header (nifti header): The header of the nifti file to use for writing the results. """ self._logger = logging.getLogger(__name__) self.dwi_volume = dwi_volume self.volume_header = volume_header self._mask = mask self._protocol = protocol self._observation_list = None self._static_maps = static_maps or {} self.gradient_deviations = gradient_deviations self._noise_std = noise_std def copy_with_updates(self, *args, **kwargs): """Create a copy of this problem data, while setting some of the arguments to new values. You can use any of the arguments (args and kwargs) of the constructor for this call. If given we will use those values instead of the values in this problem data object for the copy. """ new_args = [self._protocol, self.dwi_volume, self._mask, self.volume_header] for ind, value in enumerate(args): new_args[ind] = value new_kwargs = dict(static_maps=self._static_maps, gradient_deviations=self.gradient_deviations, noise_std=self._noise_std) for key, value in kwargs.items(): new_kwargs[key] = value return DMRIProblemData(*new_args, **new_kwargs) def get_nmr_inst_per_problem(self): return self._protocol.length @property def protocol(self): return self._protocol @property def observations(self): if self._observation_list is None: self._observation_list = create_roi(self.dwi_volume, self._mask) return self._observation_list @property def mask(self): """Return the mask in use Returns: np.array: the numpy mask array """ return self._mask @mask.setter def mask(self, new_mask): """Set the new mask and update the observations list. Args: new_mask (np.array): the new mask """ self._mask = new_mask self._observation_list = None @property def static_maps(self): """Get the static maps. They are used as data for the static parameters. Returns: Dict[str, val]: per static map the value for the static map. This can either be an one or two dimensional matrix containing the values for each problem instance or it can be a single value we will use for all problem instances. """ if self._static_maps is not None: return_items = {} for key, val in self._static_maps.items(): loaded_val = None if isinstance(val, six.string_types): loaded_val = create_roi(load_nifti(val).get_data(), self.mask) elif isinstance(val, np.ndarray): loaded_val = create_roi(val, self.mask) elif is_scalar(val): loaded_val = val return_items[key] = loaded_val return return_items return self._static_maps @property def noise_std(self): """The noise standard deviation we will use during model evaluation. During optimization or sampling the model will be evaluated against the observations using an evaluation model. Most of these evaluation models need to have a standard deviation. Returns: number of ndarray: either a scalar or a 2d matrix with one value per problem instance. """ try: noise_std = autodetect_noise_std_loader(self._noise_std).get_noise_std(self) except NoiseStdEstimationNotPossible: logger = logging.getLogger(__name__) logger.warn('Failed to obtain a noise std for this subject. 
We will continue with an std of 1.') noise_std = 1 if is_scalar(noise_std): self._logger.info('Using a scalar noise standard deviation of {0}'.format(noise_std)) return noise_std else: self._logger.info('Using a voxel wise noise standard deviation.') return create_roi(noise_std, self.mask) class MockDMRIProblemData(DMRIProblemData): def __init__(self, protocol=None, dwi_volume=None, mask=None, volume_header=None, **kwargs): """A mock DMRI problem data object that returns None for everything unless given. """ super(MockDMRIProblemData, self).__init__(protocol, dwi_volume, mask, volume_header, **kwargs) @property def observations(self): return self._observation_list @property def noise_std(self): return self._noise_std class PathJoiner(object): def __init__(self, *args): """The path joining class. To construct use something like: .. code-block:: python >>> pjoin = PathJoiner(r'/my/images/dir/') or: .. code-block:: python >>> pjoin = PathJoiner('my', 'images', 'dir') Then, you can call it like: .. code-block:: python >>> pjoin() /my/images/dir At least, it returns the above on Linux. On windows it will return ``my\\images\\dir``. You can also call it with an additional path element that is (temporarily) appended to the path: .. code-block:: python >>> pjoin('/brain_mask.nii.gz') /my/images/dir/brain_mask.nii.gz To add a path permanently to the path joiner use: .. code-block:: python >>> pjoin.append('results') This will extend the stored path to ``/my/images/dir/results/``: .. code-block:: python >>> pjoin('/brain_mask.nii.gz') /my/images/dir/results/brain_mask.nii.gz You can reset the path joiner to the state of at object construction using: .. code-block:: python >>> pjoin.reset() You can also create a copy of this class with extended path elements by calling .. code-block:: python >>> pjoin2 = pjoin.create_extended('results') This returns a new PathJoiner instance with as path the current path plus the items in the arguments. .. code-block:: python >>> pjoin2('brain_mask.nii.gz') /my/images/dir/results/brain_mask.nii.gz Args: *args: the initial path element(s). """ self._initial_path = os.path.abspath(os.path.join('', *args)) self._path = os.path.abspath(os.path.join('', *args)) def create_extended(self, *args): """Create and return a new PathJoiner instance with the path extended by the given arguments.""" return PathJoiner(os.path.join(self._path, *args)) def append(self, *args): """Extend the stored path with the given elements""" self._path = os.path.join(self._path, *args) return self def reset(self): """Reset the path to the path at construction time""" self._path = self._initial_path return self def make_dirs(self, mode=0o777): """Create the directories if they do not exists. This uses os.makedirs to make the directories. The given argument mode is handed to os.makedirs. Args: mode: the mode parameter for os.makedirs """ if not os.path.exists(self._path): os.makedirs(self._path, mode) def __call__(self, *args): return os.path.abspath(os.path.join(self._path, *args)) def split_dataset(dataset, split_dimension, split_index): """Split the given dataset along the given dimension on the given index. Args: dataset (ndarray, list, tuple or dict): The single or list of volume which to split in two split_dimension (int): The dimension along which to split the dataset split_index (int): The index on the given dimension to split the volume(s) Returns: If dataset is a single volume return the two volumes that when concatenated give the original volume back. 
If it is a list, tuple or dict return two of those with exactly the same indices but with each holding one half of the splitted data. """ if isinstance(dataset, (tuple, list)): output_1 = [] output_2 = [] for d in dataset: split = split_dataset(d, split_dimension, split_index) output_1.append(split[0]) output_2.append(split[1]) if isinstance(dataset, tuple): return tuple(output_1), tuple(output_2) return output_1, output_2 elif isinstance(dataset, dict): output_1 = {} output_2 = {} for k, d in dataset.items(): split = split_dataset(d, split_dimension, split_index) output_1[k] = split[0] output_2[k] = split[1] return output_1, output_2 ind_1 = [slice(None)] * dataset.ndim ind_1[split_dimension] = range(0, split_index) ind_2 = [slice(None)] * dataset.ndim ind_2[split_dimension] = range(split_index, dataset.shape[split_dimension]) return dataset[ind_1], dataset[ind_2] def split_write_dataset(input_fname, split_dimension, split_index, output_folder=None): """Split the given dataset using the function split_dataset and write the output files. Args: dataset (str): The filename of a volume to split split_dimension (int): The dimension along which to split the dataset split_index (int): The index on the given dimension to split the volume(s) """ if output_folder is None: output_folder = os.path.dirname(input_fname) dataset = load_nifti(input_fname) data = dataset.get_data() split = split_dataset(data, split_dimension, split_index) basename = os.path.basename(input_fname).split('.')[0] length = data.shape[split_dimension] lengths = (repr(0) + 'to' + repr(split_index-1), repr(split_index) + 'to' + repr(length-1)) volumes = {} for ind, v in enumerate(split): volumes.update({str(basename) + '_split_' + str(split_dimension) + '_' + lengths[ind]: v}) write_all_as_nifti(volumes, output_folder, dataset.get_header()) def get_bessel_roots(number_of_roots=30, np_data_type=np.float64): """These roots are used in some of the compartment models. It are the roots of the equation ``J'_1(x) = 0``. That is, where ``J_1`` is the first order Bessel function of the first kind. Args: number_of_root (int): The number of roots we want to calculate. np_data_type (np.data_type): the numpy data type Returns: ndarray: A vector with the indicated number of bessel roots (of the first order Bessel function of the first kind). """ return jnp_zeros(1, number_of_roots).astype(np_data_type, copy=False, order='C') def read_split_write_volume(volume_fname, first_output_fname, second_output_fname, split_dimension, split_index): """Read the given dataset from file, then split it along the given dimension on the given index. This writes two files, first_output_fname and second_output_fname with respectively the first and second halves of the split dataset. 
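For example, to split a 4d DWI volume along the volume dimension after its 36th volume (the file names here are purely illustrative):

.. code-block:: python

    >>> read_split_write_volume('dwi.nii.gz', 'dwi_first.nii.gz', 'dwi_second.nii.gz', 3, 36)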
Args: volume_fname (str): The filename of the volume to use and split first_output_fname (str): The filename of the first half of the split second_output_fname (str): The filename of the second half of the split split_dimension (int): The dimension along which to split the dataset split_index (int): The index on the given dimension to split the volume(s) """ signal_img = load_nifti(volume_fname) signal4d = signal_img.get_data() img_header = signal_img.get_header() split = split_dataset(signal4d, split_dimension, split_index) write_nifti(split[0], img_header, first_output_fname) write_nifti(split[1], img_header, second_output_fname) def create_slice_roi(brain_mask, roi_dimension, roi_slice): """Create a region of interest out of the given brain mask by taking one specific slice out of the mask. Args: brain_mask (ndarray): The brain_mask used to create the new brain mask roi_dimension (int): The dimension to take a slice out of roi_slice (int): The index on the given dimension. Returns: A brain mask of the same dimensions as the original mask, but with only one slice activated. """ roi_mask = np.zeros_like(brain_mask) ind_pos = [slice(None)] * roi_mask.ndim ind_pos[roi_dimension] = roi_slice roi_mask[tuple(ind_pos)] = get_slice_in_dimension(brain_mask, roi_dimension, roi_slice) return roi_mask def write_slice_roi(brain_mask_fname, roi_dimension, roi_slice, output_fname, overwrite_if_exists=False): """Create a region of interest out of the given brain mask by taking one specific slice out of the mask. This will both write and return the created slice ROI. We need a filename as input brain mask since we need the header of the file to be able to write the output file with the same header. Args: brain_mask_fname (string): The filename of the brain_mask used to create the new brain mask roi_dimension (int): The dimension to take a slice out of roi_slice (int): The index on the given dimension. output_fname (string): The output filename overwrite_if_exists (boolean, optional, default false): If we want to overwrite the file if it already exists Returns: A brain mask of the same dimensions as the original mask, but with only one slice set to one. """ if os.path.exists(output_fname) and not overwrite_if_exists: return load_brain_mask(output_fname) if not os.path.isdir(os.path.dirname(output_fname)): os.makedirs(os.path.dirname(output_fname)) brain_mask_img = load_nifti(brain_mask_fname) brain_mask = brain_mask_img.get_data() img_header = brain_mask_img.get_header() roi_mask = create_slice_roi(brain_mask, roi_dimension, roi_slice) write_nifti(roi_mask, img_header, output_fname) return roi_mask def concatenate_two_mri_measurements(datasets): """ Concatenate the given datasets (combination of volumes and protocols) For example, as input one can give: .. code-block:: python ((protocol_1, volume_1), (protocol_2, volume_2), ...) And the output is: ``(protocol, volumes)`` where the volumes is for every voxel a concatenation of the given volumes, and the protocol is a concatenation of the given protocols. Args: datasets: a list of datasets, where a dataset is a tuple structured as: (protocol, volume). 
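A minimal sketch of the expected call pattern (the protocol and volume objects are assumed to be loaded already):

.. code-block:: python

    >>> protocol, volumes = concatenate_two_mri_measurements(
    ...     [(protocol_1, volume_1), (protocol_2, volume_2)])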
Returns A single tuple holding the concatenation of the given datasets """ signal_list = [datasets[0][1]] protocol_concat = datasets[0][0].deepcopy() for i in range(1, len(datasets)): signal_list.append(datasets[i][1]) protocol_concat.append_protocol(datasets[i][0]) signal4d_concat = np.concatenate(signal_list, 3) return protocol_concat, signal4d_concat def get_slice_in_dimension(volume, dimension, index): """From the given volume get a slice on the given dimension (x, y, z, ...) and then on the given index. Args: volume (ndarray): the volume, 3d, 4d or more dimension (int): the dimension on which we want a slice index (int): the index of the slice Returns: ndarray: A slice (plane) or hyperplane of the given volume """ ind_pos = [slice(None)] * volume.ndim ind_pos[dimension] = index array_slice = volume[tuple(ind_pos)] return np.squeeze(array_slice) def create_roi(data, brain_mask): """Create and return masked data of the given brain volume and mask Args: data (string, ndarray or dict): a brain volume with four dimensions (x, y, z, w) where w is the length of the protocol, or a list, tuple or dictionary with volumes or a string with a filename of a dataset to use. brain_mask (ndarray or str): the mask indicating the region of interest with dimensions: (x, y, z) or the string to the brain mask to use Returns: ndarray, tuple, dict: If a single ndarray is given we will return the ROI for that array. If an iterable is given we will return a tuple. If a dict is given we return a dict. For each result the axis are: (voxels, protocol) """ from mdt.data_loaders.brain_mask import autodetect_brain_mask_loader brain_mask = autodetect_brain_mask_loader(brain_mask).get_data() def creator(v): return_val = v[brain_mask] if len(return_val.shape) == 1: return_val = np.expand_dims(return_val, axis=1) return return_val if isinstance(data, (dict, collections.MutableMapping)): return DeferredActionDict(lambda _, item: create_roi(item, brain_mask), data, memoize=True) elif isinstance(data, six.string_types): return creator(load_nifti(data).get_data()) elif isinstance(data, (list, tuple, collections.Sequence)): return DeferredActionTuple(lambda _, item: create_roi(item, brain_mask), data, memoize=True) return creator(data) def restore_volumes(data, brain_mask, with_volume_dim=True): """Restore the given data to a whole brain volume The data can be a list, tuple or dictionary or directly a two dimensional list of data points Args: data (ndarray): the data as a x dimensional list of voxels, or, a list, tuple, or dict of those voxel lists brain_mask (ndarray): the brain_mask which was used to generate the data list with_volume_dim (boolean): If true we return values with 4 dimensions. The extra dimension is for the volume index. If false we return 3 dimensions. Returns: Either a single whole volume, a list, tuple or dict of whole volumes, depending on the given data. If with_volume_ind_dim is set we return values with 4 dimensions. (x, y, z, 1). If not set we return only three dimensions. 
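A minimal round-trip sketch together with :func:`create_roi` (the array names are illustrative):

.. code-block:: python

    >>> roi = create_roi(dwi_volume, brain_mask)       # shape (voxels, volumes)
    >>> volumes = restore_volumes(roi, brain_mask)     # shape (x, y, z, volumes)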
""" from mdt.data_loaders.brain_mask import autodetect_brain_mask_loader brain_mask = autodetect_brain_mask_loader(brain_mask).get_data() shape3d = brain_mask.shape[:3] indices = np.ravel_multi_index(np.nonzero(brain_mask)[:3], shape3d, order='C') def restorer(voxel_list): s = voxel_list.shape def restore_3d(voxels): return_volume = np.zeros((brain_mask.size,), dtype=voxels.dtype, order='C') return_volume[indices] = voxels return np.reshape(return_volume, shape3d) def restore_4d(voxels): return_volume = np.zeros((brain_mask.size, s[1]), dtype=voxels.dtype, order='C') return_volume[indices] = voxels return np.reshape(return_volume, brain_mask.shape + (s[1], )) if len(s) > 1 and s[1] > 1: if with_volume_dim: return restore_4d(voxel_list) else: return restore_3d(voxel_list[:, 0]) else: volume = restore_3d(voxel_list) if with_volume_dim: return np.expand_dims(volume, axis=3) return volume if isinstance(data, collections.MutableMapping): return {key: restorer(value) for key, value in data.items()} elif isinstance(data, list): return [restorer(value) for value in data] elif isinstance(data, tuple): return (restorer(value) for value in data) elif isinstance(data, collections.Sequence): return [restorer(value) for value in data] else: return restorer(data) def spherical_to_cartesian(theta, phi): """Convert polar coordinates in 3d space to cartesian unit coordinates. .. code-block:: python x = cos(phi) * sin(theta) y = sin(phi) * sin(theta) z = cos(theta) Args: theta (ndarray): The 1d vector with theta's phi (ndarray): The 1d vector with phi's Returns: ndarray: Two dimensional array with on the first axis the voxels and on the second the [x, y, z] coordinates. """ theta = np.squeeze(theta) phi = np.squeeze(phi) sin_theta = np.sin(theta) return_val = np.array([np.cos(phi) * sin_theta, np.sin(phi) * sin_theta, np.cos(theta)]).transpose() if len(return_val.shape) == 1: return return_val[np.newaxis, :] return return_val def eigen_vectors_from_tensor(theta, phi, psi): """Calculate the eigenvectors for a Tensor given the three angles. This will return the eigenvectors unsorted, since this function knows nothing about the eigenvalues. The caller of this function will have to sort them by eigenvalue if necessary. Args: theta_roi (ndarray): The list of theta's per voxel in the ROI phi_roi (ndarray): The list of phi's per voxel in the ROI psi_roi (ndarray): The list of psi's per voxel in the ROI Returns: The three eigenvectors per voxel in the ROI. The return matrix is of shape (n, 3, 3) where n is the number of voxels, the first three is the number of directions (three directions) and the last three is the components of each vector, x, y and z. Hence the three by three matrix for one voxel looks like: .. code-block:: python [[evec_1_x, evec_1_y, evec_1_z], [evec_2_x, evec_2_y, evec_2_z], [evec_3_x, evec_3_y, evec_3_z]] The resulting eigenvectors are the same as those from the Tensor. """ return CalculateEigenvectors().convert_theta_phi_psi(theta, phi, psi) def init_user_settings(pass_if_exists=True): """Initializes the user settings folder using a skeleton. This will create all the necessary directories for adding components to MDT. It will also create a basic configuration file for setting global wide MDT options. Also, it will copy the user components from the previous version to this version. Each MDT version will have it's own sub-directory in the config directory. 
Args: pass_if_exists (boolean): if the folder for this version already exists, we might do nothing (if True) Returns: str: the path the user settings skeleton was written to """ from mdt.configuration import get_config_dir path = get_config_dir() base_path = os.path.dirname(get_config_dir()) if not os.path.exists(base_path): os.makedirs(base_path) @contextmanager def tmp_save_latest_version(): versions_available = list(reversed(sorted(os.listdir(base_path)))) tmp_dir = tempfile.mkdtemp() if versions_available: previous_version = versions_available[0] if os.path.exists(os.path.join(base_path, previous_version, 'components', 'user')): shutil.copytree(os.path.join(base_path, previous_version, 'components', 'user'), tmp_dir + '/components/') if os.path.isfile(os.path.join(base_path, previous_version, 'mdt.conf')): shutil.copy(os.path.join(base_path, previous_version, 'mdt.conf'), tmp_dir + '/mdt.conf') if os.path.isfile(os.path.join(base_path, previous_version, 'mdt.gui.conf')): shutil.copy(os.path.join(base_path, previous_version, 'mdt.gui.conf'), tmp_dir + '/mdt.gui.conf') yield tmp_dir shutil.rmtree(tmp_dir) def init_from_mdt(): cache_path = pkg_resources.resource_filename('mdt', 'data/components') distutils.dir_util.copy_tree(cache_path, os.path.join(path, 'components')) cache_path = pkg_resources.resource_filename('mdt', 'data/mdt.conf') shutil.copy(cache_path, path + '/mdt.default.conf') if not os.path.exists(path + '/components/user/'): os.makedirs(path + '/components/user/') def copy_user_components(tmp_dir): if os.path.exists(tmp_dir + '/components/'): shutil.rmtree(os.path.join(path, 'components', 'user'), ignore_errors=True) shutil.move(tmp_dir + '/components/', os.path.join(path, 'components', 'user')) def make_sure_user_components_exists(): for folder_name in os.listdir(os.path.join(path, 'components/standard/')): if not os.path.exists(path + '/components/user/' + folder_name): os.mkdir(path + '/components/user/' + folder_name) def copy_old_configs(tmp_dir): for config_file in ['mdt.conf', 'mdt.gui.conf']: if os.path.exists(tmp_dir + '/' + config_file): shutil.copy(tmp_dir + '/' + config_file, path + '/' + config_file) with tmp_save_latest_version() as tmp_dir: if pass_if_exists: if os.path.exists(path): return path else: if os.path.exists(path): shutil.rmtree(path) init_from_mdt() copy_user_components(tmp_dir) make_sure_user_components_exists() copy_old_configs(tmp_dir) return path def check_user_components(): """Check if the components in the user's home folder are up to date with this version of MDT Returns: bool: True if the .mdt folder for this version exists. False otherwise. """ return os.path.isdir(get_config_dir()) def setup_logging(disable_existing_loggers=None): """Setup global logging. This uses the loaded config settings to set up the logging. Args: disable_existing_loggers (boolean): If we would like to disable the existing loggers when creating this one. None means use the default from the config, True and False overwrite the config. """ conf = get_logging_configuration_dict() if disable_existing_loggers is not None: conf['disable_existing_loggers'] = True logging_config.dictConfig(conf) def configure_per_model_logging(output_path, overwrite=False): """Set up logging for one specific model. Args: output_path: the output path where the model results are stored. overwrite (boolean): if we want to overwrite or append. If overwrite is True we overwrite the file, if False we append. 
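A minimal sketch (the output path is hypothetical); see also :func:`per_model_logging_context` for a context manager wrapper around this function:

.. code-block:: python

    >>> configure_per_model_logging('/output/subject/BallStick')
    >>> # ... run the model optimization ...
    >>> configure_per_model_logging(None)   # stop the per model logging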
""" if output_path: output_path = os.path.abspath(os.path.join(output_path, 'info.log')) had_this_output_file = all(h.output_file == output_path for h in ModelOutputLogHandler.__instances__) if overwrite: # close any open files for handler in ModelOutputLogHandler.__instances__: handler.output_file = None if os.path.isfile(output_path): os.remove(output_path) for handler in ModelOutputLogHandler.__instances__: handler.output_file = output_path logger = logging.getLogger(__name__) if not had_this_output_file: if output_path: logger.info('Started appending to the per model log file') else: logger.info('Stopped appending to the per model log file') @contextmanager def per_model_logging_context(output_path, overwrite=False): """A logging context wrapper for the function configure_per_model_logging. Args: output_path: the output path where the model results are stored. overwrite (boolean): if we want to overwrite an existing file (if True), or append to it (if False) """ configure_per_model_logging(output_path, overwrite=overwrite) yield configure_per_model_logging(None) def create_sort_matrix(input_4d_vol, reversed_sort=False): """Create an index matrix that sorts the given input on the 4th volume from small to large values (per voxel). This uses Args: input_4d_vol (ndarray): the 4d input volume for which we create a sort index matrix reversed_sort (boolean): if True we reverse the sort and we sort from large to small. Returns: ndarray: a 4d matrix with on the 4th dimension the indices of the elements in sorted order. """ sort_index = np.argsort(input_4d_vol, axis=3) if reversed_sort: return sort_index[..., ::-1] return sort_index def sort_volumes_per_voxel(input_volumes, sort_matrix): """Sort the given volumes per voxel using the sort index in the given matrix. What this essentially does is to look per voxel from which map we should take the first value. Then we place that value in the first volume and we repeat for the next value and finally for the next voxel. If the length of the 4th dimension is > 1 we shift the 4th dimension to the 5th dimension and sort the array as if the 4th dimension values where a single value. This is useful for sorting (eigen)vector matrices. Args: input_volumes (:class:`list`): list of 4d ndarray sort_matrix (ndarray): 4d ndarray with for every voxel the sort index Returns: :class:`list`: the same input volumes but then with every voxel sorted according to the given sort index. """ if input_volumes[0].shape[3] > 1: volume = np.concatenate([np.reshape(m, m.shape[0:3] + (1,) + (m.shape[3],)) for m in input_volumes], axis=3) grid = np.ogrid[[slice(x) for x in volume.shape]] sorted_volume = volume[list(grid[:-2]) + [np.reshape(sort_matrix, sort_matrix.shape + (1,))] + list(grid[-1])] return [sorted_volume[..., ind, :] for ind in range(len(input_volumes))] else: volume = np.concatenate([m for m in input_volumes], axis=3) sorted_volume = volume[list(np.ogrid[[slice(x) for x in volume.shape]][:-1])+[sort_matrix]] return [np.reshape(sorted_volume[..., ind], sorted_volume.shape[0:3] + (1,)) for ind in range(len(input_volumes))] def load_problem_data(volume_info, protocol, mask, static_maps=None, gradient_deviations=None, noise_std=None): """Load and create the problem data object that can be given to a model Args: volume_info (string or tuple): Either an (ndarray, img_header) tuple or the full path to the volume (4d signal data). 
protocol (:class:`~mdt.protocols.Protocol` or str): A protocol object with the right protocol for the given data, or a string object with a filename to the given file. mask (ndarray, str): A full path to a mask file or a 3d ndarray containing the mask static_maps (Dict[str, val]): the dictionary with per static map the value to use. The value can either be an 3d or 4d ndarray, a single number or a string. We will convert all to the right format. gradient_deviations (str or ndarray): set of gradient deviations to use. In HCP WUMINN format. Set to None to disable. noise_std (number or ndarray): either None for automatic detection, or a scalar, or an 3d matrix with one value per voxel. Returns: DMRIProblemData: the problem data object containing all the info needed for diffusion MRI model fitting """ protocol = autodetect_protocol_loader(protocol).get_protocol() mask = autodetect_brain_mask_loader(mask).get_data() if isinstance(volume_info, string_types): info = load_nifti(volume_info) signal4d = info.get_data() img_header = info.get_header() else: signal4d, img_header = volume_info if isinstance(gradient_deviations, six.string_types): gradient_deviations = load_nifti(gradient_deviations).get_data() return DMRIProblemData(protocol, signal4d, mask, img_header, static_maps=static_maps, noise_std=noise_std, gradient_deviations=gradient_deviations) def load_brain_mask(brain_mask_fname): """Load the brain mask from the given file. Args: brain_mask_fname (string): The path of the brain mask to use. Returns: ndarray: The loaded brain mask data """ return load_nifti(brain_mask_fname).get_data() > 0 def flatten(input_it): """Flatten an iterator with a new iterator Args: it (iterable): the input iterable to flatten Returns: a new iterable with a flattened version of the original iterable. """ try: it = iter(input_it) except TypeError: yield input_it else: for i in it: for j in flatten(i): yield j def get_cl_devices(): """Get a list of all CL devices in the system. The indices of the devices can be used in the model fitting/sampling functions for 'cl_device_ind'. Returns: A list of CLEnvironments, one for each device in the system. """ return CLEnvironmentFactory.smart_device_selection() def model_output_exists(model, output_folder, append_model_name_to_path=True): """Checks if the output for the given model exists in the given output folder. This will check for a given model if the output folder exists and contains a nifti file for each parameter of the model. When using this to try to skip subjects when batch fitting it might fail if one of the models can not be calculated for a given subject. For example Noddi requires two shells. If that is not given we can not calculate it and hence no maps will be generated. When we are testing if the output exists it will therefore return False. Args: model (AbstractModel, CascadeModel or str): the model to check for existence, accepts cascade models. If a string is given the model is tried to be loaded from the components loader. output_folder (str): the folder where the output folder of the results should reside in append_model_name_to_path (boolean): by default we will append the name of the model to the output folder. This is to be consistent with the way the model fitting routine places the results in the / directories. Sometimes, however you might want to skip this appending. Returns: boolean: true if the output folder exists and contains files for all the parameters of the model. For a cascade model it returns true if the maps of all the models exist. 
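For example, to skip a subject when the maps are already present (the model and folder names are illustrative):

.. code-block:: python

    >>> if not model_output_exists('BallStick (Cascade)', '/output/subject'):
    ...     pass  # run the model fit here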
""" if isinstance(model, string_types): model = get_model(model) from mdt.models.cascade import DMRICascadeModelInterface if isinstance(model, DMRICascadeModelInterface): return all(model_output_exists(sub_model, output_folder, append_model_name_to_path) for sub_model in model.get_model_names()) if append_model_name_to_path: output_path = os.path.join(output_folder, model.name) else: output_path = output_folder parameter_names = model.get_optimization_output_param_names() if not os.path.exists(output_path): return False for parameter_name in parameter_names: if not glob.glob(os.path.join(output_path, parameter_name + '*')): return False return True def split_image_path(image_path): """Split the path to an image into three parts, the directory, the basename and the extension. Args: image_path (str): the path to an image Returns: list of str: the path, the basename and the extension """ folder = os.path.dirname(image_path) basename = os.path.basename(image_path) for extension in ['.nii.gz', '.nii']: if basename[-len(extension):] == extension: return folder, basename[0:-len(extension)], extension return folder, basename, '' def calculate_information_criterions(log_likelihoods, k, n): """Calculate various information criterions. Args: log_likelihoods (1d np array): the array with the log likelihoods k (int): number of parameters n (int): the number of instances, protocol length Returns: dict with therein the BIC, AIC and AICc which stand for the Bayesian, Akaike and Akaike corrected Information Criterion """ criteria = { 'BIC': -2 * log_likelihoods + k * np.log(n), 'AIC': -2 * log_likelihoods + k * 2} if n > (k + 1): criteria.update({'AICc': -2 * log_likelihoods + k * 2 + (2 * k * (k + 1))/(n - k - 1)}) return criteria class ComplexNoiseStdEstimator(object): def estimate(self, problem_data, **kwargs): """Get a noise std for the entire volume. Args: problem_data (DMRIProblemData): the problem data for which to find a noise std Returns: float or ndarray: the noise sigma of the Gaussian noise in the original complex image domain Raises: :class:`~mdt.exceptions.NoiseStdEstimationNotPossible`: if we can not estimate the sigma using this estimator """ raise NotImplementedError() def apply_mask(volume, mask, inplace=True): """Apply a mask to the given input. Args: volume (str, ndarray, list, tuple or dict): The input file path or the image itself or a list, tuple or dict. mask (str or ndarray): The filename of the mask or the mask itself inplace (boolean): if True we apply the mask in place on the volume image. If false we do not. Returns: Depending on the input either a singla image of the same size as the input image, or a list, tuple or dict. This will set for all the output images the the values to zero where the mask is zero. 
""" from six import string_types from mdt.data_loaders.brain_mask import autodetect_brain_mask_loader mask = autodetect_brain_mask_loader(mask).get_data() def apply(volume, mask): if isinstance(volume, string_types): volume = load_nifti(volume).get_data() mask = mask.reshape(mask.shape + (volume.ndim - mask.ndim) * (1,)) if len(mask.shape) < 4: mask = mask.reshape(mask.shape + (1,)) if len(volume.shape) < 4: volume = volume.reshape(volume.shape + (1,)) if inplace: volume *= mask return volume return volume * mask if isinstance(volume, tuple): return (apply(v, mask) for v in volume) elif isinstance(volume, list): return [apply(v, mask) for v in volume] elif isinstance(volume, dict): return {k: apply(v, mask) for k, v in volume.items()} return apply(volume, mask) def apply_mask_to_file(input_fname, mask, output_fname=None): """Apply a mask to the given input (nifti) file. If no output filename is given, the input file is overwritten. Args: input_fname (str): The input file path mask (str or ndarray): The mask to use output_fname (str): The filename for the output file (the masked input file). """ mask = autodetect_brain_mask_loader(mask).get_data() if output_fname is None: output_fname = input_fname write_nifti(apply_mask(input_fname, mask), load_nifti(input_fname).get_header(), output_fname) def load_samples(data_folder, mode='r'): """Load sampled results as a dictionary of numpy memmap. Args: data_folder (str): the folder from which to use the samples mode (str): the mode in which to open the memory mapped sample files (see numpy mode parameter) Returns: dict: the memory loaded samples per sampled parameter. """ data_dict = {} for fname in glob.glob(os.path.join(data_folder, '*.samples.npy')): samples = open_memmap(fname, mode=mode) map_name = os.path.basename(fname)[0:-len('.samples.npy')] data_dict.update({map_name: samples}) return data_dict def estimate_noise_std(problem_data, estimator=None): """Estimate the noise standard deviation. Args: problem_data (DMRIProblemData): the problem data we can use to do the estimation estimator (ComplexNoiseStdEstimator): the estimator to use for the estimation. If not set we use the one in the configuration. Returns: the noise std estimated from the data. This can either be a single float, or an ndarray. Raises: :class:`~mdt.exceptions.NoiseStdEstimationNotPossible`: if the noise could not be estimated """ logger = logging.getLogger(__name__) logger.info('Trying to estimate a noise std.') def estimate(estimation_routine): noise_std = estimator.estimate(problem_data) if isinstance(noise_std, np.ndarray) and not is_scalar(noise_std): logger.info('Found voxel-wise noise std using estimator {}.'.format(estimation_routine)) return noise_std if np.isfinite(noise_std) and noise_std > 0: logger.info('Found global noise std {} using estimator {}.'.format(noise_std, estimation_routine)) return noise_std raise NoiseStdEstimationNotPossible('Could not estimate a noise from this dataset.') if estimator: estimators = [estimator] else: estimators = get_noise_std_estimators() if len(estimators) == 1: return estimate(estimators[0]) else: for estimator in estimators: try: return estimate(estimator) except NoiseStdEstimationNotPossible: pass raise NoiseStdEstimationNotPossible('Estimating the noise was not possible.') class AutoDict(defaultdict): def __init__(self): """Create an auto-vivacious dictionary.""" super(AutoDict, self).__init__(AutoDict) def to_normal_dict(self): """Convert this dictionary to a normal dict (recursive). 
Returns: dict: a normal dictionary with the items in this dictionary. """ results = {} for key, value in self.items(): if isinstance(value, AutoDict): value = value.to_normal_dict() results.update({key: value}) return results def is_scalar(value): """Test if the given value is a scalar. This function also works with memmapped array values, in contrast to the numpy isscalar method. Args: value: the value to test for being a scalar value Returns: boolean: true if the value is a scalar, false otherwise. """ return mot.utils.is_scalar(value) def roi_index_to_volume_index(roi_indices, brain_mask): """Get the 3d index of a voxel given the linear index in a ROI created with the given brain mask. This is the inverse function of :func:`volume_index_to_roi_index`. This function is useful if you, for example, have sampling results of a specific voxel and you want to locate that voxel in the brain maps. Please note that this function can be memory intensive for a large list of roi_indices Args: roi_indices (int or ndarray): the index in the ROI created by that brain mask brain_mask (str or 3d array): the brain mask you would like to use Returns: ndarray: the 3d voxel location(s) of the indicated voxel(s) """ mask = autodetect_brain_mask_loader(brain_mask).get_data() return np.argwhere(mask)[roi_indices, :] def volume_index_to_roi_index(volume_index, brain_mask): """Get the ROI index given the volume index (in 3d). This is the inverse function of :func:`roi_index_to_volume_index`. This function is useful if you want to locate a voxel in the ROI given the position in the volume. Args: volume_index (tuple): the volume index, a tuple or list of length 3 brain_mask (str or 3d array): the brain mask you would like to use Returns: int: the index of the given voxel in the ROI created by the given mask """ return create_index_matrix(brain_mask)[volume_index] def create_index_matrix(brain_mask): """Get a matrix with on every 3d position the linear index number of that voxel. This function is useful if you want to locate a voxel in the ROI given the position in the volume. Args: brain_mask (str or 3d array): the brain mask you would like to use Returns: 3d ndarray: a 3d volume of the same size as the given mask and with as every non-zero element the position of that voxel in the linear ROI list. """ mask = autodetect_brain_mask_loader(brain_mask).get_data() roi = np.arange(0, np.count_nonzero(mask)) return restore_volumes(roi, mask, with_volume_dim=False) def get_temporary_results_dir(user_value): """Get the temporary results dir from the user value and from the config. Args: user_value (string, boolean or None): if a string is given we will use that directly. If a boolean equal to True is given we will use the configuration defined value. If None/False is given we will not use a specific temporary results dir. Returns: str or None: either the temporary results dir or None """ if isinstance(user_value, string_types): return user_value if user_value is True: return get_tmp_results_dir() return None def create_blank_mask(volume4d_path, output_fname): """Create a blank mask for the given 4d volume. Sometimes you want to use all the voxels in the given dataset, without masking any voxel. Since the optimization routines require a mask, you have to submit one. The solution is to use a blank mask, that is, a mask that masks nothing. 
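For example (the file names are hypothetical):

.. code-block:: python

    >>> create_blank_mask('dwi.nii.gz', 'full_mask.nii.gz')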
Args: volume4d_path (str): the path to the 4d volume you want to create a blank mask for output_fname (str): the path to the result mask """ volume_info = load_nifti(volume4d_path) mask = np.ones(volume_info.shape[:3]) write_nifti(mask, volume_info.get_header(), output_fname) def volume_merge(volume_paths, output_fname, sort=False): """Merge a list of volumes on the 4th dimension. Writes the result as a file. You can enable sorting the list of volume names based on a natural key sort. This is the most convenient option in the case of globbing files. By default this behaviour is disabled. Example usage with globbing: .. code-block:: python mdt.volume_merge(glob.glob('*.nii'), 'merged.nii.gz', True) Args: volume_paths (list of str): the list with the input filenames output_fname (str): the output filename sort (boolean): if true we natural sort the list of DWI images before we merge them. If false we don't. The default is True. Returns: list of str: the list with the filenames in the order of concatenation. """ images = [] header = None if sort: def natural_key(_str): return [int(s) if s.isdigit() else s for s in re.split(r'(\d+)', _str)] volume_paths.sort(key=natural_key) for volume in volume_paths: nib_container = load_nifti(volume) header = header or nib_container.get_header() image_data = nib_container.get_data() if len(image_data.shape) < 4: image_data = np.expand_dims(image_data, axis=3) images.append(image_data) combined_image = np.concatenate(images, axis=3) write_nifti(combined_image, header, output_fname) return volume_paths def concatenate_mri_sets(items, output_volume_fname, output_protocol_fname, overwrite_if_exists=False): """Concatenate two or more DMRI datasets. Normally used to concatenate different DWI shells into one image. This writes a single volume and a single protocol file. Args: items (tuple of dict): A tuple of dicts with volume filenames and protocol filenames: .. code-block:: python ( {'volume': volume_fname, 'protocol': protocol_filename }, ... ) output_volume_fname (string): The name of the output volume output_protocol_fname (string): The name of the output protocol file overwrite_if_exists (boolean, optional, default false): Overwrite the output files if they already exists. """ if not items: return if os.path.exists(output_volume_fname) and os.path.exists(output_protocol_fname) and not overwrite_if_exists: return to_concat = [] nii_header = None for e in items: signal_img = load_nifti(e['volume']) signal4d = signal_img.get_data() nii_header = signal_img.get_header() protocol = load_protocol(e['protocol']) to_concat.append((protocol, signal4d)) protocol, signal4d = concatenate_two_mri_measurements(to_concat) write_nifti(signal4d, nii_header, output_volume_fname) write_protocol(protocol, output_protocol_fname) def create_median_otsu_brain_mask(dwi_info, protocol, output_fname=None, **kwargs): """Create a brain mask and optionally write it. It will always return the mask. If output_fname is set it will also write the mask. Args: dwi_info (string or tuple or image): the dwi info, either: - the filename of the input file; - or a tuple with as first index a ndarray with the DWI and as second index the header; - or only the image as an ndarray protocol (string or :class:`~mdt.protocols.Protocol`): The filename of the protocol file or a Protocol object output_fname (string): the filename of the output file. If None, no output is written. If dwi_info is only an image also no file is written. **kwargs: the additional arguments for the function median_otsu. 
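A minimal sketch (the file names are hypothetical):

.. code-block:: python

    >>> mask = create_median_otsu_brain_mask('dwi.nii.gz', 'protocol.prtcl',
    ...                                      output_fname='brain_mask.nii.gz')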
Returns: ndarray: The created brain mask """ from mdt.masking import create_median_otsu_brain_mask, create_write_median_otsu_brain_mask if output_fname: if not isinstance(dwi_info, (string_types, tuple, list)): raise ValueError('No header obtainable, can not write the brain mask.') return create_write_median_otsu_brain_mask(dwi_info, protocol, output_fname, **kwargs) return create_median_otsu_brain_mask(dwi_info, protocol, **kwargs) def extract_volumes(input_volume_fname, input_protocol, output_volume_fname, output_protocol, volume_indices): """Extract volumes from the given volume and save them to separate files. This will index the given input volume in the 4th dimension, as is usual in multi shell DWI files. Args: input_volume_fname (str): the input volume from which to get the specific volumes input_protocol (str or :class:`~mdt.protocols.Protocol`): the input protocol, either a file or preloaded protocol object output_volume_fname (str): the output filename for the selected volumes output_protocol (str): the output protocol for the selected volumes volume_indices (:class:`list`): the desired indices, indexing the input_volume """ input_protocol = autodetect_protocol_loader(input_protocol).get_protocol() new_protocol = input_protocol.get_new_protocol_with_indices(volume_indices) write_protocol(new_protocol, output_protocol) input_volume = load_nifti(input_volume_fname) image_data = input_volume.get_data()[..., volume_indices] write_nifti(image_data, input_volume.get_header(), output_volume_fname) def recalculate_error_measures(model, problem_data, data_dir, sigma, output_dir=None, sigma_param_name=None, evaluation_model=None): """Recalculate the information criterion maps. This will write the results either to the original data directory, or to the given output dir. Args: model (str or AbstractModel): An implementation of an AbstractModel that contains the model we want to optimize or the name of an model we use with get_model() problem_data (DMRIProblemData): the problem data object data_dir (str): the directory containing the results for the given model sigma (float): the new noise sigma we use for calculating the log likelihood and then the information criteria's. output_dir (str): if given, we write the output to this directory instead of the data dir. sigma_param_name (str): the name of the parameter to which we will set sigma. 
If not given we search the result maps for something ending in .sigma evaluation_model: the evaluation model, we will manually fix the sigma in this function """ from mdt.models.cascade import DMRICascadeModelInterface logger = logging.getLogger(__name__) if isinstance(model, string_types): model = get_model(model) if isinstance(model, DMRICascadeModelInterface): raise ValueError('This function does not accept cascade models.') model.set_problem_data(problem_data) results_maps = create_roi(get_all_image_data(data_dir), problem_data.mask) if sigma_param_name is None: sigma_params = list(filter(lambda key: '.sigma' in key, model.get_optimization_output_param_names())) if not sigma_params: raise ValueError('Could not find a suitable parameter to set sigma for.') sigma_param_name = sigma_params[0] logger.info('Setting the given sigma value to the model parameter {}.'.format(sigma_param_name)) model.fix(sigma_param_name, sigma) evaluation_model = evaluation_model or OffsetGaussianEvaluationModel() evaluation_model.set_noise_level_std(sigma) log_likelihood_calc = LogLikelihoodCalculator() log_likelihoods = log_likelihood_calc.calculate(model, results_maps, evaluation_model=evaluation_model) k = model.get_nmr_estimable_parameters() n = problem_data.get_nmr_inst_per_problem() results_maps.update({'LogLikelihood': log_likelihoods}) results_maps.update(calculate_information_criterions(log_likelihoods, k, n)) volumes = restore_volumes(results_maps, problem_data.mask) output_dir = output_dir or data_dir write_all_as_nifti(volumes, output_dir, problem_data.volume_header) def create_signal_estimates(volume_maps, problem_data, model, output_fname): """Estimate and write the signals of a given model on the given data. Args: volume_maps (str or dict): either a directory file name or a dictionary containing the results problem_data (DMRIProblemData): the problem data object, we will set this to the model model (str or model): the model or the name of the model to use for estimating the signals output_fname (str): the file name of the file to write the signal estimates to (.nii or .nii.gz) """ if isinstance(model, string_types): model = get_model(model) if isinstance(volume_maps, string_types): volume_maps = get_all_image_data(volume_maps) model.set_problem_data(problem_data) calculator = CalculateModelEstimates() results = calculator.calculate(model, create_roi(volume_maps, problem_data.mask)) signal_estimates = restore_volumes(results, problem_data.mask) write_nifti(signal_estimates, problem_data.volume_header, output_fname) PKq}Iy8\\mdt/protocols.pyimport collections import glob import numbers import os import numpy as np import copy import six from mdt.exceptions import ProtocolIOError __author__ = 'Robbert Harms' __date__ = "2014-02-06" __license__ = "LGPL v3" __maintainer__ = "Robbert Harms" __email__ = "robbert.harms@maastrichtuniversity.nl" class Protocol(collections.MutableMapping): def __init__(self, columns=None): """Create a new protocol. Optionally initializes the protocol with the given set of columns. Please note that we use SI units throughout MDT. Take care when loading the data that you load it in SI units. For example: * G (gradient amplitude) in T/m (Tesla per meter) * Delta (time interval) in seconds * delta (duration) in seconds Args: columns (dict): The initial list of columns used by this protocol, the keys should be the name of the parameter (the same as those used in the model functions). The values should be numpy arrays of equal length. 
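A minimal construction sketch using SI units (the values are illustrative; b is given in s/m^2):

.. code-block:: python

    >>> import numpy as np
    >>> protocol = Protocol(columns={'gx': np.array([0., 1.]),
    ...                              'gy': np.array([0., 0.]),
    ...                              'gz': np.array([0., 0.]),
    ...                              'b': np.array([0., 1e9])})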
""" super(Protocol, self).__init__() self._gamma_h = 267.5987E6 # radians s^-1 T^-1 (s = seconds, T = Tesla) self._unweighted_threshold = 25e6 # s/m^2 self._columns = {} self._preferred_column_order = ('gx', 'gy', 'gz', 'G', 'Delta', 'delta', 'TE', 'T1', 'b', 'q', 'maxG') self._virtual_columns = [VirtualColumnB(), SimpleVirtualColumn('Delta', lambda protocol: get_sequence_timings(protocol)['Delta']), SimpleVirtualColumn('delta', lambda protocol: get_sequence_timings(protocol)['delta']), SimpleVirtualColumn('G', lambda protocol: get_sequence_timings(protocol)['G'])] if columns: self._columns = columns for k, v in columns.items(): s = v.shape if len(s) > 2 or (len(s) == 2 and s[1] > 1): raise ValueError("All columns should be of width one.") if len(s) ==2 and s[1] == 1: self._columns[k] = np.squeeze(v) @property def gamma_h(self): """Get the used gamma of the ``H`` atom used by this protocol. Returns: float: The used gamma of the ``H`` atom used by this protocol. """ return self._gamma_h def update_column(self, name, data): """Updates the given column with new information. This actually calls add_column(name, data) in the background. This function is included to allow for clearer function calls. Args: name (str): The name of the column to add data (ndarray): The vector to add to this protocol. Returns: self: for chaining """ return self.add_column(name, data) def add_column(self, name, data): """Add a column to this protocol. This overrides the column if present. Args: name (str): The name of the column to add data (ndarray): The vector to add to this protocol. Returns: self: for chaining """ if isinstance(data, six.string_types): data = float(data) if isinstance(data, numbers.Number) or not data.shape: if self._columns: data = np.ones((self.length,)) * data else: data = np.ones((1,)) * data s = data.shape if self.length and s[0] > self.length: self._logger.info("The column '{}' has to many elements ({}), we will only use the first {}.".format( name, s[0], self.length)) self._columns.update({name: data[:self.length]}) elif self.length and s[0] < self.length: raise ValueError("Incorrect column length given for '{}', expected {} and got {}.".format( name, self.length, s[0])) else: if name == 'g' and len(data.shape) > 1 and data.shape[1] == 3: self._columns.update({'gx': data[:, 0], 'gy': data[:, 1], 'gz': data[:, 2]}) else: self._columns.update({name: data}) return self def add_column_from_file(self, name, file_name, multiplication_factor=1): """Add a column to this protocol, loaded from the given file. The given file can either contain a single value (which is broadcasted), or one value per protocol line. Args: name (str): The name of the column to add file_name (str): The file to get the column from. multiplication_factor (double): we might need to scale the data by a constant. For example, if the data in the file is in ms we might need to scale it to seconds by multiplying with 1e-3 Returns: self: for chaining """ if name == 'g': self._columns.update(get_g_columns(file_name)) for column_name in ('gx', 'gy', 'gz'): self._columns[column_name] *= multiplication_factor return self data = np.genfromtxt(file_name) data *= multiplication_factor self.add_column(name, data) return self def remove_column(self, column_name): """Completely remove a column from this protocol. 
Args: column_name (str): The name of the column to remove """ if column_name == 'g': del self._columns['gx'] del self._columns['gy'] del self._columns['gz'] else: if column_name in self._columns: del self._columns[column_name] def remove_rows(self, rows): """Remove a list of rows from all the columns. Please note that the protocol is 0 indexed. Args: rows (list of int): List with indices of the rows to remove """ for key, column in self._columns.items(): self._columns[key] = np.delete(column, rows) def get_columns(self, column_names): """Get a matrix containing the requested column names in the order given. Returns: ndarrray: A 2d matrix with the column requested concatenated. """ if not column_names: return None return np.concatenate([self[i] for i in column_names], axis=1) def get_column(self, column_name): """Get the column associated by the given column name. Args: column_name (str): The name of the column we want to return. Returns: ndarray: The column we would like to return. This is returned as a 2d matrix with shape (n, 1). Raises: KeyError: If the column could not be found. """ try: return self._get_real_column(column_name) except KeyError: try: return self._get_estimated_column(column_name) except KeyError: raise KeyError('The given column name "{}" could not be found in this protocol.'.format(column_name)) @property def length(self): """Get the length of this protocol. Returns: int: The length of the protocol. """ if self._columns: return self._columns[list(self._columns.keys())[0]].shape[0] return 0 @property def number_of_columns(self): """Get the number of columns in this protocol. This only counts the real columns, not the estimated ones. Returns: int: The number columns in this protocol. """ return len(self._columns) @property def column_names(self): """Get the names of the columns. This only lists the real columns, not the estimated ones. Returns: list of str: The names of the columns. """ return self._column_names_in_preferred_order(self._columns.keys()) @property def estimated_column_names(self): """Get the names of the virtual columns. This will only return the names of the virtual columns for which no real column exists. """ return self._column_names_in_preferred_order([e.name for e in self._virtual_columns if e.name not in self.column_names]) def get_nmr_shells(self): """Get the number of unique shells in this protocol. This is measured by counting the number of unique weighted bvals in this protocol. Returns: int: The number of unique weighted b-values in this protocol Raises: KeyError: This function may throw a key error if the 'b' column in the protocol could not be loaded. """ return len(self.get_b_values_shells()) def get_b_values_shells(self): """Get the b-values of the unique shells in this protocol. Returns: :class:`list`: a list with the unique weighted bvals in this protocol. Raises: KeyError: This function may throw a key error if the 'b' column in the protocol could not be loaded. """ return np.unique(self.get_column('b')[self.get_weighted_indices()]).tolist() def count_occurences(self, column, value): """Count the occurences of the given value in the given column. This can for example be used to count the occurences of a single b-value in the protocol. Args: column (str): the name of the column value (float): the value to count for occurences """ return sum(1 for v in self[column] if v == value) def has_column(self, column_name): """Check if this protocol has a column with the given name. 
This will also return true if the column can be estimated from the other columns. See is_column_real() to get information for columns that are really known. Returns: boolean: true if there is a column with the given name, false otherwise. """ try: return self.get_column(column_name) is not None except KeyError: return False def is_column_real(self, column_name): """Check if this protocol has real column information for the column with the given name. For example, the other function has_column('G') will normally return true since 'G' can be estimated from 'b'. This function will return false if the column needs to be estimated and will return true if real data is available for the columnn. Returns: boolean: true if there is really a column with the given name, false otherwise. """ return column_name in self._columns def get_unweighted_indices(self, unweighted_threshold=None): """Get the indices to the unweighted volumes. If the column 'b' could not be found, assume that all measurements are unweighted. Args: unweighted_threshold (float): the threshold under which we call it unweighted. Returns: list of int: A list of indices to the unweighted volumes. """ unweighted_threshold = unweighted_threshold or self._unweighted_threshold try: b = self.get_column('b') g = self.get_column('g') g_limit = np.sqrt(g[:, 0]**2 + g[:, 1]**2 + g[:, 2]**2) < 0.99 b_limit = b[:, 0] < unweighted_threshold return np.where(g_limit + b_limit)[0] except KeyError: return range(self.length) def get_weighted_indices(self, unweighted_threshold=None): """Get the indices to the weighted volumes. Args: unweighted_threshold (float): the threshold under which we call it unweighted. Returns: list of int: A list of indices to the weighted volumes. """ return sorted(set(range(self.get_column('b').shape[0])) - set(self.get_unweighted_indices(unweighted_threshold=unweighted_threshold))) def get_indices_bval_in_range(self, start=0, end=1.0e9): """Get the indices of the b-values in the range [start, end]. This can be used to get the indices of gradients whose b-value is in the range suitable for a specific analysis. Note that we use SI units and you need to specify the values in units of s/m^2 and not in s/mm^2. Also note that specifying 0 as start of the range does not automatically mean that the unweighted volumes are returned. It can happen that the b-value of the unweighted volumes is higher then 0 even if the the gradient ``g`` is ``[0 0 0]``. This function does not make any assumptions about that and just returns indices in the given range. If you want to include the unweighted volumes, make a call to :meth:`get_unweighted_indices` yourself. Args: start (float): b-value of the start of the range (inclusive) we want to get the indices of the volumes from. Should be positive. We subtract epsilon for float comparison end (float): b-value of the end of the range (inclusive) we want to get the indices of the volumes from. Should be positive. We add epsilon for float comparison epsilon (float): the epsilon we use in the range. Returns: :class:`list`: a list of indices of all volumes whose b-value is in the given range. If you want to include the unweighted volumes, make a call to get_unweighted_indices() yourself. """ b_values = self.get_column('b') return np.where((start <= b_values) * (b_values <= end))[0] def get_all_columns(self): """Get all real (known) columns as a big array. Returns: ndarray: All the real columns of this protocol. 
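A minimal access sketch; note that 'b' may either be a real column or be estimated from the sequence timings:

.. code-block:: python

    >>> bvals = protocol.get_column('b')         # shape (n, 1)
    >>> gb = protocol.get_columns(('g', 'b'))    # gx, gy, gz and b side by side
    >>> data = protocol.get_all_columns()        # all real columns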
""" return self.get_columns(self.column_names) def deepcopy(self): """Return a deep copy of this protocol. Returns: Protocol: A deep copy of this protocol. """ return Protocol(columns=copy.deepcopy(self._columns)) def append_protocol(self, protocol): """Append another protocol to this protocol. This will add the columns of the other protocol to the columns of this protocol. This supposes both protocols have the same columns. """ if type(protocol) is type(self): for key, value in self._columns.items(): self._columns[key] = np.append(self[key], protocol[key], 0) def get_new_protocol_with_indices(self, indices): """Create a new protocol object with all the columns but as rows only those of the given indices. Args: indices: the indices we want to use in the new protocol Returns: Protocol: a protocol with all the data of the given indices """ return Protocol(columns={k: v[indices] for k, v in self._columns.items()}) def _get_real_column(self, column_name): """Try to use a real column from this protocol. Returns: ndarray: A real column, that is, a column from which we have real data. Raises: KeyError: If the column name could not be found we raise a key error. """ if column_name in self._columns: return np.reshape(self._columns[column_name], (-1, 1)) if column_name == 'g': return self.get_columns(('gx', 'gy', 'gz')) raise KeyError('The given column could not be found.') def _get_estimated_column(self, column_name): """Try to use an estimated column from this protocol. This uses the list of virtual columns to try to estimate the requested column. Returns: ndarray: An estimated column, that is, a column we estimate from the other columns. Raises: KeyError: If the column name could not be estimated we raise a key error. """ for virtual_column in self._virtual_columns: if virtual_column.name == column_name: return np.reshape(virtual_column.get_values(self), (-1, 1)) raise KeyError('The given column name "{}" could not be found in this protocol.'.format(column_name)) def _column_names_in_preferred_order(self, column_names): """Sort the given column names in the preferred order. Column names not in the list of preferred ordering are appended to the end of the list. Args: column_names (list): the list of column names Returns: list: the list of column names in the preferred order """ columns_list = [n for n in column_names] final_list = [] for column_name in self._preferred_column_order: if column_name in columns_list: columns_list.remove(column_name) final_list.append(column_name) final_list.extend(columns_list) return final_list def __len__(self): return self.length def __contains__(self, column): return self.has_column(column) def __getitem__(self, column): return self.get_column(column) def __delitem__(self, key): return self.remove_column(key) def __iter__(self): for key in self._columns.keys(): yield key def __setitem__(self, key, value): return self.add_column(key, value) def __str__(self): s = 'Column names: ' + ', '.join(self.column_names) + "\n" s += 'Data: ' + "\n" s += np.array_str(self.get_all_columns()) return s class VirtualColumn(object): def __init__(self, name): """The interface for generating virtual columns. Virtual columns are columns generated on the fly from the other parts of the protocol. They are generally only generated if the column it tries to generate is not in the protocol. In the Protocol they are used separately from the RealColumns. The VirtualColumns can always be added to the Protocol, but are only used when needed. 
The RealColumns can overrule VirtualColumns by their presence. Args: name (str): the name of the column this object generates. """ self.name = name def get_values(self, parent_protocol): """Get the column given the information in the given protocol. Args: parent_protocol (Protocol): the protocol object to use as a basis for generating the column Returns: ndarray: the single column as a row vector or 2d matrix of shape nx1 """ class SimpleVirtualColumn(VirtualColumn): def __init__(self, name, generate_function): """Create a simple virtual column that uses the given generate function to get the column. Args: name (str): the name of the column generate_function (python function): the function to generate the column """ super(SimpleVirtualColumn, self).__init__(name) self._generate_function = generate_function def get_values(self, parent_protocol): return self._generate_function(parent_protocol) class VirtualColumnB(VirtualColumn): def __init__(self): super(VirtualColumnB, self).__init__('b') def get_values(self, parent_protocol): sequence_timings = get_sequence_timings(parent_protocol) return np.reshape(np.array(parent_protocol.gamma_h ** 2 * sequence_timings['G'] ** 2 * sequence_timings['delta'] ** 2 * (sequence_timings['Delta'] - (sequence_timings['delta'] / 3))), (-1, 1)) def get_sequence_timings(protocol): """Return G, Delta and delta, estimate them if necessary. If Delta and delta are available, they are used instead of estimated Delta and delta. Args: protocol (Protocol): the protocol for which we want to get the sequence timings. Returns: dict: the columns G, Delta and delta """ def all_real(columns): return all(map(protocol.is_column_real, columns)) if all_real(['G', 'delta', 'Delta']): return {name: protocol[name] for name in ['G', 'delta', 'Delta']} if all_real(['b', 'Delta', 'delta']): G = np.sqrt(protocol['b'] / (protocol.gamma_h ** 2 * protocol['delta'] ** 2 * (protocol['Delta'] - (protocol['delta'] / 3.0)))) G[protocol.get_unweighted_indices()] = 0 return {'G': G, 'Delta': protocol['Delta'], 'delta': protocol['delta']} if all_real(['b', 'Delta', 'G']): input_array = np.zeros((protocol.length, 4)) input_array[:, 0] = -1 / 3.0 input_array[:, 1] = np.squeeze(protocol['Delta']) input_array[:, 2] = 0 input_array[:, 3] = np.squeeze(-protocol['b'] / (protocol.gamma_h ** 2 * protocol['G'] ** 2)) b = protocol['b'] delta = np.zeros((protocol.length, 1)) for ind in range(protocol.length): if b[ind] == 0: delta[ind] = 0 else: roots = np.roots(input_array[ind]) delta[ind] = roots[0] return {'G': protocol['G'], 'Delta': protocol['Delta'], 'delta': delta} if all_real(['b', 'G', 'delta']): Delta = np.nan_to_num(np.array((protocol['b'] - protocol.gamma_h ** 2 * protocol['G'] ** 2 * protocol['delta'] ** 3 / 3.0) / (protocol.gamma_h ** 2 * protocol['G'] ** 2 * protocol['delta'] ** 2))) return {'G': protocol['G'], 'delta': protocol['delta'], 'Delta': Delta} if not protocol.is_column_real('b'): raise KeyError('Can not estimate the sequence timings, column "b" is not provided.') if protocol.has_column('maxG'): maxG = protocol['maxG'] else: maxG = np.reshape(np.ones((protocol.length,)) * 0.04, (-1, 1)) bvals = protocol['b'] if protocol.get_b_values_shells(): bmax = max(protocol.get_b_values_shells()) else: bmax = 1 Deltas = (3 * bmax / (2 * protocol.gamma_h ** 2 * maxG ** 2)) ** (1 / 3.0) deltas = Deltas G = np.sqrt(bvals / bmax) * maxG return {'G': G, 'Delta': Deltas, 'delta': deltas} def load_bvec_bval(bvec, bval, column_based='auto', bval_scale='auto'): """Load an protocol from a bvec and bval 
file. This supposes that the bvec (the vector file) has 3 rows (gx, gy, gz) and is space or tab separated. The bval file (the b-values) is one single line with space or tab separated b-values. Args: bvec (str): The filename of the bvec file bval (str): The filename of the bval file column_based (boolean): If true, this supposes that the bvec (the vector file) has 3 rows (gx, gy, gz) and is space or tab separated and that the bval file (the b-values) is one single line with space or tab separated b-values. If false, the vectors and b-values are each on a separate line. If 'auto' it is autodetected; this is the default. bval_scale (float): The amount by which we want to scale (multiply) the b-values. The default is 'auto', which checks if the b-values are lower than 1e4 and if so multiplies them by 1e6 (sets bval_scale to 1e6 and multiplies), else multiplies by 1. Returns: Protocol: the loaded protocol. """ bvec = get_g_columns(bvec, column_based=column_based) bval = np.expand_dims(np.genfromtxt(bval), axis=1) if bval_scale == 'auto' and bval[0, 0] < 1e4: bval *= 1e6 else: bval *= bval_scale columns = {'b': bval} columns.update(bvec) if bvec['gx'].shape[0] != bval.shape[0]: raise ValueError('Columns not of same length.') return Protocol(columns=columns) def get_g_columns(bvec_file, column_based='auto'): """Get the columns of a bvec file. Use auto transpose if needed. Args: bvec_file (str): The filename of the bvec file column_based (boolean): If true, this supposes that the bvec (the vector file) has 3 rows (gx, gy, gz) and is space or tab separated. If false, the vectors are each on a separate line. If 'auto' it is autodetected; this is the default. Returns: dict: the loaded bvec matrix separated into 'gx', 'gy' and 'gz' """ bvec = np.genfromtxt(bvec_file) if len(bvec.shape) < 2: raise ValueError('Bvec file does not have enough dimensions.') if column_based == 'auto': if bvec.shape[1] > bvec.shape[0]: bvec = bvec.transpose() elif column_based: bvec = bvec.transpose() return {'gx': np.reshape(bvec[:, 0], (-1, 1)), 'gy': np.reshape(bvec[:, 1], (-1, 1)), 'gz': np.reshape(bvec[:, 2], (-1, 1))} def write_bvec_bval(protocol, bvec_fname, bval_fname, column_based=True, bval_scale=1): """Write the given protocol to bvec and bval files. This writes the bvector and bvalues to the given filenames. Args: protocol (Protocol): The protocol to write to bvec and bval files. bvec_fname (string): The bvector filename bval_fname (string): The bval filename column_based (boolean, optional, default true): If true, this supposes that the bvec (the vector file) will have 3 rows (gx, gy, gz) and will be space or tab separated and that the bval file (the b-values) is one single line with space or tab separated b-values. bval_scale (double or str): the amount by which we want to scale (multiply) the b-values. If set to 'auto' this checks if the first b-value is higher than 1e4 and if so multiplies by 1e-6 (sets bval_scale to 1e-6 and multiplies), else multiplies by 1. The default is 1 (no scaling). """ b = protocol['b'].copy() g = protocol['g'].copy() if bval_scale == 'auto': if b[0] > 1e4: b *= 1e-6 else: b *= bval_scale if column_based: b = b.transpose() g = g.transpose() for d in (bvec_fname, bval_fname): if not os.path.isdir(os.path.dirname(d)): os.makedirs(os.path.dirname(d)) np.savetxt(bvec_fname, g) np.savetxt(bval_fname, b) def load_protocol(protocol_fname): """Load a protocol from the given protocol file, using the column names defined in the header line of that file. Args: protocol_fname (string): The filename of the protocol file to use.
This should be a comma separated or tab delimited file with equal length columns. Returns: Protocol: a protocol with all the columns loaded. """ with open(protocol_fname) as f: protocol = f.readlines() if protocol[0][0] != '#': raise ProtocolIOError('No column names defined in protocol.') column_names = [c.strip() for c in protocol[0][1:-1].split(',')] data = np.genfromtxt(protocol_fname) s = data.shape d = {} if len(s) == 1: d.update({column_names[0]: data}) else: for i in range(s[1]): d.update({column_names[i]: data[:, i]}) return Protocol(columns=d) def write_protocol(protocol, fname, columns_list=None): """Write the given protocol to a file. Args: protocol (Protocol): The protocol to write to file fname (string): The filename to write to columns_list (tuple): The tuple with the column names to write (and in that order). If None, all the columns are written to file. Returns: tuple: the parameters that were written (and in that order) """ if not columns_list: columns_list = protocol.column_names if 'G' in columns_list and 'Delta' in columns_list and 'delta' in columns_list: if 'b' in columns_list: columns_list.remove('b') if 'maxG' in columns_list: columns_list.remove('maxG') data = protocol.get_columns(columns_list) if not os.path.isdir(os.path.dirname(fname)): os.makedirs(os.path.dirname(fname)) with open(fname, 'w') as f: f.write('#') f.write(','.join(columns_list)) f.write("\n") with open(fname, 'ab') as f: np.savetxt(f, data, delimiter="\t") if columns_list: return columns_list return protocol.column_names def auto_load_protocol(directory, protocol_columns=None, bvec_fname=None, bval_fname=None, bval_scale='auto'): """Load a protocol from the given directory. This function will only auto-search files in the top directory and not in the sub-directories. This will first try to use the first .prtcl file found. If none is present, it will try to find bval and bvec files to use and then try to find the protocol options. The protocol_columns argument should be a dictionary mapping protocol items to filenames. If given, we only use the items in that dictionary. If not given we try to autodetect the protocol option files from the given directory. The search order is (continue until matched): 1) anything ending in .prtcl 2) a) the given bvec and bval file b) anything containing bval or b-val c) anything containing bvec or b-vec i) This will prefer a bvec file that also has 'fsl' in the name. This makes it possible to automatically use the HCP MGH bvec directions. d) protocol options i) using the given dict ii) matching filenames exactly to the available protocol options (e.g. finding a file named TE for the TE values) The available protocol options are: - TE: the TE in seconds, either a file, a single value, or one value per bvec - TR: the TR in seconds, either a file, a single value, or one value per bvec - Delta: the big Delta in seconds, either a file, a single value, or one value per bvec - delta: the small delta in seconds, either a file, a single value, or one value per bvec - maxG: the maximum gradient amplitude G in T/m. Used in estimating G, Delta and delta if not given.
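Example: a minimal sketch of the ``protocol_columns`` mapping (the directory, file name and values are hypothetical):

    .. code-block:: python

        protocol = auto_load_protocol(
            '/data/subject01',
            protocol_columns={'TE': 'TE.txt',    # load the TE values from a file in the directory
                              'Delta': 0.025,    # use a single value for every volume
                              'delta': 0.015})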
Args: directory (str): the directory to use the protocol from protocol_columns (dict): mapping protocol items to filenames (as a subpath of the given directory) or mapping them to values (one value or one value per bvec line) bvec_fname (str): if given, the filename of the bvec file (as a subpath of the given directory) bval_fname (str): if given, the filename of the bvec file (as a subpath of the given directory) bval_scale (double): The scale by which to scale the values in the bval file. If we use from bvec and bval we will use this scale. If 'auto' we try to guess the units/scale. Returns: Protocol: a loaded protocol file. Raises: ValueError: if not enough information could be found. (No protocol or no bvec/bval combo). """ protocol_files = list(glob.glob(os.path.join(directory, '*.prtcl'))) if protocol_files: return load_protocol(protocol_files[0]) if not bval_fname: bval_files = list(glob.glob(os.path.join(directory, '*bval*'))) if not bval_files: bval_files = glob.glob(os.path.join(directory, '*b-val*')) if not bval_files: raise ValueError('Could not find a suitable bval file') bval_fname = bval_files[0] if not bvec_fname: bvec_files = list(glob.glob(os.path.join(directory, '*bvec*'))) if not bvec_files: bvec_files = glob.glob(os.path.join(directory, '*b-vec*')) if not bvec_files: raise ValueError('Could not find a suitable bvec file') for bvec_file in bvec_files: if 'fsl' in os.path.basename(bvec_file): bvec_fname = bvec_files[0] if not bvec_fname: bvec_fname = bvec_files[0] protocol = load_bvec_bval(bvec_fname, bval_fname, bval_scale=bval_scale) protocol_extra_cols = ['TE', 'TR', 'Delta', 'delta', 'maxG'] if protocol_columns: for col in protocol_extra_cols: if col in protocol_columns: if isinstance(protocol_columns[col], six.string_types): protocol.add_column_from_file(col, os.path.join(directory, protocol_columns[col])) else: protocol.add_column(col, protocol_columns[col]) else: for col in protocol_extra_cols: if os.path.isfile(os.path.join(directory, col)): protocol.add_column_from_file(col, os.path.join(directory, col)) return protocol PK\|I;ppmdt/__init__.pyimport collections import glob import logging import logging.config as logging_config import os from inspect import stack import numpy as np import six from six import string_types from .__version__ import VERSION, VERSION_STATUS, __version__ from mdt.configuration import get_logging_configuration_dict try: logging_config.dictConfig(get_logging_configuration_dict()) except ValueError: print('Logging disabled') from mdt.user_script_info import easy_save_user_script_info from mdt.utils import estimate_noise_std, get_cl_devices, load_problem_data, create_blank_mask, create_index_matrix, \ volume_index_to_roi_index, roi_index_to_volume_index, load_brain_mask, init_user_settings, restore_volumes, \ apply_mask, create_roi, volume_merge, concatenate_mri_sets, create_median_otsu_brain_mask, load_samples, \ load_nifti, write_slice_roi, split_write_dataset, apply_mask_to_file, extract_volumes, recalculate_error_measures, \ create_signal_estimates, get_slice_in_dimension, per_model_logging_context, get_temporary_results_dir from mdt.batch_utils import collect_batch_fit_output, run_function_on_batch_fit_output from mdt.protocols import load_bvec_bval, load_protocol, auto_load_protocol, write_protocol, write_bvec_bval from mdt.components_loader import load_component, get_model from mdt.configuration import config_context, get_processing_strategy from mdt.exceptions import InsufficientProtocolError __author__ = 'Robbert Harms' __date__ = 
"2015-03-10" __license__ = "LGPL v3" __maintainer__ = "Robbert Harms" __email__ = "robbert.harms@maastrichtuniversity.nl" def fit_model(model, problem_data, output_folder, optimizer=None, recalculate=False, only_recalculate_last=False, cascade_subdir=False, cl_device_ind=None, double_precision=False, tmp_results_dir=True, save_user_script_info=True): """Run the optimizer on the given model. Args: model (str or :class:`~mdt.models.composite.DMRICompositeModel` or :class:`~mdt.models.cascade.DMRICascadeModelInterface`): An implementation of an AbstractModel that contains the model we want to optimize or the name of an model. problem_data (:class:`~mdt.utils.DMRIProblemData`): the problem data object containing all the info needed for diffusion MRI model fitting output_folder (string): The path to the folder where to place the output, we will make a subdir with the model name in it. optimizer (:class:`mot.cl_routines.optimizing.base.AbstractOptimizer`): The optimization routine to use. recalculate (boolean): If we want to recalculate the results if they are already present. only_recalculate_last (boolean): This is only of importance when dealing with CascadeModels. If set to true we only recalculate the last element in the chain (if recalculate is set to True, that is). If set to false, we recalculate everything. This only holds for the first level of the cascade. cascade_subdir (boolean): if we want to create a subdirectory for the given model if it is a cascade model. Per default we output the maps of cascaded results in the same directory, this allows reusing cascaded results for other cascades (for example, if you cascade BallStick -> Noddi you can use the BallStick results also for BallStick -> Charmed). This flag disables that behaviour and instead outputs the results of a cascade model to a subdirectory for that cascade. This does not apply recursive. cl_device_ind (int or list): the index of the CL device to use. The index is from the list from the function utils.get_cl_devices(). This can also be a list of device indices. double_precision (boolean): if we would like to do the calculations in double precision tmp_results_dir (str, True or None): The temporary dir for the calculations. Set to a string to use that path directly, set to True to use the config value, set to None to disable. save_user_script_info (boolean, str or SaveUserScriptInfo): The info we need to save about the script the user is currently executing. If True (default) we use the stack to lookup the script the user is executing and save that using a SaveFromScript saver. If a string is given we use that filename again for the SaveFromScript saver. If False or None, we do not write any information. If a SaveUserScriptInfo is given we use that directly. Returns: dict: The result maps for the (final) optimized model. This returns the results as 2d arrays with on the first dimension the optimized voxels and on the second the value(s) for the micro-structure maps. 
""" import mdt.utils from mdt.model_fitting import ModelFit if not mdt.utils.check_user_components(): init_user_settings(pass_if_exists=True) model_fit = ModelFit(model, problem_data, output_folder, optimizer=optimizer, recalculate=recalculate, only_recalculate_last=only_recalculate_last, cascade_subdir=cascade_subdir, cl_device_ind=cl_device_ind, double_precision=double_precision, tmp_results_dir=tmp_results_dir) results = model_fit.run() easy_save_user_script_info(save_user_script_info, output_folder + '/used_scripts.py', stack()[1][0].f_globals.get('__file__')) return results def sample_model(model, problem_data, output_folder, sampler=None, recalculate=False, cl_device_ind=None, double_precision=False, store_samples=True, tmp_results_dir=True, save_user_script_info=True, initialization_maps=None): """Sample a composite model using the given cascading strategy. Args: model (:class:`~mdt.models.composite.DMRICompositeModel` or str): the model to sample problem_data (:class:`~mdt.utils.DMRIProblemData`): the problem data object output_folder (string): The path to the folder where to place the output, we will make a subdir with the model name in it (for the optimization results) and then a subdir with the samples output. sampler (:class:`mot.cl_routines.sampling.base.AbstractSampler`): the sampler to use recalculate (boolean): If we want to recalculate the results if they are already present. cl_device_ind (int): the index of the CL device to use. The index is from the list from the function utils.get_cl_devices(). double_precision (boolean): if we would like to do the calculations in double precision store_samples (boolean): if set to False we will store none of the samples. Use this if you are only interested in the volume maps and not in the entire sample chain. tmp_results_dir (str, True or None): The temporary dir for the calculations. Set to a string to use that path directly, set to True to use the config value, set to None to disable. save_user_script_info (boolean, str or SaveUserScriptInfo): The info we need to save about the script the user is currently executing. If True (default) we use the stack to lookup the script the user is executing and save that using a SaveFromScript saver. If a string is given we use that filename again for the SaveFromScript saver. If False or None, we do not write any information. If a SaveUserScriptInfo is given we use that directly. initialization_maps (dict): 4d maps to initialize the sampling with. Per default this is None, common practice is to use the maps from an optimization as starting point Returns: dict: the samples per parameter as a numpy memmap if store_samples is True """ import mdt.utils from mot.load_balance_strategies import EvenDistribution from mdt.model_sampling import sample_composite_model from mdt.models.cascade import DMRICascadeModelInterface import mot.configuration if not mdt.utils.check_user_components(): init_user_settings(pass_if_exists=True) if isinstance(model, string_types): model = get_model(model) if isinstance(model, DMRICascadeModelInterface): raise ValueError('The function \'sample_model()\' does not accept cascade models.') if not model.is_protocol_sufficient(problem_data.protocol): raise InsufficientProtocolError( 'The given protocol is insufficient for this model. 
' 'The reported errors where: {}'.format(model.get_protocol_problems(problem_data.protocol))) if cl_device_ind is not None and not isinstance(cl_device_ind, collections.Iterable): cl_device_ind = [cl_device_ind] if cl_device_ind is None: cl_context_action = mot.configuration.VoidConfigurationAction() else: cl_context_action = mot.configuration.RuntimeConfigurationAction( cl_environments=[get_cl_devices()[ind] for ind in cl_device_ind], load_balancer=EvenDistribution()) with mot.configuration.config_context(cl_context_action): if sampler is None: sampler = configuration.get_sampler() processing_strategy = get_processing_strategy('sampling', model_names=model.name) processing_strategy.set_tmp_dir(get_temporary_results_dir(tmp_results_dir)) output_folder = os.path.join(output_folder, model.name, 'samples') if not os.path.isdir(output_folder): os.makedirs(output_folder) with per_model_logging_context(output_folder, overwrite=recalculate): logger = logging.getLogger(__name__) logger.info('Using MDT version {}'.format(__version__)) logger.info('Preparing for model {0}'.format(model.name)) if initialization_maps: model.set_initial_parameters(create_roi(initialization_maps, problem_data.mask)) model.double_precision = double_precision results = sample_composite_model(model, problem_data, output_folder, sampler, processing_strategy, recalculate=recalculate, store_samples=store_samples) easy_save_user_script_info(save_user_script_info, output_folder + '/used_scripts.py', stack()[1][0].f_globals.get('__file__')) return results def batch_fit(data_folder, batch_profile=None, subjects_selection=None, recalculate=False, models_to_fit=None, cascade_subdir=False, cl_device_ind=None, dry_run=False, double_precision=False, tmp_results_dir=True): """Run all the available and applicable models on the data in the given folder. Args: data_folder (str): The data folder to process batch_profile (:class:`~mdt.batch_utils.BatchProfile` or str): the batch profile to use, or the name of a batch profile to use. If not given it is auto detected. subjects_selection (:class:`~mdt.batch_utils.BatchSubjectSelection`): the subjects to use for processing. If None all subjects are processed. recalculate (boolean): If we want to recalculate the results if they are already present. models_to_fit (list of str): A list of models to fit to the data. This overrides the models in the batch config. cascade_subdir (boolean): if we want to create a subdirectory for every cascade model. Per default we output the maps of cascaded results in the same directory, this allows reusing cascaded results for other cascades (for example, if you cascade BallStick -> Noddi you can use the BallStick results also for BallStick -> Charmed). This flag disables that behaviour and instead outputs the results of a cascade model to a subdirectory for that cascade. This does not apply recursive. cl_device_ind (int or list of int): the index of the CL device to use. The index is from the list from the function get_cl_devices(). dry_run (boolean): a dry run will do no computations, but will list all the subjects found in the given directory. double_precision (boolean): if we would like to do the calculations in double precision tmp_results_dir (str, True or None): The temporary dir for the calculations. Set to a string to use that path directly, set to True to use the config value, set to None to disable. Returns: The list of subjects we will calculate / have calculated. 
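Example: a minimal sketch (the study folder and model name are hypothetical):

    .. code-block:: python

        import mdt

        # dry run: only list the subjects that would be processed
        subjects = mdt.batch_fit('/data/study', dry_run=True)

        # fit a specific set of models to every detected subject
        mdt.batch_fit('/data/study', models_to_fit=['BallStick (Cascade)'])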
""" import mdt.utils from mdt.model_fitting import BatchFitting if not mdt.utils.check_user_components(): init_user_settings(pass_if_exists=True) batch_fitting = BatchFitting(data_folder, batch_profile=batch_profile, subjects_selection=subjects_selection, recalculate=recalculate, models_to_fit=models_to_fit, cascade_subdir=cascade_subdir, cl_device_ind=cl_device_ind, double_precision=double_precision, tmp_results_dir=tmp_results_dir) if dry_run: return batch_fitting.get_subjects_info() return batch_fitting.run() def view_maps(data, config=None, to_file=None, to_file_options=None, block=True, show_maximized=False, use_qt=True, figure_options=None, window_title=None, enable_directory_watcher=True): """View a number of maps using the MDT Maps Visualizer. To save a file to an image, you can use (the simplest) the following two options:: to_file='filename.png', figure_options={'width': 1200, 'height': 750, 'dpi': 100} Args: data (str, dict, :class:`~mdt.visualization.maps.base.DataInfo`): the data we are showing, either a dictionary with result maps, a string with a path name or a DataInfo object config (str, dict, :class:`~mdt.visualization.maps.base import MapPlotConfig`): either a Yaml string or a dictionary with configuration settings or a ValidatedMapPlotConfig object to use directly to_file (str): if set we output the figure to a file and do not launch a GUI to_file_options (dict): extra output options for the savefig command from matplotlib, if dpi is not given, we use the dpi from the figure_options. block (boolean): if we block the plots or not show_maximized (boolean): if we show the window maximized or not use_qt (boolean): if we want to use the Qt GUI, or show the results directly in matplotlib figure_options (dict): figure options for the matplotlib Figure, if figsizes is not given you can also specify two ints, width and height, to indicate the pixel size of the resulting figure, together with the dpi they are used to calculate the figsize. window_title (str): the title for the window enable_directory_watcher (boolean): if the directory watcher should be enabled/disabled, only applicable for the QT GUI. If the directory watcher is enabled, the viewer will automatically add new maps when added to the folder and also automatically remove maps when they are removed from the directory. It is useful to disable this if you want to have multiple viewers open with old results. 
""" from mdt.gui.maps_visualizer.main import start_gui from mdt.visualization.maps.base import MapPlotConfig from mdt.visualization.maps.matplotlib_renderer import MapsVisualizer import matplotlib.pyplot as plt from mdt.visualization.maps.base import DataInfo if isinstance(data, string_types): data = DataInfo.from_dir(data) elif isinstance(data, dict): data = DataInfo(data) elif data is None: data = DataInfo({}) if config is None: config = MapPlotConfig() elif isinstance(config, string_types): config = MapPlotConfig.from_yaml(config) elif isinstance(config, dict): config = MapPlotConfig.from_dict(config) if to_file: figure_options = figure_options or {} figure_options['dpi'] = figure_options.get('dpi', 80) if 'figsize' not in figure_options: figure_options['figsize'] = (figure_options.pop('width', 800) / figure_options['dpi'], figure_options.pop('height', 640) / figure_options['dpi']) figure = plt.figure(**figure_options) viz = MapsVisualizer(data, figure) to_file_options = to_file_options or {} to_file_options['dpi'] = to_file_options.get('dpi', figure_options['dpi']) viz.to_file(to_file, config, **to_file_options) elif use_qt: start_gui(data, config, app_exec=block, show_maximized=show_maximized, window_title=window_title, enable_directory_watcher=enable_directory_watcher) else: figure_options = figure_options or {} figure_options['dpi'] = figure_options.get('dpi', 100) if 'figsize' not in figure_options: figure_options['figsize'] = (figure_options.pop('width', 1800) / figure_options['dpi'], figure_options.pop('height', 1600) / figure_options['dpi']) figure = plt.figure(**figure_options) viz = MapsVisualizer(data, figure) viz.show(config, block=block, maximize=show_maximized) def results_preselection_names(data): """Generate a list of useful map names to display. This is primarily to be used as argument to the config option ``maps_to_show`` in the function :func:`view_maps`. Args: data (str or dict or list of str): either a directory or a dictionary of results or a list of map names. Returns: list of str: the list of useful/filtered map names. """ keys = [] if isinstance(data, string_types): for extension in ('.nii', '.nii.gz'): for f in glob.glob(os.path.join(data, '*' + extension)): keys.append(os.path.basename(f)[0:-len(extension)]) elif isinstance(data, dict): keys = data.keys() else: keys = data filter_match = ('.vec', '.d', '.sigma', 'AIC', 'Errors.mse', 'Errors.sse', '.eigen_ranking', 'SignalEstimates', 'UsedMask') return list(sorted(filter(lambda v: all(m not in v for m in filter_match), keys))) def block_plots(use_qt=True): """A small function to block matplotlib plots and Qt GUI instances. This basically calls either ``plt.show()`` and ``QtApplication.exec_()`` depending on ``use_qt``. Args: use_qt (boolean): if True we block Qt windows, if False we block matplotlib windows """ if use_qt: from mdt.gui.utils import QtManager QtManager.exec_() else: import matplotlib.pyplot as plt plt.show() def view_result_samples(data, **kwargs): """View the samples from the given results set. Args: data (string or dict): The location of the maps to use the samples from, or the samples themselves. 
kwargs (dict): see SampleVisualizer for all the supported keywords """ from mdt.visualization.samples import SampleVisualizer if isinstance(data, string_types): data = load_samples(data) if kwargs.get('voxel_ind') is None: kwargs.update({'voxel_ind': data[list(data.keys())[0]].shape[0] / 2}) SampleVisualizer(data).show(**kwargs) def make_path_joiner(*folder): """Generates and returns an instance of utils.PathJoiner to quickly join pathnames. Returns: PathJoiner: easy path manipulation path joiner """ from mdt.utils import PathJoiner return PathJoiner(*folder) def write_image(fname, data, header): """Write the given data with the given header to the given file. Args: fname (str): The output filename data (ndarray): The data to write header (nibabel header): The header to use """ import nibabel as nib nib.Nifti1Image(data, None, header).to_filename(fname) def write_trackmark_rawmaps(data, output_folder, maps_to_convert=None): """Convert the given nifti files in the input folder to rawmaps in the output folder. Args: data (str or dict): the name of the input folder, of a dictionary with maps to save. output_folder (str): the name of the output folder. Defaults to /trackmark. maps_to_convert (:class:`list`): the list with the names of the maps we want to convert (without the extension). """ from mdt.nifti import TrackMark if isinstance(data, six.string_types): volumes = load_volume_maps(data, map_names=maps_to_convert) else: volumes = data if maps_to_convert: volumes = {k: v for k, v in volumes.items() if k in maps_to_convert} TrackMark.write_rawmaps(output_folder, volumes) def write_trackmark_tvl(output_tvl, vector_directions, vector_magnitudes, tvl_header=(1, 1.8, 0, 0)): """Write a list of vector directions with corresponding magnitude to a trackmark TVL file. Note that the length of the vector_directions and vector_magnitudes should correspond to each other. Next, we only use the first three elements in both lists. Args: output_tvl (str): the name of the output tvl vector_directions (list of str/ndarray): a list of 4d volumes with per voxel the normalized vector direction vector_magnitudes (list of str/ndarray): a list of 4d volumes with per voxel the vector magnitude. tvl_header (list or tuple): The list with header arguments for writing the TVL. See IO.TrackMark for specifics. """ from mdt.nifti import TrackMark if len(vector_directions) != len(vector_magnitudes): raise ValueError('The length of the list of vector directions does not ' 'match with the length of the list of vector magnitudes.') TrackMark.write_tvl_direction_pairs(output_tvl, tvl_header, list(zip(vector_directions, vector_magnitudes))[:3]) def sort_maps(maps_to_sort_on, extra_maps_to_sort=None, reversed_sort=False, sort_index_map=None): """Sort the given maps on the maps to sort on. This first creates a sort matrix to index the maps in sorted order per voxel. Next, it creates the output maps for the maps we sort on. If extra_maps_to_sort is given it should be of the same length as the maps_to_sort_on. Args: maps_to_sort_on (:class:`list`): a list of string (filenames) or ndarrays we will use and compare extra_maps_to_sort (:class:`list`) an additional list we will sort based on the indices in maps_to_sort. This should be of the same length as maps_to_sort_on. reversed_sort (boolean): if we want to sort from large to small instead of small to large. sort_index_map (ndarray): if given we use this sort index map instead of generating one by sorting the maps_to_sort_on. 
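Example: a minimal sketch (the map file names are hypothetical), sorting two weight maps from large to small and reordering the corresponding direction maps in the same way:

    .. code-block:: python

        sorted_weights, sorted_vecs, sort_index = sort_maps(
            ['w_stick0.w.nii.gz', 'w_stick1.w.nii.gz'],
            extra_maps_to_sort=['Stick0.vec0.nii.gz', 'Stick1.vec0.nii.gz'],
            reversed_sort=True)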
Returns: tuple: the first element is the list of sorted volumes, the second the list of extra sorted maps and the last is the sort index map used. """ def load_maps(map_list): tmp = [] for data in map_list: if isinstance(data, string_types): tmp.append(load_nifti(data).get_data()) else: tmp.append(data) return tmp maps_to_sort_on = load_maps(maps_to_sort_on) if extra_maps_to_sort: extra_maps_to_sort = load_maps(extra_maps_to_sort) if len(extra_maps_to_sort) != len(maps_to_sort_on): raise ValueError('The length of the maps to sort on and the extra maps to sort do not match.') from mdt.utils import create_sort_matrix, sort_volumes_per_voxel if sort_index_map is None: sort_index_map = create_sort_matrix(np.concatenate([m for m in maps_to_sort_on], axis=3), reversed_sort=reversed_sort) elif isinstance(sort_index_map, string_types): sort_index_map = np.round(load_nifti(sort_index_map).get_data()).astype(np.int64) sorted_maps = sort_volumes_per_voxel(maps_to_sort_on, sort_index_map) if extra_maps_to_sort: sorted_extra_maps = sort_volumes_per_voxel(extra_maps_to_sort, sort_index_map) return sorted_maps, sorted_extra_maps, sort_index_map return sorted_maps, [], sort_index_map def load_volume_maps(directory, map_names=None, deferred=True): """Read a number of Nifti volume maps from a directory. Args: directory (str): the directory from which we want to read a number of maps map_names (list or tuple): the names of the maps we want to use. If given we only use and return these maps. deferred (boolean): if True we return an deferred loading dictionary instead of a dictionary with the values loaded as arrays. Returns: dict: A dictionary with the volumes. The keys of the dictionary are the filenames (without the extension) of the files in the given directory. """ from mdt.nifti import get_all_image_data return get_all_image_data(directory, map_names=map_names, deferred=deferred) def get_volume_names(directory): """Get the names of the Nifti volume maps in the given directory. Args: directory: the directory to get the names of the available maps from. Returns: :class:`list`: A list with the names of the volumes. """ from mdt.nifti import yield_nifti_info return list(sorted(el[1] for el in yield_nifti_info(directory))) def write_volume_maps(maps, directory, header, overwrite_volumes=True): """Write a dictionary with maps to the given directory using the given header. Args: maps (dict): The maps with as keys the map names and as values 3d or 4d maps directory (str): The dir to write to header: The Nibabel Image Header overwrite_volumes (boolean): If we want to overwrite the volumes if they are present. """ from mdt.nifti import write_all_as_nifti write_all_as_nifti(maps, directory, header, overwrite_volumes=overwrite_volumes) def get_list_of_composite_models(): """Get a list of all available composite models Returns: list of str: A list of all available composite model names. """ from mdt.components_loader import CompositeModelsLoader return CompositeModelsLoader().list_all() def get_list_of_cascade_models(): """Get a list of all available cascade models Returns: list of str: A list of available cascade models """ from mdt.components_loader import CascadeModelsLoader return CascadeModelsLoader().list_all() def get_models_list(): """Get a list of all available models, composite and cascade. Returns: list of str: A list of available model names. 
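Example, printing every available model name:

    .. code-block:: python

        import mdt

        for model_name in mdt.get_models_list():
            print(model_name)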
""" l = get_list_of_cascade_models() l.extend(get_list_of_composite_models()) return list(sorted(l)) def get_models_meta_info(): """Get the meta information tags for all the models returned by get_models_list() Returns: dict of dict: The first dictionary indexes the model names to the meta tags, the second holds the meta information. """ from mdt.components_loader import CompositeModelsLoader, CascadeModelsLoader sml = CompositeModelsLoader() cml = CascadeModelsLoader() meta_info = {} for model_loader in (sml, cml): models = model_loader.list_all() for model in models: meta_info.update({model: model_loader.get_meta_info(model)}) return meta_info def get_batch_profile(batch_profile_name, *args, **kwargs): """Load one of the batch profiles. This is short for load_component('batch_profiles', batch_profile_name). Args: batch_profile_name (str): the name of the batch profile to use Returns: BatchProfile: the batch profile for use in batch fitting routines. """ return load_component('batch_profiles', batch_profile_name, *args, **kwargs) def gui(base_dir=None, app_exec=True): """Start the model fitting GUI. Args: base_dir (str): the starting directory for the file opening actions app_exec (boolean): if true we execute the Qt application, set to false to disable. This is only important if you want to start this GUI from within an existing Qt application. If you leave this at true in that case, this will try to start a new Qt application which may create problems. """ from mdt.gui.model_fit.qt_main import start_gui return start_gui(base_dir=base_dir, app_exec=app_exec) PK}vI 0: remaining_time = (run_time / current_percentage) - run_time else: remaining_time = None self._logger.info('Computations are at {0:.2%}, processing next {1} voxels (' '{2} voxels in total, {3} processed). Time spent: {4}, time left: {5} (h:m:s).'. format(total_processed / total_nmr_voxels, len(voxel_indices), total_nmr_voxels, total_processed, time.strftime('%H:%M:%S', time.gmtime(run_time)), time.strftime('%H:%M:%S', time.gmtime(remaining_time)) if remaining_time else '?')) worker.process(voxel_indices) class ModelProcessingWorkerCreator(object): def create_worker(self, model, problem_data, output_dir, tmp_storage_dir, honor_voxels_to_analyze): """Create and return the worker that the processing strategy will use. Args: model (:class:`~mdt.models.composite.DMRICompositeModel`): the model we want to process problem_data (:class:`~mdt.utils.DMRIProblemData`): The problem data object with which the model is initialized before running output_dir (str): the location for the final output files tmp_storage_dir (str): the location for the temporary output files honor_voxels_to_analyze (boolean): if we should honor the voxels_to_analyze list in the model if applicable. Returns: ModelProcessingWorker: the worker the processing strategy will use. """ class SimpleModelProcessingWorkerGenerator(ModelProcessingWorkerCreator): def __init__(self, callback_function): """Create a generator that will instantiate the worker using the given callback function. Args: callback_function: the callback function we will call when create_worker is called. 
""" self._callback_function = callback_function def create_worker(self, model, problem_data, output_dir, tmp_storage_dir, honor_voxels_to_analyze): return self._callback_function(model, problem_data, output_dir, tmp_storage_dir, honor_voxels_to_analyze) class ModelProcessingWorker(object): def __init__(self, model, problem_data, output_dir, tmp_storage_dir, honor_voxels_to_analyze): self._write_volumes_gzipped = True self._used_mask_name = 'UsedMask' self._model = model self._problem_data = problem_data self._output_dir = output_dir self._tmp_storage_dir = tmp_storage_dir self._honor_voxels_to_analyze = honor_voxels_to_analyze self._roi_lookup_path = os.path.join(self._tmp_storage_dir, '_roi_voxel_lookup_table.npy') self._volume_indices = self._create_roi_to_volume_index_lookup_table() def process(self, roi_indices): """Process the indicated voxels in the way prescribed by this worker. Since the processing strategy can use all voxels to do the analysis in one go, this function should return all the output it can, i.e. the same kind of output as from the function :meth:`combine`. Args: roi_indices (ndarray): The list of roi indices we want to compute Returns: the results for this single processing step """ def get_voxels_to_compute(self): """Get the ROI indices of the voxels we need to compute. This should either return an entire list with all the ROI indices for the given brain mask, or a list with the specific roi indices we want the strategy to compute. Returns: ndarray: the list of ROI indices (indexing the current mask) with the voxels we want to compute. """ if self._honor_voxels_to_analyze and self._model.problems_to_analyze: roi_list = self._model.problems_to_analyze else: roi_list = np.arange(0, np.count_nonzero(self._problem_data.mask)) mask_path = os.path.join(self._tmp_storage_dir, '{}.npy'.format(self._used_mask_name)) if os.path.exists(mask_path): return roi_list[np.logical_not(np.squeeze(create_roi(np.load(mask_path, mmap_mode='r'), self._problem_data.mask)[roi_list]))] return roi_list def combine(self): """Combine all the calculated parts. 
Returns: the processing results for as much as this is applicable """ del self._volume_indices os.remove(self._roi_lookup_path) def _write_volumes(self, roi_indices, results, tmp_dir): """Write the result arrays to the temporary storage Args: roi_indices (ndarray): the indices of the voxels we computed results (dict): the dictionary with the results to save tmp_dir (str): the directory to save the intermediate results to """ if not os.path.exists(tmp_dir): os.makedirs(tmp_dir) volume_indices = self._volume_indices[roi_indices, :] for param_name, result_array in results.items(): storage_path = os.path.join(tmp_dir, param_name + '.npy') map_4d_dim_len = 1 if len(result_array.shape) > 1: map_4d_dim_len = result_array.shape[1] else: result_array = np.reshape(result_array, (-1, 1)) mode = 'w+' if os.path.isfile(storage_path): mode = 'r+' tmp_matrix = open_memmap(storage_path, mode=mode, dtype=result_array.dtype, shape=self._problem_data.mask.shape[0:3] + (map_4d_dim_len,)) tmp_matrix[volume_indices[:, 0], volume_indices[:, 1], volume_indices[:, 2]] = result_array mask_path = os.path.join(tmp_dir, '{}.npy'.format(self._used_mask_name)) mode = 'w+' if os.path.isfile(mask_path): mode = 'r+' tmp_mask = open_memmap(mask_path, mode=mode, dtype=np.bool, shape=self._problem_data.mask.shape) tmp_mask[volume_indices[:, 0], volume_indices[:, 1], volume_indices[:, 2]] = True def _combine_volumes(self, output_dir, chunks_dir, volume_header, maps_subdir=''): """Combine volumes found in subdirectories to a final volume. Args: output_dir (str): the location for the output files chunks_dir (str): the directory in which all the chunks are located maps_subdir (str): we may have per chunk a subdirectory in which the maps are located. This parameter is for that subdir. Example search: ///*.nii* Returns: dict: the dictionary with the ROIs for every volume, by parameter name """ if not os.path.exists(os.path.join(output_dir, maps_subdir)): os.makedirs(os.path.join(output_dir, maps_subdir)) map_names = list(map(lambda p: os.path.splitext(os.path.basename(p))[0], glob.glob(os.path.join(chunks_dir, maps_subdir, '*.npy')))) basic_info = (os.path.join(chunks_dir, maps_subdir), os.path.join(output_dir, maps_subdir), volume_header, self._write_volumes_gzipped) info_list = [(map_name, basic_info) for map_name in map_names] list(map(_combine_volumes_write_out, info_list)) def _create_roi_to_volume_index_lookup_table(self): """Creates and returns a lookup table for roi index -> volume index. This will create from the given mask a memory mapped lookup table mapping the ROI indices (single integer) to the correct voxel location in 3d. To find a voxel index using the ROI index, just index this lookup table using the ROI index as index. For example, suppose we have the lookup table: 0: (0, 0, 0) 1: (0, 0, 1) 2: (0, 1, 0) ... We can get the position of a voxel in the 3d space by indexing this array as: ``lookup_table[roi_index]`` to get the correct 3d location. Returns: memmap ndarray: the memory mapped array which """ if os.path.isfile(self._roi_lookup_path): os.remove(self._roi_lookup_path) np.save(self._roi_lookup_path, np.argwhere(self._problem_data.mask)) return np.load(self._roi_lookup_path, mmap_mode='r') def _combine_volumes_write_out(info_pair): """Write out the given information to a nifti volume. 
Needs to be used by ModelProcessingWorker._combine_volumes """ map_name, info_list = info_pair chunks_dir, output_dir, volume_header, write_gzipped = info_list data = np.load(os.path.join(chunks_dir, map_name + '.npy'), mmap_mode='r') write_all_as_nifti({map_name: data}, output_dir, volume_header, gzip=write_gzipped) del data class FittingProcessingWorker(ModelProcessingWorker): def __init__(self, optimizer, *args): """The processing worker for model fitting. Use this if you want to use the model processing strategy to do model fitting. Args: optimizer: the optimization routine to use """ super(FittingProcessingWorker, self).__init__(*args) self._optimizer = optimizer self._write_volumes_gzipped = gzip_optimization_results() def process(self, roi_indices): results, extra_output = self._optimizer.minimize(self._model, full_output=True) results.update(extra_output) self._write_volumes(roi_indices, results, self._tmp_storage_dir) return results def combine(self): super(FittingProcessingWorker, self).combine() self._combine_volumes(self._output_dir, self._tmp_storage_dir, self._problem_data.volume_header) return create_roi(get_all_image_data(self._output_dir), self._problem_data.mask) class SamplingProcessingWorker(ModelProcessingWorker): class SampleChainNotStored(object): pass def __init__(self, sampler, store_samples=False, *args): """The processing worker for model sampling. Use this if you want to use the model processing strategy to do model sampling. Args: sampler (AbstractSampler): the optimization sampler to use store_samples (boolean): if set to False we will store none of the samples. Use this if you are only interested in the volume maps and not in the entire sample chain. If set to True the process and combine function will no longer return any results. """ super(SamplingProcessingWorker, self).__init__(*args) self._sampler = sampler self._write_volumes_gzipped = gzip_sampling_results() self._store_samples = store_samples def process(self, roi_indices): results, other_output = self._sampler.sample(self._model, full_output=True) self._write_volumes(roi_indices, other_output, os.path.join(self._tmp_storage_dir, 'volume_maps')) if self._store_samples: self._write_sample_results(results, self._problem_data.mask, roi_indices) return results return SamplingProcessingWorker.SampleChainNotStored() def combine(self): super(SamplingProcessingWorker, self).combine() self._combine_volumes(self._output_dir, self._tmp_storage_dir, self._problem_data.volume_header, maps_subdir='volume_maps') if self._store_samples: for samples in glob.glob(os.path.join(self._tmp_storage_dir, '*.samples.npy')): shutil.move(samples, self._output_dir) return load_samples(self._output_dir) return SamplingProcessingWorker.SampleChainNotStored() def _write_sample_results(self, results, full_mask, roi_indices): """Write the sample results to a .npy file. If the given sample files do not exists or if the existing file is not large enough it will create one with enough storage to hold all the samples for the given total_nmr_voxels. On storing it should also be given a list of voxel indices with the indices of the voxels that are being stored. 
Args: results (dict): the samples to write full_mask (ndarray): the complete mask for the entire brain roi_indices (ndarray): the roi indices of the voxels we computed """ total_nmr_voxels = np.count_nonzero(full_mask) if not os.path.exists(self._output_dir): os.makedirs(self._output_dir) for map_name, samples in results.items(): samples_path = os.path.join(self._output_dir, map_name + '.samples.npy') mode = 'w+' if os.path.isfile(samples_path): mode = 'r+' current_results = open_memmap(samples_path, mode='r') if current_results.shape[1] != samples.shape[1]: mode = 'w+' del current_results # closes the memmap saved = open_memmap(samples_path, mode=mode, dtype=samples.dtype, shape=(total_nmr_voxels, samples.shape[1])) saved[roi_indices, :] = samples PKrE~I2m[[mdt/configuration.py"""Contains the runtime configuration of MDT. This consists of two parts, functions to get the current runtime settings and configuration actions to update these settings. To set a new configuration, create a new :py:class:`ConfigAction` and use this within a context environment using :py:func:`config_context`. Example: .. code-block:: python from mdt.configuration import YamlStringAction, config_context config = ''' optimization: general: optimizers: - name: 'NMSimplex' patience: 10 ''' with mdt.config_context(YamlStringAction(config)): mdt.fit_model(...) """ import os import re from copy import deepcopy import collections import yaml from contextlib import contextmanager from pkg_resources import resource_stream from six import string_types from mot.factory import get_optimizer_by_name, get_sampler_by_name from mdt.components_loader import ProcessingStrategiesLoader, NoiseSTDCalculatorsLoader import mot.configuration from mot.load_balance_strategies import EvenDistribution from mdt.__version__ import __version__ __author__ = 'Robbert Harms' __date__ = "2015-06-23" __maintainer__ = "Robbert Harms" __email__ = "robbert.harms@maastrichtuniversity.nl" """ The current configuration """ _config = {} def get_config_dir(): """Get the location of the components. Return: str: the path to the components """ return os.path.join(os.path.expanduser("~"), '.mdt', __version__) def config_insert(keys, value): """Insert the given value in the given key. This will create all layers of the dictionary if needed. Args: keys (list of str): the position of the input value value (object): the value to put at the position of the key. """ config = _config for key in keys[:-1]: if key not in config: config[key] = {} config = config[key] config[keys[-1]] = value def ensure_exists(keys): """Ensure the given layer of keys exists. 
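For example, a sketch of how this is typically used together with :func:`config_insert`:

    .. code-block:: python

        ensure_exists(['logging', 'info_dict'])
        config_insert(['logging', 'info_dict', 'version'], 1)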
Args: keys (list of str): the positions to ensure exist """ config = _config for key in keys: if key not in config: config[key] = {} config = config[key] def load_builtin(): """Load the config file from the skeleton in mdt/data/mdt.conf""" with resource_stream('mdt', 'data/mdt.conf') as f: load_from_yaml(f.read()) def load_user_home(): """Load the config file from the user home directory""" config_file = os.path.join(get_config_dir(), 'mdt.conf') if os.path.isfile(config_file): with open(config_file) as f: load_from_yaml(f.read()) else: raise IOError('Config file could not be loaded.') def load_user_gui(): """Load the gui specific config file from the user home directory""" config_file = os.path.join(get_config_dir(), 'mdt.gui.conf') if os.path.isfile(config_file): with open(config_file) as f: load_from_yaml(f.read()) else: raise IOError('Config file could not be loaded.') def load_specific(file_name): """Can be called by the application to use the config from a specific file. This assumes that the given file contains YAML content, that is, we want to process it with the function load_from_yaml(). Please note that the last configuration loaded overwrites the values of the previously loaded config files. Args: file_name (str): The name of the file to use. """ with open(file_name) as f: load_from_yaml(f.read()) def load_from_yaml(yaml_str): """Can be called to use configuration options from a YAML string. This will update the current configuration with the new options. Args: yaml_str (str): The string containing the YAML config to parse. """ config_dict = yaml.load(yaml_str) or {} load_from_dict(config_dict) def load_from_dict(config_dict): """Load configuration options from a given dictionary. Args: config_dict (dict): the dictionary from which to use the configurations """ for key, value in config_dict.items(): loader = get_section_loader(key) loader.load(value) def update_gui_config(update_dict): """Update the GUI configuration file with the given settings. Args: update_dict (dict): the items to update in the GUI config file """ update_write_config(os.path.join(get_config_dir(), 'mdt.gui.conf'), update_dict) def update_write_config(config_file, update_dict): """Update a given configuration file with updated values. If the configuration file does not exist, a new one is created. Args: config_file (str): the location of the config file to update update_dict (dict): the items to update in the config file """ if not os.path.exists(config_file): with open(config_file, 'a'): pass with open(config_file, 'r') as f: config_dict = yaml.load(f.read()) or {} for key, value in update_dict.items(): loader = get_section_loader(key) loader.update(config_dict, value) with open(config_file, 'w') as f: yaml.dump(config_dict, f) class ConfigSectionLoader(object): def load(self, value): """Load the given configuration value into the current configuration. Args: value: the value to use in the configuration """ def update(self, config_dict, updates): """Update the given configuration dictionary with the values in the given updates dict. This enables automating updating a configuration file. Updates are written in place. Args: config_dict (dict): the current configuration dict updates (dict): the updated values to add to the given config dict. """ class OutputFormatLoader(ConfigSectionLoader): """Loader for the top level key output_format. 
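For example, a minimal sketch of the section this loader consumes, given as a YAML string to :func:`load_from_yaml`:

    .. code-block:: python

        load_from_yaml('''
            output_format:
                optimization:
                    gzip: True
                sampling:
                    gzip: False
        ''')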
""" def load(self, value): for item in ['optimization', 'sampling']: options = value.get(item, {}) if 'gzip' in options: config_insert(['output_format', item, 'gzip'], bool(options['gzip'])) class LoggingLoader(ConfigSectionLoader): """Loader for the top level key logging. """ def load(self, value): ensure_exists(['logging', 'info_dict']) if 'info_dict' in value: self._load_info_dict(value['info_dict']) def _load_info_dict(self, info_dict): for item in ['version', 'disable_existing_loggers', 'formatters', 'handlers', 'loggers', 'root']: if item in info_dict: config_insert(['logging', 'info_dict', item], info_dict[item]) class OptimizationSettingsLoader(ConfigSectionLoader): """Loads the optimization section""" def load(self, value): ensure_exists(['optimization', 'general']) ensure_exists(['optimization', 'model_specific']) if 'general' in value: config_insert(['optimization', 'general'], value['general']) if 'model_specific' in value: for key, sub_value in value['model_specific'].items(): config_insert(['optimization', 'model_specific', key], sub_value) class SampleSettingsLoader(ConfigSectionLoader): """Loads the sampling section""" def load(self, value): ensure_exists(['sampling', 'general']) if 'general' in value: config_insert(['sampling', 'general'], value['general']) class ProcessingStrategySectionLoader(ConfigSectionLoader): """Loads the config section processing_strategies""" def load(self, value): if 'optimization' in value: self._load_options('optimization', value['optimization']) if 'sampling' in value: self._load_options('sampling', value['sampling']) def _load_options(self, current_type, options): if 'general' in options: config_insert(['processing_strategies', current_type, 'general'], options['general']) ensure_exists(['processing_strategies', current_type, 'model_specific']) if 'model_specific' in options: for key, value in options['model_specific'].items(): config_insert(['processing_strategies', current_type, 'model_specific', key], value) class TmpResultsDirSectionLoader(ConfigSectionLoader): """Load the section tmp_results_dir""" def load(self, value): config_insert(['tmp_results_dir'], value) class NoiseStdEstimationSectionLoader(ConfigSectionLoader): """Load the section noise_std_estimating""" def load(self, value): if 'estimators' in value: config_insert(['noise_std_estimating', 'estimators'], value['estimators']) class RuntimeSettingsLoader(ConfigSectionLoader): def load(self, value): if 'cl_device_ind' in value: if value['cl_device_ind'] is not None: from mdt.utils import get_cl_devices all_devices = get_cl_devices() indices = value['cl_device_ind'] if not isinstance(indices, collections.Iterable): indices = [indices] devices = [all_devices[ind] for ind in indices if ind < len(all_devices)] if devices: mot.configuration.set_cl_environments(devices) mot.configuration.set_load_balancer(EvenDistribution()) def update(self, config_dict, updates): if 'runtime_settings' not in config_dict: config_dict.update({'runtime_settings': {}}) config_dict['runtime_settings'].update(updates) def get_section_loader(section): """Get the section loader to use for the given top level section. Args: section (str): the section key we want to get the loader for Returns: ConfigSectionLoader: the config section loader for this top level section of the configuration. 
""" if section == 'output_format': return OutputFormatLoader() if section == 'logging': return LoggingLoader() if section == 'optimization': return OptimizationSettingsLoader() if section == 'sampling': return SampleSettingsLoader() if section == 'processing_strategies': return ProcessingStrategySectionLoader() if section == 'tmp_results_dir': return TmpResultsDirSectionLoader() if section == 'noise_std_estimating': return NoiseStdEstimationSectionLoader() if section == 'runtime_settings': return RuntimeSettingsLoader() raise ValueError('Could not find a suitable configuration loader for the section {}.'.format(section)) def gzip_optimization_results(): """Check if we should write the volume maps from the optimization gzipped or not. Returns: boolean: True if the results of optimization computations should be gzipped, False otherwise. """ return _config['output_format']['optimization']['gzip'] def gzip_sampling_results(): """Check if we should write the volume maps from the sampling gzipped or not. Returns: boolean: True if the results of sampling computations should be gzipped, False otherwise. """ return _config['output_format']['sampling']['gzip'] def get_tmp_results_dir(): """Get the default tmp results directory. This is the default directory for saving temporary computation results. Set to None to disable this and use the model directory. Returns: str or None: the tmp results dir to use during optimization and sampling """ return _config['tmp_results_dir'] def get_processing_strategy(processing_type, model_names=None): """Get the correct processing strategy for the given model. Args: processing_type (str): 'optimization', 'sampling' or any other of the processing_strategies defined in the config model_names (list of str): the list of model names (the full recursive cascade of model names) Returns: ModelProcessingStrategy: the processing strategy to use for this model """ strategy_name = _config['processing_strategies'][processing_type]['general']['name'] options = _config['processing_strategies'][processing_type]['general'].get('options', {}) or {} if model_names and ('model_specific' in _config['processing_strategies'][processing_type]): info_dict = get_model_config(model_names, _config['processing_strategies'][processing_type]['model_specific']) if info_dict: strategy_name = info_dict['name'] options = info_dict.get('options', {}) or {} return ProcessingStrategiesLoader().load(strategy_name, **options) def get_noise_std_estimators(): """Get the noise std estimators for finding the std of the noise. Returns: list of ComplexNoiseStdEstimator: the noise estimators to use for finding the complex noise """ loader = NoiseSTDCalculatorsLoader() return [loader.load(c) for c in _config['noise_std_estimating']['estimators']] def get_logging_configuration_dict(): """Get the configuration dictionary for the logging.dictConfig(). MDT uses a few special logging configuration options to log to the files and GUI's. These options are defined using a configuration dictionary that this function returns. Returns: dict: the configuration dict for use with dictConfig of the Python logging modules """ return _config['logging']['info_dict'] def get_general_optimizer(): """Load the general optimizer from the configuration. Returns: Optimizer: the configured optimizer for use in MDT """ return _resolve_optimizer(_config['optimization']['general']) def get_optimizer_for_model(model_names): """Get the optimizer for this specific cascade of models. 
This configuration function supports having a different optimizer for optimizing, for example, NODDI in a cascade and NODDI without a cascade. Args: model_names (list of str): the list of model names (typically a cascade of models) for which we want to get the optimizer to use. Returns: Optimizer: the optimizer to use for optimizing the specific model """ info_dict = get_model_config(model_names, _config['optimization']['model_specific']) if info_dict: return _resolve_optimizer(info_dict) else: return get_general_optimizer() def _resolve_optimizer(optimizer_info): """Resolve the optimization routine from the given information dictionary. Args: optimizer_info (dict): the optimization dictionary with at least 'name' for the optimizer and settings for the optimizer settings Returns: optimizer: the optimization routine """ name = optimizer_info['name'] settings = deepcopy(optimizer_info.get('settings', {}) or {}) optimizer = get_optimizer_by_name(name) if 'optimizers' in settings and settings['optimizers']: settings['optimizers'] = [_resolve_optimizer(info) for info in settings['optimizers']] if 'optimizer' in settings and settings['optimizer']: settings['optimizer'] = _resolve_optimizer(settings['optimizer']) if 'grid_generator' in settings and settings['grid_generator']: cls = getattr(mot.cl_routines.optimizing.grid_search, list(settings['grid_generator'].keys())[0]) settings['grid_generator'] = cls(**list(settings['grid_generator'].values())[0]) if 'starting_point_generator' in settings and settings['starting_point_generator']: cls = getattr(mot.cl_routines.optimizing.random_restart, list(settings['starting_point_generator'].keys())[0]) settings['starting_point_generator'] = cls(**list(settings['starting_point_generator'].values())[0]) return optimizer(**settings) def get_general_optimizer_name(): """Get the name of the currently configured general optimizer Returns: str: the name of the currently configured optimizer """ return _config['optimization']['general']['name'] def get_general_optimizer_settings(): """Get the settings of the currently configured general optimizer Returns: dict: the settings of the currently configured optimizer """ return _config['optimization']['general']['settings'] def get_sampler(): """Load the sampler from the configuration. Returns: Sampler: the configured sampler for use in MDT """ sampler = get_sampler_by_name(_config['sampling']['general']['name']) return sampler(**_config['sampling']['general']['settings']) def get_model_config(model_names, config): """Get from the given dictionary the config for the given model. This tries to find the best match between the given config items (by key) and the given model list. For example if model_names is ['BallStick', 'S0'] and we have the following config dict: .. code-block:: python {'^S0$': 0, '^BallStick$': 1 ('^BallStick$', '^S0$'): 2, ('^BallStickStick$', '^BallStick$', '^S0$'): 3, } then this function should return 2. because that one is the best match, even though the last option is also a viable match. That is, since a subset of the keys in the last option also matches the model names, it is considered a match as well. Still the best match is the third option (returning 2). Args: model_names (list of str): the names of the models we want to match. This should contain the entire recursive list of cascades leading to the composite model we want to get the config for. config (dict): the config items with as keys either a composite model regex for a name or a list of regex for a chain of model names. 
Returns: The config content of the best matching key. """ if not config: return {} def get_key_length(key): if isinstance(key, tuple): return len(key) return 1 def is_match(model_names, config_key): if isinstance(model_names, string_types): model_names = [model_names] if len(model_names) != get_key_length(config_key): return False if isinstance(config_key, tuple): return all([re.match(config_key[ind], model_names[ind]) for ind in range(len(config_key))]) return re.match(config_key, model_names[0]) key_length_lookup = ((get_key_length(key), key) for key in config.keys()) ascending_keys = tuple(item[1] for item in sorted(key_length_lookup, key=lambda info: info[0])) # direct matching for key in ascending_keys: if is_match(model_names, key): return config[key] # partial matching string keys to last model name for key in ascending_keys: if not isinstance(key, tuple): if is_match(model_names[-1], key): return config[key] # partial matching tuple keys with a moving filter for key in ascending_keys: if isinstance(key, tuple): for start_ind in range(len(key)): sub_key = key[start_ind:] if is_match(model_names, sub_key): return config[key] # no match found return {} @contextmanager def config_context(config_action): """Creates a temporary configuration context with the given config action. This will temporarily alter the given configuration keys to the given values. After the context is executed the configuration will revert to the original settings. Example usage: .. code-block:: python config = ''' optimization: general: optimizers: - name: 'NMSimplex' patience: 10 ''' with mdt.config_context(mdt.configuration.YamlStringAction(config)): mdt.fit_model(...) This loads the configuration from a YAML string and uses that configuration as the context. Args: config_action (ConfigAction or str): the configuration action to apply. If a string is given we will use it using the YamlStringAction config action. """ if isinstance(config_action, string_types): config_action = YamlStringAction(config_action) config_action.apply() yield config_action.unapply() class ConfigAction(object): def __init__(self): """Defines a configuration action for the use in a configuration context. This should define an apply and an unapply function that sets and unsets the given configuration options. The applying action needs to remember the state before applying the action. """ def apply(self): """Apply the current action to the current runtime configuration.""" def unapply(self): """Reset the current configuration to the previous state.""" class VoidConfigAction(ConfigAction): """Does nothing. Meant as a container to not have to check for None's everywhere.""" def apply(self): pass def unapply(self): pass class SimpleConfigAction(ConfigAction): def __init__(self): """Defines a default implementation of a configuration action. This simple config implements a default apply() method that saves the current state and a default unapply() that restores the previous state. It is easiest to implement _apply() for extra actions. 
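        A minimal sketch of a custom action built on this class; the class name and the configuration section it modifies are illustrative:

        .. code-block:: python

            class SetTmpResultsDir(SimpleConfigAction):

                def __init__(self, tmp_dir):
                    super(SetTmpResultsDir, self).__init__()
                    self._tmp_dir = tmp_dir

                def _apply(self):
                    # only _apply() needs to be implemented, apply() and unapply()
                    # already save and restore the previous configuration
                    TmpResultsDirSectionLoader().load(self._tmp_dir)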
""" super(SimpleConfigAction, self).__init__() self._old_config = {} def apply(self): """Apply the current action to the current runtime configuration.""" self._old_config = deepcopy(_config) self._apply() def unapply(self): """Reset the current configuration to the previous state.""" global _config _config = self._old_config def _apply(self): """Implement this function add apply() logic after this class saves the current config.""" class YamlStringAction(SimpleConfigAction): def __init__(self, yaml_str): super(YamlStringAction, self).__init__() self._yaml_str = yaml_str def _apply(self): load_from_yaml(self._yaml_str) class SetGeneralSampler(SimpleConfigAction): def __init__(self, sampler_name, settings=None): super(SetGeneralSampler, self).__init__() self._sampler_name = sampler_name self._settings = settings or {} def _apply(self): SampleSettingsLoader().load({'general': {'name': self._sampler_name, 'settings': self._settings}}) class SetGeneralOptimizer(SimpleConfigAction): def __init__(self, optimizer_name, settings=None): super(SetGeneralOptimizer, self).__init__() self._optimizer_name = optimizer_name self._settings = settings or {} @classmethod def from_object(self, optimizer): return SetGeneralOptimizer(optimizer.__class__.__name__, optimizer.optimizer_settings) def _apply(self): OptimizationSettingsLoader().load({'general': {'name': self._optimizer_name, 'settings': self._settings}}) """Load the default configuration, and if possible, the users configuration.""" load_builtin() try: load_user_home() except IOError: pass PKjUpIYAmdt/cl_routines/__init__.py__author__ = 'Robbert Harms' __date__ = "2015-04-16" __license__ = "LGPL v3" __maintainer__ = "Robbert Harms" __email__ = "robbert.harms@maastrichtuniversity.nl"PK;}IZHH'mdt/cl_routines/mapping/dti_measures.pyimport numpy as np import pyopencl as cl from mot.utils import get_float_type_def from mot.cl_routines.base import CLRoutine from mot.load_balance_strategies import Worker __author__ = 'Robbert Harms' __date__ = "2015-04-16" __license__ = "LGPL v3" __maintainer__ = "Robbert Harms" __email__ = "robbert.harms@maastrichtuniversity.nl" class DTIMeasures(CLRoutine): def concat_and_calculate(self, eigenval1, eigenval2, eigenval3, double_precision=True): """Calculate DTI statistics from the given eigenvalues. This concatenates the eigenvalue matrices and runs self.calculate(eigenvalues) on them. Args: eigenval1 (ndarray): The first set of eigenvalues, can be 2d, per voxel one eigenvalue, or 3d, per voxel multiple eigenvalues. eigenval2 (ndarray): The first set of eigenvalues, can be 2d, per voxel one eigenvalue, or 3d, per voxel multiple eigenvalues. eigenval3 (ndarray): The first set of eigenvalues, can be 2d, per voxel one eigenvalue, or 3d, per voxel multiple eigenvalues. double_precision (boolean): if we want to use float (set it to False) or double (set it to True) Returns: Per voxel, and optionally per instance per voxel, the FA and MD: (fa, md) """ s = eigenval1.shape if len(s) < 3: return self.calculate(np.concatenate((np.reshape(eigenval1, (s[0], 1)), np.reshape(eigenval2, (s[0], 1)), np.reshape(eigenval3, (s[0], 1))), axis=1), double_precision) else: return self.calculate(np.concatenate((np.reshape(eigenval1, (s[0], s[1], 1)), np.reshape(eigenval2, (s[0], s[1], 1)), np.reshape(eigenval3, (s[0], s[1], 1))), axis=2), double_precision) def calculate(self, eigenvalues, double_precision=True): """Calculate DTI statistics from the given eigenvalues. 
Args: eigenvalues (ndarray): The set of eigen values, can be 2d, per voxel one eigenvalue, or 3d, per voxel multiple eigenvalues. double_precision (boolean): if we want to use float (set it to False) or double (set it to True) Returns: Per voxel, and optionally per instance per voxel, the FA and MD: (fa, md) """ np_dtype = np.float32 if double_precision: np_dtype = np.float64 eigenvalues = np.require(eigenvalues, np_dtype, requirements=['C', 'A', 'O']) s = eigenvalues.shape if len(s) < 3: fa_host = np.zeros((s[0], 1), dtype=np_dtype) md_host = np.zeros((s[0], 1), dtype=np_dtype) items = s[0] else: fa_host = np.zeros((s[0] * s[1], 1), dtype=np_dtype) md_host = np.zeros((s[0] * s[1], 1), dtype=np_dtype) items = s[0] * s[1] eigenvalues = np.reshape(eigenvalues, (s[0] * s[1], -1)) workers = self._create_workers(lambda cl_environment: _DTIMeasuresWorker( cl_environment, self.get_compile_flags_list(), eigenvalues, fa_host, md_host, double_precision)) self.load_balancer.process(workers, items) if len(s) > 2: fa_host = np.reshape(fa_host, (s[0], s[1], 1)) md_host = np.reshape(md_host, (s[0], s[1], 1)) return fa_host, md_host class _DTIMeasuresWorker(Worker): def __init__(self, cl_environment, compile_flags, eigenvalues, fa_host, md_host, double_precision): super(_DTIMeasuresWorker, self).__init__(cl_environment) self._eigenvalues = eigenvalues self._fa_host = fa_host self._md_host = md_host self._double_precision = double_precision self._kernel = self._build_kernel(compile_flags) def calculate(self, range_start, range_end): nmr_problems = range_end - range_start eigenvalues_buf = cl.Buffer(self._cl_run_context.context, cl.mem_flags.READ_ONLY | cl.mem_flags.COPY_HOST_PTR, hostbuf=self._eigenvalues[range_start:range_end]) fa_buf = cl.Buffer(self._cl_run_context.context, cl.mem_flags.WRITE_ONLY | cl.mem_flags.COPY_HOST_PTR, hostbuf=self._fa_host[range_start:range_end]) md_buf = cl.Buffer(self._cl_run_context.context, cl.mem_flags.WRITE_ONLY | cl.mem_flags.COPY_HOST_PTR, hostbuf=self._md_host[range_start:range_end]) buffers = [eigenvalues_buf, fa_buf, md_buf] self._kernel.calculate_measures(self._cl_run_context.queue, (int(nmr_problems), ), None, *buffers) cl.enqueue_copy(self._cl_run_context.queue, self._fa_host[range_start:range_end], fa_buf, is_blocking=True) event = cl.enqueue_copy(self._cl_run_context.queue, self._md_host[range_start:range_end], md_buf, is_blocking=False) return [event] def _get_kernel_source(self): kernel_source = '' kernel_source += get_float_type_def(self._double_precision) kernel_source += ''' __kernel void calculate_measures( global mot_float_type* eigenvalues, global mot_float_type* fas, global mot_float_type* mds ){ int gid = get_global_id(0); int voxel = gid * 3; mot_float_type v1 = eigenvalues[voxel]; mot_float_type v2 = eigenvalues[voxel + 1]; mot_float_type v3 = eigenvalues[voxel + 2]; fas[gid] = sqrt(0.5 * (((v1 - v2) * (v1 - v2)) + ((v1 - v3) * (v1 - v3)) + ((v2 - v3) * (v2 - v3))) / (v1 * v1 + v2 * v2 + v3 * v3)); mds[gid] = (v1 + v2 + v3) / 3.0; } ''' return kernel_source PK:}I601mdt/cl_routines/mapping/calculate_eigenvectors.pyimport pyopencl as cl import numpy as np from mot.utils import get_float_type_def from mot.cl_routines.base import CLRoutine from mot.load_balance_strategies import Worker __author__ = 'Robbert Harms' __date__ = "2014-05-18" __license__ = "LGPL v3" __maintainer__ = "Robbert Harms" __email__ = "robbert.harms@maastrichtuniversity.nl" class CalculateEigenvectors(CLRoutine): def convert_theta_phi_psi(self, theta_roi, phi_roi, psi_roi, 
double_precision=False): """Calculate the eigenvectors from the given theta, phi and psi angles. This will return the eigenvectors unsorted (since we know nothing about the eigenvalues). Args: theta_roi (ndarray): The list of theta's per voxel in the ROI phi_roi (ndarray): The list of phi's per voxel in the ROI psi_roi (ndarray): The list of psi's per voxel in the ROI double_precision (boolean): if we want to use float (set it to False) or double (set it to True) Returns: The three eigenvectors per voxel in the ROI. The return matrix is of shape (n, 3, 3) where n is the number of voxels and the second dimension holds the number of vectors and the last dimension the direction per vector. In other words, this gives for one voxel the matrix:: [evec_1_x, evec_1_y, evec_1_z, evec_2_x, evec_2_y, evec_2_z evec_3_x, evec_3_y, evec_3_z] The resulting eigenvectors are the same as those from the Tensor compartment model. """ np_dtype = np.float32 if double_precision: np_dtype = np.float64 theta_roi = np.require(theta_roi, np_dtype, requirements=['C', 'A', 'O']) phi_roi = np.require(phi_roi, np_dtype, requirements=['C', 'A', 'O']) psi_roi = np.require(psi_roi, np_dtype, requirements=['C', 'A', 'O']) rows = theta_roi.shape[0] evecs = np.zeros((rows, 3, 3), dtype=np_dtype, order='C') workers = self._create_workers(lambda cl_environment: _CEWorker(cl_environment, self.get_compile_flags_list(), theta_roi, phi_roi, psi_roi, evecs, double_precision)) self.load_balancer.process(workers, rows) return evecs class _CEWorker(Worker): def __init__(self, cl_environment, compile_flags, theta_roi, phi_roi, psi_roi, evecs, double_precision): super(_CEWorker, self).__init__(cl_environment) self._theta_roi = theta_roi self._phi_roi = phi_roi self._psi_roi = psi_roi self._evecs = evecs self._double_precision = double_precision self._all_buffers, self._evecs_buf = self._create_buffers() self._kernel = self._build_kernel(compile_flags) def __del__(self): list(buffer.release() for buffer in self._all_buffers) def calculate(self, range_start, range_end): nmr_problems = range_end - range_start event = self._kernel.generate_tensor(self._cl_run_context.queue, (int(nmr_problems), ), None, *self._all_buffers, global_offset=(int(range_start),)) return [self._enqueue_readout(self._evecs_buf, self._evecs, range_start, range_end, [event])] def _create_buffers(self): thetas_buf = cl.Buffer(self._cl_run_context.context, cl.mem_flags.READ_ONLY | cl.mem_flags.COPY_HOST_PTR, hostbuf=self._theta_roi) phis_buf = cl.Buffer(self._cl_run_context.context, cl.mem_flags.READ_ONLY | cl.mem_flags.COPY_HOST_PTR, hostbuf=self._phi_roi) psis_buf = cl.Buffer(self._cl_run_context.context, cl.mem_flags.READ_ONLY | cl.mem_flags.COPY_HOST_PTR, hostbuf=self._psi_roi) evecs_buf = cl.Buffer(self._cl_run_context.context, cl.mem_flags.WRITE_ONLY | cl.mem_flags.USE_HOST_PTR, hostbuf=self._evecs) all_buffers = [thetas_buf, phis_buf, psis_buf, evecs_buf] return all_buffers, evecs_buf def _get_kernel_source(self): kernel_source = '' kernel_source += get_float_type_def(self._double_precision) kernel_source += ''' __kernel void generate_tensor( global mot_float_type* thetas, global mot_float_type* phis, global mot_float_type* psis, global mot_float_type* evecs ){ int gid = get_global_id(0); mot_float_type theta = thetas[gid]; mot_float_type phi = phis[gid]; mot_float_type psi = psis[gid]; mot_float_type cos_theta; mot_float_type sin_theta = sincos(theta, &cos_theta); mot_float_type cos_phi; mot_float_type sin_phi = sincos(phi, &cos_phi); mot_float_type cos_psi; 
mot_float_type sin_psi = sincos(psi, &cos_psi); mot_float_type4 n1 = (mot_float_type4)(cos_phi * sin_theta, sin_phi * sin_theta, cos_theta, 0.0); // rotate around n1 mot_float_type tmp = sin(theta+(M_PI_2)); // using tmp as the rotation factor (90 degrees) mot_float_type4 n2 = (mot_float_type4)(tmp * cos_phi, tmp * sin_phi, cos(theta+(M_PI_2)), 0.0); tmp = select(1, -1, n1.z < 0 || ((n1.z == 0.0) && n1.x < 0.0)); // using tmp as the multiplier n2 = n2 * cos_psi + (cross(n2, tmp * n1) * sin_psi) + (tmp * n1 * dot(tmp * n1, n2) * (1-cos_psi)); mot_float_type4 n3 = cross(n1, n2); evecs[gid*9] = n1.x; evecs[gid*9 + 1] = n1.y; evecs[gid*9 + 2] = n1.z; evecs[gid*9 + 3] = n2.x; evecs[gid*9 + 4] = n2.y; evecs[gid*9 + 5] = n2.z; evecs[gid*9 + 6] = n3.x; evecs[gid*9 + 7] = n3.y; evecs[gid*9 + 8] = n3.z; } ''' return kernel_source PKjUpIYA#mdt/cl_routines/mapping/__init__.py__author__ = 'Robbert Harms' __date__ = "2015-04-16" __license__ = "LGPL v3" __maintainer__ = "Robbert Harms" __email__ = "robbert.harms@maastrichtuniversity.nl"PKUpIPk k mdt/models/base.py__author__ = 'Robbert Harms' __date__ = "2015-10-27" __maintainer__ = "Robbert Harms" __email__ = "robbert.harms@maastrichtuniversity.nl" class DMRIOptimizable(object): def __init__(self, *args, **kwargs): """This is an interface for some base methods we expect in an MRI model. Since we have both composite dMRI models and cascade models we must have an overarching interface to make sure that both type of models implement the same additional methods. The methods in this interface have little to do with modelling, but unify some extra required methods in the cascades and composite models. Attributes: problems_to_analyze (list): the list with problems we want to analyze. Suppose we have a few thousands problems defined in this model, but we want to run the optimization only on a few problems. By setting this attribute to a list of problems indices only those problems will be analyzed. double_precision (boolean): if we do the computations in double or float precision """ super(DMRIOptimizable, self).__init__() self.problems_to_analyze = None self.double_precision = False def is_protocol_sufficient(self, protocol=None): """Check if the protocol holds enough information for this model to work. Args: protocol (Protocol): The protocol object to check for sufficient information. If set the None, the current protocol in the problem data is used. Returns: boolean: True if there is enough information in the protocol, false otherwise """ def get_protocol_problems(self, protocol=None): """Get all the problems with the protocol. Args: protocol (Protocol): The protocol object to check for problems. If set the None, the current protocol in the problem data is used. Returns: list of ModelProtocolProblem: A list of :class:`~mdt.model_protocol_problem.ModelProtocolProblem` instances or subclasses of that baseclass. These objects indicate the problems with the protocol and this model. """ def get_required_protocol_names(self): """Get a list with the constant data names that are needed for this model to work. For example, an implementing diffusion MRI model might require the presence of the protocol parameter ``g`` and ``b``. This function should then return ``('g', 'b')``. Returns: :class:`list`: A list of columns names that are to be taken from the protocol data. 
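            Example (a minimal sketch matching the dMRI example above; the exact columns depend on the implementing model):

            .. code-block:: python

                def get_required_protocol_names(self):
                    return ['g', 'b']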
""" PKUpIg3cmdt/models/parameters.pyimport six from mdt.components_loader import ComponentConfig, ComponentBuilder, method_binding_meta from mot.cl_data_type import CLDataType from mot.model_building.cl_functions.parameters import StaticMapParameter, ProtocolParameter, ModelDataParameter, \ FreeParameter from mot.model_building.parameter_functions.priors import UniformWithinBoundsPrior from mot.model_building.parameter_functions.proposals import GaussianProposal from mot.model_building.parameter_functions.sample_statistics import GaussianPSS from mot.model_building.parameter_functions.transformations import IdentityTransform __author__ = 'Robbert Harms' __date__ = "2015-12-12" __maintainer__ = "Robbert Harms" __email__ = "robbert.harms@maastrichtuniversity.nl" class ParameterConfig(ComponentConfig): """The cascade config to inherit from. These configs are loaded on the fly by the ParametersBuilder Config options: name (str): the name of the parameter, defaults to the class name description (str): the description of this parameter data_type (str or DataType): either a string we use as datatype or the actual datatype itself type (str): the type of parameter (free, protocol or model_data) """ name = '' description = '' data_type = 'mot_float_type' type = None class ProtocolParameterConfig(ParameterConfig): """The default config options for protocol parameters. This sets the attribute type to protocol. """ type = 'protocol' data_type = 'mot_float_type' class FreeParameterConfig(ParameterConfig): """The default config options for free parameters. This sets the attribute type to free. Attributes: init_value (float): the initial value fixed (boolean or ndarray of float): if this parameter is fixed or not. If not fixed this should hold a reference to a value or a matrix lower_bound (float): the lower bounds upper_bound (float): the upper bounds parameter_transform: the parameter transformation sampling_proposal: the proposal function sampling_prior: the prior function sampling_statistics: the sampling statistic, used after the sampling """ type = 'free' data_type = 'mot_float_type' fixed = False init_value = 0.03 lower_bound = 0.0 upper_bound = 4.0 parameter_transform = IdentityTransform() sampling_proposal = GaussianProposal(1.0) sampling_prior = UniformWithinBoundsPrior() sampling_statistics = GaussianPSS() class ModelDataParameterConfig(ParameterConfig): """The default config options for model data parameters. This sets the attribute type to model_data. """ type = 'model_data' value = None class StaticMapParameterConfig(ParameterConfig): """The default config options for static data parameters. This sets the attribute type to static_map. """ type = 'static_map' value = None class ParameterBuilder(ComponentBuilder): def create_class(self, template): """Creates classes with as base class DMRICompositeModel Args: template (ParameterConfig): the configuration for the parameter. 
""" data_type = template.data_type if isinstance(data_type, six.string_types): data_type = CLDataType.from_string(data_type) if template.type.lower() == 'protocol': class AutoProtocolParameter(method_binding_meta(template, ProtocolParameter)): def __init__(self): super(AutoProtocolParameter, self).__init__(data_type, template.name) return AutoProtocolParameter elif template.type.lower() == 'free': class AutoFreeParameter(method_binding_meta(template, FreeParameter)): def __init__(self): super(AutoFreeParameter, self).__init__( data_type, template.name, template.fixed, template.init_value, template.lower_bound, template.upper_bound, parameter_transform=template.parameter_transform, sampling_proposal=template.sampling_proposal, sampling_prior=template.sampling_prior, sampling_statistics=template.sampling_statistics ) return AutoFreeParameter elif template.type.lower() == 'model_data': class AutoModelDataParameter(method_binding_meta(template, ModelDataParameter)): def __init__(self): super(AutoModelDataParameter, self).__init__(data_type, template.name, template.value) return AutoModelDataParameter elif template.type.lower() == 'static_map': class AutoStaticMapParameter(method_binding_meta(template, StaticMapParameter)): def __init__(self): super(AutoStaticMapParameter, self).__init__(data_type, template.name, template.value) return AutoStaticMapParameter PKI{/SSmdt/models/composite.pyimport logging from copy import deepcopy import numpy as np import six from mdt.components_loader import ComponentConfig, ComponentBuilder, method_binding_meta from mdt.model_protocol_problem import MissingColumns, InsufficientShells from mdt.models.base import DMRIOptimizable from mdt.models.parsers.CompositeModelExpressionParser import parse from mdt.protocols import VirtualColumnB from mdt.utils import create_roi, calculate_information_criterions from mot.cl_data_type import CLDataType from mot.model_building.cl_functions.model_functions import Weight from mot.cl_routines.mapping.loglikelihood_calculator import LogLikelihoodCalculator from mot.model_building.data_adapter import SimpleDataAdapter from mot.model_building.evaluation_models import OffsetGaussianEvaluationModel from mot.model_building.model_builders import SampleModelBuilder from mot.model_building.parameter_functions.dependencies import WeightSumToOneRule, SimpleAssignment from mot.model_building.trees import CompartmentModelTree __author__ = 'Robbert Harms' __date__ = "2014-10-26" __license__ = "LGPL v3" __maintainer__ = "Robbert Harms" __email__ = "robbert.harms@maastrichtuniversity.nl" class DMRICompositeModel(SampleModelBuilder, DMRIOptimizable): def __init__(self, model_name, model_tree, evaluation_model, signal_noise_model=None, problem_data=None, add_default_weights_dependency=True): """Create a composite dMRI sample model. This also implements the perturbation interface to allow perturbation of the data during meta-optimization. It furthermore implements some protocol check functions. These are used by the fit_model functions in MDT to check if the protocol is correct for the model we try to fit. Args: add_default_weights_dependency (boolean): if we want to add the default weights dependency to this model or not, by default we use this. Attributes: required_nmr_shells (int): Define the minimum number of unique shells necessary for this model. The default is false, which means that we don't check for this. 
""" self._add_default_weights_dependency = add_default_weights_dependency super(DMRICompositeModel, self).__init__(model_name, model_tree, evaluation_model, signal_noise_model, problem_data=problem_data) self.required_nmr_shells = False self._logger = logging.getLogger(__name__) self._original_problem_data = None def set_problem_data(self, problem_data): """Overwrites the super implementation by adding a call to _prepare_problem_data() before the problem data is added to the model. """ self._check_data_consistency(problem_data) self._original_problem_data = problem_data return super(DMRICompositeModel, self).set_problem_data(self._prepare_problem_data(problem_data)) def _get_variable_data(self): var_data_dict = super(DMRICompositeModel, self)._get_variable_data() if self._problem_data.gradient_deviations is not None: if len(self._problem_data.gradient_deviations.shape) > 2: grad_dev = create_roi(self._problem_data.gradient_deviations, self._problem_data.mask) else: grad_dev = np.copy(self._problem_data.gradient_deviations) self._logger.info('Using the gradient deviations in the model optimization.') # adds the eye(3) matrix to every grad dev, so we don't have to do it in the kernel. # Flattening an eye(3) matrix gives the same result with F and C ordering, I nevertheless put it here # to emphasize that the gradient deviations matrix is in Fortran (column-major) order. grad_dev += np.eye(3).flatten(order='F') if self.problems_to_analyze is not None: grad_dev = grad_dev[self.problems_to_analyze, ...] adapter = SimpleDataAdapter(grad_dev, CLDataType.from_string('mot_float_type*'), self._get_mot_float_type()) var_data_dict.update({'gradient_deviations': adapter}) return var_data_dict def is_protocol_sufficient(self, protocol=None): """See ProtocolCheckInterface""" return not self.get_protocol_problems(protocol=protocol) def get_protocol_problems(self, protocol=None): """See ProtocolCheckInterface""" if protocol is None: protocol = self._problem_data.protocol problems = [] missing_columns = [name for name in self.get_required_protocol_names() if not protocol.has_column(name)] if missing_columns: problems.append(MissingColumns(missing_columns)) try: shells = protocol.get_nmr_shells() if shells < self.required_nmr_shells: problems.append(InsufficientShells(self.required_nmr_shells, shells)) except KeyError: pass return problems def get_abstract_model_function(self): """Get the abstract diffusion model function computed by this model. 
Returns: str: the abstract model function of this class """ return self._model_tree def _set_default_dependencies(self): super(DMRICompositeModel, self)._set_default_dependencies() if self._add_default_weights_dependency: names = [w.name + '.w' for w in self._get_weight_models()] if len(names): self.add_parameter_dependency(names[0], WeightSumToOneRule(names[1:])) def _get_weight_models(self): return [n.data for n in self._model_tree.leaves if isinstance(n.data, Weight)] def _get_pre_model_expression_eval_code(self): if self._can_use_gradient_deviations(): s = ''' mot_float_type4 _new_gradient_vector_raw = _get_new_gradient_raw(g, data->var_data_gradient_deviations); mot_float_type _new_gradient_vector_length = length(_new_gradient_vector_raw); g = _new_gradient_vector_raw/_new_gradient_vector_length; ''' if 'b' in list(self._get_protocol_data().keys()): s += 'b *= _new_gradient_vector_length * _new_gradient_vector_length;' + "\n" if 'G' in list(self._get_protocol_data().keys()): s += 'G *= _new_gradient_vector_length;' + "\n" if 'q' in list(self._get_protocol_data().keys()): s += 'q *= _new_gradient_vector_length;' + "\n" return s def _get_pre_model_expression_eval_function(self): if self._can_use_gradient_deviations(): return ''' #ifndef GET_NEW_GRADIENT_RAW #define GET_NEW_GRADIENT_RAW mot_float_type4 _get_new_gradient_raw(mot_float_type4 g, global const mot_float_type* const gradient_deviations){ const mot_float_type4 il_0 = (mot_float_type4)(gradient_deviations[0], gradient_deviations[3], gradient_deviations[6], 0.0); const mot_float_type4 il_1 = (mot_float_type4)(gradient_deviations[1], gradient_deviations[4], gradient_deviations[7], 0.0); const mot_float_type4 il_2 = (mot_float_type4)(gradient_deviations[2], gradient_deviations[5], gradient_deviations[8], 0.0); return (mot_float_type4)(dot(il_0, g), dot(il_1, g), dot(il_2, g), 0.0); } #endif //GET_NEW_GRADIENT_RAW ''' def _can_use_gradient_deviations(self): return self._problem_data.gradient_deviations is not None \ and 'g' in list(self._get_protocol_data().keys()) def _add_finalizing_result_maps(self, results_dict): super(DMRICompositeModel, self)._add_finalizing_result_maps(results_dict) log_likelihood_calc = LogLikelihoodCalculator() log_likelihoods = log_likelihood_calc.calculate(self, results_dict) k = self.get_nmr_estimable_parameters() n = self._problem_data.get_nmr_inst_per_problem() results_dict.update({'LogLikelihood': log_likelihoods}) results_dict.update(calculate_information_criterions(log_likelihoods, k, n)) def _prepare_problem_data(self, problem_data): """Update the problem data to make it suitable for this model. Some of the models in diffusion MRI can only handle a subset of all volumes. For example, the S0 model can only work with the unweigthed signals, or the Tensor model that can only handle a b-value up to 1.5e9 s/m^2. Overwrite this function to limit the problem data to a suitable range. Args: problem_data (DMRIProblemData): the problem data set by the user Returns: DMRIProblemData: either the same problem data or a changed copy. 
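        Rather than overwriting this function directly, the helper ``_get_suitable_volume_indices`` below can often be overwritten to the same effect. A sketch limiting a model to the unweighted volumes only (the threshold of 25e6 s/m^2 is illustrative):

        .. code-block:: python

            def _get_suitable_volume_indices(self, problem_data):
                return problem_data.protocol.get_unweighted_indices(25e6)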
""" protocol = problem_data.protocol indices = self._get_suitable_volume_indices(problem_data) if len(indices) != protocol.length: self._logger.info('For this model, {}, we will use a subset of the protocol and DWI.'.format(self._name)) self._logger.info('Using {} out of {} volumes, indices: {}'.format( len(indices), protocol.length, str(indices).replace('\n', '').replace('[ ', '['))) new_protocol = protocol.get_new_protocol_with_indices(indices) new_dwi_volume = problem_data.dwi_volume[..., indices] return problem_data.copy_with_updates(new_protocol, new_dwi_volume) else: self._logger.info('No model protocol options to apply, using original protocol.') return problem_data def _check_data_consistency(self, problem_data): """Check the problem data for any strange anomalies. We do this here so that implementing models can add additional consistency checks, or skip the checks. Also, by doing this here instead of in the Protocol class we ensure that the warnings end up in the log file. The final argument for putting this here is that I do not want any log output in the protocol tab. Args: problem_data (DMRIProblemData): the problem data to analyze. """ protocol = problem_data.protocol def warn(warning): self._logger.warning('{}, proceeding with seemingly inconsistent values.'.format(warning)) if 'TE' in protocol and 'TR' in protocol: if any(np.greater(protocol['TE'], protocol['TR'])): warn('Volumes detected where TE > TR') if 'TE' in protocol: if any(np.greater_equal(protocol['TE'], 1)): warn('Volumes detected where TE >= 1 second') if 'TR' in protocol: if any(np.greater_equal(protocol['TR'], 50)): warn('Volumes detected where TR >= 50 seconds') if 'delta' in protocol and 'Delta' in protocol and any(map(protocol.is_column_real, ['delta', 'Delta'])): if any(np.greater_equal(protocol['delta'], protocol['Delta'])): warn('Volumes detected where (small) delta >= (big) Delta') if 'Delta' in protocol and 'TE' in protocol: if any(np.greater_equal(protocol['Delta'], protocol['TE'])): warn('Volumes detected where (big) Delta >= TE') if all(map(protocol.is_column_real, ['G', 'delta', 'Delta', 'b'])): if not np.allclose(VirtualColumnB().get_values(protocol), protocol['b']): warn('Estimated b-values (from G, Delta, delta) differ from given b-values') def _get_suitable_volume_indices(self, problem_data): """Usable in combination with _prepare_problem_data, return the suitable volume indices. Get a list of volume indices that the model can use. This function is meant to remove common boilerplate code from writing your own _prepare_problem_data object. Args: problem_data (DMRIProblemData): the problem data set by the user Returns: list: the list of indices we want to use for this model. """ return list(range(problem_data.protocol.length)) class DMRICompositeModelConfig(ComponentConfig): """The cascade config to inherit from. These configs are loaded on the fly by the DMRICompositeModelBuilder Attributes: name (str): the name of the model, defaults to the class name in_vivo_suitable (boolean): flag indicating if the model is suitable for in vivo data ex_vivo_suitable (boolean): flag indicating if the model is suitable for ex vivo data description (str): model description post_optimization_modifiers (list): a list of modification callbacks for use after optimization. Example: .. code-block:: python post_optimization_modifiers = [('SNIF', lambda d: 1 - d['Wcsf.w']), ...] dependencies (list): the dependencies between model parameters. Example: .. 
code-block:: python dependencies = [('Noddi_EC.kappa', SimpleAssignment('Noddi_IC.kappa')), ('NODDI_EC.theta', 'NODDI_IC.theta') ...] If a string is given this is interpreted as a SimpleAssignment dependency. In the example shown here both the kappa and theta parameters are dependend in the same way. model_expression (str): the model expression. For the syntax see: mdt.models.parsers.CompositeModelExpression.ebnf evaluation_model (EvaluationModel): the evaluation model to use during optimization signal_noise_model (SignalNoiseModel): optional signal noise decorator inits (dict): indicating the initialization values for the parameters. Example: .. code-block:: python inits = {'Stick.theta: pi} fixes (dict): indicating the constant value for the given parameters. Example: .. code-block:: python fixes = {'Ball.d': 3.0e-9} upper_bounds (dict): indicating the upper bounds for the given parameters. Example: .. code-block:: python upper_bounds = {'Stick.theta': pi} lower_bounds (dict): indicating the lower bounds for the given parameters. Example: .. code-block:: python lower_bounds = {'Stick.theta': 0} parameter_transforms (dict): the parameter transform to use for a specific parameter. Can also be a python callback function accepting as single parameter 'self', a reference to the build model. This overwrites the default parameter transform of the specified parameter to the given transformation. Example: .. code-block:: python parameter_transforms = { 'Tensor.dperp0': SinSqrClampTransform(), 'Tensor.dperp1': lambda self: SinSqrClampDependentTransform( [(self, self._get_parameter_by_name('Tensor.dperp0'))]) } add_default_weights_dependency (boolean): set to False to disable the automatic Weight-sum-to-one dependency. By default it is True and we add them. volume_selection (dict): the volume selection by this model. This can be used to limit the volumes used in the analysis to only the volumes included in the specification. Set to None, or an empty dict to disable. The options available are: * ``unweighted_threshold`` (float): the threshold differentiating between weighted and unweighted volumes * ``use_unweighted`` (bool): if we want to use unweighted volumes or not * ``use_weighted`` (bool): if we want to use the diffusion weigthed volumes or not * ``min_bval`` (float): the minimum b-value to include * ``max_bval`` (float): the maximum b-value to include If the method ``_get_suitable_volume_indices`` is overwritten, this does nothing. """ name = '' in_vivo_suitable = True ex_vivo_suitable = True description = '' post_optimization_modifiers = [] dependencies = [] model_expression = '' evaluation_model = OffsetGaussianEvaluationModel() signal_noise_model = None inits = {} fixes = {} upper_bounds = {} lower_bounds = {} parameter_transforms = {} add_default_weights_dependency = True volume_selection = None @classmethod def meta_info(cls): meta_info = deepcopy(ComponentConfig.meta_info()) meta_info.update({'name': cls.name, 'in_vivo_suitable': cls.in_vivo_suitable, 'ex_vivo_suitable': cls.ex_vivo_suitable, 'description': cls.description}) return meta_info class DMRICompositeModelBuilder(ComponentBuilder): def create_class(self, template): """Creates classes with as base class DMRICompositeModel Args: template (DMRICompositeModelConfig): the composite model config template to use for creating the class with the right init settings. 
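        Example usage (a minimal sketch; ``MyModelConfig`` stands for any DMRICompositeModelConfig subclass you defined):

        .. code-block:: python

            model_class = DMRICompositeModelBuilder().create_class(MyModelConfig)
            model = model_class()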
""" class AutoCreatedDMRICompositeModel(method_binding_meta(template, DMRICompositeModel)): def __init__(self, *args): super(AutoCreatedDMRICompositeModel, self).__init__( deepcopy(template.name), CompartmentModelTree(parse(template.model_expression)), deepcopy(template.evaluation_model), signal_noise_model=deepcopy(template.signal_noise_model), add_default_weights_dependency=template.add_default_weights_dependency) self.add_parameter_dependencies(_resolve_dependencies(deepcopy(template.dependencies))) self.add_post_optimization_modifiers(deepcopy(template.post_optimization_modifiers)) for full_param_name, value in template.inits.items(): self.init(full_param_name, deepcopy(value)) for full_param_name, value in template.fixes.items(): self.fix(full_param_name, deepcopy(value)) for full_param_name, value in template.lower_bounds.items(): self.set_lower_bound(full_param_name, deepcopy(value)) for full_param_name, value in template.upper_bounds.items(): self.set_upper_bound(full_param_name, deepcopy(value)) for full_param_name, value in template.parameter_transforms.items(): if hasattr(value, '__call__'): self.set_parameter_transform(full_param_name, value(self)) else: self.set_parameter_transform(full_param_name, deepcopy(value)) def _get_suitable_volume_indices(self, problem_data): volume_selection = template.volume_selection if not volume_selection: return super(AutoCreatedDMRICompositeModel, self)._get_suitable_volume_indices(problem_data) use_unweighted = volume_selection.get('use_unweighted', True) use_weighted = volume_selection.get('use_weighted', True) unweighted_threshold = volume_selection.get('unweighted_threshold', 25e6) protocol = problem_data.protocol if protocol.has_column('g') and protocol.has_column('b'): if use_weighted: if 'min_bval' in volume_selection and 'max_bval' in volume_selection: protocol_indices = protocol.get_indices_bval_in_range(start=volume_selection['min_bval'], end=volume_selection['max_bval']) else: protocol_indices = protocol.get_weighted_indices(unweighted_threshold) else: protocol_indices = [] if use_unweighted: protocol_indices = list(protocol_indices) + \ list(protocol.get_unweighted_indices(unweighted_threshold)) else: return list(range(protocol.length)) return np.unique(protocol_indices) return AutoCreatedDMRICompositeModel def _resolve_dependencies(dependencies): """Resolve string dependencies to SimpleAssignment objects in the list of dependencies. Args: dependencies (list): the dependencies to resolve strings in Returns: list: the list of dependencies with dependency a proper object """ return_list = [] for param, dependency in dependencies: if isinstance(dependency, six.string_types): return_list.append((param, SimpleAssignment(dependency))) else: return_list.append((param, dependency)) return return_list PKeI]L8)/)/mdt/models/cascade.pyfrom copy import deepcopy import six import mdt from mdt.components_loader import ComponentConfig, ComponentBuilder, bind_function, method_binding_meta, get_meta_info from mdt.model_protocol_problem import NamedProtocolProblem from mdt.models.base import DMRIOptimizable __author__ = 'Robbert Harms' __date__ = "2015-04-24" __maintainer__ = "Robbert Harms" __email__ = "robbert.harms@maastrichtuniversity.nl" class DMRICascadeModelInterface(DMRIOptimizable): def __init__(self, *args, **kwargs): """The interface to cascade models. A cascade model is a model consisting of multi-compartment models or other cascade models. 
The idea is that it contains a number of models that are to be optimized one after another with the output results of the previous fit used to initialize the next model. """ super(DMRICascadeModelInterface, self).__init__(*args, **kwargs) self.double_precision = False @property def name(self): """Get the name of this cascade model. Returns: str: The name of this cascade model """ return '' def has_next(self): """Check if this cascade model has a next model. Returns: boolean: True if there is a next model, false otherwise. """ def get_next(self, output_previous_models): """Get the next model in the cascade. This is the only function called by the cascade model optimizer This class is supposed to remember which model is next. Args: output_previous_models (dict): The output of all the previous models. The first level of the dict is for the models and is indexed by the model name. The second layer contains all the maps. Returns: SampleModelInterface: The sample model used for the next fit_model. """ def reset(self): """Reset the iteration over the cascade. The implementing class should now reset the iteration such that get_next gets the first model again. """ def get_model(self, name): """Get one of the models in the cascade by name. Args: name (str): the name of the model we want to return Returns: the model we want to have or None if no model found """ def get_model_names(self): """Get the names of the models in this cascade in order of execution. Returns: list of str: the names of the models in this list """ def set_problem_data(self, problem_data): """Set the problem data in every model in the cascade.""" class SimpleCascadeModel(DMRICascadeModelInterface): def __init__(self, name, model_list): """Create a new cascade model from a given list of models. This class adds some standard bookkeeping to make implementing cascade models easier. 
Args: name (str): the name of this cascade model model_list (list of models): the list of models this cascade consists of """ super(DMRICascadeModelInterface, self).__init__() self._name = name self._model_list = model_list self._iteration_position = 0 self.problems_to_analyze = None @property def name(self): return self._name def has_next(self): return self._iteration_position != len(self._model_list) def get_next(self, output_previous_models): next_model = self._model_list[self._iteration_position] output_previous = {} if self._iteration_position > 0: output_previous = output_previous_models[self._model_list[self._iteration_position - 1].name] self._prepare_model(next_model, output_previous, output_previous_models) self._iteration_position += 1 return self._set_model_options(next_model) def reset(self): self._iteration_position = 0 def is_protocol_sufficient(self, protocol=None): for model in self._model_list: if not model.is_protocol_sufficient(protocol): return False return True def get_protocol_problems(self, protocol=None): problems = [] for model in self._model_list: problems.extend(map(lambda p: NamedProtocolProblem(p, model.name), model.get_protocol_problems(protocol))) return problems def get_required_protocol_names(self): protocol_names = [] for model in self._model_list: protocol_names.extend(model.get_required_protocol_names()) return list(set(protocol_names)) def get_model(self, name): for model in self._model_list: if model.name == name: return self._set_model_options(model) return None def get_model_names(self): return [model.name for model in self._model_list] def set_problem_data(self, problem_data): for model in self._model_list: model.set_problem_data(problem_data) def _set_model_options(self, model): """The final hook before we return a model from this class. This can set all kind of additional extra's to the model before we return it using any of the functions in this class Args: model: the model to which we want to set the final functions Returns: model: the same model with all extra's set. """ if self.problems_to_analyze: model.problems_to_analyze = self.problems_to_analyze model.double_precision = self.double_precision return model def _prepare_model(self, model, output_previous, output_all_previous): """Prepare the next model with the output of the previous model. By default this model initializes all parameter maps to the output of the previous model. Args: model: The model to prepare output_previous (dict): the output of the (direct) previous model. output_all_previous (dict): The output of all the previous models. Indexed first by model name, second by full parameter name. Returns: None, preparing should happen in-place. """ if not isinstance(model, DMRICascadeModelInterface) and output_previous: for key, value in output_previous.items(): if model.has_parameter(key): model.init(key, value) class CascadeConfig(ComponentConfig): """The cascade config to inherit from. These configs are loaded on the fly by the CascadeBuilder. Attributes: name (str): the name of this cascade description (str): the description models (tuple): the list of models we wish to optimize (in that order) Example: .. code-block:: python models = ('BallStick (Cascade)', 'Charmed_r1') inits (dict): per model the initializations from the previous model. Example: .. 
code-block:: python inits = {'Charmed_r1': [ ('Tensor.theta', 'Stick.theta'), ('Tensor.phi', 'Stick.phi'), ('w_res0.w', lambda output_previous, output_all_previous: output_previous['w_stick.w']) ] } In this example the Charmed_r1 model in the cascade initializes its Tensor compartment with a previous Ball&Stick model and initializes the restricted compartment volume fraction with the Stick fraction. You can either provide a string matching the parameter name of the exact previous model, or provide callback function that accepts both a dict containing the previous model estimates and a dict containing all previous model estimates by model name and returns a single initialization map or value. fixes (dict): per model the fixations from the previous model. Example: .. code-block:: python fixes = {'Charmed_r1': [('CharmedRestricted0.theta', 'Stick.theta'), ('CharmedRestricted0.phi', 'Stick.phi')]} The syntax is similar to that of the inits attribute. lower_bounds (dict): per model the lower bounds to set using the results from the previous model Example: .. code-block:: python lower_bounds = {'Charmed_r1': [ ('S0.s0', lambda output_previous, output_all_previous: 2 * np.min(output_previous['S0.s0'])) ]} The syntax is similar to that of the inits attribute. upper_bounds (dict): per model the upper bounds to set using the results from the previous model Example: .. code-block:: python upper_bounds = {'Charmed_r1': [ ('S0.s0', lambda output_previous, output_all_previous: 2 * np.max(output_previous['S0.s0'])) ]} The syntax is similar to that of the inits attribute. """ name = '' description = '' models = () inits = {} fixes = {} lower_bounds = {} upper_bounds = {} @bind_function def _prepare_model_cb(self, model, output_previous, output_all_previous): """Finalize the preparation of the model in this callback. This is called at the end of the regular _prepare_model function defined in the SimpleCascadeModel and as implemented by the AutoCreatedCascadeModel. Use this if you want to control more of the initialization of the next model than only the inits and fixes. """ @classmethod def meta_info(cls): meta_info = deepcopy(ComponentConfig.meta_info()) meta_info.update({'name': cls.name, 'description': cls.description, 'in_vivo_suitable': get_meta_info(cls.models[len(cls.models) - 1])['in_vivo_suitable'], 'ex_vivo_suitable': get_meta_info(cls.models[len(cls.models) - 1])['ex_vivo_suitable'] }) return meta_info class CascadeBuilder(ComponentBuilder): def create_class(self, template): """Creates classes with as base class SimpleCascadeModel Args: template (CascadeConfig): the cascade config template to use for creating the class with the right init settings. 
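        Example (a minimal sketch; the cascade and model names are illustrative and must refer to models known to MDT):

        .. code-block:: python

            class BallStick_Cascade(CascadeConfig):
                name = 'BallStick (Cascade)'
                description = 'Cascade for BallStick'
                models = ('S0', 'BallStick')

            cascade_class = CascadeBuilder().create_class(BallStick_Cascade)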
""" class AutoCreatedCascadeModel(method_binding_meta(template, SimpleCascadeModel)): def __init__(self, *args): new_args = [deepcopy(template.name), list(map(mdt.get_model, template.models))] for ind, arg in args: new_args[ind] = arg super(AutoCreatedCascadeModel, self).__init__(*new_args) def _prepare_model(self, model, output_previous, output_all_previous): super(AutoCreatedCascadeModel, self)._prepare_model(model, output_previous, output_all_previous) def parse_value(v): if isinstance(v, six.string_types): return output_previous[v] elif hasattr(v, '__call__'): return v(output_previous, output_all_previous) return v for item in template.inits.get(model.name, {}): model.init(item[0], parse_value(item[1])) for item in template.fixes.get(model.name, {}): model.fix(item[0], parse_value(item[1])) for item in template.lower_bounds.get(model.name, {}): model.set_lower_bound(item[0], parse_value(item[1])) for item in template.upper_bounds.get(model.name, {}): model.set_upper_bound(item[0], parse_value(item[1])) self._prepare_model_cb(model, output_previous, output_all_previous) return AutoCreatedCascadeModel PK9[qIa:/:/mdt/models/compartments.pyimport inspect import os from copy import deepcopy import six from mdt.components_loader import ComponentConfig, ComponentBuilder, ParametersLoader, method_binding_meta, \ ComponentConfigMeta from mdt.utils import spherical_to_cartesian from mot.model_building.cl_functions.base import ModelFunction from mot.model_building.cl_functions.parameters import CurrentObservationParam __author__ = 'Robbert Harms' __date__ = "2015-12-13" __maintainer__ = "Robbert Harms" __email__ = "robbert.harms@maastrichtuniversity.nl" class DMRICompartmentModelFunction(ModelFunction): def __init__(self, name, cl_function_name, parameter_list, cl_header, cl_code, dependency_list): """Create a new dMRI compartment model function. Args: name (str): the name of this compartment model cl_function_name (str): the name of this function in the CL kernel parameter_list (list of CLFunctionParameter): the list of the function parameters cl_header (str): the code for the CL header cl_code (str): the code for the function in CL dependency_list (list): the list of functions we depend on inside the kernel """ super(DMRICompartmentModelFunction, self).__init__(name, cl_function_name, parameter_list, dependency_list=dependency_list) self._cl_header = cl_header self._cl_code = cl_code def get_cl_header(self): inclusion_guard_name = 'DMRICM_' + self.cl_function_name + '_H' return ''' {dependencies} #ifndef {inclusion_guard_name} #define {inclusion_guard_name} {header} #endif // {inclusion_guard_name} '''.format(dependencies=self._get_cl_dependency_headers(), inclusion_guard_name=inclusion_guard_name, header=self._cl_header) def get_cl_code(self): inclusion_guard_name = 'DMRICM_' + self.cl_function_name + '_CL' return ''' {dependencies} #ifndef {inclusion_guard_name} #define {inclusion_guard_name} {header} #endif // {inclusion_guard_name} '''.format(dependencies=self._get_cl_dependency_code(), inclusion_guard_name=inclusion_guard_name, header=self._cl_code) def _get_vector_result_maps(self, theta, phi, vector_name='vec0'): """Convert spherical coordinates to cartesian vector in 3d Args: theta (ndarray): the double array with the theta values phi (ndarray): the double array with the phi values vector_name (str): the name for this vector, the common naming scheme is: .[_{0,1,2}] Returns: dict: containing the cartesian vector with the main the fibre direction. 
It returns an element .vec0 and elements vec0_ """ cartesian = spherical_to_cartesian(theta, phi) extra_dict = {'{}.{}'.format(self.name, vector_name): cartesian} for ind in range(3): extra_dict.update({'{}.{}_{}'.format(self.name, vector_name, ind): cartesian[:, ind]}) return extra_dict def _get_parameters_list(parameter_list): """Convert all the parameters in the given parameter list to actual parameter objects. Args: parameter_list (list): a list containing a mix of either parameter objects or strings. If it is a parameter we add a copy of it to the return list. If it is a string we will autoload it. Returns: list: the list of actual parameter objects """ parameters_loader = ParametersLoader() parameters = [] for item in parameter_list: if isinstance(item, six.string_types): if item == '_observation': parameters.append(CurrentObservationParam()) else: parameters.append(parameters_loader.load(item)) else: parameters.append(deepcopy(item)) return parameters def _construct_cl_function_definition(return_type, cl_function_name, parameters): """Create the CL function definition for a compartment function. This will construct something like (for the Stick model): .. code-block:: c mot_float_type cmStick(const mot_float_type4 g, const mot_float_type b, const mot_float_type d, const mot_float_type theta, const mot_float_type phi) Args: return_type (str): the return type cl_function_name (str): the name of the function parameters (list of CLFunctionParameter): the list of function parameters we use for the arguments Returns: str: the function definition (only the signature). """ def parameter_str(parameter): s = parameter.data_type.cl_type if parameter.data_type.pre_data_type_type_qualifiers: for qualifier in parameter.data_type.pre_data_type_type_qualifiers: s = qualifier + ' ' + s if parameter.data_type.address_space_qualifier: s = parameter.data_type.address_space_qualifier + ' ' + s if parameter.data_type.post_data_type_type_qualifier: s += ' ' + parameter.data_type.post_data_type_type_qualifier s += ' ' + parameter.name return s parameters_str = ',\n'.join(parameter_str(parameter) for parameter in parameters) return '{return_type} {cl_function_name}({parameters})'.format(return_type=return_type, cl_function_name=cl_function_name, parameters=parameters_str) class CompartmentConfigMeta(ComponentConfigMeta): def __new__(mcs, name, bases, attributes): """Extends the default meta class with extra functionality for the compartments. This adds the cl_function_name if it is not defined, and creates the correct cl_code and cl_header. """ result = super(CompartmentConfigMeta, mcs).__new__(mcs, name, bases, attributes) if 'cl_function_name' not in attributes: result.cl_function_name = 'cm{}'.format(name) # to prevent the base from loading the initial meta class. 
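        # (in effect, only classes that already have a CompartmentConfigMeta based class among
        #  their bases, i.e. subclasses of CompartmentConfig, get an auto-generated cl_code and
        #  cl_header; the initial CompartmentConfig definition itself is skipped by this check)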
if any(isinstance(base, CompartmentConfigMeta) for base in bases): result.cl_code = mcs._get_cl_code(result, bases, attributes) result.cl_header = mcs._get_cl_header(result, bases, attributes) return result @classmethod def _get_cl_code(mcs, result, bases, attributes): if 'cl_code' in attributes and attributes['cl_code'] is not None: s = _construct_cl_function_definition( 'mot_float_type', result.cl_function_name, _get_parameters_list(result.parameter_list)) s += '{\n' + attributes['cl_code'] + '\n}' return s module_path = os.path.abspath(inspect.getfile(result)) path = os.path.join(os.path.dirname(module_path), os.path.splitext(os.path.basename(module_path))[0]) + '.cl' if os.path.isfile(path): with open(path, 'r') as f: return f.read() for base in bases: if hasattr(base, 'cl_code') and base.cl_code is not None: return base.cl_code @classmethod def _get_cl_header(mcs, result, bases, attributes): if 'cl_header' in attributes and attributes['cl_header'] is not None: return attributes['cl_header'] module_path = os.path.abspath(inspect.getfile(result)) path = os.path.join(os.path.dirname(module_path), os.path.splitext(os.path.basename(module_path))[0]) + '.h' if os.path.isfile(path): with open(path, 'r') as f: return f.read() for base in bases: if hasattr(base, 'cl_header') and base.cl_code is not None: return base.cl_header return _construct_cl_function_definition('mot_float_type', result.cl_function_name, _get_parameters_list(result.parameter_list)) + ';' class CompartmentConfig(six.with_metaclass(CompartmentConfigMeta, ComponentConfig)): """The compartment config to inherit from. These configs are loaded on the fly by the CompartmentBuilder. All methods you define are automatically bound to the DMRICompartmentModelFunction. Also, to do extra initialization you can define a method init. This method is called after object construction to allow for additional initialization. Also, this method is not added to the final object. Attributes: name (str): the name of the model, defaults to the class name description (str): model description cl_function_name (str): the name of the function in the CL kernel parameter_list (list): the list of parameters to use. If a parameter is a string we will use it automatically, if not it is supposed to be a CLFunctionParameter instance that we append directly. cl_header (CLHeaderDefinition): the CL header definition to use. Defaults to CLHeaderFromTemplate. cl_code (CLCodeDefinition): the CL code definition to use. Defaults to CLCodeFromAdjacentFile. dependency_list (list): the list of functions this function depends on, can contain string which will be resolved as library functions. """ name = '' description = '' cl_function_name = None parameter_list = [] cl_header = None cl_code = None dependency_list = [] class CompartmentBuildingBase(DMRICompartmentModelFunction): """Use this class in super calls if you want to overwrite methods in the inherited compartment configs. In python2 super needs a type to be able to do its work. This is the type you can give it to allow it to do its work. """ class CompartmentBuilder(ComponentBuilder): def create_class(self, template): """Creates classes with as base class CompartmentBuildingBase Args: template (CascadeConfig): the compartment config template to use for creating the class with the right init settings. 
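        For illustration, a compartment template could look roughly as follows. This is a
        simplified sketch of a Stick-like compartment, not the actual MDT Stick definition:

        .. code-block:: python

            class Stick(CompartmentConfig):
                # illustrative only: string parameters are auto-loaded by name and the CL body
                # is wrapped in a full function definition by the CompartmentConfigMeta metaclass
                parameter_list = ('g', 'b', 'd', 'theta', 'phi')
                cl_code = '''
                    mot_float_type4 n = (mot_float_type4)(cos(phi) * sin(theta),
                                                          sin(phi) * sin(theta),
                                                          cos(theta), 0.0);
                    return exp(-b * d * pown(dot(g, n), 2));
                '''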
""" class AutoCreatedDMRICompartmentModel(method_binding_meta(template, CompartmentBuildingBase)): def __init__(self, *args): new_args = [template.name, template.cl_function_name, _get_parameters_list(template.parameter_list), template.cl_header, template.cl_code, _resolve_dependencies(template.dependency_list)] for ind, already_set_arg in enumerate(args): new_args[ind] = already_set_arg super(AutoCreatedDMRICompartmentModel, self).__init__(*new_args) if hasattr(template, 'init'): template.init(self) return AutoCreatedDMRICompartmentModel def _resolve_dependencies(dependency_list): """Resolve the dependency list such that the result contains all functions. Args: dependency_list (list): the list of dependencies as given by the user. Elements can either include actual instances of :class:`~mot.model_building.cl_functions.base.CLFunction` or strings with the name of the component to auto-load. Returns: list: a new list with the string elements resolved as :class:`~mot.model_building.cl_functions.base.LibraryFunction`. """ from mdt.components_loader import LibraryFunctionsLoader lib_loader = LibraryFunctionsLoader() result = [] for dependency in dependency_list: if isinstance(dependency, six.string_types): result.append(lib_loader.load(dependency)) else: result.append(dependency) return result PKjUpI~`9/mdt/models/__init__.py__author__ = 'Robbert Harms' __date__ = "2015-10-27" __maintainer__ = "Robbert Harms" __email__ = "robbert.harms@maastrichtuniversity.nl" PK'VpI33 3 .mdt/models/parsers/CompositeModelExpression.py# CAVEAT UTILITOR # # This file was automatically generated by Grako. # # https://pypi.python.org/pypi/grako/ # # Any changes you make to it will be overwritten the next time # the file is generated. from __future__ import print_function, division, absolute_import, unicode_literals from grako.parsing import graken, Parser __version__ = (2015, 12, 12, 11, 11, 12, 5) __all__ = [ 'CompositeModelExpressionParser', 'CompositeModelExpressionSemantics', ] class CompositeModelExpressionParser(Parser): def __init__(self, whitespace=None, nameguard=None, comments_re=None, eol_comments_re=None, ignorecase=None, left_recursion=True, **kwargs): super(CompositeModelExpressionParser, self).__init__( whitespace=whitespace, nameguard=nameguard, comments_re=comments_re, eol_comments_re=eol_comments_re, ignorecase=ignorecase, left_recursion=left_recursion, **kwargs ) @graken() def _result_(self): self._expr_() @graken() def _expr_(self): with self._choice(): with self._option(): self._term_() with self._group(): with self._choice(): with self._option(): self._token('+') with self._option(): self._token('-') self._error('expecting one of: + -') self._expr_() with self._option(): self._term_() self._error('no available options') @graken() def _term_(self): with self._choice(): with self._option(): self._factor_() with self._group(): with self._choice(): with self._option(): self._token('*') with self._option(): self._token('/') self._error('expecting one of: * /') self._term_() with self._option(): self._factor_() self._error('no available options') @graken() def _factor_(self): with self._choice(): with self._option(): self._token('(') self._expr_() self._token(')') with self._option(): self._model_() self._error('no available options') @graken() def _model_(self): self._model_name_() with self._optional(): self._token('(') self._nickname_() self._token(')') @graken() def _model_name_(self): self._pattern(r'[a-zA-Z_]\w*') @graken() def _nickname_(self): self._pattern(r'[a-zA-Z_]\w*') class 
CompositeModelExpressionSemantics(object): def result(self, ast): return ast def expr(self, ast): return ast def term(self, ast): return ast def factor(self, ast): return ast def model(self, ast): return ast def model_name(self, ast): return ast def nickname(self, ast): return ast PKBVpIGn%4mdt/models/parsers/CompositeModelExpressionParser.pyimport six from mdt.components_loader import CompartmentModelsLoader from .CompositeModelExpression import CompositeModelExpressionSemantics, CompositeModelExpressionParser class Semantics(CompositeModelExpressionSemantics): def __init__(self): super(Semantics, self).__init__() self._compartments_loader = CompartmentModelsLoader() def expr(self, ast): if not isinstance(ast, list): return ast if isinstance(ast, list): return ast[0], ast[2], ast[1] return ast def term(self, ast): if not isinstance(ast, list): return ast if isinstance(ast, list): return ast[0], ast[2], ast[1] return ast def factor(self, ast): if isinstance(ast, list): return ast[1] return ast def model(self, ast): if isinstance(ast, six.string_types): return self._compartments_loader.load(ast) else: return self._compartments_loader.load(ast[0], ast[2]) def parse(model_expression): """Parse the given model expression into a suitable model tree. Args: model_expression (str): the model expression string. Example: .. code-block:: none S0 * ( (Weight(Wball) * Ball) + (Weight(Wstick) * Stick ) ) If the model name is followed by parenthesis the string in parenthesis will represent the model's nickname. Returns: :class:`list`: the compartment model tree for use in composite models. """ parser = CompositeModelExpressionParser(parseinfo=False) return parser.parse(model_expression, rule_name='result', semantics=Semantics()) PKjUpIi mdt/models/parsers/__init__.py__author__ = 'Robbert Harms' __date__ = "2015-12-12" __maintainer__ = "Robbert Harms" __email__ = "robbert.harms@maastrichtuniversity.nl" PKjUpI\C/66$mdt/cli_scripts/mdt_generate_mask.py#!/usr/bin/env python # PYTHON_ARGCOMPLETE_OK """Create a (brain) mask for the given DWI. 
This uses the median-otsu algorithm.""" import argparse import logging import os import mdt from argcomplete.completers import FilesCompleter import mot.configuration from mdt.shell_utils import BasicShellApplication from mot import cl_environments from mot.load_balance_strategies import EvenDistribution import textwrap __author__ = 'Robbert Harms' __date__ = "2015-08-18" __maintainer__ = "Robbert Harms" __email__ = "robbert.harms@maastrichtuniversity.nl" class GenerateMask(BasicShellApplication): def __init__(self): self.available_devices = {ind: env for ind, env in enumerate(cl_environments.CLEnvironmentFactory.smart_device_selection())} def _get_arg_parser(self): description = textwrap.dedent(__doc__) description += mdt.shell_utils.get_citation_message() epilog = textwrap.dedent(""" Examples of use: mdt-generate-mask data.nii.gz data.prtcl mdt-generate-mask data.nii.gz data.prtcl -o data_mask.nii.gz mdt-generate-mask data.nii.gz data.prtcl -o data_mask.nii.gz --median-radius 2 """) parser = argparse.ArgumentParser(description=description, epilog=epilog, formatter_class=argparse.RawTextHelpFormatter) parser.add_argument('dwi', action=mdt.shell_utils.get_argparse_extension_checker(['.nii', '.nii.gz', '.hdr', '.img']), help='the diffusion weighted image').completer = FilesCompleter(['nii', 'gz', 'hdr', 'img'], directories=False) parser.add_argument('protocol', action=mdt.shell_utils.get_argparse_extension_checker(['.prtcl']), help='the protocol file, see mdt-generate-protocol').\ completer = FilesCompleter(['prtcl'], directories=False) parser.add_argument('-o', '--output-name', action=mdt.shell_utils.get_argparse_extension_checker(['.nii', '.nii.gz', '.hdr', '.img']), help='the filename of the output file. Default is _mask.nii.gz').completer = \ FilesCompleter(['nii', 'gz', 'hdr', 'img'], directories=False) parser.add_argument('--median-radius', type=int, default=4, help="Radius (in voxels) of the applied median filter (default 4).") parser.add_argument('--numpass', type=int, default=4, help="Number of pass of the median filter (default 4).") parser.add_argument('--dilate', type=int, default=1, help="Number of iterations for binary dilation (default 1).") parser.add_argument('--cl-device-ind', type=int, nargs='*', choices=self.available_devices.keys(), help="The index of the device we would like to use. This follows the indices " "in mdt-list-devices and defaults to the first GPU.") parser.add_argument('--double', dest='double_precision', action='store_true', help="Calculate in double precision.") parser.add_argument('--float', dest='double_precision', action='store_false', help="Calculate in single precision. 
Default.") parser.set_defaults(double_precision=False) return parser def run(self, args): dwi_name = os.path.splitext(os.path.realpath(args.dwi))[0] dwi_name = dwi_name.replace('.nii', '') output_name = os.path.realpath(args.output_name) or dwi_name + '_mask.nii.gz' if args.cl_device_ind: if isinstance(args.cl_device_ind, int): mot.configuration.set_cl_environments([self.available_devices[args.cl_device_ind]]) else: mot.configuration.set_cl_environments([self.available_devices[ind] for ind in args.cl_device_ind]) mot.configuration.set_load_balancer(EvenDistribution()) mdt.create_median_otsu_brain_mask(os.path.realpath(args.dwi), os.path.realpath(args.protocol), output_name, median_radius=args.median_radius, numpass=args.numpass, dilate=args.dilate) logger = logging.getLogger(__name__) logger.info('Saved the mask to: {}'.format(output_name)) if __name__ == '__main__': GenerateMask().start() PKjUpIEa{{ mdt/cli_scripts/mdt_batch_fit.py#!/usr/bin/env python # PYTHON_ARGCOMPLETE_OK """Fits a batch profile to a set of data. This script can be used to fit multiple models to multiple datasets. It needs a batch profile with information about the subjects. If no batch profile is given, this routine will try to auto-detect a good batch profile. The most general batch profile is the 'DirPerSubject' profile which assumes that every subject has its own subdirectory under the given data folder. For details, please look up the batch profiles in your home folder. A few of the batch profile settings can be altered with arguments to this script. For example, use_gradient_deviations and models_to_fit override the values in the batch profile. """ import argparse import os import mdt from argcomplete.completers import FilesCompleter from mdt.batch_utils import batch_profile_factory, SelectedSubjects from mdt.components_loader import BatchProfilesLoader from mdt.shell_utils import BasicShellApplication from mot import cl_environments import textwrap __author__ = 'Robbert Harms' __date__ = "2015-08-18" __maintainer__ = "Robbert Harms" __email__ = "robbert.harms@maastrichtuniversity.nl" class BatchFit(BasicShellApplication): def __init__(self): self.available_devices = list((ind for ind, env in enumerate(cl_environments.CLEnvironmentFactory.smart_device_selection()))) def _get_arg_parser(self): description = textwrap.dedent(__doc__) description += mdt.shell_utils.get_citation_message() epilog = textwrap.dedent(""" Examples of use: mdt-batch-fit . mdt-batch-fit /data/mgh --batch-profile 'HCP_MGH' mdt-batch-fit . --subjects-index 0 1 2 --subjects-id 1003 1004 mdt-batch-fit . --dry-run """) batch_profiles = BatchProfilesLoader().list_all() parser = argparse.ArgumentParser(description=description, epilog=epilog, formatter_class=argparse.RawTextHelpFormatter) parser.add_argument('data_folder', help='the directory with the subject to fit').completer = FilesCompleter() parser.add_argument('-b', '--batch_profile', default=None, choices=batch_profiles, help='The batch profile (by name) to use during fitting. If not given a' 'batch profile is auto-detected.') parser.add_argument('--cl-device-ind', type=int, nargs='*', choices=self.available_devices, help="The index of the device we would like to use. 
This follows the indices " "in mdt-list-devices and defaults to the first GPU.") parser.add_argument('--recalculate', dest='recalculate', action='store_true', help="Recalculate the model(s) if the output exists.") parser.add_argument('--no-recalculate', dest='recalculate', action='store_false', help="Do not recalculate the model(s) if the output exists. (default)") parser.set_defaults(recalculate=False) parser.add_argument('--use-gradient-deviations', dest='use_gradient_deviations', action='store_true', help="Uses the gradient deviations. If not set, the default in the profile is used.") parser.add_argument('--no-gradient-deviations', dest='use_gradient_deviations', action='store_false', help="Disable the use of gradient deviations. If not set, the default " "in the profile is used.") parser.set_defaults(use_gradient_deviations=None) parser.add_argument('--double', dest='double_precision', action='store_true', help="Calculate in double precision.") parser.add_argument('--float', dest='double_precision', action='store_false', help="Calculate in single precision. (default)") parser.set_defaults(double_precision=False) parser.add_argument('--subjects-index', type=int, nargs='*', help="The index of the subjects we would like to fit. This reduces the set of" "subjects.") parser.add_argument('--subjects-id', type=str, nargs='*', help="The id of the subjects we would like to fit. This reduces the set of" "subjects.") parser.add_argument('--models-to-fit', type=str, nargs='*', help="The models to fit, this overrides the models in the batch profile.") parser.add_argument('--dry-run', dest='dry_run', action='store_true', help="Shows what it will do without the dry run argument.") parser.set_defaults(dry_run=False) parser.add_argument('--use-cascade-subdir', dest='cascade_subdir', action='store_true', help="Set if you want to create a subdirectory for the given cascade model" ", default is False.") parser.set_defaults(cascade_subdir=False) parser.add_argument('--tmp-results-dir', dest='tmp_results_dir', default='True', type=str, help='The directory for the temporary results. The default ("True") uses the config file ' 'setting. 
Set to the literal "None" to disable.').completer = FilesCompleter() return parser def run(self, args): batch_profile = batch_profile_factory(args.batch_profile, os.path.realpath(args.data_folder)) if args.use_gradient_deviations is not None: batch_profile.use_gradient_deviations = args.use_gradient_deviations if args.models_to_fit is not None: batch_profile.models_to_fit = args.models_to_fit subjects_selection = None if args.subjects_index or args.subjects_id: indices = args.subjects_index if args.subjects_index else [] subject_ids = args.subjects_id if args.subjects_id else [] subjects_selection = SelectedSubjects(indices=indices, subject_ids=subject_ids) tmp_results_dir = args.tmp_results_dir for match, to_set in [('true', True), ('false', False), ('none', None)]: if tmp_results_dir.lower() == match: tmp_results_dir = to_set break mdt.batch_fit(os.path.realpath(args.data_folder), subjects_selection=subjects_selection, batch_profile=batch_profile, recalculate=args.recalculate, cl_device_ind=args.cl_device_ind, double_precision=args.double_precision, dry_run=args.dry_run, cascade_subdir=args.cascade_subdir, tmp_results_dir=tmp_results_dir) if __name__ == '__main__': BatchFit().start() PKjUpIN mdt/cli_scripts/mdt_info_img.py#!/usr/bin/env python # PYTHON_ARGCOMPLETE_OK """Print some basic information about an image file.""" import argparse import os from mdt.nifti import load_nifti import textwrap from mdt.shell_utils import BasicShellApplication __author__ = 'Robbert Harms' __date__ = "2015-08-18" __maintainer__ = "Robbert Harms" __email__ = "robbert.harms@maastrichtuniversity.nl" class InfoImg(BasicShellApplication): def _get_arg_parser(self): description = textwrap.dedent(__doc__) description += self._get_citation_message() epilog = textwrap.dedent(""" Examples of use: mdt-info-img my_img.nii mdt-info-img *.nii """) parser = argparse.ArgumentParser(description=description, epilog=epilog, formatter_class=argparse.RawTextHelpFormatter) parser.add_argument('images', metavar='images', nargs="+", type=str, help="The input images") return parser def run(self, args): for image in args.images: image_path = os.path.realpath(image) img = load_nifti(image_path) header = img.get_header() print('{}'.format(image)) self.print_info(header) print('') def print_info(self, header): row_format = "{:<15}{}" print(row_format.format('data_type', str(header.get_data_dtype()).upper())) print(row_format.format('nmr_dim', len(header.get_data_shape()))) for ind, el in enumerate(header.get_data_shape()): print(row_format.format('dim{}'.format(ind), el)) for ind, el in enumerate(header.get_zooms()): print(row_format.format('pixdim{}'.format(ind), el)) if __name__ == '__main__': InfoImg().start() PKjUpIv )mdt/cli_scripts/mdt_generate_roi_slice.py#!/usr/bin/env python # PYTHON_ARGCOMPLETE_OK """Create a single slice mask that only includes the voxels in the selected slice.""" import argparse import os import mdt from argcomplete.completers import FilesCompleter import textwrap from mdt.nifti import load_nifti import mdt.utils from mdt.shell_utils import BasicShellApplication __author__ = 'Robbert Harms' __date__ = "2015-08-18" __maintainer__ = "Robbert Harms" __email__ = "robbert.harms@maastrichtuniversity.nl" class GenerateRoiSlice(BasicShellApplication): def _get_arg_parser(self): description = textwrap.dedent(__doc__) description += mdt.shell_utils.get_citation_message() epilog = textwrap.dedent(""" Examples of use: mdt-generate-roi-slice mask.nii.gz mdt-generate-roi-slice mask.nii.gz -d 1 -s 50 
mdt-generate-roi-slice mask.nii.gz -d 1 -s 50 -o my_roi_1_50.nii.gz """) parser = argparse.ArgumentParser(description=description, epilog=epilog, formatter_class=argparse.RawTextHelpFormatter) parser.add_argument('mask', action=mdt.shell_utils.get_argparse_extension_checker(['.nii', '.nii.gz', '.hdr', '.img']), help='the mask to select a slice from').completer = \ FilesCompleter(['nii', 'gz', 'hdr', 'img'], directories=False) parser.add_argument('-d', '--dimension', type=int, help="The dimension to index (0, 1, 2, ...). Default is 2.") parser.add_argument('-s', '--slice', type=int, help="The slice to use in the selected dimension (0, 1, 2, ...)." "Defaults to center of chosen dimension.") parser.add_argument('-o', '--output-name', action=mdt.shell_utils.get_argparse_extension_checker(['.nii', '.nii.gz', '.hdr', '.img']), help='the filename of the output file. Default is __.nii.gz').\ completer = FilesCompleter(['nii', 'gz', 'hdr', 'img'], directories=False) return parser def run(self, args): shape = load_nifti(args.mask).shape roi_dimension = args.dimension if args.dimension is not None else 2 if roi_dimension > len(shape)-1 or roi_dimension < 0: print('Error: the given mask has only {0} dimensions with slices {1}.'.format(len(shape), shape)) exit(1) roi_slice = args.slice if args.slice is not None else shape[roi_dimension] // 2 if roi_slice > shape[roi_dimension]-1 or roi_slice < 0: print('Error: dimension {0} has only {1} slices.'.format(roi_dimension, shape[roi_dimension])) exit(1) mask_base_name = os.path.splitext(os.path.realpath(args.mask))[0] mask_base_name = mask_base_name.replace('.nii', '') if args.output_name: output_name = os.path.realpath(args.output_name) else: output_name = mask_base_name + '_{0}_{1}.nii.gz'.format(roi_dimension, roi_slice) mdt.utils.write_slice_roi(os.path.realpath(args.mask), roi_dimension, roi_slice, output_name, overwrite_if_exists=True) if __name__ == '__main__': GenerateRoiSlice().start() PKjUpI޽$mdt/cli_scripts/mdt_info_protocol.py#!/usr/bin/env python # PYTHON_ARGCOMPLETE_OK """Print some basic information about a protocol.""" import argparse import os import mdt from argcomplete.completers import FilesCompleter import textwrap from mdt.shell_utils import BasicShellApplication __author__ = 'Robbert Harms' __date__ = "2015-08-18" __maintainer__ = "Robbert Harms" __email__ = "robbert.harms@maastrichtuniversity.nl" class InfoProtocol(BasicShellApplication): def _get_arg_parser(self): description = textwrap.dedent(__doc__) description += self._get_citation_message() epilog = textwrap.dedent(""" Examples of use: mdt-info-protocol my_protocol.prtcl """) parser = argparse.ArgumentParser(description=description, epilog=epilog, formatter_class=argparse.RawTextHelpFormatter) parser.add_argument('protocol', action=mdt.shell_utils.get_argparse_extension_checker(['.prtcl']), help='the protocol file').completer = FilesCompleter(['prtcl'], directories=False) return parser def run(self, args): protocol = mdt.load_protocol(os.path.realpath(args.protocol)) self.print_info(protocol) def print_info(self, protocol): row_format = "{:<15}{}" print(row_format.format('nmr_rows', protocol.length)) print(row_format.format('nmr_unweighted', len(protocol.get_unweighted_indices()))) print(row_format.format('nmr_weighted', len(protocol.get_weighted_indices()))) print(row_format.format('nmr_shells', len(protocol.get_b_values_shells()))) shells = protocol.get_b_values_shells() shells_text = [] for shell in shells: occurrences = protocol.count_occurences('b', shell) 
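            # report each shell as its b-value scaled by 1e9, followed by the number of
            # protocol rows acquired at that shell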
shells_text.append('{0:0=.3f}e9 ({1})'.format(shell / 1e9, occurrences)) print(row_format.format('shells', ', '.join(shells_text))) print(row_format.format('nmr_columns', protocol.number_of_columns)) print(row_format.format('columns', ', '.join(protocol.column_names))) if __name__ == '__main__': InfoProtocol().start() PKjUpI3J#mdt/cli_scripts/mdt_list_devices.py#!/usr/bin/env python # PYTHON_ARGCOMPLETE_OK """This script prints information about the available devices on your computer.""" import argparse import textwrap import mdt from mdt.shell_utils import BasicShellApplication from mot import cl_environments __author__ = 'Robbert Harms' __date__ = "2015-08-18" __maintainer__ = "Robbert Harms" __email__ = "robbert.harms@maastrichtuniversity.nl" class ListDevices(BasicShellApplication): def _get_arg_parser(self): description = textwrap.dedent(__doc__) description += mdt.shell_utils.get_citation_message() parser = argparse.ArgumentParser(description=description, formatter_class=argparse.RawTextHelpFormatter) parser.add_argument('-l', '--long', action='store_true', help='print all info about the devices') return parser def run(self, args): mdt.init_user_settings(pass_if_exists=True) for ind, env in enumerate(cl_environments.CLEnvironmentFactory.smart_device_selection()): print('Device {}:'.format(ind)) if args.long: print(repr(env)) else: print(str(env)) if __name__ == '__main__': ListDevices().start() PKjUpI6)mdt/cli_scripts/mdt_generate_bvec_bval.py#!/usr/bin/env python # PYTHON_ARGCOMPLETE_OK """Generate the bval and bvec file from a protocol file.""" import argparse import os import mdt from argcomplete.completers import FilesCompleter import textwrap import mdt.protocols from mdt.shell_utils import BasicShellApplication from mdt.protocols import write_bvec_bval __author__ = 'Robbert Harms' __date__ = "2015-08-18" __maintainer__ = "Robbert Harms" __email__ = "robbert.harms@maastrichtuniversity.nl" class GenerateBvecBval(BasicShellApplication): def __init__(self): mdt.init_user_settings(pass_if_exists=True) def _get_arg_parser(self): description = textwrap.dedent(__doc__) description += self._get_citation_message() epilog = textwrap.dedent(""" Examples of use: mdt-generate-bvec-bval my_protocol.prtcl mdt-generate-bvec-bval my_protocol.prtcl bvec_name.bvec bval_name.bval """) parser = argparse.ArgumentParser(description=description, epilog=epilog, formatter_class=argparse.RawTextHelpFormatter) parser.add_argument('protocol', help='the protocol file').completer = FilesCompleter() parser.add_argument('bvec', help="the output bvec file", nargs='?', default=None).completer = FilesCompleter() parser.add_argument('bval', help="the output bvec file", nargs='?', default=None).completer = FilesCompleter() return parser def run(self, args): protocol_base = os.path.join(os.path.dirname(os.path.realpath(args.protocol)), os.path.splitext(os.path.basename(args.protocol))[0]) if args.bvec: bvec = os.path.realpath(args.bvec) else: bvec = protocol_base + '.bvec' if args.bval: bval = os.path.realpath(args.bval) else: bval = protocol_base + '.bval' write_bvec_bval(mdt.load_protocol(os.path.realpath(args.protocol)), bvec, bval) if __name__ == '__main__': GenerateBvecBval().start() PKjUpI*ea a $mdt/cli_scripts/mdt_math_protocol.py#!/usr/bin/env python # PYTHON_ARGCOMPLETE_OK """Evaluate an expression on a protocol. This is meant to quickly change a protocol using mathematical expressions. The expressions can be any valid python string separated if needed with the semicolon (;). 
The columns of the input protocol are loaded and stored as arrays with as variable names the names of the columns. Next, the expression is evaluated on those columns and the result is stored in the indicated file. An additional function "rm()" is also available with wich you can remove columns from the protocol, and a function "add(, )" is available to add columns. When adding a column, the value can either be a scalar or a vector. Additionally the numpy library is available with prefix 'np.'. """ import argparse import os import numpy as np import mdt from argcomplete.completers import FilesCompleter import textwrap from mdt.shell_utils import BasicShellApplication __author__ = 'Robbert Harms' __date__ = "2015-08-18" __maintainer__ = "Robbert Harms" __email__ = "robbert.harms@maastrichtuniversity.nl" class MathProtocol(BasicShellApplication): def _get_arg_parser(self): description = textwrap.dedent(__doc__) description += self._get_citation_message() epilog = textwrap.dedent(""" Examples of use: mdt-math-protocol protocol.prtcl 'G *= 1e-3' mdt-math-protocol protocol.prtcl 'G *= 1e-3; TR /= 1000; TE /= 1000' mdt-math-protocol protocol.prtcl "rm('G')" mdt-math-protocol protocol.prtcl "add('TE', 50e-3)" """) parser = argparse.ArgumentParser(description=description, epilog=epilog, formatter_class=argparse.RawTextHelpFormatter) parser.add_argument('input_protocol', metavar='input_protocol', type=str, help="The input protocol") parser.add_argument('expr', metavar='expr', type=str, help="The expression to evaluate.") parser.add_argument('-o', '--output_file', help='the output protocol, defaults to the input protocol.').completer = FilesCompleter() return parser def run(self, args): if args.output_file is not None: output_file = os.path.realpath(args.output_file) else: output_file = os.path.realpath(args.input_protocol) protocol = mdt.load_protocol(os.path.realpath(args.input_protocol)) context_dict = {name: protocol.get_column(name) for name in protocol.column_names} def rm(column_name): protocol.remove_column(column_name) del context_dict[column_name] def add(column_name, value): protocol.add_column(column_name, value) context_dict[column_name] = value exec(args.expr, {'np': np, 'rm': rm, 'add': add}, context_dict) for name, value in context_dict.items(): protocol.update_column(name, value) mdt.write_protocol(protocol, output_file) if __name__ == '__main__': MathProtocol().start() PKjUpIn&& mdt/cli_scripts/mdt_view_maps.py#!/usr/bin/env python # PYTHON_ARGCOMPLETE_OK """Launches the MDT maps visualizer.""" import argparse import os import textwrap from argcomplete.completers import FilesCompleter from mdt.utils import init_user_settings from mdt import view_maps from mdt.visualization.maps.base import DataInfo from mdt.shell_utils import BasicShellApplication, get_citation_message __author__ = 'Robbert Harms' __date__ = "2015-08-18" __maintainer__ = "Robbert Harms" __email__ = "robbert.harms@maastrichtuniversity.nl" class GUI(BasicShellApplication): def __init__(self): init_user_settings(pass_if_exists=True) def _get_arg_parser(self): description = textwrap.dedent(__doc__) description += get_citation_message() parser = argparse.ArgumentParser(description=description, formatter_class=argparse.RawTextHelpFormatter) parser.add_argument('dir', metavar='dir', type=str, nargs='?', help='the directory to use', default=None).completer = FilesCompleter() parser.add_argument('-c', '--config', type=str, help='Use the given initial configuration').completer = \ FilesCompleter(['conf'], directories=False) 
parser.add_argument('-m', '--maximize', action='store_true', help="Maximize the shown window") parser.add_argument('--to-file', type=str, help="If set export the figure to the given filename") return parser def run(self, args): if args.dir: data = DataInfo.from_dir(os.path.realpath(args.dir)) else: data = DataInfo.from_dir(os.getcwd()) to_file = None if args.to_file: to_file = os.path.realpath(args.to_file) config = None if args.config: filename = os.path.realpath(args.config) if os.path.exists(filename): with open(filename, 'r') as f: config = f.read() view_maps(data, config, show_maximized=args.maximize, to_file=to_file) if __name__ == '__main__': GUI().start() PKjUpI< mdt/cli_scripts/mdt_model_fit.py#!/usr/bin/env python # PYTHON_ARGCOMPLETE_OK """Fit one of the models to the given data. This function can use two kinds of noise standard deviation, a global or a local (voxel wise). If the argument -n / --noise-std is not set, MDT uses a default automatic noise estimation which may be either global or local. To use a predefined global noise std please set the argument to a floating point value. To use a voxel wise noise std, please give it a filename with a map to use. """ import argparse import os import mdt from argcomplete.completers import FilesCompleter from mdt.shell_utils import BasicShellApplication from mot import cl_environments import textwrap __author__ = 'Robbert Harms' __date__ = "2015-08-18" __maintainer__ = "Robbert Harms" __email__ = "robbert.harms@maastrichtuniversity.nl" class ModelFit(BasicShellApplication): def __init__(self): self.available_devices = list((ind for ind, env in enumerate(cl_environments.CLEnvironmentFactory.smart_device_selection()))) def _get_arg_parser(self): description = textwrap.dedent(__doc__) description += mdt.shell_utils.get_citation_message() epilog = textwrap.dedent(""" Examples of use: mdt-model-fit "BallStick (Cascade)" data.nii.gz data.prtcl roi_mask_0_50.nii.gz mdt-model-fit "BallStick (Cascade)" data.nii.gz data.prtcl data_mask.nii.gz --no-recalculate mdt-model-fit "BallStick (Cascade)" data.nii.gz data.prtcl data_mask.nii.gz --cl-device-ind 1 mdt-model-fit "BallStick (Cascade)" data.nii.gz data.prtcl data_mask.nii.gz --cl-device-ind {0, 1} """) parser = argparse.ArgumentParser(description=description, epilog=epilog, formatter_class=argparse.RawTextHelpFormatter) parser.add_argument('model', metavar='model', choices=mdt.get_models_list(), help='model name, see mdt-list-models') parser.add_argument('dwi', action=mdt.shell_utils.get_argparse_extension_checker(['.nii', '.nii.gz', '.hdr', '.img']), help='the diffusion weighted image').completer = FilesCompleter(['nii', 'gz', 'hdr', 'img'], directories=False) parser.add_argument( 'protocol', action=mdt.shell_utils.get_argparse_extension_checker(['.prtcl']), help='the protocol file, see mdt-generate-protocol').completer = FilesCompleter(['prtcl'], directories=False) parser.add_argument('mask', action=mdt.shell_utils.get_argparse_extension_checker(['.nii', '.nii.gz', '.hdr', '.img']), help='the (brain) mask to use').completer = FilesCompleter(['nii', 'gz', 'hdr', 'img'], directories=False) parser.add_argument('-o', '--output_folder', help='the directory for the output, defaults to "output/" ' 'in the same directory as the dwi volume').completer = FilesCompleter() parser.add_argument('-n', '--noise-std', default=None, help='the noise std, defaults to None for automatic noise estimation.' 
'Either set this to a value, or to a filename.') parser.add_argument('--gradient-deviations', action=mdt.shell_utils.get_argparse_extension_checker(['.nii', '.nii.gz', '.hdr', '.img']), help="The volume with the gradient deviations to use, in HCP WUMINN format.").\ completer = FilesCompleter(['nii', 'gz', 'hdr', 'img'], directories=False) parser.add_argument('--cl-device-ind', type=int, nargs='*', choices=self.available_devices, help="The index of the device we would like to use. This follows the indices " "in mdt-list-devices and defaults to the first GPU.") parser.add_argument('--recalculate', dest='recalculate', action='store_true', help="Recalculate the model(s) if the output exists. (default)") parser.add_argument('--no-recalculate', dest='recalculate', action='store_false', help="Do not recalculate the model(s) if the output exists.") parser.set_defaults(recalculate=True) parser.add_argument('--only-recalculate-last', dest='only_recalculate_last', action='store_true', help="Only recalculate the last model in a cascade. (default)") parser.add_argument('--recalculate-all', dest='only_recalculate_last', action='store_false', help="Recalculate all models in a cascade.") parser.set_defaults(only_recalculate_last=True) parser.add_argument('--double', dest='double_precision', action='store_true', help="Calculate in double precision.") parser.add_argument('--float', dest='double_precision', action='store_false', help="Calculate in single precision. (default)") parser.set_defaults(double_precision=False) parser.add_argument('--use-cascade-subdir', dest='cascade_subdir', action='store_true', help="Set if you want to create a subdirectory for the given cascade model" ", default is False.") parser.set_defaults(cascade_subdir=False) parser.add_argument('--tmp-results-dir', dest='tmp_results_dir', default='True', type=str, help='The directory for the temporary results. The default ("True") uses the config file ' 'setting. Set to the literal "None" to disable.').completer = FilesCompleter() return parser def run(self, args): mask_name = os.path.splitext(os.path.basename(os.path.realpath(args.mask)))[0] mask_name = mask_name.replace('.nii', '') output_folder = args.output_folder or os.path.join(os.path.dirname(args.dwi), 'output', mask_name) tmp_results_dir = args.tmp_results_dir for match, to_set in [('true', True), ('false', False), ('none', None)]: if tmp_results_dir.lower() == match: tmp_results_dir = to_set break noise_std = args.noise_std if noise_std is not None: if not os.path.isfile(os.path.realpath(noise_std)): noise_std = float(noise_std) mdt.fit_model(args.model, mdt.load_problem_data(os.path.realpath(args.dwi), os.path.realpath(args.protocol), os.path.realpath(args.mask), gradient_deviations=args.gradient_deviations, noise_std=noise_std), output_folder, recalculate=args.recalculate, only_recalculate_last=args.only_recalculate_last, cl_device_ind=args.cl_device_ind, double_precision=args.double_precision, cascade_subdir=args.cascade_subdir, tmp_results_dir=tmp_results_dir, save_user_script_info=None) if __name__ == '__main__': ModelFit().start() PKjUpI/Qjmdt/cli_scripts/mdt_math_img.py#!/usr/bin/env python # PYTHON_ARGCOMPLETE_OK """Evaluate an expression on a set of images. This is meant to quickly convert/combine one or two maps with a mathematical expression. The expression can be any valid python expression. The input list of images are loaded as numpy arrays and stored in the array 'input' and 'i'. 
Next, the expression is evaluated using the input images and the result is stored in the indicated file. In the expression you can either use the arrays 'input' or 'i' with linear indices, or/and you can use alphabetic characters for each image. For example, if you have specified 2 input images you can address them as: - input[0] or i[0] or a - input[1] or i[1] or b This linear alphabetic indexing works with every alphabetic character except for the 'i' since that one is reserved for the array. The module numpy is available under 'np' and some functions of MDT under 'mdt'. This allows expressions like:: np.mean(np.concatenate(i, axis=3), axis=3) to get the mean value per voxel of all the input images. It is possible to change the mode of evaluation from single expression to a more complex python statement using the switch --as-statement (the default is --as-expression). In a statement more complex python commands are allowed. In statement mode you must explicitly output the results using 'return'. (Basically it wraps your command in a function, of which the output is used as expression value). If no output file is specified and the output is of dimension 2 or lower we print the output directly to the console. """ import argparse import glob import os import numpy as np import mdt from argcomplete.completers import FilesCompleter import textwrap from mdt.shell_utils import BasicShellApplication from mdt.utils import split_image_path __author__ = 'Robbert Harms' __date__ = "2015-08-18" __maintainer__ = "Robbert Harms" __email__ = "robbert.harms@maastrichtuniversity.nl" class MathImg(BasicShellApplication): def _get_arg_parser(self): description = textwrap.dedent(__doc__) description += self._get_citation_message() epilog = textwrap.dedent(""" Examples of use: mdt-math-img fiso.nii ficvf.nii '(1-input[0]) * i[1]' -o Wic.w.nii.gz mdt-math-img fiso.nii ficvf.nii '(1-a) * b' -o Wic.w.nii.gz mdt-math-img *.nii.gz 'np.mean(np.concatenate(i, axis=3), axis=3)' -o output.nii.gz mdt-math-img FA.nii.gz 'np.mean(a)' mdt-math-img FA.nii white_matter_mask.nii 'np.mean(mdt.create_roi(a, b))' mdt-math-img images*.nii.gz mask.nii 'list(map(lambda f: np.mean(mdt.create_roi(f, i[-1])), i[0:-1]))' """) parser = argparse.ArgumentParser(description=description, epilog=epilog, formatter_class=argparse.RawTextHelpFormatter) parser.add_argument('input_files', metavar='input_files', nargs="+", type=str, help="The input images to use") parser.add_argument('expr', metavar='expr', type=str, help="The expression/statement to evaluate.") parser.add_argument('--as-expression', dest='as_expression', action='store_true', help="Evaluates the given string as an expression (default).") parser.add_argument('--as-statement', dest='as_expression', action='store_false', help="Evaluates the given string as an statement.") parser.set_defaults(as_expression=True) parser.add_argument('-o', '--output-file', action=mdt.shell_utils.get_argparse_extension_checker(['.nii', '.nii.gz', '.hdr', '.img']), help='the output file, if not set nothing is written').completer = \ FilesCompleter(['nii', 'gz', 'hdr', 'img'], directories=False) parser.add_argument('-4d', '--input-4d', action='store_true', help='Add a singleton dimension to all input 3d maps to make them 4d, this prevents ' 'some broadcast issues.') parser.add_argument('--verbose', '-v', action='store_true', help="Verbose, prints runtime information") return parser def run(self, args): write_output = args.output_file is not None if write_output: output_file = 
os.path.realpath(args.output_file) if os.path.isfile(output_file): os.remove(output_file) file_names = [] for file in args.input_files: file_names.extend(glob.glob(file)) if args.verbose: print('') images = [mdt.load_nifti(dwi_image).get_data() for dwi_image in file_names] if args.input_4d: images = self._images_3d_to_4d(images) context_dict = {'input': images, 'i': images, 'np': np, 'mdt': mdt} alpha_chars = list('abcdefghjklmnopqrstuvwxyz') for ind, image in enumerate(images): context_dict.update({alpha_chars[ind]: image}) if args.verbose: print('Input {ind} ({alpha}):'.format(ind=ind, alpha=alpha_chars[ind])) print(' name: {}'.format(split_image_path(file_names[ind])[1])) print(' shape: {}'.format(str(image.shape))) if args.verbose: print('') print("Evaluating: '{expr}'".format(expr=args.expr)) if args.as_expression: output = eval(args.expr, context_dict) else: expr = textwrap.dedent(''' def mdt_image_math(): {} output = mdt_image_math() ''').format(args.expr) exec(expr, context_dict) output = context_dict['output'] if args.verbose: print('') if isinstance(output, np.ndarray): print('Output shape: {shape}'.format(shape=str(output.shape))) else: print('Output is single value') print('Output: ') print('') print(output) else: if not write_output: print(output) if args.verbose: print('') if write_output: mdt.write_image(output_file, output, mdt.load_nifti(file_names[0]).get_header()) def _images_3d_to_4d(self, images): return list([image[..., np.newaxis] if len(image.shape) == 3 else image for image in images]) if __name__ == '__main__': MathImg().start() PKjUpI{~\\mdt/cli_scripts/MDT.py#!/usr/bin/env python # PYTHON_ARGCOMPLETE_OK from mdt.cli_scripts.mdt_gui import GUI __author__ = 'Robbert Harms' __date__ = "2015-08-18" __maintainer__ = "Robbert Harms" __email__ = "robbert.harms@maastrichtuniversity.nl" """A shortcut for mdt_gui""" class GUI_Shortcut(GUI): pass if __name__ == '__main__': GUI_Shortcut().start() PKjUpI-{{(mdt/cli_scripts/mdt_generate_protocol.py#!/usr/bin/env python # PYTHON_ARGCOMPLETE_OK """Generate a protocol from a bvec and bval file. MDT uses a protocol file (with extension .prtcl) to store all the acquisition related values. This is a column based file which can hold, next to the b-values and gradient directions, the big Delta, small delta, gradient amplitude G and more of these extra acquisition details. 
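The same conversion is also available from Python through the functions this script wraps
(a minimal sketch; the file names are placeholders):

.. code-block:: python

    from mdt.protocols import load_bvec_bval, write_protocol

    # build a Protocol object from bvec/bval files and store it as a .prtcl file
    protocol = load_bvec_bval(bvec='data.bvec', bval='data.bval')
    write_protocol(protocol, 'data.prtcl')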
""" import argparse import os from argcomplete.completers import FilesCompleter import textwrap import mdt.protocols from mdt.shell_utils import BasicShellApplication, get_citation_message from mdt.protocols import load_bvec_bval __author__ = 'Robbert Harms' __date__ = "2015-08-18" __maintainer__ = "Robbert Harms" __email__ = "robbert.harms@maastrichtuniversity.nl" class GenerateProtocol(BasicShellApplication): def __init__(self): mdt.init_user_settings(pass_if_exists=True) def _get_arg_parser(self): description = textwrap.dedent(__doc__) description += get_citation_message() epilog = textwrap.dedent(""" Examples of use: mdt-generate-protocol data.bvec data.bval mdt-generate-protocol data.bvec data.bval -o my_protocol.prtcl mdt-generate-protocol data.bvec data.bval mdt-generate-protocol data.bvec data.bval --Delta 30 --delta 20 mdt-generate-protocol data.bvec data.bval --sequence-timing-units 's' --Delta 0.03 mdt-generate-protocol data.bvec data.bval --TE ../my_TE_file.txt """) parser = argparse.ArgumentParser(description=description, epilog=epilog, formatter_class=argparse.RawTextHelpFormatter) parser.add_argument('bvec', help='the gradient vectors file').completer = FilesCompleter() parser.add_argument('bval', help='the gradient b-values').completer = FilesCompleter() parser.add_argument('-s', '--bval-scale-factor', type=float, help="We expect the b-values in the output protocol in units of s/m^2. " "Example use: 1 or 1e6. The default is autodetect.") parser.add_argument('-o', '--output_file', help='the output protocol, defaults to ".prtcl" in the same ' 'directory as the bvec file.').completer = FilesCompleter() parser.add_argument('--sequence-timing-units', choices=('ms', 's'), default='ms', help="The units of the sequence timings. The default is 'ms' which we will convert to 's'.") parser.add_argument('--G', help="The gradient amplitudes in T/m.") parser.add_argument('--maxG', help="The maximum gradient amplitude in T/m. This is only useful if we need to guess " "big Delta and small delta. 
Default is 0.04 T/m") parser.add_argument('--Delta', help="The big Delta to use, either a single number or a file with either a single number " "or one number per gradient direction.") parser.add_argument('--delta', help="The small delta to use, either a single number or a file with either a single number " "or one number per gradient direction.") parser.add_argument('--TE', help="The TE to use, either a single number or a file with either a single number " "or one number per gradient direction.") parser.add_argument('--TR', help="The TR to use, either a single number or a file with either a single number " "or one number per gradient direction.") return parser def run(self, args): bvec = os.path.realpath(args.bvec) bval = os.path.realpath(args.bval) if args.output_file: output_prtcl = os.path.realpath(args.output_file) else: output_prtcl = os.path.join(os.path.dirname(bvec), os.path.splitext(os.path.basename(bvec))[0] + '.prtcl') if args.bval_scale_factor: bval_scale_factor = float(args.bval_scale_factor) else: bval_scale_factor = 'auto' protocol = load_bvec_bval(bvec=bvec, bval=bval, bval_scale=bval_scale_factor) if args.G is None and args.maxG is not None: if os.path.isfile(str(args.maxG)): protocol.add_column_from_file('maxG', os.path.realpath(str(args.maxG)), 1) else: protocol.add_column('maxG', float(args.maxG)) if args.Delta is not None: add_sequence_timing_column_to_protocol(protocol, 'Delta', args.Delta, args.sequence_timing_units) if args.delta is not None: add_sequence_timing_column_to_protocol(protocol, 'delta', args.delta, args.sequence_timing_units) if args.TE is not None: add_sequence_timing_column_to_protocol(protocol, 'TE', args.TE, args.sequence_timing_units) if args.TR is not None: add_sequence_timing_column_to_protocol(protocol, 'TR', args.TR, args.sequence_timing_units) if args.G is not None: add_column_to_protocol(protocol, 'G', args.G, 1) mdt.protocols.write_protocol(protocol, output_prtcl) def add_column_to_protocol(protocol, column, value, mult_factor): if value is not None: if os.path.isfile(value): protocol.add_column_from_file(column, os.path.realpath(value), mult_factor) else: protocol.add_column(column, float(value) * mult_factor) def add_sequence_timing_column_to_protocol(protocol, column, value, units): mult_factor = 1e-3 if units == 'ms' else 1 add_column_to_protocol(protocol, column, value, mult_factor) if __name__ == '__main__': GenerateProtocol().start() PKjUpI8<^!mdt/cli_scripts/mdt_apply_mask.py#!/usr/bin/env python # PYTHON_ARGCOMPLETE_OK """Mask the background using the given mask. This function multiplies a given volume (or list of volumes) with a binary mask. Please note that this changes the input files (changes are in-place). 
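The same operation can be performed from Python with the functions this script itself uses
(a minimal sketch; the file names are placeholders):

.. code-block:: python

    import mdt

    # load the binary mask and zero the background of the given volume, in-place
    mask = mdt.load_brain_mask('my_mask.nii.gz')
    mdt.apply_mask_to_file('data.nii.gz', mask)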
""" import argparse import glob import os import mdt from argcomplete.completers import FilesCompleter from mdt.shell_utils import BasicShellApplication import textwrap __author__ = 'Robbert Harms' __date__ = "2015-08-18" __maintainer__ = "Robbert Harms" __email__ = "robbert.harms@maastrichtuniversity.nl" class ApplyMask(BasicShellApplication): def _get_arg_parser(self): description = textwrap.dedent(__doc__) description += mdt.shell_utils.get_citation_message() epilog = textwrap.dedent(""" Examples of use: mdt-apply-mask data.nii.gz -m roi_mask_0_50.nii.gz mdt-apply-mask *.nii.gz -m my_mask.nii.gz """) parser = argparse.ArgumentParser(description=description, epilog=epilog, formatter_class=argparse.RawTextHelpFormatter) parser.add_argument('input_files', metavar='input_files', nargs="+", type=str, help="The input images to use") parser.add_argument('-m', '--mask', required=True, action=mdt.shell_utils.get_argparse_extension_checker(['.nii', '.nii.gz', '.hdr', '.img']), help='the (brain) mask to use').completer = FilesCompleter(['nii', 'gz', 'hdr', 'img'], directories=False) return parser def run(self, args): mask = mdt.load_brain_mask(os.path.realpath(args.mask)) file_names = [] for file in args.input_files: file_names.extend(glob.glob(file)) for file in file_names: mdt.apply_mask_to_file(file, mask) if __name__ == '__main__': ApplyMask().start() PKjUpI"mdt/cli_scripts/mdt_list_models.py#!/usr/bin/env python # PYTHON_ARGCOMPLETE_OK """This script prints a list of all the models MDT can find in your home directory.""" import argparse import mdt import mdt.utils from mdt.shell_utils import BasicShellApplication __author__ = 'Robbert Harms' __date__ = "2015-08-18" __maintainer__ = "Robbert Harms" __email__ = "robbert.harms@maastrichtuniversity.nl" class ListModels(BasicShellApplication): def _get_arg_parser(self): description = __doc__ description += mdt.shell_utils.get_citation_message() parser = argparse.ArgumentParser(description=description, formatter_class=argparse.RawTextHelpFormatter) parser.add_argument('-l', '--long', action='store_true', help='print the descriptions') return parser def run(self, args): mdt.init_user_settings(pass_if_exists=True) meta_info = mdt.get_models_meta_info() models = mdt.get_models_list() max_model_name = max(map(len, models)) for model in models: if args.long: print(('%-' + str(max_model_name + 2) + 's%-s') % (model, meta_info[model]['description'])) else: print(model) if __name__ == '__main__': ListModels().start() PKjUpIBQ! ! #mdt/cli_scripts/mdt_volume_merge.py#!/usr/bin/env python # PYTHON_ARGCOMPLETE_OK """Merge a list of volume images on the 4th dimension. Writes the result to a single image. Please note that by default this will sort the list of volume names based on a natural key sort. This is the most convenient option in the case of globbing files. You can disable this behaviour using the flag --no-sort. 
""" import argparse import glob import os from argcomplete.completers import FilesCompleter import textwrap from mdt.utils import volume_merge from mdt.shell_utils import BasicShellApplication, get_argparse_extension_checker __author__ = 'Robbert Harms' __date__ = "2015-08-18" __maintainer__ = "Robbert Harms" __email__ = "robbert.harms@maastrichtuniversity.nl" class VolumeMerge(BasicShellApplication): def _get_arg_parser(self): description = textwrap.dedent(__doc__) description += self._get_citation_message() epilog = textwrap.dedent(""" Examples of use: mdt-volume-merge merged.nii.gz *.nii.gz mdt-volume-merge --no-sort merged.nii.gz *.nii.gz """) parser = argparse.ArgumentParser(description=description, epilog=epilog, formatter_class=argparse.RawTextHelpFormatter) parser.add_argument('-o', '--output_file', required=True, action=get_argparse_extension_checker(['.nii', '.nii.gz', '.hdr', '.img']), help='the filename of the output file').completer = \ FilesCompleter(['nii', 'gz', 'hdr', 'img'], directories=False) parser.add_argument("input_files", metavar="input_files", nargs="+", type=str, help="The input images to merge") parser.add_argument('--sort', dest='sort', action='store_true', help="Sort the input images using a natural sort (default)") parser.add_argument('--no-sort', dest='sort', action='store_false', help="Do not sort the input images") parser.set_defaults(sort=True) parser.add_argument('--no-merge-order-file', dest='no_merge_order_file', action='store_true', help="Do not write the merge order file") return parser def run(self, args): output_file = os.path.realpath(args.output_file) if os.path.isfile(output_file): os.remove(output_file) file_names = [] for file in args.input_files: file_names.extend(glob.glob(file)) concatenated_names = volume_merge(file_names, output_file, sort=args.sort) if not args.no_merge_order_file: info_output_file = os.path.splitext(output_file)[0].replace('.nii', '') + '_merge_order.txt' if os.path.isfile(info_output_file): os.remove(info_output_file) with open(info_output_file, 'w') as f: f.write('Files merged in this order:\n') for name in concatenated_names: f.write(name + '\n') if __name__ == '__main__': VolumeMerge().start() PKjUpImdt/cli_scripts/__init__.pyPKjUpIvKP)mdt/cli_scripts/mdt_init_user_settings.py#!/usr/bin/env python # PYTHON_ARGCOMPLETE_OK """This script is meant to update your home folder with the latest MDT models.""" import argparse import os import textwrap import mdt from mdt.configuration import get_config_dir from mdt.shell_utils import BasicShellApplication __author__ = 'Robbert Harms' __date__ = "2015-08-18" __maintainer__ = "Robbert Harms" __email__ = "robbert.harms@maastrichtuniversity.nl" class InitUserSettings(BasicShellApplication): def _get_arg_parser(self): description = textwrap.dedent(__doc__ + """ The location we will write to is: {} """.format(os.path.dirname(get_config_dir()))) description += mdt.shell_utils.get_citation_message() parser = argparse.ArgumentParser(description=description, formatter_class=argparse.RawTextHelpFormatter) parser.add_argument('--pass-if-exists', dest='pass_if_exists', action='store_true', help="do nothing if the config dir exists (default)") parser.add_argument('--always-overwrite', dest='pass_if_exists', action='store_false', help="always overwrite the config directory with the default settings") parser.set_defaults(pass_if_exists=False) return parser def run(self, args): mdt.init_user_settings(pass_if_exists=args.pass_if_exists) if __name__ == '__main__': InitUserSettings().start() 
PKjUpI$mdt/cli_scripts/mdt_gui.py#!/usr/bin/env python # PYTHON_ARGCOMPLETE_OK """Launches the MDT Graphical User Interface.""" import argparse import os import textwrap from argcomplete.completers import FilesCompleter from mdt import init_user_settings from mdt.gui.model_fit.qt_main import start_gui from mdt.shell_utils import BasicShellApplication, get_citation_message __author__ = 'Robbert Harms' __date__ = "2015-08-18" __maintainer__ = "Robbert Harms" __email__ = "robbert.harms@maastrichtuniversity.nl" class GUI(BasicShellApplication): def __init__(self): init_user_settings(pass_if_exists=True) def _get_arg_parser(self): description = textwrap.dedent(__doc__) description += get_citation_message() parser = argparse.ArgumentParser(description=description, formatter_class=argparse.RawTextHelpFormatter) parser.add_argument('-d', '--dir', metavar='dir', type=str, help='the base directory for the file choosers', default=None).completer = FilesCompleter() return parser def run(self, args): if args.dir: cwd = os.path.realpath(args.dir) else: cwd = os.getcwd() start_gui(cwd) if __name__ == '__main__': GUI().start() PKjUpIZe mdt/data_loaders/noise_std.pyimport six import numpy as np import numbers from mdt.nifti import load_nifti __author__ = 'Robbert Harms' __date__ = "2015-08-25" __maintainer__ = "Robbert Harms" __email__ = "robbert.harms@maastrichtuniversity.nl" def autodetect_noise_std_loader(data_source): """A function to get a noise std using the given data source. This tries to do auto detecting for the following data sources: - None: return 1 - double: uses the given single value for all voxels - ndarray: use a value per voxel (this should not be a roi list, it should be an actual volume of the same size as the dataset) - string (and not 'auto'): a filename we will try to parse as a noise std - the string 'auto': try to estimate the noise std Args: data_source: the data source from which to get a noise std Returns: NoiseStdLoader: a noise std loader instance. """ if isinstance(data_source, NoiseStdLoader): return data_source elif data_source is None: return NoiseEstimationLoader() elif isinstance(data_source, numbers.Number): return SingleValueNoiseStd(data_source) elif isinstance(data_source, np.ndarray): return VoxelWiseNoiseStd(data_source) elif isinstance(data_source, six.string_types): return LoadNoiseFromFile(data_source) raise ValueError('The given data source could not be recognized.') class NoiseStdLoader(object): """Interface for loading a noise std from different sources.""" def get_noise_std(self, problem_data): """The public method for getting the noise std from this loader. Args: problem_data (:class:`~mdt.utils.DMRIProblemData`): the dmri problem data in use. Some loaders might need this for loading the noise std. 
Returns: noise std, either a single value or an ndarray with a value per voxel """ class NoiseEstimationLoader(NoiseStdLoader): def __init__(self): """A loader that estimates the noise std from the problem data""" def get_noise_std(self, problem_data): from mdt.utils import estimate_noise_std return estimate_noise_std(problem_data) class SingleValueNoiseStd(NoiseStdLoader): def __init__(self, noise_std): """Returns the given noise std""" self._noise_std = noise_std def get_noise_std(self, problem_data): return self._noise_std class VoxelWiseNoiseStd(NoiseStdLoader): def __init__(self, noise_std_map): """Returns a noise std map with one value per voxel.""" self._noise_std_map = noise_std_map def get_noise_std(self, problem_data): return self._noise_std_map class LoadNoiseFromFile(NoiseStdLoader): def __init__(self, file_name): """Load a noise std from a file. This will try to detect if the given file is a text file with a single noise std, or if it is a nifti / map file with a voxel wise noise std. """ self._file_name = file_name def get_noise_std(self, problem_data): if self._file_name[-4:] == '.txt': with open(self._file_name, 'r') as f: return float(f.read()) return load_nifti(self._file_name).get_data() PKjUpI UUmdt/data_loaders/brain_mask.pyimport six import numpy as np __author__ = 'Robbert Harms' __date__ = "2015-08-25" __maintainer__ = "Robbert Harms" __email__ = "robbert.harms@maastrichtuniversity.nl" def autodetect_brain_mask_loader(data_source): """A function to get a brain mask loader using the given data source. This tries to do auto detecting for the following data sources: - :class:`BrainMaskLoader` - strings (filenames) - ndarray (3d containing the mask) Args: data_source: the data source from which to get a brain_mask loader Returns: BrainMaskLoader: a brain_mask loader instance. """ if isinstance(data_source, BrainMaskLoader): return data_source elif isinstance(data_source, six.string_types): return BrainMaskFromFileLoader(data_source) elif isinstance(data_source, np.ndarray): return BrainMaskFromArray(data_source) raise ValueError('The given data source could not be recognized.') class BrainMaskLoader(object): """Interface for loading brain_masks from different sources.""" def get_data(self): """The public method used to get an instance of a brain mask. Returns: ndarray: 3d ndarray containing the brain mask """ class BrainMaskFromFileLoader(BrainMaskLoader): def __init__(self, filename): """Loads a brain mask from the given filename. Args: filename (str): the filename to use the brain mask from. """ self._filename = filename self._brain_mask = None self._header = None def get_data(self): if self._brain_mask is None: from mdt.nifti import load_nifti self._brain_mask = load_nifti(self._filename).get_data() > 0 return self._brain_mask class BrainMaskFromArray(BrainMaskLoader): def __init__(self, mask_data): """Adapter for returning an already loaded brain mask. Args: ndarray (ndarray): the brain mask data (3d matrix) """ self._mask_data = mask_data def get_data(self): return self._mask_data > 0 PKjUpIG~ mdt/data_loaders/protocol.pyimport os import six from mdt.protocols import Protocol, load_protocol, auto_load_protocol __author__ = 'Robbert Harms' __date__ = "2015-08-25" __maintainer__ = "Robbert Harms" __email__ = "robbert.harms@maastrichtuniversity.nl" def autodetect_protocol_loader(data_source): """A function to get a protocol loader using the given data source. 
This tries to do auto detecting for the following data sources: - :class:`ProtocolLoader` - strings (filename or directory) - functions - protocol objects If a directory is given we try to auto use the protocol from sources in that directory. Args: data_source: the data source from which to get a protocol loader Returns: ProtocolLoader: a protocol loader instance. """ if isinstance(data_source, ProtocolLoader): return data_source elif isinstance(data_source, six.string_types): if os.path.isfile(data_source): return ProtocolFromFileLoader(data_source) else: return ProtocolFromDirLoader(data_source) elif hasattr(data_source, '__call__'): return ProtocolFromFunctionLoader(data_source) elif isinstance(data_source, Protocol): return ProtocolDirectLoader(data_source) raise ValueError('The given data source could not be recognized.') class ProtocolLoader(object): """Interface for loading protocols from different sources.""" def get_protocol(self): """The public method used to get an instance of a protocol. Returns: Protocol: a protocol object """ class ProtocolFromFileLoader(ProtocolLoader): def __init__(self, filename): """Loads a protocol from the given filename. Args: filename (str): the filename to use the protocol from. """ super(ProtocolFromFileLoader, self).__init__() self._filename = filename self._protocol = None def get_protocol(self): if self._protocol is None: self._protocol = load_protocol(self._filename) return self._protocol class ProtocolFromDirLoader(ProtocolLoader): def __init__(self, directory): """Loads a protocol from the given filename. Args: directory (str): the directory to use the protocol from. """ super(ProtocolFromDirLoader, self).__init__() self._directory = directory self._protocol = None def get_protocol(self): if self._protocol is None: self._protocol = auto_load_protocol(self._directory) return self._protocol class ProtocolDirectLoader(ProtocolLoader): def __init__(self, protocol): """Adapter for returning an already loaded protocol. Args: protocol (Protocol): the loaded protocol to return. """ super(ProtocolDirectLoader, self).__init__() self._protocol = protocol def get_protocol(self): return self._protocol class ProtocolFromFunctionLoader(ProtocolLoader): def __init__(self, func): """Load a protocol from a callback function. This class may apply caching. Args: func: the callback function to call on the moment the protocol is to be loaded. """ super(ProtocolFromFunctionLoader, self).__init__() self._func = func self._protocol = None def get_protocol(self): if self._protocol is None: self._protocol = self._func() return self._protocol PKjUpI$mdt/data_loaders/__init__.py__author__ = 'Robbert Harms' __date__ = "2015-08-25" __maintainer__ = "Robbert Harms" __email__ = "robbert.harms@maastrichtuniversity.nl" PK0GsI mdt/data/logo.svg image/svg+xml MDT PKDPIk/mdt/data/mdt.conf# Specifics for the output format of optimization and sampling # the options gzip determine if the volumes are written as .nii or as .nii.gz output_format: optimization: gzip: True sampling: gzip: True # The default temporary results directory for optimization and sampling. Set to !!null to disable and to use the # per subject directory. For linux a good value can be: #tmp_results_dir: /tmp/mdt # where /tmp can be memory mapped. tmp_results_dir: !!null runtime_settings: # The single device index or a list with device indices to use during OpenCL processing. # For a list of possible values, please run mdt_list_devices or view the device list in the GUI. 
cl_device_ind: !!null optimization: # The default optimizer to use for all model fitting. general: name: 'Powell' settings: patience: 2 model_specific: {} # The model specific optimization routines. # For the multi-step optimizer use something like: # # '^NODDI$': # name: 'MultiStepOptimizer' # settings: # optimizers: # - name: 'GridSearch' # settings: # patience: 100 # grid_generator: # GaussianRandomGrid: {} # - name: 'Powell' # # Note the directive 'optimizers' to signify to the configuration loader to load optimizers recursively. sampling: # The default sampler to use for model sampling. general: name: 'MetropolisHastings' settings: nmr_samples: 500 burn_length: 500 sample_intervals: 5 proposal_update_intervals: 50 # options for estimating the noise std before model fitting and or sampling noise_std_estimating: # the optimization routine will use the given estimators in the given order estimators: - AllUnweightedVolumes - TwoUnweightedVolumes - AverageOfAir_ExtendedMask - AverageOfAir_DilatedMask # The strategies for processing the models processing_strategies: optimization: general: name: ProtocolDependent options: steps: [[0, 0], [100, 60000], [200, 40000]] model_specific: # The processing strategies for specific models. This overrides the general processing strategy. # This uses regex expressions for matching the model_name. As keys for the items you can either provide # a string with a single regex, or you can provide a !!python/tuple [...] with multiple keys that is # partially matched if needed. # # Example for Noddi in a cascade: # # !!python/tuple ['^NODDI \(Cascade[|a-zA-Z0-9_]*\)$', '^NODDI']: # ... '^S0$': name: AllVoxelsAtOnce '^\w+\-ExVivo$': name: VoxelRange options: nmr_voxels: 1000000 '^NODDI$': name: ProtocolDependent options: steps: [[0, 0], [100, 60000], [200, 40000]] '^CHARMED_r[1-2]$': name: ProtocolDependent options: steps: [[0, 0], [100, 60000], [200, 10000]] '^CHARMED_r3$': name: ProtocolDependent options: steps: [[0, 0], [100, 40000], [200, 10000]] '^BallStick_r3$': name: ProtocolDependent options: steps: [[0, 0], [100, 60000], [200, 40000]] sampling: general: name: VoxelRange options: nmr_voxels: 100000 model_specific: '^S0(-T2)?$': name: VoxelRange options: nmr_voxels: 1000000 logging: info_dict: version: 1 disable_existing_loggers: False formatters: simple: format: "[%(asctime)s] [%(levelname)s] [%(name)s] [%(funcName)s] - %(message)s" handlers: console: class: mdt.log_handlers.StdOutHandler level: INFO formatter: simple model_output_file: class: mdt.log_handlers.ModelOutputLogHandler level: DEBUG formatter: simple encoding: utf8 dispatch_handler: class: mdt.log_handlers.LogDispatchHandler level: INFO formatter: simple loggers: mot: level: DEBUG handlers: [console, model_output_file] mdt: level: DEBUG handlers: [console, model_output_file] root: level: INFO handlers: [dispatch_handler] PK.JsIz} mdt/data/logo_gui.pngPNG  IHDR(St pHYs otIME  91 $IDATx{PSWoI%$.Vm+Ek}:_κժu[gwgWgagZuNZDSBYGPPD !$@xroG:i ;7O;{/`W}whA9 h;Ql Hq`Z,6$tH@@ x44i( P)FM( Rp!NH5;0hWOT Cq^;DŽᩎKK[ZCnwH X|##pS% գR *HpT'NuzswO? 
[binary PNG image data omitted: remainder of mdt/data/logo_gui.png and the start of mdt/data/logo_docs.png]
Qv?gY;U {uIzyGwvlz @?&Q#:n[k@Ys{봓;zbg1I5aF}}cүug~ߥK'Ucl}@o5RZ|ǯ9ݳG՞i}֤[m oM{@߯֔#ӟs|sÔ{lV#Ӟs0?9zG[f2Щ5 |y/5-ز^:~T?ko,tӞW]qa}ZEih=2i7TNfVV*8Or괿:~M/w[ld\ 5o>V|8|]y.9rmۋm򈪫ۡ'guN1scw‹oku_Ш{ #kwp#/+(븳"m<8vYۧ}wnrzuw#ˇׄ=+Zz ISkY3}G4}G8dU&x|_EԚ5naY!+T g7.}(q%Щ5E{WW;znݺj􅧤_plږuvu >5aA(]EԚ"mkڌ;~݉P;G^UC|MX]kjIΞA=\_Q\᥅mpZ2is0݁N)kzi?0mߎWV~Q5H}}̂DY}D)GCC=QGH׺֪CdR~5{]YY s ZS[z:<"h4f<׸+wګ8zMb>Oׄ9m tjMᦩ.t-C}TVGjrӛ=WTԚ /{G\5{t?L?␿Q#oU۲NO=Q5k=_ztԚ;ɤ;-lja77zEm߾"Lzvg޲ tjMax&4lȁ_1l .)pt=`֭:^sD)ב\iUs>C]=:b>v^GW7]^qGՖ-_єL5;v$У"W\^E)f /d{GQ5RCcefί-[rI޾89sv{QG^41?̈I-9kgŲI sI+.+jOmۜ-Adee*MzM;k[l]0cr,q%A(7jG:@W\>jmkLk`.&8Y=*G%OtỉX,I l>¾妉_swlǯp9ΞGq~0knmMQ%]!C0a#UC z{=:Z{kri̩B$3 4-[F^yT|yKkŒ*}jSmc,r{(`oS]xV5}j3c/]0\I\I6!4›Z#lNۡ'g5=pXڟw༴n&.zfɺFI[f ǟQ5lPٵAo0'z끆53إ@۶)3cPl֣U ڥ隫lP2h㾟bǮk4Ȓ}ek~v11U|1'?Pmէ{rΊ3-9ʖ}-:u]>{ObzȳO|ͅy&q@Ǟd,]/iGt}SBcHV|1(|w_0Ǖ U`fuCUdKHJ//p:ܛDJ T#y.-he53e$e7B4ㅷaۡ'R7q(\s]Ojjxfi5x&©)/iFg@pk,/{ZjZiE{< vU0Rw)##λb^}YjYjZMj#l^y_KqT 7Gƞɤ0@otKkfNYْx\mfMCy[nL}EZ&ׅYŒ3^):~wMKo-tOVz&GhYU=ua@߭u@jZӼQ5_5m?(IﶧI%=kz缩kھ}Ӌ`ܹ~uڥg// \\[3s458&MM GՐ (36Ã*_z jZњOqGՐ tCO?[f:%jZqh7n3v%_@KjW[z{Ń=#xނVa:n8\=5v4k>۠g{ 0+0ނVo[rOW׋/ʵlq]s%zbOԷO/gw=FczjZfM _NNMIOgL70ȋc #/QS3k+gedطYҷ? }} l>Ǯ}Gf=cCYSG uaCg&M!cB~մbzxI>3/i:GCz)z*%@W`ƛN+cYKR?3;3oCj۶0\$ed5lq::e` *W<ϽϒUz1 f3g.om$/6n3~Yc.;z}_y:*yQcU8WѸcg)ھ7ec7צi[l4wmid-@IDATiU̽ [3Mnz#Ӌz⩹ǀƐ i|FwǀE&w@p]w> nԴԿ/TW(Hz jZ'+kЦM@oRM?^+~6>G7{j̬ޥd5jT6V]Q~i3fCW^sm9RsZ`"I-YՒL vm?=f[g:)YHM+}PSKL#-z jjf]0*$Q 1 ,yu1@ojZ8mo~'ȳH@M+ZJ'( =}Դhjjuf빿.۶M!ۉVill sCE8)!ԴߢVh_6!G;lɂ/J/H,o˾Sz 7ŗsM\kz$Dɥ5;ҧc Y$IYOWW͢eY\o}cMa >IҤŠ+%]lz&m- Zs-[fVђjm =PŒFT̪0=v YKd۶n.Iڹs6o٦[eKlݮ͛wyK>߰Ik>a jJKXZM#Ix\]9qev4;k,~m K:tT.|qewkW8FRXx[-yF(Wʖ}YE5^شfq^ſi NL`C}VȰooF<@ t/Gi.vOQ -!5RA@M+C:Դ8v@$ˊa*3=  Ё+H=Vri"@V:ԴF!CM+M:RԴ±5 i:ԴGBM+^:AԴGo@W5q e{U1|:6'@Zi@.E#u-2hib1ԴGM+=kiGb\CM+v,/YԴ"ءjZU|%jZ5:0=tFM+:ȠaFjZF| riECiԴ",t;hla,GgA^_#;ERYdYctxx,]/iyTCAB@+iE"jZg|)~@M+oBhjZ:XAbxjZaii}V;t/?njVojZcOMK1= @B@@QӊpԴB5ء@(i[Q=:5C@Qԧ@Q "i 7"`PYWQ.:DW\1Ʋti >rڴf!5@Mk GnqI]EMkPD:.;i pHE㎳MSo: %ŕ(fWVYU[dˆi!%v6t@r-MS,YLu:ݨi5T@Qj;ti8z@Vo7QΡ\EM7ءZ,),~=t@`lZ_O-k1=ء}@]tYL"aVBaOMkSVvdKŚHDSjZ t@E@DBkZ t@@DRjZ9մCD^jZ t@`?Ai: bM+;t!(5:){M+@|[=t5HjZ t)wPdJPS38lM+LմVTܚvM+pkjieGiTxؒmdM+!NִrCie@Gҭi%6״rjkM+;t|.V8TM+@@ r^-֐5Iԁ@  bSdIENDB`PK$ZIWL1mdt/data/qt_designs/maps_visualizer/TabTextual.ui TabTextual 0 0 400 300 Form 0 0 0 0 0 4 TextLabel Qt::AlignCenter true TextConfigEditor QPlainTextEdit
..widgets
textConfigEdit
PKPIsIA,mdt/data/qt_designs/maps_visualizer/main.qrc arrow_redo.png arrow_undo.png ../../logo_gui.png PKd>~I[ 1mdt/data/qt_designs/maps_visualizer/TabGeneral.ui TabGeneral 0 0 963 704 Form 0 0 0 0 0 Qt::ScrollBarAlwaysOff true 0 -479 946 1211 6 6 6 6 0 10 QFrame::NoFrame QFrame::Plain 0 0 0 0 0 75 true Index Qt::Horizontal QFrame::NoFrame QFrame::Plain 6 6 0 0 6 3 3 / x 3 / x Dimension: Slice index: Volume: 3 / x QFrame::NoFrame QFrame::Plain 0 0 0 0 0 75 true Miscellaneous Qt::Horizontal QFrame::NoFrame QFrame::Plain 6 3 6 6 0 0 Colormap: Rotate: Show axis: Colorbar nmr ticks: 2 10 Flip up/down: Interpolation: Plot title: Mask: QFrame::NoFrame QFrame::Plain 0 0 0 0 0 75 true Display order Qt::Horizontal QFrame::NoFrame QFrame::Plain 6 6 0 0 6 3 0 1 QAbstractItemView::MultiSelection QFrame::NoFrame QFrame::Plain 0 0 0 0 0 75 true Map selection Qt::Horizontal QFrame::NoFrame QFrame::Plain 6 6 0 0 6 3 0 1 QAbstractItemView::MultiSelection 0 Deselect all Invert selection QFrame::NoFrame QFrame::Plain 0 0 0 0 0 75 true Info Qt::Horizontal QFrame::NoFrame QFrame::Plain 6 6 0 0 6 3 Directory: Map count: - - true Qt::LinksAccessibleByMouse Qt::Vertical 20 40 QFrame::NoFrame QFrame::Plain 0 0 0 0 0 75 true Font Qt::Horizontal QFrame::NoFrame QFrame::Plain 6 3 6 6 0 0 Family: Size: 1 QFrame::NoFrame QFrame::Plain 0 0 0 0 0 75 true Zoom Qt::Horizontal QFrame::NoFrame QFrame::Plain 6 6 0 0 6 3 y<sub>0</sub>: <html><head/><body><p>x<span style=" vertical-align:sub;">0</span>:</p></body></html> y<sub>1</sub>: x<sub>1</sub>: Zoom fit Reset CollapsablePanel QFrame
..widgets
1
CollapsablePanelHeader QLabel
..widgets
CollapsablePanelContent QFrame
..widgets
1
MapsReorderer QListWidget
..widgets
scrollArea general_dimension general_slice_index general_volume_index general_zoom_x_0 general_zoom_y_0 general_zoom_x_1 general_zoom_y_1 general_map_selection general_display_order general_colormap general_rotate general_show_axis general_colorbar_nmr_ticks
PK84_I9(8mdt/data/qt_designs/maps_visualizer/save_image_dialog.ui SaveImageDialog 0 0 722 253 Save Image 0 14 Save image true Export the current view to an image. Qt::Horizontal true Browse true true (The width of the image in pixels) true (Dots/pixels per inch) 100 9999 1280 true (The height of the image in pixels) Width: Output file: Height: 100 9999 720 DPI: 10 1000 100 Qt::Vertical 20 40 Qt::Horizontal Qt::Horizontal QDialogButtonBox::Cancel|QDialogButtonBox::Ok dpi_box outputFile_chooser outputFile_box buttonBox accepted() SaveImageDialog accept() 248 254 157 274 buttonBox rejected() SaveImageDialog reject() 316 260 286 274 PK$ZIjp552mdt/data/qt_designs/maps_visualizer/arrow_undo.pngPNG  IHDR;0bKGD pHYs  tIME  2ѡIDATH;al6 P(~6BSoPlZ`F B!"$M wkSLμ;sy<3d=@32q >ī.  g3QMoƻ 8N؈ ͨ4Ͽbwq bgڎȲ9 ?vo@g& oXVI,JW |{+A^|Ku%$=AY7:hδXc>n7 v>\d?ˑP'vKٳ 7.sJĕBbe]Dw[}h`c$vh[U `-aSxucYLԯNcIENDB`PK$ZIe??,mdt/data/qt_designs/maps_visualizer/Makefile###### EDIT ##################### # Directory with ui and resource files QT_RESOURCE_DIR = ./ # Directory for compiled QT_RESOURCES QT_COMPILED_DIR = ../../../gui/maps_visualizer/design # UI files to compile QT_UI_FILES = $(shell ls $(QT_RESOURCE-DIR)*.ui) # Qt resource files to compile QT_RESOURCES = main.qrc # the binaries PYUIC = pyuic5 --from-imports PYRCC = pyrcc5 ################################# # DO NOT EDIT FOLLOWING all : clean resources ui help: @echo "clean - remove all build designer files" @echo "ui - build only thy UI files" @echo "resources - build only the resources" @echo "all - build both the UI and the resources" QT_COMPILED_UI = $(QT_UI_FILES:%.ui=$(QT_COMPILED_DIR)/ui_%.py) QT_COMPILED_RESOURCES = $(QT_RESOURCES:%.qrc=$(QT_COMPILED_DIR)/%_rc.py) resources : $(QT_COMPILED_RESOURCES) ui : $(QT_COMPILED_UI) $(QT_COMPILED_DIR)/ui_%.py : $(QT_RESOURCE_DIR)/%.ui $(PYUIC) $< -o $@ $(QT_COMPILED_DIR)/%_rc.py : $(QT_RESOURCE_DIR)/%.qrc $(PYRCC) $< -o $@ clean : $(RM) -f $(QT_COMPILED_DIR)/ui_*.py $(QT_COMPILED_DIR)/*_rc.py PK$ZI(v2mdt/data/qt_designs/maps_visualizer/arrow_redo.pngPNG  IHDR;0bKGD pHYs  tIME  /9RIDATH׿KaWYQCAT`?ZĠAh5P!"  
*"%o\/^}{NچY;0>2dۈ1ucqķp' ޅpsQj1Y; ڎS88C !É2񫸋[XULVd|OS ؎kx T=GXo0_C>|ݍ@@eո|KY1(lϊ, _,n(Ncd-lEoRb-3P*pHG՛x\W XF Cb ?ggIENDB`PK$ZI0"T T 5mdt/data/qt_designs/maps_visualizer/TabMapSpecific.ui TabMapSpecific 0 0 445 534 Form 0 0 0 0 0 Qt::ScrollBarAlwaysOff true 0 0 443 532 6 6 6 6 6 QFrame::NoFrame QFrame::Raised 0 0 0 0 0 0 Qt::Vertical 20 40 scrollArea_2 selectedMap PKbIsIޥ..1mdt/data/qt_designs/maps_visualizer/MainWindow.ui MapsVisualizer 0 0 1000 754 MDT Maps Visualizer :/main/logo:/main/logo 0 0 0 0 0 QFrame::StyledPanel QFrame::Sunken 0 0 0 0 0 Qt::Horizontal 0 6 0 0 250 0 0 General 0 0 0 0 0 0 Maps 0 0 0 0 0 0 Textual 0 2 6 0 Qt::Horizontal 40 20 Auto render Manually redraw the figure Redraw 16 16 Undo :/main/arrow_undo.png:/main/arrow_undo.png Redo :/main/arrow_redo.png:/main/arrow_redo.png Qt::Horizontal 40 20 0 0 0 0 1000 27 &File &Help &Open directory &Quit Ctrl+Q &Save image Ctrl+S &About &Extra plot options &Browse to current folder &Export settings &Import settings commandTabs actionQuit triggered() MapsVisualizer close() -1 -1 499 374 PK$ZIdC4!O!O9mdt/data/qt_designs/maps_visualizer/MapSpecificOptions.ui MapSpecificOptions 0 0 648 564 Form 0 0 0 0 0 10 Qt::Vertical 20 40 QFrame::NoFrame QFrame::Plain 0 0 0 0 0 75 true General Qt::Horizontal QFrame::NoFrame QFrame::Plain 6 6 0 0 6 3 Colorbar label: Colormap: Title (latex): QFrame::NoFrame QFrame::Plain 0 0 0 0 0 75 true Scale Qt::Horizontal QFrame::NoFrame QFrame::Plain 6 6 0 0 6 3 0 0 Enable: Qt::AlignRight|Qt::AlignTrailing|Qt::AlignVCenter 5 -10000000000.000000000000000 10000000000.000000000000000 0.010000000000000 5 -10000000000.000000000000000 10000000000.000000000000000 0.010000000000000 Min: Max: QFrame::NoFrame QFrame::Plain 0 0 0 0 0 75 true Info Qt::Horizontal QFrame::NoFrame QFrame::Plain 6 6 0 0 6 3 Minimum: TextLabel true Maximum: Filename: TextLabel TextLabel Shape: TextLabel QFrame::NoFrame QFrame::Plain 0 0 0 0 0 75 true Clipping Qt::Horizontal QFrame::NoFrame QFrame::Plain 6 6 0 0 6 3 0 0 Min: Enable: Qt::AlignRight|Qt::AlignTrailing|Qt::AlignVCenter 5 -9999999999.989999771118164 9999999999.989999771118164 0.010000000000000 5 -9999999999.989999771118164 9999999999.989999771118164 0.010000000000000 Max: CollapsablePanel QFrame
..widgets
1
CollapsablePanelHeader QLabel
..widgets
CollapsablePanelContent QFrame
..widgets
1
QDoubleSpinBoxDotSeparator QDoubleSpinBox
..widgets
map_title data_colorbar_label colormap data_scale_min data_scale_max use_data_scale_min use_data_scale_max data_set_use_scale data_clipping_min data_clipping_max use_data_clipping_min use_data_clipping_max data_set_use_clipping
PK$ZI<a-a-8mdt/data/qt_designs/model_fit/generate_brain_mask_tab.ui GenerateBrainMaskTabContent 0 0 827 427 Form 11 0 0 14 Generate brian mask true Create a whole brain mask using the median-otsu algorithm. QFrame::Sunken 1 Qt::Horizontal QLayout::SetDefaultConstraint 10 true (To create one, please see the tab "Generate protocol file") Number of passes: Browse true (Select the 4d diffusion weighted image) 0 0 1 4 Qt::Horizontal 40 20 0 Browse Select output file: Median radius: Browse Qt::Horizontal true (Default is <volume_name>_mask.nii.gz) Select 4d image: 0 0 Select protocol file: Final threshold: 1 4 Qt::Horizontal 40 20 1000000.000000000000000 Qt::Horizontal 40 20 true (Radius (in voxels) of the applied median filter) true (Number of median filter passes) true (Additional masking threshold as a signal intensity) Qt::Horizontal 6 0 false Generate false View mask Qt::Horizontal 40 20 Qt::Vertical 20 40 selectImageButton selectedImageText selectProtocolButton selectedProtocolText selectOutputButton selectedOutputText medianRadiusInput numberOfPassesInput finalThresholdInput generateButton viewButton PK$ZI]F1mdt/data/qt_designs/model_fit/icon_status_red.pngPNG  IHDR bKGD pHYs  tIME ;3]IIDAT8˭k\U?;o5iKIVBBHPwB*]֭Bw]Q5JŝK%4'L3o{- /\>={璇eO=jDįvY?$?2p|yzcǏ8;7G>1P\aܹ.-.~r>{P;,ssv$SʒVsFݻ\8s.|KJÞq33zYkt:.E`u{++XJ>xxK66dl+k4NOMO3Z[Aʈ H9O ]'Ƌ5P{.y)}6]çHޓ]ƽ"o] .^ɽn,Ù!JjQQe޳OXV3#;!s{sU1mh6]1#FdeISI'3;V&D=c"!E0!)aB`=L@?Ws ipnG75ml84 PH0njݥʸs<=^xJ )/k)To.Tit\\LV+df\'Ύ>Z_^/?0#ĈUZi3bJ)1J;l%,/Er'+5 1̶@?z! !~*&Ǹx#YvdJdV̀ÔT!l|띺:J`n7~sЪ-CVYoF#7ЬФna 6d۴y<*Yv*55(=Ė m߷l3hO\<IENDB`PK$ZI+fAmdt/data/qt_designs/model_fit/generate_protocol_load_gb_dialog.ui LoadGBDialog 0 0 831 227 Load g & b 0 14 Load g & b true Load the bvec (g) and bval (b) in the protocol Qt::Horizontal true Browse true true (The file containing the gradient directions) Bvec (g) file: Bval (b) file: true (The file containing the b-values) Browse true (We expect the b-values in the protocol in units of s/m^2) 1e6 B-value rescale: Qt::Vertical 20 40 Qt::Horizontal Qt::Horizontal QDialogButtonBox::Cancel|QDialogButtonBox::Ok bvalFileChooser bvalFileInput buttonBox accepted() LoadGBDialog accept() 248 254 157 274 buttonBox rejected() LoadGBDialog reject() 316 260 286 274 PK$ZIsQ(%%1mdt/data/qt_designs/model_fit/view_results_tab.ui ViewResultsTabContent 0 0 938 427 Form 11 0 14 View results true View a selection of maps in the given folder. 
QFrame::Sunken 1 Qt::Horizontal QLayout::SetDefaultConstraint 10 0 Deselect all Invert selection 0 0 Select maps: 0 1 QAbstractItemView::MultiSelection Qt::Horizontal true (Select the maps you would like to display) Browse Select input folder: true (Choose a directory with .nii(.gz) files) Qt::Horizontal 0 0 Qt::Horizontal 40 20 0 6 Initial dimension: 2 2 Qt::Horizontal 40 20 0 6 Initial slice: 3 / x Qt::Horizontal 40 20 Qt::Horizontal 6 0 View Qt::Horizontal 40 20 line_2 line selectFolderButton selectedFolderText selectMaps deselectAllButton invertSelectionButton initialDimensionChooser initialSliceChooser viewButton PK!IsI+M)mdt/data/qt_designs/model_fit/main_gui.ui MainWindow 0 0 870 650 0 0 Maastricht Diffusion Toolbox :/main_gui/logo:/main_gui/logo QTabWidget::Rounded 0 0 0 3 0 3 0 Qt::Vertical 0 1 0 Fit model Generate brain mask Generate ROI mask Generate protocol file View results 0 0 Droid Sans Mono QFrame::StyledPanel QFrame::Sunken Qt::ScrollBarAsNeeded Qt::ScrollBarAsNeeded QPlainTextEdit::NoWrap true 80 8 0 8 Qt::Horizontal 40 20 TextLabel TextLabel 0 0 870 27 &File &Help &Quit Ctrl+Q &Runtime settings &About actionExit triggered() MainWindow close() -1 -1 399 299 PK$ZI!T==&mdt/data/qt_designs/model_fit/Makefile###### EDIT ##################### # Directory with ui and resource files QT_RESOURCE_DIR = ./ # Directory for compiled QT_RESOURCES QT_COMPILED_DIR = ../../../gui/model_fit/design # UI files to compile QT_UI_FILES = $(shell ls $(QT_RESOURCE-DIR)*.ui) # Qt resource files to compile QT_RESOURCES = main_gui.qrc # the binaries PYUIC = pyuic5 --from-imports PYRCC = pyrcc5 ################################# # DO NOT EDIT FOLLOWING all : clean resources ui help: @echo "clean - remove all build designer files" @echo "ui - build only thy UI files" @echo "resources - build only the resources" @echo "all - build both the UI and the resources" QT_COMPILED_UI = $(QT_UI_FILES:%.ui=$(QT_COMPILED_DIR)/ui_%.py) QT_COMPILED_RESOURCES = $(QT_RESOURCES:%.qrc=$(QT_COMPILED_DIR)/%_rc.py) resources : $(QT_COMPILED_RESOURCES) ui : $(QT_COMPILED_UI) $(QT_COMPILED_DIR)/ui_%.py : $(QT_RESOURCE_DIR)/%.ui $(PYUIC) $< -o $@ $(QT_COMPILED_DIR)/%_rc.py : $(QT_RESOURCE_DIR)/%.qrc $(PYRCC) $< -o $@ clean : $(RM) -f $(QT_COMPILED_DIR)/ui_*.py $(QT_COMPILED_DIR)/*_rc.py PK$ZI׉N$N$6mdt/data/qt_designs/model_fit/generate_protocol_tab.ui GenerateProtocolTabContent 0 0 827 427 Form 11 0 14 Generate protocol file true Create a protocol file containing all your sequence information. QFrame::Sunken 1 Qt::Horizontal 0 0 0 0 # shells: 0 Qt::Horizontal 40 20 0 # columns: 0 Qt::Horizontal 40 20 0 # rows: 0 Qt::Horizontal 40 20 0 # weighted: 0 Qt::Horizontal 40 20 0 # unweighted: 0 Qt::Horizontal 40 20 Different shells: - Qt::Horizontal 40 20 Qt::Horizontal 0 0 Load g && b Load protocol Add / Update column Save as Clear Qt::Horizontal 40 20 protocol_table PK$ZIT-0,yy8mdt/data/qt_designs/model_fit/runtime_settings_dialog.ui RuntimeSettingsDialog 0 0 844 243 Runtime settings 0 14 Runtime settings true Runtime settings for all compute operations. 
Qt::Horizontal QAbstractItemView::MultiSelection true (Select the devices you would like to use) OpenCL devices: Qt::Vertical 20 40 Qt::Horizontal Qt::Horizontal QDialogButtonBox::Cancel|QDialogButtonBox::Ok buttonBox accepted() RuntimeSettingsDialog accept() 248 254 157 274 buttonBox rejected() RuntimeSettingsDialog reject() 316 260 286 274 PK$ZI^@mdt/data/qt_designs/model_fit/generate_protocol_update_dialog.ui UpdateColumnDialog 0 0 831 304 Add / Update column 0 14 Add / Update column true Add a column to the current protocol or overwrite an existing column. Qt::Horizontal true Browse true Method: From file Single value Column name: true (A single value for every row) true (The column name, for example "g", "b" or "TE") Scale: Single value: true (File with a single value, a row, a column or a matrix) true (Optionally, scale the input with this amount) Qt::Horizontal File input: Qt::Horizontal Qt::Vertical 20 40 Qt::Horizontal Qt::Horizontal QDialogButtonBox::Cancel|QDialogButtonBox::Ok columnNameInput inputMethodSelector singleValueInput fileInput selectedFile buttonBox accepted() UpdateColumnDialog accept() 248 254 157 274 buttonBox rejected() UpdateColumnDialog reject() 316 260 286 274 PK$ZI0=.=.<mdt/data/qt_designs/model_fit/optimization_options_dialog.ui OptimizationOptionsDialog 0 0 843 337 Optimization options 0 14 Optimization options true Advanced options for the model fitting procedure Qt::Horizontal Qt::Horizontal true (For cascades, if we want to recalculate the entire chain) true (Empty for auto detection, or set a scalar or a path to a nifti file) Yes recalculateAllGroup No true recalculateAllGroup Qt::Horizontal 40 20 true (Enables manual selection of the optimization routine) Patience: Optimization routine: Yes true defaultOptimizerGroup No defaultOptimizerGroup Qt::Horizontal 40 20 false false true (Scales the number of iterations) Recalculate all: Noise standard deviation: Float true floatPrecisionGroup Double floatPrecisionGroup Qt::Horizontal 40 20 Float precision: true (Manual select the routine to use) true (The precision for the calculations) Qt::Horizontal Use default optimizer: Qt::Horizontal false File browser Qt::Vertical 20 40 Qt::Horizontal Qt::Horizontal QDialogButtonBox::Cancel|QDialogButtonBox::Ok noiseStd noiseStdFileSelect floatPrecision doublePrecision defaultOptimizer_True defaultOptimizer_False optimizationRoutine patience recalculateAll_True recalculateAll_False buttonBox accepted() OptimizationOptionsDialog accept() 248 254 157 274 buttonBox rejected() OptimizationOptionsDialog reject() 316 260 286 274 PK$ZI(5ّ((.mdt/data/qt_designs/model_fit/fit_model_tab.ui FitModelTabContent 0 0 1047 427 Form 11 0 14 Fit model true Optimize a model to your data. 
QFrame::Sunken 1 Qt::Horizontal QLayout::SetDefaultConstraint 10 Qt::Horizontal Browse Select DWI: Qt::Horizontal true (Select your protocol file, see tab "Generate protocol file") 0 0 Select brain mask: true (Select your preprocessed 4d diffusion weighted volume) 0 0 Select model: true (Select your brain mask, see tab "Generate brain mask") 0 0 Select output folder: 0 0 Select protocol file: true (Defaults to "output/<mask_name>" in the DWI directory) Browse true (Please select a model) Browse Browse Optimization options Qt::Horizontal 40 20 true (Additional settings) Qt::Horizontal 6 0 true Run Qt::Horizontal 40 20 Qt::Vertical 20 40 line_2 line verticalSpacer selectDWI selectedDWI selectMask selectedMask selectProtocol selectedProtocol selectOutputFolder selectedOutputFolder modelSelection runButton PK$ZIt3mdt/data/qt_designs/model_fit/icon_status_green.pngPNG  IHDR tEXtSoftwareAdobe ImageReadyqe<IDATxڬۋEƿ]=aɘb ID肊T? /ė >)B *5 $b:e..6]5VsVտW?K^<8*&"7iSXP<{zy%~G|V;W}x=]?e;i6]ZfqvU.^;o8Sxr!b fq/^{tGBQ`ip+ U5n:BY;}A؂nQb{{PuTtp(EE>a]ė9py/](u@I5SNO RShcwФsKv n $Hf|7[ć@j# IᕼB:X6WA-4+DО/%LLv^ֵqʄ݀nA 7 ^Xh1ABoj߸䥳=Ԉq^ ge3cx5n (;MT7ĶE[TC~_ \V8`N"bn2Wqlw$Pt!H4 Q ?lhcնIe@E}QhVz:! T G]w>0VyN~#Rbz* dg0(4T[sꂛ4f{eHY3i ػnnHmsK9 *n,Y/j}H9xZūɰK畷+.oƆke+(^/KIMisP5 ?3O9Nofz dnvǦ h?f>QDIENDB`PK$ZIyyYy%y%6mdt/data/qt_designs/model_fit/generate_roi_mask_tab.ui GenerateROIMaskTabContent 0 0 827 427 Form 11 0 0 14 Generate ROI mask true Create a mask with a Region Of Interest including only the voxels in the selected slice. QFrame::Sunken 1 Qt::Horizontal QLayout::SetDefaultConstraint 10 0 2 2 Qt::Horizontal 40 20 Select brain mask: 0 0 Select output file: true (The index of the single slice in the current dimension) Browse true (Select your brain mask) true (The dimension of the single slice) 0 0 10000 0 3 / x Qt::Horizontal 40 20 Select dimension: true (Default is <mask_name>_<dim>_<slice>.nii.gz) 0 0 Browse Select slice: Qt::Horizontal 6 0 false Generate false View ROI Qt::Horizontal 40 20 Qt::Vertical 20 40 selectMaskButton selectedMaskText dimensionInput sliceInput selectOutputFileInput selectedOutputFileText generateButton viewButton PKIsIKċ-mdt/data/qt_designs/model_fit/about_dialog.ui AboutDialog 0 0 594 379 About MDT 3 6 6 6 6 0 0 16 :/main_gui/logo Qt::Vertical 20 40 14 75 true MDT 12 true Maastricht Diffusion Toolbox 0 20 11 <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd"> <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Droid Sans'; font-size:9pt; font-weight:400; font-style:normal;"> <p style=" margin-top:12px; margin-bottom:12px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-size:11pt;">Version: {version}</span></p> <p style=" margin-top:20px; margin-bottom:12px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-size:11pt;">The Maastricht Diffusion Toolbox is a model recovery toolbox primarily meant for diffusion MRI analysis.</span></p> <p style=" margin-top:20px; margin-bottom:12px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-size:11pt;">Software development by Robbert Harms, under the (Phd) supervision of Alard Roebroeck, at Maastricht University.</span></p> <p style=" margin-top:12px; margin-bottom:12px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" 
font-size:11pt;">Contributors:</span></p> <ul style="margin-top: 0px; margin-bottom: 0px; margin-left: 0px; margin-right: 0px; -qt-list-indent: 1;"><li style=" font-size:11pt;" style=" margin-top:12px; margin-bottom:12px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Robbert Harms</li> <li style=" font-size:11pt;" style=" margin-top:12px; margin-bottom:12px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Alard Roebroeck</li> <li style=" font-size:11pt;" style=" margin-top:12px; margin-bottom:12px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Francisco Fritz</li></ul></body></html> Qt::RichText true Qt::Vertical 20 40 Qt::Horizontal 40 20 Qt::Horizontal QDialogButtonBox::Close buttonBox accepted() AboutDialog accept() 248 254 157 274 buttonBox rejected() AboutDialog reject() 316 260 286 274 PKIsI t*mdt/data/qt_designs/model_fit/main_gui.qrc icon_status_green.png icon_status_red.png ../../logo_gui.png PKjUpIIޠgg6mdt/data/components/standard/parameters/static_maps.py"""Definitions of the static data parameters. These parameters are in usage similar to fixed free parameters. They are defined as static data parameters to make clear that they are meant to carry additional observational data about a problem. Please choose the parameter type for a model and parameter carefully since the type signifies how the parameter and its data are handled during model construction. """ from mdt.models.parameters import StaticMapParameterConfig __author__ = 'Robbert Harms' __date__ = "2016-02-14" __maintainer__ = "Robbert Harms" __email__ = "robbert.harms@maastrichtuniversity.nl" class b1_static(StaticMapParameterConfig): data_type = 'mot_float_type' value = 1 class fa_static(StaticMapParameterConfig): data_type = 'mot_float_type' value = 1 class Sw_static(StaticMapParameterConfig): """This parameter is created only for linear T1 decay fitting of GRE data with variable flip angle. S_weighted is defined as the input data divided by the :math:`tan(flip_angle) -> S_weighted = data / tan (flip_angle * B1_map)` """ data_type = 'mot_float_type' value = 1 class T1_static(StaticMapParameterConfig): data_type = 'mot_float_type' class T2_static(StaticMapParameterConfig): data_type = 'mot_float_type' class T2s_static(StaticMapParameterConfig): data_type = 'mot_float_type' PKwI3mdt/data/components/standard/parameters/protocol.py"""Definitions of the protocol parameters The type of these parameters signifies that the data for this parameter should come from the protocol defined in the model data. These will never be optimized and are always set to the data defined in the protocol. Please choose the parameter type for a model and parameter carefully since the type signifies how the parameter and its data are handled during model construction. """ from mdt.models.parameters import ProtocolParameterConfig __author__ = 'Robbert Harms' __date__ = "2015-12-12" __maintainer__ = "Robbert Harms" __email__ = "robbert.harms@maastrichtuniversity.nl" class g(ProtocolParameterConfig): data_type = 'mot_float_type4' class b(ProtocolParameterConfig): pass class G(ProtocolParameterConfig): pass class Delta(ProtocolParameterConfig): pass class delta(ProtocolParameterConfig): pass class TE(ProtocolParameterConfig): pass class TM(ProtocolParameterConfig): pass class Ti(ProtocolParameterConfig): pass class TR(ProtocolParameterConfig): pass class flip_angle(ProtocolParameterConfig): pass # For STEAM/TSE sequences, depending on the model in which they are used. 
class Refoc_fa1(ProtocolParameterConfig): pass # For STEAM/TSE sequences, depending on the model in which they are used. class Refoc_fa2(ProtocolParameterConfig): pass # For STEAM/TSE sequences, depending on the model in which they are used. class SEf(ProtocolParameterConfig): pass PKjUpI}5mdt/data/components/standard/parameters/model_data.py"""Definitions of the model data parameters. These are meant for model specific data that the model needs to work. You can of course inline these variables in the code for one of the models (which is faster), but this way lets the user change the specifics of the model by changing the data in the model data parameters. Please choose the parameter type for a model and parameter carefully since the type signifies how the parameter and its data are handled during model construction. """ import numpy as np from mdt.models.parameters import ModelDataParameterConfig __author__ = 'Robbert Harms' __date__ = "2015-12-12" __maintainer__ = "Robbert Harms" __email__ = "robbert.harms@maastrichtuniversity.nl" # charmed default, used in GDRCylindersFixed model class gamma_radii(ModelDataParameterConfig): data_type = 'global const mot_float_type* const' value = 1e-6 * np.array([1.5, 2.5, 3.5, 4.5, 5.5, 6.5]) class gamma_cyl_weights(ModelDataParameterConfig): data_type = 'global const mot_float_type* const' value = np.array([0.0211847200855742, 0.107169623942214, 0.194400551313197, 0.266676876170322, 0.214921653661151, 0.195646574827541]) class nmr_gamma_cyl_weights(ModelDataParameterConfig): data_type = 'int' value = 6 PK8|ID4/mdt/data/components/standard/parameters/free.py"""Definitions of the free parameters. The free parameters are meant to be used for parameters that one wants to optimize. They can be fixed to a certain value to disable them from being optimized in a given situation, but they remain classified as 'optimizable' parameters. Please choose the parameter type for a model and parameter carefully since the type signifies how the parameter and its data are handled during model construction. """ import numpy as np from mdt.models.parameters import FreeParameterConfig from mot.model_building.parameter_functions.priors import AbsSinHalfPrior, AbsSinPrior from mot.model_building.parameter_functions.proposals import GaussianProposal, CircularGaussianProposal from mot.model_building.parameter_functions.sample_statistics import CircularGaussianPSS from mot.model_building.parameter_functions.transformations import ClampTransform, AbsModPiTransform, \ SinSqrClampTransform, CosSqrClampTransform __author__ = 'Robbert Harms' __date__ = "2015-12-12" __maintainer__ = "Robbert Harms" __email__ = "robbert.harms@maastrichtuniversity.nl" class s0(FreeParameterConfig): init_value = 1e4 lower_bound = 1e-5 upper_bound = 1e10 parameter_transform = ClampTransform() sampling_proposal = GaussianProposal(std=25.0) class T1(FreeParameterConfig): init_value = 0.03 lower_bound = 0.0 upper_bound = 4.0 parameter_transform = ClampTransform() sampling_proposal = GaussianProposal(0.0001) class E1(FreeParameterConfig): """This parameter is defined *only* for linear decay T1 fitting in GRE data *with* TR constant. This parameter is also defined in the SSFP equation. However, in SSFP this parameter is from the protocol (!) E1 = exp( -TR / T1 ). After estimation of this parameter, T1 can be recovered by applying the next equation: -TR / log( E1 ). 
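For example, assuming TR = 0.005 s and an estimated E1 of 0.9, this gives T1 = -0.005 / ln(0.9) ≈ 0.047 s.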
""" init_value = 0.37 lower_bound = 0.0 upper_bound = 1.0 parameter_transform = ClampTransform() sampling_proposal = GaussianProposal(0.0001) class T2(FreeParameterConfig): init_value = 0.01 lower_bound = 0.0 upper_bound = 2.0 parameter_transform = ClampTransform() sampling_proposal = GaussianProposal(0.0001) class T2s(FreeParameterConfig): init_value = 0.01 lower_bound = 0.0 upper_bound = 1.0 parameter_transform = ClampTransform() sampling_proposal = GaussianProposal(0.0001) class R1(FreeParameterConfig): """R1 = 1/T1, for linear T1Dec or other models. """ init_value = 2 lower_bound = 0.25 upper_bound = 100.0 parameter_transform = ClampTransform() sampling_proposal = GaussianProposal(0.0001) class R2(FreeParameterConfig): """R2 = 1/T2, for linear T2Dec or other models.""" init_value = 5 lower_bound = 0.5 upper_bound = 500.0 parameter_transform = ClampTransform() sampling_proposal = GaussianProposal(0.0001) class R2s(FreeParameterConfig): """R2s = 1/T2s, for lineaR T2sDec or other models.""" init_value = 10 lower_bound = 1 upper_bound = 50.0 parameter_transform = ClampTransform() sampling_proposal = GaussianProposal(0.0001) class theta(FreeParameterConfig): init_value = 1 / 2.0 * np.pi lower_bound = 0 upper_bound = np.pi parameter_transform = AbsModPiTransform() sampling_proposal = CircularGaussianProposal(np.pi, 0.02) sampling_prior = AbsSinHalfPrior() sampling_statistics = CircularGaussianPSS() class phi(FreeParameterConfig): init_value = 1 / 2.0 * np.pi lower_bound = 0 upper_bound = np.pi parameter_transform = AbsModPiTransform() sampling_proposal = CircularGaussianProposal(np.pi, 0.02) sampling_prior = AbsSinPrior() sampling_statistics = CircularGaussianPSS() class psi(FreeParameterConfig): init_value = 1 / 2.0 * np.pi lower_bound = 0 upper_bound = np.pi parameter_transform = AbsModPiTransform() sampling_proposal = CircularGaussianProposal(np.pi, 0.02) sampling_prior = AbsSinPrior() sampling_statistics = CircularGaussianPSS() class d(FreeParameterConfig): init_value = 1.7e-9 lower_bound = 0 upper_bound = 1.0e-8 parameter_transform = SinSqrClampTransform() sampling_proposal = GaussianProposal(1e-14) class dperp0(FreeParameterConfig): init_value = 1.7e-10 lower_bound = 0 upper_bound = 1.0e-8 parameter_transform = SinSqrClampTransform() sampling_proposal = GaussianProposal(1e-15) class dperp1(FreeParameterConfig): init_value = 1.7e-11 lower_bound = 0 upper_bound = 1.0e-8 parameter_transform = SinSqrClampTransform() sampling_proposal = GaussianProposal(1e-15) class R(FreeParameterConfig): init_value = 2.0e-6 lower_bound = 1e-6 upper_bound = 20e-6 parameter_transform = CosSqrClampTransform() sampling_proposal = GaussianProposal(1e-6) class kappa(FreeParameterConfig): init_value = 1 lower_bound = 1e-5 upper_bound = 2 * np.pi parameter_transform = CosSqrClampTransform() sampling_proposal = GaussianProposal(0.01) # for use in the GDRCylinder model class gamma_k(FreeParameterConfig): init_value = 1 lower_bound = 0 upper_bound = 20 parameter_transform = SinSqrClampTransform() sampling_proposal = GaussianProposal(1.0) # for use in the GDRCylinder model class gamma_beta(FreeParameterConfig): init_value = 1 lower_bound = 1.0e-7 upper_bound = 3.0e-7 parameter_transform = SinSqrClampTransform() sampling_proposal = GaussianProposal(1e-7) # for use in the GDRCylinder model class gamma_nmr_cyl(FreeParameterConfig): init_value = 5 lower_bound = 1 upper_bound = 10 parameter_transform = SinSqrClampTransform() sampling_proposal = GaussianProposal(1) 
PKjUpIOr>>@mdt/data/components/standard/processing_strategies/VoxelRange.pyfrom mdt.processing_strategies import ChunksProcessingStrategy __author__ = 'Robbert Harms' __date__ = "2015-11-29" __maintainer__ = "Robbert Harms" __email__ = "robbert.harms@maastrichtuniversity.nl" meta_info = {'title': 'Fit in chunks of voxel ranges', 'description': 'Processes a model in chunks defined by ranges of voxels.'} class VoxelRange(ChunksProcessingStrategy): def __init__(self, nmr_voxels=40000, **kwargs): """Optimize a given dataset in batches of the given number of voxels Args: nmr_voxels (int): the number of voxels per batch Attributes: nmr_voxels (int): the number of voxels per chunk """ super(VoxelRange, self).__init__(**kwargs) self.nmr_voxels = nmr_voxels def _chunks_generator(self, model, problem_data, output_path, worker, total_roi_indices): for ind_start in range(0, len(total_roi_indices), self.nmr_voxels): ind_end = min(len(total_roi_indices), ind_start + self.nmr_voxels) yield total_roi_indices[ind_start:ind_end] PKjUpI^G5 5 Gmdt/data/components/standard/processing_strategies/ProtocolDependent.pyfrom mdt.components_loader import load_component from mdt.processing_strategies import SimpleProcessingStrategy __author__ = 'Robbert Harms' __date__ = "2015-11-29" __maintainer__ = "Robbert Harms" __email__ = "robbert.harms@maastrichtuniversity.nl" meta_info = {'title': 'Applies the VoxelRange strategy depending on the protocol.', 'description': 'This looks at the size of the protocol and based on that determines the voxel range.'} class ProtocolDependent(SimpleProcessingStrategy): def __init__(self, steps=((0, None), (100, 50000), (200, 20000)), **kwargs): """A meta strategy using VoxelRange AllVoxelsAtOnce depending on the protocol length This will look at the protocol of the given model and determine, based on the number of rows in the protocol, which voxel range to use. A voxel range of None or 0 means we want to fit all the voxels at once (this will use the AllVoxelsAtOnce strategy for that). During lookup of the protocol length we take the maximum step that is lower than the protocol length. If no suitable lookup is present, we use the AllVoxelsAtOnce strategy. Args: steps (list[tuple[int, int]]): the steps of the voxel ranges. The first item in the tuple is the protocol length, the second the voxel range. We assume that voxel ranges are in ascending order. 
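For example, with the default steps ((0, None), (100, 50000), (200, 20000)): a protocol with 150 rows matches the (100, 50000) entry and fits 50000 voxels per chunk, a protocol with 250 rows matches (200, 20000), and a protocol with 50 rows matches (0, None) and therefore uses the AllVoxelsAtOnce strategy.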
""" super(ProtocolDependent, self).__init__(**kwargs) self._steps = steps def run(self, model, problem_data, output_path, recalculate, worker_generator): strategy = self._get_strategy(problem_data) return strategy.run(model, problem_data, output_path, recalculate, worker_generator) def _get_strategy(self, problem_data): for col_length, voxel_range in reversed(self._steps): if int(col_length) < problem_data.get_nmr_inst_per_problem(): if voxel_range: return load_component('processing_strategies', 'VoxelRange', nmr_voxels=int(voxel_range), tmp_dir=self._tmp_dir, honor_voxels_to_analyze=self._honor_voxels_to_analyze) return load_component('processing_strategies', 'AllVoxelsAtOnce', tmp_dir=self._tmp_dir, honor_voxels_to_analyze=self._honor_voxels_to_analyze) PKjUpIӡfEmdt/data/components/standard/processing_strategies/AllVoxelsAtOnce.pyfrom mdt.processing_strategies import ChunksProcessingStrategy __author__ = 'Robbert Harms' __date__ = "2015-11-29" __maintainer__ = "Robbert Harms" __email__ = "robbert.harms@maastrichtuniversity.nl" meta_info = {'title': 'All slices at once', 'description': 'Processes the whole dataset at once.'} class AllVoxelsAtOnce(ChunksProcessingStrategy): """Run all slices at once.""" def _chunks_generator(self, model, problem_data, output_path, worker, total_roi_indices): yield total_roi_indices PKԃI ((7mdt/data/components/standard/composite_models/Tensor.pyfrom mdt.models.composite import DMRICompositeModelConfig __author__ = 'Robbert Harms' __date__ = "2015-06-22" __maintainer__ = "Robbert Harms" __email__ = "robbert.harms@maastrichtuniversity.nl" class Tensor(DMRICompositeModelConfig): ex_vivo_suitable = False description = 'The standard Tensor model with in vivo defaults.' model_expression = ''' S0 * Tensor ''' inits = {'Tensor.d': 1.7e-9, 'Tensor.dperp0': 1.7e-10, 'Tensor.dperp1': 1.7e-10} volume_selection = {'unweighted_threshold': 25e6, 'use_unweighted': True, 'use_weighted': True, 'min_bval': 0, 'max_bval': 1.5e9 + 0.1e9} class TensorExVivo(Tensor): name = 'Tensor-ExVivo' ex_vivo_suitable = True in_vivo_suitable = False description = 'The standard Tensor model with ex vivo defaults.' 
inits = {'Tensor.d': 1e-9, 'Tensor.dperp0': 0.6e-10, 'Tensor.dperp1': 0.6e-10} volume_selection = None PKUpI}I((8mdt/data/components/standard/composite_models/CHARMED.pyfrom mdt.models.composite import DMRICompositeModelConfig __author__ = 'Robbert Harms' __date__ = "2015-06-22" __maintainer__ = "Robbert Harms" __email__ = "robbert.harms@maastrichtuniversity.nl" class CHARMED_r1(DMRICompositeModelConfig): ex_vivo_suitable = False description = 'The CHARMED model with 1 restricted compartments' model_expression = ''' S0 * ( (Weight(w_hin0) * Tensor) + (Weight(w_res0) * CHARMEDRestricted(CHARMEDRestricted0)) ) ''' lower_bounds = {'Tensor.d': 1e-9, 'Tensor.dperp0': 0.3e-9, 'Tensor.dperp1': 0.3e-9, 'CHARMEDRestricted0.d': 0.3e-9} upper_bounds = {'Tensor.d': 5e-9, 'Tensor.dperp0': 5e-9, 'Tensor.dperp1': 3e-9, 'CHARMEDRestricted0.d': 3e-9} inits = {'Tensor.d': 1.2e-9, 'Tensor.dperp0': 0.5e-9, 'Tensor.dperp1': 0.5e-9, 'CHARMEDRestricted0.d': 1e-9} post_optimization_modifiers = [ ('FR', lambda results: 1 - results['w_hin0.w']) ] class CHARMED_r2(DMRICompositeModelConfig): ex_vivo_suitable = False description = 'The CHARMED model with 2 restricted compartments' model_expression = ''' S0 * ( (Weight(w_hin0) * Tensor) + (Weight(w_res0) * CHARMEDRestricted(CHARMEDRestricted0)) + (Weight(w_res1) * CHARMEDRestricted(CHARMEDRestricted1)) ) ''' lower_bounds = {'Tensor.d': 1e-9, 'Tensor.dperp0': 0.3e-9, 'Tensor.dperp1': 0.3e-9, 'CHARMEDRestricted0.d': 0.3e-9, 'CHARMEDRestricted1.d': 0.3e-9} upper_bounds = {'Tensor.d': 5e-9, 'Tensor.dperp0': 5e-9, 'Tensor.dperp1': 3e-9, 'CHARMEDRestricted0.d': 3e-9, 'CHARMEDRestricted1.d': 3e-9} inits = {'Tensor.d': 1.2e-9, 'Tensor.dperp0': 0.5e-9, 'Tensor.dperp1': 0.5e-9, 'CHARMEDRestricted0.d': 1e-9, 'CHARMEDRestricted1.d': 1e-9} post_optimization_modifiers = [ ('FR', lambda results: 1 - results['w_hin0.w']) ] class CHARMED_r3(DMRICompositeModelConfig): ex_vivo_suitable = False description = 'The standard CHARMED model with 3 restricted compartments' model_expression = ''' S0 * ( (Weight(w_hin0) * Tensor) + (Weight(w_res0) * CHARMEDRestricted(CHARMEDRestricted0)) + (Weight(w_res1) * CHARMEDRestricted(CHARMEDRestricted1)) + (Weight(w_res2) * CHARMEDRestricted(CHARMEDRestricted2)) ) ''' lower_bounds = {'Tensor.d': 1e-9, 'Tensor.dperp0': 0.3e-9, 'Tensor.dperp1': 0.3e-9, 'CHARMEDRestricted0.d': 0.3e-9, 'CHARMEDRestricted1.d': 0.3e-9, 'CHARMEDRestricted2.d': 0.3e-9} upper_bounds = {'Tensor.d': 5e-9, 'Tensor.dperp0': 5e-9, 'Tensor.dperp1': 3e-9, 'CHARMEDRestricted0.d': 3e-9, 'CHARMEDRestricted1.d': 3e-9, 'CHARMEDRestricted2.d': 3e-9} inits = {'Tensor.d': 1.2e-9, 'Tensor.dperp0': 0.5e-9, 'Tensor.dperp1': 0.5e-9, 'CHARMEDRestricted0.d': 1e-9, 'CHARMEDRestricted1.d': 1e-9, 'CHARMEDRestricted2.d': 1e-9, 'w_res2.w': 0} post_optimization_modifiers = [ ('FR', lambda results: 1 - results['w_hin0.w']) ] PKUpI0 ;mdt/data/components/standard/composite_models/BallSticks.pyfrom mdt.models.composite import DMRICompositeModelConfig __author__ = 'Robbert Harms' __date__ = "2015-06-22" __maintainer__ = "Robbert Harms" __email__ = "robbert.harms@maastrichtuniversity.nl" class BallStick_r1(DMRICompositeModelConfig): ex_vivo_suitable = False description = 'The default Ball & Stick model' model_expression = ''' S0 * ( (Weight(w_ball) * Ball) + (Weight(w_stick) * Stick) ) ''' fixes = {'Ball.d': 3.0e-9, 'Stick.d': 1.7e-9} post_optimization_modifiers = [('FS', lambda results: 1 - results['w_ball.w'])] class BallStick_r1_ExVivo(BallStick_r1): name = 'BallStick_r1-ExVivo' in_vivo_suitable = False 
ex_vivo_suitable = True description = 'The Ball & Stick model with ex vivo defaults', fixes = {'Ball.d': 2.0e-9, 'Stick.d': 0.6e-9} class BallStick_r2(DMRICompositeModelConfig): ex_vivo_suitable = False description = 'The Ball & 2x Stick model' model_expression = ''' S0 * ( (Weight(w_ball) * Ball) + (Weight(w_stick0) * Stick(Stick0)) + (Weight(w_stick1) * Stick(Stick1)) ) ''' fixes = {'Ball.d': 3.0e-9, 'Stick0.d': 1.7e-9, 'Stick1.d': 1.7e-9} post_optimization_modifiers = [('FS', lambda results: 1 - results['w_ball.w'])] class BallStick_r2_ExVivo(BallStick_r2): name = 'BallStick_r2-ExVivo' in_vivo_suitable = False ex_vivo_suitable = True description = 'The Ball & 2x Stick model with ex vivo defaults' fixes = {'Ball.d': 2.0e-9, 'Stick0.d': 0.6e-9, 'Stick1.d': 0.6e-9} class BallStick_r3(DMRICompositeModelConfig): ex_vivo_suitable = False description = 'The Ball & 3x Stick model' model_expression = ''' S0 * ( (Weight(w_ball) * Ball) + (Weight(w_stick0) * Stick(Stick0)) + (Weight(w_stick1) * Stick(Stick1)) + (Weight(w_stick2) * Stick(Stick2)) ) ''' fixes = {'Ball.d': 3.0e-9, 'Stick0.d': 1.7e-9, 'Stick1.d': 1.7e-9, 'Stick2.d': 1.7e-9} inits = {'w_stick2.w': 0} post_optimization_modifiers = [('FS', lambda results: 1 - results['w_ball.w'])] class BallStick_r3_ExVivo(BallStick_r3): name = 'BallStick_r3-ExVivo' ex_vivo_suitable = True in_vivo_suitable = False description = 'The Ball & 3x Stick model with ex vivo defaults' fixes = {'Ball.d': 2.0e-9, 'Stick0.d': 0.6e-9, 'Stick1.d': 0.6e-9, 'Stick2.d': 0.6e-9} PK؃I@4mdt/data/components/standard/composite_models/s0s.pyfrom mdt.models.composite import DMRICompositeModelConfig __author__ = 'Robbert Harms' __date__ = "2015-06-22" __maintainer__ = "Robbert Harms" __email__ = "robbert.harms@maastrichtuniversity.nl" class S0(DMRICompositeModelConfig): description = 'Models the unweighted signal (aka. b0).' 
model_expression = 'S0' volume_selection = {'unweighted_threshold': 25e6, 'use_unweighted': True, 'use_weighted': False} PK)AuImv9mdt/data/components/standard/composite_models/ActiveAx.pyfrom mdt.models.composite import DMRICompositeModelConfig __author__ = 'Robbert Harms' __date__ = "2015-06-22" __maintainer__ = "Robbert Harms" __email__ = "robbert.harms@maastrichtuniversity.nl" class ActiveAx(DMRICompositeModelConfig): ex_vivo_suitable = False description = 'The standard ActiveAx model' model_expression = ''' S0 * ((Weight(w_ic) * CylinderGPD) + (Weight(w_ec) * Zeppelin) + (Weight(w_csf) * Ball)) ''' fixes = {'CylinderGPD.d': 1.7e-9, 'Zeppelin.d': 1.7e-9, 'Ball.d': 3.0e-9} dependencies = [('Zeppelin.dperp0', 'Zeppelin.d * (w_ec.w / (w_ec.w + w_ic.w))')] PK-AuIR4[NN6mdt/data/components/standard/composite_models/NODDI.pyfrom mdt.models.composite import DMRICompositeModelConfig from mot.model_building.parameter_functions.dependencies import AbstractParameterDependency __author__ = 'Robbert Harms' __date__ = "2015-06-22" __maintainer__ = "Robbert Harms" __email__ = "robbert.harms@maastrichtuniversity.nl" class NODDITortuosityParameterDependency(AbstractParameterDependency): def __init__(self, d, w_ec, w_ic, ): self._d = d self._w_ec = w_ec self._w_ic = w_ic @property def pre_transform_code(self): return ''' mot_float_type _tortuosity_mult_{d} = {w_ec} / ({w_ec} + {w_ic}); if(!isnormal(_tortuosity_mult_{d})){{ _tortuosity_mult_{d} = 0.01; }} '''.format(d=self._d, w_ec=self._w_ec, w_ic=self._w_ic) @property def assignment_code(self): return '{d} * _tortuosity_mult_{d}'.format(d=self._d) @property def fixed(self): return True @property def has_side_effects(self): return False class NODDI(DMRICompositeModelConfig): ex_vivo_suitable = False description = 'The standard NODDI model' model_expression = ''' S0 * ((Weight(w_csf) * Ball) + (Weight(w_ic) * NODDI_IC) + (Weight(w_ec) * NODDI_EC) ) ''' fixes = {'NODDI_IC.d': 1.7e-9, 'NODDI_IC.R': 0.0, 'NODDI_EC.d': 1.7e-9, 'Ball.d': 3.0e-9} dependencies = ( ('NODDI_EC.dperp0', NODDITortuosityParameterDependency('NODDI_EC.d', 'w_ec.w', 'w_ic.w')), ('NODDI_EC.kappa', 'NODDI_IC.kappa'), ('NODDI_EC.theta', 'NODDI_IC.theta'), ('NODDI_EC.phi', 'NODDI_IC.phi') ) post_optimization_modifiers = [ ('NDI', lambda d: d['w_ic.w'] / (d['w_ic.w'] + d['w_ec.w'])), ('ODI', lambda d: d['NODDI_IC.odi']) ] PKjUpI>Imdt/data/components/standard/noise_std_estimators/AllUnweightedVolumes.pyimport numpy as np from mdt import create_roi from mdt.utils import ComplexNoiseStdEstimator from mdt.exceptions import NoiseStdEstimationNotPossible __author__ = 'Robbert Harms' __date__ = "2015-11-20" __maintainer__ = "Robbert Harms" __email__ = "robbert.harms@maastrichtuniversity.nl" class AllUnweightedVolumes(ComplexNoiseStdEstimator): def estimate(self, problem_data, **kwargs): """Calculate the standard deviation of the error using all unweighted volumes. This calculates per voxel (in the brain mas) the std over all unweighted volumes and takes the mean of those estimates as the standard deviation of the noise. The method is taken from Camino (http://camino.cs.ucl.ac.uk/index.php?n=Man.Estimatesnr). 
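Returns: float: single value representing the sigma for the given volume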
""" unweighted_indices = problem_data.protocol.get_unweighted_indices() unweighted_volumes = problem_data.dwi_volume[..., unweighted_indices] if len(unweighted_indices) < 2: raise NoiseStdEstimationNotPossible('Not enough unweighted volumes for this estimator.') voxel_list = create_roi(unweighted_volumes, problem_data.mask) return np.mean(np.std(voxel_list, axis=1)) def __str__(self): return __name__ PKjUpIeeImdt/data/components/standard/noise_std_estimators/TwoUnweightedVolumes.pyimport numpy as np from mdt import create_roi from mdt.utils import ComplexNoiseStdEstimator from mdt.exceptions import NoiseStdEstimationNotPossible __author__ = 'Robbert Harms' __date__ = "2015-11-20" __maintainer__ = "Robbert Harms" __email__ = "robbert.harms@maastrichtuniversity.nl" class TwoUnweightedVolumes(ComplexNoiseStdEstimator): def estimate(self, problem_data, **kwargs): """Calculate the standard deviation of the error using the first two unweighted volumes/ This subtracts the values of the first two unweighted volumes from each other, calculates the std over the results and divides that by sqrt(2). The method is taken from Camino (http://camino.cs.ucl.ac.uk/index.php?n=Man.Estimatesnr). Returns: float: single value representing the sigma for the given volume """ unweighted_indices = problem_data.protocol.get_unweighted_indices() unweighted_volumes = problem_data.dwi_volume[..., unweighted_indices] if len(unweighted_indices) < 2: raise NoiseStdEstimationNotPossible('Not enough unweighted volumes for this estimator.') diff = unweighted_volumes[..., 0] - unweighted_volumes[..., 1] voxel_values = create_roi(diff, problem_data.mask) return np.std(voxel_values) / np.sqrt(2) def __str__(self): return __name__ PKjUpI-!> > Nmdt/data/components/standard/noise_std_estimators/AverageOfAir_ExtendedMask.pyfrom mdt.utils import ComplexNoiseStdEstimator, create_roi from mdt.exceptions import NoiseStdEstimationNotPossible import numpy as np __author__ = 'Robbert Harms' __date__ = "2016-04-11" __maintainer__ = "Robbert Harms" __email__ = "robbert.harms@maastrichtuniversity.nl" class AverageOfAir_ExtendedMask(ComplexNoiseStdEstimator): def estimate(self, problem_data, **kwargs): """Calculate the standard deviation of the error using the air (voxels outside the brain), This procedure first finds the extreme points of the given brain mask in all dimensions. Next, it extends this mask (as a sort cross) in all dimensions to mask out the mask and possible ghostings. Finally we mask the first N voxels at the edges of the brain since the may be zero-filled. We use all the remaining voxels for the noise std calculation. We then calculate per voxel the std of the noise and use that to estimate the noise of the original complex image domain using: sigma_complex = sqrt(2.0 / (4.0 - PI)) * stddev(signal in background region) Finally, we take the median value of all calculated std's. """ voxels = self._get_air_voxels(problem_data) if not len(voxels): raise NoiseStdEstimationNotPossible('No voxels in air found.') return np.median(np.sqrt(2.0 / (4.0 - np.pi)) * np.std(voxels, axis=1)) def _get_air_voxels(self, problem_data, border_offset=3): """Get a two dimensional list with all the voxels in the air. Returns: ndarray: The first dimension is the list of voxels, the second the signal per voxel. 
""" indices = np.where(problem_data.mask > 0) max_dims = np.max(indices, axis=1) min_dims = np.min(indices, axis=1) mask = np.copy(problem_data.mask) mask[min_dims[0]:max_dims[0]] = True mask[:, min_dims[1]:max_dims[1], :] = True mask[..., min_dims[2]:max_dims[2]] = True mask[0:border_offset] = True mask[-border_offset:] = True mask[:, 0:border_offset, :] = True mask[:, -border_offset:, :] = True mask[..., 0:border_offset] = True mask[..., -border_offset:] = True return create_roi(problem_data.dwi_volume, np.invert(mask)) def __str__(self): return __name__ PKjUpIuLMmdt/data/components/standard/noise_std_estimators/AverageOfAir_DilatedMask.pyfrom mdt.utils import ComplexNoiseStdEstimator, create_roi from mdt.exceptions import NoiseStdEstimationNotPossible import numpy as np from scipy.ndimage.morphology import binary_dilation __author__ = 'Robbert Harms' __date__ = "2016-04-11" __maintainer__ = "Robbert Harms" __email__ = "robbert.harms@maastrichtuniversity.nl" class AverageOfAir_DilatedMask(ComplexNoiseStdEstimator): def estimate(self, problem_data, **kwargs): """Calculate the standard deviation of the error using the air (voxels outside the brain), This procedure first dilates the given brian mask a little bit to smooth out the edges. Finally we mask the first n voxels at the edges of the data volume since the may be zero-filled. We use all the remaining voxels for the noise std calculation. We then calculate per voxel the std of the noise and use that to estimate the noise of the original complex image domain using: sigma_complex = sqrt(2.0 / (4.0 - PI)) * stddev(signal in background region) Finally, we take the median value of all calculated std's. """ voxels = self._get_air_voxels(problem_data) if not len(voxels): raise NoiseStdEstimationNotPossible('No voxels in air found.') return np.median(np.sqrt(2.0 / (4.0 - np.pi)) * np.std(voxels, axis=1)) def _get_air_voxels(self, problem_data, border_offset=3): """Get a two dimensional list with all the voxels in the air. Returns: ndarray: The first dimension is the list of voxels, the second the signal per voxel. """ mask = np.copy(problem_data.mask) mask = binary_dilation(mask, iterations=1) mask[0:border_offset] = True mask[-border_offset:] = True mask[:, 0:border_offset, :] = True mask[:, -border_offset:, :] = True mask[..., 0:border_offset] = True mask[..., -border_offset:] = True return create_roi(problem_data.dwi_volume, np.invert(mask)) def __str__(self): return __name__ PKHrIҁo 6mdt/data/components/standard/batch_profiles/HCP_MGH.pyimport glob import os import mdt from mdt.batch_utils import SimpleBatchProfile, BatchFitProtocolLoader, SimpleSubjectInfo __author__ = 'Robbert Harms' __date__ = "2015-07-13" __maintainer__ = "Robbert Harms" __email__ = "robbert.harms@maastrichtuniversity.nl" meta_info = {'title': 'HCP MGH', 'description': 'The profile for the MGH data from the Human Connectome project', 'directory_layout': ''' This assumes that you downloaded and extracted the MGH data in one folder and that you now have one folder per subject. You can provide the noise standard deviation to use using a noise_std file containing a single float. 
Example directory layout: /mgh_*/diff/preproc/mri/diff_preproc.nii(.gz) /mgh_*/diff/preproc/bvals.txt /mgh_*/diff/preproc/bvecs_fsl_moco_norm.txt Optional items (these will take precedence if present): /mgh_*/diff/preproc/diff_preproc.bval /mgh_*/diff/preproc/diff_preproc.bvec /mgh_*/diff/preproc/diff_preproc.prtcl /mgh_*/diff/preproc/diff_preproc_mask.nii(.gz) /mgh_*/diff/preproc/mri/diff_preproc_mask.nii(.gz) /mgh_*/diff/preproc/noise_std.{txt,nii,nii.gz} '''} class HCP_MGH(SimpleBatchProfile): def __init__(self): super(HCP_MGH, self).__init__() self._output_base_dir = 'diff/preproc/output' def _get_subjects(self): dirs = sorted([os.path.basename(f) for f in glob.glob(os.path.join(self._root_dir, '*'))]) subjects = [] for subject_id in dirs: pjoin = mdt.make_path_joiner(self._root_dir, subject_id, 'diff', 'preproc') if os.path.isdir(pjoin()): dwi_fname = list(glob.glob(pjoin('mri', 'diff_preproc.nii*')))[0] noise_std = self._autoload_noise_std(subject_id, file_path=pjoin('noise_std')) bval_fname = pjoin('bvals.txt') if os.path.isfile(pjoin('diff_preproc.bval')): bval_fname = pjoin('diff_preproc.bval') bvec_fname = pjoin('bvecs_fsl_moco_norm.txt') if os.path.isfile(pjoin('diff_preproc.bvec')): bvec_fname = pjoin('diff_preproc.bvec') prtcl_fname = None if os.path.isfile(pjoin('diff_preproc.prtcl')): prtcl_fname = pjoin('diff_preproc.prtcl') mask_fname = None if list(glob.glob(pjoin('diff_preproc_mask.nii*'))): mask_fname = list(glob.glob(pjoin('diff_preproc_mask.nii*')))[0] if mask_fname is None: if list(glob.glob(pjoin('mri', 'diff_preproc_mask.nii*'))): mask_fname = list(glob.glob(pjoin('mri', 'diff_preproc_mask.nii*')))[0] protocol_loader = BatchFitProtocolLoader( pjoin(), protocol_fname=prtcl_fname, bvec_fname=bvec_fname, bval_fname=bval_fname, protocol_columns={'Delta': 21.8e-3, 'delta': 12.9e-3, 'TR': 8800e-3, 'TE': 57e-3}) output_dir = self._get_subject_output_dir(subject_id, mask_fname) subjects.append(SimpleSubjectInfo(subject_id, dwi_fname, protocol_loader, mask_fname, output_dir, noise_std=noise_std)) return subjects def __str__(self): return meta_info['title'] PKO\I5bK K 9mdt/data/components/standard/batch_profiles/HCP_WUMINN.pyimport glob import os import mdt from mdt.batch_utils import SimpleBatchProfile, SimpleSubjectInfo __author__ = 'Robbert Harms' __date__ = "2015-07-13" __maintainer__ = "Robbert Harms" __email__ = "robbert.harms@maastrichtuniversity.nl" meta_info = {'title': 'HCP WU-Minn', 'description': 'The profile for the WU-Minn data from the Human Connectome project', 'directory_layout': ''' This assumes that you downloaded and extracted the WU-Minn data in one folder which gives one folder per subject. You can provide the noise standard deviation to use using a noise_std file containing a single float. 
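# The MGH profile above prefers diff_preproc.bval/.bvec over the default bvals.txt /
# bvecs_fsl_moco_norm.txt whenever they are present. The "first existing file wins" pattern it
# repeats can be written as a small helper (hypothetical paths; not part of the MDT API):

import os

def first_existing(candidates, default=None):
    """Return the first existing file from the candidates, or the default."""
    for path in candidates:
        if os.path.isfile(path):
            return path
    return default

# e.g. bval = first_existing(['/data/mgh_1001/diff/preproc/diff_preproc.bval'],
#                            default='/data/mgh_1001/diff/preproc/bvals.txt')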
Example directory layout: /*/T1w/Diffusion/data.nii.gz /*/T1w/Diffusion/bvals /*/T1w/Diffusion/bvecs /*/T1w/Diffusion/nodif_brain_mask.nii.gz /*/T1w/Diffusion/grad_dev.nii.gz Optional items (these will take precedence if present): /*/T1w/Diffusion/data.bval /*/T1w/Diffusion/data.bvec /*/T1w/Diffusion/data.prtcl /*/T1w/Diffusion/data_mask.nii(.gz) /*/T1w/Diffusion/noise_std '''} class HCP_WUMINN(SimpleBatchProfile): def __init__(self): super(HCP_WUMINN, self).__init__() self.use_gradient_deviations = False self._output_base_dir = 'T1w/Diffusion/output' self._append_mask_name_to_output_sub_dir = False def _get_subjects(self): subjects = [] for subject_id in sorted([os.path.basename(f) for f in glob.glob(os.path.join(self._root_dir, '*'))]): pjoin = mdt.make_path_joiner(self._root_dir, subject_id, 'T1w', 'Diffusion') if os.path.isdir(pjoin()): subject_info = self._get_subject_in_directory(subject_id, pjoin) if subject_info: subjects.append(subject_info) return subjects def _get_subject_in_directory(self, subject_id, pjoin): """Get the information about the given subject in the given directory. Args: subject_id (str): the id of this subject we are loading pjoin (PathJoiner): the path joiner pointing to the directory of that subject Returns: SimpleSubjectInfo or None: the subject info for this particular subject """ noise_std = self._autoload_noise_std(subject_id, file_path=pjoin('noise_std')) protocol_loader = self._autoload_protocol( pjoin(), protocols_to_try=['data.prtcl'], bvals_to_try=['data.bval', 'bvals'], bvecs_to_try=['data.bvec', 'bvecs'], protocol_columns={'Delta': 43.1e-3, 'delta': 10.6e-3, 'TE': 89.5e-3, 'TR': 5520e-3}) mask_fname = self._get_first_existing_nifti(['data_mask', 'nodif_brain_mask'], prepend_path=pjoin()) output_dir = self._get_subject_output_dir(subject_id, mask_fname) return SimpleSubjectInfo(subject_id, pjoin('data'), protocol_loader, mask_fname, output_dir, gradient_deviations=pjoin('grad_dev'), use_gradient_deviations=self.use_gradient_deviations, noise_std=noise_std) def __str__(self): return meta_info['title'] PKHrIzp8mdt/data/components/standard/batch_profiles/SingleDir.pyimport glob import os import mdt from mdt.batch_utils import SimpleBatchProfile, BatchFitProtocolLoader, SimpleSubjectInfo from mdt.utils import split_image_path __author__ = 'Robbert Harms' __date__ = "2015-07-13" __maintainer__ = "Robbert Harms" __email__ = "robbert.harms@maastrichtuniversity.nl" meta_info = {'title': 'Single directory', 'description': 'All files in one directory, distinct name per subject.', 'directory_layout': ''' For every subject in the directory the base name must be equal to be recognized as belonging to one subject. Example directory layout: /a.nii.gz /a.bval /a.bvec /b.nii /b.prtcl /c.nii.gz /c.prtcl /c_mask.nii.gz This gives three subjects, 'a', 'b' and 'c' with a Diffusion Weighted Image recognized by the extension .nii(.gz) and a bvec and bval or protocol file for the protocol information. Subject 'c' also has a brain mask associated. If there is a file mask.nii(.gz) present in the directory it is used as the default mask for all the subjects in the single directory that do not have their own mask. You can provide the noise standard deviation to use using a noise_std file (per subject). This should either be a .txt file (global noise) or a nifti file (voxel wise noise). 
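# Each profile accepts an optional noise_std file that is either a text file holding a single
# float (global sigma) or a nifti volume (voxel-wise sigma). MDT resolves this through its own
# _autoload_noise_std helper; the sketch below only illustrates the idea and assumes nibabel is
# available for the nifti case:

import os
import numpy as np
import nibabel as nib

def load_noise_std(path_without_extension):
    """Return a float, a voxel-wise ndarray, or None when no noise_std file exists."""
    if os.path.isfile(path_without_extension + '.txt'):
        return float(np.loadtxt(path_without_extension + '.txt'))
    for ext in ('.nii', '.nii.gz'):
        if os.path.isfile(path_without_extension + ext):
            return nib.load(path_without_extension + ext).get_fdata()
    return None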
Optional items (all case sensitive): /.TE /.Delta /.delta /.noise_std.{txt,nii,nii.gz} '''} class SingleDir(SimpleBatchProfile): def _get_subjects(self): pjoin = mdt.make_path_joiner(self._root_dir) files = [os.path.basename(f) for f in glob.glob(pjoin('*'))] basenames = sorted(list({split_image_path(f)[1] for f in files})) subjects = [] protocol_options = ['TE', 'TR', 'Delta', 'delta', 'maxG'] default_mask = None if list(glob.glob(pjoin('mask.nii*'))): default_mask = list(glob.glob(pjoin('mask.nii*')))[0] for basename in basenames: dwi_fname = None if basename + '.nii' in files: dwi_fname = pjoin(basename + '.nii') elif basename + '.nii.gz' in files: dwi_fname = pjoin(basename + '.nii.gz') elif basename + '.hdr' in files and basename + '.img' in files: dwi_fname = pjoin(basename + '.hdr') noise_std = self._autoload_noise_std(basename, file_path=pjoin(basename + '.noise_std')) prtcl_fname = None if basename + '.prtcl' in files: prtcl_fname = pjoin(basename + '.prtcl') bval_fname = None if basename + '.bval' in files: bval_fname = pjoin(basename + '.bval') bvec_fname = None if basename + '.bvec' in files: bvec_fname = pjoin(basename + '.bvec') mask_fname = default_mask if basename + '_mask.nii' in files: mask_fname = pjoin(basename + '_mask.nii') elif basename + '_mask.nii.gz' in files: mask_fname = pjoin(basename + '_mask.nii.gz') extra_cols_from_file = {} for option in protocol_options: if basename + '.' + option in files: extra_cols_from_file.update({option: pjoin(basename + '.' + option)}) if dwi_fname and (prtcl_fname or (bval_fname and bvec_fname)): protocol_loader = BatchFitProtocolLoader( pjoin(), protocol_fname=prtcl_fname, bvec_fname=bvec_fname, bval_fname=bval_fname, protocol_columns=extra_cols_from_file) output_dir = self._get_subject_output_dir(basename, mask_fname, pjoin(self.output_base_dir, basename)) subjects.append(SimpleSubjectInfo(basename, dwi_fname, protocol_loader, mask_fname, output_dir, noise_std=noise_std)) return subjects def __str__(self): return meta_info['title'] PKaMrI;/ )44<mdt/data/components/standard/batch_profiles/DirPerSubject.pyimport glob import os import mdt from mdt.batch_utils import SimpleBatchProfile, BatchFitProtocolLoader, SimpleSubjectInfo __author__ = 'Robbert Harms' __date__ = "2015-07-13" __maintainer__ = "Robbert Harms" __email__ = "robbert.harms@maastrichtuniversity.nl" meta_info = {'title': 'Directory per subject', 'description': 'General layout for batch fitting with a folder per subject.', 'directory_layout': ''' Every subject has its own directory. For every type of file (protocol, bvec, bval, TE, Delta, delta, DWI, mask) we use the first one found. You can provide the noise standard deviation to use using a noise_std file containing a single float. Example directory layout: /a/*.nii(.gz) /a/*bval* /a/*bvec* /b/... /c/... Optional items: /*/prtcl /*/*.prtcl /*/TE (in seconds) /*/delta (in seconds) /*/Delta (in seconds) /*/*_mask.nii(.gz) /*/noise_std.{txt,nii,nii.gz} The optional items TE, Delta and delta provide extra information about the protocol. They should either contain exactly 1 value (for all protocol lines), or a value per protocol line. 
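# The SingleDir profile above groups a flat directory into subjects by base name, so that
# a.nii.gz, a.bval and a.bvec all belong to subject 'a'. A simplified stand-in for that grouping
# (split_image_path in MDT handles the extensions; the suffix list here is an assumption):

import glob
import os

def group_by_basename(directory):
    """Map each base name to the list of files sharing it in a flat directory."""
    subjects = {}
    for path in glob.glob(os.path.join(directory, '*')):
        name = os.path.basename(path)
        for ext in ('.nii.gz', '.nii', '.hdr', '.img', '.bval', '.bvec', '.prtcl'):
            if name.endswith(ext):
                name = name[:-len(ext)]
                break
        subjects.setdefault(name, []).append(path)
    return subjects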
'''} class DirPerSubject(SimpleBatchProfile): def __init__(self): super(DirPerSubject, self).__init__() self.use_gradient_deviations = False def _get_subjects(self): subjects = [] for subject_id in sorted([os.path.basename(f) for f in glob.glob(os.path.join(self._root_dir, '*'))]): pjoin = mdt.make_path_joiner(self._root_dir, subject_id) subject_info = self._get_subject_in_directory(subject_id, pjoin) if subject_info: subjects.append(subject_info) return subjects def _get_subject_in_directory(self, subject_id, pjoin): """Get the information about the given subject in the given directory. Args: subject_id (str): the id of this subject we are loading pjoin (PathJoiner): the path joiner pointing to the directory of that subject Returns: SimpleSubjectInfo or None: the subject info for this particular subject """ niftis = glob.glob(pjoin('*.nii*')) dwis = list(filter(lambda v: all(name not in v for name in ['_mask', 'grad_dev', 'noise_std']), niftis)) masks = list(filter(lambda v: '_mask' in v, niftis)) grad_devs = list(filter(lambda v: 'grad_dev' in v, niftis)) protocols = glob.glob(pjoin('*prtcl')) bvals = glob.glob(pjoin('*bval*')) bvecs = glob.glob(pjoin('*bvec*')) noise_std = self._autoload_noise_std(subject_id) if dwis: dwi_fname = dwis[0] mask_fnames = [pjoin('mask.nii')] mask_fnames.extend(masks) mask_fname = self._get_first_existing_nifti(mask_fnames) grad_dev = grad_devs[0] if grad_devs else None protocol_fname = protocols[0] if protocols else None bval_fname = bvals[0] if bvals else None bvec_fname = bvecs[0] if bvecs else None if dwi_fname and (protocol_fname or (bval_fname and bvec_fname)): protocol_loader = BatchFitProtocolLoader( os.path.join(self._root_dir, subject_id), protocol_fname=protocol_fname, bvec_fname=bvec_fname, bval_fname=bval_fname) output_dir = self._get_subject_output_dir(subject_id, mask_fname) return SimpleSubjectInfo(subject_id, dwi_fname, protocol_loader, mask_fname, output_dir, gradient_deviations=grad_dev, use_gradient_deviations=self.use_gradient_deviations, noise_std=noise_std) return None def __str__(self): return meta_info['title'] PKjUpI(YGmdt/data/components/standard/library_functions/NeumannCylPerpPGSESum.pyfrom pkg_resources import resource_filename from mot.cl_data_type import CLDataType from mot.model_building.cl_functions.base import LibraryFunction from mot.model_building.cl_functions.parameters import LibraryParameter __author__ = 'Robbert Harms' __date__ = "2015-06-21" __maintainer__ = "Robbert Harms" __email__ = "robbert.harms@maastrichtuniversity.nl" class NeumannCylPerpPGSESum(LibraryFunction): def __init__(self): super(NeumannCylPerpPGSESum, self).__init__( 'double', 'NeumannCylPerpPGSE', (LibraryParameter(CLDataType.from_string('mot_float_type'), 'Delta'), LibraryParameter(CLDataType.from_string('mot_float_type'), 'delta'), LibraryParameter(CLDataType.from_string('mot_float_type'), 'd'), LibraryParameter(CLDataType.from_string('mot_float_type'), 'R')), resource_filename(__name__, 'NeumannCylPerpPGSESum.h'), resource_filename(__name__, 'NeumannCylPerpPGSESum.cl'), {}, ()) PKtVqI3IIFmdt/data/components/standard/library_functions/NeumannCylPerpPGSESum.h#ifndef NEUMANN_CYL_PERP_PGSE_SUM_H #define NEUMANN_CYL_PERP_PGSE_SUM_H /** * Author = Robbert Harms * Date = 2014-02-01 * License = LGPL v3 * Maintainer = Robbert Harms * Email = robbert.harms@maastrichtuniversity.nl */ /** * This function returns the summation of the signal attenuation in perpendicular direction (LePerp) * for Radius R, according to the Neumann model. 
* * The summation is the sum over the Bessel roots up to a accuracy of 1e-8, it does not * calculate the complete signal for a cylinder dMRI compartment model. */ mot_float_type NeumannCylPerpPGSESum(const mot_float_type Delta, const mot_float_type delta, const mot_float_type d, const mot_float_type R); #endif //NEUMANN_CYL_PERP_PGSE_SUM_H PKHjGmdt/data/components/standard/library_functions/NeumannCylPerpPGSESum.cl#ifndef NEUMANN_CYL_PERP_PGSE_SUM_CL #define NEUMANN_CYL_PERP_PGSE_SUM_CL /** * Author = Robbert Harms * Date = 2014-02-01 * License = LGPL v3 * Maintainer = Robbert Harms * Email = robbert.harms@maastrichtuniversity.nl */ /** * See the header definition for explanation */ mot_float_type NeumannCylPerpPGSESum(const mot_float_type Delta, const mot_float_type delta, const mot_float_type d, const mot_float_type R){ if(R == 0.0 || R < MOT_EPSILON){ return 0; } const mot_float_type cl_jnp_zeros[] = { 1.84118378, 5.33144277, 8.53631637, 11.7060049 , 14.86358863, 18.01552786, 21.16436986, 24.31132686, 27.45705057, 30.60192297, 33.7461829 , 36.88998741, 40.03344405, 43.17662897, 46.31959756, 49.46239114, 52.60504111, 55.74757179, 58.8900023 , 62.03234787 }; const int cl_jnp_zeros_length = 20; double sum = 0; mot_float_type dam; mot_float_type amrdiv; for(int i = 0; i < cl_jnp_zeros_length; i++){ amrdiv = cl_jnp_zeros[i] / R; dam = d * amrdiv * amrdiv; sum += (2 * dam * delta - 2 + (2 * exp(-dam * delta)) + (2 * exp(-dam * Delta)) - exp(-dam * (Delta - delta)) - exp(-dam * (Delta + delta))) / ((dam * amrdiv * dam * amrdiv) * ((R * amrdiv * R * amrdiv) - 1)); } return (mot_float_type)sum; } #endif //NEUMANN_CYL_PERP_PGSE_SUM_CL PKǣxI~rr=mdt/data/components/standard/library_functions/MRIConstants.h/** * Author = Robbert Harms * Date = 2014-09-21 * License = LGPL v3 * Maintainer = Robbert Harms * Email = robbert.harms@maastrichtuniversity.nl */ /** * This file provides a list of MRI specific constants used throughout the compartment CL functions. */ /** * Gamma represent the gyromagnetic ratio of protons in water (nucleus of H) * and are in units of (rad s^-1 T^-1) **/ #ifndef GAMMA_H #define GAMMA_H 267.5987E6 #endif /** * The same GAMMA as defined above but then divided by 2*pi, in units of s^-1 T^-1 **/ #ifndef GAMMA_H_HZ #define GAMMA_H_HZ (GAMMA_H / (2 * M_PI)) #endif /** * The square of the original GAMMA defined above */ #ifndef GAMMA_H_SQ #define GAMMA_H_SQ (GAMMA_H * GAMMA_H) #endif /** * The square of the GAMMA divided by 2*pi from above */ #ifndef GAMMA_H_HZ_SQ #define GAMMA_H_HZ_SQ (GAMMA_H_HZ * GAMMA_H_HZ) #endif PKH`η>mdt/data/components/standard/library_functions/MRIConstants.cl/** * Author = Robbert Harms * Date = 2014-09-21 * License = LGPL v3 * Maintainer = Robbert Harms * Email = robbert.harms@maastrichtuniversity.nl */ /** * A stub for consistency. See MRIConstants.h for the definitions. 
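# A direct numpy transcription of the Bessel-root summation in NeumannCylPerpPGSESum.cl above,
# useful for checking the kernel offline (illustrative only; the CL version is what MDT compiles):

import numpy as np

# first twenty roots of J1'(x), the same table as in the CL code
JNP_ZEROS = np.array([
    1.84118378, 5.33144277, 8.53631637, 11.7060049, 14.86358863,
    18.01552786, 21.16436986, 24.31132686, 27.45705057, 30.60192297,
    33.7461829, 36.88998741, 40.03344405, 43.17662897, 46.31959756,
    49.46239114, 52.60504111, 55.74757179, 58.8900023, 62.03234787])

def neumann_cyl_perp_pgse_sum(Delta, delta, d, R):
    """Summation over the Bessel roots for the perpendicular GPD attenuation of a cylinder."""
    if R <= 0.0:
        return 0.0
    am = JNP_ZEROS / R
    dam = d * am ** 2
    num = (2 * dam * delta - 2
           + 2 * np.exp(-dam * delta) + 2 * np.exp(-dam * Delta)
           - np.exp(-dam * (Delta - delta)) - np.exp(-dam * (Delta + delta)))
    den = (dam * am) ** 2 * ((R * am) ** 2 - 1)
    return float(np.sum(num / den))

# protocol values in SI units: Delta/delta in s, d in m^2/s, R in m
print(neumann_cyl_perp_pgse_sum(Delta=43.1e-3, delta=10.6e-3, d=1.7e-9, R=2e-6))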
*/ PKjUpI \++>mdt/data/components/standard/library_functions/MRIConstants.pyfrom pkg_resources import resource_filename from mot.model_building.cl_functions.base import LibraryFunction __author__ = 'Robbert Harms' __date__ = "2015-06-21" __maintainer__ = "Robbert Harms" __email__ = "robbert.harms@maastrichtuniversity.nl" class MRIConstants(LibraryFunction): def __init__(self): super(MRIConstants, self).__init__( '', '', (), resource_filename(__name__, 'MRIConstants.h'), resource_filename(__name__, 'MRIConstants.cl'), {}, ()) PKG|IO5mdt/data/components/standard/cascade_models/Tensor.pyimport numpy as np from mdt.models.cascade import CascadeConfig __author__ = 'Robbert Harms' __date__ = "2015-06-22" __maintainer__ = "Robbert Harms" __email__ = "robbert.harms@maastrichtuniversity.nl" class Tensor(CascadeConfig): name = 'Tensor (Cascade)' description = 'Cascade for Tensor.' models = ('BallStick_r1 (Cascade)', 'Tensor') inits = {'Tensor': [('Tensor.theta', 'Stick.theta'), ('Tensor.phi', 'Stick.phi')]} class TensorFixed(CascadeConfig): name = 'Tensor (Cascade|fixed)' description = 'Cascade for Tensor with fixed angles.' models = ('BallStick_r1 (Cascade)', 'Tensor') fixes = {'Tensor': [('Tensor.theta', 'Stick.theta'), ('Tensor.phi', 'Stick.phi')]} class TensorS0(CascadeConfig): name = 'Tensor (Cascade|S0)' description = 'Cascade for Tensor initialized with only an S0 fit.' models = ('S0', 'Tensor') class TensorExVivo(Tensor): name = 'Tensor-ExVivo (Cascade)' description = 'Cascade for Tensor with ex vivo defaults.' models = ('BallStick_r1-ExVivo (Cascade)', 'Tensor-ExVivo') PKjUpI쿭6mdt/data/components/standard/cascade_models/CHARMED.pyfrom mdt.models.cascade import CascadeConfig __author__ = 'Robbert Harms' __date__ = "2015-06-22" __maintainer__ = "Robbert Harms" __email__ = "robbert.harms@maastrichtuniversity.nl" class CHARMED1(CascadeConfig): name = 'CHARMED_r1 (Cascade)' description = 'Initializes the directions to Ball & Stick.' models = ('BallStick_r1 (Cascade)', 'CHARMED_r1') inits = {'CHARMED_r1': [('CHARMEDRestricted0.theta', 'Stick.theta'), ('CHARMEDRestricted0.phi', 'Stick.phi'), ('Tensor.theta', 'Stick.theta'), ('Tensor.phi', 'Stick.phi'), ('w_res0.w', 'w_stick.w')]} class CHARMEDR1S0(CascadeConfig): name = 'CHARMED_r1 (Cascade|S0)' description = 'Cascade for CHARMED r1 initialized with only an S0 fit.' models = ('S0', 'CHARMED_r1') class CHARMEDR1Fixed(CascadeConfig): name = 'CHARMED_r1 (Cascade|fixed)' description = 'Fixes the directions to Ball & Stick.' models = ('BallStick_r1 (Cascade)', 'CHARMED_r1') inits = {'CHARMED_r1': [('Tensor.theta', 'Stick.theta'), ('Tensor.phi', 'Stick.phi'), ('w_res0.w', 'w_stick.w')]} fixes = {'CHARMED_r1': [('CHARMEDRestricted0.theta', 'Stick.theta'), ('CHARMEDRestricted0.phi', 'Stick.phi')]} class CHARMEDR2(CascadeConfig): name = 'CHARMED_r2 (Cascade)' description = 'Initializes the directions to 2x Ball & Stick.' models = ('BallStick_r2 (Cascade)', 'CHARMED_r2') inits = {'CHARMED_r2': [('Tensor.theta', 'Stick0.theta'), ('Tensor.phi', 'Stick0.phi'), ('CHARMEDRestricted0.theta', 'Stick0.theta'), ('CHARMEDRestricted0.phi', 'Stick0.phi'), ('CHARMEDRestricted1.theta', 'Stick1.theta'), ('CHARMEDRestricted1.phi', 'Stick1.phi'), ('w_res0.w', 'w_stick0.w'), ('w_res1.w', 'w_stick1.w')]} class CHARMEDR2S0(CascadeConfig): name = 'CHARMED_r2 (Cascade|S0)' description = 'Initializes with only an S0 fit.' models = ('S0', 'CHARMED_r2') class CHARMEDR2Fixed(CascadeConfig): name = 'CHARMED_r2 (Cascade|fixed)' description = 'Fixes the directions to 2x Ball & Stick.' 
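# The cascade configurations above express their inits and fixes as (target, source) pairs where
# the source is either the name of a map from the previous fit, a literal value, or a callable of
# the previous output. Resolving such a specification against a results dictionary is essentially
# the following (hypothetical maps, not the MDT cascade runner itself):

import numpy as np

def resolve_inits(init_spec, output_previous, output_all_previous=None):
    """Turn an inits list into concrete initialisation values for the next model in the cascade."""
    resolved = {}
    for target, source in init_spec:
        if callable(source):
            resolved[target] = source(output_previous, output_all_previous)
        elif isinstance(source, str):
            resolved[target] = output_previous[source]
        else:
            resolved[target] = source                 # literal value, e.g. 0.0
    return resolved

previous = {'Stick.theta': np.array([0.3]), 'Stick.phi': np.array([1.2]), 'w_stick.w': np.array([0.6])}
print(resolve_inits([('Tensor.theta', 'Stick.theta'), ('Tensor.phi', 'Stick.phi'),
                     ('w_stick1.w', 0.0)], previous))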
models = ('BallStick_r2 (Cascade)', 'CHARMED_r2') inits = {'CHARMED_r2': [('Tensor.theta', 'Stick0.theta'), ('Tensor.phi', 'Stick0.phi'), ('w_res0.w', 'w_stick0.w'), ('w_res1.w', 'w_stick1.w')]} fixes = {'CHARMED_r2': [('CHARMEDRestricted0.theta', 'Stick0.theta'), ('CHARMEDRestricted0.phi', 'Stick0.phi'), ('CHARMEDRestricted1.theta', 'Stick1.theta'), ('CHARMEDRestricted1.phi', 'Stick1.phi'), ]} class CHARMED_r3(CascadeConfig): name = 'CHARMED_r3 (Cascade)' description = 'Initializes the directions to 3x Ball & Stick.' models = ('BallStick_r3 (Cascade)', 'CHARMED_r3') inits = {'CHARMED_r3': [('Tensor.theta', 'Stick0.theta'), ('Tensor.phi', 'Stick0.phi'), ('w_res0.w', 'w_stick0.w'), ('w_res1.w', 'w_stick1.w'), ('w_res2.w', 'w_stick2.w'), ('CHARMEDRestricted0.theta', 'Stick0.theta'), ('CHARMEDRestricted0.phi', 'Stick0.phi'), ('CHARMEDRestricted1.theta', 'Stick1.theta'), ('CHARMEDRestricted1.phi', 'Stick1.phi'), ('CHARMEDRestricted2.theta', 'Stick2.theta'), ('CHARMEDRestricted2.phi', 'Stick2.phi'), ]} class CHARMED_r3_S0(CascadeConfig): name = 'CHARMED_r3 (Cascade|S0)' description = 'Initializes with only an S0 fit.' models = ('S0', 'CHARMED_r3') class CHARMED_r3_Fixed(CascadeConfig): name = 'CHARMED_r3 (Cascade|fixed)' description = 'Fixes the directions to 3x Ball & Stick.' models = ('BallStick_r3 (Cascade)', 'CHARMED_r3') inits = {'CHARMED_r3': [('Tensor.theta', 'Stick0.theta'), ('Tensor.phi', 'Stick0.phi'), ('w_res0.w', 'w_stick0.w'), ('w_res1.w', 'w_stick1.w'), ('w_res2.w', 'w_stick2.w')]} fixes = {'CHARMED_r3': [('CHARMEDRestricted0.theta', 'Stick0.theta'), ('CHARMEDRestricted0.phi', 'Stick0.phi'), ('CHARMEDRestricted1.theta', 'Stick1.theta'), ('CHARMEDRestricted1.phi', 'Stick1.phi'), ('CHARMEDRestricted2.theta', 'Stick2.theta'), ('CHARMEDRestricted2.phi', 'Stick2.phi')]} PKdIX2//9mdt/data/components/standard/cascade_models/BallSticks.pyimport numpy as np from mdt.models.cascade import CascadeConfig __author__ = 'Robbert Harms' __date__ = "2015-06-22" __maintainer__ = "Robbert Harms" __email__ = "robbert.harms@maastrichtuniversity.nl" class BallStick_r1(CascadeConfig): name = 'BallStick_r1 (Cascade)' description = 'Cascade for Ballstick' models = ('S0', 'BallStick_r1') lower_bounds = {'BallStick_r1': [ ('S0.s0', lambda output_previous, output_all_previous: 2 * np.min(output_previous['S0.s0'])) ]} upper_bounds = {'BallStick_r1': [ ('S0.s0', lambda output_previous, output_all_previous: 2 * np.max(output_previous['S0.s0'])) ]} class BallStick_r1_S0(BallStick_r1): name = 'BallStick_r1 (Cascade|S0)' class BallStick_r1_ExVivo(BallStick_r1): name = 'BallStick_r1-ExVivo (Cascade)' description = 'Cascade for Ballstick with ex vivo defaults.' models = ('S0', 'BallStick_r1-ExVivo') class BallStick_r2(CascadeConfig): name = 'BallStick_r2 (Cascade)' description = 'Cascade for BallStick_r2.' models = ('BallStick_r1 (Cascade)', 'BallStick_r2') inits = {'BallStick_r2': [('Stick0.theta', 'Stick.theta'), ('Stick0.phi', 'Stick.phi'), ('w_stick0.w', 'w_stick.w'), ('w_stick1.w', 0.0)]} class BallStick_r2_ExVivo(BallStick_r2): name = 'BallStick_r2-ExVivo (Cascade)' description = 'Cascade for BallStick_r2 with ex vivo defaults.' models = ('BallStick_r1-ExVivo (Cascade)', 'BallStick_r2-ExVivo') class BallStick_r3(CascadeConfig): name = 'BallStick_r3 (Cascade)' description = 'Cascade for BallStick_r3.' 
models = ('BallStick_r2 (Cascade)', 'BallStick_r3') inits = {'BallStick_r3': [('w_stick2.w', 0.0)]} class BallStick_r3_ExVivo(BallStick_r3): name = 'BallStick_r3-ExVivo (Cascade)' description = 'Cascade for BallStick_r3 with ex vivo defaults.' models = ('BallStick_r2-ExVivo (Cascade)', 'BallStick_r3-ExVivo') PK@qI|4mdt/data/components/standard/cascade_models/NODDI.pyfrom mdt.models.cascade import CascadeConfig __author__ = 'Robbert Harms' __date__ = "2015-06-22" __maintainer__ = "Robbert Harms" __email__ = "robbert.harms@maastrichtuniversity.nl" class NODDI(CascadeConfig): name = 'NODDI (Cascade)' description = 'Cascade for NODDI initialized from Ball&Stick.' models = ('BallStick_r1 (Cascade)', 'NODDI') inits = {'NODDI': [('w_ic.w', lambda output_previous, _: output_previous['w_stick.w'] / 2.0), ('w_ec.w', lambda output_previous, _: output_previous['w_stick.w'] / 2.0), ('w_csf.w', 'w_ball.w'), ('NODDI_IC.theta', 'Stick.theta'), ('NODDI_IC.phi', 'Stick.phi')]} class NODDI_S0(CascadeConfig): name = 'NODDI (Cascade|S0)' description = 'Cascade for NODDI initialized with only an S0 fit.' models = ('S0', 'NODDI') class NODDI_Fixed(CascadeConfig): name = 'NODDI (Cascade|fixed)' description = 'Cascade for NODDI with fixed directions from Ball&Stick.' models = ('BallStick_r1 (Cascade)', 'NODDI') inits = {'NODDI': [('w_ic.w', lambda output_previous, _: output_previous['w_stick.w'] / 2.0), ('w_ec.w', lambda output_previous, _: output_previous['w_stick.w'] / 2.0), ('w_csf.w', 'w_ball.w')]} fixes = {'NODDI': [('NODDI_IC.theta', 'Stick.theta'), ('NODDI_IC.phi', 'Stick.phi')]} PKjUpI5@B=mdt/data/components/standard/compartment_models/LinMPM_Fit.pyfrom mdt.models.compartments import CompartmentConfig __author__ = 'Francisco.Lagos' """MPM fitting (Weiskopf, 2016 ESMRMB Workshop) This fitting is a model published by Helms (2008) and Weiskopf (2011) to determinate biological properties of the tissue/sample in function *of several images*, which includes T1w, PDw and MTw images. This function is still an approximation and, if the assumptions of those approximations hold for ex-vivo tissue, then can be used in this data. """ class LinMPM_Fit(CompartmentConfig): parameter_list = ('TR', 'flip_angle', 'b1_static', 'T1') cl_code = 'return log(flip_angle * b1_static) + log(TR / T1) - log( pown(flip_angle * b1_static, 2) / 2 + ( TR / T1 ) ) ;' PKXqI@iBmdt/data/components/standard/compartment_models/ExpT1ExpT2STEAM.pyfrom mdt.models.compartments import CompartmentConfig __author__ = 'Francisco.Lagos' # From protocol, if the signal is SE, we can setup TM = 0 in all the volumes, # which returns to the standard SE signal decay class ExpT1ExpT2STEAM(CompartmentConfig): """Generalised STEAM equation. From protocol, if the signal is SE, we can setup TM = 0 in all the volumes, which returns to the standard SE signal decay This equation can be used to calculate relaxation time (T1/T2) from spin echo (SE) and/or stimulated spin echo (STE) data. It is important to notice that in the protocol you have to define some parameters in a specific way: (1) For SE data, the original equation contains only the first refocusing pulse variable, but half of this value and in the power of two (sin(Refoc_fa1/2)**2). For that it is needed to define Refoc_fa2 = Refoc_fa1 and Refoc_fa1 has to be HALF of the used FA in the protocol (then, also Refoc_fa2). Also, the 0.5 factor is not included, then SEf (Spin echo flag) should be 0. Finally, TM (mixing time) has to be 0. (2) For STE data, this equation is used totally. Just SEf = 1. 
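# Written out in numpy, the attenuation this docstring describes (and that the cl_code directly
# below implements) is, with angles in radians and times in seconds:

import numpy as np

def steam_signal(SEf, TM, TE, flip_angle, refoc_fa1, refoc_fa2, T1, T2):
    """Generalised STEAM/SE decay: 0.5**SEf * sin(fa) * sin(fa1) * sin(fa2) * exp(-TE/T2) * exp(-TM/T1)."""
    return (0.5 ** SEf * np.sin(flip_angle) * np.sin(refoc_fa1) * np.sin(refoc_fa2)
            * np.exp(-TE / T2) * np.exp(-TM / T1))

# spin-echo case: SEf = 0, TM = 0, and a 180 degree refocusing pulse entered as two halves of
# pi/2, as the docstring above prescribes for SE data
print(steam_signal(SEf=0, TM=0, TE=70e-3, flip_angle=np.pi / 2,
                   refoc_fa1=np.pi / 2, refoc_fa2=np.pi / 2, T1=1.0, T2=0.08))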
""" parameter_list = ('SEf', 'TM', 'TE', 'flip_angle', 'Refoc_fa1', 'Refoc_fa2', 'T1', 'T2') cl_code = """ return pow(0.5, SEf) * sin(flip_angle) * sin(Refoc_fa1) * sin(Refoc_fa2) * exp(-TE / T2) * exp(-TM / T1); """ PKpIPx0 0 9mdt/data/components/standard/compartment_models/Tensor.pyimport numpy as np import mdt from mdt.components_loader import bind_function from mdt.models.compartments import CompartmentConfig from mdt.cl_routines.mapping.dti_measures import DTIMeasures from mdt.utils import eigen_vectors_from_tensor __author__ = 'Robbert Harms' __date__ = "2015-06-21" __maintainer__ = "Robbert Harms" __email__ = "robbert.harms@maastrichtuniversity.nl" class Tensor(CompartmentConfig): parameter_list = ('g', 'b', 'd', 'dperp0', 'dperp1', 'theta', 'phi', 'psi') @bind_function def get_extra_results_maps(self, results_dict): eigen_vectors = eigen_vectors_from_tensor(results_dict[self.name + '.theta'], results_dict[self.name + '.phi'], results_dict[self.name + '.psi']) eigen_values = Tensor.create_eigen_values_matrix([results_dict[self.name + '.d'], results_dict[self.name + '.dperp0'], results_dict[self.name + '.dperp1']]) ranking = Tensor.get_ranking_matrix(eigen_values) voxels_range = np.arange(ranking.shape[0]) sorted_eigen_values = np.concatenate([eigen_values[voxels_range, ranking[:, ind], None] for ind in range(ranking.shape[1])], axis=1) sorted_eigen_vectors = np.concatenate([eigen_vectors[voxels_range, ranking[:, ind], None, :] for ind in range(ranking.shape[1])], axis=1) fa, md = DTIMeasures().calculate(eigen_values) extra_maps = {self.name + '.eigen_ranking': ranking, self.name + '.FA': fa, self.name + '.MD': md, self.name + '.AD': sorted_eigen_values[:, 0], self.name + '.RD': (sorted_eigen_values[:, 1] + sorted_eigen_values[:, 2]) / 2.0} for ind in range(3): extra_maps.update({self.name + '.vec' + repr(ind): eigen_vectors[:, ind, :], self.name + '.sorted_vec' + repr(ind): sorted_eigen_vectors[:, ind, :], self.name + '.sorted_eigval{}'.format(ind): sorted_eigen_values[:, ind]}) for dimension in range(3): extra_maps.update({self.name + '.vec' + repr(ind) + '_' + repr(dimension): eigen_vectors[:, ind, dimension], self.name + '.sorted_vec' + repr(ind) + '_' + repr(dimension): sorted_eigen_vectors[:, ind, dimension] }) return extra_maps @staticmethod def ensure_2d(array): if len(array.shape) < 2: return array[None, :] return array @staticmethod def create_eigen_values_matrix(diffusivities): return Tensor.ensure_2d(np.squeeze(np.dstack(diffusivities))) @staticmethod def get_ranking_matrix(eigen_values): return Tensor.ensure_2d(np.squeeze(np.argsort(eigen_values, axis=1)[:, ::-1])) PK]uI;?mdt/data/components/standard/compartment_models/GDRCylinders.pyfrom mdt.models.compartments import CompartmentConfig from mdt.components_loader import CompartmentModelsLoader, bind_function __author__ = 'Robbert Harms' __date__ = "2015-06-21" __maintainer__ = "Robbert Harms" __email__ = "robbert.harms@maastrichtuniversity.nl" class GDRCylinders(CompartmentConfig): parameter_list = ('g', 'G', 'Delta', 'delta', 'd', 'theta', 'phi', 'gamma_k', 'gamma_beta', 'gamma_nmr_cyl') dependency_list = (CompartmentModelsLoader().load('CylinderGPD'),) @bind_function def get_extra_results_maps(self, results_dict): return self._get_vector_result_maps(results_dict[self.name + '.theta'], results_dict[self.name + '.phi']) PKHaB__<mdt/data/components/standard/compartment_models/SphereGPD.cl/** * Author = Robbert Harms * Date = 2014-02-05 * License = LGPL v3 * Maintainer = Robbert Harms * Email = 
robbert.harms@maastrichtuniversity.nl */ mot_float_type cmSphereGPD(const mot_float_type Delta, const mot_float_type delta, const mot_float_type d, const mot_float_type R){ const mot_float_type cl_jnp_zeros[] = { 1.84118378, 5.33144277, 8.53631637, 11.7060049 , 14.86358863, 18.01552786, 21.16436986, 24.31132686, 27.45705057, 30.60192297, 33.7461829 , 36.88998741, 40.03344405, 43.17662897, 46.31959756, 49.46239114, 52.60504111, 55.74757179, 58.8900023 , 62.03234787 }; const int cl_jnp_zeros_length = 20; mot_float_type sum = 0; mot_float_type dam; mot_float_type amrdiv; // The summation below differs from that of CylinderGPD by having a -2 instead of a -1 in the denominator. for(int i = 0; i < cl_jnp_zeros_length; i++){ amrdiv = cl_jnp_zeros[i] / R; dam = d * pown(amrdiv, 2); sum += (2 * dam * delta - 2 + (2 * exp(-dam * delta)) + (2 * exp(-dam * Delta)) - exp(-dam * (Delta - delta)) - exp(-dam * (Delta + delta))) / (pown(dam * amrdiv, 2) * (pown(R * amrdiv, 2) - 2)); } return exp(-2 * GAMMA_H_SQ * pown(G, 2) * sum); } PKH3ODD?mdt/data/components/standard/compartment_models/GDRCylinders.cl/** * Author = Robbert Harms * Date = 2014-02-05 * License = LGPL v3 * Maintainer = Robbert Harms * Email = robbert.harms@maastrichtuniversity.nl */ /** Small number constant used in continued fraction gamma evaluation */ #define GDRCYL_FPMIN 1E-30 /** Small number constant used in gamma series evaluation */ #define GDRCYL_EPS 3E-7 /** Max number of iterations in series evaluation */ #define GDRCYL_ITMAX 100 mot_float_type gammaCDF(const mot_float_type k, const mot_float_type theta, const mot_float_type x); mot_float_type gammp(const mot_float_type a, const mot_float_type x); mot_float_type gser(const mot_float_type a, const mot_float_type x); mot_float_type gcf(const mot_float_type a, const mot_float_type x); mot_float_type findGammaCDFCrossing(mot_float_type startx, mot_float_type stopx, const mot_float_type offset, const mot_float_type convergence, const mot_float_type gamma_k, const mot_float_type gamma_beta); mot_float_type cmGDRCylinders(const mot_float_type4 g, const mot_float_type G, const mot_float_type Delta, const mot_float_type delta, const mot_float_type d, const mot_float_type theta, const mot_float_type phi, const mot_float_type gamma_k, const mot_float_type gamma_beta, const mot_float_type gamma_nmr_cyl){ int nmr_cyl = round(gamma_nmr_cyl); mot_float_type lower = findGammaCDFCrossing(0, gamma_beta*gamma_k, 1.0/nmr_cyl, 1e-20, gamma_k, gamma_beta); mot_float_type upper = findGammaCDFCrossing(lower, nmr_cyl*gamma_beta*gamma_k, (1-1.0/nmr_cyl), 1e-20, gamma_k, gamma_beta); mot_float_type binWidth = (upper-lower)/nmr_cyl; mot_float_type gamma_cyl_weight = 0; mot_float_type gamma_cyl_radius = 0; mot_float_type signal = 0; for(int i = 0; i < nmr_cyl; i++){ gamma_cyl_radius = lower + (i+0.5)*binWidth; gamma_cyl_weight = (gammaCDF(gamma_k, gamma_beta, lower + (i+1)*binWidth) - gammaCDF(gamma_k, gamma_beta, lower + i*binWidth)) / (1 - (2.0/nmr_cyl)); signal += gamma_cyl_weight * cmCylinderGPD(g, G, Delta, delta, d, theta, phi, gamma_cyl_radius); } return signal; } /** * Taken from Camino * Calculates the cumulative Gamma function up to the value given * * @param k gamma shape param * @param theta gamma scale param * @param x top end upper limit of integral * * @return gamma(x/theta)/Gamma(k) * gamma(k, z)= incomplete gamma fn * Gamma(k)= gamma function * * */ mot_float_type gammaCDF(const mot_float_type k, const mot_float_type theta, const mot_float_type x){ return gammp(k, x/theta); } /** * Taken 
from Camino * Returns the incomplete gamma function P(a; x). * see NRC p. 218. */ mot_float_type gammp(const mot_float_type a, const mot_float_type x){ if(x<0.0 || a <= 0.0){ return NAN; } if(x < (a + 1.0)){ return gser(a, x); } return 1.0 - gcf(a, x); } /** * Returns the incomplete gamma function P(a; x) evaluated by its * series representation as gamser. */ mot_float_type gser(const mot_float_type a, const mot_float_type x){ mot_float_type sum; mot_float_type del; mot_float_type ap; if(x <= 0.0){ if (x < 0.0){ return NAN; } return 0.0; } else{ ap=a; del = sum = 1.0 / a; for(int n = 1; n <= GDRCYL_ITMAX; n++){ ++ap; del *= x/ap; sum += del; if(fabs(del) < fabs(sum) * GDRCYL_EPS){ return sum*exp(-x + a * log(x) - lgamma(a)); } } } return NAN; } /* * Returns the incomplete gamma function Q(a; x) evaluated by its continued * fraction representation. */ mot_float_type gcf(const mot_float_type a, const mot_float_type x){ int i; mot_float_type an,b,c,d,del,h; //Set up for evaluating continued fraction by modified Lentz's method (x5.2) with b0 = 0. b=x+1.0-a; c=1.0/GDRCYL_FPMIN; d=1.0/b; h=d; for(i=1; i<=GDRCYL_ITMAX; i++){ an = -i*(i-a); b += 2.0; d=an*d+b; if(fabs(d) < GDRCYL_FPMIN){ d=GDRCYL_FPMIN; } c=b+an/c; if(fabs(c) < GDRCYL_FPMIN){ c=GDRCYL_FPMIN; } d=1.0/d; del=d*c; h *= del; if(fabs(del-1.0) < GDRCYL_EPS){ break; } } if(i > GDRCYL_ITMAX) return NAN; return exp(-x+a*log(x)-lgamma(a))*h; } //Using Brent root finding to determine cdfs mot_float_type findGammaCDFCrossing(mot_float_type startx, mot_float_type stopx, const mot_float_type offset, const mot_float_type convergence, const mot_float_type gamma_k, const mot_float_type gamma_beta){ mot_float_type fstartx = gammaCDF(gamma_k, gamma_beta, startx) - offset; mot_float_type fstopx = gammaCDF(gamma_k, gamma_beta, stopx) - offset; mot_float_type delta = fabs(stopx-startx); if(fstartx * fstopx > 0){ if (fstartx>0){ fstartx = gammaCDF(gamma_k, gamma_beta, 0) - offset; } else if (fstopx<0){ fstopx = gammaCDF(gamma_k, gamma_beta, stopx/gamma_k) - offset; } else{ return NAN; } } mot_float_type root = startx; mot_float_type froot = fstartx; bool mflag=1; mot_float_type s = 0; mot_float_type de = 0; while(!(delta < convergence || fstartx == 0 || fstopx == 0)){ if (fstartx != froot && fstopx != froot){ //inverse interpolation s = startx * fstopx * froot / ((fstartx-fstopx)*(fstartx-froot)); s += stopx * fstartx * froot / ((fstopx-fstartx)*(fstopx-froot)); s += root * fstartx * fstopx / ((froot-fstartx)*(froot-stopx)); } else{ //secant method s = stopx - fstopx * (stopx-startx) / (fstopx-fstartx); } //bisection if( !((s >= (3*startx+stopx)/4) && (s<=stopx) || (s<=(3*startx+stopx)/4) && (s>stopx)) || mflag && (fabs(s-stopx) >= fabs(stopx-root)/2) || !mflag && (fabs(s-stopx) >= fabs(root-de)/2) || mflag && (fabs(stopx-root) < delta) || !mflag && (fabs(root-delta) < delta)){ s = (startx + stopx) / 2; mflag=1; } else{ mflag=0; } mot_float_type fs=gammaCDF(gamma_k, gamma_beta, s) - offset; de=root; root=stopx; froot=fstopx; if ((fstartx * fs) < 0){ stopx=s; fstopx=fs; } else{ startx = s; fstartx = fs; } if (fabs(fstartx) < fabs(fstopx)){ //swap startx and stopx mot_float_type tmp=stopx; mot_float_type ftmp=fstopx; stopx=startx; fstopx=fstartx; startx=tmp; fstartx=ftmp; } delta=fabs(stopx-startx); } return s; } #undef GDRCYL_FPMIN #undef GDRCYL_EPS #undef GDRCYL_ITMAX PKjUpI*ץ //;mdt/data/components/standard/compartment_models/ExpT2Dec.pyfrom mdt.models.compartments import CompartmentConfig __author__ = 'Robbert Harms' __date__ = "2015-06-21" 
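# The cmGDRCylinders kernel above integrates over a gamma distribution of radii by finding the
# CDF crossings at 1/N and 1 - 1/N, splitting that interval into N bins, and weighting each bin
# centre by its (renormalised) CDF mass. The same binning with scipy.stats (illustrative; the CL
# code carries its own incomplete-gamma and root-finding routines):

import numpy as np
from scipy.stats import gamma

def gdr_bins(gamma_k, gamma_beta, nmr_cyl):
    """Return bin-centre radii and weights mirroring the binning in cmGDRCylinders."""
    dist = gamma(a=gamma_k, scale=gamma_beta)
    lower = dist.ppf(1.0 / nmr_cyl)
    upper = dist.ppf(1.0 - 1.0 / nmr_cyl)
    edges = np.linspace(lower, upper, nmr_cyl + 1)
    radii = 0.5 * (edges[:-1] + edges[1:])
    weights = np.diff(dist.cdf(edges)) / (1 - 2.0 / nmr_cyl)
    return radii, weights

radii, weights = gdr_bins(gamma_k=2.0, gamma_beta=1e-6, nmr_cyl=5)
print(radii, weights, weights.sum())      # weights sum to ~1 after renormalisation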
__maintainer__ = "Robbert Harms" __email__ = "robbert.harms@maastrichtuniversity.nl" class ExpT2Dec(CompartmentConfig): parameter_list = ('TE', 'T2') cl_code = 'return exp(-TE / T2);' PKjUpIcf{8mdt/data/components/standard/compartment_models/Stick.pyfrom mdt.components_loader import bind_function from mdt.models.compartments import CompartmentConfig __author__ = 'Robbert Harms' __date__ = "2015-06-21" __maintainer__ = "Robbert Harms" __email__ = "robbert.harms@maastrichtuniversity.nl" class Stick(CompartmentConfig): parameter_list = ('g', 'b', 'd', 'theta', 'phi') cl_code = ''' return exp(-b * d * pown(dot(g, (mot_float_type4)(cos(phi) * sin(theta), sin(phi) * sin(theta), cos(theta), 0.0)), 2)); ''' @bind_function def get_extra_results_maps(self, results_dict): return self._get_vector_result_maps(results_dict[self.name + '.theta'], results_dict[self.name + '.phi']) PKXqIxGrAmdt/data/components/standard/compartment_models/AstroCylinders.pyfrom mdt.models.compartments import CompartmentConfig __author__ = 'Robbert Harms' __date__ = "2015-06-21" __maintainer__ = "Robbert Harms" __email__ = "robbert.harms@maastrichtuniversity.nl" class AstroCylinders(CompartmentConfig): parameter_list = ('g', 'b', 'G', 'Delta', 'delta', 'd', 'R') dependency_list = ['MRIConstants', 'NeumannCylPerpPGSESum'] cl_code = ''' mot_float_type sum = NeumannCylPerpPGSESum(Delta, delta, d, R); mot_float_type lperp = (-2 * GAMMA_H_SQ * sum); mot_float_type lpar = -b * 1.0/pown(G, 2) * d; return (sqrt(M_PI) / (2 * G * sqrt(lperp - lpar))) * exp(pown(G, 2) * lperp) * erf(G * sqrt(lperp - lpar)); ''' PKXqIvShh;mdt/data/components/standard/compartment_models/NODDI_IC.pyfrom mdt.models.compartments import CompartmentConfig from mdt.components_loader import bind_function import numpy as np __author__ = 'Robbert Harms' __date__ = "2015-06-21" __maintainer__ = "Robbert Harms" __email__ = "robbert.harms@maastrichtuniversity.nl" class NODDI_IC(CompartmentConfig): parameter_list = ('g', 'b', 'G', 'Delta', 'delta', 'd', 'theta', 'phi', 'kappa', 'R') dependency_list = ('CerfErfi', 'MRIConstants', 'NeumannCylPerpPGSESum') @bind_function def get_extra_results_maps(self, results_dict): maps = self._get_vector_result_maps(results_dict[self.name + '.theta'], results_dict[self.name + '.phi']) maps.update({self.name + '.odi': np.arctan2(1.0, results_dict[self.name + '.kappa'] * 10) * 2 / np.pi}) return maps PKjUpIa5mdt/data/components/standard/compartment_models/S0.pyfrom mdt.models.compartments import CompartmentConfig __author__ = 'Robbert Harms' __date__ = "2015-06-21" __maintainer__ = "Robbert Harms" __email__ = "robbert.harms@maastrichtuniversity.nl" class S0(CompartmentConfig): parameter_list = ('s0',) cl_code = 'return s0;' PKjUpIr::=mdt/data/components/standard/compartment_models/ExpT1DecTR.pyfrom mdt.models.compartments import CompartmentConfig __author__ = 'Robbert Harms' __date__ = "2015-06-21" __maintainer__ = "Robbert Harms" __email__ = "robbert.harms@maastrichtuniversity.nl" class ExpT1DecTR(CompartmentConfig): parameter_list = ('TR', 'T1') cl_code = 'return abs(1 - exp(-TR / T1));' PKjUpI&&6mdt/data/components/standard/compartment_models/Dot.pyfrom mdt.models.compartments import CompartmentConfig __author__ = 'Robbert Harms' __date__ = "2015-06-21" __maintainer__ = "Robbert Harms" __email__ = "robbert.harms@maastrichtuniversity.nl" class Dot(CompartmentConfig): parameter_list = () cl_code = 'return (mot_float_type)1.0;' PKs[xIO%9mdt/data/components/standard/compartment_models/Tensor.cl/** * Author = Robbert Harms * Date = 
2014-02-05 * License = LGPL v3 * Maintainer = Robbert Harms * Email = robbert.harms@maastrichtuniversity.nl */ /** * Generate the compartment model signal for the Tensor model. * @params g the protocol gradient vector with (x, y, z) * @params b the protocol b * @params d the parameter d * @params theta the parameter theta * @params phi the parameter phi * @params dperp parameter perpendicular diffusion 1 * @params dperp2 parameter perpendicular diffusion 2 * @params psi the third rotation angle */ mot_float_type cmTensor( const mot_float_type4 g, const mot_float_type b, const mot_float_type d, const mot_float_type dperp, const mot_float_type dperp2, const mot_float_type theta, const mot_float_type phi, const mot_float_type psi){ mot_float_type cos_theta; mot_float_type sin_theta = sincos(theta, &cos_theta); mot_float_type cos_phi; mot_float_type sin_phi = sincos(phi, &cos_phi); mot_float_type cos_psi; mot_float_type sin_psi = sincos(psi, &cos_psi); mot_float_type4 n1 = (mot_float_type4)(cos_phi * sin_theta, sin_phi * sin_theta, cos_theta, 0.0); // rotate n1 by 90 degrees, changing, x, y and z mot_float_type rotation_factor = sin(theta+(M_PI_2_F)); mot_float_type4 n2 = (mot_float_type4)(rotation_factor * cos_phi, rotation_factor * sin_phi, cos(theta+(M_PI_2_F)), 0.0); // uses Rodrigues' formula to rotate n2 by psi around n1 // using a multiplication factor "select(1, -1, n1.z < 0 || ((n1.z == 0.0) && n1.x < 0.0))" to // prevent commutative problems in the cross product between n1xn2 n2 = n2 * cos_psi + (cross(n2, select(1, -1, n1.z < 0 || ((n1.z == 0.0) && n1.x < 0.0)) * n1) * sin_psi) + (n1 * dot(n1, n2) * (1-cos_psi)); return exp(-b * (d * pown(dot(n1, g), 2) + dperp * pown(dot(n2, g), 2) + dperp2 * pown(dot(cross(n1, n2), g), 2) ) ); } PKjUpIHHAmdt/data/components/standard/compartment_models/ExpT1ExpT2sGRE.pyfrom mdt.models.compartments import CompartmentConfig __author__ = 'Francisco.Lagos' class ExpT1ExpT2sGRE(CompartmentConfig): parameter_list = ('TR', 'TE', 'flip_angle', 'T1', 'T2s') cl_code = """ return sin(flip_angle) * (1 - exp(-TR / T1)) / (1 - cos(flip_angle) * exp(-TR / T1)) * exp(-TE / T2s); """ PK&NxIG\+ + Dmdt/data/components/standard/compartment_models/CHARMEDRestricted.cl/** * Author = Robbert Harms * Date = 2014-11-06 * License = LGPL v3 * Maintainer = Robbert Harms * Email = robbert.harms@maastrichtuniversity.nl */ mot_float_type cmCHARMEDRestricted(const mot_float_type4 g, const mot_float_type b, const mot_float_type G, const mot_float_type Delta, const mot_float_type delta, const mot_float_type TE, const mot_float_type d, const mot_float_type theta, const mot_float_type phi){ const mot_float_type q_magnitude_2 = GAMMA_H_HZ_SQ * (G * G) * (delta * delta); const mot_float_type direction_2 = pown(dot(g, (mot_float_type4)(cos(phi) * sin(theta), sin(phi) * sin(theta), cos(theta), 0.0)), 2); const mot_float_type signal_par = -(4 * (M_PI_F * M_PI_F) * q_magnitude_2 * direction_2 * (Delta - (delta / 3.0)) * d); const mot_float_type signal_perp_tmp1 = -( (4 * (M_PI_F * M_PI_F) * q_magnitude_2 * (1 - direction_2) * (7/96.0)) / (d * (TE / 2.0))); const mot_float_type signal_perp_tmp2 = (99/112.0) / (d * (TE / 2.0)); // R is the radius of the cylinder in meters // cylinder_weight R^4 R^2 return (0.021184720085574 * exp(signal_par + (signal_perp_tmp1 * 5.0625e-24 * (2 - (signal_perp_tmp2 * 2.25e-12))))) + (0.107169623942214 * exp(signal_par + (signal_perp_tmp1 * 3.90625e-23 * (2 - (signal_perp_tmp2 * 6.25e-12))))) + (0.194400551313197 * exp(signal_par + (signal_perp_tmp1 * 
1.500625e-22 * (2 - (signal_perp_tmp2 * 1.225e-11))))) + (0.266676876170322 * exp(signal_par + (signal_perp_tmp1 * 4.100625e-22 * (2 - (signal_perp_tmp2 * 2.025e-11))))) + (0.214921653661151 * exp(signal_par + (signal_perp_tmp1 * 9.150625e-22 * (2 - (signal_perp_tmp2 * 3.025e-11))))) + (0.195646574827541 * exp(signal_par + (signal_perp_tmp1 * 1.785061655e-21 * (2 - (signal_perp_tmp2 * 4.224999e-11))))); } PKXqI=NdBB<mdt/data/components/standard/compartment_models/SphereGPD.pyfrom mdt.models.compartments import CompartmentConfig __author__ = 'Robbert Harms' __date__ = "2015-06-21" __maintainer__ = "Robbert Harms" __email__ = "robbert.harms@maastrichtuniversity.nl" class SphereGPD(CompartmentConfig): parameter_list = ('Delta', 'delta', 'd', 'R') dependency_list = ('MRIConstants',) PK&wIѬwDmdt/data/components/standard/compartment_models/CHARMEDRestricted.pyfrom mdt.components_loader import bind_function from mdt.models.compartments import CompartmentConfig __author__ = 'Robbert Harms' __date__ = "2015-06-21" __maintainer__ = "Robbert Harms" __email__ = "robbert.harms@maastrichtuniversity.nl" class CHARMEDRestricted(CompartmentConfig): parameter_list = ('g', 'b', 'G', 'Delta', 'delta', 'TE', 'd', 'theta', 'phi') dependency_list = ('MRIConstants',) @bind_function def get_extra_results_maps(self, results_dict): return self._get_vector_result_maps(results_dict[self.name + '.theta'], results_dict[self.name + '.phi']) PKjUpIt B;mdt/data/components/standard/compartment_models/LinT2Dec.pyfrom mdt.models.compartments import CompartmentConfig __author__ = 'Francisco.Lagos' class LinT2Dec(CompartmentConfig): parameter_list = ('TE', 'R2') cl_code = 'return -TE * R2;' PKXqIzzz>mdt/data/components/standard/compartment_models/CylinderGPD.pyfrom mdt.models.compartments import CompartmentConfig from mdt.components_loader import bind_function __author__ = 'Robbert Harms' __date__ = "2015-06-21" __maintainer__ = "Robbert Harms" __email__ = "robbert.harms@maastrichtuniversity.nl" class CylinderGPD(CompartmentConfig): parameter_list = ('g', 'G', 'Delta', 'delta', 'd', 'theta', 'phi', 'R') dependency_list = ('MRIConstants', 'NeumannCylPerpPGSESum') cl_code = ''' mot_float_type sum = NeumannCylPerpPGSESum(Delta, delta, d, R); const mot_float_type4 n = (mot_float_type4)(cos(phi) * sin(theta), sin(phi) * sin(theta), cos(theta), 0.0); mot_float_type omega = (G == 0.0) ? M_PI_2 : acos(dot(n, g * G) / (G * length(n))); return exp(-2 * GAMMA_H_SQ * pown(G * sin(omega), 2) * sum) * exp(-(Delta - (delta/3.0)) * pown(GAMMA_H * delta * G * cos(omega), 2) * d); ''' @bind_function def get_extra_results_maps(self, results_dict): return self._get_vector_result_maps(results_dict[self.name + '.theta'], results_dict[self.name + '.phi']) PK]?[I+ʊ..;mdt/data/components/standard/compartment_models/NODDI_IC.cl/** * Author = Robbert Harms * Date = 2/26/14 * License = LGPL v3 * Maintainer = Robbert Harms * Email = robbert.harms@maastrichtuniversity.nl */ // do not change this value! 
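# The CylinderGPD compartment above combines the Neumann perpendicular term with a Gaussian
# parallel term, split by the angle omega between the gradient and the cylinder axis. A numpy
# sketch of that cl_code; the neumann_sum argument stands for NeumannCylPerpPGSESum(Delta, delta,
# d, R), e.g. as computed by the numpy transcription given after NeumannCylPerpPGSESum.cl:

import numpy as np

GAMMA_H = 267.5987e6   # gyromagnetic ratio of protons in water, rad s^-1 T^-1 (see MRIConstants.h)

def cylinder_gpd(g, G, Delta, delta, d, theta, phi, neumann_sum):
    """CylinderGPD attenuation for a unit gradient direction g (length-3 array)."""
    n = np.array([np.cos(phi) * np.sin(theta), np.sin(phi) * np.sin(theta), np.cos(theta)])
    if G == 0.0:
        omega = np.pi / 2
    else:
        omega = np.arccos(np.dot(n, g * G) / (G * np.linalg.norm(n)))
    perp = np.exp(-2 * GAMMA_H ** 2 * (G * np.sin(omega)) ** 2 * neumann_sum)
    par = np.exp(-(Delta - delta / 3.0) * (GAMMA_H * delta * G * np.cos(omega)) ** 2 * d)
    return perp * par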
It would require adding approximations to the functions below #define NODDI_IC_MAX_POLYNOMIAL_ORDER 6 // sqrt(pi)/2 #define M_SQRTPI_2_F 0.8862269254527580f // sqrt(pi) #define M_SQRTPI_F 1.7724538509055160f void NODDI_IC_LegendreGaussianIntegral(const mot_float_type x, mot_float_type* result); void NODDI_IC_WatsonSHCoeff(const mot_float_type kappa, mot_float_type* result); void NODDI_IC_create_legendre_terms(const mot_float_type x, mot_float_type* const legendre_terms); /** * Generate the compartment model signal for the NODDI Intra Cellular (Stick with dispersion) compartment. * If Radius is fixed to 0 the model behaves as a stick (with dispersion), if non-fixed the model behaves as a * cylinder (with dispersion). * * It may seem redundant to have both G/Delta/delta and b as arguments. But that is for speed reasons. b is most * of the time available anyway, and G/Delta/delta is only needed if R is not fixed (still it must be provided for). * * @params g from the protocol /scheme * @params b from the protocol /scheme * @params G from the protocol / scheme * @params Delta big delta from the protocol / scheme * @params delta small delta from the protocol / scheme * @params d parameter * @params theta parameter * @params phi parameter * @params kappa parameter (concentration parameter of the Watson's distribution) * @params R the radius of the cylinder */ mot_float_type cmNODDI_IC(const mot_float_type4 g, const mot_float_type b, const mot_float_type G, const mot_float_type Delta, const mot_float_type delta, const mot_float_type d, const mot_float_type theta, const mot_float_type phi, const mot_float_type kappa_non_scaled, const mot_float_type R){ const mot_float_type kappa = kappa_non_scaled * 10; mot_float_type cosTheta = dot(g, (mot_float_type4)(cos(phi) * sin(theta), sin(phi) * sin(theta), cos(theta), 0.0)); if(fabs(cosTheta) > 1){ cosTheta = cosTheta / fabs(cosTheta); } mot_float_type LePerp = -2 * GAMMA_H_SQ * (G*G) * NeumannCylPerpPGSESum(Delta, delta, d, R); mot_float_type ePerp = exp(LePerp); mot_float_type Lpmp = LePerp + d * b; mot_float_type watson_coeff[NODDI_IC_MAX_POLYNOMIAL_ORDER + 1]; NODDI_IC_WatsonSHCoeff(kappa, watson_coeff); mot_float_type lgi[NODDI_IC_MAX_POLYNOMIAL_ORDER + 1]; NODDI_IC_LegendreGaussianIntegral(Lpmp, lgi); // split the summation into two parts to save one array (reusing the lgi array for the legendre terms) for(int i = 0; i < NODDI_IC_MAX_POLYNOMIAL_ORDER + 1; i++){ watson_coeff[i] *= lgi[i] * sqrt((i + 0.25)/M_PI_F); } NODDI_IC_create_legendre_terms(cosTheta, lgi); mot_float_type signal = 0.0; for(int i = 0; i < NODDI_IC_MAX_POLYNOMIAL_ORDER + 1; i++){ signal += lgi[i] * watson_coeff[i]; } return ePerp * signal / 2.0; } /** * This will create the legendre terms we need for the NODDI IC model. * * For the NODDI IC model we need to have a few legendre terms for the same position (argument to x) * with linearly increasing degrees of step size 2. * * This is a specialized version of the function of the function firstLegendreTerm in the MOT library. * * That is, this will fill the given array legendre_terms with the values: * [0] = firstLegendreTerm(x, 0) * [1] = firstLegendreTerm(x, 2 * 1) * [2] = firstLegendreTerm(x, 2 * 2) * [3] = firstLegendreTerm(x, 2 * 3) * ... */ void NODDI_IC_create_legendre_terms(const mot_float_type x, mot_float_type* const legendre_terms){ // this is the default if fabs(x) == 1.0 // to eliminate the branch I added this to the front, this saves an if/else. 
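# NODDI_IC_create_legendre_terms fills P_0(x), P_2(x), ..., P_12(x) by running Bonnet's
# recurrence twice per loop iteration and keeping only the even orders. A quick cross-check in
# Python (assuming scipy is available):

import numpy as np
from scipy.special import eval_legendre

def even_legendre_terms(x, n_terms=7):
    """P_0(x), P_2(x), ..., P_{2*(n_terms - 1)}(x) via Bonnet's recurrence."""
    terms = [1.0]
    p_prev, p_curr = 1.0, x        # P_0 and P_1
    k = 1
    while len(terms) < n_terms:
        p_next = ((2 * k + 1) * x * p_curr - k * p_prev) / (k + 1)
        p_prev, p_curr = p_curr, p_next
        if (k + 1) % 2 == 0:       # keep even orders only
            terms.append(p_next)
        k += 1
    return np.array(terms)

x = 0.3
print(even_legendre_terms(x))
print(eval_legendre(2 * np.arange(7), x))     # should match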
// also, since we are after the legendre terms with a list with n = [0, 2*1, 2*2, 2*3, 2*4, ...] // the legendre terms collaps to this loop if fabs(x) == 1.0 if(fabs(x) == 1.0){ for(int i = 0; i < NODDI_IC_MAX_POLYNOMIAL_ORDER + 1; i++){ legendre_terms[i] = 1.0; } return; } legendre_terms[0] = 1.0; mot_float_type P0 = 1.0; mot_float_type P1 = x; mot_float_type Pn; for(int k = 1; k < NODDI_IC_MAX_POLYNOMIAL_ORDER + 1; k++){ Pn = ((2 * k + 1) * x * P1 - (k * P0)) / (k + 1); P0 = P1; P1 = Pn; legendre_terms[k] = Pn; Pn = ((2 * (k+1) + 1) * x * P1 - ((k+1) * P0)) / ((k+1) + 1); P0 = P1; P1 = Pn; } } /** Copied from the Matlab NODDI toolbox function [L, D] = legendreGaussianIntegral(x, n) Computes legendre gaussian integrals up to the order specified and the derivatives if requested The integral takes the following form, in Mathematica syntax, L[x, n] = Integrate[Exp[-x \mu^2] Legendre[2*n, \mu], {\mu, -1, 1}] D[x, n] = Integrate[Exp[-x \mu^2] (-\mu^2) Legendre[2*n, \mu], {\mu, -1, 1}] original author: Gary Hui Zhang (gary.zhang@ucl.ac.uk) */ void NODDI_IC_LegendreGaussianIntegral(const mot_float_type x, mot_float_type* const result){ if(x > 0.05){ // exact mot_float_type tmp[NODDI_IC_MAX_POLYNOMIAL_ORDER + 1]; tmp[0] = M_SQRTPI_F * erf(sqrt(x))/sqrt(x); for(int i = 1; i < NODDI_IC_MAX_POLYNOMIAL_ORDER + 1; i++){ tmp[i] = (-exp(-x) + (i - 0.5) * tmp[i-1]) / x; } result[0] = tmp[0]; result[1] = -0.5*tmp[0] + 1.5*tmp[1]; result[2] = 0.375*tmp[0] - 3.75*tmp[1] + 4.375*tmp[2]; result[3] = -0.3125*tmp[0] + 6.5625*tmp[1] - 19.6875*tmp[2] + 14.4375*tmp[3]; result[4] = 0.2734375*tmp[0] - 9.84375*tmp[1] + 54.140625*tmp[2] - 93.84375*tmp[3] + 50.2734375*tmp[4]; result[5] = -(63/256.0)*tmp[0] + (3465/256.0)*tmp[1] - (30030/256.0)*tmp[2] + (90090/256.0)*tmp[3] - (109395/256.0)*tmp[4] + (46189/256.0)*tmp[5]; result[6] = (231/1024.0)*tmp[0] - (18018/1024.0)*tmp[1] + (225225/1024.0)*tmp[2] - (1021020/1024.0)*tmp[3] + (2078505/1024.0)*tmp[4] - (1939938/1024.0)*tmp[5] + (676039/1024.0)*tmp[6]; } else{ // approximate mot_float_type tmp[NODDI_IC_MAX_POLYNOMIAL_ORDER - 1]; tmp[0] = x * x; tmp[1] = tmp[0] * x; tmp[2] = tmp[1] * x; tmp[3] = tmp[2] * x; tmp[4] = tmp[3] * x; result[0] = 2 - 2*x/3.0 + tmp[0]/5 - tmp[1]/21.0 + tmp[2]/108.0; result[1] = -4*x/15.0 + 4*tmp[0]/35.0 - 2*tmp[1]/63.0 + 2*tmp[2]/297.0; result[2] = 8*tmp[0]/315.0 - 8*tmp[1]/693.0 + 4*tmp[2]/1287.0; result[3] = -16*tmp[1]/9009.0 + 16*tmp[2]/19305.0; result[4] = 32*tmp[2]/328185.0; result[5] = -64*tmp[3]/14549535.0; result[6] = 128*tmp[4]/760543875.0; } } /** function [C, D] = WatsonSHCoeff(k) Computes the spherical harmonic (SH) coefficients of the Watson's distribution with the concentration parameter k (kappa) up to the 12th order and the derivatives if requested. Truncating at the 12th order gives good approximation for kappa up to 64. Note that the SH coefficients of the odd orders are always zero. 
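# NODDI_IC_LegendreGaussianIntegral above evaluates L[x, n] = Integrate[Exp[-x mu^2] P_{2n}(mu),
# {mu, -1, 1}] either exactly (through an erf-based recursion) or with a small-x series. The same
# quantity can be checked numerically with scipy (far too slow for the kernel itself, but handy
# for validating both branches):

import numpy as np
from scipy.integrate import quad
from scipy.special import erf, eval_legendre

def legendre_gaussian_integral(x, n):
    """Numerical value of the Legendre-Gaussian integral for a single x and order 2n."""
    val, _ = quad(lambda mu: np.exp(-x * mu ** 2) * eval_legendre(2 * n, mu), -1.0, 1.0)
    return val

# the n = 0 case should reproduce the first entry of the 'exact' branch
x = 0.5
print(legendre_gaussian_integral(x, 0), np.sqrt(np.pi) * erf(np.sqrt(x)) / np.sqrt(x))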
author: Gary Hui Zhang (gary.zhang@ucl.ac.uk) */ void NODDI_IC_WatsonSHCoeff(const mot_float_type kappa, mot_float_type* const result){ result[0] = M_SQRTPI_F * 2; if(kappa <= 30){ mot_float_type ks[NODDI_IC_MAX_POLYNOMIAL_ORDER - 1]; ks[0] = kappa * kappa; ks[1] = ks[0] * kappa; ks[2] = ks[1] * kappa; ks[3] = ks[2] * kappa; ks[4] = ks[3] * kappa; if(kappa > 0.1){ // exact mot_float_type sks[NODDI_IC_MAX_POLYNOMIAL_ORDER]; sks[0] = sqrt(kappa); sks[1] = sks[0] * kappa; sks[2] = sks[1] * kappa; sks[3] = sks[2] * kappa; sks[4] = sks[3] * kappa; sks[5] = sks[4] * kappa; mot_float_type erfik = ferfi(sks[0]); mot_float_type ierfik = 1/erfik; mot_float_type ek = exp(kappa); mot_float_type dawsonk = M_SQRTPI_2_F * erfik/ek; result[1] = 3 * sks[0] - (3 + 2 * kappa) * dawsonk; result[1] = sqrt(5.0) * result[1] * ek; result[1] = result[1]*ierfik/kappa; result[2] = (105 + 60*kappa + 12*ks[0] )*dawsonk; result[2] = result[2] -105*sks[0] + 10*sks[1]; result[2] = .375*result[2]*ek/ks[0]; result[2] = result[2]*ierfik; result[3] = -3465 - 1890*kappa - 420*ks[0] - 40*ks[1] ; result[3] = result[3]*dawsonk; result[3] = result[3] + 3465*sks[0] - 420*sks[1] + 84*sks[2]; result[3] = result[3]*sqrt(13*M_PI_F)/64/ks[1]; result[3] = result[3]/dawsonk; result[4] = 675675 + 360360*kappa + 83160*ks[0] + 10080*ks[1] + 560*ks[2] ; result[4] = result[4]*dawsonk; result[4] = result[4] - 675675*sks[0] + 90090*sks[1] - 23100*sks[2] + 744*sks[3]; result[4] = sqrt(17.0)*result[4]*ek; result[4] = result[4]/512.0/ks[2]; result[4] = result[4]*ierfik; result[5] = -43648605 - 22972950*kappa - 5405400*ks[0] - 720720*ks[1] - 55440*ks[2] - 2016*ks[3]; result[5] = result[5]*dawsonk; result[5] = result[5] + 43648605*sks[0] - 6126120*sks[1] + 1729728*sks[2] - 82368*sks[3] + 5104*sks[4]; result[5] = sqrt(21*M_PI_F)*result[5]/4096.0/ks[3]; result[5] = result[5]/dawsonk; result[6] = 7027425405 + 3666482820*kappa + 872972100*ks[0] + 122522400*ks[1] + 10810800*ks[2] + 576576*ks[3] + 14784*ks[4]; result[6] = result[6]*dawsonk; result[6] = result[6] - 7027425405*sks[0] + 1018467450*sks[1] - 302630328*sks[2] + 17153136*sks[3] - 1553552*sks[4] + 25376*sks[5]; result[6] = 5*result[6]*ek; result[6] = result[6]/16384.0/ks[4]; result[6] = result[6]*ierfik; } else{ // approximate result[1] = (4/3.0*kappa + 8/63.0*ks[0]) * sqrt(M_PI_F/5.0); result[2] = (8/21.0*ks[0] + 32/693.0*ks[1]) * (sqrt(M_PI_F)*0.2); result[3] = (16/693.0*ks[1] + 32/10395.0*ks[2]) * sqrt(M_PI_F/13); result[4] = (32/19305.0*ks[2]) * sqrt(M_PI_F/17); result[5] = 64*sqrt(M_PI_F/21)*ks[3]/692835.0; result[6] = 128*sqrt(M_PI_F)*ks[4]/152108775.0; } } else{ // large mot_float_type lnkd[NODDI_IC_MAX_POLYNOMIAL_ORDER]; lnkd[0] = log(kappa) - log(30.0); lnkd[1] = lnkd[0] * lnkd[0]; lnkd[2] = lnkd[1] * lnkd[0]; lnkd[3] = lnkd[2] * lnkd[0]; lnkd[4] = lnkd[3] * lnkd[0]; lnkd[5] = lnkd[4] * lnkd[0]; result[1] = 7.52308 + 0.411538*lnkd[0] - 0.214588*lnkd[1] + 0.0784091*lnkd[2] - 0.023981*lnkd[3] + 0.00731537*lnkd[4] - 0.0026467*lnkd[5]; result[2] = 8.93718 + 1.62147*lnkd[0] - 0.733421*lnkd[1] + 0.191568*lnkd[2] - 0.0202906*lnkd[3] - 0.00779095*lnkd[4] + 0.00574847*lnkd[5]; result[3] = 8.87905 + 3.35689*lnkd[0] - 1.15935*lnkd[1] + 0.0673053*lnkd[2] + 0.121857*lnkd[3] - 0.066642*lnkd[4] + 0.0180215*lnkd[5]; result[4] = 7.84352 + 5.03178*lnkd[0] - 1.0193*lnkd[1] - 0.426362*lnkd[2] + 0.328816*lnkd[3] - 0.0688176*lnkd[4] - 0.0229398*lnkd[5]; result[5] = 6.30113 + 6.09914*lnkd[0] - 0.16088*lnkd[1] - 1.05578*lnkd[2] + 0.338069*lnkd[3] + 0.0937157*lnkd[4] - 0.106935*lnkd[5]; result[6] = 4.65678 + 
6.30069*lnkd[0] + 1.13754*lnkd[1] - 1.38393*lnkd[2] - 0.0134758*lnkd[3] + 0.331686*lnkd[4] - 0.105954*lnkd[5]; } } #undef M_SQRTPI_2_F #undef M_SQRTPI_F PKjUpI=:mdt/data/components/standard/compartment_models/MPM_Fit.pyfrom mdt.models.compartments import CompartmentConfig __author__ = 'Francisco.Lagos' """MPM fitting (Weiskopf, 2016 ESMRMB Workshop) This fitting is a model published by Helms (2008) and Weiskopf (2011) to determinate biological properties of the tissue/sample in function *of several images*, which includes T1w, PDw and MTw images. This function is still an approximation and, if the assumptions of those approximations hold for ex-vivo tissue, then can be used in this data. """ class MPM_Fit(CompartmentConfig): parameter_list = ('TR', 'flip_angle', 'b1_static', 'T1') cl_code = 'return (flip_angle * b1_static) * ( (TR / T1) / ( pown(flip_angle * b1_static, 2) / 2 + ( TR / T1 ) ) );' PKHcuuImdt/data/components/standard/compartment_models/GDRCylindersFixedRadii.cl/** * Author = Robbert Harms * Date = 2014-02-05 * License = LGPL v3 * Maintainer = Robbert Harms * Email = robbert.harms@maastrichtuniversity.nl */ /** * Generate the compartment model signal for the Gamma Distributed Radii model. * * This is a fixed version of the GDRCylinders model. This means that the different radii are not calculated * dynamically by means of a Gamma distribution. Rather, the list of radii and the corresponding weights * are given as fixed values. * * @params gamma_cyl_radii, the list of radii that should be used for calculating the cylinders. * @params gamma_cyl_weights, the list of weights per radius. * @params nmr_gamma_cyl, the number of cylinders we provided * */ mot_float_type cmGDRCylindersFixedRadii(const mot_float_type4 g, const mot_float_type G, const mot_float_type Delta, const mot_float_type delta, const mot_float_type d, const mot_float_type theta, const mot_float_type phi, global const mot_float_type* const gamma_cyl_radii, global const mot_float_type* const gamma_cyl_weights, const int nmr_gamma_cyl_fixed){ mot_float_type signal = 0; for(int i = 0; i < nmr_gamma_cyl_fixed; i++){ signal += gamma_cyl_weights[i] * cmCylinderGPD(g, G, Delta, delta, d, theta, phi, gamma_cyl_radii[i]); } return signal; } PKjUpIEE@mdt/data/components/standard/compartment_models/ExpT1ExpT2GRE.pyfrom mdt.models.compartments import CompartmentConfig __author__ = 'Francisco.Lagos' class ExpT1ExpT2GRE(CompartmentConfig): parameter_list = ('TR', 'TE', 'flip_angle', 'T1', 'T2') cl_code = """ return sin(flip_angle) * (1 - exp(-TR / T1)) / (1 - cos(flip_angle) * exp(-TR / T1)) * exp(-TE / T2); """ PKjUpIQ@mdt/data/components/standard/compartment_models/ExpT2DecSTEAM.pyfrom mdt.models.compartments import CompartmentConfig __author__ = 'Francisco J. 
Fritz' __date__ = "2016-09-06" __maintainer__ = "Robbert Harms" __email__ = "robbert.harms@maastrichtuniversity.nl" class ExpT2DecSTEAM(CompartmentConfig): parameter_list = ('SEf', 'TE', 'flip_angle', 'Refoc_fa1', 'Refoc_fa2', 'T2') cl_code = """ return pow(0.5, SEf) * sin(flip_angle) * sin(Refoc_fa1) * sin(Refoc_fa2) * exp(-TE / T2); """ PKjUpIVgg;mdt/data/components/standard/compartment_models/Zeppelin.pyfrom mdt.components_loader import bind_function from mdt.models.compartments import CompartmentConfig __author__ = 'Robbert Harms' __date__ = "2015-06-21" __maintainer__ = "Robbert Harms" __email__ = "robbert.harms@maastrichtuniversity.nl" class Zeppelin(CompartmentConfig): parameter_list = ('g', 'b', 'd', 'dperp0', 'theta', 'phi') cl_code = ''' return exp(-b * (((d - dperp) * pown(dot(g, (mot_float_type4)(cos(phi) * sin(theta), sin(phi) * sin(theta), cos(theta), 0.0)), 2) ) + dperp)); ''' @bind_function def get_extra_results_maps(self, results_dict): return self._get_vector_result_maps(results_dict[self.name + '.theta'], results_dict[self.name + '.phi']) PKjUpIkG;mdt/data/components/standard/compartment_models/NODDI_EC.pyfrom mdt.components_loader import bind_function from mdt.models.compartments import CompartmentConfig from mot.model_building.cl_functions.library_functions import CerfDawson __author__ = 'Robbert Harms' __date__ = "2015-06-21" __maintainer__ = "Robbert Harms" __email__ = "robbert.harms@maastrichtuniversity.nl" class NODDI_EC(CompartmentConfig): parameter_list = ('g', 'b', 'd', 'dperp0', 'theta', 'phi', 'kappa') dependency_list = (CerfDawson(),) @bind_function def get_extra_results_maps(self, results_dict): return self._get_vector_result_maps(results_dict[self.name + '.theta'], results_dict[self.name + '.phi']) PKjUpINY.>mdt/data/components/standard/compartment_models/AstroSticks.pyfrom mdt.models.compartments import CompartmentConfig __author__ = 'Robbert Harms' __date__ = "2015-06-21" __maintainer__ = "Robbert Harms" __email__ = "robbert.harms@maastrichtuniversity.nl" class AstroSticks(CompartmentConfig): parameter_list = ('g', 'G', 'b', 'd') cl_code = ''' if(b == 0){ return 1; } return sqrt(M_PI) / (2 * G * sqrt((b / (G*G)) * d)) * erf(G * sqrt((b / (G*G)) * d)); ''' PKjUpIM w''7mdt/data/components/standard/compartment_models/Ball.pyfrom mdt.models.compartments import CompartmentConfig __author__ = 'Robbert Harms' __date__ = "2015-06-21" __maintainer__ = "Robbert Harms" __email__ = "robbert.harms@maastrichtuniversity.nl" class Ball(CompartmentConfig): parameter_list = ('b', 'd') cl_code = 'return exp(-d * b);' PKjUpI,LL>mdt/data/components/standard/compartment_models/ExpT1DecGRE.pyfrom mdt.models.compartments import CompartmentConfig __author__ = 'Francisco.Lagos' class ExpT1DecGRE(CompartmentConfig): parameter_list = ('TR', 'flip_angle', 'b1_static', 'T1') cl_code = """ return sin(flip_angle * b1_static) * (1 - exp(-TR / T1)) / (1 - cos(flip_angle * b1_static) * exp(-TR / T1) ); """PKjUpIb'(=mdt/data/components/standard/compartment_models/ExpT1DecTM.pyfrom mdt.models.compartments import CompartmentConfig __author__ = 'Francisco J. 
Fritz' __date__ = "2016-09-06" __maintainer__ = "Robbert Harms" __email__ = "robbert.harms@maastrichtuniversity.nl" class ExpT1DecTM(CompartmentConfig): parameter_list = ('SEf', 'TM', 'flip_angle', 'Refoc_fa1', 'Refoc_fa2', 'T1') cl_code = 'return pow(0.5, SEf) * sin(flip_angle) * sin(Refoc_fa1) * sin(Refoc_fa2) * exp(-TM / T1);' PK7?[I,ee;mdt/data/components/standard/compartment_models/NODDI_EC.cl/** * Author = Robbert Harms * Date = 2/26/14 * License = LGPL v3 * Maintainer = Robbert Harms * Email = robbert.harms@maastrichtuniversity.nl */ /** * Generate the compartment model signal for the NODDI Extra Cellular compartment * @params g from the protocol /scheme * @params b from the protocol / scheme * @params d parameter * @params theta parameter * @params phi parameter * @params dperp parameter (hindered diffusivity outside the cylinders in perpendicular directions) * @params kappa parameter (concentration parameter of the Watson's distribution) */ mot_float_type cmNODDI_EC(const mot_float_type4 g, const mot_float_type b, const mot_float_type d, const mot_float_type dperp, const mot_float_type theta, const mot_float_type phi, const mot_float_type kappa){ const mot_float_type kappa_scaled = kappa * 10; mot_float_type tmp; mot_float_type dw_0, dw_1; if(kappa_scaled > 1e-5){ tmp = sqrt(kappa_scaled)/dawson(sqrt(kappa_scaled)); dw_0 = ( -(d - dperp) + 2 * dperp * kappa_scaled + (d - dperp) * tmp) / (2.0 * kappa_scaled); dw_1 = ( (d - dperp) + 2 * (d+dperp) * kappa_scaled - (d - dperp) * tmp) / (4.0 * kappa_scaled); } else{ tmp = 2 * (d - dperp) * kappa_scaled; dw_0 = ((2 * dperp + d) / 3.0) + (tmp/22.5) + ((tmp * kappa_scaled) / 236.0); dw_1 = ((2 * dperp + d) / 3.0) - (tmp/45.0) - ((tmp * kappa_scaled) / 472.0); } return (mot_float_type) exp(-b * fma((dw_0 - dw_1), pown(dot(g, (mot_float_type4)(cos(phi) * sin(theta), sin(phi) * sin(theta), cos(theta), 0)), 2), dw_1)); } PKjUpILZ//Imdt/data/components/standard/compartment_models/GDRCylindersFixedRadii.pyfrom mdt.models.compartments import CompartmentConfig from mdt.components_loader import CompartmentModelsLoader, bind_function __author__ = 'Robbert Harms' __date__ = "2015-06-21" __maintainer__ = "Robbert Harms" __email__ = "robbert.harms@maastrichtuniversity.nl" compartment_loader = CompartmentModelsLoader() class GDRCylindersFixedRadii(CompartmentConfig): parameter_list = ('g', 'G', 'Delta', 'delta', 'd', 'theta', 'phi', 'gamma_radii', 'gamma_cyl_weights', 'nmr_gamma_cyl_weights') dependency_list = (compartment_loader.load('CylinderGPD'),) @bind_function def get_extra_results_maps(self, results_dict): return self._get_vector_result_maps(results_dict[self.name + '.theta'], results_dict[self.name + '.phi']) PKjUpIq,+UU;mdt/data/components/standard/compartment_models/LinT1GRE.pyfrom mdt.models.compartments import CompartmentConfig __author__ = 'Francisco.Lagos' """Lineal T1 fitting (Weiskopf, 2016 ESMRMB Workshop) This fitting is the extension of the standard GRE equation for flip angles lower than 90deg. This modelling allows a linear fitting of the data if is enough data to support it. In principle, it should not be a problem if only two points are used, however the addition of a constant in the equation could give some kind of uncertainty. B1 has to be normalized *in function of the reference voltage, the angle distribution and the reference angle*. Here I assume that TR <<< T1, then exp(-TR/T1) ~ 1 - TR/T1. Then the equation becomes 'simpler'. However, fi this condition is not achieved, then return to the standard equation. 
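To make the TR <<< T1 assumption concrete, the snippet below (illustrative values only, not from MDT) compares exp(-TR/T1) with its first-order expansion 1 - TR/T1 for a typical gradient-echo TR and a white-matter-like T1, showing that the linearisation error is on the order of 1e-4.

import numpy as np

TR = 0.02   # 20 ms repetition time (illustrative)
T1 = 1.0    # 1000 ms longitudinal relaxation time (illustrative)

exact = np.exp(-TR / T1)
linearized = 1 - TR / T1
print(exact, linearized, abs(exact - linearized))   # difference is roughly 2e-4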
Also, DATA HAS TO BE PROCESSED BEFORE TO USE THIS EQUATION. Please apply log() on the data. """ class LinT1GRE(CompartmentConfig): parameter_list = ('Sw_static', 'E1') #cl_code = """ # return sin( B1 * angle ) / ( 1 - cos( B1 * angle ) * exp( - TR / T1 )); #""" cl_code = """ return Sw_static * E1; """PKjUpI=:!!mdt/gui/utils.pyimport time from contextlib import contextmanager from functools import wraps from PyQt5.QtCore import QObject, pyqtSignal, QFileSystemWatcher, pyqtSlot, QTimer from PyQt5.QtCore import QTimer from PyQt5.QtWidgets import QApplication from mdt.nifti import yield_nifti_info from mdt.log_handlers import LogListenerInterface __author__ = 'Robbert Harms' __date__ = "2015-08-20" __maintainer__ = "Robbert Harms" __email__ = "robbert.harms@maastrichtuniversity.nl" class QtManager(object): windows = [] @staticmethod def get_qt_application_instance(): q_app = QApplication.instance() if q_app is None: q_app = QApplication([]) q_app.lastWindowClosed.connect(QtManager.empty_windows_list) return q_app @staticmethod def exec_(): if QtManager.windows: QtManager.get_qt_application_instance().exec_() @staticmethod def add_window(window): QtManager.windows.append(window) @staticmethod def empty_windows_list(): QtManager.windows = [] def center_window(window): """Center the given window on the screen. Args: q_app (QApplication): for desktop information window (QMainWindow): the window to center """ q_app = QApplication.instance() frame_gm = window.frameGeometry() screen = q_app.desktop().screenNumber(q_app.desktop().cursor().pos()) center_point = q_app.desktop().screenGeometry(screen).center() frame_gm.moveCenter(center_point) window.move(frame_gm.topLeft()) def function_message_decorator(header, footer): """This creates and returns a decorator that prints a header and footer before executing the function. Args: header (str): the header text, we will add extra decoration to it footer (str): the footer text, we will add extra decoration to it Returns: decorator function """ def _called_decorator(dec_func): @wraps(dec_func) def _decorator(*args, **kwargs): print('') print(header) print('-'*20) response = dec_func(*args, **kwargs) print('-'*20) print(footer) return response return _decorator return _called_decorator @contextmanager def blocked_signals(*widgets): """Small context in which the signals of the given widget are blocked. Args: widgets (QWidget): one or more widgets """ def apply_block(bool_val): for w in widgets: w.blockSignals(bool_val) apply_block(True) yield apply_block(False) def print_welcome_message(): """Prints a small welcome message for after the GUI has loaded. This prints to stdout. We expect the GUI to catch the stdout events and redirect them to the GUI. """ from mdt import VERSION print('Welcome to MDT version {}.'.format(VERSION)) print('') print('This area is reserved for log output.') print('-------------------------------------') class ForwardingListener(LogListenerInterface): def __init__(self, queue): """Forwards all incoming messages to the given _logging_update_queue. Instances of this class can be used as a log listener to the MDT LogDispatchHandler and as a sys.stdout replacement. 
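A hedged usage sketch (not taken from the MDT sources) of the ForwardingListener described above: it is registered on the LogDispatchHandler and doubles as a sys.stdout replacement, so log records and printed text end up in the same queue for a GUI thread to consume.

import sys
from queue import Queue
from mdt.log_handlers import LogDispatchHandler
from mdt.gui.utils import ForwardingListener

logging_queue = Queue()
listener = ForwardingListener(logging_queue)

listener_id = LogDispatchHandler.add_listener(listener)   # keep the id so the listener can be removed later
sys.stdout = listener                                      # write() also puts printed text on the queue

# ... run the work whose output should appear in the GUI ...

LogDispatchHandler.remove_listener(listener_id)
sys.stdout = sys.__stdout__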
Args: queue (Queue): the _logging_update_queue to forward the messages to """ self._queue = queue def emit(self, record, formatted_message): self._queue.put(formatted_message + "\n") def write(self, string): self._queue.put(string) image_files_filters = ['Nifti (*.nii *.nii.gz)', 'IMG, HDR (*.img)', 'All files (*)'] protocol_files_filters = ['MDT protocol (*.prtcl)', 'Text files (*.txt)', 'All files (*)'] class UpdateDescriptor(object): def __init__(self, attribute_name): """Descriptor that will emit a state_updated_signal at each update. This accesses from the instance the attribute name prepended with an underscore (_). """ self._attribute_name = attribute_name def __get__(self, instance, owner): return getattr(instance, '_' + self._attribute_name) def __set__(self, instance, value): setattr(instance, '_' + self._attribute_name, value) instance.state_updated_signal.emit(self._attribute_name) class MessageReceiver(QObject): text_message_signal = pyqtSignal(str) finished = pyqtSignal() def __init__(self, queue, *args, **kwargs): """A QObject (to be run in a QThread) which sits waiting for data to come through a Queue.Queue(). It blocks until data is available, and one it has got something from the _logging_update_queue, it sends it to the "MainThread" by emitting a Qt Signal. Attributes: is_running (boolean): set to False to stop the receiver. """ super(MessageReceiver, self).__init__(*args, **kwargs) self.queue = queue self.is_running = True def run(self): while self.is_running: if not self.queue.empty(): self.text_message_signal.emit(self.queue.get()) time.sleep(0.001) self.finished.emit() class MainTab(object): def tab_opened(self): """Called when this tab is selected by the user.""" class DirectoryImageWatcher(QObject): image_updates = pyqtSignal(tuple, tuple) def __init__(self, directory=None): """Watches a given directory for added, removed and/or updated nifti files. Args: directory (str): the initial directory to watch. You can set a new one with set_directory(). Signals images_updates (list, list, dict): sent when images are updated in the watch directory. It contains the list of additions, removals and changes where changes is a dict mapping the old name to a new one. """ super(DirectoryImageWatcher, self).__init__() self._watched_dir = None self._current_files = [] self._watcher = QFileSystemWatcher() self._watcher.directoryChanged.connect(self._directory_changed) self._timer = QTimer() self._timer.timeout.connect(self._timer_event) self._timer.timeout.connect(self._timer.stop) if directory: self.set_directory(directory) def set_directory(self, directory): """Set the watched directory to the given directory. Args: directory (str): the new directory to watch for added, removed and/or changed nifti files. """ if self._watched_dir: self._watcher.removePath(self._watched_dir) self._watched_dir = directory self._watcher.addPath(directory) self._current_files = list(el[1] for el in yield_nifti_info(directory)) @pyqtSlot(str) def _directory_changed(self, directory): if directory == self._watched_dir: self._timer.start(100) @pyqtSlot() def _timer_event(self): new_file_list = list(el[1] for el in yield_nifti_info(self._watched_dir)) removals = set(self._current_files).difference(new_file_list) additions = set(new_file_list).difference(self._current_files) self._current_files = new_file_list self.image_updates.emit(tuple(additions), tuple(removals)) class TimedUpdate(QTimer): def __init__(self, update_cb): """Creates a timer that can delay running a given callback function. 
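A minimal wiring sketch (illustrative, not from MDT, and assuming a running Qt application with an event loop) for the MessageReceiver described above: the receiver is moved to a worker QThread where it polls the queue and re-emits each message as a Qt signal.

from queue import Queue
from PyQt5.QtCore import QThread
from mdt.gui.utils import MessageReceiver

queue = Queue()
receiver = MessageReceiver(queue)

thread = QThread()
receiver.moveToThread(thread)
thread.started.connect(receiver.run)
receiver.text_message_signal.connect(print)   # in a real GUI: append to a log widget instead
receiver.finished.connect(thread.quit)

thread.start()
queue.put('a message forwarded to the GUI thread\n')
# setting receiver.is_running = False later lets run() return and the thread quit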
Every time the user adds a delayed callback the timer gets reset to the new value and we will wait that new value until calling the callback with the last data given. Args: update_cb (function): the function we would like to run after a timer has run out """ super(TimedUpdate, self).__init__() self._cb_values = [] self._update_cb = update_cb self.timeout.connect(self._call_update_cb) self.timeout.connect(self.stop) def add_delayed_callback(self, delay, *cb_values): """Pushes a new delay to calling the callback function. Args: delay (int): the time in ms to wait cb_values (*list): the list of values to use as arguments to the callback function. Leave empty to disable. """ self._cb_values = cb_values self.start(delay) def _call_update_cb(self): if self._cb_values: self._update_cb(*self._cb_values) else: self._update_cb() PKjUpIR]mdt/gui/__init__.py__author__ = 'Robbert Harms' __date__ = "2014-11-27" __license__ = "LGPL v3" __maintainer__ = "Robbert Harms" __email__ = "robbert.harms@maastrichtuniversity.nl"PKjUpIϸ [A!A!mdt/gui/maps_visualizer/base.pyimport copy from mdt.visualization.dict_conversion import ConvertDictElements from mdt.visualization.maps.base import SingleMapConfig, MapPlotConfig, Zoom, Point class PlottingFrame(object): def __init__(self, controller, plotting_info_viewer=None): super(PlottingFrame, self).__init__() self._controller = controller self._plotting_info_viewer = plotting_info_viewer or NoOptPlottingFrameInfoViewer() def set_auto_rendering(self, auto_render): """Set if this plotting frame should auto render itself on every configuration update, or not. Args: auto_render (boolean): if True the plotting frame should auto render, if False it should only render on manual updates. """ def redraw(self): """Tell the plotting frame to do a redraw.""" def export_image(self, filename, width, height, dpi=100): """Export the current view as an image. Args: filename (str): where to write the file width (int): the width in pixels height (int): the height in pixels dpi (int): the dpi of the result """ class PlottingFrameInfoViewer(object): def __init__(self): """Implementations of this class can be given to a PlottingFrame to update viewing information. As an interface is bridges the gap between the rest of the GUI and the PlottingFrame and can encapsulate highlighting interesting aspects of one of the plots. """ def set_voxel_info(self, onscreen_coords, data_index, value): """Highlight a single voxel. Args: onscreen_coords (tuple of x,y): the coordinates of the voxel onscreen data_index (tuple of x,y,z,v): the 4d coordinates of the corresponding voxel in the data value (float): the value of the object in the 4d coordinates. """ def clear_voxel_info(self): """Tell the info viewer that we are no longer looking at a specific voxel.""" class NoOptPlottingFrameInfoViewer(PlottingFrameInfoViewer): def set_voxel_info(self, onscreen_coords, data_index, value): super(NoOptPlottingFrameInfoViewer, self).set_voxel_info(onscreen_coords, data_index, value) print(onscreen_coords, data_index, value) def clear_voxel_info(self): super(NoOptPlottingFrameInfoViewer, self).clear_voxel_info() print('clear') def cast_value(value, desired_type, alt_value): """Cast the given value to the desired type, on failure returns the alternative value. Args: value (object): the value to cast to the given type desired_type (:class:`type`): the type to cast to alt_value (object): the alternative value if casting threw exceptions Returns: the desired casted value or the alternative value if casting failed. 
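The TimedUpdate helper described above implements a debounce: every add_delayed_callback call restarts the timer, so the callback fires only once the calls have stopped for the given delay, with the most recent arguments. A short usage sketch (illustrative, assumes a running Qt event loop):

from mdt.gui.utils import TimedUpdate

def apply_new_zoom(factor):
    print('applying zoom', factor)

zoom_update = TimedUpdate(apply_new_zoom)

# typically called from a rapidly firing signal such as a slider's valueChanged;
# only the last value within the 400 ms idle window reaches apply_new_zoom
zoom_update.add_delayed_callback(400, 1.25)
zoom_update.add_delayed_callback(400, 1.50)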
""" try: return desired_type(value) except TypeError: return alt_value except ValueError: return alt_value class ConfigAction(object): def __init__(self): """Allows apply and unapply of configuration changes.""" self._previous_config = None def apply(self, data_info, configuration): """Apply the changes to the given configuration and return a new one. This should return a new configuration with the applied changes and should not update the given configuration. By default this method calls _apply(configuration) to facilitate quick implementation. Args: data_info (DataInfo): the current data information configuration (DisplayConfiguration): the configuration object Returns: MapPlotConfig: the updated configuration """ self._previous_config = configuration new_config = copy.deepcopy(configuration) updated_new_config = self._apply(data_info, new_config) if updated_new_config: return updated_new_config return new_config def unapply(self): """Return the configuration as it was before the application of this function. Returns: MapPlotConfig: the previous configuration """ return self._previous_config def _apply(self, data_info, configuration): """Facilitates quick implementation, called by apply() One can set configuration changes immediately to the given configuration. If nothing is returned we will use the given configuration as the new configuration. Args: data_info (DataInfo): the current data information configuration (MapPlotConfig): the configuration object Returns: GeneralConfiguration or None: the updated configuration. If nothing is returned we use the one given as argument. """ class SimpleConfigAction(ConfigAction): config_attribute = None def __init__(self, new_value): """A simple configuration action this sets the given value to the config attribute of the configuration.""" super(SimpleConfigAction, self).__init__() self.new_value = new_value def _apply(self, data_info, configuration): setattr(configuration, self.config_attribute, self.new_value) return self._extra_actions(data_info, configuration) def _extra_actions(self, data_info, configuration): """Called by the default configuration action to apply additional changes""" return configuration class SimpleMapSpecificConfigAction(SimpleConfigAction): config_attribute = None def __init__(self, map_name, new_value): super(SimpleMapSpecificConfigAction, self).__init__(new_value) self.map_name = map_name def _apply(self, data_info, configuration): if self.map_name not in configuration.map_plot_options: configuration.map_plot_options[self.map_name] = SingleMapConfig() single_map_config = super(SimpleMapSpecificConfigAction, self)._apply( data_info, configuration.map_plot_options[self.map_name]) if single_map_config is None: del configuration.map_plot_options[self.map_name] return configuration def _extra_actions(self, data_info, configuration): single_map_config = super(SimpleMapSpecificConfigAction, self)._extra_actions(data_info, configuration) if single_map_config == SingleMapConfig(): return None return single_map_config class Controller(object): def __init__(self): """Controller interface""" super(Controller, self).__init__() def set_data(self, data_info, config=None): """Set new data to visualize. Args: data_info (mdt.visualization.maps.base.DataInfo): the new data to visualize config (MapPlotConfig): the new configuration for the data If given, we will display the new data immediately with the given config """ def get_data(self): """Get the current data. 
Returns: mdt.visualization.maps.base.DataInfo: the current data information """ def set_config(self, general_config): """Set the general configuration to the given config. Setting this should automatically update all the listeners. Args: general_config (MapPlotConfig): the general configuration """ def get_config(self): """Get the current configuration. Returns: MapPlotConfig: the current general configuration. """ def apply_action(self, action): """Apply a new configuration action. If there is no difference between the current config and the one generated by this new action, the action will not be stored in history and will not need to be applied. Args: action (mdt.gui.maps_visualizer.base.ConfigAction): the configuration action to add and apply """ def undo(self): """Undo a previous configuration action""" def redo(self): """Reapply a previously undone configuration action""" def has_undo(self): """Check if this controller has an undo action available. Returns: boolean: True if an undo action is available. """ def has_redo(self): """Check if this controller has an redo action available. Returns: boolean: True if an redo action is available. """ PKz?~I6"mdt/gui/maps_visualizer/actions.pyfrom mdt.visualization.maps.base import Zoom, Point from .base import ConfigAction, SimpleConfigAction, SimpleMapSpecificConfigAction class SetDimension(SimpleConfigAction): config_attribute = 'dimension' def _extra_actions(self, data_info, configuration): if self.new_value != self._previous_config.dimension: max_slice = data_info.get_max_slice_index(self.new_value, configuration.maps_to_show) if configuration.slice_index > max_slice: configuration = SetSliceIndex(max_slice // 2).apply(data_info, configuration) return SetZoom(Zoom.no_zoom()).apply(data_info, configuration) class SetSliceIndex(SimpleConfigAction): config_attribute = 'slice_index' class SetVolumeIndex(SimpleConfigAction): config_attribute = 'volume_index' class SetMapsToShow(SimpleConfigAction): config_attribute = 'maps_to_show' class SetColormap(SimpleConfigAction): config_attribute = 'colormap' class SetRotate(SimpleConfigAction): config_attribute = 'rotate' def _extra_actions(self, data_info, configuration): if self.new_value != self._previous_config.rotate: new_rotation = self.new_value - self._previous_config.rotate if self._previous_config.flipud: new_rotation *= -1 new_zoom = self._previous_config.zoom.get_rotated( new_rotation, data_info.get_max_x_index(configuration.dimension, self._previous_config.rotate, configuration.maps_to_show) + 1, data_info.get_max_y_index(configuration.dimension, self._previous_config.rotate, configuration.maps_to_show) + 1) return SetZoom(new_zoom).apply(data_info, configuration) class SetZoom(SimpleConfigAction): config_attribute = 'zoom' class SetGeneralMask(SimpleConfigAction): config_attribute = 'mask_name' class SetPlotTitle(SimpleConfigAction): config_attribute = 'title' class SetMapTitle(SimpleMapSpecificConfigAction): config_attribute = 'title' class SetMapColorbarLabel(SimpleMapSpecificConfigAction): config_attribute = 'colorbar_label' class SetMapScale(SimpleMapSpecificConfigAction): config_attribute = 'scale' class SetMapClipping(SimpleMapSpecificConfigAction): config_attribute = 'clipping' class SetMapColormap(SimpleMapSpecificConfigAction): config_attribute = 'colormap' class NewConfigAction(ConfigAction): def __init__(self, new_config): super(NewConfigAction, self).__init__() self.new_config = new_config def apply(self, data_info, configuration): self._previous_config = configuration return 
self.new_config def unapply(self): return self._previous_config class SetFont(SimpleConfigAction): config_attribute = 'font' class SetShowAxis(SimpleConfigAction): config_attribute = 'show_axis' class SetColorBarNmrTicks(SimpleConfigAction): config_attribute = 'colorbar_nmr_ticks' class SetInterpolation(SimpleConfigAction): config_attribute = 'interpolation' class SetFlipud(SimpleConfigAction): config_attribute = 'flipud' def _extra_actions(self, data_info, configuration): if self.new_value != self._previous_config.flipud: max_y = data_info.get_max_y_index(configuration.dimension, configuration.rotate, configuration.maps_to_show) + 1 new_zoom = Zoom(Point(configuration.zoom.p0.x, max_y - configuration.zoom.p1.y), Point(configuration.zoom.p1.x, max_y - configuration.zoom.p0.y)) return SetZoom(new_zoom).apply(data_info, configuration) PKLxIF8Yb8989mdt/gui/maps_visualizer/main.pyimport matplotlib import signal import yaml from PyQt5.QtCore import QObject, pyqtSlot from PyQt5.QtCore import QTimer from PyQt5.QtCore import QUrl from PyQt5.QtCore import pyqtSignal from PyQt5.QtGui import QDesktopServices from PyQt5.QtWidgets import QDialog from PyQt5.QtWidgets import QDialogButtonBox from PyQt5.QtWidgets import QFileDialog from PyQt5.QtWidgets import QLabel from PyQt5.QtWidgets import QMainWindow from mdt.gui.maps_visualizer.actions import NewConfigAction, SetMapsToShow from mdt.gui.maps_visualizer.config_tabs.tab_general import TabGeneral from mdt.gui.maps_visualizer.config_tabs.tab_map_specific import TabMapSpecific from mdt.gui.maps_visualizer.config_tabs.tab_textual import TabTextual from mdt.gui.maps_visualizer.design.ui_save_image_dialog import Ui_SaveImageDialog matplotlib.use('Qt5Agg') import mdt from mdt.gui.maps_visualizer.base import Controller, PlottingFrameInfoViewer from mdt.visualization.maps.base import DataInfo, MapPlotConfig from mdt.gui.maps_visualizer.renderers.matplotlib_renderer import MatplotlibPlotting from mdt.gui.model_fit.design.ui_about_dialog import Ui_AboutDialog from mdt.gui.utils import center_window, DirectoryImageWatcher, QtManager from mdt.gui.maps_visualizer.design.ui_MainWindow import Ui_MapsVisualizer class MapsVisualizerWindow(QMainWindow, Ui_MapsVisualizer): def __init__(self, controller, parent=None, enable_directory_watcher=True): super(MapsVisualizerWindow, self).__init__(parent) self.setupUi(self) self._controller = controller self._controller.new_data.connect(self.set_new_data) self._controller.new_config.connect(self.set_new_config) self._directory_watcher = DirectoryImageWatcher() if enable_directory_watcher: self._directory_watcher.image_updates.connect(self._directory_changed) self._coordinates_label = QLabel() self.statusBar().addPermanentWidget(self._coordinates_label) self.statusBar().setStyleSheet("QStatusBar::item { border: 0px solid black }; ") self.plotting_info_to_statusbar = PlottingFrameInfoToStatusBar(self._coordinates_label) self.plotting_frame = MatplotlibPlotting(controller, parent=parent, plotting_info_viewer=self.plotting_info_to_statusbar) self.plotLayout.addWidget(self.plotting_frame) self.tab_general = TabGeneral(controller, self) self.generalTabPosition.addWidget(self.tab_general) self.tab_specific = TabMapSpecific(controller, self) self.mapSpecificTabPosition.addWidget(self.tab_specific) self.tab_textual = TabTextual(controller, self) self.textInfoTabPosition.addWidget(self.tab_textual) self.auto_rendering.setChecked(True) self.auto_rendering.stateChanged.connect(self._set_auto_rendering) 
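For completeness, a hedged sketch of driving these actions through a controller, as the visualizer window does via its undo/redo buttons; 'controller' is assumed to be a QtController that already has data set, and the map names are purely illustrative.

from mdt.gui.maps_visualizer.actions import SetDimension, SetMapsToShow

controller.apply_action(SetMapsToShow(['FA', 'MD']))   # example map names
controller.apply_action(SetDimension(2))               # slice index and zoom are adjusted by _extra_actions

if controller.has_undo():
    controller.undo()   # back to the previous configuration
if controller.has_redo():
    controller.redo()   # and forward again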
self.manual_render.clicked.connect(lambda: self.plotting_frame.redraw()) self.actionAbout.triggered.connect(lambda: AboutDialog(self).exec_()) self.actionOpen_directory.triggered.connect(self._open_new_directory) self.actionSaveImage.triggered.connect(lambda: ExportImageDialog(self, self.plotting_frame).exec_()) self.actionBrowse_to_current_folder.triggered.connect( lambda: QDesktopServices.openUrl(QUrl.fromLocalFile(self._controller.get_data().directory))) self.actionSave_settings.triggered.connect(lambda: self._save_settings()) self.actionLoad_settings.triggered.connect(lambda: self._load_settings()) self.undo_config.setDisabled(not self._controller.has_undo()) self.redo_config.setDisabled(not self._controller.has_redo()) self.undo_config.clicked.connect(lambda: self._controller.undo()) self.redo_config.clicked.connect(lambda: self._controller.redo()) @pyqtSlot(DataInfo) def set_new_data(self, data_info): self.actionBrowse_to_current_folder.setDisabled(self._controller.get_data().directory is None) if self._controller.get_data().directory is not None: self._directory_watcher.set_directory(self._controller.get_data().directory) @pyqtSlot(MapPlotConfig) def set_new_config(self, config): self.undo_config.setDisabled(not self._controller.has_undo()) self.redo_config.setDisabled(not self._controller.has_redo()) def _open_new_directory(self): initial_dir = self._controller.get_data().directory new_dir = QFileDialog(self).getExistingDirectory(caption='Select a folder', directory=initial_dir) if new_dir: data = DataInfo.from_dir(new_dir) config = MapPlotConfig() if len(data.maps): config.slice_index = data.get_max_slice_index(config.dimension) // 2 if self._controller.get_data().maps: start_gui(data, config, app_exec=False) else: self._controller.set_data(data, config) @pyqtSlot(tuple, tuple) def _directory_changed(self, additions, removals): data = DataInfo.from_dir(self._controller.get_data().directory) config = self._controller.get_config() config.maps_to_show = [m for m in config.maps_to_show if m not in removals] self._controller.set_data(data, config) @pyqtSlot() def _set_auto_rendering(self): auto_render = self.auto_rendering.isChecked() self.plotting_frame.set_auto_rendering(auto_render) if auto_render: self.plotting_frame.redraw() def send_sigint(self, *args): self.close() def _save_settings(self): """Save the current settings as a text file. 
Args: file_name: the filename to write to """ config_file = ['conf (*.conf)', 'All files (*)'] file_name, used_filter = QFileDialog().getSaveFileName(caption='Select the GUI config file', filter=';;'.join(config_file)) if file_name: with open(file_name, 'w') as f: f.write(self._controller.get_config().to_yaml()) def _load_settings(self): config_file = ['conf (*.conf)', 'All files (*)'] file_name, used_filter = QFileDialog().getOpenFileName(caption='Select the GUI config file', filter=';;'.join(config_file)) if file_name: with open(file_name, 'r') as f: try: self._controller.apply_action(NewConfigAction(MapPlotConfig.from_yaml(f.read()))) except yaml.parser.ParserError: pass except yaml.scanner.ScannerError: pass except ValueError: pass def set_window_title(self, title): self.setWindowTitle('MDT Maps Visualizer - {}'.format(title)) class PlottingFrameInfoToStatusBar(PlottingFrameInfoViewer): def __init__(self, status_bar_label): super(PlottingFrameInfoToStatusBar, self).__init__() self._status_bar_label = status_bar_label def set_voxel_info(self, onscreen_coords, data_index, value): super(PlottingFrameInfoToStatusBar, self).set_voxel_info(onscreen_coords, data_index, value) self._status_bar_label.setText("{}, {}, {:.3f}".format(onscreen_coords, data_index, value)) def clear_voxel_info(self): super(PlottingFrameInfoToStatusBar, self).clear_voxel_info() self._status_bar_label.setText("") class ExportImageDialog(Ui_SaveImageDialog, QDialog): previous_values = {'width': None, 'height': None, 'dpi': None, 'output_file': None} def __init__(self, parent, plotting_frame): super(ExportImageDialog, self).__init__(parent) self.setupUi(self) self._plotting_frame = plotting_frame self.buttonBox.button(QDialogButtonBox.Ok).setEnabled(False) self.buttonBox.button(QDialogButtonBox.Ok).clicked.connect(self._export_image) self.outputFile_box.textChanged.connect(self._update_ok_button) self.outputFile_chooser.clicked.connect(lambda: self._select_file()) if self.previous_values['width']: self.width_box.setValue(self.previous_values['width']) if self.previous_values['height']: self.height_box.setValue(self.previous_values['height']) if self.previous_values['dpi']: self.dpi_box.setValue(self.previous_values['dpi']) if self.previous_values['output_file']: self.outputFile_box.setText(self.previous_values['output_file']) @pyqtSlot() def _update_ok_button(self): self.buttonBox.button(QDialogButtonBox.Ok).setEnabled(self.outputFile_box.text() != '') def _select_file(self): graphical_image_filters = ['png (*.png)', 'svg (*.svg)', 'All files (*)'] open_file, used_filter = QFileDialog().getSaveFileName(caption='Select the output file', filter=';;'.join(graphical_image_filters)) if open_file: self.outputFile_box.setText(open_file) self._update_ok_button() def _export_image(self): self._plotting_frame.export_image(self.outputFile_box.text(), self.width_box.value(), self.height_box.value(), dpi=self.dpi_box.value()) self.previous_values['width'] = self.width_box.value() self.previous_values['height'] = self.height_box.value() self.previous_values['dpi'] = self.dpi_box.value() self.previous_values['output_file'] = self.outputFile_box.text() class AboutDialog(Ui_AboutDialog, QDialog): def __init__(self, parent): super(AboutDialog, self).__init__(parent) self.setupUi(self) self.contentLabel.setText(self.contentLabel.text().replace('{version}', mdt.__version__)) class QtController(Controller, QObject): new_data = pyqtSignal(DataInfo) new_config = pyqtSignal(MapPlotConfig) def __init__(self): super(QtController, 
self).__init__() self._data_info = DataInfo({}) self._actions_history = [] self._redoable_actions = [] self._current_config = MapPlotConfig() def set_data(self, data_info, config=None): self._data_info = data_info self._actions_history = [] self._redoable_actions = [] if not config: config = MapPlotConfig() elif not isinstance(config, MapPlotConfig): config = MapPlotConfig.from_dict(config.to_dict()) config.maps_to_show = list(filter(lambda k: k in data_info.maps, config.maps_to_show)) self._apply_config(config) self.new_data.emit(data_info) self.new_config.emit(self._current_config) def get_data(self): return self._data_info def set_config(self, general_config): applied = self._apply_config(general_config) if applied: self._actions_history.clear() self._redoable_actions.clear() self.new_config.emit(self._current_config) def get_config(self): return self._current_config def apply_action(self, action): applied = self._apply_config(action.apply(self._data_info, self._current_config)) if applied: self._actions_history.append(action) self._redoable_actions = [] self.new_config.emit(self._current_config) def undo(self): if len(self._actions_history): action = self._actions_history.pop() self._apply_config(action.unapply()) self._redoable_actions.append(action) self.new_config.emit(self._current_config) def redo(self): if len(self._redoable_actions): action = self._redoable_actions.pop() self._apply_config(action.apply(self._data_info, self._current_config)) self._actions_history.append(action) self.new_config.emit(self._current_config) def has_undo(self): return len(self._actions_history) > 0 def has_redo(self): return len(self._redoable_actions) > 0 def _apply_config(self, new_config): """Apply the current configuration. Args: new_config (MapPlotConfig): the new configuration to apply Returns: bool: if the configuration was applied or not. If the difference with the current configuration and the old one is None, False is returned. Else True is returned. """ validated_config = new_config.validate(self._data_info) if self._current_config != validated_config: self._current_config = validated_config return True return False def start_gui(data=None, config=None, controller=None, app_exec=True, show_maximized=False, window_title=None, enable_directory_watcher=True): """Start the GUI with the given data and configuration. Args: data (DataInfo): the initial set of data config (MapPlotConfig): the initial configuration controller (QtController): the controller to use in the application app_exec (boolean): if true we execute the Qt application, set to false to disable. show_maximized (true): if we want to show the window in a maximized state window_title (str): the title of the window enable_directory_watcher (boolean): if the directory watcher should be enabled/disabled. If the directory watcher is enabled, the viewer will automatically add new maps when added to the folder and also automatically remove maps when they are removed from the directory. It is useful to disable this if you want to have multiple viewers open with old results. 
Returns: MapsVisualizerWindow: the generated window """ controller = controller or QtController() app = QtManager.get_qt_application_instance() # catches the sigint timer = QTimer() timer.start(500) timer.timeout.connect(lambda: None) main = MapsVisualizerWindow(controller, enable_directory_watcher=enable_directory_watcher) main.set_window_title(window_title) signal.signal(signal.SIGINT, main.send_sigint) center_window(main) if show_maximized: main.showMaximized() main.show() if data: controller.set_data(data, config) elif config: controller.set_config(config) QtManager.add_window(main) if app_exec: QtManager.exec_() return main PKjUpIy."mdt/gui/maps_visualizer/widgets.pyfrom PyQt5.QtCore import QEvent from PyQt5.QtCore import pyqtSignal from PyQt5.QtCore import pyqtSlot from PyQt5.QtWidgets import QDoubleSpinBox from PyQt5.QtWidgets import QFrame from PyQt5.QtWidgets import QLabel from PyQt5.QtWidgets import QListWidget from PyQt5.QtWidgets import QPlainTextEdit from mdt.gui.utils import TimedUpdate class CollapsablePanel(QFrame): def __init__(self, parent=None): super(CollapsablePanel, self).__init__(parent) def toggle(self): content = self.findChild(CollapsablePanelContent) if content.isVisible(): content.hide() else: content.show() def set_collapse(self, collapse): content = self.findChild(CollapsablePanelContent) if collapse: content.hide() else: content.show() class CollapsablePanelHeader(QLabel): def mousePressEvent(self, QMouseEvent): super(CollapsablePanelHeader, self).mousePressEvent(QMouseEvent) self.parent().toggle() class CollapsablePanelContent(QFrame): def __init__(self, parent=None): super(CollapsablePanelContent, self).__init__(parent) class TextConfigEditor(QPlainTextEdit): new_config = pyqtSignal(str) def __init__(self, *args): super(TextConfigEditor, self).__init__(*args) self._timer = TimedUpdate(self._timer_event) self.textChanged.connect(lambda: self._timer.add_delayed_callback(400)) @pyqtSlot() def _timer_event(self): self.new_config.emit(self.toPlainText()) class MapsReorderer(QListWidget): items_reordered = pyqtSignal() def __init__(self, *args): super(MapsReorderer, self).__init__(*args) self.installEventFilter(self) def eventFilter(self, sender, event): if event.type() == QEvent.ChildRemoved: self.items_reordered.emit() return False class QDoubleSpinBoxDotSeparator(QDoubleSpinBox): def __init__(self, *args, **kwargs): super(QDoubleSpinBoxDotSeparator, self).__init__(*args, **kwargs) def valueFromText(self, text): return float(text) def textFromValue(self, value): return str(value) PKjUpI#mdt/gui/maps_visualizer/__init__.pyPKl>~IRiG36mdt/gui/maps_visualizer/design/ui_save_image_dialog.py# -*- coding: utf-8 -*- # Form implementation generated from reading ui file 'save_image_dialog.ui' # # Created by: PyQt5 UI code generator 5.4.2 # # WARNING! All changes made in this file will be lost! 
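Based on the start_gui signature documented above, launching the maps visualizer programmatically could look like the sketch below (the output directory is a placeholder; disabling the directory watcher keeps the shown maps fixed, as noted above).

from mdt.visualization.maps.base import DataInfo, MapPlotConfig
from mdt.gui.maps_visualizer.main import start_gui

data = DataInfo.from_dir('/path/to/fitted/model/output')   # folder containing the nifti maps
config = MapPlotConfig()

start_gui(data, config, show_maximized=True, window_title='Model results',
          enable_directory_watcher=False)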
from PyQt5 import QtCore, QtGui, QtWidgets class Ui_SaveImageDialog(object): def setupUi(self, SaveImageDialog): SaveImageDialog.setObjectName("SaveImageDialog") SaveImageDialog.resize(722, 253) self.verticalLayout = QtWidgets.QVBoxLayout(SaveImageDialog) self.verticalLayout.setObjectName("verticalLayout") self.verticalLayout_3 = QtWidgets.QVBoxLayout() self.verticalLayout_3.setSpacing(0) self.verticalLayout_3.setObjectName("verticalLayout_3") self.label_3 = QtWidgets.QLabel(SaveImageDialog) font = QtGui.QFont() font.setPointSize(14) self.label_3.setFont(font) self.label_3.setObjectName("label_3") self.verticalLayout_3.addWidget(self.label_3) self.label_4 = QtWidgets.QLabel(SaveImageDialog) font = QtGui.QFont() font.setItalic(True) self.label_4.setFont(font) self.label_4.setObjectName("label_4") self.verticalLayout_3.addWidget(self.label_4) self.verticalLayout.addLayout(self.verticalLayout_3) self.line = QtWidgets.QFrame(SaveImageDialog) self.line.setFrameShape(QtWidgets.QFrame.HLine) self.line.setFrameShadow(QtWidgets.QFrame.Sunken) self.line.setObjectName("line") self.verticalLayout.addWidget(self.line) self.gridLayout = QtWidgets.QGridLayout() self.gridLayout.setObjectName("gridLayout") self.horizontalLayout = QtWidgets.QHBoxLayout() self.horizontalLayout.setObjectName("horizontalLayout") self.outputFile_chooser = QtWidgets.QPushButton(SaveImageDialog) self.outputFile_chooser.setEnabled(True) self.outputFile_chooser.setObjectName("outputFile_chooser") self.horizontalLayout.addWidget(self.outputFile_chooser) self.outputFile_box = QtWidgets.QLineEdit(SaveImageDialog) self.outputFile_box.setEnabled(True) self.outputFile_box.setObjectName("outputFile_box") self.horizontalLayout.addWidget(self.outputFile_box) self.horizontalLayout.setStretch(1, 1) self.gridLayout.addLayout(self.horizontalLayout, 3, 1, 1, 1) self.label_12 = QtWidgets.QLabel(SaveImageDialog) font = QtGui.QFont() font.setItalic(True) self.label_12.setFont(font) self.label_12.setObjectName("label_12") self.gridLayout.addWidget(self.label_12, 0, 2, 1, 1) self.label_11 = QtWidgets.QLabel(SaveImageDialog) font = QtGui.QFont() font.setItalic(True) self.label_11.setFont(font) self.label_11.setObjectName("label_11") self.gridLayout.addWidget(self.label_11, 2, 2, 1, 1) self.width_box = QtWidgets.QSpinBox(SaveImageDialog) self.width_box.setMinimum(100) self.width_box.setMaximum(9999) self.width_box.setProperty("value", 1280) self.width_box.setObjectName("width_box") self.gridLayout.addWidget(self.width_box, 0, 1, 1, 1) self.label_14 = QtWidgets.QLabel(SaveImageDialog) font = QtGui.QFont() font.setItalic(True) self.label_14.setFont(font) self.label_14.setObjectName("label_14") self.gridLayout.addWidget(self.label_14, 1, 2, 1, 1) self.label_6 = QtWidgets.QLabel(SaveImageDialog) self.label_6.setObjectName("label_6") self.gridLayout.addWidget(self.label_6, 0, 0, 1, 1) self.label_7 = QtWidgets.QLabel(SaveImageDialog) self.label_7.setObjectName("label_7") self.gridLayout.addWidget(self.label_7, 3, 0, 1, 1) self.label = QtWidgets.QLabel(SaveImageDialog) self.label.setObjectName("label") self.gridLayout.addWidget(self.label, 1, 0, 1, 1) self.height_box = QtWidgets.QSpinBox(SaveImageDialog) self.height_box.setMinimum(100) self.height_box.setMaximum(9999) self.height_box.setProperty("value", 720) self.height_box.setObjectName("height_box") self.gridLayout.addWidget(self.height_box, 1, 1, 1, 1) self.label_5 = QtWidgets.QLabel(SaveImageDialog) self.label_5.setObjectName("label_5") self.gridLayout.addWidget(self.label_5, 2, 0, 1, 1) self.dpi_box = 
QtWidgets.QSpinBox(SaveImageDialog) self.dpi_box.setMinimum(10) self.dpi_box.setMaximum(1000) self.dpi_box.setProperty("value", 100) self.dpi_box.setObjectName("dpi_box") self.gridLayout.addWidget(self.dpi_box, 2, 1, 1, 1) self.verticalLayout.addLayout(self.gridLayout) spacerItem = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding) self.verticalLayout.addItem(spacerItem) self.line_3 = QtWidgets.QFrame(SaveImageDialog) self.line_3.setFrameShape(QtWidgets.QFrame.HLine) self.line_3.setFrameShadow(QtWidgets.QFrame.Sunken) self.line_3.setObjectName("line_3") self.verticalLayout.addWidget(self.line_3) self.buttonBox = QtWidgets.QDialogButtonBox(SaveImageDialog) self.buttonBox.setOrientation(QtCore.Qt.Horizontal) self.buttonBox.setStandardButtons(QtWidgets.QDialogButtonBox.Cancel|QtWidgets.QDialogButtonBox.Ok) self.buttonBox.setObjectName("buttonBox") self.verticalLayout.addWidget(self.buttonBox) self.retranslateUi(SaveImageDialog) self.buttonBox.accepted.connect(SaveImageDialog.accept) self.buttonBox.rejected.connect(SaveImageDialog.reject) QtCore.QMetaObject.connectSlotsByName(SaveImageDialog) SaveImageDialog.setTabOrder(self.dpi_box, self.outputFile_chooser) SaveImageDialog.setTabOrder(self.outputFile_chooser, self.outputFile_box) def retranslateUi(self, SaveImageDialog): _translate = QtCore.QCoreApplication.translate SaveImageDialog.setWindowTitle(_translate("SaveImageDialog", "Save Image")) self.label_3.setText(_translate("SaveImageDialog", "Save image")) self.label_4.setText(_translate("SaveImageDialog", "Export the current view to an image.")) self.outputFile_chooser.setText(_translate("SaveImageDialog", "Browse")) self.label_12.setText(_translate("SaveImageDialog", "(The width of the image in pixels)")) self.label_11.setText(_translate("SaveImageDialog", "(Dots/pixels per inch)")) self.label_14.setText(_translate("SaveImageDialog", "(The height of the image in pixels)")) self.label_6.setText(_translate("SaveImageDialog", "Width:")) self.label_7.setText(_translate("SaveImageDialog", "Output file:")) self.label.setText(_translate("SaveImageDialog", "Height:")) self.label_5.setText(_translate("SaveImageDialog", "DPI:")) PKl>~IG0~rFrF)mdt/gui/maps_visualizer/design/main_rc.py# -*- coding: utf-8 -*- # Resource object code # # Created by: The Resource Compiler for PyQt5 (Qt v5.4.2) # # WARNING! All changes made in this file will be lost! 
from PyQt5 import QtCore qt_resource_data = b"\ \x00\x00\x02\x1f\ \x89\ \x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\ \x00\x00\x1e\x00\x00\x00\x1e\x08\x06\x00\x00\x00\x3b\x30\xae\xa2\ \x00\x00\x00\x06\x62\x4b\x47\x44\x00\xff\x00\xff\x00\xff\xa0\xbd\ \xa7\x93\x00\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0b\x13\x00\x00\ \x0b\x13\x01\x00\x9a\x9c\x18\x00\x00\x00\x07\x74\x49\x4d\x45\x07\ \xe0\x09\x07\x0b\x00\x2f\xed\xd3\x39\x52\x00\x00\x01\xac\x49\x44\ \x41\x54\x48\xc7\xe5\xd7\xbf\x4b\xd5\x61\x14\xc7\xf1\x57\x96\x59\ \x94\x51\xa0\x43\x41\x14\x54\x60\x3f\xfe\x85\x86\x5a\xc4\xa0\xad\ \xc5\xc5\xc1\x41\xb0\xad\x68\xb0\xb1\x35\x50\x07\xc1\xb1\x06\xa9\ \xd0\x21\x08\x22\xdc\x0c\x1a\x0c\x2a\x9a\x92\x88\x90\x84\x84\x8a\ \x22\x25\x13\xc2\xf4\xb6\x9c\x6f\x5c\x2f\xf7\xc7\xf7\xfe\xfa\x5e\ \xa2\x03\x87\xef\xf2\x9c\xe7\x7d\x1e\xce\xe7\x7b\xce\xf3\xf0\x8f\ \xdb\x4e\xec\xcd\x1a\xda\x86\x8b\xb8\x87\xb3\x59\x82\x3b\x30\x81\ \x1c\x3e\xe0\x32\xda\xd3\x64\xdb\x08\xdb\x88\xef\x31\xdc\xc7\x75\ \x1c\xcc\xe2\xc4\x63\x71\xe2\xc4\xb7\x70\x07\x27\x9b\x09\xde\x85\ \xd1\x02\x70\xe2\x73\x51\xff\x86\x8b\x6a\x1f\xba\x31\x59\x02\x9c\ \xc3\x3b\x0c\x85\xf2\xff\xda\x8e\x94\x90\x53\x38\x83\xe3\x38\x82\ \x43\xf1\xfb\xec\x09\x21\x9d\xc3\x89\x32\xf1\xab\xb8\x8b\x5b\x58\ \xa9\x04\xeb\xc6\x55\x4c\xe3\x05\x96\xb0\x56\xe6\x64\x95\x7c\x13\ \x4f\xd0\x53\x0a\xd8\x8e\x6b\x78\x15\xd9\xe5\x1a\xec\x0b\xb8\x54\ \x08\x3d\x8c\x47\x58\x6f\x02\x30\xdf\x5f\xe6\x43\xbb\xf0\xbc\xc9\ \xc0\x1c\x3e\xe1\x7c\x02\xdd\x8d\x07\x19\x40\x9f\x86\x40\xdb\x12\ \x65\xf7\xd5\xb8\xd1\x0f\x7c\x4b\x59\x9a\x31\x1c\x28\x6c\x00\xcf\ \x8a\x2c\xdc\x0a\x5f\xc1\x2c\x6e\x86\x28\x4e\x63\x7f\xc1\x64\x1a\ \x2d\x03\xfc\x89\xc1\xe0\x6c\xb3\xa3\x45\x16\x6f\x84\x00\x06\x52\ \x8c\xbb\x62\x2d\x33\xf1\xb7\xb8\x50\x2a\x70\xa0\x48\xf1\x47\xea\ \xec\xd5\x9b\x78\x5c\xa9\x57\xdf\xce\x0b\x58\x46\x7f\x0d\x43\x62\ \x3c\x6f\x8f\xd5\x48\xa4\xb3\x52\xe0\x54\x04\xac\xe3\x46\x8d\xd3\ \x29\xa9\xf1\x7b\x0c\xa7\x0d\x9c\x89\xa0\xf9\x3a\xa6\xd3\x95\x68\ \x87\xbd\xd5\x04\x4e\x85\x98\x86\x1a\x30\x97\xab\x1a\x6d\x5f\xf1\ \x1b\x0f\xeb\x04\xff\xaa\x16\xbc\x84\xcf\x91\x40\xa6\x37\xc4\x37\ \x58\x6c\xc5\xd5\xf4\x35\xbe\xb4\x02\xfc\x3d\xe0\x99\xbf\x00\x84\ \xb8\x3e\xb6\xe2\x09\xd2\xe1\x7f\xb1\x3f\x67\xf2\xf7\xa8\x9a\x67\ \xae\xf6\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\ \x00\x00\x0b\x85\ \x89\ \x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\ \x00\x00\x86\x00\x00\x00\x9b\x08\x02\x00\x00\x00\x28\x53\x74\x83\ \x00\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0d\xaa\x00\x00\x0d\xaa\ \x01\x6f\x05\x82\xdc\x00\x00\x00\x07\x74\x49\x4d\x45\x07\xe0\x0b\ \x13\x09\x11\x1b\x39\x81\xa9\x31\x00\x00\x0b\x24\x49\x44\x41\x54\ \x78\xda\xed\x9d\x7b\x50\x53\x57\x1e\xc7\x6f\x08\x49\x14\x05\x25\ \x0e\x24\x2e\x1a\x56\x6d\x2b\x45\x6b\x15\xa9\x7d\x3a\xd5\x11\x5f\ \xed\xee\xce\xba\xa3\xd5\xaa\x75\x5b\x67\x77\x67\x57\x67\xd7\xdd\ \xfe\x61\x67\x96\xd9\x5a\x75\xed\xb8\xda\x4e\xbb\xdd\xed\x5a\x44\ \x53\xa5\x42\xb5\x59\x95\x47\x50\x50\x11\xb7\xe2\x03\x44\x03\x88\ \x09\x21\x84\xbc\x80\x24\x40\x78\x85\x18\x72\x6f\x1e\xfb\x47\x3a\ \x69\xb8\xb9\x09\x18\xee\x3b\xe7\xf7\xe7\xb9\x37\xf7\x9e\x9c\x4f\ \xce\xef\xfb\x3b\xbf\x7b\xcf\x2f\x1c\x9f\xcf\x07\x01\xa3\xda\x60\ \x18\xa9\xab\x57\x7d\xf8\x77\xa9\xc1\x68\x89\x07\xc3\x41\x39\x0c\ \x8b\xb5\xcf\x68\xb2\x1e\x3b\x51\x6c\xb1\xd8\x20\x08\x02\x48\xa8\ \x87\x71\xb2\xa0\xdc\x60\xb2\x5a\x2c\x36\x17\x8c\x00\x24\x74\x81\ \x01\xc3\x48\x40\x40\x00\x12\x0a\x78\x34\x34\x69\x8e\x9d\x28\x09\ \x85\x01\x90\x50\x29\x1b\x8a\x46\x4d\x28\x0c\x80\x84\x52\xd9\x70\ 
\x21\xe1\x4e\x06\x48\x88\x35\x8f\xc7\x3b\x30\x68\x57\xb7\x9a\xf2\ \x4f\x95\x85\xf3\x54\x00\x09\xa9\x93\x43\xd3\xd6\x71\xa5\xea\x5e\ \xf5\xf7\x0f\xda\xda\x3b\xc7\x84\x01\x90\x90\xe1\xa9\x8e\x4b\x4b\ \x5b\x5a\x8d\x83\x43\xc3\x6e\xb7\x77\x9c\x9f\x05\x48\xa8\x94\x0d\ \x80\x84\x58\xf3\xf9\x7c\x23\x23\x70\x53\xb3\xf6\xab\x93\x25\xe3\ \x94\x0d\x80\x84\xd8\xc9\xd1\xd9\xd5\xa3\x52\x1b\x0b\x8a\x2a\x9a\ \x9a\xb5\xd1\xc1\x00\x48\x70\xf6\x54\x27\x4e\xcb\x75\x7a\x73\x77\ \x4f\x3f\x0c\xbb\x27\x72\x41\x80\x04\xd7\xbc\x88\x0b\x99\x78\x5e\ \x1d\x20\x21\x2a\x2f\x02\x90\xd0\x2e\x2f\x02\x90\x30\x23\xc0\x05\ \x48\x88\x94\x0d\xbc\x27\x07\x40\x42\x0b\xd9\x00\x48\x68\x27\x1b\ \x00\x09\xed\x64\x03\x20\x19\x97\x79\x7d\x3e\x87\xc3\xf9\x48\xa9\ \xcb\x93\x96\x92\xe3\xa9\x00\x92\x31\x26\x87\xc1\x64\x7d\xa0\x68\ \x95\x15\x57\x2b\x55\x7a\x92\x61\x00\x24\xd8\x9e\x2a\xff\x94\x5c\ \xdb\xde\xd9\x6b\x1b\x44\x10\x37\x25\x3d\x01\x48\xa8\x94\x0d\x80\ \x84\xfa\x00\x17\x20\xa1\x5d\x80\x0b\x90\x30\xc3\x53\xc5\x34\x12\ \x32\xf3\x22\x00\x09\x23\x65\x23\x76\x91\xd0\x56\x36\x62\x11\x09\ \xcd\x65\x23\xb6\x90\x78\xbd\xde\x21\xfb\x63\x55\x8b\xfe\xf8\xd7\ \x65\x34\xf7\x54\x31\x81\x04\x86\x91\x76\x5d\x57\xcd\x9d\x87\xe5\ \x15\x77\xd4\x1a\x23\x53\x60\xb0\x13\xc9\x8f\xaf\x19\x7e\x5d\xa6\ \x69\x33\xf5\xf7\xdb\x11\xb7\x87\x59\x5f\x21\x9e\x7d\x30\x18\x24\ \x1b\x6c\x46\xc2\x94\x00\x37\x26\x90\x30\x2b\xc0\x65\x39\x12\xd6\ \x78\x2a\x36\x20\x61\x44\x5e\x24\x86\x90\xb0\x49\x36\x18\x8f\x84\ \x7d\xb2\xc1\x60\x24\x6c\x95\x0d\x46\x22\x61\x6e\x5e\x84\x9d\x48\ \x18\x9d\x17\x61\x1b\x12\x16\xe4\x45\xd8\x83\x24\xa6\x64\x83\x01\ \x48\x58\x1f\xe0\x32\x09\x49\x8c\x04\xb8\xcc\x40\x02\x3c\x15\x8d\ \x90\xb0\x3b\x2f\xc2\x3c\x24\x40\x36\x68\x84\x04\xc8\x06\x8d\x90\ \x00\xd9\xa0\x11\x92\x28\xca\x52\x01\x24\xc4\x4e\x8e\x28\xca\x52\ \x01\x24\x04\xe7\x45\x9e\xbc\x2c\x15\x40\x02\x64\x83\xbd\x48\xf0\ \x2a\x4b\x05\x90\xe0\x36\x39\xf0\x2a\x4b\x05\x90\xe0\xe6\xa9\xf0\ \x2a\x4b\x05\x90\xe0\x9a\x17\xc1\xa3\x2c\x15\x40\x42\x6c\x5e\xe4\ \xb9\x05\x73\x4f\xe7\xe7\x86\xbb\xc8\x91\xcf\x8a\xce\xca\xaa\xc6\ \xbc\x97\xec\xcc\x81\x79\x73\xd3\x30\x0f\xb5\xb4\x1a\xb6\xbe\x7b\ \x20\xb4\xfd\xf2\xc5\xa3\x22\x91\x10\xf3\x23\x43\x76\x07\x0c\xbb\ \x9d\xce\x11\xc7\xe3\x91\x11\x27\x6c\xb6\xd8\x8c\x1d\x56\xa5\x4a\ \xff\xa0\x51\xe3\x70\x38\x23\xf7\xe4\xcf\xbb\x37\xed\xd8\xb6\x0e\ \x2f\x06\x5e\x8f\x37\x7b\xf9\x6f\x71\x40\x82\x20\x88\xd9\xf2\x43\ \x5e\xa4\xa1\x51\xe3\x8a\x28\x1b\xf1\xf1\xdc\x69\x49\x53\xc2\x1d\ \x7d\x6f\xfb\xfa\x73\xff\xbd\x1e\xf9\x9f\x54\x96\x65\x3f\x9b\xb5\ \xf8\x99\x70\x47\xa7\x26\x4c\xc6\x6c\x4f\x4c\x4c\x08\x77\xdf\x70\ \xed\x1e\x8f\xa7\xe6\xf6\xc3\xef\x2e\x54\x5f\xa9\xaa\xf3\x7a\xb1\ \xbb\x34\x69\x12\x3f\xc2\xd7\x79\xe2\x75\xb4\xd7\x3b\xd1\x59\x82\ \x7b\x80\xfb\xd4\xbc\x59\xaf\xbc\xb4\xf0\xd6\x9d\x87\x11\xce\xd9\ \xb1\x75\x1d\x39\x7e\x83\xcb\xe5\xbe\xbe\x7c\xf1\xeb\xcb\x17\xb7\ \xb4\x1a\x73\x3f\x3a\xde\xd0\xd4\x46\x6b\xc7\xe5\x76\x7b\xfa\xfa\ \x87\xda\xb4\x1d\xf9\xa7\xe4\xf8\x06\xb8\x3b\xde\x5e\x1b\x01\xc9\ \x4c\xf1\x8c\x35\xab\xb2\x49\x1e\x9a\x8c\x67\x24\xdf\x9d\x39\x70\ \xf8\x93\x33\xd2\x82\x4b\x34\x45\x02\x23\xee\x96\x16\x43\xb1\xfc\ \x66\xed\x3d\xa5\xce\x60\xc6\x37\xc0\x5d\xb9\x22\x6b\x56\x5a\x4a\ \x47\x67\x0f\xe6\xd1\x6d\x9b\x57\x73\xb9\x5c\xf2\x95\x96\x1b\x17\ \x97\xbb\x77\xc7\xe4\x49\x82\x2f\x8f\x5f\xa4\x17\x12\x04\x71\x9b\ \x2d\x36\xa3\xc9\x9a\x27\x2d\x55\xaa\xf4\xf6\xe1\xc7\x1e\x8f\x17\ \xf7\x2f\xbf\x7d\xcb\x9a\xc3\x9f\x16\x86\x1e\x12\x08\x78\x5b\x36\ \xae\xc2\xf1\x5e\x5a\x5d\x97\xba\xd5\x08\x41\x10\x9f\xcf\x4b\x4a\ 
\x4c\x48\x4d\x49\x4e\x97\x88\x38\x1c\x4e\xb8\xf3\xff\xf2\xc7\xb7\ \xda\xf5\x5d\x97\xaf\xd4\xfe\x18\x4d\xa8\x8d\x97\x2a\xef\x86\x3b\ \x7f\xdd\xea\x17\xe3\xe2\xd0\x57\x53\xa9\x0d\x3a\xbd\x39\xdc\xe2\ \xfa\x09\x90\x04\x60\x9c\x2c\x28\x37\x9a\xac\x66\x22\xf3\x22\x9b\ \x7e\xb5\xf2\xf3\x7f\xcb\x46\x5c\x30\xaa\xfd\xe7\x6f\xbc\x9a\x9c\ \x9c\x88\xe3\x8d\xae\x5d\xaf\x3f\xf2\x59\x51\x70\xcb\xf4\xe9\x89\ \xaf\xbf\xf6\xfc\x7b\xef\xbc\xf1\xdc\x82\xb9\xa1\xe7\x73\x38\x9c\ \xa3\x1f\xef\x7a\xa4\xd2\x1b\x4d\x56\x7f\x4b\xb1\xfc\x66\xb1\xfc\ \x66\xb8\xeb\xab\x1b\x0a\xe3\xe2\xd0\x73\xfa\x42\xc9\xff\xc6\xe9\ \x00\xe3\x22\xe4\x45\x86\x87\x1f\xdf\x57\xa8\xf7\x1d\x92\xee\x3b\ \x24\xad\xbd\xa7\x34\x18\x2d\xf8\xf2\x40\xc5\x33\xd3\xa7\x4d\xfd\ \xc5\x9b\xaf\x62\xca\x4c\x84\x4f\xe1\x62\x03\x03\xf6\x12\x79\xcd\ \x86\x2d\xb9\x07\x0f\x9f\xc6\x2c\x30\x34\x79\x92\xe0\x83\xf7\xb7\ \x92\xe3\xb8\xe2\xc2\x4d\x0e\x6d\x7b\x67\x79\xe5\xdd\x4f\xbe\x38\ \x1b\x80\x81\x7b\x6a\xa4\x4d\xdb\x81\x12\x8f\x77\xb6\xae\x45\x9d\ \x93\x9d\x95\xb1\x20\x73\x4e\x70\xcb\x8d\x9b\x0a\x82\xc6\xc2\xe7\ \xf3\x9d\x3a\x73\xf9\x83\xbf\x7d\x85\x19\x8e\xaf\x59\xb5\x2c\x5d\ \x22\xa6\x00\x09\x82\xb8\x8d\x26\x6b\xed\x3d\xe5\xc1\x7f\x14\xfc\ \xeb\xd8\xf9\xe6\x47\x3a\x22\x60\x04\xe2\xf1\x33\x67\xaf\x04\xb7\ \x64\x66\xfc\xf4\x85\xa5\x19\xa3\x63\xdf\xb5\xa8\xf8\x7b\x3c\x8b\ \xca\x89\x58\x89\xbc\xa6\x58\x5e\x83\x31\x52\x71\x9c\x55\x2b\xb2\ \x48\x45\x82\x20\x88\x1f\x86\xdf\x53\xdd\xbb\xaf\x32\x5b\x6c\x44\ \x97\x09\x93\x5d\xa8\x76\x8e\xb8\x46\x4d\x94\x20\x37\x25\x4a\x15\ \xae\xcd\x59\x16\x7c\xb4\xbc\xf2\xae\xad\x6f\x90\xe8\x41\xf9\x32\ \xef\x02\xe6\x44\x79\xed\xe5\x45\xe4\x45\x5c\x30\xec\x6e\x68\x6a\ \x3b\x76\xa2\xd8\xaf\xe1\xa4\x65\x70\x07\x06\x87\x4b\xe4\x35\xc1\ \x01\xd5\xda\x9c\x65\xa2\x54\xa1\xb5\xbb\x0f\x82\xa0\x6d\x9b\x73\ \xe2\xe3\x47\xe9\x64\x41\x61\x45\x68\x30\x83\xbb\xe9\xf4\x66\xad\ \xae\xeb\xa9\x90\xe4\x4d\x76\xd6\x7c\x32\x66\x89\xdf\x53\xd5\xd5\ \x2b\xff\xf9\x1f\x59\x1d\x61\xb2\x11\xc1\xbe\x29\xaa\x44\xa5\x5e\ \xb6\x6d\xce\xf1\x47\xa8\x5b\x36\xe5\x04\x1f\x52\x34\x6a\x9a\x9a\ \xb5\xe4\xf4\x4a\xd1\xd8\x1a\xda\x38\x65\xca\xe4\x84\xc9\x02\xc2\ \x91\x04\x3c\x95\xa2\x51\x33\xe2\xa2\xe0\xf1\x46\x4b\xab\xb1\xb6\ \x5e\x19\xdc\xf2\xf6\x5b\x39\x02\x01\xef\x67\xeb\x5f\x99\x21\x4c\ \x0a\x6e\x3f\x5d\x58\x41\x5a\xaf\x7a\x7b\xb1\xdd\x23\xbe\xe1\x38\ \x36\x92\x63\x27\x4b\x9a\x9a\xdb\x3b\x3a\x7b\x28\x7c\xbc\x51\x50\ \x38\x6a\xa2\x08\x93\x93\xd6\xaf\x7e\x09\x25\xec\xdd\x3d\x03\x15\ \x57\x6b\x49\xeb\xd2\x70\x98\x64\x70\xe2\xd4\x04\xc2\x91\xec\xfe\ \xdd\x86\x45\x0b\xe7\xa6\xfd\x24\x85\xc7\xa3\xec\xcd\xc7\xab\xd7\ \xeb\xbb\xcc\xbd\xc1\x2d\x7b\xdf\xdf\x8a\x5a\xb5\x9d\x95\x5d\x23\ \xb3\x24\x69\x52\x22\x76\xa2\x77\x70\xc8\x41\x38\x92\xa5\x4b\xe6\ \x7f\xf4\xd7\x9d\xfb\x73\x77\x2e\x79\xfe\x69\x3e\x9f\x47\x09\x12\ \x8f\xc7\x53\x78\xee\x6a\x70\x8b\x28\x35\x19\x15\x9a\x7f\x2b\xbb\ \x46\x66\x97\x52\x47\x77\x20\x60\xfd\xfd\x76\xc2\x91\x08\xf8\x3c\ \xc9\x6c\xd1\xb2\xec\x67\xf7\xec\xda\xf8\xe2\x0b\x99\x12\x89\x98\ \x12\x30\xe7\xce\x5f\x8f\x90\x1a\xb8\x7c\xa5\xb6\xbb\x67\x80\xcc\ \xfe\x60\x06\x57\x43\x76\x47\x68\xbe\x87\x90\x75\x09\x87\x03\x09\ \x04\xbc\xc5\x8b\x9e\xde\x9f\xbb\xf3\x40\xee\x4e\x4a\xc0\xf4\xf7\ \xdb\xcb\x2e\xdd\x0a\x2b\x36\x45\x15\x64\x76\x66\x41\xe6\x9c\xd9\ \x69\xa9\xa1\xed\x75\xf5\x2d\xe4\xad\x4b\x20\x08\x12\x08\x78\x92\ \xd9\x22\xb1\x48\x28\x99\x2d\x32\x9a\xac\xf9\xa7\xca\xb4\xba\x4e\ \x5b\xef\x20\x69\x9b\x04\x4f\x17\x55\x6c\xdc\xb0\x22\xb4\xbd\xa9\ \x59\xab\x68\xd4\x90\x89\x64\xcf\xae\x8d\x98\xed\xb7\xee\x3e\x24\ 
\x15\x49\x60\xba\xa4\x4b\x44\x33\xc5\x42\xb1\x48\x78\xbf\x41\x2d\ \xbb\x78\xc3\x5f\x7a\x9d\x84\xae\x28\x55\xfa\xfa\x07\x2d\xd9\x59\ \x19\x21\x53\xa4\x92\x4c\x1e\xef\x6e\x5f\xbf\x6a\xc5\x52\xcc\xf4\ \x4f\x55\x75\x3d\xd9\x48\x02\xc6\xe7\xf3\xe6\xcd\x4d\x13\x8b\x84\ \xe9\xb3\xc5\x79\xd2\xd2\xc0\x92\x9e\x84\x68\x18\x85\xc4\xd6\x37\ \x54\x5e\x71\x87\x1c\x18\x5c\x2e\x77\xcf\xee\x8d\x7f\xf8\xcd\x2f\ \x31\x8f\xca\x2f\xdd\xee\xec\xea\xa5\x0c\x09\x04\x41\x71\x71\x9c\ \xc4\xc4\x84\xa5\x4b\xe6\xef\xcf\xdd\x19\xfc\xbc\x84\x50\x30\x95\ \x55\x75\xd6\xee\x3e\x51\xaa\x30\x38\xf6\x25\xe1\xa7\x90\x2e\x11\ \xaf\x58\xbe\xf8\xd7\xdb\xd6\x85\xcb\xf5\x0e\x0f\x3b\x8f\x7e\xfe\ \x2d\x39\xbf\x8c\x31\xd6\x22\x7c\x7e\x7c\xba\x44\x34\x53\x3c\xc3\ \x2f\x30\x79\xd2\x52\xa5\x4a\x67\x1f\x76\x7a\xbd\x84\xbc\x6d\xed\ \x76\x7b\x7e\xff\xa7\x4f\x67\xa5\xa5\x04\x5a\x6e\xd7\x3e\xc2\xfd\ \x2e\x6f\xae\x7b\x79\x61\xe6\x1c\x7f\xf2\x66\xda\xb4\xa9\xa2\x94\ \xe4\xc8\x6b\x72\x8f\xd7\xbb\x67\xef\x17\x66\x8b\x8d\x16\x48\x02\ \x60\x24\x12\x91\x58\x2c\x4c\x4a\x4a\x28\x2e\xab\x09\x3c\x7b\x27\ \xa2\x43\x4d\xcd\x5a\xa2\x13\x59\xb3\xd2\x52\x82\xa9\x8f\x69\x1f\ \x1f\xf9\xe6\xc6\xf7\x0a\x88\x2c\x1b\xef\x8a\x9d\x03\x41\x02\x3e\ \x2f\x33\x63\x8e\x58\x34\x23\x67\xe5\xd2\xe0\x37\x54\x20\xf6\x1a\ \x0c\x23\x1f\x1e\x94\xca\x2e\x56\x93\x79\xd3\x27\x4b\xa2\xf0\x78\ \x5c\x51\x6a\xf2\xf4\x69\x53\x67\xa5\xa5\xa2\x76\xe2\xb2\x8f\xc7\ \x7d\x85\x3a\x77\x7f\xbe\xa6\xad\x83\xe4\xfb\x46\x93\xd7\x42\xad\ \x60\x02\xbb\x40\xd9\x41\x62\xc4\x05\x57\x55\xdf\x3f\x77\xfe\x7a\ \xe4\x57\xfd\xe8\x85\x24\xb0\x82\xf1\x83\xe1\xf3\xe3\x83\xdf\x09\ \xc6\x3c\xdf\xe5\x42\x4c\x1d\xdd\xa8\x46\x8b\xb5\x2f\x8a\x5b\x63\ \x5f\xaa\x1b\xfb\x52\x5d\xe6\x5e\x77\x98\xa5\xee\x63\xe7\x08\x0c\ \xbb\x87\x86\x1c\x2e\x18\x71\x38\x9c\x66\x8b\xcd\xd4\xd1\xa3\x52\ \xeb\x9b\x95\xba\x09\xe6\x37\x4d\x1d\xdd\xa8\x27\x6f\x10\x04\xd9\ \xed\xce\xf1\x8e\xad\x6f\xc2\x4f\x48\x5c\x2e\x8c\x8a\x02\x10\xb0\ \x68\x0d\x07\x24\x10\x04\xf9\x7c\xa3\xf6\x97\xe8\xf5\x66\x6b\x4f\ \x3f\x55\x7f\xef\x05\x90\xa0\xc1\xa0\x76\x61\x81\x21\xa6\x12\x89\ \xdf\xbc\xfe\xbd\x8a\x0f\x47\xed\x55\x04\x03\x4d\x25\x12\x20\x30\ \x34\x45\x82\x12\x98\xc0\xbe\x77\xdc\xdf\xef\x06\x48\xa2\x04\x83\ \xaa\x0e\x01\xc6\x9d\x4a\x24\x7e\x73\xbb\xbd\x03\x83\x76\xb5\xc6\ \x94\x1f\x54\x5b\x16\x8c\x3e\x95\x48\x80\xc0\xd0\x14\x09\x4a\x60\ \x58\x96\x89\x61\x2a\x92\x60\x30\xa8\xdd\xd9\x00\x06\x95\x48\x80\ \x1f\xa3\x29\x12\x94\x1f\x03\x60\x68\x81\x04\x08\x0c\x4d\x91\x00\ \x81\xa1\x29\x12\x20\x30\x34\x45\x82\xf2\x63\x3f\xfc\x33\xc3\x80\ \xdd\x1d\x33\xff\xcc\x40\x47\x24\xc1\x60\xda\xf5\x5d\x35\xb7\x9b\ \xca\x2b\xef\xaa\x5b\x8d\x31\x32\x5d\xe8\x8b\xc4\x6f\x1e\x8f\xd7\ \x6e\x77\x28\x5b\x0c\xc7\x63\x26\x13\x43\x77\x24\x31\x28\x30\xcc\ \x40\x12\x53\x81\x32\x63\x90\xc4\x4e\xa0\xcc\x30\x24\xb1\xe0\xc7\ \x18\x89\x04\x62\x75\x26\x86\xa9\x48\x58\x2c\x30\xcc\x46\xc2\x4a\ \x81\x61\x03\x12\x96\x09\x0c\x7b\x90\x40\x6c\xc9\xc4\xb0\x0a\x49\ \x30\x18\xe6\x66\x62\x58\x88\xc4\x6f\xcc\xcd\xc4\xb0\x16\x09\x73\ \x05\x86\xe5\x48\x98\x18\x28\xb3\x1f\x09\xe3\x02\xe5\x58\x41\xc2\ \x20\x3f\x16\x5b\x48\x20\x26\x64\x62\x62\x0e\x09\xfd\x05\x26\x46\ \x91\xd0\x59\x60\x62\x1a\x09\x3d\x05\x06\x20\x41\xfb\x31\xf2\x0b\ \x91\x01\x24\x63\x80\x31\x98\xac\x0f\x14\x6a\x59\x31\x79\x85\xc8\ \x00\x92\x31\xcc\xeb\xf5\x39\x1c\xce\x66\xa5\x2e\x4f\x5a\x4a\x89\ \x1f\x03\x48\x68\x27\x30\x00\x09\xed\x02\x65\x80\x84\x76\x81\x32\ \x40\x42\x3b\x3f\x06\x90\x44\xe3\xc7\x08\x05\x03\x90\xd0\x4e\x60\ \x00\x12\xda\x09\x0c\x40\x42\x3b\x81\x01\x48\x70\xf3\x63\x78\x15\ 
\x22\x03\x48\x70\x03\x83\x57\x21\x32\x80\x04\x37\xc3\xab\x10\x19\ \x40\x42\x3b\x81\x01\x48\x88\x15\x98\x28\x0a\x91\x01\x24\xc4\x82\ \x89\xa2\x10\x19\x40\x42\xac\x45\x51\x88\x0c\x20\xa1\x9d\xc0\x00\ \x24\x14\x08\x4c\xe4\x4c\x0c\x40\x42\x01\x98\xc8\x99\x18\x80\x84\ \x76\x7e\x0c\x20\xa1\xde\x8f\xa1\xc0\x00\x24\xb4\x13\x98\x78\x30\ \x2e\x14\x5a\xf0\x7f\x8e\x40\x10\xb4\xef\x90\xd4\x60\xb4\xfc\x1f\ \x2f\xcd\x61\x2b\x97\xb1\x69\xc3\x00\x00\x00\x00\x49\x45\x4e\x44\ \xae\x42\x60\x82\ \x00\x00\x02\x35\ \x89\ \x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\ \x00\x00\x1e\x00\x00\x00\x1e\x08\x06\x00\x00\x00\x3b\x30\xae\xa2\ \x00\x00\x00\x06\x62\x4b\x47\x44\x00\xff\x00\xff\x00\xff\xa0\xbd\ \xa7\x93\x00\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0b\x13\x00\x00\ \x0b\x13\x01\x00\x9a\x9c\x18\x00\x00\x00\x07\x74\x49\x4d\x45\x07\ \xe0\x09\x07\x0b\x01\x15\x32\xc4\xd1\xa1\x00\x00\x01\xc2\x49\x44\ \x41\x54\x48\xc7\xc5\xd7\x3b\x8f\x0e\x61\x14\x07\xf0\xdf\xde\x6c\ \x36\xae\x11\x0a\x85\x50\x28\xc8\x7e\x02\x85\xc2\x36\x42\xa2\x53\ \x6f\x50\x6c\xa1\xa3\x5a\x1a\x9f\x60\xdf\x46\xe2\x0b\x08\x42\x21\ \x11\x22\x24\x84\x84\xb8\x04\xd9\x06\x11\xf7\x4d\x96\x20\xcb\x12\ \x77\x6b\xbd\x9a\x53\x4c\xc6\xfb\xce\xbc\xb3\xb3\x3b\x73\x92\xd3\ \xcc\xcc\x79\xfe\xe7\x3c\xcf\xff\xf9\x9f\x33\xd4\x64\x3d\x05\xbf\ \x1f\x40\x33\xbc\x32\x1b\xc4\x71\x0c\xa1\xbb\x0a\xc0\x3e\xec\xc4\ \xab\xa8\xf4\x08\xfa\xcb\x2e\x9a\x97\xf9\x0a\x1c\xc0\x09\xac\x8b\ \x67\x33\xf3\x51\x4d\x6f\xc6\xbb\x0d\x38\x84\xdd\xe8\xaa\xea\x3c\ \x87\x70\x35\x41\xa4\xa4\x8f\xe5\x24\x3c\x67\x96\x8f\xe0\x49\x1b\ \xd0\x26\x8e\x62\x35\x16\x97\x21\x59\x57\xea\x3c\x0f\x63\x0f\x96\ \x67\xc4\x3c\xc7\x83\x38\xeb\x9f\xf8\x81\x69\xbc\x09\x02\x3e\xc2\ \xd3\x4e\x13\xd8\x88\x0b\x98\xcd\xa8\x34\xcf\xbf\x62\x02\x77\x71\ \x0a\xfb\x62\x67\xda\xda\x8e\xc8\xb2\x39\xcf\xfe\x09\xf7\xb1\x3f\ \xae\xe4\x7f\x76\x6f\x01\x40\x93\xfe\x1d\x67\xb1\x26\x0d\xbc\x05\ \x6f\x17\x18\xbc\x89\xdb\x58\x95\x16\x91\xf5\xb8\x56\x01\xf8\x49\ \x2c\x4a\x57\xbe\x0c\x8d\x0e\xb7\xee\x03\xbe\xcc\x11\x7c\x7b\x2b\ \x41\xea\xc5\x5e\x7c\xcb\x08\x1c\x4b\x75\xb5\x25\xd8\x14\x24\x3d\ \x88\x8b\x41\xac\xbf\xe1\xe9\xf8\xeb\x59\x02\xb4\x15\x8f\xdb\x00\ \x37\x3a\x68\x12\x03\x18\x0e\xe2\xce\xb4\x58\x63\xad\x1c\xad\x3e\ \xdf\xe2\x6e\x37\x0a\x76\xa7\xd1\x16\xe4\x1d\xce\x1a\x04\x3e\xe2\ \x5c\x64\x3f\x98\x00\xbb\x83\xcb\x91\x50\x27\x76\x03\xaf\xb1\x19\ \x4b\xe3\xd9\xb3\xbc\x09\xe4\x37\x2e\xe1\x73\xa8\xdb\x4a\xdc\xc4\ \x95\x02\xc0\x42\x62\xbb\xe3\xea\xf6\xe1\x65\x11\x5d\xdf\x16\xb2\ \xba\xab\x44\x77\xba\x15\x5b\x7d\xba\x68\x60\xd9\xc9\x63\x24\xc8\ \x76\xac\x68\x5b\xfb\x55\x12\xf8\x0c\xfe\x60\xaa\x92\xa1\x2d\x61\ \x53\x78\x87\x89\xaa\x81\xe1\x05\x1e\xd6\x01\xfc\x1e\xe3\x75\x00\ \x8f\x63\xba\xa7\x06\xe0\x59\x4c\xd6\xf1\xdb\xd4\xaf\x4e\xfb\x07\ \xaf\xf4\xfc\x12\x63\x89\xd7\xd3\x00\x00\x00\x00\x49\x45\x4e\x44\ \xae\x42\x60\x82\ " qt_resource_name = b"\ \x00\x04\ \x00\x07\x37\xfe\ \x00\x6d\ \x00\x61\x00\x69\x00\x6e\ \x00\x0e\ \x08\x9f\xe0\x67\ \x00\x61\ \x00\x72\x00\x72\x00\x6f\x00\x77\x00\x5f\x00\x72\x00\x65\x00\x64\x00\x6f\x00\x2e\x00\x70\x00\x6e\x00\x67\ \x00\x04\ \x00\x07\x35\xdf\ \x00\x6c\ \x00\x6f\x00\x67\x00\x6f\ \x00\x0e\ \x07\x9f\xe0\xe7\ \x00\x61\ \x00\x72\x00\x72\x00\x6f\x00\x77\x00\x5f\x00\x75\x00\x6e\x00\x64\x00\x6f\x00\x2e\x00\x70\x00\x6e\x00\x67\ " qt_resource_struct = b"\ \x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\ \x00\x00\x00\x00\x00\x02\x00\x00\x00\x03\x00\x00\x00\x02\ \x00\x00\x00\x30\x00\x00\x00\x00\x00\x01\x00\x00\x02\x23\ \x00\x00\x00\x3e\x00\x00\x00\x00\x00\x01\x00\x00\x0d\xac\ 
\x00\x00\x00\x0e\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\ " def qInitResources(): QtCore.qRegisterResourceData(0x01, qt_resource_struct, qt_resource_name, qt_resource_data) def qCleanupResources(): QtCore.qUnregisterResourceData(0x01, qt_resource_struct, qt_resource_name, qt_resource_data) qInitResources() PKl>~IN/mdt/gui/maps_visualizer/design/ui_TabTextual.py# -*- coding: utf-8 -*- # Form implementation generated from reading ui file 'TabTextual.ui' # # Created by: PyQt5 UI code generator 5.4.2 # # WARNING! All changes made in this file will be lost! from PyQt5 import QtCore, QtGui, QtWidgets class Ui_TabTextual(object): def setupUi(self, TabTextual): TabTextual.setObjectName("TabTextual") TabTextual.resize(400, 300) self.gridLayout = QtWidgets.QGridLayout(TabTextual) self.gridLayout.setContentsMargins(0, 0, 0, 0) self.gridLayout.setHorizontalSpacing(0) self.gridLayout.setVerticalSpacing(4) self.gridLayout.setObjectName("gridLayout") self.textConfigEdit = TextConfigEditor(TabTextual) self.textConfigEdit.setObjectName("textConfigEdit") self.gridLayout.addWidget(self.textConfigEdit, 0, 0, 1, 1) self.correctness_label = QtWidgets.QLabel(TabTextual) self.correctness_label.setAlignment(QtCore.Qt.AlignCenter) self.correctness_label.setWordWrap(True) self.correctness_label.setObjectName("correctness_label") self.gridLayout.addWidget(self.correctness_label, 1, 0, 1, 1) self.retranslateUi(TabTextual) QtCore.QMetaObject.connectSlotsByName(TabTextual) def retranslateUi(self, TabTextual): _translate = QtCore.QCoreApplication.translate TabTextual.setWindowTitle(_translate("TabTextual", "Form")) self.correctness_label.setText(_translate("TabTextual", "TextLabel")) from ..widgets import TextConfigEditor PKl>~If&L 3mdt/gui/maps_visualizer/design/ui_TabMapSpecific.py# -*- coding: utf-8 -*- # Form implementation generated from reading ui file 'TabMapSpecific.ui' # # Created by: PyQt5 UI code generator 5.4.2 # # WARNING! All changes made in this file will be lost! 
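# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the pyuic5 output in this package):
# the Ui_* classes in these design modules are meant to be composed with a
# QWidget via setupUi(). The host class below is hypothetical and only assumes
# that the mdt.gui.maps_visualizer.design package shown above is importable;
# the actual MDT application may wire the forms up differently.
import sys

from PyQt5 import QtWidgets

from mdt.gui.maps_visualizer.design.ui_TabTextual import Ui_TabTextual


class TabTextualWidget(QtWidgets.QWidget):
    """Hypothetical host widget that applies the generated form layout."""

    def __init__(self, parent=None):
        super(TabTextualWidget, self).__init__(parent)
        self.ui = Ui_TabTextual()
        # setupUi instantiates the child widgets (including the promoted
        # TextConfigEditor) and attaches them to this widget.
        self.ui.setupUi(self)


if __name__ == '__main__':
    app = QtWidgets.QApplication(sys.argv)
    widget = TabTextualWidget()
    widget.show()
    sys.exit(app.exec_())
# ---------------------------------------------------------------------------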
from PyQt5 import QtCore, QtGui, QtWidgets class Ui_TabMapSpecific(object): def setupUi(self, TabMapSpecific): TabMapSpecific.setObjectName("TabMapSpecific") TabMapSpecific.resize(445, 534) self.gridLayout = QtWidgets.QGridLayout(TabMapSpecific) self.gridLayout.setContentsMargins(0, 0, 0, 0) self.gridLayout.setSpacing(0) self.gridLayout.setObjectName("gridLayout") self.scrollArea_2 = QtWidgets.QScrollArea(TabMapSpecific) self.scrollArea_2.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff) self.scrollArea_2.setWidgetResizable(True) self.scrollArea_2.setObjectName("scrollArea_2") self.scrollAreaWidgetContents_2 = QtWidgets.QWidget() self.scrollAreaWidgetContents_2.setGeometry(QtCore.QRect(0, 0, 443, 532)) self.scrollAreaWidgetContents_2.setObjectName("scrollAreaWidgetContents_2") self.verticalLayout = QtWidgets.QVBoxLayout(self.scrollAreaWidgetContents_2) self.verticalLayout.setContentsMargins(6, 6, 6, 6) self.verticalLayout.setSpacing(6) self.verticalLayout.setObjectName("verticalLayout") self.selectedMap = QtWidgets.QComboBox(self.scrollAreaWidgetContents_2) self.selectedMap.setObjectName("selectedMap") self.verticalLayout.addWidget(self.selectedMap) self.frame = QtWidgets.QFrame(self.scrollAreaWidgetContents_2) self.frame.setFrameShape(QtWidgets.QFrame.NoFrame) self.frame.setFrameShadow(QtWidgets.QFrame.Raised) self.frame.setObjectName("frame") self.gridLayout_4 = QtWidgets.QGridLayout(self.frame) self.gridLayout_4.setContentsMargins(0, 0, 0, 0) self.gridLayout_4.setSpacing(0) self.gridLayout_4.setObjectName("gridLayout_4") self.mapSpecificOptionsPosition = QtWidgets.QGridLayout() self.mapSpecificOptionsPosition.setSpacing(0) self.mapSpecificOptionsPosition.setObjectName("mapSpecificOptionsPosition") self.gridLayout_4.addLayout(self.mapSpecificOptionsPosition, 0, 0, 1, 1) self.verticalLayout.addWidget(self.frame) spacerItem = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding) self.verticalLayout.addItem(spacerItem) self.scrollArea_2.setWidget(self.scrollAreaWidgetContents_2) self.gridLayout.addWidget(self.scrollArea_2, 0, 0, 1, 1) self.retranslateUi(TabMapSpecific) QtCore.QMetaObject.connectSlotsByName(TabMapSpecific) TabMapSpecific.setTabOrder(self.scrollArea_2, self.selectedMap) def retranslateUi(self, TabMapSpecific): _translate = QtCore.QCoreApplication.translate TabMapSpecific.setWindowTitle(_translate("TabMapSpecific", "Form")) PKl>~I[qC{,{,/mdt/gui/maps_visualizer/design/ui_MainWindow.py# -*- coding: utf-8 -*- # Form implementation generated from reading ui file 'MainWindow.ui' # # Created by: PyQt5 UI code generator 5.4.2 # # WARNING! All changes made in this file will be lost! 
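# ---------------------------------------------------------------------------
# Illustrative sketch (not generated code): every setupUi() in these modules
# ends with QtCore.QMetaObject.connectSlotsByName(...). That call auto-connects
# methods named on_<objectName>_<signal> to the matching child widget's signal.
# The demo widget and its object names below are hypothetical and only show
# the mechanism the generated forms rely on.
import sys

from PyQt5 import QtCore, QtWidgets


class AutoConnectDemo(QtWidgets.QWidget):

    def __init__(self, parent=None):
        super(AutoConnectDemo, self).__init__(parent)
        layout = QtWidgets.QVBoxLayout(self)
        button = QtWidgets.QPushButton('Press me', self)
        button.setObjectName('demo_button')  # the object name drives the lookup
        layout.addWidget(button)
        # Scans this object for methods named on_<objectName>_<signal>
        # and connects them to the corresponding child widget signals.
        QtCore.QMetaObject.connectSlotsByName(self)

    @QtCore.pyqtSlot()
    def on_demo_button_clicked(self):
        print('demo_button was clicked')


if __name__ == '__main__':
    app = QtWidgets.QApplication(sys.argv)
    demo = AutoConnectDemo()
    demo.show()
    sys.exit(app.exec_())
# ---------------------------------------------------------------------------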
from PyQt5 import QtCore, QtGui, QtWidgets class Ui_MapsVisualizer(object): def setupUi(self, MapsVisualizer): MapsVisualizer.setObjectName("MapsVisualizer") MapsVisualizer.resize(1000, 754) icon = QtGui.QIcon() icon.addPixmap(QtGui.QPixmap(":/main/logo"), QtGui.QIcon.Normal, QtGui.QIcon.Off) MapsVisualizer.setWindowIcon(icon) self.centralwidget = QtWidgets.QWidget(MapsVisualizer) self.centralwidget.setObjectName("centralwidget") self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.centralwidget) self.verticalLayout_2.setContentsMargins(0, 0, 0, 0) self.verticalLayout_2.setSpacing(0) self.verticalLayout_2.setObjectName("verticalLayout_2") self.frame_2 = QtWidgets.QFrame(self.centralwidget) self.frame_2.setFrameShape(QtWidgets.QFrame.StyledPanel) self.frame_2.setFrameShadow(QtWidgets.QFrame.Sunken) self.frame_2.setObjectName("frame_2") self.gridLayout_2 = QtWidgets.QGridLayout(self.frame_2) self.gridLayout_2.setContentsMargins(0, 0, 0, 0) self.gridLayout_2.setSpacing(0) self.gridLayout_2.setObjectName("gridLayout_2") self.splitter = QtWidgets.QSplitter(self.frame_2) self.splitter.setOrientation(QtCore.Qt.Horizontal) self.splitter.setObjectName("splitter") self.verticalLayoutWidget = QtWidgets.QWidget(self.splitter) self.verticalLayoutWidget.setObjectName("verticalLayoutWidget") self.verticalLayout = QtWidgets.QVBoxLayout(self.verticalLayoutWidget) self.verticalLayout.setContentsMargins(-1, -1, -1, 6) self.verticalLayout.setSpacing(0) self.verticalLayout.setObjectName("verticalLayout") self.commandTabs = QtWidgets.QTabWidget(self.verticalLayoutWidget) sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred) sizePolicy.setHorizontalStretch(0) sizePolicy.setVerticalStretch(0) sizePolicy.setHeightForWidth(self.commandTabs.sizePolicy().hasHeightForWidth()) self.commandTabs.setSizePolicy(sizePolicy) self.commandTabs.setMinimumSize(QtCore.QSize(250, 0)) self.commandTabs.setObjectName("commandTabs") self.generalOptions = QtWidgets.QWidget() self.generalOptions.setAccessibleName("") self.generalOptions.setObjectName("generalOptions") self.gridLayout_4 = QtWidgets.QGridLayout(self.generalOptions) self.gridLayout_4.setContentsMargins(0, 0, 0, 0) self.gridLayout_4.setSpacing(0) self.gridLayout_4.setObjectName("gridLayout_4") self.generalTabPosition = QtWidgets.QGridLayout() self.generalTabPosition.setSpacing(0) self.generalTabPosition.setObjectName("generalTabPosition") self.gridLayout_4.addLayout(self.generalTabPosition, 0, 0, 1, 1) self.commandTabs.addTab(self.generalOptions, "") self.mapOptions = QtWidgets.QWidget() self.mapOptions.setObjectName("mapOptions") self.gridLayout = QtWidgets.QGridLayout(self.mapOptions) self.gridLayout.setContentsMargins(0, 0, 0, 0) self.gridLayout.setSpacing(0) self.gridLayout.setObjectName("gridLayout") self.mapSpecificTabPosition = QtWidgets.QGridLayout() self.mapSpecificTabPosition.setSpacing(0) self.mapSpecificTabPosition.setObjectName("mapSpecificTabPosition") self.gridLayout.addLayout(self.mapSpecificTabPosition, 0, 0, 1, 1) self.commandTabs.addTab(self.mapOptions, "") self.textInfoTab = QtWidgets.QWidget() self.textInfoTab.setObjectName("textInfoTab") self.gridLayout_9 = QtWidgets.QGridLayout(self.textInfoTab) self.gridLayout_9.setObjectName("gridLayout_9") self.textInfoTabPosition = QtWidgets.QGridLayout() self.textInfoTabPosition.setSpacing(0) self.textInfoTabPosition.setObjectName("textInfoTabPosition") self.gridLayout_9.addLayout(self.textInfoTabPosition, 0, 0, 1, 1) self.commandTabs.addTab(self.textInfoTab, "") 
self.verticalLayout.addWidget(self.commandTabs) self.horizontalLayout = QtWidgets.QHBoxLayout() self.horizontalLayout.setContentsMargins(-1, 6, -1, 0) self.horizontalLayout.setSpacing(2) self.horizontalLayout.setObjectName("horizontalLayout") spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum) self.horizontalLayout.addItem(spacerItem) self.auto_rendering = QtWidgets.QCheckBox(self.verticalLayoutWidget) self.auto_rendering.setObjectName("auto_rendering") self.horizontalLayout.addWidget(self.auto_rendering) self.manual_render = QtWidgets.QPushButton(self.verticalLayoutWidget) self.manual_render.setIconSize(QtCore.QSize(16, 16)) self.manual_render.setObjectName("manual_render") self.horizontalLayout.addWidget(self.manual_render) self.undo_config = QtWidgets.QPushButton(self.verticalLayoutWidget) self.undo_config.setText("") icon1 = QtGui.QIcon() icon1.addPixmap(QtGui.QPixmap(":/main/arrow_undo.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off) self.undo_config.setIcon(icon1) self.undo_config.setObjectName("undo_config") self.horizontalLayout.addWidget(self.undo_config) self.redo_config = QtWidgets.QPushButton(self.verticalLayoutWidget) self.redo_config.setText("") icon2 = QtGui.QIcon() icon2.addPixmap(QtGui.QPixmap(":/main/arrow_redo.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off) self.redo_config.setIcon(icon2) self.redo_config.setObjectName("redo_config") self.horizontalLayout.addWidget(self.redo_config) spacerItem1 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum) self.horizontalLayout.addItem(spacerItem1) self.verticalLayout.addLayout(self.horizontalLayout) self.verticalLayout.setStretch(0, 1) self.gridLayoutWidget = QtWidgets.QWidget(self.splitter) self.gridLayoutWidget.setObjectName("gridLayoutWidget") self.plotLayout = QtWidgets.QVBoxLayout(self.gridLayoutWidget) self.plotLayout.setContentsMargins(-1, -1, -1, 0) self.plotLayout.setSpacing(0) self.plotLayout.setObjectName("plotLayout") self.gridLayout_2.addWidget(self.splitter, 0, 0, 1, 1) self.verticalLayout_2.addWidget(self.frame_2) self.verticalLayout_2.setStretch(0, 1) MapsVisualizer.setCentralWidget(self.centralwidget) self.menubar = QtWidgets.QMenuBar(MapsVisualizer) self.menubar.setGeometry(QtCore.QRect(0, 0, 1000, 27)) self.menubar.setObjectName("menubar") self.menuFile = QtWidgets.QMenu(self.menubar) self.menuFile.setObjectName("menuFile") self.menuAbout = QtWidgets.QMenu(self.menubar) self.menuAbout.setObjectName("menuAbout") MapsVisualizer.setMenuBar(self.menubar) self.statusbar = QtWidgets.QStatusBar(MapsVisualizer) self.statusbar.setObjectName("statusbar") MapsVisualizer.setStatusBar(self.statusbar) self.actionOpen_directory = QtWidgets.QAction(MapsVisualizer) self.actionOpen_directory.setObjectName("actionOpen_directory") self.actionQuit = QtWidgets.QAction(MapsVisualizer) self.actionQuit.setObjectName("actionQuit") self.actionSaveImage = QtWidgets.QAction(MapsVisualizer) self.actionSaveImage.setObjectName("actionSaveImage") self.actionAbout = QtWidgets.QAction(MapsVisualizer) self.actionAbout.setObjectName("actionAbout") self.actionExtra_plot_options = QtWidgets.QAction(MapsVisualizer) self.actionExtra_plot_options.setObjectName("actionExtra_plot_options") self.actionBrowse_to_current_folder = QtWidgets.QAction(MapsVisualizer) self.actionBrowse_to_current_folder.setObjectName("actionBrowse_to_current_folder") self.actionSave_settings = QtWidgets.QAction(MapsVisualizer) self.actionSave_settings.setObjectName("actionSave_settings") 
self.actionLoad_settings = QtWidgets.QAction(MapsVisualizer) self.actionLoad_settings.setObjectName("actionLoad_settings") self.menuFile.addAction(self.actionOpen_directory) self.menuFile.addSeparator() self.menuFile.addAction(self.actionBrowse_to_current_folder) self.menuFile.addSeparator() self.menuFile.addAction(self.actionSave_settings) self.menuFile.addAction(self.actionLoad_settings) self.menuFile.addSeparator() self.menuFile.addAction(self.actionSaveImage) self.menuFile.addSeparator() self.menuFile.addAction(self.actionQuit) self.menuAbout.addAction(self.actionAbout) self.menubar.addAction(self.menuFile.menuAction()) self.menubar.addAction(self.menuAbout.menuAction()) self.retranslateUi(MapsVisualizer) self.commandTabs.setCurrentIndex(0) self.actionQuit.triggered.connect(MapsVisualizer.close) QtCore.QMetaObject.connectSlotsByName(MapsVisualizer) def retranslateUi(self, MapsVisualizer): _translate = QtCore.QCoreApplication.translate MapsVisualizer.setWindowTitle(_translate("MapsVisualizer", "MDT Maps Visualizer")) self.commandTabs.setTabText(self.commandTabs.indexOf(self.generalOptions), _translate("MapsVisualizer", "General")) self.commandTabs.setTabText(self.commandTabs.indexOf(self.mapOptions), _translate("MapsVisualizer", "Maps")) self.commandTabs.setTabText(self.commandTabs.indexOf(self.textInfoTab), _translate("MapsVisualizer", "Textual")) self.auto_rendering.setText(_translate("MapsVisualizer", "Auto render")) self.manual_render.setToolTip(_translate("MapsVisualizer", "Manually redraw the figure")) self.manual_render.setText(_translate("MapsVisualizer", "Redraw")) self.undo_config.setToolTip(_translate("MapsVisualizer", "Undo")) self.redo_config.setToolTip(_translate("MapsVisualizer", "Redo")) self.menuFile.setTitle(_translate("MapsVisualizer", "&File")) self.menuAbout.setTitle(_translate("MapsVisualizer", "&Help")) self.actionOpen_directory.setText(_translate("MapsVisualizer", "&Open directory")) self.actionQuit.setText(_translate("MapsVisualizer", "&Quit")) self.actionQuit.setShortcut(_translate("MapsVisualizer", "Ctrl+Q")) self.actionSaveImage.setText(_translate("MapsVisualizer", "&Save image")) self.actionSaveImage.setShortcut(_translate("MapsVisualizer", "Ctrl+S")) self.actionAbout.setText(_translate("MapsVisualizer", "&About")) self.actionExtra_plot_options.setText(_translate("MapsVisualizer", "&Extra plot options")) self.actionBrowse_to_current_folder.setText(_translate("MapsVisualizer", "&Browse to current folder")) self.actionSave_settings.setText(_translate("MapsVisualizer", "&Export settings")) self.actionLoad_settings.setText(_translate("MapsVisualizer", "&Import settings")) from . import main_rc PKl>~Ia׍}F}F7mdt/gui/maps_visualizer/design/ui_MapSpecificOptions.py# -*- coding: utf-8 -*- # Form implementation generated from reading ui file 'MapSpecificOptions.ui' # # Created by: PyQt5 UI code generator 5.4.2 # # WARNING! All changes made in this file will be lost! 
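# ---------------------------------------------------------------------------
# Illustrative sketch (not generated code): ui_MainWindow.py above ends with
# "from . import main_rc". That import is needed only for its side effect:
# main_rc calls qInitResources() at import time, registering the embedded PNG
# data so that resource paths such as ":/main/logo", ":/main/arrow_undo.png"
# and ":/main/arrow_redo.png" resolve. A minimal sketch, assuming the
# mdt.gui.maps_visualizer.design package is importable:
import sys

from PyQt5 import QtGui, QtWidgets

from mdt.gui.maps_visualizer.design import main_rc  # noqa: F401  (side effect: registers resources)

if __name__ == '__main__':
    app = QtWidgets.QApplication(sys.argv)
    label = QtWidgets.QLabel()
    # The pixmap is loaded from the registered in-memory resource data,
    # not from a file on disk.
    label.setPixmap(QtGui.QPixmap(':/main/logo'))
    label.show()
    sys.exit(app.exec_())
# ---------------------------------------------------------------------------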
from PyQt5 import QtCore, QtGui, QtWidgets class Ui_MapSpecificOptions(object): def setupUi(self, MapSpecificOptions): MapSpecificOptions.setObjectName("MapSpecificOptions") MapSpecificOptions.resize(648, 564) self.gridLayout = QtWidgets.QGridLayout(MapSpecificOptions) self.gridLayout.setContentsMargins(0, 0, 0, 0) self.gridLayout.setHorizontalSpacing(0) self.gridLayout.setVerticalSpacing(10) self.gridLayout.setObjectName("gridLayout") spacerItem = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding) self.gridLayout.addItem(spacerItem, 4, 0, 1, 1) self.frame = CollapsablePanel(MapSpecificOptions) self.frame.setFrameShape(QtWidgets.QFrame.NoFrame) self.frame.setFrameShadow(QtWidgets.QFrame.Plain) self.frame.setObjectName("frame") self.verticalLayout_5 = QtWidgets.QVBoxLayout(self.frame) self.verticalLayout_5.setContentsMargins(0, 0, 0, 0) self.verticalLayout_5.setSpacing(0) self.verticalLayout_5.setObjectName("verticalLayout_5") self.label_18 = CollapsablePanelHeader(self.frame) font = QtGui.QFont() font.setBold(True) font.setWeight(75) self.label_18.setFont(font) self.label_18.setObjectName("label_18") self.verticalLayout_5.addWidget(self.label_18) self.line_3 = QtWidgets.QFrame(self.frame) self.line_3.setFrameShape(QtWidgets.QFrame.HLine) self.line_3.setFrameShadow(QtWidgets.QFrame.Sunken) self.line_3.setObjectName("line_3") self.verticalLayout_5.addWidget(self.line_3) self.frame_4 = CollapsablePanelContent(self.frame) self.frame_4.setFrameShape(QtWidgets.QFrame.NoFrame) self.frame_4.setFrameShadow(QtWidgets.QFrame.Plain) self.frame_4.setObjectName("frame_4") self.gridLayout_3 = QtWidgets.QGridLayout(self.frame_4) self.gridLayout_3.setContentsMargins(6, 6, 0, 0) self.gridLayout_3.setHorizontalSpacing(6) self.gridLayout_3.setVerticalSpacing(3) self.gridLayout_3.setObjectName("gridLayout_3") self.label_3 = QtWidgets.QLabel(self.frame_4) self.label_3.setObjectName("label_3") self.gridLayout_3.addWidget(self.label_3, 1, 0, 1, 1) self.map_title = QtWidgets.QLineEdit(self.frame_4) self.map_title.setObjectName("map_title") self.gridLayout_3.addWidget(self.map_title, 0, 1, 1, 1) self.colormap = QtWidgets.QComboBox(self.frame_4) self.colormap.setObjectName("colormap") self.gridLayout_3.addWidget(self.colormap, 2, 1, 1, 1) self.label_9 = QtWidgets.QLabel(self.frame_4) self.label_9.setObjectName("label_9") self.gridLayout_3.addWidget(self.label_9, 2, 0, 1, 1) self.data_colorbar_label = QtWidgets.QLineEdit(self.frame_4) self.data_colorbar_label.setObjectName("data_colorbar_label") self.gridLayout_3.addWidget(self.data_colorbar_label, 1, 1, 1, 1) self.label_21 = QtWidgets.QLabel(self.frame_4) self.label_21.setObjectName("label_21") self.gridLayout_3.addWidget(self.label_21, 0, 0, 1, 1) self.verticalLayout_5.addWidget(self.frame_4) self.gridLayout.addWidget(self.frame, 0, 0, 1, 1) self.frame_3 = CollapsablePanel(MapSpecificOptions) self.frame_3.setFrameShape(QtWidgets.QFrame.NoFrame) self.frame_3.setFrameShadow(QtWidgets.QFrame.Plain) self.frame_3.setObjectName("frame_3") self.verticalLayout_7 = QtWidgets.QVBoxLayout(self.frame_3) self.verticalLayout_7.setContentsMargins(0, 0, 0, 0) self.verticalLayout_7.setSpacing(0) self.verticalLayout_7.setObjectName("verticalLayout_7") self.label_22 = CollapsablePanelHeader(self.frame_3) font = QtGui.QFont() font.setBold(True) font.setWeight(75) self.label_22.setFont(font) self.label_22.setObjectName("label_22") self.verticalLayout_7.addWidget(self.label_22) self.line_5 = QtWidgets.QFrame(self.frame_3) 
self.line_5.setFrameShape(QtWidgets.QFrame.HLine) self.line_5.setFrameShadow(QtWidgets.QFrame.Sunken) self.line_5.setObjectName("line_5") self.verticalLayout_7.addWidget(self.line_5) self.frame_6 = CollapsablePanelContent(self.frame_3) self.frame_6.setFrameShape(QtWidgets.QFrame.NoFrame) self.frame_6.setFrameShadow(QtWidgets.QFrame.Plain) self.frame_6.setObjectName("frame_6") self.gridLayout_6 = QtWidgets.QGridLayout(self.frame_6) self.gridLayout_6.setContentsMargins(6, 6, 0, 0) self.gridLayout_6.setHorizontalSpacing(6) self.gridLayout_6.setVerticalSpacing(3) self.gridLayout_6.setObjectName("gridLayout_6") self.gridLayout_7 = QtWidgets.QGridLayout() self.gridLayout_7.setContentsMargins(-1, 0, -1, 0) self.gridLayout_7.setObjectName("gridLayout_7") self.use_data_scale_max = QtWidgets.QCheckBox(self.frame_6) self.use_data_scale_max.setText("") self.use_data_scale_max.setObjectName("use_data_scale_max") self.gridLayout_7.addWidget(self.use_data_scale_max, 2, 2, 1, 1) self.label_4 = QtWidgets.QLabel(self.frame_6) self.label_4.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter) self.label_4.setObjectName("label_4") self.gridLayout_7.addWidget(self.label_4, 3, 1, 1, 1) self.data_scale_min = QDoubleSpinBoxDotSeparator(self.frame_6) self.data_scale_min.setSpecialValueText("") self.data_scale_min.setDecimals(5) self.data_scale_min.setMinimum(-10000000000.0) self.data_scale_min.setMaximum(10000000000.0) self.data_scale_min.setSingleStep(0.01) self.data_scale_min.setObjectName("data_scale_min") self.gridLayout_7.addWidget(self.data_scale_min, 0, 1, 1, 1) self.data_scale_max = QDoubleSpinBoxDotSeparator(self.frame_6) self.data_scale_max.setDecimals(5) self.data_scale_max.setMinimum(-10000000000.0) self.data_scale_max.setMaximum(10000000000.0) self.data_scale_max.setSingleStep(0.01) self.data_scale_max.setObjectName("data_scale_max") self.gridLayout_7.addWidget(self.data_scale_max, 2, 1, 1, 1) self.use_data_scale_min = QtWidgets.QCheckBox(self.frame_6) self.use_data_scale_min.setText("") self.use_data_scale_min.setObjectName("use_data_scale_min") self.gridLayout_7.addWidget(self.use_data_scale_min, 0, 2, 1, 1) self.data_set_use_scale = QtWidgets.QCheckBox(self.frame_6) self.data_set_use_scale.setText("") self.data_set_use_scale.setObjectName("data_set_use_scale") self.gridLayout_7.addWidget(self.data_set_use_scale, 3, 2, 1, 1) self.label = QtWidgets.QLabel(self.frame_6) self.label.setObjectName("label") self.gridLayout_7.addWidget(self.label, 0, 0, 1, 1) self.label_5 = QtWidgets.QLabel(self.frame_6) self.label_5.setObjectName("label_5") self.gridLayout_7.addWidget(self.label_5, 2, 0, 1, 1) self.gridLayout_7.setColumnStretch(1, 1) self.gridLayout_6.addLayout(self.gridLayout_7, 0, 0, 1, 1) self.verticalLayout_7.addWidget(self.frame_6) self.gridLayout.addWidget(self.frame_3, 1, 0, 1, 1) self.frame_2 = CollapsablePanel(MapSpecificOptions) self.frame_2.setFrameShape(QtWidgets.QFrame.NoFrame) self.frame_2.setFrameShadow(QtWidgets.QFrame.Plain) self.frame_2.setObjectName("frame_2") self.verticalLayout_6 = QtWidgets.QVBoxLayout(self.frame_2) self.verticalLayout_6.setContentsMargins(0, 0, 0, 0) self.verticalLayout_6.setSpacing(0) self.verticalLayout_6.setObjectName("verticalLayout_6") self.label_20 = CollapsablePanelHeader(self.frame_2) font = QtGui.QFont() font.setBold(True) font.setWeight(75) self.label_20.setFont(font) self.label_20.setObjectName("label_20") self.verticalLayout_6.addWidget(self.label_20) self.line_4 = QtWidgets.QFrame(self.frame_2) 
self.line_4.setFrameShape(QtWidgets.QFrame.HLine) self.line_4.setFrameShadow(QtWidgets.QFrame.Sunken) self.line_4.setObjectName("line_4") self.verticalLayout_6.addWidget(self.line_4) self.frame_5 = CollapsablePanelContent(self.frame_2) self.frame_5.setFrameShape(QtWidgets.QFrame.NoFrame) self.frame_5.setFrameShadow(QtWidgets.QFrame.Plain) self.frame_5.setObjectName("frame_5") self.gridLayout_4 = QtWidgets.QGridLayout(self.frame_5) self.gridLayout_4.setContentsMargins(6, 6, 0, 0) self.gridLayout_4.setHorizontalSpacing(6) self.gridLayout_4.setVerticalSpacing(3) self.gridLayout_4.setObjectName("gridLayout_4") self.label_26 = QtWidgets.QLabel(self.frame_5) self.label_26.setObjectName("label_26") self.gridLayout_4.addWidget(self.label_26, 3, 0, 1, 1) self.info_file_location = QtWidgets.QLabel(self.frame_5) self.info_file_location.setWordWrap(True) self.info_file_location.setObjectName("info_file_location") self.gridLayout_4.addWidget(self.info_file_location, 0, 1, 1, 1) self.label_11 = QtWidgets.QLabel(self.frame_5) self.label_11.setObjectName("label_11") self.gridLayout_4.addWidget(self.label_11, 2, 0, 1, 1) self.label_24 = QtWidgets.QLabel(self.frame_5) self.label_24.setObjectName("label_24") self.gridLayout_4.addWidget(self.label_24, 0, 0, 1, 1) self.info_maximum = QtWidgets.QLabel(self.frame_5) self.info_maximum.setObjectName("info_maximum") self.gridLayout_4.addWidget(self.info_maximum, 2, 1, 1, 1) self.info_minimum = QtWidgets.QLabel(self.frame_5) self.info_minimum.setObjectName("info_minimum") self.gridLayout_4.addWidget(self.info_minimum, 3, 1, 1, 1) self.label_6 = QtWidgets.QLabel(self.frame_5) self.label_6.setObjectName("label_6") self.gridLayout_4.addWidget(self.label_6, 1, 0, 1, 1) self.info_shape = QtWidgets.QLabel(self.frame_5) self.info_shape.setObjectName("info_shape") self.gridLayout_4.addWidget(self.info_shape, 1, 1, 1, 1) self.gridLayout_4.setColumnStretch(1, 1) self.verticalLayout_6.addWidget(self.frame_5) self.gridLayout.addWidget(self.frame_2, 3, 0, 1, 1) self.info_Clipping = CollapsablePanel(MapSpecificOptions) self.info_Clipping.setFrameShape(QtWidgets.QFrame.NoFrame) self.info_Clipping.setFrameShadow(QtWidgets.QFrame.Plain) self.info_Clipping.setObjectName("info_Clipping") self.verticalLayout_8 = QtWidgets.QVBoxLayout(self.info_Clipping) self.verticalLayout_8.setContentsMargins(0, 0, 0, 0) self.verticalLayout_8.setSpacing(0) self.verticalLayout_8.setObjectName("verticalLayout_8") self.label_25 = CollapsablePanelHeader(self.info_Clipping) font = QtGui.QFont() font.setBold(True) font.setWeight(75) self.label_25.setFont(font) self.label_25.setObjectName("label_25") self.verticalLayout_8.addWidget(self.label_25) self.line_6 = QtWidgets.QFrame(self.info_Clipping) self.line_6.setFrameShape(QtWidgets.QFrame.HLine) self.line_6.setFrameShadow(QtWidgets.QFrame.Sunken) self.line_6.setObjectName("line_6") self.verticalLayout_8.addWidget(self.line_6) self.frame_8 = CollapsablePanelContent(self.info_Clipping) self.frame_8.setFrameShape(QtWidgets.QFrame.NoFrame) self.frame_8.setFrameShadow(QtWidgets.QFrame.Plain) self.frame_8.setObjectName("frame_8") self.gridLayout_8 = QtWidgets.QGridLayout(self.frame_8) self.gridLayout_8.setContentsMargins(6, 6, 0, 0) self.gridLayout_8.setHorizontalSpacing(6) self.gridLayout_8.setVerticalSpacing(3) self.gridLayout_8.setObjectName("gridLayout_8") self.gridLayout_9 = QtWidgets.QGridLayout() self.gridLayout_9.setContentsMargins(-1, 0, -1, 0) self.gridLayout_9.setObjectName("gridLayout_9") self.label_7 = QtWidgets.QLabel(self.frame_8) 
self.label_7.setObjectName("label_7") self.gridLayout_9.addWidget(self.label_7, 0, 0, 1, 1) self.data_set_use_clipping = QtWidgets.QCheckBox(self.frame_8) self.data_set_use_clipping.setText("") self.data_set_use_clipping.setObjectName("data_set_use_clipping") self.gridLayout_9.addWidget(self.data_set_use_clipping, 2, 2, 1, 1) self.use_data_clipping_min = QtWidgets.QCheckBox(self.frame_8) self.use_data_clipping_min.setText("") self.use_data_clipping_min.setObjectName("use_data_clipping_min") self.gridLayout_9.addWidget(self.use_data_clipping_min, 0, 2, 1, 1) self.label_2 = QtWidgets.QLabel(self.frame_8) self.label_2.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter) self.label_2.setObjectName("label_2") self.gridLayout_9.addWidget(self.label_2, 2, 1, 1, 1) self.data_clipping_min = QDoubleSpinBoxDotSeparator(self.frame_8) self.data_clipping_min.setDecimals(5) self.data_clipping_min.setMinimum(-9999999999.99) self.data_clipping_min.setMaximum(9999999999.99) self.data_clipping_min.setSingleStep(0.01) self.data_clipping_min.setObjectName("data_clipping_min") self.gridLayout_9.addWidget(self.data_clipping_min, 0, 1, 1, 1) self.data_clipping_max = QDoubleSpinBoxDotSeparator(self.frame_8) self.data_clipping_max.setDecimals(5) self.data_clipping_max.setMinimum(-9999999999.99) self.data_clipping_max.setMaximum(9999999999.99) self.data_clipping_max.setSingleStep(0.01) self.data_clipping_max.setObjectName("data_clipping_max") self.gridLayout_9.addWidget(self.data_clipping_max, 1, 1, 1, 1) self.use_data_clipping_max = QtWidgets.QCheckBox(self.frame_8) self.use_data_clipping_max.setText("") self.use_data_clipping_max.setObjectName("use_data_clipping_max") self.gridLayout_9.addWidget(self.use_data_clipping_max, 1, 2, 1, 1) self.label_8 = QtWidgets.QLabel(self.frame_8) self.label_8.setObjectName("label_8") self.gridLayout_9.addWidget(self.label_8, 1, 0, 1, 1) self.gridLayout_9.setColumnStretch(1, 1) self.gridLayout_8.addLayout(self.gridLayout_9, 0, 0, 1, 1) self.verticalLayout_8.addWidget(self.frame_8) self.gridLayout.addWidget(self.info_Clipping, 2, 0, 1, 1) self.retranslateUi(MapSpecificOptions) QtCore.QMetaObject.connectSlotsByName(MapSpecificOptions) MapSpecificOptions.setTabOrder(self.map_title, self.data_colorbar_label) MapSpecificOptions.setTabOrder(self.data_colorbar_label, self.colormap) MapSpecificOptions.setTabOrder(self.colormap, self.data_scale_min) MapSpecificOptions.setTabOrder(self.data_scale_min, self.data_scale_max) MapSpecificOptions.setTabOrder(self.data_scale_max, self.use_data_scale_min) MapSpecificOptions.setTabOrder(self.use_data_scale_min, self.use_data_scale_max) MapSpecificOptions.setTabOrder(self.use_data_scale_max, self.data_set_use_scale) MapSpecificOptions.setTabOrder(self.data_set_use_scale, self.data_clipping_min) MapSpecificOptions.setTabOrder(self.data_clipping_min, self.data_clipping_max) MapSpecificOptions.setTabOrder(self.data_clipping_max, self.use_data_clipping_min) MapSpecificOptions.setTabOrder(self.use_data_clipping_min, self.use_data_clipping_max) MapSpecificOptions.setTabOrder(self.use_data_clipping_max, self.data_set_use_clipping) def retranslateUi(self, MapSpecificOptions): _translate = QtCore.QCoreApplication.translate MapSpecificOptions.setWindowTitle(_translate("MapSpecificOptions", "Form")) self.label_18.setText(_translate("MapSpecificOptions", "General")) self.label_3.setText(_translate("MapSpecificOptions", "Colorbar label:")) self.label_9.setText(_translate("MapSpecificOptions", "Colormap:")) 
self.label_21.setText(_translate("MapSpecificOptions", "Title (latex):")) self.label_22.setText(_translate("MapSpecificOptions", "Scale")) self.label_4.setText(_translate("MapSpecificOptions", "Enable:")) self.label.setText(_translate("MapSpecificOptions", "Min:")) self.label_5.setText(_translate("MapSpecificOptions", "Max:")) self.label_20.setText(_translate("MapSpecificOptions", "Info")) self.label_26.setText(_translate("MapSpecificOptions", "Minimum:")) self.info_file_location.setText(_translate("MapSpecificOptions", "TextLabel")) self.label_11.setText(_translate("MapSpecificOptions", "Maximum:")) self.label_24.setText(_translate("MapSpecificOptions", "Filename:")) self.info_maximum.setText(_translate("MapSpecificOptions", "TextLabel")) self.info_minimum.setText(_translate("MapSpecificOptions", "TextLabel")) self.label_6.setText(_translate("MapSpecificOptions", "Shape:")) self.info_shape.setText(_translate("MapSpecificOptions", "TextLabel")) self.label_25.setText(_translate("MapSpecificOptions", "Clipping")) self.label_7.setText(_translate("MapSpecificOptions", "Min:")) self.label_2.setText(_translate("MapSpecificOptions", "Enable:")) self.label_8.setText(_translate("MapSpecificOptions", "Max:")) from ..widgets import CollapsablePanel, CollapsablePanelContent, CollapsablePanelHeader, QDoubleSpinBoxDotSeparator PKl>~Iuu/mdt/gui/maps_visualizer/design/ui_TabGeneral.py# -*- coding: utf-8 -*- # Form implementation generated from reading ui file 'TabGeneral.ui' # # Created by: PyQt5 UI code generator 5.4.2 # # WARNING! All changes made in this file will be lost! from PyQt5 import QtCore, QtGui, QtWidgets class Ui_TabGeneral(object): def setupUi(self, TabGeneral): TabGeneral.setObjectName("TabGeneral") TabGeneral.resize(963, 704) self.gridLayout = QtWidgets.QGridLayout(TabGeneral) self.gridLayout.setContentsMargins(0, 0, 0, 0) self.gridLayout.setSpacing(0) self.gridLayout.setObjectName("gridLayout") self.scrollArea = QtWidgets.QScrollArea(TabGeneral) self.scrollArea.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff) self.scrollArea.setWidgetResizable(True) self.scrollArea.setObjectName("scrollArea") self.scrollAreaWidgetContents = QtWidgets.QWidget() self.scrollAreaWidgetContents.setGeometry(QtCore.QRect(0, -479, 946, 1211)) self.scrollAreaWidgetContents.setObjectName("scrollAreaWidgetContents") self.gridLayout_6 = QtWidgets.QGridLayout(self.scrollAreaWidgetContents) self.gridLayout_6.setContentsMargins(6, 6, 6, 6) self.gridLayout_6.setHorizontalSpacing(0) self.gridLayout_6.setVerticalSpacing(10) self.gridLayout_6.setObjectName("gridLayout_6") self.frame = CollapsablePanel(self.scrollAreaWidgetContents) self.frame.setFrameShape(QtWidgets.QFrame.NoFrame) self.frame.setFrameShadow(QtWidgets.QFrame.Plain) self.frame.setObjectName("frame") self.verticalLayout_4 = QtWidgets.QVBoxLayout(self.frame) self.verticalLayout_4.setContentsMargins(0, 0, 0, 0) self.verticalLayout_4.setSpacing(0) self.verticalLayout_4.setObjectName("verticalLayout_4") self.label_13 = CollapsablePanelHeader(self.frame) font = QtGui.QFont() font.setBold(True) font.setWeight(75) self.label_13.setFont(font) self.label_13.setObjectName("label_13") self.verticalLayout_4.addWidget(self.label_13) self.line_2 = QtWidgets.QFrame(self.frame) self.line_2.setFrameShape(QtWidgets.QFrame.HLine) self.line_2.setFrameShadow(QtWidgets.QFrame.Sunken) self.line_2.setObjectName("line_2") self.verticalLayout_4.addWidget(self.line_2) self.frame_3 = CollapsablePanelContent(self.frame) self.frame_3.setFrameShape(QtWidgets.QFrame.NoFrame) 
self.frame_3.setFrameShadow(QtWidgets.QFrame.Plain) self.frame_3.setObjectName("frame_3") self.gridLayout_2 = QtWidgets.QGridLayout(self.frame_3) self.gridLayout_2.setContentsMargins(6, 6, 0, 0) self.gridLayout_2.setHorizontalSpacing(6) self.gridLayout_2.setVerticalSpacing(3) self.gridLayout_2.setObjectName("gridLayout_2") self.horizontalLayout_7 = QtWidgets.QHBoxLayout() self.horizontalLayout_7.setContentsMargins(3, -1, -1, -1) self.horizontalLayout_7.setObjectName("horizontalLayout_7") self.label_10 = QtWidgets.QLabel(self.frame_3) self.label_10.setObjectName("label_10") self.horizontalLayout_7.addWidget(self.label_10) self.maximumDimension = QtWidgets.QLabel(self.frame_3) self.maximumDimension.setObjectName("maximumDimension") self.horizontalLayout_7.addWidget(self.maximumDimension) self.gridLayout_2.addLayout(self.horizontalLayout_7, 0, 2, 1, 1) self.horizontalLayout_6 = QtWidgets.QHBoxLayout() self.horizontalLayout_6.setContentsMargins(3, -1, -1, -1) self.horizontalLayout_6.setObjectName("horizontalLayout_6") self.label_9 = QtWidgets.QLabel(self.frame_3) self.label_9.setObjectName("label_9") self.horizontalLayout_6.addWidget(self.label_9) self.maximumIndex = QtWidgets.QLabel(self.frame_3) self.maximumIndex.setObjectName("maximumIndex") self.horizontalLayout_6.addWidget(self.maximumIndex) self.gridLayout_2.addLayout(self.horizontalLayout_6, 1, 2, 1, 1) self.label_14 = QtWidgets.QLabel(self.frame_3) self.label_14.setObjectName("label_14") self.gridLayout_2.addWidget(self.label_14, 0, 0, 1, 1) self.general_dimension = QtWidgets.QSpinBox(self.frame_3) self.general_dimension.setObjectName("general_dimension") self.gridLayout_2.addWidget(self.general_dimension, 0, 1, 1, 1) self.label_16 = QtWidgets.QLabel(self.frame_3) self.label_16.setObjectName("label_16") self.gridLayout_2.addWidget(self.label_16, 1, 0, 1, 1) self.general_slice_index = QtWidgets.QSpinBox(self.frame_3) self.general_slice_index.setObjectName("general_slice_index") self.gridLayout_2.addWidget(self.general_slice_index, 1, 1, 1, 1) self.label_15 = QtWidgets.QLabel(self.frame_3) self.label_15.setObjectName("label_15") self.gridLayout_2.addWidget(self.label_15, 2, 0, 1, 1) self.general_volume_index = QtWidgets.QSpinBox(self.frame_3) self.general_volume_index.setObjectName("general_volume_index") self.gridLayout_2.addWidget(self.general_volume_index, 2, 1, 1, 1) self.horizontalLayout_10 = QtWidgets.QHBoxLayout() self.horizontalLayout_10.setContentsMargins(3, -1, -1, -1) self.horizontalLayout_10.setObjectName("horizontalLayout_10") self.label_25 = QtWidgets.QLabel(self.frame_3) self.label_25.setObjectName("label_25") self.horizontalLayout_10.addWidget(self.label_25) self.maximumVolume = QtWidgets.QLabel(self.frame_3) self.maximumVolume.setObjectName("maximumVolume") self.horizontalLayout_10.addWidget(self.maximumVolume) self.gridLayout_2.addLayout(self.horizontalLayout_10, 2, 2, 1, 1) self.gridLayout_2.setColumnStretch(1, 1) self.verticalLayout_4.addWidget(self.frame_3) self.gridLayout_6.addWidget(self.frame, 0, 0, 1, 1) self.general_Miscellaneous = CollapsablePanel(self.scrollAreaWidgetContents) self.general_Miscellaneous.setFrameShape(QtWidgets.QFrame.NoFrame) self.general_Miscellaneous.setFrameShadow(QtWidgets.QFrame.Plain) self.general_Miscellaneous.setObjectName("general_Miscellaneous") self.verticalLayout_8 = QtWidgets.QVBoxLayout(self.general_Miscellaneous) self.verticalLayout_8.setContentsMargins(0, 0, 0, 0) self.verticalLayout_8.setSpacing(0) self.verticalLayout_8.setObjectName("verticalLayout_8") self.label_23 = 
CollapsablePanelHeader(self.general_Miscellaneous) font = QtGui.QFont() font.setBold(True) font.setWeight(75) self.label_23.setFont(font) self.label_23.setObjectName("label_23") self.verticalLayout_8.addWidget(self.label_23) self.line_6 = QtWidgets.QFrame(self.general_Miscellaneous) self.line_6.setFrameShape(QtWidgets.QFrame.HLine) self.line_6.setFrameShadow(QtWidgets.QFrame.Sunken) self.line_6.setObjectName("line_6") self.verticalLayout_8.addWidget(self.line_6) self.frame_10 = CollapsablePanelContent(self.general_Miscellaneous) self.frame_10.setFrameShape(QtWidgets.QFrame.NoFrame) self.frame_10.setFrameShadow(QtWidgets.QFrame.Plain) self.frame_10.setObjectName("frame_10") self.formLayout = QtWidgets.QFormLayout(self.frame_10) self.formLayout.setContentsMargins(6, 6, 0, 0) self.formLayout.setHorizontalSpacing(6) self.formLayout.setVerticalSpacing(3) self.formLayout.setObjectName("formLayout") self.label = QtWidgets.QLabel(self.frame_10) self.label.setObjectName("label") self.formLayout.setWidget(1, QtWidgets.QFormLayout.LabelRole, self.label) self.general_colormap = QtWidgets.QComboBox(self.frame_10) self.general_colormap.setObjectName("general_colormap") self.formLayout.setWidget(1, QtWidgets.QFormLayout.FieldRole, self.general_colormap) self.label_2 = QtWidgets.QLabel(self.frame_10) self.label_2.setObjectName("label_2") self.formLayout.setWidget(2, QtWidgets.QFormLayout.LabelRole, self.label_2) self.general_rotate = QtWidgets.QComboBox(self.frame_10) self.general_rotate.setObjectName("general_rotate") self.formLayout.setWidget(2, QtWidgets.QFormLayout.FieldRole, self.general_rotate) self.label_4 = QtWidgets.QLabel(self.frame_10) self.label_4.setObjectName("label_4") self.formLayout.setWidget(4, QtWidgets.QFormLayout.LabelRole, self.label_4) self.general_show_axis = QtWidgets.QCheckBox(self.frame_10) self.general_show_axis.setText("") self.general_show_axis.setObjectName("general_show_axis") self.formLayout.setWidget(4, QtWidgets.QFormLayout.FieldRole, self.general_show_axis) self.label_5 = QtWidgets.QLabel(self.frame_10) self.label_5.setObjectName("label_5") self.formLayout.setWidget(5, QtWidgets.QFormLayout.LabelRole, self.label_5) self.general_colorbar_nmr_ticks = QtWidgets.QSpinBox(self.frame_10) self.general_colorbar_nmr_ticks.setMinimum(2) self.general_colorbar_nmr_ticks.setProperty("value", 10) self.general_colorbar_nmr_ticks.setObjectName("general_colorbar_nmr_ticks") self.formLayout.setWidget(5, QtWidgets.QFormLayout.FieldRole, self.general_colorbar_nmr_ticks) self.label_11 = QtWidgets.QLabel(self.frame_10) self.label_11.setObjectName("label_11") self.formLayout.setWidget(3, QtWidgets.QFormLayout.LabelRole, self.label_11) self.general_flipud = QtWidgets.QCheckBox(self.frame_10) self.general_flipud.setText("") self.general_flipud.setObjectName("general_flipud") self.formLayout.setWidget(3, QtWidgets.QFormLayout.FieldRole, self.general_flipud) self.label_27 = QtWidgets.QLabel(self.frame_10) self.label_27.setObjectName("label_27") self.formLayout.setWidget(6, QtWidgets.QFormLayout.LabelRole, self.label_27) self.general_interpolation = QtWidgets.QComboBox(self.frame_10) self.general_interpolation.setObjectName("general_interpolation") self.formLayout.setWidget(6, QtWidgets.QFormLayout.FieldRole, self.general_interpolation) self.label_28 = QtWidgets.QLabel(self.frame_10) self.label_28.setObjectName("label_28") self.formLayout.setWidget(0, QtWidgets.QFormLayout.LabelRole, self.label_28) self.plot_title = QtWidgets.QLineEdit(self.frame_10) self.plot_title.setObjectName("plot_title") 
self.formLayout.setWidget(0, QtWidgets.QFormLayout.FieldRole, self.plot_title) self.label_29 = QtWidgets.QLabel(self.frame_10) self.label_29.setObjectName("label_29") self.formLayout.setWidget(7, QtWidgets.QFormLayout.LabelRole, self.label_29) self.mask_name = QtWidgets.QComboBox(self.frame_10) self.mask_name.setObjectName("mask_name") self.formLayout.setWidget(7, QtWidgets.QFormLayout.FieldRole, self.mask_name) self.verticalLayout_8.addWidget(self.frame_10) self.gridLayout_6.addWidget(self.general_Miscellaneous, 7, 0, 1, 1) self.general_DisplayOrder = CollapsablePanel(self.scrollAreaWidgetContents) self.general_DisplayOrder.setFrameShape(QtWidgets.QFrame.NoFrame) self.general_DisplayOrder.setFrameShadow(QtWidgets.QFrame.Plain) self.general_DisplayOrder.setObjectName("general_DisplayOrder") self.verticalLayout_7 = QtWidgets.QVBoxLayout(self.general_DisplayOrder) self.verticalLayout_7.setContentsMargins(0, 0, 0, 0) self.verticalLayout_7.setSpacing(0) self.verticalLayout_7.setObjectName("verticalLayout_7") self.label_22 = CollapsablePanelHeader(self.general_DisplayOrder) font = QtGui.QFont() font.setBold(True) font.setWeight(75) self.label_22.setFont(font) self.label_22.setObjectName("label_22") self.verticalLayout_7.addWidget(self.label_22) self.line_5 = QtWidgets.QFrame(self.general_DisplayOrder) self.line_5.setFrameShape(QtWidgets.QFrame.HLine) self.line_5.setFrameShadow(QtWidgets.QFrame.Sunken) self.line_5.setObjectName("line_5") self.verticalLayout_7.addWidget(self.line_5) self.frame_9 = CollapsablePanelContent(self.general_DisplayOrder) self.frame_9.setFrameShape(QtWidgets.QFrame.NoFrame) self.frame_9.setFrameShadow(QtWidgets.QFrame.Plain) self.frame_9.setObjectName("frame_9") self.gridLayout_11 = QtWidgets.QGridLayout(self.frame_9) self.gridLayout_11.setContentsMargins(6, 6, 0, 0) self.gridLayout_11.setHorizontalSpacing(6) self.gridLayout_11.setVerticalSpacing(3) self.gridLayout_11.setObjectName("gridLayout_11") self.general_display_order = MapsReorderer(self.frame_9) sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding) sizePolicy.setHorizontalStretch(0) sizePolicy.setVerticalStretch(1) sizePolicy.setHeightForWidth(self.general_display_order.sizePolicy().hasHeightForWidth()) self.general_display_order.setSizePolicy(sizePolicy) self.general_display_order.setSelectionMode(QtWidgets.QAbstractItemView.MultiSelection) self.general_display_order.setObjectName("general_display_order") self.gridLayout_11.addWidget(self.general_display_order, 0, 0, 1, 1) self.verticalLayout_7.addWidget(self.frame_9) self.gridLayout_6.addWidget(self.general_DisplayOrder, 4, 0, 1, 1) self.frame_6 = CollapsablePanel(self.scrollAreaWidgetContents) self.frame_6.setFrameShape(QtWidgets.QFrame.NoFrame) self.frame_6.setFrameShadow(QtWidgets.QFrame.Plain) self.frame_6.setObjectName("frame_6") self.verticalLayout_6 = QtWidgets.QVBoxLayout(self.frame_6) self.verticalLayout_6.setContentsMargins(0, 0, 0, 0) self.verticalLayout_6.setSpacing(0) self.verticalLayout_6.setObjectName("verticalLayout_6") self.label_21 = CollapsablePanelHeader(self.frame_6) font = QtGui.QFont() font.setBold(True) font.setWeight(75) self.label_21.setFont(font) self.label_21.setObjectName("label_21") self.verticalLayout_6.addWidget(self.label_21) self.line_4 = QtWidgets.QFrame(self.frame_6) self.line_4.setFrameShape(QtWidgets.QFrame.HLine) self.line_4.setFrameShadow(QtWidgets.QFrame.Sunken) self.line_4.setObjectName("line_4") self.verticalLayout_6.addWidget(self.line_4) self.frame_7 = 
CollapsablePanelContent(self.frame_6) self.frame_7.setFrameShape(QtWidgets.QFrame.NoFrame) self.frame_7.setFrameShadow(QtWidgets.QFrame.Plain) self.frame_7.setObjectName("frame_7") self.gridLayout_5 = QtWidgets.QGridLayout(self.frame_7) self.gridLayout_5.setContentsMargins(6, 6, 0, 0) self.gridLayout_5.setHorizontalSpacing(6) self.gridLayout_5.setVerticalSpacing(3) self.gridLayout_5.setObjectName("gridLayout_5") self.general_map_selection = QtWidgets.QListWidget(self.frame_7) sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding) sizePolicy.setHorizontalStretch(0) sizePolicy.setVerticalStretch(1) sizePolicy.setHeightForWidth(self.general_map_selection.sizePolicy().hasHeightForWidth()) self.general_map_selection.setSizePolicy(sizePolicy) self.general_map_selection.setSelectionMode(QtWidgets.QAbstractItemView.MultiSelection) self.general_map_selection.setObjectName("general_map_selection") self.gridLayout_5.addWidget(self.general_map_selection, 0, 0, 1, 1) self.horizontalLayout = QtWidgets.QHBoxLayout() self.horizontalLayout.setContentsMargins(-1, -1, -1, 0) self.horizontalLayout.setObjectName("horizontalLayout") self.general_deselect_all_maps = QtWidgets.QPushButton(self.frame_7) self.general_deselect_all_maps.setObjectName("general_deselect_all_maps") self.horizontalLayout.addWidget(self.general_deselect_all_maps) self.general_invert_map_selection = QtWidgets.QPushButton(self.frame_7) self.general_invert_map_selection.setObjectName("general_invert_map_selection") self.horizontalLayout.addWidget(self.general_invert_map_selection) self.gridLayout_5.addLayout(self.horizontalLayout, 1, 0, 1, 1) self.verticalLayout_6.addWidget(self.frame_7) self.gridLayout_6.addWidget(self.frame_6, 2, 0, 1, 1) self.general_Info = CollapsablePanel(self.scrollAreaWidgetContents) self.general_Info.setFrameShape(QtWidgets.QFrame.NoFrame) self.general_Info.setFrameShadow(QtWidgets.QFrame.Plain) self.general_Info.setObjectName("general_Info") self.verticalLayout_9 = QtWidgets.QVBoxLayout(self.general_Info) self.verticalLayout_9.setContentsMargins(0, 0, 0, 0) self.verticalLayout_9.setSpacing(0) self.verticalLayout_9.setObjectName("verticalLayout_9") self.label_24 = CollapsablePanelHeader(self.general_Info) font = QtGui.QFont() font.setBold(True) font.setWeight(75) self.label_24.setFont(font) self.label_24.setObjectName("label_24") self.verticalLayout_9.addWidget(self.label_24) self.line_7 = QtWidgets.QFrame(self.general_Info) self.line_7.setFrameShape(QtWidgets.QFrame.HLine) self.line_7.setFrameShadow(QtWidgets.QFrame.Sunken) self.line_7.setObjectName("line_7") self.verticalLayout_9.addWidget(self.line_7) self.frame_8 = CollapsablePanelContent(self.general_Info) self.frame_8.setFrameShape(QtWidgets.QFrame.NoFrame) self.frame_8.setFrameShadow(QtWidgets.QFrame.Plain) self.frame_8.setObjectName("frame_8") self.gridLayout_12 = QtWidgets.QGridLayout(self.frame_8) self.gridLayout_12.setContentsMargins(6, 6, 0, 0) self.gridLayout_12.setHorizontalSpacing(6) self.gridLayout_12.setVerticalSpacing(3) self.gridLayout_12.setObjectName("gridLayout_12") self.label_3 = QtWidgets.QLabel(self.frame_8) self.label_3.setObjectName("label_3") self.gridLayout_12.addWidget(self.label_3, 1, 0, 1, 1) self.label_7 = QtWidgets.QLabel(self.frame_8) self.label_7.setObjectName("label_7") self.gridLayout_12.addWidget(self.label_7, 2, 0, 1, 1) self.general_info_nmr_maps = QtWidgets.QLabel(self.frame_8) self.general_info_nmr_maps.setObjectName("general_info_nmr_maps") 
self.gridLayout_12.addWidget(self.general_info_nmr_maps, 2, 1, 1, 1) self.general_info_directory = QtWidgets.QLabel(self.frame_8) self.general_info_directory.setWordWrap(True) self.general_info_directory.setTextInteractionFlags(QtCore.Qt.LinksAccessibleByMouse) self.general_info_directory.setObjectName("general_info_directory") self.gridLayout_12.addWidget(self.general_info_directory, 1, 1, 1, 1) self.gridLayout_12.setColumnStretch(1, 1) self.verticalLayout_9.addWidget(self.frame_8) self.gridLayout_6.addWidget(self.general_Info, 9, 0, 1, 1) spacerItem = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding) self.gridLayout_6.addItem(spacerItem, 10, 0, 1, 1) self.general_Font = CollapsablePanel(self.scrollAreaWidgetContents) self.general_Font.setFrameShape(QtWidgets.QFrame.NoFrame) self.general_Font.setFrameShadow(QtWidgets.QFrame.Plain) self.general_Font.setObjectName("general_Font") self.verticalLayout_10 = QtWidgets.QVBoxLayout(self.general_Font) self.verticalLayout_10.setContentsMargins(0, 0, 0, 0) self.verticalLayout_10.setSpacing(0) self.verticalLayout_10.setObjectName("verticalLayout_10") self.label_26 = CollapsablePanelHeader(self.general_Font) font = QtGui.QFont() font.setBold(True) font.setWeight(75) self.label_26.setFont(font) self.label_26.setObjectName("label_26") self.verticalLayout_10.addWidget(self.label_26) self.line_8 = QtWidgets.QFrame(self.general_Font) self.line_8.setFrameShape(QtWidgets.QFrame.HLine) self.line_8.setFrameShadow(QtWidgets.QFrame.Sunken) self.line_8.setObjectName("line_8") self.verticalLayout_10.addWidget(self.line_8) self.frame_11 = CollapsablePanelContent(self.general_Font) self.frame_11.setFrameShape(QtWidgets.QFrame.NoFrame) self.frame_11.setFrameShadow(QtWidgets.QFrame.Plain) self.frame_11.setObjectName("frame_11") self.formLayout_2 = QtWidgets.QFormLayout(self.frame_11) self.formLayout_2.setContentsMargins(6, 6, 0, 0) self.formLayout_2.setHorizontalSpacing(6) self.formLayout_2.setVerticalSpacing(3) self.formLayout_2.setObjectName("formLayout_2") self.label_6 = QtWidgets.QLabel(self.frame_11) self.label_6.setObjectName("label_6") self.formLayout_2.setWidget(0, QtWidgets.QFormLayout.LabelRole, self.label_6) self.general_font_family = QtWidgets.QComboBox(self.frame_11) self.general_font_family.setObjectName("general_font_family") self.formLayout_2.setWidget(0, QtWidgets.QFormLayout.FieldRole, self.general_font_family) self.label_8 = QtWidgets.QLabel(self.frame_11) self.label_8.setObjectName("label_8") self.formLayout_2.setWidget(1, QtWidgets.QFormLayout.LabelRole, self.label_8) self.general_font_size = QtWidgets.QSpinBox(self.frame_11) self.general_font_size.setMinimum(1) self.general_font_size.setObjectName("general_font_size") self.formLayout_2.setWidget(1, QtWidgets.QFormLayout.FieldRole, self.general_font_size) self.verticalLayout_10.addWidget(self.frame_11) self.gridLayout_6.addWidget(self.general_Font, 6, 0, 1, 1) self.general_Zoom = CollapsablePanel(self.scrollAreaWidgetContents) self.general_Zoom.setFrameShape(QtWidgets.QFrame.NoFrame) self.general_Zoom.setFrameShadow(QtWidgets.QFrame.Plain) self.general_Zoom.setObjectName("general_Zoom") self.verticalLayout_5 = QtWidgets.QVBoxLayout(self.general_Zoom) self.verticalLayout_5.setContentsMargins(0, 0, 0, 0) self.verticalLayout_5.setSpacing(0) self.verticalLayout_5.setObjectName("verticalLayout_5") self.label_17 = CollapsablePanelHeader(self.general_Zoom) font = QtGui.QFont() font.setBold(True) font.setWeight(75) self.label_17.setFont(font) 
self.label_17.setObjectName("label_17") self.verticalLayout_5.addWidget(self.label_17) self.line_3 = QtWidgets.QFrame(self.general_Zoom) self.line_3.setFrameShape(QtWidgets.QFrame.HLine) self.line_3.setFrameShadow(QtWidgets.QFrame.Sunken) self.line_3.setObjectName("line_3") self.verticalLayout_5.addWidget(self.line_3) self.frame_5 = CollapsablePanelContent(self.general_Zoom) self.frame_5.setFrameShape(QtWidgets.QFrame.NoFrame) self.frame_5.setFrameShadow(QtWidgets.QFrame.Plain) self.frame_5.setObjectName("frame_5") self.gridLayout_10 = QtWidgets.QGridLayout(self.frame_5) self.gridLayout_10.setContentsMargins(6, 6, 0, 0) self.gridLayout_10.setHorizontalSpacing(6) self.gridLayout_10.setVerticalSpacing(3) self.gridLayout_10.setObjectName("gridLayout_10") self.general_zoom_x_1 = QtWidgets.QSpinBox(self.frame_5) self.general_zoom_x_1.setObjectName("general_zoom_x_1") self.gridLayout_10.addWidget(self.general_zoom_x_1, 1, 3, 1, 1) self.label_19 = QtWidgets.QLabel(self.frame_5) self.label_19.setObjectName("label_19") self.gridLayout_10.addWidget(self.label_19, 7, 0, 1, 1) self.label_20 = QtWidgets.QLabel(self.frame_5) self.label_20.setObjectName("label_20") self.gridLayout_10.addWidget(self.label_20, 1, 0, 1, 1) self.general_zoom_x_0 = QtWidgets.QSpinBox(self.frame_5) self.general_zoom_x_0.setObjectName("general_zoom_x_0") self.gridLayout_10.addWidget(self.general_zoom_x_0, 1, 1, 1, 1) self.general_zoom_y_1 = QtWidgets.QSpinBox(self.frame_5) self.general_zoom_y_1.setObjectName("general_zoom_y_1") self.gridLayout_10.addWidget(self.general_zoom_y_1, 7, 3, 1, 1) self.label_18 = QtWidgets.QLabel(self.frame_5) self.label_18.setObjectName("label_18") self.gridLayout_10.addWidget(self.label_18, 7, 2, 1, 1) self.general_zoom_y_0 = QtWidgets.QSpinBox(self.frame_5) self.general_zoom_y_0.setObjectName("general_zoom_y_0") self.gridLayout_10.addWidget(self.general_zoom_y_0, 7, 1, 1, 1) self.label_12 = QtWidgets.QLabel(self.frame_5) self.label_12.setObjectName("label_12") self.gridLayout_10.addWidget(self.label_12, 1, 2, 1, 1) self.general_zoom_fit = QtWidgets.QPushButton(self.frame_5) self.general_zoom_fit.setObjectName("general_zoom_fit") self.gridLayout_10.addWidget(self.general_zoom_fit, 8, 1, 1, 1) self.general_zoom_reset = QtWidgets.QPushButton(self.frame_5) self.general_zoom_reset.setObjectName("general_zoom_reset") self.gridLayout_10.addWidget(self.general_zoom_reset, 8, 3, 1, 1) self.gridLayout_10.setColumnStretch(1, 1) self.gridLayout_10.setColumnStretch(3, 1) self.verticalLayout_5.addWidget(self.frame_5) self.gridLayout_6.addWidget(self.general_Zoom, 5, 0, 1, 1) self.scrollArea.setWidget(self.scrollAreaWidgetContents) self.gridLayout.addWidget(self.scrollArea, 0, 0, 1, 1) self.retranslateUi(TabGeneral) QtCore.QMetaObject.connectSlotsByName(TabGeneral) TabGeneral.setTabOrder(self.scrollArea, self.general_dimension) TabGeneral.setTabOrder(self.general_dimension, self.general_slice_index) TabGeneral.setTabOrder(self.general_slice_index, self.general_volume_index) TabGeneral.setTabOrder(self.general_volume_index, self.general_zoom_x_0) TabGeneral.setTabOrder(self.general_zoom_x_0, self.general_zoom_y_0) TabGeneral.setTabOrder(self.general_zoom_y_0, self.general_zoom_x_1) TabGeneral.setTabOrder(self.general_zoom_x_1, self.general_zoom_y_1) TabGeneral.setTabOrder(self.general_zoom_y_1, self.general_map_selection) TabGeneral.setTabOrder(self.general_map_selection, self.general_display_order) TabGeneral.setTabOrder(self.general_display_order, self.general_colormap) 
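# --- Editor's note (illustrative, not part of the generated file) ---
# Classes produced by the PyQt5 UI code generator, such as Ui_TabGeneral above, are
# meant to be mixed into a QWidget subclass that calls setupUi(self); the hand-written
# TabGeneral class further down in this package does exactly that, roughly:
#
#     class TabGeneral(QWidget, Ui_TabGeneral):
#         def __init__(self, controller, parent=None):
#             super(TabGeneral, self).__init__(parent)
#             self.setupUi(self)   # builds all the widgets defined in this module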
TabGeneral.setTabOrder(self.general_colormap, self.general_rotate) TabGeneral.setTabOrder(self.general_rotate, self.general_show_axis) TabGeneral.setTabOrder(self.general_show_axis, self.general_colorbar_nmr_ticks) def retranslateUi(self, TabGeneral): _translate = QtCore.QCoreApplication.translate TabGeneral.setWindowTitle(_translate("TabGeneral", "Form")) self.label_13.setText(_translate("TabGeneral", "Index")) self.label_10.setText(_translate("TabGeneral", "/ ")) self.maximumDimension.setText(_translate("TabGeneral", "x")) self.label_9.setText(_translate("TabGeneral", "/ ")) self.maximumIndex.setText(_translate("TabGeneral", "x")) self.label_14.setText(_translate("TabGeneral", "Dimension:")) self.label_16.setText(_translate("TabGeneral", "Slice index:")) self.label_15.setText(_translate("TabGeneral", "Volume:")) self.label_25.setText(_translate("TabGeneral", "/ ")) self.maximumVolume.setText(_translate("TabGeneral", "x")) self.label_23.setText(_translate("TabGeneral", "Miscellaneous")) self.label.setText(_translate("TabGeneral", "Colormap:")) self.label_2.setText(_translate("TabGeneral", "Rotate:")) self.label_4.setText(_translate("TabGeneral", "Show axis:")) self.label_5.setText(_translate("TabGeneral", "Colorbar nmr ticks:")) self.label_11.setText(_translate("TabGeneral", "Flip up/down:")) self.label_27.setText(_translate("TabGeneral", "Interpolation:")) self.label_28.setText(_translate("TabGeneral", "Plot title:")) self.label_29.setText(_translate("TabGeneral", "Mask:")) self.label_22.setText(_translate("TabGeneral", "Display order")) self.label_21.setText(_translate("TabGeneral", "Map selection")) self.general_deselect_all_maps.setText(_translate("TabGeneral", "Deselect all")) self.general_invert_map_selection.setText(_translate("TabGeneral", "Invert selection")) self.label_24.setText(_translate("TabGeneral", "Info")) self.label_3.setText(_translate("TabGeneral", "Directory: ")) self.label_7.setText(_translate("TabGeneral", "Map count:")) self.general_info_nmr_maps.setText(_translate("TabGeneral", "-")) self.general_info_directory.setText(_translate("TabGeneral", "-")) self.label_26.setText(_translate("TabGeneral", "Font")) self.label_6.setText(_translate("TabGeneral", "Family:")) self.label_8.setText(_translate("TabGeneral", "Size:")) self.label_17.setText(_translate("TabGeneral", "Zoom")) self.label_19.setText(_translate("TabGeneral", "y0:")) self.label_20.setText(_translate("TabGeneral", "
x0:
")) self.label_18.setText(_translate("TabGeneral", "y1:")) self.label_12.setText(_translate("TabGeneral", "x1:")) self.general_zoom_fit.setText(_translate("TabGeneral", "Zoom fit")) self.general_zoom_reset.setText(_translate("TabGeneral", "Reset")) from ..widgets import CollapsablePanel, CollapsablePanelContent, CollapsablePanelHeader, MapsReorderer PKjUpI*mdt/gui/maps_visualizer/design/__init__.pyPK=~I*7&''8mdt/gui/maps_visualizer/renderers/matplotlib_renderer.pyimport matplotlib import numpy as np matplotlib.use('Qt5Agg') from PyQt5.QtCore import QTimer from mdt.visualization.maps.matplotlib_renderer import MapsVisualizer from matplotlib.figure import Figure from PyQt5 import QtWidgets, QtCore from PyQt5.QtCore import pyqtSlot from PyQt5.QtWidgets import QWidget, QVBoxLayout from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas from mdt.gui.maps_visualizer.base import PlottingFrame from mdt.visualization.maps.base import DataInfo, MapPlotConfig class MatplotlibPlotting(PlottingFrame, QWidget): def __init__(self, controller, parent=None, plotting_info_viewer=None): super(MatplotlibPlotting, self).__init__(controller, plotting_info_viewer=plotting_info_viewer) self._controller.new_data.connect(self.set_new_data) self._controller.new_config.connect(self.set_new_config) self._auto_render = True self.figure = Figure() self.visualizer = MapsVisualizer(self._controller.get_data(), self.figure) self._axes_data = self.visualizer.render(self._controller.get_config()) self.canvas = FigureCanvas(self.figure) self.canvas.setSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding) self.canvas.updateGeometry() layout = QVBoxLayout() layout.setSpacing(0) layout.setContentsMargins(0, 0, 0, 0) layout.addWidget(self.canvas) self.setLayout(layout) self.setParent(parent) self.setFocusPolicy(QtCore.Qt.StrongFocus) self.setFocus() self._redraw_timer = QTimer() self._redraw_timer.timeout.connect(self._timer_event) self._redraw_timer.timeout.connect(self._redraw_timer.stop) self._mouse_interaction = _MouseInteraction(self.figure, self._plotting_info_viewer) self._mouse_interaction.update_axes_data(self._axes_data) self._previous_config = None self.setMinimumWidth(100) def export_image(self, filename, width, height, dpi=100): width_inch = width / dpi height_inch = height / dpi figure = Figure(figsize=(width_inch, height_inch), dpi=dpi) visualizer = MapsVisualizer(self._controller.get_data(), figure) FigureCanvas(figure) visualizer.to_file(filename, self._controller.get_config(), dpi=dpi) def set_auto_rendering(self, auto_render): self._auto_render = auto_render def redraw(self): self._redraw() @pyqtSlot() def _timer_event(self): self._redraw() @pyqtSlot(DataInfo) def set_new_data(self, data_info): self.visualizer = MapsVisualizer(data_info, self.figure) self._redraw_timer.start(300) @pyqtSlot(MapPlotConfig) def set_new_config(self, configuration): if not self._previous_config or configuration.visible_changes(self._previous_config): self._previous_config = configuration if self._auto_render: self._redraw_timer.start(300) def _redraw(self): self.figure.clf() self._axes_data = self.visualizer.render(self._controller.get_config()) self._mouse_interaction.update_axes_data(self._axes_data) self.figure.canvas.draw() class _MouseInteraction(object): def __init__(self, figure, plotting_info_viewer): self.figure = figure self.plotting_info_viewer = plotting_info_viewer self._axes_data = [] self.figure.canvas.mpl_connect('button_release_event', self._button_released) 
self.figure.canvas.mpl_connect('motion_notify_event', self._mouse_motion) def update_axes_data(self, axes_data): """Set the updated axes data. Needs to be called if the axes are updated. Args: axes_data (list of AxisData): the information about the axes """ self._axes_data = axes_data def _button_released(self, event): axis_data = self._get_matching_axis_data(event.inaxes) if axis_data: x, y = int(np.round(event.xdata)), int(np.round(event.ydata)) index = axis_data.coordinates_to_index(x, y) value = axis_data.get_value(index) # todo draw info box on the figure # print(x, y, index, value) def _mouse_motion(self, event): axis_data = self._get_matching_axis_data(event.inaxes) if axis_data: x, y = int(np.round(event.xdata)), int(np.round(event.ydata)) index = axis_data.coordinates_to_index(x, y) value = axis_data.get_value(index) self.plotting_info_viewer.set_voxel_info((x, y), tuple(index), float(value)) else: self.plotting_info_viewer.clear_voxel_info() def _get_matching_axis_data(self, axis): """Get the axis data matching the given axis. Args: Axis: the matplotlib axis to match Returns: AxisData: our data container for that axis """ if axis: for axes_data in self._axes_data: if axes_data.axis == axis: return axes_data return None PKjUpI-mdt/gui/maps_visualizer/renderers/__init__.pyPKiA~ID"6/?/?2mdt/gui/maps_visualizer/config_tabs/tab_general.pyimport copy import os from PyQt5.QtCore import pyqtSlot, Qt from PyQt5.QtWidgets import QWidget, QAbstractItemView from mdt.gui.maps_visualizer.actions import SetDimension, SetSliceIndex, SetVolumeIndex, SetColormap, SetRotate, \ SetZoom, SetShowAxis, SetColorBarNmrTicks, SetMapsToShow, SetFont, SetInterpolation, SetFlipud, SetPlotTitle, \ SetGeneralMask from mdt.gui.maps_visualizer.design.ui_TabGeneral import Ui_TabGeneral from mdt.gui.utils import blocked_signals, TimedUpdate from mdt.visualization.maps.base import Zoom, Point, DataInfo, Font, MapPlotConfig __author__ = 'Robbert Harms' __date__ = "2016-09-03" __maintainer__ = "Robbert Harms" __email__ = "robbert.harms@maastrichtuniversity.nl" class TabGeneral(QWidget, Ui_TabGeneral): def __init__(self, controller, parent=None): super(TabGeneral, self).__init__(parent) self.setupUi(self) self._controller = controller self._controller.new_data.connect(self.set_new_data) self._controller.new_config.connect(self.set_new_config) self.general_display_order.setDragDropMode(QAbstractItemView.InternalMove) self.general_display_order.setSelectionMode(QAbstractItemView.SingleSelection) self.general_colormap.addItems(self._controller.get_config().get_available_colormaps()) self.general_rotate.addItems(['0', '90', '180', '270']) self.general_rotate.setCurrentText(str(self._controller.get_config().rotate)) self.general_DisplayOrder.set_collapse(True) self.general_Miscellaneous.set_collapse(True) self.general_Zoom.set_collapse(True) self.general_Font.set_collapse(True) self.general_dimension.valueChanged.connect(lambda v: self._controller.apply_action(SetDimension(v))) self.general_slice_index.valueChanged.connect(lambda v: self._controller.apply_action(SetSliceIndex(v))) self.general_volume_index.valueChanged.connect(lambda v: self._controller.apply_action(SetVolumeIndex(v))) self.general_colormap.currentIndexChanged.connect( lambda i: self._controller.apply_action(SetColormap(self.general_colormap.itemText(i)))) self.general_rotate.currentIndexChanged.connect( lambda i: self._controller.apply_action(SetRotate(int(self.general_rotate.itemText(i))))) self._map_selection_timer = 
TimedUpdate(self._update_maps_to_show) self.general_map_selection.itemSelectionChanged.connect( lambda: self._map_selection_timer.add_delayed_callback(500)) self.general_deselect_all_maps.clicked.connect(self._deleselect_all_maps) self.general_invert_map_selection.clicked.connect(self._invert_map_selection) self.general_zoom_x_0.valueChanged.connect(self._update_zoom) self.general_zoom_x_1.valueChanged.connect(self._update_zoom) self.general_zoom_y_0.valueChanged.connect(self._update_zoom) self.general_zoom_y_1.valueChanged.connect(self._update_zoom) self.plot_title.textEdited.connect(lambda txt: self._controller.apply_action(SetPlotTitle(txt))) self.general_zoom_reset.clicked.connect(lambda: self._controller.apply_action(SetZoom(Zoom.no_zoom()))) self.general_zoom_fit.clicked.connect(self._zoom_fit) self.general_display_order.items_reordered.connect(self._reorder_maps) self.general_show_axis.clicked.connect(lambda: self._controller.apply_action( SetShowAxis(self.general_show_axis.isChecked()))) self.general_colorbar_nmr_ticks.valueChanged.connect( lambda v: self._controller.apply_action(SetColorBarNmrTicks(v))) self.general_font_family.addItems(Font.font_names()) self.general_font_family.currentTextChanged.connect( lambda v: self._controller.apply_action(SetFont(self._controller.get_config().font.get_updated(family=v)))) self.general_font_size.valueChanged.connect( lambda: self._controller.apply_action( SetFont(self._controller.get_config().font.get_updated(size=self.general_font_size.value())))) self.general_interpolation.addItems(self._controller.get_config().get_available_interpolations()) self.general_interpolation.currentTextChanged.connect( lambda v: self._controller.apply_action(SetInterpolation(v))) self.general_flipud.clicked.connect(lambda: self._controller.apply_action( SetFlipud(self.general_flipud.isChecked()))) self.mask_name.currentIndexChanged.connect(self._update_mask_name) @pyqtSlot(DataInfo) def set_new_data(self, data_info): if data_info.directory: self.general_info_directory.setText(self._split_long_path_elements(data_info.directory)) else: self.general_info_directory.setText('-') if len(data_info.maps): self.general_info_nmr_maps.setText(str(len(data_info.maps))) else: self.general_info_nmr_maps.setText('0') with blocked_signals(self.general_map_selection): self.general_map_selection.clear() self.general_map_selection.addItems(data_info.sorted_keys) for index, map_name in enumerate(data_info.sorted_keys): item = self.general_map_selection.item(index) item.setData(Qt.UserRole, map_name) with blocked_signals(self.mask_name): self.mask_name.clear() self.mask_name.insertItem(0, '-- None --') self.mask_name.insertItems(1, data_info.sorted_keys) @pyqtSlot(MapPlotConfig) def set_new_config(self, config): data_info = self._controller.get_data() map_names = config.maps_to_show with blocked_signals(self.general_dimension): try: max_dimension = data_info.get_max_dimension(map_names) self.general_dimension.setMaximum(max_dimension) self.maximumDimension.setText(str(max_dimension)) except ValueError: self.general_dimension.setMaximum(0) self.maximumDimension.setText(str(0)) self.general_dimension.setValue(config.dimension) with blocked_signals(self.general_slice_index): try: max_slice = data_info.get_max_slice_index(config.dimension, map_names) self.general_slice_index.setMaximum(max_slice) self.maximumIndex.setText(str(max_slice)) except ValueError: self.general_slice_index.setMaximum(0) self.maximumIndex.setText(str(0)) self.general_slice_index.setValue(config.slice_index) with 
blocked_signals(self.general_volume_index): try: max_volume = data_info.get_max_volume_index(map_names) self.general_volume_index.setMaximum(max_volume) self.maximumVolume.setText(str(max_volume)) except ValueError: self.general_volume_index.setMaximum(0) self.maximumVolume.setText(str(0)) self.general_volume_index.setValue(config.volume_index) with blocked_signals(self.general_colormap): self.general_colormap.setCurrentText(config.colormap) with blocked_signals(self.general_rotate): self.general_rotate.setCurrentText(str(config.rotate)) if self.general_map_selection.count(): for map_name, map_config in config.map_plot_options.items(): if map_config.title: index = data_info.sorted_keys.index(map_name) item = self.general_map_selection.item(index) item.setData(Qt.DisplayRole, map_name + ' (' + map_config.title + ')') self.general_map_selection.blockSignals(True) for index, map_name in enumerate(data_info.sorted_keys): item = self.general_map_selection.item(index) if item: item.setSelected(map_name in map_names) self.general_map_selection.blockSignals(False) try: max_x = data_info.get_max_x_index(config.dimension, config.rotate, map_names) max_y = data_info.get_max_y_index(config.dimension, config.rotate, map_names) with blocked_signals(self.general_zoom_x_0, self.general_zoom_x_1, self.general_zoom_y_0, self.general_zoom_y_1): self.general_zoom_x_0.setMaximum(max_x) self.general_zoom_x_0.setValue(config.zoom.p0.x) self.general_zoom_x_1.setMaximum(max_x) self.general_zoom_x_1.setMinimum(config.zoom.p0.x) self.general_zoom_x_1.setValue(config.zoom.p1.x) self.general_zoom_y_0.setMaximum(max_y) self.general_zoom_y_0.setValue(config.zoom.p0.y) self.general_zoom_y_1.setMaximum(max_y) self.general_zoom_y_1.setMinimum(config.zoom.p0.y) self.general_zoom_y_1.setValue(config.zoom.p1.y) if config.zoom.p0.x == 0 and config.zoom.p1.x == 0: self.general_zoom_x_1.setValue(max_x) if config.zoom.p0.y == 0 and config.zoom.p1.y == 0: self.general_zoom_y_1.setValue(max_y) except ValueError: pass with blocked_signals(self.plot_title): self.plot_title.setText(config.title) with blocked_signals(self.general_display_order): self.general_display_order.clear() self.general_display_order.addItems(map_names) for index, map_name in enumerate(map_names): item = self.general_display_order.item(index) item.setData(Qt.UserRole, map_name) if map_name in config.map_plot_options and config.map_plot_options[map_name].title: title = config.map_plot_options[map_name].title item.setData(Qt.DisplayRole, map_name + ' (' + title + ')') with blocked_signals(self.general_show_axis): self.general_show_axis.setChecked(config.show_axis) with blocked_signals(self.general_colorbar_nmr_ticks): self.general_colorbar_nmr_ticks.setValue(config.colorbar_nmr_ticks) with blocked_signals(self.general_font_family): self.general_font_family.setCurrentText(config.font.family) with blocked_signals(self.general_font_size): self.general_font_size.setValue(config.font.size) with blocked_signals(self.general_interpolation): self.general_interpolation.setCurrentText(config.interpolation) with blocked_signals(self.general_flipud): self.general_flipud.setChecked(config.flipud) with blocked_signals(self.mask_name): if config.mask_name and config.mask_name in data_info.maps: for ind in range(self.mask_name.count()): if self.mask_name.itemText(ind) == config.mask_name: self.mask_name.setCurrentIndex(ind) break else: self.mask_name.setCurrentIndex(0) @pyqtSlot() def _reorder_maps(self): items = [self.general_display_order.item(ind) for ind in 
range(self.general_display_order.count())] map_names = [item.data(Qt.UserRole) for item in items] self._controller.apply_action(SetMapsToShow(map_names)) @pyqtSlot() def _update_maps_to_show(self): map_names = copy.copy(self._controller.get_config().maps_to_show) for item in [self.general_map_selection.item(ind) for ind in range(self.general_map_selection.count())]: map_name = item.data(Qt.UserRole) if item.isSelected(): if map_name not in map_names: self._insert_alphabetically(map_name, map_names) else: if map_name in map_names: map_names.remove(map_name) self._controller.apply_action(SetMapsToShow(map_names)) @pyqtSlot() def _deleselect_all_maps(self): self._controller.apply_action(SetMapsToShow([])) @pyqtSlot() def _invert_map_selection(self): self._controller.apply_action(SetMapsToShow( set(self._controller.get_data().maps.keys()).difference(set(self._controller.get_config().maps_to_show)))) @pyqtSlot() def _zoom_fit(self): data_info = self._controller.get_data() config = self._controller.get_config() def add_padding(bounding_box, max_x, max_y): bounding_box[0].x = max(bounding_box[0].x - 1, 0) bounding_box[0].y = max(bounding_box[0].y - 1, 0) bounding_box[1].y = min(bounding_box[1].y + 2, max_y) bounding_box[1].x = min(bounding_box[1].x + 2, max_x) return bounding_box if config.maps_to_show or len(data_info.maps): bounding_box = data_info.get_bounding_box(config.dimension, config.slice_index, config.volume_index, config.rotate, config.maps_to_show) max_y = data_info.get_max_y_index(config.dimension, rotate=config.rotate, map_names=config.maps_to_show) max_x = data_info.get_max_x_index(config.dimension, rotate=config.rotate, map_names=config.maps_to_show) if not config.flipud: # Since the renderer plots with a left top coordinate system, # we need to flip the y coordinates upside down by default. tmp = max_y - bounding_box[0].y bounding_box[0].y = max_y - bounding_box[1].y bounding_box[1].y = tmp bounding_box = add_padding(bounding_box, max_x, max_y) self._controller.apply_action(SetZoom(Zoom(*bounding_box))) @pyqtSlot() def _update_zoom(self): np0x, np0y = self.general_zoom_x_0.value(), self.general_zoom_y_0.value() np1x, np1y = self.general_zoom_x_1.value(), self.general_zoom_y_1.value() if np0x > np1x: np1x = np0x if np0y > np1y: np1y = np0y self._controller.apply_action(SetZoom(Zoom.from_coords(np0x, np0y, np1x, np1y))) @staticmethod def _insert_alphabetically(new_item, item_list): for ind, item in enumerate(item_list): if item > new_item: item_list.insert(ind, new_item) return item_list.append(new_item) @pyqtSlot(int) def _update_mask_name(self, index): if index == 0: self._controller.apply_action(SetGeneralMask(None)) else: self._controller.apply_action(SetGeneralMask(self.mask_name.itemText(index))) def _split_long_path_elements(self, original_path, max_single_element_length=25): """Split long path elements into smaller ones using spaces Args: original_path (str): the path you want to split max_single_element_length (int): the maximum length allowed per path component (folders and filename). Returns: str: the same path but with spaces in long path elements. The result will no longer be a valid path. 
""" def split(p): listing = [] def _split(el): if el: head, tail = os.path.split(el) if not tail: listing.append(head) else: _split(head) listing.append(tail) _split(p) return listing elements = list(split(original_path)) new_elements = [] for el in elements: if len(el) > max_single_element_length: item = '' for i in range(0, len(el), max_single_element_length): item += el[i:i + max_single_element_length] + ' ' item = item[:-1] new_elements.append(item) else: new_elements.append(el) return os.path.join(*new_elements) PKɬ}Ie,:  2mdt/gui/maps_visualizer/config_tabs/tab_textual.pyimport yaml from PyQt5.QtCore import pyqtSlot from PyQt5.QtWidgets import QWidget from mdt.gui.maps_visualizer.actions import NewConfigAction from mdt.gui.maps_visualizer.design.ui_TabTextual import Ui_TabTextual from mdt.gui.utils import blocked_signals from mdt.visualization.maps.base import DataInfo, MapPlotConfig __author__ = 'Robbert Harms' __date__ = "2016-09-03" __maintainer__ = "Robbert Harms" __email__ = "robbert.harms@maastrichtuniversity.nl" class TabTextual(QWidget, Ui_TabTextual): def __init__(self, controller, parent=None): super(TabTextual, self).__init__(parent) self.setupUi(self) self._controller = controller self._controller.new_data.connect(self.set_new_data) self._controller.new_config.connect(self.set_new_config) self.textConfigEdit.new_config.connect(self._config_from_string) self._update_status_indication(True) self._flags = {'updating_config_from_string': False} @pyqtSlot(DataInfo) def set_new_data(self, data_info): pass @pyqtSlot(MapPlotConfig) def set_new_config(self, config): with blocked_signals(self.textConfigEdit): if not self._flags['updating_config_from_string']: self.textConfigEdit.setPlainText(config.to_yaml()) self._update_status_indication(True) @pyqtSlot(str) def _config_from_string(self, text): self._flags['updating_config_from_string'] = True text = text.replace('\t', ' '*4) try: if text.strip() != '': new_config = MapPlotConfig.from_yaml(text) new_config.validate(self._controller.get_data()) self._controller.apply_action(NewConfigAction(new_config)) self._update_status_indication(True) except Exception as exc: self._update_status_indication(False, str(exc)) pass finally: self._flags['updating_config_from_string'] = False def _update_status_indication(self, status, status_message=''): if status: self.textConfigEdit.setStyleSheet("border: 1px solid green") else: self.textConfigEdit.setStyleSheet("border: 1px solid red") self.correctness_label.setText(status_message) PKjUpI=V/mdt/gui/maps_visualizer/config_tabs/__init__.py__author__ = 'Robbert Harms' __date__ = "2016-09-03" __maintainer__ = "Robbert Harms" __email__ = "robbert.harms@maastrichtuniversity.nl" PKSZI]887mdt/gui/maps_visualizer/config_tabs/tab_map_specific.pyfrom PyQt5.QtCore import pyqtSlot, Qt from PyQt5.QtWidgets import QWidget from mdt.gui.maps_visualizer.actions import SetMapTitle, SetMapColormap, SetMapScale, SetMapClipping, \ SetMapColorbarLabel from mdt.gui.maps_visualizer.design.ui_MapSpecificOptions import Ui_MapSpecificOptions from mdt.gui.maps_visualizer.design.ui_TabMapSpecific import Ui_TabMapSpecific from mdt.gui.utils import blocked_signals, TimedUpdate from mdt.visualization.maps.base import DataInfo, SingleMapConfig, MapPlotConfig __author__ = 'Robbert Harms' __date__ = "2016-09-03" __maintainer__ = "Robbert Harms" __email__ = "robbert.harms@maastrichtuniversity.nl" class TabMapSpecific(QWidget, Ui_TabMapSpecific): def __init__(self, controller, parent=None): super(TabMapSpecific, 
self).__init__(parent) self.setupUi(self) self.map_specific_tab = MapSpecificOptions(controller, self) self.mapSpecificOptionsPosition.addWidget(self.map_specific_tab) self._controller = controller self._controller.new_data.connect(self.set_new_data) self._controller.new_config.connect(self.set_new_config) self.selectedMap.currentIndexChanged.connect( lambda ind: self._update_map_specifics(self.selectedMap.itemData(ind, Qt.UserRole))) @pyqtSlot(DataInfo) def set_new_data(self, data_info): pass @pyqtSlot(MapPlotConfig) def set_new_config(self, config): map_names = config.maps_to_show with blocked_signals(self.selectedMap): current_selected = self.selectedMap.currentData(Qt.UserRole) self.selectedMap.clear() self.selectedMap.addItems(map_names) for index, map_name in enumerate(map_names): self.selectedMap.setItemData(index, map_name, Qt.UserRole) if map_name in config.map_plot_options and config.map_plot_options[map_name].title: title = config.map_plot_options[map_name].title self.selectedMap.setItemData(index, map_name + ' (' + title + ')', Qt.DisplayRole) for ind in range(self.selectedMap.count()): if self.selectedMap.itemData(ind, Qt.UserRole) == current_selected: self.selectedMap.setCurrentIndex(ind) break if self.selectedMap.count(): self._update_map_specifics(self.selectedMap.currentData(Qt.UserRole)) else: self._update_map_specifics(None) def _update_map_specifics(self, map_name): """Set the map specific options to reflect the settings of the given map""" if map_name is None: self.map_specific_tab.reset() else: self.map_specific_tab.use(map_name) class MapSpecificOptions(QWidget, Ui_MapSpecificOptions): def __init__(self, controller, parent=None): super(MapSpecificOptions, self).__init__(parent) self.setupUi(self) self._controller = controller self._current_map = None self.colormap.addItems(['-- Use global --'] + self._controller.get_config().get_available_colormaps()) self.colormap.currentIndexChanged.connect(self._update_colormap) self.data_clipping_min.valueChanged.connect(self._update_clipping_min) self.data_clipping_max.valueChanged.connect(self._update_clipping_max) self.data_scale_min.valueChanged.connect(self._update_scale_min) self.data_scale_max.valueChanged.connect(self._update_scale_max) self.data_set_use_scale.stateChanged.connect(self._set_use_scale) self.use_data_scale_min.stateChanged.connect(self._set_use_data_scale_min) self.use_data_scale_max.stateChanged.connect(self._set_use_data_scale_max) self.data_set_use_clipping.stateChanged.connect(self._set_use_clipping) self.use_data_clipping_min.stateChanged.connect(self._set_use_data_clipping_min) self.use_data_clipping_max.stateChanged.connect(self._set_use_data_clipping_max) self._title_timer = TimedUpdate(self._update_map_title) self.map_title.textChanged.connect(lambda v: self._title_timer.add_delayed_callback(500, v)) self._colorbar_label_timer = TimedUpdate(self._update_colorbar_label) self.data_colorbar_label.textChanged.connect(lambda v: self._colorbar_label_timer.add_delayed_callback(500, v)) self.info_Clipping.set_collapse(True) def reset(self): """Set all the values to their defaults""" self._current_map = None self.colormap.setCurrentText('hot') with blocked_signals(self.map_title): self.map_title.setText('') with blocked_signals(self.data_colorbar_label): self.data_colorbar_label.setText('') with blocked_signals(self.data_clipping_min): self.data_clipping_min.setValue(0) with blocked_signals(self.data_clipping_max): self.data_clipping_max.setValue(0) with blocked_signals(self.data_scale_min): 
self.data_scale_min.setValue(0) with blocked_signals(self.data_scale_max): self.data_scale_max.setValue(0) with blocked_signals(self.use_data_clipping_min): self.use_data_clipping_min.setChecked(False) with blocked_signals(self.use_data_clipping_max): self.use_data_clipping_max.setChecked(False) with blocked_signals(self.use_data_scale_min): self.use_data_scale_min.setChecked(False) with blocked_signals(self.use_data_scale_max): self.use_data_scale_max.setChecked(False) self.info_file_location.setText('-') self.info_maximum.setText('-') self.info_minimum.setText('-') self.info_shape.setText('-') def use(self, map_name): """Load the settings of the given map""" self._current_map = map_name try: map_info = self._controller.get_config().map_plot_options[map_name] except KeyError: map_info = SingleMapConfig() data_info = self._controller.get_data() vmin = data_info.maps[map_name].min() vmax = data_info.maps[map_name].max() with blocked_signals(self.map_title): self.map_title.setText(map_info.title if map_info.title else '') with blocked_signals(self.data_colorbar_label): self.data_colorbar_label.setText(map_info.colorbar_label if map_info.colorbar_label else '') with blocked_signals(self.colormap): if map_info.colormap is None: self.colormap.setCurrentIndex(0) else: self.colormap.setCurrentText(map_info.colormap) with blocked_signals(self.data_clipping_min): self.data_clipping_min.setValue(map_info.clipping.vmin) with blocked_signals(self.data_clipping_max): self.data_clipping_max.setValue(map_info.clipping.vmax) with blocked_signals(self.data_scale_min): self.data_scale_min.setValue(map_info.scale.vmin) with blocked_signals(self.data_scale_max): self.data_scale_max.setValue(map_info.scale.vmax) with blocked_signals(self.data_set_use_scale): self.data_set_use_scale.setChecked(map_info.scale.use_min or map_info.scale.use_max) with blocked_signals(self.data_set_use_clipping): self.data_set_use_clipping.setChecked(map_info.clipping.use_min or map_info.clipping.use_max) with blocked_signals(self.use_data_clipping_min): self.use_data_clipping_min.setChecked(map_info.clipping.use_min) with blocked_signals(self.use_data_clipping_max): self.use_data_clipping_max.setChecked(map_info.clipping.use_max) with blocked_signals(self.use_data_scale_min): self.use_data_scale_min.setChecked(map_info.scale.use_min) with blocked_signals(self.use_data_scale_max): self.use_data_scale_max.setChecked(map_info.scale.use_max) map_filename = data_info.get_file_name(map_name) if map_filename: self.info_file_location.setText(map_filename) self.info_maximum.setText(str(vmax)) self.info_minimum.setText(str(vmin)) self.info_shape.setText(str(data_info.maps[map_name].shape)) def _get_current_map_config(self): current_config = self._controller.get_config() current_map_config = current_config.map_plot_options.get(self._current_map, SingleMapConfig()) return current_map_config @pyqtSlot(str) def _update_map_title(self, string): if self._current_map: if string == '': string = None self._controller.apply_action(SetMapTitle(self._current_map, string)) @pyqtSlot(str) def _update_colorbar_label(self, string): if self._current_map: if string == '': string = None self._controller.apply_action(SetMapColorbarLabel(self._current_map, string)) @pyqtSlot(int) def _update_colormap(self, index): if self._current_map: if index == 0: self._controller.apply_action(SetMapColormap(self._current_map, None)) else: self._controller.apply_action(SetMapColormap(self._current_map, self.colormap.itemText(index))) @pyqtSlot(float) def 
_update_scale_min(self, value): if self._current_map: current_scale = self._get_current_map_config().scale if current_scale.use_min and current_scale.use_max and value > current_scale.vmax: new_scale = current_scale.get_updated(vmin=value, vmax=value) else: new_scale = current_scale.get_updated(vmin=value) self._controller.apply_action(SetMapScale(self._current_map, new_scale)) @pyqtSlot(float) def _update_scale_max(self, value): if self._current_map: current_scale = self._get_current_map_config().scale if current_scale.use_min and current_scale.use_max and value < current_scale.vmin: new_scale = current_scale.get_updated(vmin=value, vmax=value) else: new_scale = current_scale.get_updated(vmax=value) self._controller.apply_action(SetMapScale(self._current_map, new_scale)) @pyqtSlot(int) def _set_use_scale(self, use_scale): if self._current_map: current_scale = self._get_current_map_config().scale if use_scale and current_scale.vmax < current_scale.vmin: new_scale = current_scale.get_updated(use_min=use_scale, use_max=use_scale, vmax=current_scale.vmin) else: new_scale = current_scale.get_updated(use_min=use_scale, use_max=use_scale) self._controller.apply_action(SetMapScale(self._current_map, new_scale)) @pyqtSlot(int) def _set_use_data_scale_min(self, use_scale): if self._current_map: if use_scale and self._get_current_map_config().scale.use_max: self._set_use_scale(True) else: new_scale = self._get_current_map_config().scale.get_updated(use_min=use_scale) self._controller.apply_action(SetMapScale(self._current_map, new_scale)) @pyqtSlot(int) def _set_use_data_scale_max(self, use_scale): if self._current_map: if use_scale and self._get_current_map_config().scale.use_min: self._set_use_scale(True) else: new_scale = self._get_current_map_config().scale.get_updated(use_max=use_scale) self._controller.apply_action(SetMapScale(self._current_map, new_scale)) @pyqtSlot(float) def _update_clipping_min(self, value): if self._current_map: current_clipping = self._get_current_map_config().clipping if current_clipping.use_min and current_clipping.use_max and value > current_clipping.vmax: new_clipping = current_clipping.get_updated(vmin=value, vmax=value) else: new_clipping = current_clipping.get_updated(vmin=value) self._controller.apply_action(SetMapClipping(self._current_map, new_clipping)) @pyqtSlot(float) def _update_clipping_max(self, value): if self._current_map: current_clipping = self._get_current_map_config().clipping if current_clipping.use_min and current_clipping.use_max and value < current_clipping.vmin: new_clipping = current_clipping.get_updated(vmin=value, vmax=value) else: new_clipping = current_clipping.get_updated(vmax=value) self._controller.apply_action(SetMapClipping(self._current_map, new_clipping)) @pyqtSlot(int) def _set_use_clipping(self, use_clipping): if self._current_map: current_clipping = self._get_current_map_config().clipping if use_clipping and current_clipping.vmax < current_clipping.vmin: new_clipping = current_clipping.get_updated(use_min=use_clipping, use_max=use_clipping, vmax=current_clipping.vmin) else: new_clipping = current_clipping.get_updated(use_min=use_clipping, use_max=use_clipping) self._controller.apply_action(SetMapClipping(self._current_map, new_clipping)) @pyqtSlot(int) def _set_use_data_clipping_min(self, use_clipping): if self._current_map: if use_clipping and self._get_current_map_config().clipping.use_max: self._set_use_clipping(True) else: new_clipping = self._get_current_map_config().clipping.get_updated(use_min=use_clipping) 
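# --- Editor's note (illustrative, not part of the original source) ---
# All of the scale and clipping handlers in MapSpecificOptions follow the same
# immutable-update pattern: read the current SingleMapConfig, derive a modified copy
# with get_updated(), and dispatch it to the controller as an action instead of
# mutating state in place. Roughly (the values here are hypothetical):
#
#     current_scale = self._get_current_map_config().scale
#     new_scale = current_scale.get_updated(use_min=True, vmin=0.0)
#     self._controller.apply_action(SetMapScale(self._current_map, new_scale))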
self._controller.apply_action(SetMapClipping(self._current_map, new_clipping)) @pyqtSlot(int) def _set_use_data_clipping_max(self, use_clipping): if self._current_map: if use_clipping and self._get_current_map_config().clipping.use_min: self._set_use_clipping(True) else: new_clipping = self._get_current_map_config().clipping.get_updated(use_max=use_clipping) self._controller.apply_action(SetMapClipping(self._current_map, new_clipping)) PKrVpIh""mdt/gui/model_fit/qt_main.pyimport signal import sys from PyQt5.QtCore import pyqtSignal from mdt.gui.model_fit.design.ui_about_dialog import Ui_AboutDialog from mdt.gui.model_fit.design.ui_runtime_settings_dialog import Ui_RuntimeSettingsDialog from mdt.gui.model_fit.tabs.fit_model_tab import FitModelTab from mdt.gui.model_fit.tabs.generate_brain_mask_tab import GenerateBrainMaskTab from mdt.gui.model_fit.tabs.generate_roi_mask_tab import GenerateROIMaskTab from mdt.gui.model_fit.tabs.view_results_tab import ViewResultsTab import mdt.utils import mot.configuration from mdt.configuration import update_gui_config from mdt.gui.model_fit.tabs.generate_protocol_tab import GenerateProtocolTab from mot.cl_environments import CLEnvironmentFactory from mot.load_balance_strategies import EvenDistribution try: #python 2.7 from Queue import Queue except ImportError: # python 3.4 from queue import Queue from PyQt5 import QtGui from PyQt5.QtCore import QThread, QTimer, pyqtSlot from PyQt5.QtWidgets import QMainWindow, QDialog, QDialogButtonBox from mdt.gui.model_fit.design.ui_main_gui import Ui_MainWindow from mdt.gui.utils import print_welcome_message, ForwardingListener, MessageReceiver, center_window, QtManager from mdt.gui.model_fit.utils import SharedState from mdt.log_handlers import LogDispatchHandler __author__ = 'Robbert Harms' __date__ = "2016-06-26" __maintainer__ = "Robbert Harms" __email__ = "robbert.harms@maastrichtuniversity.nl" class MDTGUISingleModel(QMainWindow, Ui_MainWindow): def __init__(self, shared_state, computations_thread): super(MDTGUISingleModel, self).__init__() self.setupUi(self) self._shared_state = shared_state self._computations_thread = computations_thread self._computations_thread.signal_starting.connect(self.computations_started) self._computations_thread.signal_finished.connect(self.computations_finished) self._stdout_old = sys.stdout self._stderr_old = sys.stderr self._logging_update_queue = Queue() self._logging_update_thread = QThread() self._message_receiver = MessageReceiver(self._logging_update_queue) self._message_receiver.text_message_signal.connect(self.update_log) self._message_receiver.moveToThread(self._logging_update_thread) self._logging_update_thread.started.connect(self._message_receiver.run) self._logging_update_thread.start() self._connect_output_textbox() self.actionExit.setShortcuts(['Ctrl+q', 'Ctrl+w']) self.action_RuntimeSettings.triggered.connect(lambda: RuntimeSettingsDialog(self).exec_()) self.actionAbout.triggered.connect(lambda: AboutDialog(self).exec_()) self.executionStatusLabel.setText('Idle') self.executionStatusIcon.setPixmap(QtGui.QPixmap(":/main_gui/icon_status_red.png")) self.fit_model_tab = FitModelTab(shared_state, self._computations_thread) self.fit_model_tab.setupUi(self.fitModelTab) self.generate_mask_tab = GenerateBrainMaskTab(shared_state, self._computations_thread) self.generate_mask_tab.setupUi(self.generateBrainMaskTab) self.view_results_tab = ViewResultsTab(shared_state, self._computations_thread) self.view_results_tab.setupUi(self.viewResultsTab) self.generate_roi_mask_tab = 
GenerateROIMaskTab(shared_state, self._computations_thread) self.generate_roi_mask_tab.setupUi(self.generateROIMaskTab) self.generate_protocol_tab = GenerateProtocolTab(shared_state, self._computations_thread) self.generate_protocol_tab.setupUi(self.generateProtocolTab) self.tabs = [self.fit_model_tab, self.generate_mask_tab, self.generate_roi_mask_tab, self.generate_protocol_tab, self.view_results_tab] self.MainTabs.currentChanged.connect(lambda index: self.tabs[index].tab_opened()) def _connect_output_textbox(self): sys.stdout = ForwardingListener(self._logging_update_queue) sys.stderr = ForwardingListener(self._logging_update_queue) LogDispatchHandler.add_listener(ForwardingListener(self._logging_update_queue)) print_welcome_message() def closeEvent(self, event): sys.stdout = self._stdout_old sys.stderr = self._stderr_old self._message_receiver.is_running = False self._logging_update_thread.quit() self._logging_update_thread.wait(10) super(MDTGUISingleModel, self).closeEvent(event) def send_sigint(self, *args): self.close() @pyqtSlot() def computations_started(self): self.executionStatusLabel.setText('Computing') self.executionStatusIcon.setPixmap(QtGui.QPixmap(":/main_gui/icon_status_green.png")) @pyqtSlot() def computations_finished(self): self.executionStatusLabel.setText('Idle') self.executionStatusIcon.setPixmap(QtGui.QPixmap(":/main_gui/icon_status_red.png")) @pyqtSlot(str) def update_log(self, string): sb = self.loggingTextBox.verticalScrollBar() scrollbar_position = sb.value() autoscroll = scrollbar_position == sb.maximum() self.loggingTextBox.moveCursor(QtGui.QTextCursor.End) self.loggingTextBox.insertPlainText(string) if autoscroll: sb.setValue(sb.maximum()) else: sb.setValue(scrollbar_position) class RuntimeSettingsDialog(Ui_RuntimeSettingsDialog, QDialog): def __init__(self, parent): super(RuntimeSettingsDialog, self).__init__(parent) self.setupUi(self) self.all_cl_devices = CLEnvironmentFactory.smart_device_selection() self.user_selected_devices = mot.configuration.get_cl_environments() self.cldevicesSelection.itemSelectionChanged.connect(self.selection_updated) self.cldevicesSelection.insertItems(0, [str(cl_device) for cl_device in self.all_cl_devices]) load_balancer = mot.configuration.get_load_balancer() lb_used_devices = load_balancer.get_used_cl_environments(self.all_cl_devices) for ind, device in enumerate(self.all_cl_devices): self.cldevicesSelection.item(ind).setSelected(device in self.user_selected_devices and device in lb_used_devices) self.buttonBox.button(QDialogButtonBox.Ok).clicked.connect(self._update_settings) @pyqtSlot() def selection_updated(self): self.buttonBox.button(QDialogButtonBox.Ok).setEnabled( any(self.cldevicesSelection.item(ind).isSelected() for ind in range(self.cldevicesSelection.count()))) def _update_settings(self): selection = [ind for ind in range(self.cldevicesSelection.count()) if self.cldevicesSelection.item(ind).isSelected()] mot.configuration.set_cl_environments([self.all_cl_devices[ind] for ind in selection]) mot.configuration.set_load_balancer(EvenDistribution()) update_gui_config({'runtime_settings': {'cl_device_ind': selection}}) class AboutDialog(Ui_AboutDialog, QDialog): def __init__(self, parent): super(AboutDialog, self).__init__(parent) self.setupUi(self) self.contentLabel.setText(self.contentLabel.text().replace('{version}', mdt.__version__)) class ComputationsThread(QThread): signal_starting = pyqtSignal() signal_finished = pyqtSignal() def __init__(self, *args, **kwargs): """This is the thread handler for the computations. 
When running computations using this thread please connect signals to the starting and finished slot of this class. These handlers notify the main window of the computations. """ super(ComputationsThread, self).__init__(*args, **kwargs) @pyqtSlot() def starting(self): self.signal_starting.emit() @pyqtSlot() def finished(self): self.signal_finished.emit() def start_gui(base_dir=None, app_exec=True): """Start the model fitting GUI. Args: base_dir (str): the starting directory for the file opening actions app_exec (boolean): if true we execute the Qt application, set to false to disable. """ try: mdt.configuration.load_user_gui() except IOError: pass app = QtManager.get_qt_application_instance() state = SharedState() state.base_dir = base_dir computations_thread = ComputationsThread() computations_thread.start() # catches the sigint timer = QTimer() timer.start(500) timer.timeout.connect(lambda: None) composite_model_gui = MDTGUISingleModel(state, computations_thread) signal.signal(signal.SIGINT, composite_model_gui.send_sigint) center_window(composite_model_gui) composite_model_gui.show() QtManager.add_window(composite_model_gui) if app_exec: QtManager.exec_() if __name__ == '__main__': start_gui() PKlVpIiO&&mdt/gui/model_fit/utils.pyfrom PyQt5.QtCore import QObject, pyqtSignal from mdt.gui.utils import UpdateDescriptor __author__ = 'Robbert Harms' __date__ = "2015-08-20" __maintainer__ = "Robbert Harms" __email__ = "robbert.harms@maastrichtuniversity.nl" class SharedState(QObject): state_updated_signal = pyqtSignal(str) def __init__(self, *args, **kwargs): """The shared state for the model fitting GUI Attributes: base_dir (str): the base dir for all file opening operations dimension_index (int): the dimension index used in various operations slice_index (int): the slice index used in various operations """ super(SharedState, self).__init__(*args, **kwargs) shared_attributes = {'base_dir': None, 'dimension_index': 0, 'slice_index': 0, 'output_folder': None} for key, value in shared_attributes.items(): def get_attribute_setter(attribute_key): def setter(value): setattr(self, attribute_key, value) return setter setattr(self, '_' + key, value) setattr(SharedState, key, UpdateDescriptor(key)) setattr(self, 'set_' + key, get_attribute_setter(key)) PKjUpImdt/gui/model_fit/__init__.pyPKIsIE,,6mdt/gui/model_fit/design/ui_runtime_settings_dialog.py# -*- coding: utf-8 -*- # Form implementation generated from reading ui file 'runtime_settings_dialog.ui' # # Created by: PyQt5 UI code generator 5.4.2 # # WARNING! All changes made in this file will be lost! 
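# --- Editor's usage sketch (illustrative addition, not part of this generated file) ---
# The start_gui() function defined in mdt/gui/model_fit/qt_main.py above is the public
# entry point for the model fitting GUI. A small launcher could look like the function
# below; the directory path is hypothetical.

def _example_launch_model_fit_gui():
    from mdt.gui.model_fit.qt_main import start_gui
    # app_exec=True enters the Qt event loop and blocks until the window closes;
    # pass app_exec=False when embedding in an already running Qt application.
    start_gui(base_dir='/data/my_study', app_exec=True)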
from PyQt5 import QtCore, QtGui, QtWidgets class Ui_RuntimeSettingsDialog(object): def setupUi(self, RuntimeSettingsDialog): RuntimeSettingsDialog.setObjectName("RuntimeSettingsDialog") RuntimeSettingsDialog.resize(844, 243) self.verticalLayout = QtWidgets.QVBoxLayout(RuntimeSettingsDialog) self.verticalLayout.setObjectName("verticalLayout") self.verticalLayout_3 = QtWidgets.QVBoxLayout() self.verticalLayout_3.setSpacing(0) self.verticalLayout_3.setObjectName("verticalLayout_3") self.label_3 = QtWidgets.QLabel(RuntimeSettingsDialog) font = QtGui.QFont() font.setPointSize(14) self.label_3.setFont(font) self.label_3.setObjectName("label_3") self.verticalLayout_3.addWidget(self.label_3) self.label_4 = QtWidgets.QLabel(RuntimeSettingsDialog) font = QtGui.QFont() font.setItalic(True) self.label_4.setFont(font) self.label_4.setObjectName("label_4") self.verticalLayout_3.addWidget(self.label_4) self.verticalLayout.addLayout(self.verticalLayout_3) self.line = QtWidgets.QFrame(RuntimeSettingsDialog) self.line.setFrameShape(QtWidgets.QFrame.HLine) self.line.setFrameShadow(QtWidgets.QFrame.Sunken) self.line.setObjectName("line") self.verticalLayout.addWidget(self.line) self.gridLayout = QtWidgets.QGridLayout() self.gridLayout.setObjectName("gridLayout") self.cldevicesSelection = QtWidgets.QListWidget(RuntimeSettingsDialog) self.cldevicesSelection.setSelectionMode(QtWidgets.QAbstractItemView.MultiSelection) self.cldevicesSelection.setObjectName("cldevicesSelection") self.gridLayout.addWidget(self.cldevicesSelection, 0, 1, 1, 1) self.label_10 = QtWidgets.QLabel(RuntimeSettingsDialog) font = QtGui.QFont() font.setItalic(True) self.label_10.setFont(font) self.label_10.setObjectName("label_10") self.gridLayout.addWidget(self.label_10, 0, 2, 1, 1) self.label = QtWidgets.QLabel(RuntimeSettingsDialog) self.label.setObjectName("label") self.gridLayout.addWidget(self.label, 0, 0, 1, 1) self.verticalLayout.addLayout(self.gridLayout) spacerItem = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding) self.verticalLayout.addItem(spacerItem) self.line_3 = QtWidgets.QFrame(RuntimeSettingsDialog) self.line_3.setFrameShape(QtWidgets.QFrame.HLine) self.line_3.setFrameShadow(QtWidgets.QFrame.Sunken) self.line_3.setObjectName("line_3") self.verticalLayout.addWidget(self.line_3) self.buttonBox = QtWidgets.QDialogButtonBox(RuntimeSettingsDialog) self.buttonBox.setOrientation(QtCore.Qt.Horizontal) self.buttonBox.setStandardButtons(QtWidgets.QDialogButtonBox.Cancel|QtWidgets.QDialogButtonBox.Ok) self.buttonBox.setObjectName("buttonBox") self.verticalLayout.addWidget(self.buttonBox) self.retranslateUi(RuntimeSettingsDialog) self.buttonBox.accepted.connect(RuntimeSettingsDialog.accept) self.buttonBox.rejected.connect(RuntimeSettingsDialog.reject) QtCore.QMetaObject.connectSlotsByName(RuntimeSettingsDialog) def retranslateUi(self, RuntimeSettingsDialog): _translate = QtCore.QCoreApplication.translate RuntimeSettingsDialog.setWindowTitle(_translate("RuntimeSettingsDialog", "Runtime settings")) self.label_3.setText(_translate("RuntimeSettingsDialog", "Runtime settings")) self.label_4.setText(_translate("RuntimeSettingsDialog", "Runtime settings for all compute operations.")) self.label_10.setText(_translate("RuntimeSettingsDialog", "(Select the devices you would like to use)")) self.label.setText(_translate("RuntimeSettingsDialog", "OpenCL devices:")) PKIsI,,/mdt/gui/model_fit/design/ui_view_results_tab.py# -*- coding: utf-8 -*- # Form implementation generated from reading ui file 
'view_results_tab.ui' # # Created by: PyQt5 UI code generator 5.4.2 # # WARNING! All changes made in this file will be lost! from PyQt5 import QtCore, QtGui, QtWidgets class Ui_ViewResultsTabContent(object): def setupUi(self, ViewResultsTabContent): ViewResultsTabContent.setObjectName("ViewResultsTabContent") ViewResultsTabContent.resize(938, 427) self.verticalLayout = QtWidgets.QVBoxLayout(ViewResultsTabContent) self.verticalLayout.setContentsMargins(-1, 11, -1, -1) self.verticalLayout.setObjectName("verticalLayout") self.verticalLayout_2 = QtWidgets.QVBoxLayout() self.verticalLayout_2.setSpacing(0) self.verticalLayout_2.setObjectName("verticalLayout_2") self.label = QtWidgets.QLabel(ViewResultsTabContent) font = QtGui.QFont() font.setPointSize(14) self.label.setFont(font) self.label.setObjectName("label") self.verticalLayout_2.addWidget(self.label) self.label_2 = QtWidgets.QLabel(ViewResultsTabContent) font = QtGui.QFont() font.setItalic(True) self.label_2.setFont(font) self.label_2.setObjectName("label_2") self.verticalLayout_2.addWidget(self.label_2) self.verticalLayout.addLayout(self.verticalLayout_2) self.line = QtWidgets.QFrame(ViewResultsTabContent) self.line.setFrameShadow(QtWidgets.QFrame.Sunken) self.line.setLineWidth(1) self.line.setFrameShape(QtWidgets.QFrame.HLine) self.line.setObjectName("line") self.verticalLayout.addWidget(self.line) self.gridLayout = QtWidgets.QGridLayout() self.gridLayout.setSizeConstraint(QtWidgets.QLayout.SetDefaultConstraint) self.gridLayout.setHorizontalSpacing(10) self.gridLayout.setObjectName("gridLayout") self.horizontalLayout_3 = QtWidgets.QHBoxLayout() self.horizontalLayout_3.setContentsMargins(-1, 0, -1, -1) self.horizontalLayout_3.setObjectName("horizontalLayout_3") self.deselectAllButton = QtWidgets.QPushButton(ViewResultsTabContent) self.deselectAllButton.setObjectName("deselectAllButton") self.horizontalLayout_3.addWidget(self.deselectAllButton) self.invertSelectionButton = QtWidgets.QPushButton(ViewResultsTabContent) self.invertSelectionButton.setObjectName("invertSelectionButton") self.horizontalLayout_3.addWidget(self.invertSelectionButton) self.gridLayout.addLayout(self.horizontalLayout_3, 3, 1, 1, 1) self.label_3 = QtWidgets.QLabel(ViewResultsTabContent) self.label_3.setMinimumSize(QtCore.QSize(0, 0)) self.label_3.setObjectName("label_3") self.gridLayout.addWidget(self.label_3, 2, 0, 1, 1) self.selectMaps = QtWidgets.QListWidget(ViewResultsTabContent) sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding) sizePolicy.setHorizontalStretch(0) sizePolicy.setVerticalStretch(1) sizePolicy.setHeightForWidth(self.selectMaps.sizePolicy().hasHeightForWidth()) self.selectMaps.setSizePolicy(sizePolicy) self.selectMaps.setSelectionMode(QtWidgets.QAbstractItemView.MultiSelection) self.selectMaps.setObjectName("selectMaps") self.gridLayout.addWidget(self.selectMaps, 2, 1, 1, 1) self.line_4 = QtWidgets.QFrame(ViewResultsTabContent) self.line_4.setFrameShape(QtWidgets.QFrame.HLine) self.line_4.setFrameShadow(QtWidgets.QFrame.Sunken) self.line_4.setObjectName("line_4") self.gridLayout.addWidget(self.line_4, 1, 0, 1, 3) self.label_5 = QtWidgets.QLabel(ViewResultsTabContent) font = QtGui.QFont() font.setItalic(True) self.label_5.setFont(font) self.label_5.setObjectName("label_5") self.gridLayout.addWidget(self.label_5, 2, 2, 1, 1) self.horizontalLayout_2 = QtWidgets.QHBoxLayout() self.horizontalLayout_2.setObjectName("horizontalLayout_2") self.selectFolderButton = QtWidgets.QPushButton(ViewResultsTabContent) 
self.selectFolderButton.setObjectName("selectFolderButton") self.horizontalLayout_2.addWidget(self.selectFolderButton) self.selectedFolderText = QtWidgets.QLineEdit(ViewResultsTabContent) self.selectedFolderText.setText("") self.selectedFolderText.setObjectName("selectedFolderText") self.horizontalLayout_2.addWidget(self.selectedFolderText) self.gridLayout.addLayout(self.horizontalLayout_2, 0, 1, 1, 1) self.label_6 = QtWidgets.QLabel(ViewResultsTabContent) self.label_6.setObjectName("label_6") self.gridLayout.addWidget(self.label_6, 0, 0, 1, 1) self.label_4 = QtWidgets.QLabel(ViewResultsTabContent) font = QtGui.QFont() font.setItalic(True) self.label_4.setFont(font) self.label_4.setObjectName("label_4") self.gridLayout.addWidget(self.label_4, 0, 2, 1, 1) self.line_3 = QtWidgets.QFrame(ViewResultsTabContent) self.line_3.setFrameShape(QtWidgets.QFrame.HLine) self.line_3.setFrameShadow(QtWidgets.QFrame.Sunken) self.line_3.setObjectName("line_3") self.gridLayout.addWidget(self.line_3, 4, 0, 1, 3) self.horizontalLayout_4 = QtWidgets.QHBoxLayout() self.horizontalLayout_4.setContentsMargins(0, -1, 0, -1) self.horizontalLayout_4.setObjectName("horizontalLayout_4") spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum) self.horizontalLayout_4.addItem(spacerItem) self.horizontalLayout_9 = QtWidgets.QHBoxLayout() self.horizontalLayout_9.setContentsMargins(-1, -1, 6, -1) self.horizontalLayout_9.setSpacing(0) self.horizontalLayout_9.setObjectName("horizontalLayout_9") self.label_7 = QtWidgets.QLabel(ViewResultsTabContent) self.label_7.setObjectName("label_7") self.horizontalLayout_9.addWidget(self.label_7) self.horizontalLayout_4.addLayout(self.horizontalLayout_9) self.initialDimensionChooser = QtWidgets.QSpinBox(ViewResultsTabContent) self.initialDimensionChooser.setMaximum(2) self.initialDimensionChooser.setProperty("value", 2) self.initialDimensionChooser.setObjectName("initialDimensionChooser") self.horizontalLayout_4.addWidget(self.initialDimensionChooser) spacerItem1 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum) self.horizontalLayout_4.addItem(spacerItem1) self.horizontalLayout_10 = QtWidgets.QHBoxLayout() self.horizontalLayout_10.setContentsMargins(-1, -1, 6, -1) self.horizontalLayout_10.setSpacing(0) self.horizontalLayout_10.setObjectName("horizontalLayout_10") self.label_9 = QtWidgets.QLabel(ViewResultsTabContent) self.label_9.setObjectName("label_9") self.horizontalLayout_10.addWidget(self.label_9) self.horizontalLayout_4.addLayout(self.horizontalLayout_10) self.initialSliceChooser = QtWidgets.QSpinBox(ViewResultsTabContent) self.initialSliceChooser.setObjectName("initialSliceChooser") self.horizontalLayout_4.addWidget(self.initialSliceChooser) self.horizontalLayout_5 = QtWidgets.QHBoxLayout() self.horizontalLayout_5.setContentsMargins(3, -1, -1, -1) self.horizontalLayout_5.setObjectName("horizontalLayout_5") self.label_8 = QtWidgets.QLabel(ViewResultsTabContent) self.label_8.setObjectName("label_8") self.horizontalLayout_5.addWidget(self.label_8) self.maximumIndexLabel = QtWidgets.QLabel(ViewResultsTabContent) self.maximumIndexLabel.setObjectName("maximumIndexLabel") self.horizontalLayout_5.addWidget(self.maximumIndexLabel) self.horizontalLayout_4.addLayout(self.horizontalLayout_5) spacerItem2 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum) self.horizontalLayout_4.addItem(spacerItem2) self.gridLayout.addLayout(self.horizontalLayout_4, 5, 1, 1, 
1) self.verticalLayout.addLayout(self.gridLayout) self.line_2 = QtWidgets.QFrame(ViewResultsTabContent) self.line_2.setFrameShape(QtWidgets.QFrame.HLine) self.line_2.setFrameShadow(QtWidgets.QFrame.Sunken) self.line_2.setObjectName("line_2") self.verticalLayout.addWidget(self.line_2) self.horizontalLayout = QtWidgets.QHBoxLayout() self.horizontalLayout.setContentsMargins(-1, 6, -1, 0) self.horizontalLayout.setObjectName("horizontalLayout") self.viewButton = QtWidgets.QPushButton(ViewResultsTabContent) self.viewButton.setObjectName("viewButton") self.horizontalLayout.addWidget(self.viewButton) spacerItem3 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum) self.horizontalLayout.addItem(spacerItem3) self.verticalLayout.addLayout(self.horizontalLayout) self.line_2.raise_() self.line.raise_() self.retranslateUi(ViewResultsTabContent) QtCore.QMetaObject.connectSlotsByName(ViewResultsTabContent) ViewResultsTabContent.setTabOrder(self.selectFolderButton, self.selectedFolderText) ViewResultsTabContent.setTabOrder(self.selectedFolderText, self.selectMaps) ViewResultsTabContent.setTabOrder(self.selectMaps, self.deselectAllButton) ViewResultsTabContent.setTabOrder(self.deselectAllButton, self.invertSelectionButton) ViewResultsTabContent.setTabOrder(self.invertSelectionButton, self.initialDimensionChooser) ViewResultsTabContent.setTabOrder(self.initialDimensionChooser, self.initialSliceChooser) ViewResultsTabContent.setTabOrder(self.initialSliceChooser, self.viewButton) def retranslateUi(self, ViewResultsTabContent): _translate = QtCore.QCoreApplication.translate ViewResultsTabContent.setWindowTitle(_translate("ViewResultsTabContent", "Form")) self.label.setText(_translate("ViewResultsTabContent", "View results")) self.label_2.setText(_translate("ViewResultsTabContent", "View a selection of maps in the given folder.")) self.deselectAllButton.setText(_translate("ViewResultsTabContent", "Deselect all")) self.invertSelectionButton.setText(_translate("ViewResultsTabContent", "Invert selection")) self.label_3.setText(_translate("ViewResultsTabContent", "Select maps:")) self.label_5.setText(_translate("ViewResultsTabContent", "(Select the maps you would like to display)")) self.selectFolderButton.setText(_translate("ViewResultsTabContent", "Browse")) self.label_6.setText(_translate("ViewResultsTabContent", "Select input folder:")) self.label_4.setText(_translate("ViewResultsTabContent", "(Choose a directory with .nii(.gz) files)")) self.label_7.setText(_translate("ViewResultsTabContent", "Initial dimension:")) self.label_9.setText(_translate("ViewResultsTabContent", "Initial slice:")) self.label_8.setText(_translate("ViewResultsTabContent", "/ ")) self.maximumIndexLabel.setText(_translate("ViewResultsTabContent", "x")) self.viewButton.setText(_translate("ViewResultsTabContent", "View")) PKIsI+mdt/gui/model_fit/design/ui_about_dialog.py# -*- coding: utf-8 -*- # Form implementation generated from reading ui file 'about_dialog.ui' # # Created by: PyQt5 UI code generator 5.4.2 # # WARNING! All changes made in this file will be lost! 
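# A minimal usage sketch (an assumption for illustration, not part of the generated
# module): the Ui_* classes in this package only build a widget tree onto a dialog
# that the caller owns, so typical client code looks roughly like this:
#
#     import sys
#     from PyQt5 import QtWidgets
#
#     app = QtWidgets.QApplication(sys.argv)
#     dialog = QtWidgets.QDialog()
#     ui = Ui_AboutDialog()
#     ui.setupUi(dialog)        # instantiates and lays out all child widgets on the dialog
#     dialog.exec_()            # run the dialog modally
#
# The "{version}" placeholder in contentLabel is presumably substituted by the
# caller at runtime (e.g. with str.format) before the dialog is shown.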
from PyQt5 import QtCore, QtGui, QtWidgets class Ui_AboutDialog(object): def setupUi(self, AboutDialog): AboutDialog.setObjectName("AboutDialog") AboutDialog.resize(594, 379) self.verticalLayout = QtWidgets.QVBoxLayout(AboutDialog) self.verticalLayout.setContentsMargins(6, 6, 6, 6) self.verticalLayout.setSpacing(3) self.verticalLayout.setObjectName("verticalLayout") self.horizontalLayout = QtWidgets.QHBoxLayout() self.horizontalLayout.setSpacing(0) self.horizontalLayout.setObjectName("horizontalLayout") self.verticalLayout_2 = QtWidgets.QVBoxLayout() self.verticalLayout_2.setContentsMargins(-1, -1, 16, -1) self.verticalLayout_2.setSpacing(0) self.verticalLayout_2.setObjectName("verticalLayout_2") self.label_2 = QtWidgets.QLabel(AboutDialog) self.label_2.setText("") self.label_2.setPixmap(QtGui.QPixmap(":/main_gui/logo")) self.label_2.setObjectName("label_2") self.verticalLayout_2.addWidget(self.label_2) spacerItem = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding) self.verticalLayout_2.addItem(spacerItem) self.horizontalLayout.addLayout(self.verticalLayout_2) self.verticalLayout_3 = QtWidgets.QVBoxLayout() self.verticalLayout_3.setObjectName("verticalLayout_3") self.label = QtWidgets.QLabel(AboutDialog) font = QtGui.QFont() font.setPointSize(14) font.setBold(True) font.setWeight(75) self.label.setFont(font) self.label.setObjectName("label") self.verticalLayout_3.addWidget(self.label) self.label_3 = QtWidgets.QLabel(AboutDialog) font = QtGui.QFont() font.setPointSize(12) font.setItalic(True) self.label_3.setFont(font) self.label_3.setObjectName("label_3") self.verticalLayout_3.addWidget(self.label_3) self.contentBoxLayout = QtWidgets.QVBoxLayout() self.contentBoxLayout.setContentsMargins(0, 20, -1, -1) self.contentBoxLayout.setObjectName("contentBoxLayout") self.contentLabel = QtWidgets.QLabel(AboutDialog) font = QtGui.QFont() font.setPointSize(11) self.contentLabel.setFont(font) self.contentLabel.setTextFormat(QtCore.Qt.RichText) self.contentLabel.setWordWrap(True) self.contentLabel.setObjectName("contentLabel") self.contentBoxLayout.addWidget(self.contentLabel) self.verticalLayout_3.addLayout(self.contentBoxLayout) spacerItem1 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding) self.verticalLayout_3.addItem(spacerItem1) self.horizontalLayout.addLayout(self.verticalLayout_3) spacerItem2 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum) self.horizontalLayout.addItem(spacerItem2) self.verticalLayout.addLayout(self.horizontalLayout) self.buttonBox = QtWidgets.QDialogButtonBox(AboutDialog) self.buttonBox.setOrientation(QtCore.Qt.Horizontal) self.buttonBox.setStandardButtons(QtWidgets.QDialogButtonBox.Close) self.buttonBox.setObjectName("buttonBox") self.verticalLayout.addWidget(self.buttonBox) self.retranslateUi(AboutDialog) self.buttonBox.accepted.connect(AboutDialog.accept) self.buttonBox.rejected.connect(AboutDialog.reject) QtCore.QMetaObject.connectSlotsByName(AboutDialog) def retranslateUi(self, AboutDialog): _translate = QtCore.QCoreApplication.translate AboutDialog.setWindowTitle(_translate("AboutDialog", "About MDT")) self.label.setText(_translate("AboutDialog", "MDT")) self.label_3.setText(_translate("AboutDialog", "Maastricht Diffusion Toolbox")) self.contentLabel.setText(_translate("AboutDialog", "\n" "\n" "

Version: {version}\n"
"\n"
"The Maastricht Diffusion Toolbox is a model recovery toolbox primarily meant for diffusion MRI analysis.\n"
"\n"
"Software development by Robbert Harms, under the (PhD) supervision of Alard Roebroeck, at Maastricht University.\n"
"\n"
"Contributors:\n"
"  • Robbert Harms\n"
"  • Alard Roebroeck\n"
"  • Francisco Fritz
")) from . import main_gui_rc PKIsITT'mdt/gui/model_fit/design/main_gui_rc.py# -*- coding: utf-8 -*- # Resource object code # # Created by: The Resource Compiler for PyQt5 (Qt v5.4.2) # # WARNING! All changes made in this file will be lost! from PyQt5 import QtCore qt_resource_data = b"\ \x00\x00\x03\xe1\ \x89\ \x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\ \x00\x00\x14\x00\x00\x00\x14\x08\x06\x00\x00\x00\x8d\x89\x1d\x0d\ \x00\x00\x00\x19\x74\x45\x58\x74\x53\x6f\x66\x74\x77\x61\x72\x65\ \x00\x41\x64\x6f\x62\x65\x20\x49\x6d\x61\x67\x65\x52\x65\x61\x64\ \x79\x71\xc9\x65\x3c\x00\x00\x03\x83\x49\x44\x41\x54\x78\xda\xac\ \x94\xdb\x8b\x1c\x45\x14\xc6\xbf\xd3\xd5\x5d\xdd\x3d\xb3\x61\xc9\ \x98\x9d\x18\x62\x20\xa8\x49\xc0\xa7\x88\x44\x85\xe8\x82\x8a\x17\ \x54\xf4\x1f\xf0\xd9\xdb\x3f\x20\xe8\x82\x2f\xea\x8b\x10\xc4\x97\ \xa0\x20\x3e\x29\x42\x1e\x0d\x2a\x8a\x04\x35\x98\x88\x09\x24\x8b\ \x62\xa2\xc9\x3a\xd9\xc9\xec\x65\x2e\x99\x9d\xeb\xf6\xa5\x2e\x9e\ \x9e\x99\x36\x13\x5d\x35\x0f\x56\x73\xfa\x56\xd5\xbf\xfa\xce\x57\ \xa7\x8b\xac\xb5\xf8\x3f\x1b\xfd\x4b\x9f\xc4\x5e\x3c\x84\xfd\x38\ \xec\xdd\x2a\xee\x26\x22\x37\x69\xa8\xf3\xb8\x84\x53\x58\xc2\xd7\ \x50\xe8\xdf\x3c\xf0\x00\x9e\x9d\x7b\x7a\xee\xb5\x17\x9f\x79\xe1\ \xd0\xfc\x1d\xf3\xb8\x25\xd8\x01\x87\x08\x1b\xf1\x06\x7e\xa8\x9e\ \xc6\xd1\xe3\x47\x7f\xaa\x7c\xba\xfc\x0e\xce\xe1\x83\xff\x56\x3b\ \x8f\x57\x0f\x7d\x78\x8f\x3d\x5d\x3f\x65\x3b\x69\xc7\x36\xe3\xa6\ \x5d\x8f\xd6\xed\x5a\xb4\x66\x1b\x71\xc3\x76\x55\xd7\x2e\x5e\x3b\ \x6f\x9f\x38\xf6\xb8\xc5\x53\x78\x97\xbf\x72\xa7\x21\x62\x0a\x66\ \x71\x2f\x5e\x9a\x7b\xae\x74\xe4\xcd\x47\xde\x42\x51\xce\x60\x69\ \x70\x19\x2b\xd1\x0a\xd6\xe2\x55\x8e\x35\xac\x6e\xae\xa0\x3a\xa8\ \x42\x59\x85\x83\x3b\x0f\xe2\xa4\xff\xed\x7d\x9d\x41\x8f\xd8\x82\ \x13\xb9\xa0\x9c\x6e\x51\x62\xb7\x9e\xc4\x1b\x7b\xca\x7b\x50\xd7\ \x75\x54\xba\x95\xd1\x74\xe4\x70\xaf\xf3\xe7\x28\xc0\xf0\x45\x01\ \x45\x3e\xf6\x95\xf7\x61\xf9\xb1\xda\x02\x5d\xc4\x97\xb6\x8a\x93\ \x39\x70\xa4\xce\x79\x00\x2f\xdb\x5d\x28\x75\xc4\x06\xce\x0e\xce\ \x40\x49\x35\xee\x15\x53\x4e\x4f\x80\xbc\x20\xf0\x52\x89\xba\x53\ \x07\xb6\x83\x68\x9e\x9e\xb7\x1f\xd9\xef\xb3\x11\x63\x85\x0e\xb6\ \xb9\x77\x8a\x07\x9d\x80\xd0\xa4\x06\x16\xa3\x73\x90\x8e\x0f\x4b\ \x76\x0c\x99\x6e\x0c\x24\x06\xaa\x48\xa3\x66\xaa\x08\x7c\x37\x5b\ \xc4\x87\xe3\x40\xed\xb2\x11\x6a\x23\x20\x49\xda\xe1\x95\xbc\xdd\ \x42\x3a\xb0\x12\x58\x36\x57\x10\xf2\x41\x99\xb4\x2d\xca\x34\x2b\ \xdd\xd8\x44\xd0\x9e\x86\x2f\x25\x4c\xc9\xee\x4c\xcb\x76\xaf\x5e\ \xd6\xb5\x71\xca\x84\xd0\xf5\xdd\x80\x03\x6e\x41\xc0\x09\x09\xda\ \x37\x20\x8f\xfe\x5e\x58\x99\x68\xc7\xc2\x31\x02\x41\x12\x42\x6f\ \x6a\x18\xdf\xb8\xe4\xa5\xb3\xd7\x3d\xd4\x88\x84\x71\xfb\x5e\x20\ \x67\x65\x81\xa1\x33\x63\xb0\xf0\x78\x35\x88\x6e\xf0\xd0\x1a\x0b\ \x93\x1a\x28\xce\x3b\x4d\xf4\xc8\xeb\x54\xab\x18\x9b\xb8\x96\x99\ \x37\x02\xda\xc4\xb6\x45\x5b\x54\x43\xdf\xdf\x1d\x04\x12\x7e\xc1\ \x87\x5f\xf4\x20\x5c\x56\xeb\x38\xd3\xe2\x60\x8c\x81\x4e\x14\xa7\ \x9c\x22\xf6\x62\xc4\x6e\x02\xd3\x32\x57\xf5\xba\xbe\x9a\x03\xb3\ \x71\xc3\xe4\x6c\xfa\x99\xf7\xa8\x77\xbf\x24\x89\x50\x04\x08\xdc\ \x00\xae\x74\x21\x18\x48\x34\x96\x98\xa9\xd3\xda\x20\x51\x09\x3f\ \xb1\x87\x8a\xe1\x6c\x68\xf2\x63\xef\xb8\xd5\xb6\x85\x49\x85\x65\ \x85\x40\xfd\xef\xfa\x9f\xa8\x45\x7d\x51\x68\x56\x96\x7a\x90\xbc\ \x3a\x21\x05\x08\x9d\x02\x0a\x54\xe0\xfb\x02\x02\x0a\x47\xef\x5d\ \xee\x77\x86\xac\x3e\x12\x30\xbf\xdb\x56\xef\x9b\xc1\xc7\x79\x02\ \x4e\x7e\xa3\x23\xb3\x52\x7f\xaf\xf9\xba\xb9\x62\x7a\x14\xb1\x2a\ \x8e\x0c\x9e\x01\xd8\x04\x64\x67\xcf\x30\x28\xe6\xa4\xfa\xac\x98\ 
# ... (several kilobytes of embedded PNG icon data, encoded as hexadecimal escape sequences in qt_resource_data, omitted for readability) ...
\x00\x6f\x00\x67\x00\x6f\ " qt_resource_struct = b"\ \x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\ \x00\x00\x00\x00\x00\x02\x00\x00\x00\x03\x00\x00\x00\x02\ \x00\x00\x00\x72\x00\x00\x00\x00\x00\x01\x00\x00\x07\xa5\ \x00\x00\x00\x46\x00\x00\x00\x00\x00\x01\x00\x00\x03\xe5\ \x00\x00\x00\x16\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\ " def qInitResources(): QtCore.qRegisterResourceData(0x01, qt_resource_struct, qt_resource_name, qt_resource_data) def qCleanupResources(): QtCore.qUnregisterResourceData(0x01, qt_resource_struct, qt_resource_name, qt_resource_data) qInitResources() PKIsI v))4mdt/gui/model_fit/design/ui_generate_protocol_tab.py# -*- coding: utf-8 -*- # Form implementation generated from reading ui file 'generate_protocol_tab.ui' # # Created by: PyQt5 UI code generator 5.4.2 # # WARNING! All changes made in this file will be lost! from PyQt5 import QtCore, QtGui, QtWidgets class Ui_GenerateProtocolTabContent(object): def setupUi(self, GenerateProtocolTabContent): GenerateProtocolTabContent.setObjectName("GenerateProtocolTabContent") GenerateProtocolTabContent.resize(827, 427) self.verticalLayout = QtWidgets.QVBoxLayout(GenerateProtocolTabContent) self.verticalLayout.setContentsMargins(-1, 11, -1, -1) self.verticalLayout.setObjectName("verticalLayout") self.verticalLayout_2 = QtWidgets.QVBoxLayout() self.verticalLayout_2.setSpacing(0) self.verticalLayout_2.setObjectName("verticalLayout_2") self.label = QtWidgets.QLabel(GenerateProtocolTabContent) font = QtGui.QFont() font.setPointSize(14) self.label.setFont(font) self.label.setObjectName("label") self.verticalLayout_2.addWidget(self.label) self.label_2 = QtWidgets.QLabel(GenerateProtocolTabContent) font = QtGui.QFont() font.setItalic(True) self.label_2.setFont(font) self.label_2.setObjectName("label_2") self.verticalLayout_2.addWidget(self.label_2) self.verticalLayout.addLayout(self.verticalLayout_2) self.line = QtWidgets.QFrame(GenerateProtocolTabContent) self.line.setFrameShadow(QtWidgets.QFrame.Sunken) self.line.setLineWidth(1) self.line.setFrameShape(QtWidgets.QFrame.HLine) self.line.setObjectName("line") self.verticalLayout.addWidget(self.line) self.protocol_table = QtWidgets.QTableWidget(GenerateProtocolTabContent) self.protocol_table.setToolTip("") self.protocol_table.setRowCount(0) self.protocol_table.setColumnCount(0) self.protocol_table.setObjectName("protocol_table") self.verticalLayout.addWidget(self.protocol_table) self.gridLayout = QtWidgets.QGridLayout() self.gridLayout.setContentsMargins(-1, 0, -1, -1) self.gridLayout.setObjectName("gridLayout") self.horizontalLayout_6 = QtWidgets.QHBoxLayout() self.horizontalLayout_6.setContentsMargins(-1, 0, -1, -1) self.horizontalLayout_6.setObjectName("horizontalLayout_6") self.label_18 = QtWidgets.QLabel(GenerateProtocolTabContent) self.label_18.setObjectName("label_18") self.horizontalLayout_6.addWidget(self.label_18) self.nmrShells = QtWidgets.QLabel(GenerateProtocolTabContent) self.nmrShells.setObjectName("nmrShells") self.horizontalLayout_6.addWidget(self.nmrShells) spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum) self.horizontalLayout_6.addItem(spacerItem) self.gridLayout.addLayout(self.horizontalLayout_6, 0, 3, 1, 1) self.horizontalLayout_7 = QtWidgets.QHBoxLayout() self.horizontalLayout_7.setContentsMargins(-1, 0, -1, -1) self.horizontalLayout_7.setObjectName("horizontalLayout_7") self.label_13 = QtWidgets.QLabel(GenerateProtocolTabContent) self.label_13.setObjectName("label_13") 
self.horizontalLayout_7.addWidget(self.label_13) self.nmrColumns = QtWidgets.QLabel(GenerateProtocolTabContent) self.nmrColumns.setObjectName("nmrColumns") self.horizontalLayout_7.addWidget(self.nmrColumns) spacerItem1 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum) self.horizontalLayout_7.addItem(spacerItem1) self.gridLayout.addLayout(self.horizontalLayout_7, 0, 4, 1, 1) self.horizontalLayout_3 = QtWidgets.QHBoxLayout() self.horizontalLayout_3.setContentsMargins(-1, 0, -1, -1) self.horizontalLayout_3.setObjectName("horizontalLayout_3") self.label_8 = QtWidgets.QLabel(GenerateProtocolTabContent) self.label_8.setObjectName("label_8") self.horizontalLayout_3.addWidget(self.label_8) self.nmrRows = QtWidgets.QLabel(GenerateProtocolTabContent) self.nmrRows.setObjectName("nmrRows") self.horizontalLayout_3.addWidget(self.nmrRows) spacerItem2 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum) self.horizontalLayout_3.addItem(spacerItem2) self.gridLayout.addLayout(self.horizontalLayout_3, 0, 0, 1, 1) self.horizontalLayout_5 = QtWidgets.QHBoxLayout() self.horizontalLayout_5.setContentsMargins(-1, 0, -1, -1) self.horizontalLayout_5.setObjectName("horizontalLayout_5") self.label_17 = QtWidgets.QLabel(GenerateProtocolTabContent) self.label_17.setObjectName("label_17") self.horizontalLayout_5.addWidget(self.label_17) self.nmrWeighted = QtWidgets.QLabel(GenerateProtocolTabContent) self.nmrWeighted.setObjectName("nmrWeighted") self.horizontalLayout_5.addWidget(self.nmrWeighted) spacerItem3 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum) self.horizontalLayout_5.addItem(spacerItem3) self.gridLayout.addLayout(self.horizontalLayout_5, 0, 2, 1, 1) self.horizontalLayout_4 = QtWidgets.QHBoxLayout() self.horizontalLayout_4.setContentsMargins(-1, 0, -1, -1) self.horizontalLayout_4.setObjectName("horizontalLayout_4") self.label_16 = QtWidgets.QLabel(GenerateProtocolTabContent) self.label_16.setObjectName("label_16") self.horizontalLayout_4.addWidget(self.label_16) self.nmrUnweighted = QtWidgets.QLabel(GenerateProtocolTabContent) self.nmrUnweighted.setObjectName("nmrUnweighted") self.horizontalLayout_4.addWidget(self.nmrUnweighted) spacerItem4 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum) self.horizontalLayout_4.addItem(spacerItem4) self.gridLayout.addLayout(self.horizontalLayout_4, 0, 1, 1, 1) self.horizontalLayout_9 = QtWidgets.QHBoxLayout() self.horizontalLayout_9.setObjectName("horizontalLayout_9") self.label_4 = QtWidgets.QLabel(GenerateProtocolTabContent) self.label_4.setObjectName("label_4") self.horizontalLayout_9.addWidget(self.label_4) self.differentShells = QtWidgets.QLabel(GenerateProtocolTabContent) self.differentShells.setObjectName("differentShells") self.horizontalLayout_9.addWidget(self.differentShells) spacerItem5 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum) self.horizontalLayout_9.addItem(spacerItem5) self.gridLayout.addLayout(self.horizontalLayout_9, 1, 0, 1, 5) self.verticalLayout.addLayout(self.gridLayout) self.line_3 = QtWidgets.QFrame(GenerateProtocolTabContent) self.line_3.setFrameShape(QtWidgets.QFrame.HLine) self.line_3.setFrameShadow(QtWidgets.QFrame.Sunken) self.line_3.setObjectName("line_3") self.verticalLayout.addWidget(self.line_3) self.horizontalLayout = QtWidgets.QHBoxLayout() self.horizontalLayout.setContentsMargins(-1, 0, -1, 0) 
self.horizontalLayout.setObjectName("horizontalLayout") self.loadGB = QtWidgets.QPushButton(GenerateProtocolTabContent) self.loadGB.setObjectName("loadGB") self.horizontalLayout.addWidget(self.loadGB) self.loadProtocolButton = QtWidgets.QPushButton(GenerateProtocolTabContent) self.loadProtocolButton.setObjectName("loadProtocolButton") self.horizontalLayout.addWidget(self.loadProtocolButton) self.loadColumnButton = QtWidgets.QPushButton(GenerateProtocolTabContent) self.loadColumnButton.setObjectName("loadColumnButton") self.horizontalLayout.addWidget(self.loadColumnButton) self.saveButton = QtWidgets.QPushButton(GenerateProtocolTabContent) self.saveButton.setObjectName("saveButton") self.horizontalLayout.addWidget(self.saveButton) self.clearButton = QtWidgets.QPushButton(GenerateProtocolTabContent) self.clearButton.setObjectName("clearButton") self.horizontalLayout.addWidget(self.clearButton) spacerItem6 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum) self.horizontalLayout.addItem(spacerItem6) self.verticalLayout.addLayout(self.horizontalLayout) self.retranslateUi(GenerateProtocolTabContent) QtCore.QMetaObject.connectSlotsByName(GenerateProtocolTabContent) def retranslateUi(self, GenerateProtocolTabContent): _translate = QtCore.QCoreApplication.translate GenerateProtocolTabContent.setWindowTitle(_translate("GenerateProtocolTabContent", "Form")) self.label.setText(_translate("GenerateProtocolTabContent", "Generate protocol file")) self.label_2.setText(_translate("GenerateProtocolTabContent", "Create a protocol file containing all your sequence information.")) self.label_18.setText(_translate("GenerateProtocolTabContent", "# shells:")) self.nmrShells.setText(_translate("GenerateProtocolTabContent", "0")) self.label_13.setText(_translate("GenerateProtocolTabContent", "# columns:")) self.nmrColumns.setText(_translate("GenerateProtocolTabContent", "0")) self.label_8.setText(_translate("GenerateProtocolTabContent", "# rows:")) self.nmrRows.setText(_translate("GenerateProtocolTabContent", "0")) self.label_17.setText(_translate("GenerateProtocolTabContent", "# weighted:")) self.nmrWeighted.setText(_translate("GenerateProtocolTabContent", "0")) self.label_16.setText(_translate("GenerateProtocolTabContent", "# unweighted:")) self.nmrUnweighted.setText(_translate("GenerateProtocolTabContent", "0")) self.label_4.setText(_translate("GenerateProtocolTabContent", "Different shells:")) self.differentShells.setText(_translate("GenerateProtocolTabContent", "-")) self.loadGB.setText(_translate("GenerateProtocolTabContent", "Load g && b")) self.loadProtocolButton.setText(_translate("GenerateProtocolTabContent", "Load protocol")) self.loadColumnButton.setText(_translate("GenerateProtocolTabContent", "Add / Update column")) self.saveButton.setText(_translate("GenerateProtocolTabContent", "Save as")) self.clearButton.setText(_translate("GenerateProtocolTabContent", "Clear")) PKIsIY7070,mdt/gui/model_fit/design/ui_fit_model_tab.py# -*- coding: utf-8 -*- # Form implementation generated from reading ui file 'fit_model_tab.ui' # # Created by: PyQt5 UI code generator 5.4.2 # # WARNING! All changes made in this file will be lost! 
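# The ui_*.py modules in this package are machine-generated (note the "changes will
# be lost" warnings) and would normally be regenerated from the Qt Designer sources
# rather than edited by hand. Assuming the .ui files sit next to the generated
# modules, a typical regeneration looks like:
#
#     pyuic5 fit_model_tab.ui -o ui_fit_model_tab.py
#
# and, for the icon resources referenced as ":/main_gui/...":
#
#     pyrcc5 main_gui.qrc -o main_gui_rc.py    # the .qrc file name here is assumed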
from PyQt5 import QtCore, QtGui, QtWidgets class Ui_FitModelTabContent(object): def setupUi(self, FitModelTabContent): FitModelTabContent.setObjectName("FitModelTabContent") FitModelTabContent.resize(1047, 427) self.verticalLayout = QtWidgets.QVBoxLayout(FitModelTabContent) self.verticalLayout.setContentsMargins(-1, 11, -1, -1) self.verticalLayout.setObjectName("verticalLayout") self.verticalLayout_2 = QtWidgets.QVBoxLayout() self.verticalLayout_2.setSpacing(0) self.verticalLayout_2.setObjectName("verticalLayout_2") self.label = QtWidgets.QLabel(FitModelTabContent) font = QtGui.QFont() font.setPointSize(14) self.label.setFont(font) self.label.setObjectName("label") self.verticalLayout_2.addWidget(self.label) self.label_2 = QtWidgets.QLabel(FitModelTabContent) font = QtGui.QFont() font.setItalic(True) self.label_2.setFont(font) self.label_2.setObjectName("label_2") self.verticalLayout_2.addWidget(self.label_2) self.verticalLayout.addLayout(self.verticalLayout_2) self.line = QtWidgets.QFrame(FitModelTabContent) self.line.setFrameShadow(QtWidgets.QFrame.Sunken) self.line.setLineWidth(1) self.line.setFrameShape(QtWidgets.QFrame.HLine) self.line.setObjectName("line") self.verticalLayout.addWidget(self.line) self.gridLayout = QtWidgets.QGridLayout() self.gridLayout.setSizeConstraint(QtWidgets.QLayout.SetDefaultConstraint) self.gridLayout.setHorizontalSpacing(10) self.gridLayout.setObjectName("gridLayout") self.line_3 = QtWidgets.QFrame(FitModelTabContent) self.line_3.setFrameShape(QtWidgets.QFrame.HLine) self.line_3.setFrameShadow(QtWidgets.QFrame.Sunken) self.line_3.setObjectName("line_3") self.gridLayout.addWidget(self.line_3, 4, 0, 1, 3) self.horizontalLayout_2 = QtWidgets.QHBoxLayout() self.horizontalLayout_2.setObjectName("horizontalLayout_2") self.selectDWI = QtWidgets.QPushButton(FitModelTabContent) self.selectDWI.setObjectName("selectDWI") self.horizontalLayout_2.addWidget(self.selectDWI) self.selectedDWI = QtWidgets.QLineEdit(FitModelTabContent) self.selectedDWI.setText("") self.selectedDWI.setObjectName("selectedDWI") self.horizontalLayout_2.addWidget(self.selectedDWI) self.gridLayout.addLayout(self.horizontalLayout_2, 0, 1, 1, 1) self.label_6 = QtWidgets.QLabel(FitModelTabContent) self.label_6.setObjectName("label_6") self.gridLayout.addWidget(self.label_6, 0, 0, 1, 1) self.line_4 = QtWidgets.QFrame(FitModelTabContent) self.line_4.setFrameShape(QtWidgets.QFrame.HLine) self.line_4.setFrameShadow(QtWidgets.QFrame.Sunken) self.line_4.setObjectName("line_4") self.gridLayout.addWidget(self.line_4, 6, 0, 1, 3) self.label_11 = QtWidgets.QLabel(FitModelTabContent) font = QtGui.QFont() font.setItalic(True) self.label_11.setFont(font) self.label_11.setObjectName("label_11") self.gridLayout.addWidget(self.label_11, 2, 2, 1, 1) self.label_3 = QtWidgets.QLabel(FitModelTabContent) self.label_3.setMinimumSize(QtCore.QSize(0, 0)) self.label_3.setObjectName("label_3") self.gridLayout.addWidget(self.label_3, 1, 0, 1, 1) self.label_4 = QtWidgets.QLabel(FitModelTabContent) font = QtGui.QFont() font.setItalic(True) self.label_4.setFont(font) self.label_4.setObjectName("label_4") self.gridLayout.addWidget(self.label_4, 0, 2, 1, 1) self.label_12 = QtWidgets.QLabel(FitModelTabContent) self.label_12.setMinimumSize(QtCore.QSize(0, 0)) self.label_12.setObjectName("label_12") self.gridLayout.addWidget(self.label_12, 5, 0, 1, 1) self.modelSelection = QtWidgets.QComboBox(FitModelTabContent) self.modelSelection.setObjectName("modelSelection") self.gridLayout.addWidget(self.modelSelection, 5, 1, 1, 1) self.label_5 = 
QtWidgets.QLabel(FitModelTabContent) font = QtGui.QFont() font.setItalic(True) self.label_5.setFont(font) self.label_5.setObjectName("label_5") self.gridLayout.addWidget(self.label_5, 1, 2, 1, 1) self.label_13 = QtWidgets.QLabel(FitModelTabContent) self.label_13.setMinimumSize(QtCore.QSize(0, 0)) self.label_13.setObjectName("label_13") self.gridLayout.addWidget(self.label_13, 3, 0, 1, 1) self.label_10 = QtWidgets.QLabel(FitModelTabContent) self.label_10.setMinimumSize(QtCore.QSize(0, 0)) self.label_10.setObjectName("label_10") self.gridLayout.addWidget(self.label_10, 2, 0, 1, 1) self.label_14 = QtWidgets.QLabel(FitModelTabContent) font = QtGui.QFont() font.setItalic(True) self.label_14.setFont(font) self.label_14.setObjectName("label_14") self.gridLayout.addWidget(self.label_14, 3, 2, 1, 1) self.horizontalLayout_7 = QtWidgets.QHBoxLayout() self.horizontalLayout_7.setObjectName("horizontalLayout_7") self.selectOutputFolder = QtWidgets.QPushButton(FitModelTabContent) self.selectOutputFolder.setObjectName("selectOutputFolder") self.horizontalLayout_7.addWidget(self.selectOutputFolder) self.selectedOutputFolder = QtWidgets.QLineEdit(FitModelTabContent) self.selectedOutputFolder.setText("") self.selectedOutputFolder.setObjectName("selectedOutputFolder") self.horizontalLayout_7.addWidget(self.selectedOutputFolder) self.gridLayout.addLayout(self.horizontalLayout_7, 3, 1, 1, 1) self.label_15 = QtWidgets.QLabel(FitModelTabContent) font = QtGui.QFont() font.setItalic(True) self.label_15.setFont(font) self.label_15.setObjectName("label_15") self.gridLayout.addWidget(self.label_15, 5, 2, 1, 1) self.horizontalLayout_3 = QtWidgets.QHBoxLayout() self.horizontalLayout_3.setObjectName("horizontalLayout_3") self.selectMask = QtWidgets.QPushButton(FitModelTabContent) self.selectMask.setObjectName("selectMask") self.horizontalLayout_3.addWidget(self.selectMask) self.selectedMask = QtWidgets.QLineEdit(FitModelTabContent) self.selectedMask.setText("") self.selectedMask.setObjectName("selectedMask") self.horizontalLayout_3.addWidget(self.selectedMask) self.gridLayout.addLayout(self.horizontalLayout_3, 1, 1, 1, 1) self.horizontalLayout_6 = QtWidgets.QHBoxLayout() self.horizontalLayout_6.setObjectName("horizontalLayout_6") self.selectProtocol = QtWidgets.QPushButton(FitModelTabContent) self.selectProtocol.setObjectName("selectProtocol") self.horizontalLayout_6.addWidget(self.selectProtocol) self.selectedProtocol = QtWidgets.QLineEdit(FitModelTabContent) self.selectedProtocol.setText("") self.selectedProtocol.setObjectName("selectedProtocol") self.horizontalLayout_6.addWidget(self.selectedProtocol) self.gridLayout.addLayout(self.horizontalLayout_6, 2, 1, 1, 1) self.horizontalLayout_4 = QtWidgets.QHBoxLayout() self.horizontalLayout_4.setObjectName("horizontalLayout_4") self.optimizationOptionsButton = QtWidgets.QPushButton(FitModelTabContent) self.optimizationOptionsButton.setObjectName("optimizationOptionsButton") self.horizontalLayout_4.addWidget(self.optimizationOptionsButton) spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum) self.horizontalLayout_4.addItem(spacerItem) self.gridLayout.addLayout(self.horizontalLayout_4, 7, 1, 1, 1) self.label_17 = QtWidgets.QLabel(FitModelTabContent) font = QtGui.QFont() font.setItalic(True) self.label_17.setFont(font) self.label_17.setObjectName("label_17") self.gridLayout.addWidget(self.label_17, 7, 2, 1, 1) self.verticalLayout.addLayout(self.gridLayout) self.line_2 = QtWidgets.QFrame(FitModelTabContent) 
self.line_2.setFrameShape(QtWidgets.QFrame.HLine) self.line_2.setFrameShadow(QtWidgets.QFrame.Sunken) self.line_2.setObjectName("line_2") self.verticalLayout.addWidget(self.line_2) self.horizontalLayout = QtWidgets.QHBoxLayout() self.horizontalLayout.setContentsMargins(-1, 6, -1, 0) self.horizontalLayout.setObjectName("horizontalLayout") self.runButton = QtWidgets.QPushButton(FitModelTabContent) self.runButton.setEnabled(True) self.runButton.setObjectName("runButton") self.horizontalLayout.addWidget(self.runButton) spacerItem1 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum) self.horizontalLayout.addItem(spacerItem1) self.verticalLayout.addLayout(self.horizontalLayout) spacerItem2 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding) self.verticalLayout.addItem(spacerItem2) self.line_2.raise_() self.line.raise_() self.retranslateUi(FitModelTabContent) QtCore.QMetaObject.connectSlotsByName(FitModelTabContent) FitModelTabContent.setTabOrder(self.selectDWI, self.selectedDWI) FitModelTabContent.setTabOrder(self.selectedDWI, self.selectMask) FitModelTabContent.setTabOrder(self.selectMask, self.selectedMask) FitModelTabContent.setTabOrder(self.selectedMask, self.selectProtocol) FitModelTabContent.setTabOrder(self.selectProtocol, self.selectedProtocol) FitModelTabContent.setTabOrder(self.selectedProtocol, self.selectOutputFolder) FitModelTabContent.setTabOrder(self.selectOutputFolder, self.selectedOutputFolder) FitModelTabContent.setTabOrder(self.selectedOutputFolder, self.modelSelection) FitModelTabContent.setTabOrder(self.modelSelection, self.runButton) def retranslateUi(self, FitModelTabContent): _translate = QtCore.QCoreApplication.translate FitModelTabContent.setWindowTitle(_translate("FitModelTabContent", "Form")) self.label.setText(_translate("FitModelTabContent", "Fit model")) self.label_2.setText(_translate("FitModelTabContent", "Optimize a model to your data.")) self.selectDWI.setText(_translate("FitModelTabContent", "Browse")) self.label_6.setText(_translate("FitModelTabContent", "Select DWI:")) self.label_11.setText(_translate("FitModelTabContent", "(Select your protocol file, see tab \"Generate protocol file\")")) self.label_3.setText(_translate("FitModelTabContent", "Select brain mask:")) self.label_4.setText(_translate("FitModelTabContent", "(Select your preprocessed 4d diffusion weighted volume)")) self.label_12.setText(_translate("FitModelTabContent", "Select model:")) self.label_5.setText(_translate("FitModelTabContent", "(Select your brain mask, see tab \"Generate brain mask\")")) self.label_13.setText(_translate("FitModelTabContent", "Select output folder:")) self.label_10.setText(_translate("FitModelTabContent", "Select protocol file:")) self.label_14.setText(_translate("FitModelTabContent", "(Defaults to \"output/\" in the DWI directory)")) self.selectOutputFolder.setText(_translate("FitModelTabContent", "Browse")) self.label_15.setText(_translate("FitModelTabContent", "(Please select a model)")) self.selectMask.setText(_translate("FitModelTabContent", "Browse")) self.selectProtocol.setText(_translate("FitModelTabContent", "Browse")) self.optimizationOptionsButton.setText(_translate("FitModelTabContent", "Optimization options")) self.label_17.setText(_translate("FitModelTabContent", "(Additional settings)")) self.runButton.setText(_translate("FitModelTabContent", "Run")) PKIsIDE++4mdt/gui/model_fit/design/ui_generate_roi_mask_tab.py# -*- coding: utf-8 -*- # Form implementation generated 
from reading ui file 'generate_roi_mask_tab.ui' # # Created by: PyQt5 UI code generator 5.4.2 # # WARNING! All changes made in this file will be lost! from PyQt5 import QtCore, QtGui, QtWidgets class Ui_GenerateROIMaskTabContent(object): def setupUi(self, GenerateROIMaskTabContent): GenerateROIMaskTabContent.setObjectName("GenerateROIMaskTabContent") GenerateROIMaskTabContent.resize(827, 427) self.verticalLayout = QtWidgets.QVBoxLayout(GenerateROIMaskTabContent) self.verticalLayout.setContentsMargins(-1, 11, -1, -1) self.verticalLayout.setObjectName("verticalLayout") self.verticalLayout_2 = QtWidgets.QVBoxLayout() self.verticalLayout_2.setContentsMargins(-1, -1, -1, 0) self.verticalLayout_2.setSpacing(0) self.verticalLayout_2.setObjectName("verticalLayout_2") self.label = QtWidgets.QLabel(GenerateROIMaskTabContent) font = QtGui.QFont() font.setPointSize(14) self.label.setFont(font) self.label.setObjectName("label") self.verticalLayout_2.addWidget(self.label) self.label_2 = QtWidgets.QLabel(GenerateROIMaskTabContent) font = QtGui.QFont() font.setItalic(True) self.label_2.setFont(font) self.label_2.setObjectName("label_2") self.verticalLayout_2.addWidget(self.label_2) self.verticalLayout.addLayout(self.verticalLayout_2) self.line = QtWidgets.QFrame(GenerateROIMaskTabContent) self.line.setFrameShadow(QtWidgets.QFrame.Sunken) self.line.setLineWidth(1) self.line.setFrameShape(QtWidgets.QFrame.HLine) self.line.setObjectName("line") self.verticalLayout.addWidget(self.line) self.gridLayout = QtWidgets.QGridLayout() self.gridLayout.setSizeConstraint(QtWidgets.QLayout.SetDefaultConstraint) self.gridLayout.setHorizontalSpacing(10) self.gridLayout.setObjectName("gridLayout") self.horizontalLayout_5 = QtWidgets.QHBoxLayout() self.horizontalLayout_5.setObjectName("horizontalLayout_5") self.dimensionInput = QtWidgets.QSpinBox(GenerateROIMaskTabContent) self.dimensionInput.setMinimum(0) self.dimensionInput.setMaximum(2) self.dimensionInput.setProperty("value", 2) self.dimensionInput.setObjectName("dimensionInput") self.horizontalLayout_5.addWidget(self.dimensionInput) spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum) self.horizontalLayout_5.addItem(spacerItem) self.gridLayout.addLayout(self.horizontalLayout_5, 1, 1, 1, 1) self.label_6 = QtWidgets.QLabel(GenerateROIMaskTabContent) self.label_6.setObjectName("label_6") self.gridLayout.addWidget(self.label_6, 0, 0, 1, 1) self.label_3 = QtWidgets.QLabel(GenerateROIMaskTabContent) self.label_3.setMinimumSize(QtCore.QSize(0, 0)) self.label_3.setObjectName("label_3") self.gridLayout.addWidget(self.label_3, 3, 0, 1, 1) self.label_10 = QtWidgets.QLabel(GenerateROIMaskTabContent) font = QtGui.QFont() font.setItalic(True) self.label_10.setFont(font) self.label_10.setObjectName("label_10") self.gridLayout.addWidget(self.label_10, 2, 2, 1, 1) self.horizontalLayout_2 = QtWidgets.QHBoxLayout() self.horizontalLayout_2.setObjectName("horizontalLayout_2") self.selectMaskButton = QtWidgets.QPushButton(GenerateROIMaskTabContent) self.selectMaskButton.setObjectName("selectMaskButton") self.horizontalLayout_2.addWidget(self.selectMaskButton) self.selectedMaskText = QtWidgets.QLineEdit(GenerateROIMaskTabContent) self.selectedMaskText.setText("") self.selectedMaskText.setObjectName("selectedMaskText") self.horizontalLayout_2.addWidget(self.selectedMaskText) self.gridLayout.addLayout(self.horizontalLayout_2, 0, 1, 1, 1) self.label_4 = QtWidgets.QLabel(GenerateROIMaskTabContent) font = QtGui.QFont() font.setItalic(True) 
self.label_4.setFont(font) self.label_4.setObjectName("label_4") self.gridLayout.addWidget(self.label_4, 0, 2, 1, 1) self.label_5 = QtWidgets.QLabel(GenerateROIMaskTabContent) font = QtGui.QFont() font.setItalic(True) self.label_5.setFont(font) self.label_5.setObjectName("label_5") self.gridLayout.addWidget(self.label_5, 1, 2, 1, 1) self.horizontalLayout_3 = QtWidgets.QHBoxLayout() self.horizontalLayout_3.setContentsMargins(-1, 0, -1, -1) self.horizontalLayout_3.setObjectName("horizontalLayout_3") self.sliceInput = QtWidgets.QSpinBox(GenerateROIMaskTabContent) self.sliceInput.setMinimum(0) self.sliceInput.setMaximum(10000) self.sliceInput.setProperty("value", 0) self.sliceInput.setObjectName("sliceInput") self.horizontalLayout_3.addWidget(self.sliceInput) self.horizontalLayout_6 = QtWidgets.QHBoxLayout() self.horizontalLayout_6.setContentsMargins(3, -1, -1, -1) self.horizontalLayout_6.setObjectName("horizontalLayout_6") self.label_7 = QtWidgets.QLabel(GenerateROIMaskTabContent) self.label_7.setObjectName("label_7") self.horizontalLayout_6.addWidget(self.label_7) self.maxSliceLabel = QtWidgets.QLabel(GenerateROIMaskTabContent) self.maxSliceLabel.setObjectName("maxSliceLabel") self.horizontalLayout_6.addWidget(self.maxSliceLabel) self.horizontalLayout_3.addLayout(self.horizontalLayout_6) spacerItem1 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum) self.horizontalLayout_3.addItem(spacerItem1) self.gridLayout.addLayout(self.horizontalLayout_3, 2, 1, 1, 1) self.label_11 = QtWidgets.QLabel(GenerateROIMaskTabContent) self.label_11.setObjectName("label_11") self.gridLayout.addWidget(self.label_11, 1, 0, 1, 1) self.label_14 = QtWidgets.QLabel(GenerateROIMaskTabContent) font = QtGui.QFont() font.setItalic(True) self.label_14.setFont(font) self.label_14.setObjectName("label_14") self.gridLayout.addWidget(self.label_14, 3, 2, 1, 1) self.horizontalLayout_4 = QtWidgets.QHBoxLayout() self.horizontalLayout_4.setContentsMargins(0, -1, 0, -1) self.horizontalLayout_4.setObjectName("horizontalLayout_4") self.selectOutputFileInput = QtWidgets.QPushButton(GenerateROIMaskTabContent) self.selectOutputFileInput.setObjectName("selectOutputFileInput") self.horizontalLayout_4.addWidget(self.selectOutputFileInput) self.selectedOutputFileText = QtWidgets.QLineEdit(GenerateROIMaskTabContent) self.selectedOutputFileText.setObjectName("selectedOutputFileText") self.horizontalLayout_4.addWidget(self.selectedOutputFileText) self.gridLayout.addLayout(self.horizontalLayout_4, 3, 1, 1, 1) self.label_12 = QtWidgets.QLabel(GenerateROIMaskTabContent) self.label_12.setObjectName("label_12") self.gridLayout.addWidget(self.label_12, 2, 0, 1, 1) self.verticalLayout.addLayout(self.gridLayout) self.line_2 = QtWidgets.QFrame(GenerateROIMaskTabContent) self.line_2.setFrameShape(QtWidgets.QFrame.HLine) self.line_2.setFrameShadow(QtWidgets.QFrame.Sunken) self.line_2.setObjectName("line_2") self.verticalLayout.addWidget(self.line_2) self.horizontalLayout = QtWidgets.QHBoxLayout() self.horizontalLayout.setContentsMargins(-1, 6, -1, 0) self.horizontalLayout.setObjectName("horizontalLayout") self.generateButton = QtWidgets.QPushButton(GenerateROIMaskTabContent) self.generateButton.setEnabled(False) self.generateButton.setObjectName("generateButton") self.horizontalLayout.addWidget(self.generateButton) self.viewButton = QtWidgets.QPushButton(GenerateROIMaskTabContent) self.viewButton.setEnabled(False) self.viewButton.setObjectName("viewButton") self.horizontalLayout.addWidget(self.viewButton) 
spacerItem2 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum) self.horizontalLayout.addItem(spacerItem2) self.verticalLayout.addLayout(self.horizontalLayout) spacerItem3 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding) self.verticalLayout.addItem(spacerItem3) self.retranslateUi(GenerateROIMaskTabContent) QtCore.QMetaObject.connectSlotsByName(GenerateROIMaskTabContent) GenerateROIMaskTabContent.setTabOrder(self.selectMaskButton, self.selectedMaskText) GenerateROIMaskTabContent.setTabOrder(self.selectedMaskText, self.dimensionInput) GenerateROIMaskTabContent.setTabOrder(self.dimensionInput, self.sliceInput) GenerateROIMaskTabContent.setTabOrder(self.sliceInput, self.selectOutputFileInput) GenerateROIMaskTabContent.setTabOrder(self.selectOutputFileInput, self.selectedOutputFileText) GenerateROIMaskTabContent.setTabOrder(self.selectedOutputFileText, self.generateButton) GenerateROIMaskTabContent.setTabOrder(self.generateButton, self.viewButton) def retranslateUi(self, GenerateROIMaskTabContent): _translate = QtCore.QCoreApplication.translate GenerateROIMaskTabContent.setWindowTitle(_translate("GenerateROIMaskTabContent", "Form")) self.label.setText(_translate("GenerateROIMaskTabContent", "Generate ROI mask")) self.label_2.setText(_translate("GenerateROIMaskTabContent", "Create a mask with a Region Of Interest including only the voxels in the selected slice.")) self.label_6.setText(_translate("GenerateROIMaskTabContent", "Select brain mask:")) self.label_3.setText(_translate("GenerateROIMaskTabContent", "Select output file:")) self.label_10.setText(_translate("GenerateROIMaskTabContent", "(The index of the single slice in the current dimension)")) self.selectMaskButton.setText(_translate("GenerateROIMaskTabContent", "Browse")) self.label_4.setText(_translate("GenerateROIMaskTabContent", "(Select your brain mask)")) self.label_5.setText(_translate("GenerateROIMaskTabContent", "(The dimension of the single slice)")) self.label_7.setText(_translate("GenerateROIMaskTabContent", "/ ")) self.maxSliceLabel.setText(_translate("GenerateROIMaskTabContent", "x")) self.label_11.setText(_translate("GenerateROIMaskTabContent", "Select dimension:")) self.label_14.setText(_translate("GenerateROIMaskTabContent", "(Default is __.nii.gz)")) self.selectOutputFileInput.setText(_translate("GenerateROIMaskTabContent", "Browse")) self.label_12.setText(_translate("GenerateROIMaskTabContent", "Select slice:")) self.generateButton.setText(_translate("GenerateROIMaskTabContent", "Generate")) self.viewButton.setText(_translate("GenerateROIMaskTabContent", "View ROI")) PKIsIrrvv?mdt/gui/model_fit/design/ui_generate_protocol_load_gb_dialog.py# -*- coding: utf-8 -*- # Form implementation generated from reading ui file 'generate_protocol_load_gb_dialog.ui' # # Created by: PyQt5 UI code generator 5.4.2 # # WARNING! All changes made in this file will be lost! 
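#
# Illustrative usage sketch (not part of the pyuic5 output): a generated class such as
# Ui_LoadGBDialog below does not subclass any Qt widget itself; it is bound to a plain
# QDialog by calling setupUi() on it. A minimal standalone run could look like:
#
#     import sys
#     from PyQt5 import QtWidgets
#
#     app = QtWidgets.QApplication(sys.argv)
#     dialog = QtWidgets.QDialog()
#     ui = Ui_LoadGBDialog()
#     ui.setupUi(dialog)        # builds the widget tree onto the dialog
#     dialog.show()
#     sys.exit(app.exec_())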
from PyQt5 import QtCore, QtGui, QtWidgets class Ui_LoadGBDialog(object): def setupUi(self, LoadGBDialog): LoadGBDialog.setObjectName("LoadGBDialog") LoadGBDialog.resize(831, 227) self.verticalLayout = QtWidgets.QVBoxLayout(LoadGBDialog) self.verticalLayout.setObjectName("verticalLayout") self.verticalLayout_3 = QtWidgets.QVBoxLayout() self.verticalLayout_3.setSpacing(0) self.verticalLayout_3.setObjectName("verticalLayout_3") self.label_3 = QtWidgets.QLabel(LoadGBDialog) font = QtGui.QFont() font.setPointSize(14) self.label_3.setFont(font) self.label_3.setObjectName("label_3") self.verticalLayout_3.addWidget(self.label_3) self.label_4 = QtWidgets.QLabel(LoadGBDialog) font = QtGui.QFont() font.setItalic(True) self.label_4.setFont(font) self.label_4.setObjectName("label_4") self.verticalLayout_3.addWidget(self.label_4) self.verticalLayout.addLayout(self.verticalLayout_3) self.line = QtWidgets.QFrame(LoadGBDialog) self.line.setFrameShape(QtWidgets.QFrame.HLine) self.line.setFrameShadow(QtWidgets.QFrame.Sunken) self.line.setObjectName("line") self.verticalLayout.addWidget(self.line) self.gridLayout = QtWidgets.QGridLayout() self.gridLayout.setObjectName("gridLayout") self.horizontalLayout = QtWidgets.QHBoxLayout() self.horizontalLayout.setObjectName("horizontalLayout") self.bvalFileChooser = QtWidgets.QPushButton(LoadGBDialog) self.bvalFileChooser.setEnabled(True) self.bvalFileChooser.setObjectName("bvalFileChooser") self.horizontalLayout.addWidget(self.bvalFileChooser) self.bvalFileInput = QtWidgets.QLineEdit(LoadGBDialog) self.bvalFileInput.setEnabled(True) self.bvalFileInput.setObjectName("bvalFileInput") self.horizontalLayout.addWidget(self.bvalFileInput) self.gridLayout.addLayout(self.horizontalLayout, 1, 1, 1, 1) self.label_11 = QtWidgets.QLabel(LoadGBDialog) font = QtGui.QFont() font.setItalic(True) self.label_11.setFont(font) self.label_11.setObjectName("label_11") self.gridLayout.addWidget(self.label_11, 0, 2, 1, 1) self.label_5 = QtWidgets.QLabel(LoadGBDialog) self.label_5.setObjectName("label_5") self.gridLayout.addWidget(self.label_5, 0, 0, 1, 1) self.label_6 = QtWidgets.QLabel(LoadGBDialog) self.label_6.setObjectName("label_6") self.gridLayout.addWidget(self.label_6, 1, 0, 1, 1) self.label_12 = QtWidgets.QLabel(LoadGBDialog) font = QtGui.QFont() font.setItalic(True) self.label_12.setFont(font) self.label_12.setObjectName("label_12") self.gridLayout.addWidget(self.label_12, 1, 2, 1, 1) self.horizontalLayout_2 = QtWidgets.QHBoxLayout() self.horizontalLayout_2.setObjectName("horizontalLayout_2") self.bvecFileChooser = QtWidgets.QPushButton(LoadGBDialog) self.bvecFileChooser.setObjectName("bvecFileChooser") self.horizontalLayout_2.addWidget(self.bvecFileChooser) self.bvecFileInput = QtWidgets.QLineEdit(LoadGBDialog) self.bvecFileInput.setObjectName("bvecFileInput") self.horizontalLayout_2.addWidget(self.bvecFileInput) self.gridLayout.addLayout(self.horizontalLayout_2, 0, 1, 1, 1) self.label_13 = QtWidgets.QLabel(LoadGBDialog) font = QtGui.QFont() font.setItalic(True) self.label_13.setFont(font) self.label_13.setObjectName("label_13") self.gridLayout.addWidget(self.label_13, 2, 2, 1, 1) self.bvalScale = QtWidgets.QLineEdit(LoadGBDialog) self.bvalScale.setObjectName("bvalScale") self.gridLayout.addWidget(self.bvalScale, 2, 1, 1, 1) self.label_7 = QtWidgets.QLabel(LoadGBDialog) self.label_7.setObjectName("label_7") self.gridLayout.addWidget(self.label_7, 2, 0, 1, 1) self.verticalLayout.addLayout(self.gridLayout) spacerItem = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, 
QtWidgets.QSizePolicy.Expanding) self.verticalLayout.addItem(spacerItem) self.line_3 = QtWidgets.QFrame(LoadGBDialog) self.line_3.setFrameShape(QtWidgets.QFrame.HLine) self.line_3.setFrameShadow(QtWidgets.QFrame.Sunken) self.line_3.setObjectName("line_3") self.verticalLayout.addWidget(self.line_3) self.buttonBox = QtWidgets.QDialogButtonBox(LoadGBDialog) self.buttonBox.setOrientation(QtCore.Qt.Horizontal) self.buttonBox.setStandardButtons(QtWidgets.QDialogButtonBox.Cancel|QtWidgets.QDialogButtonBox.Ok) self.buttonBox.setObjectName("buttonBox") self.verticalLayout.addWidget(self.buttonBox) self.retranslateUi(LoadGBDialog) self.buttonBox.accepted.connect(LoadGBDialog.accept) self.buttonBox.rejected.connect(LoadGBDialog.reject) QtCore.QMetaObject.connectSlotsByName(LoadGBDialog) LoadGBDialog.setTabOrder(self.bvalFileChooser, self.bvalFileInput) def retranslateUi(self, LoadGBDialog): _translate = QtCore.QCoreApplication.translate LoadGBDialog.setWindowTitle(_translate("LoadGBDialog", "Load g & b")) self.label_3.setText(_translate("LoadGBDialog", "Load g & b")) self.label_4.setText(_translate("LoadGBDialog", "Load the bvec (g) and bval (b) in the protocol")) self.bvalFileChooser.setText(_translate("LoadGBDialog", "Browse")) self.label_11.setText(_translate("LoadGBDialog", "(The file containing the gradient directions)")) self.label_5.setText(_translate("LoadGBDialog", "Bvec (g) file:")) self.label_6.setText(_translate("LoadGBDialog", "Bval (b) file:")) self.label_12.setText(_translate("LoadGBDialog", "(The file containing the b-values)")) self.bvecFileChooser.setText(_translate("LoadGBDialog", "Browse")) self.label_13.setText(_translate("LoadGBDialog", "(We expect the b-values in the\n" "protocol in units of s/m^2)")) self.bvalScale.setText(_translate("LoadGBDialog", "1e6")) self.label_7.setText(_translate("LoadGBDialog", "B-value rescale:")) PKIsI ٴ{R7R7:mdt/gui/model_fit/design/ui_optimization_options_dialog.py# -*- coding: utf-8 -*- # Form implementation generated from reading ui file 'optimization_options_dialog.ui' # # Created by: PyQt5 UI code generator 5.4.2 # # WARNING! All changes made in this file will be lost! 
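#
# Illustrative note (a sketch, not part of the pyuic5 output): the Yes/No and
# Float/Double choices in this dialog are QRadioButtons collected in QButtonGroups
# (defaultOptimizerGroup, floatPrecisionGroup, recalculateAllGroup), so at most one
# button per group can be checked. Hypothetical calling code would read the selection
# back after the dialog is accepted, for example:
#
#     if dialog.exec_() == QtWidgets.QDialog.Accepted:
#         use_default_optimizer = ui.defaultOptimizer_True.isChecked()
#         double_precision = ui.doublePrecision.isChecked()
#         recalculate_all = ui.recalculateAll_True.isChecked()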
from PyQt5 import QtCore, QtGui, QtWidgets class Ui_OptimizationOptionsDialog(object): def setupUi(self, OptimizationOptionsDialog): OptimizationOptionsDialog.setObjectName("OptimizationOptionsDialog") OptimizationOptionsDialog.resize(843, 337) self.verticalLayout = QtWidgets.QVBoxLayout(OptimizationOptionsDialog) self.verticalLayout.setObjectName("verticalLayout") self.verticalLayout_3 = QtWidgets.QVBoxLayout() self.verticalLayout_3.setSpacing(0) self.verticalLayout_3.setObjectName("verticalLayout_3") self.label_3 = QtWidgets.QLabel(OptimizationOptionsDialog) font = QtGui.QFont() font.setPointSize(14) self.label_3.setFont(font) self.label_3.setObjectName("label_3") self.verticalLayout_3.addWidget(self.label_3) self.label_4 = QtWidgets.QLabel(OptimizationOptionsDialog) font = QtGui.QFont() font.setItalic(True) self.label_4.setFont(font) self.label_4.setObjectName("label_4") self.verticalLayout_3.addWidget(self.label_4) self.verticalLayout.addLayout(self.verticalLayout_3) self.line = QtWidgets.QFrame(OptimizationOptionsDialog) self.line.setFrameShape(QtWidgets.QFrame.HLine) self.line.setFrameShadow(QtWidgets.QFrame.Sunken) self.line.setObjectName("line") self.verticalLayout.addWidget(self.line) self.gridLayout = QtWidgets.QGridLayout() self.gridLayout.setObjectName("gridLayout") self.line_5 = QtWidgets.QFrame(OptimizationOptionsDialog) self.line_5.setFrameShape(QtWidgets.QFrame.HLine) self.line_5.setFrameShadow(QtWidgets.QFrame.Sunken) self.line_5.setObjectName("line_5") self.gridLayout.addWidget(self.line_5, 3, 0, 1, 3) self.label_13 = QtWidgets.QLabel(OptimizationOptionsDialog) font = QtGui.QFont() font.setItalic(True) self.label_13.setFont(font) self.label_13.setObjectName("label_13") self.gridLayout.addWidget(self.label_13, 9, 2, 1, 1) self.label_14 = QtWidgets.QLabel(OptimizationOptionsDialog) font = QtGui.QFont() font.setItalic(True) self.label_14.setFont(font) self.label_14.setObjectName("label_14") self.gridLayout.addWidget(self.label_14, 0, 2, 1, 1) self.horizontalLayout_3 = QtWidgets.QHBoxLayout() self.horizontalLayout_3.setObjectName("horizontalLayout_3") self.recalculateAll_True = QtWidgets.QRadioButton(OptimizationOptionsDialog) self.recalculateAll_True.setObjectName("recalculateAll_True") self.recalculateAllGroup = QtWidgets.QButtonGroup(OptimizationOptionsDialog) self.recalculateAllGroup.setObjectName("recalculateAllGroup") self.recalculateAllGroup.addButton(self.recalculateAll_True) self.horizontalLayout_3.addWidget(self.recalculateAll_True) self.recalculateAll_False = QtWidgets.QRadioButton(OptimizationOptionsDialog) self.recalculateAll_False.setChecked(True) self.recalculateAll_False.setObjectName("recalculateAll_False") self.recalculateAllGroup.addButton(self.recalculateAll_False) self.horizontalLayout_3.addWidget(self.recalculateAll_False) spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum) self.horizontalLayout_3.addItem(spacerItem) self.gridLayout.addLayout(self.horizontalLayout_3, 9, 1, 1, 1) self.label_10 = QtWidgets.QLabel(OptimizationOptionsDialog) font = QtGui.QFont() font.setItalic(True) self.label_10.setFont(font) self.label_10.setObjectName("label_10") self.gridLayout.addWidget(self.label_10, 4, 2, 1, 1) self.label_6 = QtWidgets.QLabel(OptimizationOptionsDialog) self.label_6.setObjectName("label_6") self.gridLayout.addWidget(self.label_6, 6, 0, 1, 1) self.label_5 = QtWidgets.QLabel(OptimizationOptionsDialog) self.label_5.setObjectName("label_5") self.gridLayout.addWidget(self.label_5, 5, 0, 1, 1) 
self.horizontalLayout = QtWidgets.QHBoxLayout() self.horizontalLayout.setObjectName("horizontalLayout") self.defaultOptimizer_True = QtWidgets.QRadioButton(OptimizationOptionsDialog) self.defaultOptimizer_True.setChecked(True) self.defaultOptimizer_True.setObjectName("defaultOptimizer_True") self.defaultOptimizerGroup = QtWidgets.QButtonGroup(OptimizationOptionsDialog) self.defaultOptimizerGroup.setObjectName("defaultOptimizerGroup") self.defaultOptimizerGroup.addButton(self.defaultOptimizer_True) self.horizontalLayout.addWidget(self.defaultOptimizer_True) self.defaultOptimizer_False = QtWidgets.QRadioButton(OptimizationOptionsDialog) self.defaultOptimizer_False.setObjectName("defaultOptimizer_False") self.defaultOptimizerGroup.addButton(self.defaultOptimizer_False) self.horizontalLayout.addWidget(self.defaultOptimizer_False) spacerItem1 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum) self.horizontalLayout.addItem(spacerItem1) self.gridLayout.addLayout(self.horizontalLayout, 4, 1, 1, 1) self.optimizationRoutine = QtWidgets.QComboBox(OptimizationOptionsDialog) self.optimizationRoutine.setEnabled(False) self.optimizationRoutine.setEditable(False) self.optimizationRoutine.setObjectName("optimizationRoutine") self.gridLayout.addWidget(self.optimizationRoutine, 5, 1, 1, 1) self.label_12 = QtWidgets.QLabel(OptimizationOptionsDialog) font = QtGui.QFont() font.setItalic(True) self.label_12.setFont(font) self.label_12.setObjectName("label_12") self.gridLayout.addWidget(self.label_12, 6, 2, 1, 1) self.label = QtWidgets.QLabel(OptimizationOptionsDialog) self.label.setObjectName("label") self.gridLayout.addWidget(self.label, 9, 0, 1, 1) self.label_7 = QtWidgets.QLabel(OptimizationOptionsDialog) self.label_7.setObjectName("label_7") self.gridLayout.addWidget(self.label_7, 0, 0, 1, 1) self.horizontalLayout_2 = QtWidgets.QHBoxLayout() self.horizontalLayout_2.setObjectName("horizontalLayout_2") self.floatPrecision = QtWidgets.QRadioButton(OptimizationOptionsDialog) self.floatPrecision.setChecked(True) self.floatPrecision.setObjectName("floatPrecision") self.floatPrecisionGroup = QtWidgets.QButtonGroup(OptimizationOptionsDialog) self.floatPrecisionGroup.setObjectName("floatPrecisionGroup") self.floatPrecisionGroup.addButton(self.floatPrecision) self.horizontalLayout_2.addWidget(self.floatPrecision) self.doublePrecision = QtWidgets.QRadioButton(OptimizationOptionsDialog) self.doublePrecision.setObjectName("doublePrecision") self.floatPrecisionGroup.addButton(self.doublePrecision) self.horizontalLayout_2.addWidget(self.doublePrecision) spacerItem2 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum) self.horizontalLayout_2.addItem(spacerItem2) self.gridLayout.addLayout(self.horizontalLayout_2, 2, 1, 1, 1) self.label_8 = QtWidgets.QLabel(OptimizationOptionsDialog) self.label_8.setObjectName("label_8") self.gridLayout.addWidget(self.label_8, 2, 0, 1, 1) self.label_11 = QtWidgets.QLabel(OptimizationOptionsDialog) font = QtGui.QFont() font.setItalic(True) self.label_11.setFont(font) self.label_11.setObjectName("label_11") self.gridLayout.addWidget(self.label_11, 5, 2, 1, 1) self.label_15 = QtWidgets.QLabel(OptimizationOptionsDialog) font = QtGui.QFont() font.setItalic(True) self.label_15.setFont(font) self.label_15.setObjectName("label_15") self.gridLayout.addWidget(self.label_15, 2, 2, 1, 1) self.line_4 = QtWidgets.QFrame(OptimizationOptionsDialog) self.line_4.setFrameShape(QtWidgets.QFrame.HLine) 
self.line_4.setFrameShadow(QtWidgets.QFrame.Sunken) self.line_4.setObjectName("line_4") self.gridLayout.addWidget(self.line_4, 8, 0, 1, 3) self.label_2 = QtWidgets.QLabel(OptimizationOptionsDialog) self.label_2.setObjectName("label_2") self.gridLayout.addWidget(self.label_2, 4, 0, 1, 1) self.line_2 = QtWidgets.QFrame(OptimizationOptionsDialog) self.line_2.setFrameShape(QtWidgets.QFrame.HLine) self.line_2.setFrameShadow(QtWidgets.QFrame.Sunken) self.line_2.setObjectName("line_2") self.gridLayout.addWidget(self.line_2, 1, 0, 1, 3) self.patience = QtWidgets.QLineEdit(OptimizationOptionsDialog) self.patience.setEnabled(False) self.patience.setObjectName("patience") self.gridLayout.addWidget(self.patience, 6, 1, 1, 1) self.horizontalLayout_5 = QtWidgets.QHBoxLayout() self.horizontalLayout_5.setObjectName("horizontalLayout_5") self.noiseStd = QtWidgets.QLineEdit(OptimizationOptionsDialog) self.noiseStd.setObjectName("noiseStd") self.horizontalLayout_5.addWidget(self.noiseStd) self.noiseStdFileSelect = QtWidgets.QPushButton(OptimizationOptionsDialog) self.noiseStdFileSelect.setObjectName("noiseStdFileSelect") self.horizontalLayout_5.addWidget(self.noiseStdFileSelect) self.gridLayout.addLayout(self.horizontalLayout_5, 0, 1, 1, 1) self.verticalLayout.addLayout(self.gridLayout) spacerItem3 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding) self.verticalLayout.addItem(spacerItem3) self.line_3 = QtWidgets.QFrame(OptimizationOptionsDialog) self.line_3.setFrameShape(QtWidgets.QFrame.HLine) self.line_3.setFrameShadow(QtWidgets.QFrame.Sunken) self.line_3.setObjectName("line_3") self.verticalLayout.addWidget(self.line_3) self.buttonBox = QtWidgets.QDialogButtonBox(OptimizationOptionsDialog) self.buttonBox.setOrientation(QtCore.Qt.Horizontal) self.buttonBox.setStandardButtons(QtWidgets.QDialogButtonBox.Cancel|QtWidgets.QDialogButtonBox.Ok) self.buttonBox.setObjectName("buttonBox") self.verticalLayout.addWidget(self.buttonBox) self.retranslateUi(OptimizationOptionsDialog) self.buttonBox.accepted.connect(OptimizationOptionsDialog.accept) self.buttonBox.rejected.connect(OptimizationOptionsDialog.reject) QtCore.QMetaObject.connectSlotsByName(OptimizationOptionsDialog) OptimizationOptionsDialog.setTabOrder(self.noiseStd, self.noiseStdFileSelect) OptimizationOptionsDialog.setTabOrder(self.noiseStdFileSelect, self.floatPrecision) OptimizationOptionsDialog.setTabOrder(self.floatPrecision, self.doublePrecision) OptimizationOptionsDialog.setTabOrder(self.doublePrecision, self.defaultOptimizer_True) OptimizationOptionsDialog.setTabOrder(self.defaultOptimizer_True, self.defaultOptimizer_False) OptimizationOptionsDialog.setTabOrder(self.defaultOptimizer_False, self.optimizationRoutine) OptimizationOptionsDialog.setTabOrder(self.optimizationRoutine, self.patience) OptimizationOptionsDialog.setTabOrder(self.patience, self.recalculateAll_True) OptimizationOptionsDialog.setTabOrder(self.recalculateAll_True, self.recalculateAll_False) def retranslateUi(self, OptimizationOptionsDialog): _translate = QtCore.QCoreApplication.translate OptimizationOptionsDialog.setWindowTitle(_translate("OptimizationOptionsDialog", "Optimization options")) self.label_3.setText(_translate("OptimizationOptionsDialog", "Optimization options")) self.label_4.setText(_translate("OptimizationOptionsDialog", "Advanced options for the model fitting procedure")) self.label_13.setText(_translate("OptimizationOptionsDialog", "(For cascades, if we want to recalculate the entire chain)")) 
self.label_14.setText(_translate("OptimizationOptionsDialog", "(Empty for auto detection, or set a scalar or a path to a nifti file)")) self.recalculateAll_True.setText(_translate("OptimizationOptionsDialog", "Yes ")) self.recalculateAll_False.setText(_translate("OptimizationOptionsDialog", "No")) self.label_10.setText(_translate("OptimizationOptionsDialog", "(Enables manual selection of the optimization routine)")) self.label_6.setText(_translate("OptimizationOptionsDialog", "Patience:")) self.label_5.setText(_translate("OptimizationOptionsDialog", "Optimization routine:")) self.defaultOptimizer_True.setText(_translate("OptimizationOptionsDialog", "Yes ")) self.defaultOptimizer_False.setText(_translate("OptimizationOptionsDialog", "No")) self.label_12.setText(_translate("OptimizationOptionsDialog", "(Scales the number of iterations)")) self.label.setText(_translate("OptimizationOptionsDialog", "Recalculate all:")) self.label_7.setText(_translate("OptimizationOptionsDialog", "Noise standard deviation:")) self.floatPrecision.setText(_translate("OptimizationOptionsDialog", "Float")) self.doublePrecision.setText(_translate("OptimizationOptionsDialog", "Double")) self.label_8.setText(_translate("OptimizationOptionsDialog", "Float precision:")) self.label_11.setText(_translate("OptimizationOptionsDialog", "(Manual select the routine to use)")) self.label_15.setText(_translate("OptimizationOptionsDialog", "(The precision for the calculations)")) self.label_2.setText(_translate("OptimizationOptionsDialog", "Use default optimizer:")) self.noiseStdFileSelect.setText(_translate("OptimizationOptionsDialog", "File browser")) PKIsIז'mdt/gui/model_fit/design/ui_main_gui.py# -*- coding: utf-8 -*- # Form implementation generated from reading ui file 'main_gui.ui' # # Created by: PyQt5 UI code generator 5.4.2 # # WARNING! All changes made in this file will be lost! 
from PyQt5 import QtCore, QtGui, QtWidgets class Ui_MainWindow(object): def setupUi(self, MainWindow): MainWindow.setObjectName("MainWindow") MainWindow.resize(870, 650) sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred) sizePolicy.setHorizontalStretch(0) sizePolicy.setVerticalStretch(0) sizePolicy.setHeightForWidth(MainWindow.sizePolicy().hasHeightForWidth()) MainWindow.setSizePolicy(sizePolicy) icon = QtGui.QIcon() icon.addPixmap(QtGui.QPixmap(":/main_gui/logo"), QtGui.QIcon.Normal, QtGui.QIcon.Off) MainWindow.setWindowIcon(icon) MainWindow.setTabShape(QtWidgets.QTabWidget.Rounded) self.centralwidget = QtWidgets.QWidget(MainWindow) sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred) sizePolicy.setHorizontalStretch(0) sizePolicy.setVerticalStretch(0) sizePolicy.setHeightForWidth(self.centralwidget.sizePolicy().hasHeightForWidth()) self.centralwidget.setSizePolicy(sizePolicy) self.centralwidget.setObjectName("centralwidget") self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.centralwidget) self.verticalLayout_2.setContentsMargins(3, 0, 3, 0) self.verticalLayout_2.setSpacing(0) self.verticalLayout_2.setObjectName("verticalLayout_2") self.splitter = QtWidgets.QSplitter(self.centralwidget) self.splitter.setOrientation(QtCore.Qt.Vertical) self.splitter.setObjectName("splitter") self.MainTabs = QtWidgets.QTabWidget(self.splitter) sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding) sizePolicy.setHorizontalStretch(0) sizePolicy.setVerticalStretch(1) sizePolicy.setHeightForWidth(self.MainTabs.sizePolicy().hasHeightForWidth()) self.MainTabs.setSizePolicy(sizePolicy) self.MainTabs.setObjectName("MainTabs") self.fitModelTab = QtWidgets.QWidget() self.fitModelTab.setObjectName("fitModelTab") self.MainTabs.addTab(self.fitModelTab, "") self.generateBrainMaskTab = QtWidgets.QWidget() self.generateBrainMaskTab.setObjectName("generateBrainMaskTab") self.MainTabs.addTab(self.generateBrainMaskTab, "") self.generateROIMaskTab = QtWidgets.QWidget() self.generateROIMaskTab.setObjectName("generateROIMaskTab") self.MainTabs.addTab(self.generateROIMaskTab, "") self.generateProtocolTab = QtWidgets.QWidget() self.generateProtocolTab.setObjectName("generateProtocolTab") self.MainTabs.addTab(self.generateProtocolTab, "") self.viewResultsTab = QtWidgets.QWidget() self.viewResultsTab.setObjectName("viewResultsTab") self.MainTabs.addTab(self.viewResultsTab, "") self.loggingTextBox = QtWidgets.QPlainTextEdit(self.splitter) sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding) sizePolicy.setHorizontalStretch(0) sizePolicy.setVerticalStretch(0) sizePolicy.setHeightForWidth(self.loggingTextBox.sizePolicy().hasHeightForWidth()) self.loggingTextBox.setSizePolicy(sizePolicy) font = QtGui.QFont() font.setFamily("Droid Sans Mono") self.loggingTextBox.setFont(font) self.loggingTextBox.setFrameShape(QtWidgets.QFrame.StyledPanel) self.loggingTextBox.setFrameShadow(QtWidgets.QFrame.Sunken) self.loggingTextBox.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAsNeeded) self.loggingTextBox.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAsNeeded) self.loggingTextBox.setLineWrapMode(QtWidgets.QPlainTextEdit.NoWrap) self.loggingTextBox.setReadOnly(True) self.loggingTextBox.setPlainText("") self.loggingTextBox.setTabStopWidth(80) self.loggingTextBox.setObjectName("loggingTextBox") self.verticalLayout_2.addWidget(self.splitter) self.horizontalLayout = 
QtWidgets.QHBoxLayout() self.horizontalLayout.setContentsMargins(-1, 0, 8, -1) self.horizontalLayout.setSpacing(8) self.horizontalLayout.setObjectName("horizontalLayout") spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum) self.horizontalLayout.addItem(spacerItem) self.executionStatusLabel = QtWidgets.QLabel(self.centralwidget) self.executionStatusLabel.setObjectName("executionStatusLabel") self.horizontalLayout.addWidget(self.executionStatusLabel) self.executionStatusIcon = QtWidgets.QLabel(self.centralwidget) self.executionStatusIcon.setObjectName("executionStatusIcon") self.horizontalLayout.addWidget(self.executionStatusIcon) self.verticalLayout_2.addLayout(self.horizontalLayout) MainWindow.setCentralWidget(self.centralwidget) self.menubar = QtWidgets.QMenuBar(MainWindow) self.menubar.setGeometry(QtCore.QRect(0, 0, 870, 27)) self.menubar.setObjectName("menubar") self.menuMenu = QtWidgets.QMenu(self.menubar) self.menuMenu.setObjectName("menuMenu") self.menuHelp = QtWidgets.QMenu(self.menubar) self.menuHelp.setObjectName("menuHelp") MainWindow.setMenuBar(self.menubar) self.actionExit = QtWidgets.QAction(MainWindow) self.actionExit.setObjectName("actionExit") self.action_RuntimeSettings = QtWidgets.QAction(MainWindow) self.action_RuntimeSettings.setObjectName("action_RuntimeSettings") self.actionAbout = QtWidgets.QAction(MainWindow) self.actionAbout.setObjectName("actionAbout") self.menuMenu.addAction(self.action_RuntimeSettings) self.menuMenu.addSeparator() self.menuMenu.addAction(self.actionExit) self.menuHelp.addAction(self.actionAbout) self.menubar.addAction(self.menuMenu.menuAction()) self.menubar.addAction(self.menuHelp.menuAction()) self.retranslateUi(MainWindow) self.MainTabs.setCurrentIndex(0) self.actionExit.triggered.connect(MainWindow.close) QtCore.QMetaObject.connectSlotsByName(MainWindow) def retranslateUi(self, MainWindow): _translate = QtCore.QCoreApplication.translate MainWindow.setWindowTitle(_translate("MainWindow", "Maastricht Diffusion Toolbox")) self.MainTabs.setTabText(self.MainTabs.indexOf(self.fitModelTab), _translate("MainWindow", "Fit model")) self.MainTabs.setTabText(self.MainTabs.indexOf(self.generateBrainMaskTab), _translate("MainWindow", "Generate brain mask")) self.MainTabs.setTabText(self.MainTabs.indexOf(self.generateROIMaskTab), _translate("MainWindow", "Generate ROI mask")) self.MainTabs.setTabText(self.MainTabs.indexOf(self.generateProtocolTab), _translate("MainWindow", "Generate protocol file")) self.MainTabs.setTabText(self.MainTabs.indexOf(self.viewResultsTab), _translate("MainWindow", "View results")) self.executionStatusLabel.setText(_translate("MainWindow", "TextLabel")) self.executionStatusIcon.setText(_translate("MainWindow", "TextLabel")) self.menuMenu.setTitle(_translate("MainWindow", "&File")) self.menuHelp.setTitle(_translate("MainWindow", "&Help")) self.actionExit.setText(_translate("MainWindow", "&Quit")) self.actionExit.setShortcut(_translate("MainWindow", "Ctrl+Q")) self.action_RuntimeSettings.setText(_translate("MainWindow", "&Runtime settings")) self.actionAbout.setText(_translate("MainWindow", "&About")) from . import main_gui_rc PKIsIeZ666mdt/gui/model_fit/design/ui_generate_brain_mask_tab.py# -*- coding: utf-8 -*- # Form implementation generated from reading ui file 'generate_brain_mask_tab.ui' # # Created by: PyQt5 UI code generator 5.4.2 # # WARNING! All changes made in this file will be lost! 
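#
# Illustrative note: files like this one are meant to be regenerated from their Qt
# Designer .ui sources rather than edited by hand (hence the warning above). Assuming
# the .ui files sit next to these modules, a typical regeneration command would be:
#
#     pyuic5 generate_brain_mask_tab.ui -o ui_generate_brain_mask_tab.py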
from PyQt5 import QtCore, QtGui, QtWidgets class Ui_GenerateBrainMaskTabContent(object): def setupUi(self, GenerateBrainMaskTabContent): GenerateBrainMaskTabContent.setObjectName("GenerateBrainMaskTabContent") GenerateBrainMaskTabContent.resize(827, 427) self.verticalLayout = QtWidgets.QVBoxLayout(GenerateBrainMaskTabContent) self.verticalLayout.setContentsMargins(-1, 11, -1, -1) self.verticalLayout.setObjectName("verticalLayout") self.verticalLayout_2 = QtWidgets.QVBoxLayout() self.verticalLayout_2.setContentsMargins(-1, -1, -1, 0) self.verticalLayout_2.setSpacing(0) self.verticalLayout_2.setObjectName("verticalLayout_2") self.label = QtWidgets.QLabel(GenerateBrainMaskTabContent) font = QtGui.QFont() font.setPointSize(14) self.label.setFont(font) self.label.setObjectName("label") self.verticalLayout_2.addWidget(self.label) self.label_2 = QtWidgets.QLabel(GenerateBrainMaskTabContent) font = QtGui.QFont() font.setItalic(True) self.label_2.setFont(font) self.label_2.setObjectName("label_2") self.verticalLayout_2.addWidget(self.label_2) self.verticalLayout.addLayout(self.verticalLayout_2) self.line = QtWidgets.QFrame(GenerateBrainMaskTabContent) self.line.setFrameShadow(QtWidgets.QFrame.Sunken) self.line.setLineWidth(1) self.line.setFrameShape(QtWidgets.QFrame.HLine) self.line.setObjectName("line") self.verticalLayout.addWidget(self.line) self.gridLayout = QtWidgets.QGridLayout() self.gridLayout.setSizeConstraint(QtWidgets.QLayout.SetDefaultConstraint) self.gridLayout.setHorizontalSpacing(10) self.gridLayout.setObjectName("gridLayout") self.label_5 = QtWidgets.QLabel(GenerateBrainMaskTabContent) font = QtGui.QFont() font.setItalic(True) self.label_5.setFont(font) self.label_5.setObjectName("label_5") self.gridLayout.addWidget(self.label_5, 1, 2, 1, 1) self.label_12 = QtWidgets.QLabel(GenerateBrainMaskTabContent) self.label_12.setObjectName("label_12") self.gridLayout.addWidget(self.label_12, 5, 0, 1, 1) self.horizontalLayout_2 = QtWidgets.QHBoxLayout() self.horizontalLayout_2.setObjectName("horizontalLayout_2") self.selectImageButton = QtWidgets.QPushButton(GenerateBrainMaskTabContent) self.selectImageButton.setObjectName("selectImageButton") self.horizontalLayout_2.addWidget(self.selectImageButton) self.selectedImageText = QtWidgets.QLineEdit(GenerateBrainMaskTabContent) self.selectedImageText.setText("") self.selectedImageText.setObjectName("selectedImageText") self.horizontalLayout_2.addWidget(self.selectedImageText) self.gridLayout.addLayout(self.horizontalLayout_2, 0, 1, 1, 1) self.label_4 = QtWidgets.QLabel(GenerateBrainMaskTabContent) font = QtGui.QFont() font.setItalic(True) self.label_4.setFont(font) self.label_4.setObjectName("label_4") self.gridLayout.addWidget(self.label_4, 0, 2, 1, 1) self.horizontalLayout_4 = QtWidgets.QHBoxLayout() self.horizontalLayout_4.setContentsMargins(0, -1, 0, -1) self.horizontalLayout_4.setObjectName("horizontalLayout_4") self.medianRadiusInput = QtWidgets.QSpinBox(GenerateBrainMaskTabContent) self.medianRadiusInput.setMinimum(1) self.medianRadiusInput.setProperty("value", 4) self.medianRadiusInput.setObjectName("medianRadiusInput") self.horizontalLayout_4.addWidget(self.medianRadiusInput) spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum) self.horizontalLayout_4.addItem(spacerItem) self.gridLayout.addLayout(self.horizontalLayout_4, 4, 1, 1, 1) self.horizontalLayout_3 = QtWidgets.QHBoxLayout() self.horizontalLayout_3.setContentsMargins(-1, 0, -1, -1) 
self.horizontalLayout_3.setObjectName("horizontalLayout_3") self.selectOutputButton = QtWidgets.QPushButton(GenerateBrainMaskTabContent) self.selectOutputButton.setObjectName("selectOutputButton") self.horizontalLayout_3.addWidget(self.selectOutputButton) self.selectedOutputText = QtWidgets.QLineEdit(GenerateBrainMaskTabContent) self.selectedOutputText.setObjectName("selectedOutputText") self.horizontalLayout_3.addWidget(self.selectedOutputText) self.gridLayout.addLayout(self.horizontalLayout_3, 2, 1, 1, 1) self.label_8 = QtWidgets.QLabel(GenerateBrainMaskTabContent) self.label_8.setObjectName("label_8") self.gridLayout.addWidget(self.label_8, 2, 0, 1, 1) self.label_11 = QtWidgets.QLabel(GenerateBrainMaskTabContent) self.label_11.setObjectName("label_11") self.gridLayout.addWidget(self.label_11, 4, 0, 1, 1) self.horizontalLayout_5 = QtWidgets.QHBoxLayout() self.horizontalLayout_5.setObjectName("horizontalLayout_5") self.selectProtocolButton = QtWidgets.QPushButton(GenerateBrainMaskTabContent) self.selectProtocolButton.setObjectName("selectProtocolButton") self.horizontalLayout_5.addWidget(self.selectProtocolButton) self.selectedProtocolText = QtWidgets.QLineEdit(GenerateBrainMaskTabContent) self.selectedProtocolText.setObjectName("selectedProtocolText") self.horizontalLayout_5.addWidget(self.selectedProtocolText) self.gridLayout.addLayout(self.horizontalLayout_5, 1, 1, 1, 1) self.line_3 = QtWidgets.QFrame(GenerateBrainMaskTabContent) self.line_3.setFrameShape(QtWidgets.QFrame.HLine) self.line_3.setFrameShadow(QtWidgets.QFrame.Sunken) self.line_3.setObjectName("line_3") self.gridLayout.addWidget(self.line_3, 3, 0, 1, 3) self.label_10 = QtWidgets.QLabel(GenerateBrainMaskTabContent) font = QtGui.QFont() font.setItalic(True) self.label_10.setFont(font) self.label_10.setObjectName("label_10") self.gridLayout.addWidget(self.label_10, 2, 2, 1, 1) self.label_6 = QtWidgets.QLabel(GenerateBrainMaskTabContent) self.label_6.setObjectName("label_6") self.gridLayout.addWidget(self.label_6, 0, 0, 1, 1) self.label_3 = QtWidgets.QLabel(GenerateBrainMaskTabContent) self.label_3.setMinimumSize(QtCore.QSize(0, 0)) self.label_3.setObjectName("label_3") self.gridLayout.addWidget(self.label_3, 1, 0, 1, 1) self.label_13 = QtWidgets.QLabel(GenerateBrainMaskTabContent) self.label_13.setObjectName("label_13") self.gridLayout.addWidget(self.label_13, 6, 0, 1, 1) self.horizontalLayout_6 = QtWidgets.QHBoxLayout() self.horizontalLayout_6.setObjectName("horizontalLayout_6") self.numberOfPassesInput = QtWidgets.QSpinBox(GenerateBrainMaskTabContent) self.numberOfPassesInput.setMinimum(1) self.numberOfPassesInput.setProperty("value", 4) self.numberOfPassesInput.setObjectName("numberOfPassesInput") self.horizontalLayout_6.addWidget(self.numberOfPassesInput) spacerItem1 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum) self.horizontalLayout_6.addItem(spacerItem1) self.gridLayout.addLayout(self.horizontalLayout_6, 5, 1, 1, 1) self.horizontalLayout_7 = QtWidgets.QHBoxLayout() self.horizontalLayout_7.setObjectName("horizontalLayout_7") self.finalThresholdInput = QtWidgets.QDoubleSpinBox(GenerateBrainMaskTabContent) self.finalThresholdInput.setLocale(QtCore.QLocale(QtCore.QLocale.C, QtCore.QLocale.AnyCountry)) self.finalThresholdInput.setPrefix("") self.finalThresholdInput.setSuffix("") self.finalThresholdInput.setMaximum(1000000.0) self.finalThresholdInput.setObjectName("finalThresholdInput") self.horizontalLayout_7.addWidget(self.finalThresholdInput) spacerItem2 = 
QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum) self.horizontalLayout_7.addItem(spacerItem2) self.gridLayout.addLayout(self.horizontalLayout_7, 6, 1, 1, 1) self.label_14 = QtWidgets.QLabel(GenerateBrainMaskTabContent) font = QtGui.QFont() font.setItalic(True) self.label_14.setFont(font) self.label_14.setObjectName("label_14") self.gridLayout.addWidget(self.label_14, 4, 2, 1, 1) self.label_15 = QtWidgets.QLabel(GenerateBrainMaskTabContent) font = QtGui.QFont() font.setItalic(True) self.label_15.setFont(font) self.label_15.setObjectName("label_15") self.gridLayout.addWidget(self.label_15, 5, 2, 1, 1) self.label_16 = QtWidgets.QLabel(GenerateBrainMaskTabContent) font = QtGui.QFont() font.setItalic(True) self.label_16.setFont(font) self.label_16.setObjectName("label_16") self.gridLayout.addWidget(self.label_16, 6, 2, 1, 1) self.verticalLayout.addLayout(self.gridLayout) self.line_2 = QtWidgets.QFrame(GenerateBrainMaskTabContent) self.line_2.setFrameShape(QtWidgets.QFrame.HLine) self.line_2.setFrameShadow(QtWidgets.QFrame.Sunken) self.line_2.setObjectName("line_2") self.verticalLayout.addWidget(self.line_2) self.horizontalLayout = QtWidgets.QHBoxLayout() self.horizontalLayout.setContentsMargins(-1, 6, -1, 0) self.horizontalLayout.setObjectName("horizontalLayout") self.generateButton = QtWidgets.QPushButton(GenerateBrainMaskTabContent) self.generateButton.setEnabled(False) self.generateButton.setObjectName("generateButton") self.horizontalLayout.addWidget(self.generateButton) self.viewButton = QtWidgets.QPushButton(GenerateBrainMaskTabContent) self.viewButton.setEnabled(False) self.viewButton.setObjectName("viewButton") self.horizontalLayout.addWidget(self.viewButton) spacerItem3 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum) self.horizontalLayout.addItem(spacerItem3) self.verticalLayout.addLayout(self.horizontalLayout) spacerItem4 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding) self.verticalLayout.addItem(spacerItem4) self.retranslateUi(GenerateBrainMaskTabContent) QtCore.QMetaObject.connectSlotsByName(GenerateBrainMaskTabContent) GenerateBrainMaskTabContent.setTabOrder(self.selectImageButton, self.selectedImageText) GenerateBrainMaskTabContent.setTabOrder(self.selectedImageText, self.selectProtocolButton) GenerateBrainMaskTabContent.setTabOrder(self.selectProtocolButton, self.selectedProtocolText) GenerateBrainMaskTabContent.setTabOrder(self.selectedProtocolText, self.selectOutputButton) GenerateBrainMaskTabContent.setTabOrder(self.selectOutputButton, self.selectedOutputText) GenerateBrainMaskTabContent.setTabOrder(self.selectedOutputText, self.medianRadiusInput) GenerateBrainMaskTabContent.setTabOrder(self.medianRadiusInput, self.numberOfPassesInput) GenerateBrainMaskTabContent.setTabOrder(self.numberOfPassesInput, self.finalThresholdInput) GenerateBrainMaskTabContent.setTabOrder(self.finalThresholdInput, self.generateButton) GenerateBrainMaskTabContent.setTabOrder(self.generateButton, self.viewButton) def retranslateUi(self, GenerateBrainMaskTabContent): _translate = QtCore.QCoreApplication.translate GenerateBrainMaskTabContent.setWindowTitle(_translate("GenerateBrainMaskTabContent", "Form")) self.label.setText(_translate("GenerateBrainMaskTabContent", "Generate brain mask")) self.label_2.setText(_translate("GenerateBrainMaskTabContent", "Create a whole brain mask using the median-otsu algorithm."))
self.label_5.setText(_translate("GenerateBrainMaskTabContent", "(To create one, please see the tab \"Generate protocol file\")")) self.label_12.setText(_translate("GenerateBrainMaskTabContent", "Number of passes:")) self.selectImageButton.setText(_translate("GenerateBrainMaskTabContent", "Browse")) self.label_4.setText(_translate("GenerateBrainMaskTabContent", "(Select the 4d diffusion weighted image)")) self.selectOutputButton.setText(_translate("GenerateBrainMaskTabContent", "Browse")) self.label_8.setText(_translate("GenerateBrainMaskTabContent", "Select output file:")) self.label_11.setText(_translate("GenerateBrainMaskTabContent", "Median radius:")) self.selectProtocolButton.setText(_translate("GenerateBrainMaskTabContent", "Browse")) self.label_10.setText(_translate("GenerateBrainMaskTabContent", "(Default is _mask.nii.gz)")) self.label_6.setText(_translate("GenerateBrainMaskTabContent", "Select 4d image:")) self.label_3.setText(_translate("GenerateBrainMaskTabContent", "Select protocol file:")) self.label_13.setText(_translate("GenerateBrainMaskTabContent", "Final threshold:")) self.label_14.setText(_translate("GenerateBrainMaskTabContent", "(Radius (in voxels) of the applied median filter)")) self.label_15.setText(_translate("GenerateBrainMaskTabContent", "(Number of median filter passes)")) self.label_16.setText(_translate("GenerateBrainMaskTabContent", "(Additional masking threshold as a signal intensity)")) self.generateButton.setText(_translate("GenerateBrainMaskTabContent", "Generate")) self.viewButton.setText(_translate("GenerateBrainMaskTabContent", "View mask")) PKjUpI($mdt/gui/model_fit/design/__init__.py__author__ = 'Robbert Harms' __date__ = "2016-06-26" __maintainer__ = "Robbert Harms" __email__ = "robbert.harms@maastrichtuniversity.nl" PKIsI-!!>mdt/gui/model_fit/design/ui_generate_protocol_update_dialog.py# -*- coding: utf-8 -*- # Form implementation generated from reading ui file 'generate_protocol_update_dialog.ui' # # Created by: PyQt5 UI code generator 5.4.2 # # WARNING! All changes made in this file will be lost! 
from PyQt5 import QtCore, QtGui, QtWidgets class Ui_UpdateColumnDialog(object): def setupUi(self, UpdateColumnDialog): UpdateColumnDialog.setObjectName("UpdateColumnDialog") UpdateColumnDialog.resize(831, 304) self.verticalLayout = QtWidgets.QVBoxLayout(UpdateColumnDialog) self.verticalLayout.setObjectName("verticalLayout") self.verticalLayout_3 = QtWidgets.QVBoxLayout() self.verticalLayout_3.setSpacing(0) self.verticalLayout_3.setObjectName("verticalLayout_3") self.label_3 = QtWidgets.QLabel(UpdateColumnDialog) font = QtGui.QFont() font.setPointSize(14) self.label_3.setFont(font) self.label_3.setObjectName("label_3") self.verticalLayout_3.addWidget(self.label_3) self.label_4 = QtWidgets.QLabel(UpdateColumnDialog) font = QtGui.QFont() font.setItalic(True) self.label_4.setFont(font) self.label_4.setObjectName("label_4") self.verticalLayout_3.addWidget(self.label_4) self.verticalLayout.addLayout(self.verticalLayout_3) self.line = QtWidgets.QFrame(UpdateColumnDialog) self.line.setFrameShape(QtWidgets.QFrame.HLine) self.line.setFrameShadow(QtWidgets.QFrame.Sunken) self.line.setObjectName("line") self.verticalLayout.addWidget(self.line) self.gridLayout = QtWidgets.QGridLayout() self.gridLayout.setObjectName("gridLayout") self.horizontalLayout = QtWidgets.QHBoxLayout() self.horizontalLayout.setObjectName("horizontalLayout") self.fileInput = QtWidgets.QPushButton(UpdateColumnDialog) self.fileInput.setEnabled(True) self.fileInput.setObjectName("fileInput") self.horizontalLayout.addWidget(self.fileInput) self.selectedFile = QtWidgets.QLineEdit(UpdateColumnDialog) self.selectedFile.setEnabled(True) self.selectedFile.setObjectName("selectedFile") self.horizontalLayout.addWidget(self.selectedFile) self.gridLayout.addLayout(self.horizontalLayout, 4, 1, 1, 1) self.label_2 = QtWidgets.QLabel(UpdateColumnDialog) self.label_2.setObjectName("label_2") self.gridLayout.addWidget(self.label_2, 1, 0, 1, 1) self.inputMethodSelector = QtWidgets.QComboBox(UpdateColumnDialog) self.inputMethodSelector.setObjectName("inputMethodSelector") self.inputMethodSelector.addItem("") self.inputMethodSelector.addItem("") self.gridLayout.addWidget(self.inputMethodSelector, 1, 1, 1, 1) self.label = QtWidgets.QLabel(UpdateColumnDialog) self.label.setObjectName("label") self.gridLayout.addWidget(self.label, 0, 0, 1, 1) self.valueScale = QtWidgets.QLineEdit(UpdateColumnDialog) self.valueScale.setObjectName("valueScale") self.gridLayout.addWidget(self.valueScale, 6, 1, 1, 1) self.label_11 = QtWidgets.QLabel(UpdateColumnDialog) font = QtGui.QFont() font.setItalic(True) self.label_11.setFont(font) self.label_11.setObjectName("label_11") self.gridLayout.addWidget(self.label_11, 3, 2, 1, 1) self.label_10 = QtWidgets.QLabel(UpdateColumnDialog) font = QtGui.QFont() font.setItalic(True) self.label_10.setFont(font) self.label_10.setObjectName("label_10") self.gridLayout.addWidget(self.label_10, 0, 2, 1, 1) self.label_7 = QtWidgets.QLabel(UpdateColumnDialog) self.label_7.setObjectName("label_7") self.gridLayout.addWidget(self.label_7, 6, 0, 1, 1) self.label_5 = QtWidgets.QLabel(UpdateColumnDialog) self.label_5.setObjectName("label_5") self.gridLayout.addWidget(self.label_5, 3, 0, 1, 1) self.columnNameInput = QtWidgets.QLineEdit(UpdateColumnDialog) self.columnNameInput.setObjectName("columnNameInput") self.gridLayout.addWidget(self.columnNameInput, 0, 1, 1, 1) self.label_12 = QtWidgets.QLabel(UpdateColumnDialog) font = QtGui.QFont() font.setItalic(True) self.label_12.setFont(font) self.label_12.setObjectName("label_12") 
self.gridLayout.addWidget(self.label_12, 4, 2, 1, 1) self.label_13 = QtWidgets.QLabel(UpdateColumnDialog) font = QtGui.QFont() font.setItalic(True) self.label_13.setFont(font) self.label_13.setObjectName("label_13") self.gridLayout.addWidget(self.label_13, 6, 2, 1, 1) self.line_2 = QtWidgets.QFrame(UpdateColumnDialog) self.line_2.setFrameShape(QtWidgets.QFrame.HLine) self.line_2.setFrameShadow(QtWidgets.QFrame.Sunken) self.line_2.setObjectName("line_2") self.gridLayout.addWidget(self.line_2, 2, 0, 1, 3) self.label_6 = QtWidgets.QLabel(UpdateColumnDialog) self.label_6.setObjectName("label_6") self.gridLayout.addWidget(self.label_6, 4, 0, 1, 1) self.singleValueInput = QtWidgets.QLineEdit(UpdateColumnDialog) self.singleValueInput.setObjectName("singleValueInput") self.gridLayout.addWidget(self.singleValueInput, 3, 1, 1, 1) self.line_4 = QtWidgets.QFrame(UpdateColumnDialog) self.line_4.setFrameShape(QtWidgets.QFrame.HLine) self.line_4.setFrameShadow(QtWidgets.QFrame.Sunken) self.line_4.setObjectName("line_4") self.gridLayout.addWidget(self.line_4, 5, 0, 1, 3) self.verticalLayout.addLayout(self.gridLayout) spacerItem = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding) self.verticalLayout.addItem(spacerItem) self.line_3 = QtWidgets.QFrame(UpdateColumnDialog) self.line_3.setFrameShape(QtWidgets.QFrame.HLine) self.line_3.setFrameShadow(QtWidgets.QFrame.Sunken) self.line_3.setObjectName("line_3") self.verticalLayout.addWidget(self.line_3) self.buttonBox = QtWidgets.QDialogButtonBox(UpdateColumnDialog) self.buttonBox.setOrientation(QtCore.Qt.Horizontal) self.buttonBox.setStandardButtons(QtWidgets.QDialogButtonBox.Cancel|QtWidgets.QDialogButtonBox.Ok) self.buttonBox.setObjectName("buttonBox") self.verticalLayout.addWidget(self.buttonBox) self.retranslateUi(UpdateColumnDialog) self.buttonBox.accepted.connect(UpdateColumnDialog.accept) self.buttonBox.rejected.connect(UpdateColumnDialog.reject) QtCore.QMetaObject.connectSlotsByName(UpdateColumnDialog) UpdateColumnDialog.setTabOrder(self.columnNameInput, self.inputMethodSelector) UpdateColumnDialog.setTabOrder(self.inputMethodSelector, self.singleValueInput) UpdateColumnDialog.setTabOrder(self.singleValueInput, self.fileInput) UpdateColumnDialog.setTabOrder(self.fileInput, self.selectedFile) def retranslateUi(self, UpdateColumnDialog): _translate = QtCore.QCoreApplication.translate UpdateColumnDialog.setWindowTitle(_translate("UpdateColumnDialog", "Add / Update column")) self.label_3.setText(_translate("UpdateColumnDialog", "Add / Update column")) self.label_4.setText(_translate("UpdateColumnDialog", "Add a column to the current protocol or overwrite an existing column.")) self.fileInput.setText(_translate("UpdateColumnDialog", "Browse")) self.label_2.setText(_translate("UpdateColumnDialog", "Method:")) self.inputMethodSelector.setItemText(0, _translate("UpdateColumnDialog", "From file")) self.inputMethodSelector.setItemText(1, _translate("UpdateColumnDialog", "Single value")) self.label.setText(_translate("UpdateColumnDialog", "Column name: ")) self.label_11.setText(_translate("UpdateColumnDialog", "(A single value for every row)")) self.label_10.setText(_translate("UpdateColumnDialog", "(The column name, for example \"g\", \"b\" or \"TE\")")) self.label_7.setText(_translate("UpdateColumnDialog", "Scale:")) self.label_5.setText(_translate("UpdateColumnDialog", "Single value:")) self.label_12.setText(_translate("UpdateColumnDialog", "(File with a single value, a row, a column or a matrix)")) 
self.label_13.setText(_translate("UpdateColumnDialog", "(Optionally, scale the input with this amount)")) self.label_6.setText(_translate("UpdateColumnDialog", "File input:")) PKjUpI R1mdt/gui/model_fit/tabs/generate_brain_mask_tab.pyimport os from mdt.nifti import load_nifti import numpy as np from PyQt5.QtCore import pyqtSlot, QObject, pyqtSignal from PyQt5.QtWidgets import QFileDialog from mdt import load_brain_mask, create_median_otsu_brain_mask from mdt.visualization.maps.base import DataInfo, MapPlotConfig from mdt.gui.maps_visualizer.main import start_gui from mdt.gui.model_fit.design.ui_generate_brain_mask_tab import Ui_GenerateBrainMaskTabContent from mdt.gui.utils import function_message_decorator, image_files_filters, protocol_files_filters, MainTab __author__ = 'Robbert Harms' __date__ = "2016-06-26" __maintainer__ = "Robbert Harms" __email__ = "robbert.harms@maastrichtuniversity.nl" class GenerateBrainMaskTab(MainTab, Ui_GenerateBrainMaskTabContent): def __init__(self, shared_state, computations_thread): self._shared_state = shared_state self._computations_thread = computations_thread self._generate_mask_worker = GenerateMaskWorker() def setupUi(self, tab_content): super(GenerateBrainMaskTab, self).setupUi(tab_content) self.selectImageButton.clicked.connect(lambda: self._select_image()) self.selectProtocolButton.clicked.connect(lambda: self._select_protocol()) self.selectOutputButton.clicked.connect(lambda: self._select_output()) self.viewButton.clicked.connect(self.view_mask) self.generateButton.clicked.connect(self.generate_mask) self.selectedImageText.textChanged.connect(self._check_enable_action_buttons) self.selectedOutputText.textChanged.connect(self._check_enable_action_buttons) self.selectedProtocolText.textChanged.connect(self._check_enable_action_buttons) def _select_image(self): initial_dir = self._shared_state.base_dir if self.selectedImageText.text() != '': initial_dir = self.selectedImageText.text() open_file, used_filter = QFileDialog().getOpenFileName( caption='Select the 4d diffusion weighted image', directory=initial_dir, filter=';;'.join(image_files_filters)) if os.path.isfile(open_file): self.selectedImageText.setText(open_file) self._shared_state.base_dir = os.path.dirname(open_file) def _select_protocol(self): initial_dir = self._shared_state.base_dir if self.selectedProtocolText.text() != '': initial_dir = self.selectedProtocolText.text() open_file, used_filter = QFileDialog().getOpenFileName( caption='Select the protocol', directory=initial_dir, filter=';;'.join(protocol_files_filters)) if os.path.isfile(open_file): self.selectedProtocolText.setText(open_file) self._shared_state.base_dir = os.path.dirname(open_file) def _select_output(self): initial_dir = self._shared_state.base_dir if self.selectedOutputText.text() != '': initial_dir = self.selectedOutputText.text() output_file_name, used_filter = QFileDialog().getSaveFileName( caption='Select the output file', directory=initial_dir, filter=';;'.join(image_files_filters)) if output_file_name: self.selectedOutputText.setText(output_file_name) def _check_enable_action_buttons(self): self.generateButton.setEnabled(os.path.isfile(self.selectedImageText.text()) and os.path.isfile(self.selectedProtocolText.text()) and os.path.isdir(os.path.dirname(self.selectedOutputText.text()))) self.viewButton.setEnabled(os.path.isfile(self.selectedImageText.text()) and os.path.isfile(self.selectedOutputText.text())) @pyqtSlot() def view_mask(self): mask = np.expand_dims(load_brain_mask(self.selectedOutputText.text()), 
axis=3) image_data = load_nifti(self.selectedImageText.text()).get_data() masked_image = image_data * mask data = DataInfo({'Masked': masked_image, 'DWI': image_data}, directory=os.path.dirname(self.selectedImageText.text())) config = MapPlotConfig() config.dimension = 2 config.slice_index = image_data.shape[2] // 2 config.maps_to_show = ['DWI', 'Masked'] start_gui(data=data, config=config, app_exec=False) @pyqtSlot() def generate_mask(self): self._generate_mask_worker.set_args(self.selectedImageText.text(), self.selectedProtocolText.text(), self.selectedOutputText.text(), median_radius=self.medianRadiusInput.value(), numpass=self.numberOfPassesInput.value(), mask_threshold=self.finalThresholdInput.value()) self._computations_thread.start() self._generate_mask_worker.moveToThread(self._computations_thread) self._generate_mask_worker.starting.connect(self._computations_thread.starting) self._generate_mask_worker.finished.connect(self._computations_thread.finished) self._generate_mask_worker.starting.connect(lambda: self.generateButton.setEnabled(False)) self._generate_mask_worker.finished.connect(lambda: self.generateButton.setEnabled(True)) self._generate_mask_worker.finished.connect(lambda: self.viewButton.setEnabled(True)) self._generate_mask_worker.starting.emit() class GenerateMaskWorker(QObject): starting = pyqtSignal() finished = pyqtSignal() def __init__(self): super(GenerateMaskWorker, self).__init__() self.starting.connect(self.run) self._args = [] self._kwargs = {} def set_args(self, *args, **kwargs): self._args = args self._kwargs = kwargs @function_message_decorator('Started creating a mask.', 'Finished creating a mask.') @pyqtSlot() def run(self): create_median_otsu_brain_mask(*self._args, **self._kwargs) self.finished.emit() PKjUpIrb""/mdt/gui/model_fit/tabs/generate_roi_mask_tab.pyimport os from mdt.nifti import load_nifti from PyQt5.QtCore import pyqtSlot, QObject, pyqtSignal from PyQt5.QtWidgets import QFileDialog from mdt.visualization.maps.base import DataInfo, MapPlotConfig from mdt.gui.maps_visualizer.main import start_gui from mdt.gui.model_fit.design.ui_generate_roi_mask_tab import Ui_GenerateROIMaskTabContent from mdt.gui.utils import function_message_decorator, image_files_filters, MainTab from mdt.utils import split_image_path, write_slice_roi __author__ = 'Robbert Harms' __date__ = "2016-06-26" __maintainer__ = "Robbert Harms" __email__ = "robbert.harms@maastrichtuniversity.nl" class GenerateROIMaskTab(MainTab, Ui_GenerateROIMaskTabContent): def __init__(self, shared_state, computations_thread): self._shared_state = shared_state self._computations_thread = computations_thread self._generate_mask_worker = GenerateROIMaskWorker() def setupUi(self, tab_content): super(GenerateROIMaskTab, self).setupUi(tab_content) self.selectMaskButton.clicked.connect(lambda: self._select_mask()) self.selectOutputFileInput.clicked.connect(lambda: self._select_output_file()) self.viewButton.clicked.connect(self.view_mask) self.generateButton.clicked.connect(self.generate_roi_mask) self.selectedMaskText.textChanged.connect(self._check_enable_action_buttons) self.selectedOutputFileText.textChanged.connect(self._check_enable_action_buttons) self.selectedMaskText.textChanged.connect(self.mask_file_changed) self.dimensionInput.valueChanged.connect(self.update_dimension) self.sliceInput.valueChanged.connect(self.update_slice_index) def _select_mask(self): initial_dir = self._shared_state.base_dir if self.selectedMaskText.text() != '': initial_dir = self.selectedMaskText.text() open_file,
used_filter = QFileDialog().getOpenFileName( caption='Select the brain mask', directory=initial_dir, filter=';;'.join(image_files_filters)) if os.path.isfile(open_file): self.selectedMaskText.setText(open_file) self.mask_file_changed() self._shared_state.base_dir = os.path.dirname(open_file) def _select_output_file(self): output_file_name, used_filter = QFileDialog().getSaveFileName( caption='Select the output file', directory=self._shared_state.base_dir, filter=';;'.join(image_files_filters)) if output_file_name: self.selectedOutputFileText.setText(output_file_name) def _check_enable_action_buttons(self): self.generateButton.setEnabled(os.path.isfile(self.selectedMaskText.text()) and os.path.isdir(os.path.dirname(self.selectedOutputFileText.text()))) self.viewButton.setEnabled(os.path.isfile(self.selectedMaskText.text()) and os.path.isfile(self.selectedOutputFileText.text())) @pyqtSlot() def view_mask(self): data = DataInfo({'Original mask': load_nifti(self.selectedMaskText.text()).get_data(), 'Slice mask': load_nifti(self.selectedOutputFileText.text()).get_data()}, directory=os.path.dirname(self.selectedMaskText.text())) config = MapPlotConfig() config.dimension = self.dimensionInput.value() config.slice_index = self.sliceInput.value() config.maps_to_show = ['Original mask', 'Slice mask'] start_gui(data=data, config=config, app_exec=False) @pyqtSlot() def generate_roi_mask(self): self._generate_mask_worker.set_args(mask=self.selectedMaskText.text(), output=self.selectedOutputFileText.text(), dimension=self.dimensionInput.value(), slice=self.sliceInput.value()) self._computations_thread.start() self._generate_mask_worker.moveToThread(self._computations_thread) self._generate_mask_worker.starting.connect(self._computations_thread.starting) self._generate_mask_worker.finished.connect(self._computations_thread.finished) self._generate_mask_worker.starting.connect(lambda: self.generateButton.setEnabled(False)) self._generate_mask_worker.finished.connect(lambda: self.generateButton.setEnabled(True)) self._generate_mask_worker.finished.connect(lambda: self.viewButton.setEnabled(True)) self._generate_mask_worker.starting.emit() @pyqtSlot(int) def update_dimension(self, value): if os.path.isfile(self.selectedMaskText.text()): self.update_slice_selector() self.update_output_file_text() @pyqtSlot(int) def update_slice_index(self, value): self.update_output_file_text() def mask_file_changed(self): self.dimensionInput.setValue(2) self.update_slice_selector() self.update_output_file_text() def update_slice_selector(self): if os.path.isfile(self.selectedMaskText.text()): dimension_max = load_nifti(self.selectedMaskText.text()).shape[self.dimensionInput.value()] self.sliceInput.setMaximum(dimension_max) self.sliceInput.setValue(dimension_max // 2) self.maxSliceLabel.setText(str(dimension_max)) else: self.sliceInput.setValue(0) self.maxSliceLabel.setText('x') def update_output_file_text(self): if os.path.isfile(self.selectedMaskText.text()): folder, basename, ext = split_image_path(self.selectedMaskText.text()) folder_base = os.path.join(folder, basename) if self.selectedOutputFileText.text() == '': self.selectedOutputFileText.setText('{}_{}_{}.nii.gz'.format(folder_base, self.dimensionInput.value(), self.sliceInput.value())) elif self.selectedOutputFileText.text()[0:len(folder_base)] == folder_base: self.selectedOutputFileText.setText('{}_{}_{}.nii.gz'.format(folder_base, self.dimensionInput.value(), self.sliceInput.value())) class GenerateROIMaskWorker(QObject): starting = pyqtSignal() finished =
pyqtSignal() def __init__(self): super(GenerateROIMaskWorker, self).__init__() self.starting.connect(self.run) self._args = [] self._kwargs = {} def set_args(self, *args, **kwargs): self._args = args self._kwargs = kwargs @function_message_decorator('Started with generating a slice ROI', 'Finished generating a slice ROI') @pyqtSlot() def run(self): write_slice_roi(self._kwargs['mask'], self._kwargs['dimension'], self._kwargs['slice'], self._kwargs['output'], overwrite_if_exists=True) self.finished.emit() PKjUpI>ND8D8/mdt/gui/model_fit/tabs/generate_protocol_tab.pyimport os from collections import OrderedDict from PyQt5 import QtCore from PyQt5.QtCore import pyqtSlot, Qt from PyQt5.QtGui import QBrush from PyQt5.QtWidgets import QFileDialog, QTableWidgetItem, QAbstractItemView, QMenu, QMessageBox, \ QDialog, QDialogButtonBox from mdt.gui.model_fit.design.ui_generate_protocol_load_gb_dialog import Ui_LoadGBDialog from mdt.gui.model_fit.design.ui_generate_protocol_update_dialog import Ui_UpdateColumnDialog import mdt from mdt.gui.model_fit.design.ui_generate_protocol_tab import Ui_GenerateProtocolTabContent from mdt.gui.utils import protocol_files_filters, MainTab from mdt.protocols import Protocol, load_bvec_bval __author__ = 'Robbert Harms' __date__ = "2016-06-27" __maintainer__ = "Robbert Harms" __email__ = "robbert.harms@maastrichtuniversity.nl" class GenerateProtocolTab(MainTab, Ui_GenerateProtocolTabContent): def __init__(self, shared_state, computations_thread): self._shared_state = shared_state self._protocol = Protocol() self._opened_file = self._shared_state.base_dir self._tab_content = None self._system_columns = OrderedDict([['#', self._create_volume_number_column]]) def setupUi(self, tab_content): super(GenerateProtocolTab, self).setupUi(tab_content) self._tab_content = tab_content self.loadProtocolButton.clicked.connect(lambda: self._select_protocol()) self.saveButton.clicked.connect(lambda: self._save_protocol()) self.loadColumnButton.clicked.connect(lambda: self._load_column_action()) self.loadGB.clicked.connect(lambda: self._load_g_and_b()) self.clearButton.clicked.connect(self._clear_table) self.protocol_table.setSortingEnabled(True) headers = self.protocol_table.horizontalHeader() headers.setContextMenuPolicy(Qt.CustomContextMenu) headers.customContextMenuRequested.connect(self.show_header_context_menu) headers.setSelectionMode(QAbstractItemView.SingleSelection) def _select_protocol(self): open_file, used_filter = QFileDialog().getOpenFileName( caption='Select the protocol', directory=self._shared_state.base_dir, filter=';;'.join(protocol_files_filters)) if open_file: self._shared_state.base_dir = os.path.dirname(open_file) self.load_protocol(open_file) def _save_protocol(self): output_file_name, used_filter = QFileDialog().getSaveFileName( caption='Save the protocol as', directory=self._opened_file, filter=';;'.join(protocol_files_filters)) if os.path.isdir(os.path.dirname(output_file_name)) and self._protocol.length: mdt.write_protocol(self._protocol, output_file_name) print('Saved protocol as: {}'.format(output_file_name)) @pyqtSlot() def _clear_table(self): self._protocol = Protocol() self._update_views() def load_protocol(self, file_name): self._protocol = mdt.protocols.load_protocol(file_name) self._update_views() self._opened_file = file_name print('Loaded protocol: {}'.format(file_name)) def _update_views(self): self._update_protocol_info() self._update_table_view() def _update_protocol_info(self): self.nmrRows.setText(str(self._protocol.length)) try: 
self.nmrUnweighted.setText(str(len(self._protocol.get_unweighted_indices()))) except KeyError: self.nmrUnweighted.setText('0') try: self.nmrWeighted.setText(str(len(self._protocol.get_weighted_indices()))) except KeyError: self.nmrWeighted.setText('0') try: self.nmrShells.setText(str(len(self._protocol.get_b_values_shells()))) except KeyError: self.nmrShells.setText('0') self.nmrColumns.setText(str(self._protocol.number_of_columns)) try: shells = self._protocol.get_b_values_shells() shells_text = [] for shell in shells: occurrences = self._protocol.count_occurences('b', shell) shells_text.append('{0:0=.3f}e9 ({1})'.format(shell/1e9, occurrences)) self.differentShells.setText(', '.join(shells_text)) except KeyError: self.differentShells.setText('-') def _update_table_view(self): all_column_names, real_column_names, estimated_column_names, system_column_names = self._get_column_names() self.protocol_table.clear() self.protocol_table.setRowCount(self._protocol.length) self.protocol_table.setColumnCount(len(all_column_names)) for index, column_name in enumerate(all_column_names): header_cell = QTableWidgetItem(column_name) if column_name in estimated_column_names: header_cell.setToolTip('This column is estimated from the other columns in the protocol.') self.protocol_table.setHorizontalHeaderItem(index, header_cell) for column_ind, column_name in enumerate(all_column_names): if column_name in system_column_names: generate_function = self._system_columns[column_name] cells = generate_function() for row, cell in enumerate(cells): self.protocol_table.setItem(row, column_ind, cell) else: try: values = self._protocol.get_column(column_name) for row in range(self._protocol.length): cell = NumericalSortedTableItem('{:e}'.format(values[row, 0])) cell.setFlags(QtCore.Qt.ItemIsEnabled) if column_name in estimated_column_names: cell.setBackground(QBrush(Qt.lightGray)) self.protocol_table.setItem(row, column_ind, cell) except KeyError: for row in range(self._protocol.length): cell = QTableWidgetItem('?') cell.setFlags(QtCore.Qt.ItemIsEnabled) cell.setBackground(QBrush(Qt.lightGray)) self.protocol_table.setItem(row, column_ind, cell) self.protocol_table.resizeColumnsToContents() def _get_column_names(self): real_column_names = self._protocol.column_names if len(real_column_names): estimated_column_names = self._protocol.estimated_column_names else: estimated_column_names = [] system_column_names = list(self._system_columns.keys()) all_column_names = system_column_names + real_column_names + estimated_column_names return [all_column_names, real_column_names, estimated_column_names, system_column_names] def _create_volume_number_column(self): """Callback function to generate the volume number column cells. This should return a list of cells in the correct order. 
""" cells = [] for volume_nmr in range(self._protocol.length): cell = NumericalSortedTableItem(str(volume_nmr)) cell.setFlags(QtCore.Qt.ItemIsEnabled) cell.setBackground(QBrush(Qt.lightGray)) cells.append(cell) return cells @pyqtSlot() def show_header_context_menu(self, position): all_column_names, real_column_names, estimated_column_names, system_column_names = self._get_column_names() column_index = self.protocol_table.horizontalHeader().logicalIndexAt(position) column_name = all_column_names[column_index] if column_name in real_column_names: menu = QMenu() remove_action = menu.addAction("&Remove column") ac = menu.exec_(self.protocol_table.horizontalHeader().mapToGlobal(position)) if ac == remove_action: quit_msg = "Are you sure you want to remove the " \ "column '{}' from the protocol".format(column_name) reply = QMessageBox.question(self._tab_content, 'Delete confirmation', quit_msg, QMessageBox.Yes, QMessageBox.No) if reply == QMessageBox.Yes: self._protocol.remove_column(column_name) self._update_views() def _load_column_action(self): dialog = LoadColumnDialog(self._shared_state, self._tab_content) return_value = dialog.exec_() if return_value: dialog.update_protocol(self._protocol) self._update_views() def _load_g_and_b(self): dialog = LoadGBDialog(self._shared_state, self._tab_content) return_value = dialog.exec_() if return_value: self._protocol = dialog.get_protocol() self._update_views() class NumericalSortedTableItem(QTableWidgetItem): def __lt__(self, other): if isinstance(other, QTableWidgetItem): try: this_value = float(self.text()) other_value = float(other.text()) return this_value < other_value except ValueError: pass return super(NumericalSortedTableItem, self).__lt__(other) class LoadColumnDialog(Ui_UpdateColumnDialog, QDialog): def __init__(self, shared_state, parent): super(LoadColumnDialog, self).__init__(parent) self._input_options = {'from_file': 0, 'from_value': 1} self._shared_state = shared_state self.setupUi(self) self.inputMethodSelector.currentIndexChanged.connect(self.enable_correct_inputs) self.buttonBox.button(QDialogButtonBox.Ok).setEnabled(False) self.columnNameInput.textChanged.connect(self._update_ok_button) self.singleValueInput.textChanged.connect(self._update_ok_button) self.fileInput.clicked.connect(lambda: self._select_value_file()) self.selectedFile.textChanged.connect(self._update_ok_button) self.enable_correct_inputs(self.inputMethodSelector.currentIndex()) def update_protocol(self, protocol): column_name = self.columnNameInput.text() if column_name: try: scale = float(self.valueScale.text()) except ValueError: scale = 1 if self.inputMethodSelector.currentIndex() == self._input_options['from_value']: value = float(self.singleValueInput.text()) protocol.add_column(column_name, value * scale) else: protocol.add_column_from_file(column_name, self.selectedFile.text(), scale) @pyqtSlot(int) def enable_correct_inputs(self, selection): if selection == self._input_options['from_value']: self.singleValueInput.setDisabled(False) self.fileInput.setDisabled(True) self.selectedFile.setDisabled(True) else: self.singleValueInput.setDisabled(True) self.fileInput.setDisabled(False) self.selectedFile.setDisabled(False) @pyqtSlot() def _update_ok_button(self): self.buttonBox.button(QDialogButtonBox.Ok).setEnabled(self.columnNameInput.text() != '' and self._has_value()) def _has_value(self): if self.inputMethodSelector.currentIndex() == self._input_options['from_value']: if self.singleValueInput.text() != '': try: float(self.singleValueInput.text()) return True 
except ValueError: pass return False else: if os.path.isfile(self.selectedFile.text()): return True return False def _select_value_file(self): initial_dir = self._shared_state.base_dir if self.selectedFile.text() != '': initial_dir = self.selectedFile.text() open_file, used_filter = QFileDialog().getOpenFileName(caption='Select the column info file', directory=initial_dir) if open_file: self.selectedFile.setText(open_file) self._shared_state.base_dir = os.path.dirname(open_file) self._update_ok_button() class LoadGBDialog(Ui_LoadGBDialog, QDialog): def __init__(self, shared_state, parent): super(LoadGBDialog, self).__init__(parent) self._shared_state = shared_state self.setupUi(self) self.buttonBox.button(QDialogButtonBox.Ok).setEnabled(False) self.bvecFileInput.textChanged.connect(self._update_ok_button) self.bvalFileInput.textChanged.connect(self._update_ok_button) self.bvecFileChooser.clicked.connect(lambda: self._select_bvec_file()) self.bvalFileChooser.clicked.connect(lambda: self._select_bval_file()) def get_protocol(self): try: bval_scale = float(self.bvalScale.text()) except: bval_scale = 1 return load_bvec_bval(bvec=self.bvecFileInput.text(), bval=self.bvalFileInput.text(), bval_scale=bval_scale) @pyqtSlot() def _update_ok_button(self): enable = os.path.isfile(self.bvalFileInput.text()) and os.path.isfile(self.bvecFileInput.text()) self.buttonBox.button(QDialogButtonBox.Ok).setEnabled(enable) def _select_bvec_file(self): initial_dir = self._shared_state.base_dir if self.bvecFileInput.text() != '': initial_dir = self.bvecFileInput.text() open_file, used_filter = QFileDialog().getOpenFileName(caption='Select the bvec file', directory=initial_dir) if open_file: self.bvecFileInput.setText(open_file) self._shared_state.base_dir = os.path.dirname(open_file) self._update_ok_button() def _select_bval_file(self): initial_dir = self._shared_state.base_dir if self.bvalFileInput.text() != '': initial_dir = self.bvalFileInput.text() open_file, used_filter = QFileDialog().getOpenFileName(caption='Select the bval file', directory=initial_dir) if open_file: self.bvalFileInput.setText(open_file) self._shared_state.base_dir = os.path.dirname(open_file) self._update_ok_button() PKjUpI\*==*mdt/gui/model_fit/tabs/view_results_tab.pyimport glob import os from PyQt5.QtCore import pyqtSlot from PyQt5.QtWidgets import QFileDialog from mdt import results_preselection_names from mdt.nifti import load_nifti from mdt.visualization.maps.base import DataInfo, MapPlotConfig from mdt.gui.maps_visualizer.main import start_gui from mdt.gui.model_fit.design.ui_view_results_tab import Ui_ViewResultsTabContent from mdt.gui.utils import MainTab from mdt.utils import split_image_path __author__ = 'Robbert Harms' __date__ = "2016-06-27" __maintainer__ = "Robbert Harms" __email__ = "robbert.harms@maastrichtuniversity.nl" class ViewResultsTab(MainTab, Ui_ViewResultsTabContent): def __init__(self, shared_state, computations_thread): self._shared_state = shared_state self._parameter_files = {} self._folder = None def setupUi(self, ViewResultsTabContent): super(ViewResultsTab, self).setupUi(ViewResultsTabContent) self.selectFolderButton.clicked.connect(lambda: self._select_folder()) self.selectedFolderText.textChanged.connect(self.directory_updated) self.viewButton.clicked.connect(self.view_maps) self.invertSelectionButton.clicked.connect(self.invert_selection) self.deselectAllButton.clicked.connect(self.deselect_all) self.initialSliceChooser.valueChanged.connect(self._shared_state.set_slice_index) 
self.initialDimensionChooser.valueChanged.connect(self._shared_state.set_dimension_index) self.initialSliceChooser.setMaximum(0) def open_dir(self, directory): self.selectedFolderText.setText(directory) self.directory_updated(directory) def _select_folder(self): initial_dir = self._shared_state.base_dir if self.selectedFolderText.text() != '': initial_dir = self.selectedFolderText.text() folder = QFileDialog().getExistingDirectory(caption='Select directory to view', directory=initial_dir) if os.path.isdir(folder): self.selectedFolderText.setText(folder) self._shared_state.base_dir = folder @pyqtSlot(str) def directory_updated(self, folder): if os.path.isfile(folder): folder = os.path.dirname(folder) self._folder = folder result_files = glob.glob(os.path.join(folder, '*.nii*')) def get_name(img_path): return split_image_path(os.path.basename(img_path))[1] self._parameter_files = {get_name(f): get_name(f) for f in result_files} items_list = sorted(self._parameter_files.keys()) selected_items = results_preselection_names(sorted(self._parameter_files.keys())) self.selectMaps.clear() self.selectMaps.addItems(items_list) for item in [self.selectMaps.item(index) for index in range(self.selectMaps.count())]: if item.text() in selected_items: item.setSelected(True) if items_list: shape = load_nifti(result_files[0]).shape maximum = shape[self.initialDimensionChooser.value()] self.initialSliceChooser.setMaximum(maximum) if self.initialSliceChooser.value() == 0 or self.initialSliceChooser.value() >= maximum: self.initialSliceChooser.setValue(maximum // 2.0) self.maximumIndexLabel.setText(str(maximum)) @pyqtSlot() def invert_selection(self): for item in [self.selectMaps.item(index) for index in range(self.selectMaps.count())]: item.setSelected(not item.isSelected()) @pyqtSlot() def deselect_all(self): for item in [self.selectMaps.item(index) for index in range(self.selectMaps.count())]: item.setSelected(False) @pyqtSlot() def view_maps(self): maps_to_show = [] for item in [self.selectMaps.item(index) for index in range(self.selectMaps.count())]: if item.isSelected(): maps_to_show.append(item.text()) data = DataInfo.from_dir(self._folder) config = MapPlotConfig() config.maps_to_show = maps_to_show config.dimension = self.initialDimensionChooser.value() config.slice_index = self.initialSliceChooser.value() start_gui(data=data, config=config, app_exec=False) def tab_opened(self): if self._shared_state.output_folder != '': self.selectedFolderText.setText(self._shared_state.output_folder) PKjUpI)"mdt/gui/model_fit/tabs/__init__.py__author__ = 'Robbert Harms' __date__ = "2016-06-27" __maintainer__ = "Robbert Harms" __email__ = "robbert.harms@maastrichtuniversity.nl" PKq}II 6.22'mdt/gui/model_fit/tabs/fit_model_tab.pyimport os from PyQt5.QtCore import pyqtSlot, QObject, pyqtSignal from PyQt5.QtWidgets import QFileDialog, QMessageBox, QDialog, QDialogButtonBox from mdt.gui.model_fit.design.ui_optimization_options_dialog import Ui_OptimizationOptionsDialog import mdt from mdt.gui.model_fit.design.ui_fit_model_tab import Ui_FitModelTabContent from mdt.gui.utils import function_message_decorator, image_files_filters, protocol_files_filters, MainTab from mdt.utils import split_image_path from mot.factory import get_optimizer_by_name __author__ = 'Robbert Harms' __date__ = "2016-06-27" __maintainer__ = "Robbert Harms" __email__ = "robbert.harms@maastrichtuniversity.nl" class FitModelTab(MainTab, Ui_FitModelTabContent): def __init__(self, shared_state, computations_thread): self._shared_state = shared_state 
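# Editor note: the RunModelWorker constructed just below is later moved onto the shared computations thread via moveToThread() in run_model(), so the actual model fit runs off the GUI event loop; its starting/finished signals are what disable and re-enable the run button around the fit.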
self._computations_thread = computations_thread self._run_model_worker = RunModelWorker() self._tab_content = None self._optim_options = OptimOptions() def setupUi(self, tab_content): super(FitModelTab, self).setupUi(tab_content) self._tab_content = tab_content self.selectDWI.clicked.connect(lambda: self._select_dwi()) self.selectMask.clicked.connect(lambda: self._select_mask()) self.selectProtocol.clicked.connect(lambda: self._select_protocol()) self.selectOutputFolder.clicked.connect(lambda: self._select_output()) self.selectedDWI.textChanged.connect(self._check_enable_action_buttons) self.selectedMask.textChanged.connect(self._check_enable_action_buttons) self.selectedProtocol.textChanged.connect(self._check_enable_action_buttons) self.runButton.clicked.connect(self.run_model) self.optimizationOptionsButton.clicked.connect(self._run_optimization_options_dialog) self.modelSelection.addItems(list(sorted(mdt.get_models_list()))) self.modelSelection.setCurrentText('BallStick_r1 (Cascade)') self._check_enable_action_buttons() def _select_dwi(self): initial_dir = self._shared_state.base_dir if self.selectedDWI.text() != '': initial_dir = self.selectedDWI.text() open_file, used_filter = QFileDialog().getOpenFileName( caption='Select the 4d diffusion weighted image', directory=initial_dir, filter=';;'.join(image_files_filters)) if os.path.isfile(open_file): self.selectedDWI.setText(open_file) self._shared_state.base_dir = os.path.dirname(open_file) self.update_output_folder_text() def _select_mask(self): initial_dir = self._shared_state.base_dir if self.selectedMask.text() != '': initial_dir = self.selectedMask.text() open_file, used_filter = QFileDialog().getOpenFileName( caption='Select the brain mask', directory=initial_dir, filter=';;'.join(image_files_filters)) if os.path.isfile(open_file): self.selectedMask.setText(open_file) self._shared_state.base_dir = os.path.dirname(open_file) self.update_output_folder_text() def _select_output(self): initial_dir = self._shared_state.base_dir if self.selectedOutputFolder.text() != '': initial_dir = self.selectedOutputFolder.text() output_file_name = QFileDialog().getExistingDirectory( caption='Select the output folder', directory=initial_dir) if output_file_name: self.selectedOutputFolder.setText(output_file_name) def _select_protocol(self): initial_dir = self._shared_state.base_dir if self.selectedProtocol.text() != '': initial_dir = self.selectedProtocol.text() open_file, used_filter = QFileDialog().getOpenFileName( caption='Select the protocol', directory=initial_dir, filter=';;'.join(protocol_files_filters)) if os.path.isfile(open_file): self.selectedProtocol.setText(open_file) self._shared_state.base_dir = os.path.dirname(open_file) def _check_enable_action_buttons(self): self.runButton.setEnabled( os.path.isfile(self.selectedDWI.text()) and os.path.isfile(self.selectedMask.text()) and os.path.isfile(self.selectedProtocol.text()) and self.selectedOutputFolder.text() != '') def update_output_folder_text(self): if os.path.isfile(self.selectedDWI.text()) and os.path.isfile(self.selectedMask.text()): folder_base = os.path.join(os.path.dirname(self.selectedDWI.text()), 'output', split_image_path(self.selectedMask.text())[1]) self.selectedOutputFolder.setText(folder_base) def _run_optimization_options_dialog(self): dialog = OptimizationOptionsDialog(self._shared_state, self._tab_content, self._optim_options) return_value = dialog.exec_() if return_value: dialog.write_config() @pyqtSlot() def run_model(self): model = 
mdt.get_model(self.modelSelection.currentText()) protocol = mdt.load_protocol(self.selectedProtocol.text()) if not model.is_protocol_sufficient(protocol): msg = ProtocolWarningBox(model.get_protocol_problems(protocol)) msg.exec_() return self._run_model_worker.set_args( model, mdt.load_problem_data(self.selectedDWI.text(), self.selectedProtocol.text(), self.selectedMask.text(), noise_std=self._optim_options.noise_std), self.selectedOutputFolder.text(), recalculate=True, double_precision=self._optim_options.double_precision, only_recalculate_last=not self._optim_options.recalculate_all, optimizer=self._optim_options.get_optimizer(), save_user_script_info=False) self._computations_thread.start() self._run_model_worker.moveToThread(self._computations_thread) self._run_model_worker.starting.connect(self._computations_thread.starting) self._run_model_worker.finished.connect(self._computations_thread.finished) self._run_model_worker.starting.connect(lambda: self.runButton.setEnabled(False)) self._run_model_worker.finished.connect(lambda: self.runButton.setEnabled(True)) self._run_model_worker.finished.connect( lambda: self._shared_state.set_output_folder(self._get_full_model_output_path())) self._run_model_worker.starting.emit() def _get_full_model_output_path(self): parts = [self.selectedOutputFolder.text()] parts.append(self.modelSelection.currentText().split(' ')[0]) return os.path.join(*parts) class ProtocolWarningBox(QMessageBox): def __init__(self, problems, *args): super(ProtocolWarningBox, self).__init__(*args) self.setIcon(QMessageBox.Warning) self.setWindowTitle("Insufficient protocol") self.setText("The provided protocol is insufficient for this model.") self.setInformativeText("The reported problems are: \n{}".format('\n'.join(' - ' + str(p) for p in problems))) self._in_resize = False def resizeEvent(self, event): if not self._in_resize: self._in_resize = True self.setFixedWidth(self.children()[-1].size().width() + 200) self._in_resize = False class OptimizationOptionsDialog(Ui_OptimizationOptionsDialog, QDialog): def __init__(self, shared_state, parent, config): super(OptimizationOptionsDialog, self).__init__(parent) self._shared_state = shared_state self._config = config self.setupUi(self) self.noiseStdFileSelect.clicked.connect(lambda: self._select_std_file()) self.noiseStd.textChanged.connect(self._check_enable_ok_button) self.patience.textChanged.connect(self._check_enable_ok_button) self.optimizationRoutine.addItems(sorted(OptimOptions.optim_routines.keys())) self.optimizationRoutine.currentIndexChanged.connect(self._update_default_patience) self.defaultOptimizerGroup.buttonClicked.connect(self._update_optimization_routine_selection) self._load_config() def write_config(self): """Write to the config the user selected options""" noise_std_value = self.noiseStd.text() if noise_std_value == '': self._config.noise_std = None else: self._config.noise_std = noise_std_value try: self._config.noise_std = float(noise_std_value) except ValueError: pass self._config.double_precision = self.doublePrecision.isChecked() self._config.recalculate_all = self.recalculateAll_True.isChecked() self._config.use_model_default_optimizer = self.defaultOptimizer_True.isChecked() self._config.optimizer = OptimOptions.optim_routines[self.optimizationRoutine.currentText()] self._config.patience = int(self.patience.text()) def _load_config(self): """Load the settings from the config into the GUI""" if self._config.noise_std is not None: self.noiseStd.setText(str(self._config.noise_std)) 
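# Editor note: the noise std field accepts three forms: empty text (noise_std stays None), a plain number, or a path to a noise-std volume. write_config() above stores the raw text and only replaces it with a float when float() succeeds, and _check_enable_ok_button() below enables OK for any of these three forms.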
self.doublePrecision.setChecked(self._config.double_precision) self.recalculateAll_True.setChecked(self._config.recalculate_all) self.defaultOptimizer_False.setChecked(not self._config.use_model_default_optimizer) self._update_optimization_routine_selection() self.optimizationRoutine.setCurrentText({v: k for k, v in OptimOptions.optim_routines.items()}[self._config.optimizer]) self.patience.setText(str(self._config.patience)) def _select_std_file(self): open_file, used_filter = QFileDialog().getOpenFileName( caption='Select a noise std volume', directory=self._shared_state.base_dir, filter=';;'.join(image_files_filters)) if open_file: self._shared_state.base_dir = os.path.dirname(open_file) self.noiseStd.setText(open_file) def _check_enable_ok_button(self): noise_std_value = self.noiseStd.text() noise_std_value_is_float = False try: float(noise_std_value) noise_std_value_is_float = True except ValueError: pass enabled = noise_std_value == '' or noise_std_value_is_float or os.path.isfile(noise_std_value) if self.defaultOptimizer_False.isChecked(): try: int(self.patience.text()) except ValueError: enabled = False self.buttonBox.button(QDialogButtonBox.Ok).setEnabled(enabled) def _update_optimization_routine_selection(self): self.optimizationRoutine.setDisabled(self.defaultOptimizer_True.isChecked()) self.patience.setDisabled(self.defaultOptimizer_True.isChecked()) def _update_default_patience(self): optimizer = get_optimizer_by_name(OptimOptions.optim_routines[self.optimizationRoutine.currentText()]) self.patience.setText(str(optimizer.default_patience)) class OptimOptions(object): optim_routines = {'Powell\'s method': 'Powell', 'Nelder-Mead Simplex': 'NMSimplex', 'Levenberg Marquardt': 'LevenbergMarquardt'} def __init__(self): """Storage class for communication between the options dialog and the main frame""" self.use_model_default_optimizer = True self.double_precision = False self.optimizer = mdt.configuration.get_general_optimizer_name() self.patience = mdt.configuration.get_general_optimizer_settings()['patience'] if self.patience is None: self.patience = get_optimizer_by_name(self.optimizer).default_patience self.recalculate_all = False self.noise_std = None def get_optimizer(self): if self.use_model_default_optimizer: return None optimizer = get_optimizer_by_name(self.optimizer) return optimizer(patience=self.patience) class RunModelWorker(QObject): starting = pyqtSignal() finished = pyqtSignal() def __init__(self): super(RunModelWorker, self).__init__() self.starting.connect(self.run) self._args = [] self._kwargs = {} def set_args(self, *args, **kwargs): self._args = args self._kwargs = kwargs @function_message_decorator('Starting model fitting, please wait.', 'Finished model fitting. 
You can view the results using the "View results" tab.') @pyqtSlot() def run(self): mdt.fit_model(*self._args, **self._kwargs) self.finished.emit() PKjUpIbbmdt/visualization/layouts.pyimport itertools import numpy as np from matplotlib.gridspec import GridSpec from mdt.visualization.dict_conversion import SimpleClassConversion, IntConversion, SimpleDictConversion __author__ = 'Robbert Harms' __date__ = "2016-09-02" __maintainer__ = "Robbert Harms" __email__ = "robbert.harms@maastrichtuniversity.nl" class GridLayout(object): def __init__(self, spacings=None): super(GridLayout, self).__init__() self.spacings = spacings or {'left': 0.10, 'right': 0.86, 'top': 0.97, 'bottom': 0.04, 'wspace': 0.5, 'hspace': 0.2} if self.spacings['top'] < self.spacings['bottom']: raise ValueError('The top ({}) can not be smaller than the bottom ({}) in the spacings'.format( self.spacings['top'], self.spacings['bottom'])) if self.spacings['left'] > self.spacings['right']: raise ValueError('Left ({}) can not be larger than right ({}) in the spacings'.format( self.spacings['left'], self.spacings['right'])) @classmethod def get_conversion_info(cls): return SimpleClassConversion(cls, cls._get_attribute_conversions()) @classmethod def _get_attribute_conversions(cls): return {'spacings': SimpleDictConversion(desired_type=float)} def get_gridspec(self, figure, nmr_plots): """Get the grid layout specifier for the given figure using the given number of plots. Args: figure (Figure): the figure to add the axis to nmr_plots (int): the total number of plots Returns: GridLayoutSpecifier: the specifier we can ask new subplot axis from """ def __eq__(self, other): if not isinstance(other, GridLayout): return NotImplemented return isinstance(other, type(self)) and other.spacings == self.spacings def __ne__(self, other): return not self.__eq__(other) class GridLayoutSpecifier(object): def __init__(self, gridspec, figure, positions=None): """Create a grid layout specifier using the given gridspec and the given figure. 
Args: gridspec (GridSpec): the gridspec to use figure (Figure): the figure to generate subplots for positions (:class:`list`): if given, a list with grid spec indices for every requested axis """ self.gridspec = gridspec self.figure = figure self.positions = positions def get_axis(self, index): gridspec_ind = self.gridspec[index] if self.positions is not None: gridspec_ind = self.gridspec[self.positions[index]] return self.figure.add_subplot(gridspec_ind) class AutoGridLayout(GridLayout): def get_gridspec(self, figure, nmr_plots): rows, cols = self._get_row_cols_square(nmr_plots) return GridLayoutSpecifier(GridSpec(rows, cols, **self.spacings), figure) def _get_row_cols_square(self, nmr_plots): defaults = ((1, 1), (1, 2), (1, 3), (2, 2), (2, 3), (2, 3), (2, 3)) if nmr_plots < len(defaults): return defaults[nmr_plots - 1] else: cols = np.ceil(nmr_plots / 3.0) rows = np.ceil(float(nmr_plots) / cols) rows = int(rows) cols = int(cols) return rows, cols class Rectangular(GridLayout): def __init__(self, rows=None, cols=None, spacings=None): super(Rectangular, self).__init__(spacings=spacings) self.rows = rows self.cols = cols if self.rows is not None: self.rows = int(self.rows) if self.rows < 1: raise ValueError('The number of rows ({}) can not be smaller than 1.'.format(self.rows)) if self.cols is not None: self.cols = int(self.cols) if self.cols < 1: raise ValueError('The number of columns ({}) can not be smaller than 1.'.format(self.rows)) @classmethod def _get_attribute_conversions(cls): conversions = super(Rectangular, cls)._get_attribute_conversions() conversions.update({'rows': IntConversion(), 'cols': IntConversion()}) return conversions def get_gridspec(self, figure, nmr_plots): rows = self.rows cols = self.cols if rows is None and cols is None: return AutoGridLayout(spacings=self.spacings).get_gridspec(figure, nmr_plots) if rows is None: rows = int(np.ceil(nmr_plots / cols)) if cols is None: cols = int(np.ceil(nmr_plots / rows)) if rows * cols < nmr_plots: cols = int(np.ceil(nmr_plots / rows)) return GridLayoutSpecifier(GridSpec(rows, cols, **self.spacings), figure) def __eq__(self, other): if not isinstance(other, GridLayout): return NotImplemented return isinstance(other, type(self)) and other.rows == self.rows and other.cols == self.cols \ and other.spacings == self.spacings class LowerTriangular(GridLayout): def __init__(self, spacings=None): super(LowerTriangular, self).__init__(spacings=spacings) def get_gridspec(self, figure, nmr_plots): size, positions = self._get_size_and_position(nmr_plots) return GridLayoutSpecifier(GridSpec(size, size, **self.spacings), figure, positions=positions) def _get_size_and_position(self, nmr_plots): size = self._get_lowest_triangle_length(nmr_plots) positions = [] for x, y in itertools.product(range(size), range(size)): if x >= y: positions.append(x * size + y) return size, positions @staticmethod def _get_lowest_triangle_length(nmr_plots): for n in range(1, nmr_plots): if 0.5 * (n ** 2 + n) >= nmr_plots: return n return nmr_plots class SingleColumn(GridLayout): def get_gridspec(self, figure, nmr_plots): return GridLayoutSpecifier(GridSpec(nmr_plots, 1, **self.spacings), figure) class SingleRow(GridLayout): def get_gridspec(self, figure, nmr_plots): return GridLayoutSpecifier(GridSpec(1, nmr_plots, **self.spacings), figure) PKjUpI4T mdt/visualization/samples.pyimport matplotlib.pyplot as plt from matplotlib.gridspec import GridSpec from matplotlib.widgets import Slider from scipy.stats import norm import matplotlib.mlab as mlab __author__ = 
'Robbert Harms' __date__ = "2016-09-02" __maintainer__ = "Robbert Harms" __email__ = "robbert.harms@maastrichtuniversity.nl" class SampleVisualizer(object): def __init__(self, voxels): self._voxels = voxels self.voxel_ind = 0 self.maps_to_show = sorted(self._voxels.keys()) self.names = {} self._figure = None self.show_sliders = True self._max_voxel_ind = 0 self._updating_sliders = False self._voxel_slider = None self._show_trace = True self._nmr_bins = 30 self._show_slider = True self._fit_gaussian = True def show(self, voxel_ind=0, names=None, maps_to_show=None, to_file=None, block=True, maximize=False, show_trace=True, nmr_bins=20, window_title=None, show_sliders=True, fit_gaussian=True, figure_options=None): """Show the samples per voxel. Args: voxel_ind (int): the voxel to show the samples from. names (dict): A list of names for the different maps. Use as ``{map_name: display_name}`` that is, the key is the name of the map in the volumes dictionary and the display name is the string that will be used as title for that map. maps_to_show (:class:`list`): A list of maps to show. The items in this list must correspond to the keys in the volumes dictionary. to_file (string, optional, default None): If to_file is not None it is supposed to be a filename where the image will be saved. If not set to None, nothing will be displayed, the results will directly be saved. Already existing items will be overwritten. block (boolean): If we want to block after calling the plots or not. Set this to False if you do not want the routine to block after drawing. In doing so you manually need to block. maximize (boolean): if we want to display the window maximized or not show_trace (boolean): if we show the trace of each map or not nmr_bins (dict or int): either a single value or one per map name show_sliders (boolean): if we show the slider or not fit_gaussian (boolean): if we fit and show a normal distribution (Gaussian) to the histogram or not window_title (str): the title of the window. 
If None, the default title is used figure_options (dict) options for the figure """ figure_options = figure_options or {'figsize': (18, 16)} self._figure = plt.figure(**figure_options) if names: self.names = names if maps_to_show: self.maps_to_show = maps_to_show self.voxel_ind = voxel_ind self._nmr_bins = nmr_bins or self._nmr_bins self._show_trace = show_trace self.show_sliders = show_sliders self._fit_gaussian = fit_gaussian self._setup() if maximize: mng = plt.get_current_fig_manager() mng.window.showMaximized() if window_title: mng = plt.get_current_fig_manager() mng.canvas.set_window_title(window_title) if to_file: plt.savefig(to_file) plt.close() else: plt.draw() if block: plt.show(True) def set_voxel(self, voxel_ind): voxel_ind = round(voxel_ind) if not self._updating_sliders: self._updating_sliders = True self.voxel_ind = int(round(voxel_ind)) self._voxel_slider.set_val(voxel_ind) self._rerender() self._voxel_slider.set_val(voxel_ind) self._updating_sliders = False def _setup(self): self._rerender() self._max_voxel_ind = max([self._voxels[map_name].shape[0] for map_name in self.maps_to_show]) y_positions = [0.008] if self.show_sliders: ax = self._figure.add_axes([0.25, y_positions[0], 0.5, 0.01], axisbg='Wheat') self._voxel_slider = _DiscreteSlider(ax, 'Voxel', 0, self._max_voxel_ind - 1, valinit=self.voxel_ind, valfmt='%i', color='DarkSeaGreen', closedmin=True, closedmax=True) self._voxel_slider.on_changed(self.set_voxel) def _rerender(self): nmr_maps = len(self.maps_to_show) if self._show_trace: nmr_maps *= 2 grid = GridSpec(nmr_maps, 1, left=0.04, right=0.96, top=0.94, bottom=0.06, hspace=0.2) i = 0 for map_name in self.maps_to_show: samples = self._voxels[map_name] title = map_name if map_name in self.names: title = self.names[map_name] if isinstance(self._nmr_bins, dict) and map_name in self._nmr_bins: nmr_bins = self._nmr_bins[map_name] else: nmr_bins = self._nmr_bins hist_plot = plt.subplot(grid[i]) n, bins, patches = hist_plot.hist(samples[self.voxel_ind, :], nmr_bins, normed=True) plt.title(title) i += 1 if self._fit_gaussian: mu, sigma = norm.fit(samples[self.voxel_ind, :]) bincenters = 0.5*(bins[1:] + bins[:-1]) y = mlab.normpdf(bincenters, mu, sigma) hist_plot.plot(bincenters, y, 'r', linewidth=1) if self._show_trace: trace_plot = plt.subplot(grid[i]) trace_plot.plot(samples[self.voxel_ind, :]) i += 1 class _DiscreteSlider(Slider): """A matplotlib slider widget with discrete steps.""" def __init__(self, *args, **kwargs): """Identical to Slider.__init__, except for the "increment" and kwarg. Args: increment (float): specifies the step size that the slider will be discritized to.""" self.inc = kwargs.pop('increment', 0.25) Slider.__init__(self, *args, **kwargs) def set_max(self, new_max): orig_val = self.val self.set_val(self.valmin) self.valmax = new_max self.ax.set_xlim((self.valmin, self.valmax)) if orig_val >= new_max: self.set_val((new_max + self.valmin) / 2.0) else: self.set_val(orig_val) def set_val(self, val): discrete_val = int(val / self.inc) * self.inc # We can't just call Slider.set_val(self, discrete_val), because this # will prevent the slider from updating properly (it will get stuck at # the first step and not "slide"). Instead, we'll keep track of the # the continuous value as self.val and pass in the discrete value to # everything else. 
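# Worked example (editor comment, illustrative values): with the default increment of 0.25 a raw slider value of 3.6 gives discrete_val = int(3.6 / 0.25) * 0.25 == 3.5; only the displayed text and the value forwarded to observers are snapped to the increment, while self.val keeps the raw value so the slider keeps moving smoothly.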
xy = self.poly.xy xy[2] = discrete_val, 1 xy[3] = discrete_val, 0 self.poly.xy = xy self.valtext.set_text(self.valfmt % discrete_val) if self.drawon: self.ax.figure.canvas.draw() self.val = val if not self.eventson: return for cid, func in self.observers.items(): func(discrete_val) PKjUpImdt/visualization/utils.pyimport numpy as np from matplotlib.ticker import LinearLocator __author__ = 'Robbert Harms' __date__ = "2014-02-14" __license__ = "LGPL v3" __maintainer__ = "Robbert Harms" __email__ = "robbert.harms@maastrichtuniversity.nl" class MyColourBarTickLocator(LinearLocator): def __init__(self, min_val,max_val, **kwargs): super(MyColourBarTickLocator, self).__init__(**kwargs) self.min_val = min_val self.max_val = max_val def __call__(self): locations = LinearLocator.__call__(self) new_locations = [] for location in locations: if np.absolute(location) < 0.01: new_locations.append(float("{:.1e}".format(location))) else: new_locations.append(np.round(location, 2)) if np.isclose(new_locations[-1], self.max_val) or new_locations[-1] >= self.max_val: new_locations[-1] = self.max_val if new_locations[0] <= self.min_val: new_locations[0] = self.min_val return new_locations PKjUpIumdt/visualization/__init__.py__author__ = 'Robbert Harms' __date__ = "2016-09-02" __maintainer__ = "Robbert Harms" __email__ = "robbert.harms@maastrichtuniversity.nl" PKjUpIOag{$mdt/visualization/dict_conversion.pyfrom collections import MutableMapping __author__ = 'Robbert Harms' __date__ = "2016-09-03" __maintainer__ = "Robbert Harms" __email__ = "robbert.harms@maastrichtuniversity.nl" class ConversionSpecification(object): def __init__(self): """Specifies how the content of an object is to be converted from and to a dictionary.""" def to_dict(self, obj): """Convert the given value to a dictionary. Args: obj (object): the value to convert to a dictionary Returns: dict: the resulting converted dictionary """ def from_dict(self, value): """Generate a result value from the given dictionary Args: value (object): the dictionary to convert back to a value Returns: object: the value represented by the dict. """ class SimpleClassConversion(ConversionSpecification): def __init__(self, class_type, attribute_conversions): super(SimpleClassConversion, self).__init__() self._class_type = class_type self._attribute_conversions = attribute_conversions def to_dict(self, obj): result_dict = {} conversion_info = self._attribute_conversions for key, converter in conversion_info.items(): result_dict[key] = converter.to_dict(getattr(obj, key)) return result_dict def from_dict(self, value): init_kwargs = {} for key, converter in self._attribute_conversions.items(): if key in value: init_kwargs[key] = converter.from_dict(value[key]) return self._class_type(**init_kwargs) class ConvertDictElements(ConversionSpecification): def __init__(self, conversion_type): """Converts all the elements in the value (a dictionary) using the given conversion type.""" super(ConvertDictElements, self).__init__() self._conversion_type = conversion_type def to_dict(self, obj): return {key: self._conversion_type.to_dict(v) for key, v in obj.items()} def from_dict(self, value): return {key: self._conversion_type.from_dict(v) for key, v in value.items()} class ConvertDynamicFromModule(ConversionSpecification): def __init__(self, module): """Performs dynamic lookup by loading the class from the given module. This requires that the class we are dynamically loading has a get_conversion_info() class method that returns the conversion specification for that class. 
Args: module (module): the python module to use for loading the data from dict """ super(ConvertDynamicFromModule, self).__init__() self._module = module def to_dict(self, obj): return [obj.__class__.__name__, obj.get_conversion_info().to_dict(obj)] def from_dict(self, value): try: cls = getattr(self._module, value[0]) except AttributeError: raise ValueError('The given class "{}" could not be found.'.format(value[0])) return cls.get_conversion_info().from_dict(value[1]) class IdentityConversion(ConversionSpecification): def __init__(self, desired_type=None, allow_null=True): """Performs identity conversion between simple types. Args: desired_type (:class:`type`): if not None we cast the from_dict value to the given type allow_null (bool): if True we allow None during type casting """ super(IdentityConversion, self).__init__() self._desired_type = desired_type self._allow_none = allow_null def to_dict(self, obj): if obj is None: if self._allow_none: return None else: raise ValueError('The object is supposed to be not None.') else: if self._desired_type: return self._desired_type(obj) return obj def from_dict(self, value): if value is None: if self._allow_none: return None else: raise ValueError('The object is supposed to be not None.') else: if self._desired_type: return self._desired_type(value) return value class StringConversion(IdentityConversion): def __init__(self, allow_null=True): super(StringConversion, self).__init__(str, allow_null=allow_null) class SimpleDictConversion(IdentityConversion): def __init__(self, desired_type=None, allow_null=True): """Converts all the objects in the given dict. Args: desired_type (:class:`type`): if not None we cast the from_dict value to the given type allow_null (bool): if True we allow None during type casting """ super(IdentityConversion, self).__init__() self._desired_type = desired_type self._allow_none = allow_null def to_dict(self, obj): if obj is None: if self._allow_none: return None else: raise ValueError('The object is supposed to be not None.') else: if self._desired_type and isinstance(obj, MutableMapping): return {key: self._desired_type(v) for key, v in obj.items()} return obj def from_dict(self, value): if value is None: if self._allow_none: return None else: raise ValueError('The object is supposed to be not None.') else: if self._desired_type and isinstance(value, MutableMapping): return {key: self._desired_type(v) for key, v in value.items()} return value class IntConversion(IdentityConversion): def __init__(self, allow_null=True): super(IntConversion, self).__init__(int, allow_null=allow_null) class FloatConversion(IdentityConversion): def __init__(self, allow_null=True): super(FloatConversion, self).__init__(float, allow_null=allow_null) class SimpleListConversion(IdentityConversion): def __init__(self, allow_null=True): super(SimpleListConversion, self).__init__(list, allow_null=allow_null) class BooleanConversion(IdentityConversion): def __init__(self, allow_null=True): super(BooleanConversion, self).__init__(bool, allow_null=allow_null) class WhiteListConversion(ConversionSpecification): def __init__(self, white_list, default): """Allow only elements from the given white list. If the element is not one of them, revert to the default. 
Args: white_list (list of object): list of allowable objects default (object): the default fallback object """ super(WhiteListConversion, self).__init__() self.white_list = white_list self.default = default def to_dict(self, obj): if obj not in self.white_list: return self.default return obj def from_dict(self, value): if value not in self.white_list: return self.default return value PK=~I$$mdt/visualization/maps/base.pyimport glob import warnings from copy import deepcopy import nibabel import numpy as np import yaml import matplotlib.font_manager import mdt import mdt.visualization.layouts from mdt import nifti from mdt.deferred_mappings import DeferredActionDict from mdt.visualization.dict_conversion import StringConversion, \ SimpleClassConversion, IntConversion, SimpleListConversion, BooleanConversion, \ ConvertDictElements, ConvertDynamicFromModule, FloatConversion, WhiteListConversion from mdt.visualization.layouts import Rectangular __author__ = 'Robbert Harms' __date__ = "2016-09-02" __maintainer__ = "Robbert Harms" __email__ = "robbert.harms@maastrichtuniversity.nl" class MapPlotConfig(object): def __init__(self, dimension=2, slice_index=0, volume_index=0, rotate=90, colormap='hot', maps_to_show=None, font=None, grid_layout=None, colorbar_nmr_ticks=10, show_axis=True, zoom=None, map_plot_options=None, interpolation='bilinear', flipud=None, title=None, mask_name=None): """Container for all plot related settings. Args: dimension (int): the dimension we are viewing slice_index (int): the slice in the dimension we are viewing volume_index (int): in the case of multiple volumes (4th dimension) which index we are in. rotate (int): the rotation factor, multiple of 90. By default we rotate 90 degrees to show most in-vivo datasets in a natural way. 
colormap (str): the name of the colormap to use maps_to_show (list of str): the names of the maps to show font (int): the font settings grid_layout (GridLayout): the layout of the grid colorbar_nmr_ticks (int): the number of ticks on the colorbar show_axis (bool): if we show the axis or not zoom (Zoom): the zoom setting for all the plots map_plot_options (dict): per map the map specific plot options interpolation (str): one of the available interpolations flipud (boolean): if True we flip the image upside down title (str): the title to this plot mask_name (str): the name of the mask to apply to the maps prior to display """ super(MapPlotConfig, self).__init__() self.dimension = dimension self.slice_index = slice_index self.volume_index = volume_index self.rotate = rotate self.colormap = colormap self.maps_to_show = maps_to_show or [] self.zoom = zoom or Zoom.no_zoom() self.font = font or Font() self.colorbar_nmr_ticks = colorbar_nmr_ticks self.show_axis = show_axis if self.show_axis is None: self.show_axis = True self.grid_layout = grid_layout or Rectangular() self.interpolation = interpolation or 'bilinear' self.flipud = flipud if self.flipud is None: self.flipud = False self.map_plot_options = map_plot_options or {} self.title = title self.mask_name = mask_name if interpolation not in self.get_available_interpolations(): raise ValueError('The given interpolation ({}) is not supported.'.format(interpolation)) if self.colormap not in self.get_available_colormaps(): raise ValueError('The given colormap ({}) is not supported.'.format(self.colormap)) if self.rotate not in [0, 90, 180, 270]: raise ValueError('The given rotation ({}) is not supported, use 90 ' 'degree angles within 360.'.format(self.rotate)) if self.dimension is None: raise ValueError('The dimension can not be None.') if self.slice_index is None: raise ValueError('The slice index can not be None.') if self.volume_index is None: raise ValueError('The volume index can not be None.') if self.rotate is None: raise ValueError('The rotation can not be None.') if self.dimension < 0: raise ValueError('The dimension can not be smaller than 0, {} given.'.format(self.dimension)) @classmethod def get_available_interpolations(cls): return get_available_interpolations() @classmethod def get_available_colormaps(cls): return get_available_colormaps() @classmethod def get_conversion_info(cls): return SimpleClassConversion(cls, cls._get_attribute_conversions()) @classmethod def _get_attribute_conversions(cls): return {'dimension': IntConversion(), 'slice_index': IntConversion(), 'volume_index': IntConversion(), 'rotate': IntConversion(), 'colormap': StringConversion(), 'maps_to_show': SimpleListConversion(), 'zoom': Zoom.get_conversion_info(), 'font': Font.get_conversion_info(), 'colorbar_nmr_ticks': IntConversion(), 'show_axis': BooleanConversion(), 'map_plot_options': ConvertDictElements(SingleMapConfig.get_conversion_info()), 'grid_layout': ConvertDynamicFromModule(mdt.visualization.layouts), 'interpolation': WhiteListConversion(cls.get_available_interpolations(), 'bilinear'), 'flipud': BooleanConversion(allow_null=False), 'title': StringConversion(), 'mask_name': StringConversion() } @classmethod def from_yaml(cls, text): return cls.get_conversion_info().from_dict(yaml.load(text)) @classmethod def from_dict(cls, config_dict): return cls.get_conversion_info().from_dict(config_dict) def to_dict(self): return self.get_conversion_info().to_dict(self) def to_yaml(self): return yaml.safe_dump(self.get_conversion_info().to_dict(self)) def 
visible_changes(self, old_config): """Checks if there are any visible changes between this configuration and the other. This method can implement knowledge that allows the visualization routine to check if it would need to update the plot or not. It expects that the configuration you wish to display is the one on which this method is called. Args: old_config (MapPlotConfig): the previous configuration Returns: bool: if the differences between this configuration and the other would result in visible differences. """ def visible_difference_in_map_plot_options(): for key in set(self.map_plot_options.keys()): if key in self.maps_to_show: if key not in old_config.map_plot_options: return True if self.map_plot_options[key].visible_changes(old_config.map_plot_options[key]): return True return False if any(getattr(self, key) != getattr(old_config, key) for key in filter(lambda key: key != 'map_plot_options', self.__dict__)): return True return visible_difference_in_map_plot_options() def validate(self, data_info): if data_info.maps: self._validate_maps_to_show(data_info) self._validate_dimension(data_info) for key in self.__dict__: if hasattr(self, '_validate_' + key): getattr(self, '_validate_' + key)(data_info) return self def _validate_maps_to_show(self, data_info): if any(map(lambda k: k not in data_info.maps, self.maps_to_show)): raise ValueError('One or more of the given maps to show is not in the data: {}'. format(set(self.maps_to_show).difference(set(data_info.maps)))) def _validate_dimension(self, data_info): max_dim = data_info.get_max_dimension(map_names=self.maps_to_show) if self.dimension is None or self.dimension > max_dim: raise ValueError('The dimension ({}) can not be higher than {}.'.format(self.dimension, max_dim)) def _validate_slice_index(self, data_info): max_slice_index = data_info.get_max_slice_index(self.dimension, map_names=self.maps_to_show) if self.slice_index is None or self.slice_index > max_slice_index or self.slice_index < 0: raise ValueError('The slice index ({}) can not be higher than ' '{} or lower than 0.'.format(self.slice_index, max_slice_index)) def _validate_volume_index(self, data_info): max_volume_index = data_info.get_max_volume_index(map_names=self.maps_to_show) if self.volume_index > max_volume_index or self.volume_index < 0: raise ValueError('The volume index ({}) can not be higher than ' '{} or lower than 0.'.format(self.volume_index, max_volume_index)) def _validate_zoom(self, data_info): max_x = data_info.get_max_x_index(self.dimension, self.rotate) max_y = data_info.get_max_y_index(self.dimension, self.rotate) if self.zoom.p1.x > max_x: raise ValueError('The zoom maximum x ({}) can not be larger than {}'.format(self.zoom.p1.x, max_x)) if self.zoom.p1.y > max_y: raise ValueError('The zoom maximum y ({}) can not be larger than {}'.format(self.zoom.p1.y, max_y)) def _validate_mask_name(self, data_info): if self.mask_name: if self.mask_name not in data_info.maps: raise ValueError('The given global mask is not found in the list of maps.') def _validate_map_plot_options(self, data_info): for key in self.map_plot_options: if key not in data_info.maps: del self.map_plot_options[key] for key, value in self.map_plot_options.items(): if value is not None: self.map_plot_options[key] = value.validate(data_info) def __repr__(self): return str(self.get_conversion_info().to_dict(self)) def __eq__(self, other): if not isinstance(other, MapPlotConfig): return NotImplemented for key, value in self.__dict__.items(): if value != getattr(other, key): return False return 
True def __ne__(self, other): return not self.__eq__(other) class SingleMapConfig(object): def __init__(self, title=None, scale=None, clipping=None, colormap=None, colorbar_label=None, title_spacing=None, mask_name=None): """Creates the configuration for a single map plot. Args: title (str): the title of this plot, can contain latex using the matplotlib latex syntax scale (Scale): the scaling for the values in this map clipping (Clipping): the clipping to apply to the values prior to plotting colormap (str): the matplotlib colormap to use colorbar_label (str): the label for the colorbar title_spacing (float): the spacing between the top of the plots and the title mask_name (str): the name of the mask used to mask the data prior to visualization """ super(SingleMapConfig, self).__init__() self.title = title self.title_spacing = title_spacing self.scale = scale or Scale() self.clipping = clipping or Clipping() self.colormap = colormap self.colorbar_label = colorbar_label self.mask_name = mask_name if self.colormap is not None and self.colormap not in self.get_available_colormaps(): raise ValueError('The given colormap ({}) is not supported.'.format(self.colormap)) @classmethod def get_conversion_info(cls): return SimpleClassConversion(cls, cls._get_attribute_conversions()) @classmethod def _get_attribute_conversions(cls): return {'title': StringConversion(), 'scale': Scale.get_conversion_info(), 'clipping': Clipping.get_conversion_info(), 'colormap': StringConversion(), 'colorbar_label': StringConversion(), 'title_spacing': FloatConversion(), 'mask_name': StringConversion()} @classmethod def get_available_colormaps(cls): return get_available_colormaps() @classmethod def from_yaml(cls, text): return cls.get_conversion_info().from_dict(yaml.load(text)) @classmethod def from_dict(cls, config_dict): return cls.get_conversion_info().from_dict(config_dict) def to_dict(self): return self.get_conversion_info().to_dict(self) def to_yaml(self): return yaml.safe_dump(self.get_conversion_info().to_dict(self)) def visible_changes(self, old_config): """Checks if there are any visible changes between this configuration and the other. This method can implement knowledge that allows the visualization routine to check if it would need to update the plot or not. It expects that the configuration you wish to display is the one on which this method is called. Args: old_config (SingleMapConfig): the previous configuration Returns: bool: if the differences between this configuration and the other would result in visible differences. 
""" def filtered_attributes(): filtered = ['scale', 'clipping'] return [key for key in self.__dict__ if key not in filtered] def visible_changes_in_scale(): return self.scale.visible_changes(old_config.scale) def visible_changes_in_clipping(): return self.clipping.visible_changes(old_config.clipping) if any(getattr(self, key) != getattr(old_config, key) for key in filtered_attributes()): return True return visible_changes_in_clipping() or visible_changes_in_scale() def validate(self, data_info): for key in self.__dict__: if hasattr(self, '_validate_' + key): getattr(self, '_validate_' + key)(data_info) return self def __repr__(self): return str(self.get_conversion_info().to_dict(self)) def __eq__(self, other): if not isinstance(other, SingleMapConfig): return NotImplemented for key, value in self.__dict__.items(): if value != getattr(other, key): return False return True def __ne__(self, other): return not self.__eq__(other) class Zoom(object): def __init__(self, p0, p1): """Container for zooming a map between the two given points. Args: p0 (Point): the lower left corner of the zoomed area p1 (Point): the upper right corner of the zoomed area """ self.p0 = p0 self.p1 = p1 if p0.x > p1.x or p0.y > p1.y: raise ValueError('The lower left point ({}, {}) should be smaller than the upper right point ({}, {})'. format(p0.x, p0.y, p1.x, p1.y)) if p0.x < 0 or p0.y < 0 or p1.x < 0 or p1.y < 0: raise ValueError('The zoom box ({}, {}), ({}, {}) can not ' 'be negative in any way.'.format(p0.x, p0.y, p1.x, p1.y)) if self.p0 is None or self.p1 is None: raise ValueError('One of the zoom points is None.') @classmethod def from_coords(cls, x0, y0, x1, y1): return cls(Point(x0, y0), Point(x1, y1)) @classmethod def no_zoom(cls): return cls(Point(0, 0), Point(0, 0)) @classmethod def get_conversion_info(cls): return SimpleClassConversion(cls, cls._get_attribute_conversions()) @classmethod def _get_attribute_conversions(cls): point_converter = Point.get_conversion_info() return {'p0': point_converter, 'p1': point_converter} def get_rotated(self, rotation, x_dimension, y_dimension): """Return a new Zoom instance rotated with the given factor. This rotates the zoom box in the same way as the image is rotated. Args: rotation (int): the rotation by which to rotate in steps of 90 degrees x_dimension (int): the dimension of the image in the x coordinate y_dimension (int): the dimension of the image in the y coordinate Returns: Zoom: the rotated instance """ dimensions = [x_dimension, y_dimension] p0 = self.p0 p1 = self.p1 nmr_90_rotations = rotation % 360 // 90 for _ in range(nmr_90_rotations): dimensions = np.roll(dimensions, 1) new_p0 = Point(np.min([dimensions[0] - p0.y, dimensions[0] - p1.y]), np.min([p0.x, p1.x])) new_p1 = Point(np.max([dimensions[0] - p0.y, dimensions[0] - p1.y]), np.max([p0.x, p1.x])) p0 = new_p0 p1 = new_p1 if p0.x >= dimensions[0] - 1 or p0.x < 0: p0 = p0.get_updated(x=0) if p0.y >= dimensions[1] - 1 or p0.y < 0: p0 = p0.get_updated(y=0) if p1.x >= dimensions[0] - 1: p1 = p1.get_updated(x=dimensions[0] - 1) if p1.y >= dimensions[1] - 1: p1 = p1.get_updated(y=dimensions[1] - 1) return Zoom(p0, p1) def apply(self, data): """Apply the zoom to the given 2d array and return the new array. 
Args: data (ndarray): the data to zoom in on """ correct = self.p0.x < data.shape[1] and self.p1.x < data.shape[1] \ and self.p0.y < data.shape[0] and self.p1.y < data.shape[0] \ and self.p0.x < self.p1.x and self.p0.y < self.p1.y if correct: return data[self.p0.y:self.p1.y, self.p0.x:self.p1.x] return data def __repr__(self): return str(self.get_conversion_info().to_dict(self)) def __eq__(self, other): if not isinstance(other, Zoom): return NotImplemented for key, value in self.__dict__.items(): if value != getattr(other, key): return False return True def __ne__(self, other): return not self.__eq__(other) class Point(object): def __init__(self, x, y): """Container for a single point""" self.x = x self.y = y def get_updated(self, **kwargs): """Get a new Point object with updated arguments. Args: **kwargs (dict): the new keyword values, when given these take precedence over the current ones. Returns: Point: a new scale with updated values. """ new_values = dict(x=self.x, y=self.y) new_values.update(**kwargs) return Point(**new_values) @classmethod def get_conversion_info(cls): return SimpleClassConversion(cls, cls._get_attribute_conversions()) @classmethod def _get_attribute_conversions(cls): return {'x': IntConversion(allow_null=False), 'y': IntConversion(allow_null=False)} def rotate90(self, nmr_rotations): """Rotate this point around a 90 degree angle Args: nmr_rotations (int): the number of 90 degreee rotations, can be negative Returns: Point: the rotated point """ def rotate_coordinate(x, y, nmr_rotations): rotation_matrix = np.array([[0, -1], [1, 0]]) rx, ry = x, y for rotation in range(1, nmr_rotations + 1): rx, ry = rotation_matrix.dot([rx, ry]) return rx, ry return Point(*rotate_coordinate(self.x, self.y, nmr_rotations)) def __repr__(self): return 'Point(x={}, y={})'.format(self.x, self.y) def __eq__(self, other): if not isinstance(other, Point): return NotImplemented for key, value in self.__dict__.items(): if value != getattr(other, key): return False return True def __ne__(self, other): return not self.__eq__(other) class Clipping(object): def __init__(self, vmin=0, vmax=0, use_min=False, use_max=False): """Container for the map clipping information""" self.vmin = vmin self.vmax = vmax self.use_min = use_min self.use_max = use_max if use_min and use_max and vmin > vmax: raise ValueError('The minimum clipping ({}) can not be larger than the maximum clipping({})'.format( vmin, vmax)) def apply(self, data): """Apply the clipping to the given 2d array and return the new array. Args: data (ndarray): the data to clip """ if self.use_max or self.use_min: clipping_min = data.min() if self.use_min: clipping_min = self.vmin clipping_max = data.max() if self.use_max: clipping_max = self.vmax return np.clip(data, clipping_min, clipping_max) return data def visible_changes(self, old_clipping): """Checks if there are any visible changes between this clipping and the other. This method can implement knowledge that allows the visualization routine to check if it would need to update the plot or not. It expects that the clipping you wish to use is the one on which this method is called. Args: old_clipping (Clipping): the previous clipping Returns: bool: if the differences between this clipping and the other would result in visible differences. 
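Examples:
    An illustrative sketch; both clippings below are constructed only for
    demonstration. A changed ``vmax`` is only visible when it is actually in use:

        >>> new_clipping = Clipping(vmax=1, use_max=True)
        >>> old_clipping = Clipping(vmax=5, use_max=True)
        >>> new_clipping.visible_changes(old_clipping)
        True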
""" if self.use_min and not old_clipping.use_min: return True if self.use_max and not old_clipping.use_max: return True def visible_changes_in_min(): if self.vmin == old_clipping.vmin: return False else: return self.use_min def visible_changes_in_max(): if self.vmax == old_clipping.vmax: return False else: return self.use_max return visible_changes_in_max() or visible_changes_in_min() def get_updated(self, **kwargs): """Get a new Clipping object with updated arguments. Args: **kwargs (dict): the new keyword values, when given these take precedence over the current ones. Returns: Clipping: a new scale with updated values. """ new_values = dict(vmin=self.vmin, vmax=self.vmax, use_min=self.use_min, use_max=self.use_max) new_values.update(**kwargs) return Clipping(**new_values) @classmethod def get_conversion_info(cls): return SimpleClassConversion(cls, cls._get_attribute_conversions()) @classmethod def _get_attribute_conversions(cls): return {'vmax': FloatConversion(allow_null=False), 'vmin': FloatConversion(allow_null=False), 'use_min': BooleanConversion(allow_null=False), 'use_max': BooleanConversion(allow_null=False)} def __eq__(self, other): if not isinstance(other, Clipping): return NotImplemented for key, value in self.__dict__.items(): if value != getattr(other, key): return False return True def __ne__(self, other): return not self.__eq__(other) class Scale(object): def __init__(self, vmin=0, vmax=0, use_min=False, use_max=False): """Container the map scaling information""" self.vmin = vmin self.vmax = vmax self.use_min = use_min self.use_max = use_max if use_min and use_max and vmin > vmax: raise ValueError('The minimum scale ({}) can not be larger than the maximum scale ({})'.format(vmin, vmax)) @classmethod def get_conversion_info(cls): return SimpleClassConversion(cls, cls._get_attribute_conversions()) @classmethod def _get_attribute_conversions(cls): return {'vmax': FloatConversion(allow_null=False), 'vmin': FloatConversion(allow_null=False), 'use_min': BooleanConversion(allow_null=False), 'use_max': BooleanConversion(allow_null=False)} def visible_changes(self, old_scale): """Checks if there are any visible changes between this scale and the other. This method can implement knowledge that allows the visualization routine to check if it would need to update the plot or not. It expects that the scale you wish to use is the one on which this method is called. Args: old_scale (Scale): the previous scale Returns: bool: if the differences between this scale and the other would result in visible differences. """ if self.use_min and not old_scale.use_min: return True if self.use_max and not old_scale.use_max: return True def visible_changes_in_min(): if self.vmin == old_scale.vmin: return False else: return self.use_min def visible_changes_in_max(): if self.vmax == old_scale.vmax: return False else: return self.use_max return visible_changes_in_max() or visible_changes_in_min() def get_updated(self, **kwargs): """Get a new Scale object with updated arguments. Args: **kwargs (dict): the new keyword values, when given these take precedence over the current ones. Returns: Scale: a new scale with updated values. 
""" new_values = dict(vmin=self.vmin, vmax=self.vmax, use_min=self.use_min, use_max=self.use_max) new_values.update(**kwargs) return Scale(**new_values) def __eq__(self, other): if not isinstance(other, Scale): return NotImplemented for key, value in self.__dict__.items(): if value != getattr(other, key): return False return True def __ne__(self, other): return not self.__eq__(other) class Font(object): def __init__(self, family='sans-serif', size=14): """Information about the font to use Args: name: the name of the font to use size: the size of the font (> 0). """ self.family = family self.size = size if family not in self.font_names(): raise ValueError("The given font \"{}\" is not recognized.".format(family)) if size < 1: raise ValueError("The size ({}) can not be smaller than 1".format(str(size))) def get_updated(self, **kwargs): """Get a new Font object with updated arguments. Args: **kwargs (dict): the new keyword values, when given these take precedence over the current ones. Returns: Font: a new Font with updated values. """ new_values = dict(family=self.family, size=self.size) new_values.update(**kwargs) return Font(**new_values) @property def name(self): return self.family @classmethod def font_names(cls): """Get the name of supported fonts Returns: list of str: the name of the supported fonts and font families. """ with warnings.catch_warnings(): warnings.simplefilter("ignore") fonts = matplotlib.font_manager.get_fontconfig_fonts() names = [matplotlib.font_manager.FontProperties(fname=font_name).get_name() for font_name in fonts] return list(sorted(['sans-serif', 'serif', 'cursive', 'fantasy', 'monospace'])) + list(sorted(names)) @classmethod def get_conversion_info(cls): return SimpleClassConversion(cls, cls._get_attribute_conversions()) @classmethod def _get_attribute_conversions(cls): return {'family': StringConversion(), 'size': IntConversion()} def __eq__(self, other): if not isinstance(other, Font): return NotImplemented for key, value in self.__dict__.items(): if value != getattr(other, key): return False return True def __ne__(self, other): return not self.__eq__(other) class DataInfo(object): def __init__(self, maps, directory=None): """A container for basic information about the volume maps we are viewing. Args: maps (dict): the dictionary with the maps to view, these maps can either be arrays with values or nibabel proxy images. directory (str): the directory from which the maps where loaded """ self._maps = maps self.directory = directory self.map_info = {key: SingleMapInfo(key, value) for key, value in maps.items()} self.sorted_keys = list(sorted(maps.keys())) @property def maps(self): return DeferredActionDict(lambda k, v: self.map_info[k].data, self.map_info) @classmethod def from_dir(cls, directory): if directory is None: return cls({}, None) return cls(nifti.load_all_niftis(directory), directory) def get_file_name(self, map_name): """Get the file name of the given map Returns: None if the map could not be found on dir, else a string with the file path. """ if not self.directory: return None items = list(glob.glob(self.directory + '/{}.nii*'.format(map_name))) if items: return items[0] return None def get_max_dimension(self, map_names=None): """Get the minimum of the maximum dimension index over the maps Args: map_names (list of str): if given we will only scan the given list of maps Returns: int: either, 0, 1, 2 as the maximum dimension index in the maps. 
""" map_names = map_names or self._maps.keys() if not map_names: raise ValueError('No maps to search in.') return min(self.map_info[map_name].max_dimension() for map_name in map_names) def get_max_slice_index(self, dimension, map_names=None): """Get the maximum slice index in the given map on the given dimension. Args: dimension (int): the dimension we want the slice index of (maximum 3) map_names (list of str): if given we will only scan the given list of maps Returns: int: the minimum of the maximum slice indices over the given maps in the given dimension. """ map_names = map_names or self._maps.keys() max_dimension = self.get_max_dimension(map_names) if not map_names: raise ValueError('No maps to search in.') if dimension > max_dimension: raise ValueError('Dimension can not exceed {}.'.format(max_dimension)) return min(self.map_info[map_name].max_slice_index(dimension) for map_name in map_names) def get_max_volume_index(self, map_names=None): """Get the maximum volume index in the given maps. In contrast to the max dimension and max slice index functions, this gives the maximum over all the images. This since handling different volumes is implemented in the viewer. Args: map_names (list of str): if given we will only scan the given list of maps Returns: int: the maximum volume index in the given list of maps. Starts from 0. """ map_names = map_names or self._maps.keys() if not map_names: raise ValueError('No maps to search in.') return max(self.map_info[map_name].max_volume_index() for map_name in map_names) def get_index_first_non_zero_slice(self, dimension, map_names=None): """Get the index of the first non zero slice in the maps. Args: dimension (int): the dimension to search in map_names (list of str): if given we will only scan the given list of maps Returns: int: the slice index with the first non zero values. """ map_names = map_names or self._maps.keys() if not map_names: raise ValueError('No maps to search in.') for map_name in map_names: index = self.map_info[map_name].get_index_first_non_zero_slice(dimension) if index is not None: return index return 0 def slice_has_data(self, dimension, slice_index, map_names=None): """Check if at least one of the maps has non zero numbers on the given slice. Args: dimension (int): the dimension to search in slice_index (int): the index of the slice in the given dimension map_names (list of str): if given we will only scan the given list of maps Returns: bool: true if at least on of the maps has data in the given slice """ map_names = map_names or self._maps.keys() if not map_names: raise ValueError('No maps to search in.') for map_name in map_names: if self.map_info[map_name].slice_has_data(dimension, slice_index): return True return False def get_max_x_index(self, dimension, rotate=0, map_names=None): """Get the maximum x index supported over the images. In essence this gets the lowest x index found. Args: dimension (int): the dimension to search in rotate (int): the rotation factor by which we rotate the slices within the given dimension map_names (list of str): if given we will only scan the given list of maps Returns: int: the maximum x-index found. """ map_names = map_names or self._maps.keys() if not map_names: raise ValueError('No maps to search in.') return min(self.map_info[map_name].get_max_x_index(dimension, rotate) for map_name in map_names) def get_max_y_index(self, dimension, rotate=0, map_names=None): """Get the maximum y index supported over the images. In essence this gets the lowest y index found. 
Args: dimension (int): the dimension to search in rotate (int): the rotation factor by which we rotate the slices within the given dimension map_names (list of str): if given we will only scan the given list of maps Returns: int: the maximum y-index found. """ map_names = map_names or self._maps.keys() if not map_names: raise ValueError('No maps to search in.') return min(self.map_info[map_name].get_max_y_index(dimension, rotate) for map_name in map_names) def get_bounding_box(self, dimension, slice_index, volume_index, rotate, map_names=None): """Get the bounding box of the images. Args: dimension (int): the dimension to search in slice_index (int): the slice index in that dimension volume_index (int): the current volume index rotate (int): the angle by which to rotate the image before getting the bounding box map_names (list of str): if given we will only scan the given list of maps Returns: tuple of Point: two point designating first the upper left corner and second the lower right corner of the bounding box. """ map_names = map_names or self._maps.keys() if not map_names: raise ValueError('No maps to search in.') bounding_boxes = [self.map_info[map_name].get_bounding_box(dimension, slice_index, volume_index, rotate) for map_name in map_names] p0x = min([bbox[0].x for bbox in bounding_boxes]) p0y = min([bbox[0].y for bbox in bounding_boxes]) p1x = max([bbox[1].x for bbox in bounding_boxes]) p1y = max([bbox[1].y for bbox in bounding_boxes]) return Point(p0x, p0y), Point(p1x, p1y) class SingleMapInfo(object): def __init__(self, map_name, data): """Holds information about a single map. Args: map_name (str): the name of the map data (ndarray or :class:`nibabel.nifti1.Nifti1Image`): the value of the map or the proxy to it """ self.map_name = map_name self._data = data self.shape = self._data.shape @property def data(self): if isinstance(self._data, nibabel.nifti1.Nifti1Image): return self._data.get_data() return self._data def max_dimension(self): """Get the maximum dimension index in this map. The maximum value returned by this method is 2 and the minimum is 0. Returns: int: in the range 0, 1, 2 """ return min(len(self.shape), 3) - 1 def max_slice_index(self, dimension): """Get the maximum slice index on the given dimension. Args: dimension (int): the dimension we want the slice index of (maximum 3) Returns: int: the maximum slice index in the given dimension. """ return self.shape[dimension] - 1 def slice_has_data(self, dimension, slice_index): """Check if this map has non zero values in the given slice index. Args: dimension (int): the dimension we want the slice index of (maximum 3) slice_index (int): the slice index to look in Returns: int: the maximum slice index in the given dimension. """ slice_indexing = [slice(None)] * (self.max_dimension() + 1) slice_indexing[dimension] = slice_index return np.count_nonzero(self.data[slice_indexing]) def max_volume_index(self): """Get the maximum volume index in this map. The minimum is 0. Returns: int: the maximum volume index. """ if len(self.shape) > 3: return self.shape[3] - 1 return 0 def get_index_first_non_zero_slice(self, dimension): """Get the index of the first non zero slice in this map. Args: dimension (int): the dimension to search in Returns: int: the slice index with the first non zero values. 
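Examples:
    An illustrative sketch, assuming ``numpy`` is imported as ``np``:

        >>> data = np.zeros((4, 4, 4))
        >>> data[:, :, 2] = 1
        >>> SingleMapInfo('mask', data).get_index_first_non_zero_slice(dimension=2)
        2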
""" slice_index = [slice(None)] * (self.max_dimension() + 1) if dimension > len(slice_index) - 1: raise ValueError('The given dimension {} is not supported.'.format(dimension)) for index in range(self.shape[dimension]): slice_index[dimension] = index if np.count_nonzero(self.data[slice_index]) > 0: return index return 0 def get_max_x_index(self, dimension, rotate=0): """Get the maximum x index. Args: dimension (int): the dimension to search in rotate (int): the value by which to rotate the slices in the given dimension Returns: int: the maximum x index """ shape = list(self.shape)[0:3] del shape[dimension] if rotate // 90 % 2 == 0: return max(0, shape[1] - 1) return max(0, shape[0] - 1) def get_max_y_index(self, dimension, rotate=0): """Get the maximum y index. Args: dimension (int): the dimension to search in rotate (int): the value by which to rotate the slices in the given dimension Returns: int: the maximum y index """ shape = list(self.shape)[0:3] del shape[dimension] if rotate // 90 % 2 == 0: return max(0, shape[0] - 1) return max(0, shape[1] - 1) def get_size_in_dimension(self, dimension, rotate=0): """Get the shape of the 2d view on the data in the given dimension. This basically returns a pair of (max_x, max_y). Args: dimension (int): the dimension to search in rotate (int): the value by which to rotate the slices in the given dimension Returns: tuple: (max_x, max_y) """ return self.get_max_x_index(dimension, rotate), self.get_max_y_index(dimension, rotate) def get_bounding_box(self, dimension, slice_index, volume_index, rotate): """Get the bounding box of this map when displayed using the given indicing. Args: dimension (int): the dimension to search in slice_index (int): the slice index in that dimension volume_index (int): the current volume index rotate (int): the angle by which to rotate the image before getting the bounding box Returns: tuple of Point: two point designating first the upper left corner and second the lower right corner of the bounding box. """ def bbox(image): rows = np.any(image, axis=1) cols = np.any(image, axis=0) rows_where = np.where(rows) if np.size(rows_where): row_min, row_max = np.where(rows)[0][[0, -1]] column_min, column_max = np.where(cols)[0][[0, -1]] return row_min, row_max, column_min, column_max return 0, image.shape[0]-1, 0, image.shape[1]-1 slice_indexing = [slice(None)] * (self.max_dimension() + 1) slice_indexing[dimension] = slice_index image = self.data[slice_indexing] if len(image.shape) > 2: if image.shape[2] > 1: image = image[..., volume_index] else: image = image[..., 0] if rotate: image = np.rot90(image, rotate // 90) row_min, row_max, column_min, column_max = bbox(image) return Point(column_min, row_min), Point(column_max, row_max) def get_available_interpolations(): """The available interpolations for either the general map plot config or the map specifics. Do not call these for outside use, rather, consult the class method of the specific config you want to change. Returns: list of str: the list of available interpolations. """ return ['none', 'nearest', 'bilinear', 'bicubic', 'spline16', 'spline36', 'hanning', 'hamming', 'hermite', 'kaiser', 'quadric', 'catrom', 'gaussian', 'bessel', 'mitchell', 'sinc', 'lanczos'] def get_available_colormaps(): """The available colormaps for either the general map plot config or the map specifics. Do not call these for outside use, rather, consult the class method of the specific config you want to change. Returns: list of str: the list of available colormaps. 
""" return sorted(matplotlib.cm.datad) PK@~Ip7,,-mdt/visualization/maps/matplotlib_renderer.pyimport os import numpy as np from matplotlib import pyplot as plt from mpl_toolkits.axes_grid1 import make_axes_locatable from mdt import get_slice_in_dimension from mdt.visualization.maps.base import Clipping, Scale, Point from mdt.visualization.utils import MyColourBarTickLocator __author__ = 'Robbert Harms' __date__ = "2016-09-02" __maintainer__ = "Robbert Harms" __email__ = "robbert.harms@maastrichtuniversity.nl" class MapsVisualizer(object): def __init__(self, data_info, figure): self._data_info = data_info self._figure = figure def render(self, plot_config): """Render all the maps to the figure. This is for use in GUI embedded situations. Returns: list of AxisData: the list with the drawn axes and the accompanying data """ renderer = Renderer(self._data_info, self._figure, plot_config) renderer.render() return renderer.image_axes def to_file(self, file_name, plot_config, **kwargs): """Renders the figures to the given filename.""" Renderer(self._data_info, self._figure, plot_config).render() if not os.path.isdir(os.path.dirname(file_name)): os.makedirs(os.path.dirname(file_name)) kwargs['dpi'] = kwargs.get('dpi') or 300 self._figure.savefig(file_name, **kwargs) def show(self, plot_config, block=True, maximize=False, window_title=None): """Show the data contained in this visualizer using the specifics in this function call. Args: plot_config (mdt.visualization.maps.base.MapPlotConfig): the plot configuration block (boolean): If we want to block after calling the plots or not. Set this to False if you do not want the routine to block after drawing. In doing so you manually need to block. maximize (boolean): if we want to display the window maximized or not window_title (str): the title of the window. If None, the default title is used """ Renderer(self._data_info, self._figure, plot_config).render() if maximize: mng = plt.get_current_fig_manager() mng.window.showMaximized() if window_title: mng = plt.get_current_fig_manager() mng.canvas.set_window_title(window_title) if block: plt.show(True) class AxisData(object): def __init__(self, axis, map_name, map_info, plot_config): """Contains a reference to a drawn matpotlib axis and to the accompanying data. Args: axis (Axis): the matpotlib axis map_name (str): the name/key of this map map_info (SingleMapInfo): the map information plot_config (MapPlotConfig): the map plot configuration """ self.axis = axis self.map_name = map_name self._map_info = map_info self._plot_config = plot_config def coordinates_to_index(self, x, y): """Converts data coordinates to index coordinates of the array. Args: x (int): The x-coordinate in data coordinates. y (int): The y-coordinate in data coordinates. Returns x, y, z, v : Index coordinates of the map associated with the image. 
""" shape = self._map_info.get_size_in_dimension(self._plot_config.dimension, self._plot_config.rotate) # correct for zoom x += self._plot_config.zoom.p0.x y += self._plot_config.zoom.p0.y # correct for flip upside down if not self._plot_config.flipud: y = self._map_info.get_max_y_index(self._plot_config.dimension, self._plot_config.rotate) - y # correct for displayed axis, the view is x-data on y-image and y-data on x-image x, y = y, x # rotate the point rotated = Point(x, y).rotate90((-1 * self._plot_config.rotate % 360) // 90) # translate the point back to a new origin if self._plot_config.rotate == 90: rotated.y = shape[1] + rotated.y elif self._plot_config.rotate == 180: rotated.x = shape[1] + rotated.x rotated.y = shape[0] + rotated.y elif self._plot_config.rotate == 270: rotated.x = shape[0] + rotated.x # create the index index = [rotated.x, rotated.y] index.insert(self._plot_config.dimension, self._plot_config.slice_index) if len(self._map_info.data.shape) > 3: if self._plot_config.volume_index < self._map_info.data.shape[3]: index.append(self._plot_config.volume_index) else: index.append(self._map_info.data.shape[3] - 1) return index def get_value(self, index): """Get the value of this axis data at the given index. Args: index (tuple)): the 3d or 4d index to the map corresponding to this axis data (x, y, z, [v]) Returns: float: the value at the given index. """ return self._map_info.data[tuple(index)] class Renderer(object): def __init__(self, data_info, figure, plot_config): """Create a new renderer for the given volumes on the given figure using the given configuration. This renders the images with flipped upside down with the origin at the bottom left. The upside down flip is necessary to allow counter-clockwise rotation. Args: data_info (DataInfo): the information about the maps to show figure (Figure): the matplotlib figure to draw on plot_config (mdt.visualization.maps.base.MapPlotConfig): the plot configuration """ self._data_info = data_info self._figure = figure self._plot_config = plot_config self.image_axes = [] def render(self): """Render the maps""" grid_layout_specifier = self._plot_config.grid_layout.get_gridspec( self._figure, len(self._plot_config.maps_to_show)) if self._plot_config.title: self._figure.suptitle(self._plot_config.title, fontsize=self._plot_config.font.size, family=self._plot_config.font.name) for ind, map_name in enumerate(self._plot_config.maps_to_show): axis = grid_layout_specifier.get_axis(ind) axis_data = self._render_map(map_name, axis) self.image_axes.append(axis_data) def _render_map(self, map_name, axis): axis.set_title(self._get_title(map_name), y=self._get_title_spacing(map_name)) axis.axis('on' if self._plot_config.show_axis else 'off') data = self._get_image(map_name) if self._plot_config.rotate: data = np.rot90(data, self._plot_config.rotate // 90) if not self._plot_config.flipud: # by default we flipud to correct for matplotlib lower origin. 
If the user # sets flipud, we do not need to to it data = np.flipud(data) data = self._plot_config.zoom.apply(data) plot_options = self._get_map_plot_options(map_name) plot_options['origin'] = 'lower' plot_options['interpolation'] = self._plot_config.interpolation vf = axis.imshow(data, **plot_options) divider = make_axes_locatable(axis) colorbar_axis = divider.append_axes("right", size="5%", pad=0.05) self._add_colorbar(map_name, colorbar_axis, vf, self._get_map_attr(map_name, 'colorbar_label')) self._apply_font(axis, colorbar_axis) return AxisData(axis, map_name, self._data_info.map_info[map_name], self._plot_config) def _apply_font(self, image_axis, colorbar_axis): items = [image_axis.xaxis.label, image_axis.yaxis.label] items.extend(image_axis.get_xticklabels()) items.extend(image_axis.get_yticklabels()) items.extend(colorbar_axis.yaxis.get_ticklabels()) for item in items: item.set_fontsize(self._plot_config.font.size - 2) item.set_family(self._plot_config.font.name) image_axis.title.set_fontsize(self._plot_config.font.size) image_axis.title.set_family(self._plot_config.font.name) colorbar_axis.yaxis.label.set_fontsize(self._plot_config.font.size) colorbar_axis.yaxis.label.set_family(self._plot_config.font.name) colorbar_axis.yaxis.offsetText.set_fontsize(self._plot_config.font.size - 3) colorbar_axis.yaxis.offsetText.set_family(self._plot_config.font.name) def _add_colorbar(self, map_name, axis, image_figure, colorbar_label): kwargs = dict(cax=axis, ticks=self._get_tick_locator(map_name)) if colorbar_label: kwargs.update(dict(label=colorbar_label)) cbar = plt.colorbar(image_figure, **kwargs) cbar.formatter.set_powerlimits((-3, 4)) cbar.ax.yaxis.set_offset_position('left') cbar.update_ticks() if cbar.ax.get_yticklabels(): cbar.ax.get_yticklabels()[-1].set_verticalalignment('top') return cbar def _get_map_attr(self, map_name, option, default=None): if map_name in self._plot_config.map_plot_options: value = getattr(self._plot_config.map_plot_options[map_name], option) if value: return value return default def _get_title(self, map_name): return self._get_map_attr(map_name, 'title', map_name) def _get_title_spacing(self, map_name): return 1 + self._get_map_attr(map_name, 'title_spacing', 0) def _get_map_plot_options(self, map_name): output_dict = {'vmin': self._data_info.maps[map_name].min(), 'vmax': self._data_info.maps[map_name].max(), 'cmap': self._get_map_attr(map_name, 'colormap', self._plot_config.colormap)} scale = self._get_map_attr(map_name, 'scale', Scale()) if scale.use_max: output_dict['vmax'] = scale.vmax if scale.use_min: output_dict['vmin'] = scale.vmin return output_dict def _get_image(self, map_name): """Get the 2d image to display for the given data.""" data = self._data_info.maps[map_name] dimension = self._plot_config.dimension slice_index = self._plot_config.slice_index volume_index = self._plot_config.volume_index def get_slice(data): slice = get_slice_in_dimension(data, dimension, slice_index) if len(slice.shape) > 2: if volume_index < slice.shape[2]: slice = np.squeeze(slice[:, :, volume_index]) else: slice = np.squeeze(slice[:, :, slice.shape[2] - 1]) return slice slice = get_slice(data) slice = self._get_map_attr(map_name, 'clipping', Clipping()).apply(slice) mask_name = self._get_map_attr(map_name, 'mask_name', self._plot_config.mask_name) if mask_name: slice = slice * (get_slice(self._data_info.maps[mask_name]) > 0) return slice def _get_tick_locator(self, map_name): min_val, max_val = self._data_info.maps[map_name].min(), self._data_info.maps[map_name].max() 
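# A user-supplied Scale (with use_min/use_max set) overrides the data-driven
# range below before the colorbar tick locator is constructed.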
scale = self._get_map_attr(map_name, 'scale', Scale()) if scale.use_max: max_val = scale.vmax if scale.use_min: min_val = scale.vmin return MyColourBarTickLocator(min_val, max_val, numticks=self._plot_config.colorbar_nmr_ticks) PKjUpIu"mdt/visualization/maps/__init__.py__author__ = 'Robbert Harms' __date__ = "2016-09-02" __maintainer__ = "Robbert Harms" __email__ = "robbert.harms@maastrichtuniversity.nl" PKII+nJ J #mdt-0.9.5.dist-info/DESCRIPTION.rst############################ Maastricht Diffusion Toolbox ############################ The Maastricht Diffusion Toolbox, MDT, is a framework and library for GPU (graphics card) accelerated diffusion modeling. MDT's object oriented and modular design allows arbitrary user specification and combination of dMRI compartment models, diffusion microstructure models, likelihood functions and optimization algorithms. Many diffusion microstructure models are included, and new models can be added simply by adding Python script files. The GPU accelerated computations allow for ~60x faster model fitting; e.g. the 81 volume example NODDI dataset can be fitted whole brain in about 40 seconds, which makes MDT ideal for population studies. Additionally, MDT can be extended to other modalities and models such as quantitative MRI relaxometry. ******* Summary ******* * Free software: LGPL v3 license * Scriptable modeling * Full documentation: http://maastrichtdiffusiontoolbox.readthedocs.io * Project home: https://github.com/cbclab/MDT * Uses the `GitLab workflow `_ * Tags: diffusion, dMRI, MRI, optimization, parallel, opencl, python ************************ Quick installation guide ************************ The basic requirements for MDT are: * Python 3.x (recommended) or Python 2.7 * OpenCL 1.2 (or higher) support in GPU driver or CPU runtime **Linux** For Ubuntu >= 16 you can use: * ``sudo add-apt-repository ppa:robbert-harms/cbclab`` * ``sudo apt-get update`` * ``sudo apt-get install python3-mdt`` For Debian users and Ubuntu < 16 users, install MDT with: * ``sudo apt-get install python3 python3-pip python3-pyopencl python3-numpy python3-nibabel python3-pyqt5 python3-matplotlib python3-six python3-yaml python3-argcomplete libpng-dev libfreetype6-dev libxft-dev`` * ``sudo pip3 install mdt`` **Windows** The installation on Windows is a little bit more complex and the following is only a quick reference guide. For complete instructions please view the `complete documentation `_. 
* Install Anaconda Python 3.5 * Install MOT using the guide at https://mot.readthedocs.io * Open a Anaconda shell and type: ``pip install mdt`` For more information and installation instructions, please see: https://maastrichtdiffusiontoolbox.readthedocs.org PKIZvxx$mdt-0.9.5.dist-info/entry_points.txt[console_scripts] MDT = mdt.cli_scripts.MDT:GUI_Shortcut.console_script mdt-apply-mask = mdt.cli_scripts.mdt_apply_mask:ApplyMask.console_script mdt-batch-fit = mdt.cli_scripts.mdt_batch_fit:BatchFit.console_script mdt-generate-bvec-bval = mdt.cli_scripts.mdt_generate_bvec_bval:GenerateBvecBval.console_script mdt-generate-mask = mdt.cli_scripts.mdt_generate_mask:GenerateMask.console_script mdt-generate-protocol = mdt.cli_scripts.mdt_generate_protocol:GenerateProtocol.console_script mdt-generate-roi-slice = mdt.cli_scripts.mdt_generate_roi_slice:GenerateRoiSlice.console_script mdt-gui = mdt.cli_scripts.mdt_gui:GUI.console_script mdt-info-img = mdt.cli_scripts.mdt_info_img:InfoImg.console_script mdt-info-protocol = mdt.cli_scripts.mdt_info_protocol:InfoProtocol.console_script mdt-init-user-settings = mdt.cli_scripts.mdt_init_user_settings:InitUserSettings.console_script mdt-list-devices = mdt.cli_scripts.mdt_list_devices:ListDevices.console_script mdt-list-models = mdt.cli_scripts.mdt_list_models:ListModels.console_script mdt-math-img = mdt.cli_scripts.mdt_math_img:MathImg.console_script mdt-math-protocol = mdt.cli_scripts.mdt_math_protocol:MathProtocol.console_script mdt-model-fit = mdt.cli_scripts.mdt_model_fit:ModelFit.console_script mdt-view-maps = mdt.cli_scripts.mdt_view_maps:GUI.console_script mdt-volume-merge = mdt.cli_scripts.mdt_volume_merge:VolumeMerge.console_script PKIJ$^^!mdt-0.9.5.dist-info/metadata.json{"classifiers": ["Environment :: Console", "Environment :: X11 Applications :: Qt", "Intended Audience :: Developers", "Intended Audience :: Science/Research", "License :: OSI Approved :: GNU Lesser General Public License v3 or later (LGPLv3+)", "Development Status :: 5 - Production/Stable", "Natural Language :: English", "Operating System :: POSIX :: Linux", "Operating System :: MacOS :: MacOS X", "Operating System :: Microsoft :: Windows", "Programming Language :: Python :: 2", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.4", "Programming Language :: Python :: 3.5", "Programming Language :: Python :: 3.6", "Topic :: Scientific/Engineering"], "extensions": {"python.commands": {"wrap_console": {"MDT": "mdt.cli_scripts.MDT:GUI_Shortcut.console_script", "mdt-apply-mask": "mdt.cli_scripts.mdt_apply_mask:ApplyMask.console_script", "mdt-batch-fit": "mdt.cli_scripts.mdt_batch_fit:BatchFit.console_script", "mdt-generate-bvec-bval": "mdt.cli_scripts.mdt_generate_bvec_bval:GenerateBvecBval.console_script", "mdt-generate-mask": "mdt.cli_scripts.mdt_generate_mask:GenerateMask.console_script", "mdt-generate-protocol": "mdt.cli_scripts.mdt_generate_protocol:GenerateProtocol.console_script", "mdt-generate-roi-slice": "mdt.cli_scripts.mdt_generate_roi_slice:GenerateRoiSlice.console_script", "mdt-gui": "mdt.cli_scripts.mdt_gui:GUI.console_script", "mdt-info-img": "mdt.cli_scripts.mdt_info_img:InfoImg.console_script", "mdt-info-protocol": "mdt.cli_scripts.mdt_info_protocol:InfoProtocol.console_script", "mdt-init-user-settings": "mdt.cli_scripts.mdt_init_user_settings:InitUserSettings.console_script", "mdt-list-devices": "mdt.cli_scripts.mdt_list_devices:ListDevices.console_script", "mdt-list-models": 
"mdt.cli_scripts.mdt_list_models:ListModels.console_script", "mdt-math-img": "mdt.cli_scripts.mdt_math_img:MathImg.console_script", "mdt-math-protocol": "mdt.cli_scripts.mdt_math_protocol:MathProtocol.console_script", "mdt-model-fit": "mdt.cli_scripts.mdt_model_fit:ModelFit.console_script", "mdt-view-maps": "mdt.cli_scripts.mdt_view_maps:GUI.console_script", "mdt-volume-merge": "mdt.cli_scripts.mdt_volume_merge:VolumeMerge.console_script"}}, "python.details": {"contacts": [{"email": "robbert.harms@maastrichtuniversity.nl", "name": "Robbert Harms", "role": "author"}], "document_names": {"description": "DESCRIPTION.rst"}, "project_urls": {"Home": "https://github.com/cbclab/MDT"}}, "python.exports": {"console_scripts": {"MDT": "mdt.cli_scripts.MDT:GUI_Shortcut.console_script", "mdt-apply-mask": "mdt.cli_scripts.mdt_apply_mask:ApplyMask.console_script", "mdt-batch-fit": "mdt.cli_scripts.mdt_batch_fit:BatchFit.console_script", "mdt-generate-bvec-bval": "mdt.cli_scripts.mdt_generate_bvec_bval:GenerateBvecBval.console_script", "mdt-generate-mask": "mdt.cli_scripts.mdt_generate_mask:GenerateMask.console_script", "mdt-generate-protocol": "mdt.cli_scripts.mdt_generate_protocol:GenerateProtocol.console_script", "mdt-generate-roi-slice": "mdt.cli_scripts.mdt_generate_roi_slice:GenerateRoiSlice.console_script", "mdt-gui": "mdt.cli_scripts.mdt_gui:GUI.console_script", "mdt-info-img": "mdt.cli_scripts.mdt_info_img:InfoImg.console_script", "mdt-info-protocol": "mdt.cli_scripts.mdt_info_protocol:InfoProtocol.console_script", "mdt-init-user-settings": "mdt.cli_scripts.mdt_init_user_settings:InitUserSettings.console_script", "mdt-list-devices": "mdt.cli_scripts.mdt_list_devices:ListDevices.console_script", "mdt-list-models": "mdt.cli_scripts.mdt_list_models:ListModels.console_script", "mdt-math-img": "mdt.cli_scripts.mdt_math_img:MathImg.console_script", "mdt-math-protocol": "mdt.cli_scripts.mdt_math_protocol:MathProtocol.console_script", "mdt-model-fit": "mdt.cli_scripts.mdt_model_fit:ModelFit.console_script", "mdt-view-maps": "mdt.cli_scripts.mdt_view_maps:GUI.console_script", "mdt-volume-merge": "mdt.cli_scripts.mdt_volume_merge:VolumeMerge.console_script"}}}, "extras": [], "generator": "bdist_wheel (0.26.0)", "keywords": ["mdt", "diffusion", "MRI", "model", "recovery", "imaging", "analysis"], "license": "LGPL v3", "metadata_version": "2.0", "name": "mdt", "run_requires": [{"requires": ["argcomplete", "grako", "matplotlib (>=1.5.1)", "mot", "nibabel", "numpy (>=1.9.0)", "pyopencl (>=2013.1)", "pyyaml", "scipy (>=0.12.1)", "six"]}], "summary": "Maastricht Diffusion Toolbox", "test_requires": [{"requires": []}], "version": "0.9.5"}PKILE !mdt-0.9.5.dist-info/top_level.txtmdt tests PKIndnnmdt-0.9.5.dist-info/WHEELWheel-Version: 1.0 Generator: bdist_wheel (0.26.0) Root-Is-Purelib: true Tag: py2-none-any Tag: py3-none-any PKIdmdt-0.9.5.dist-info/METADATAMetadata-Version: 2.0 Name: mdt Version: 0.9.5 Summary: Maastricht Diffusion Toolbox Home-page: https://github.com/cbclab/MDT Author: Robbert Harms Author-email: robbert.harms@maastrichtuniversity.nl License: LGPL v3 Keywords: mdt,diffusion MRI,model recovery,imaging analysis Platform: UNKNOWN Classifier: Environment :: Console Classifier: Environment :: X11 Applications :: Qt Classifier: Intended Audience :: Developers Classifier: Intended Audience :: Science/Research Classifier: License :: OSI Approved :: GNU Lesser General Public License v3 or later (LGPLv3+) Classifier: Development Status :: 5 - Production/Stable Classifier: Natural Language :: English 
Classifier: Operating System :: POSIX :: Linux Classifier: Operating System :: MacOS :: MacOS X Classifier: Operating System :: Microsoft :: Windows Classifier: Programming Language :: Python :: 2 Classifier: Programming Language :: Python :: 2.7 Classifier: Programming Language :: Python :: 3 Classifier: Programming Language :: Python :: 3.4 Classifier: Programming Language :: Python :: 3.5 Classifier: Programming Language :: Python :: 3.6 Classifier: Topic :: Scientific/Engineering Requires-Dist: argcomplete Requires-Dist: grako Requires-Dist: matplotlib (>=1.5.1) Requires-Dist: mot Requires-Dist: nibabel Requires-Dist: numpy (>=1.9.0) Requires-Dist: pyopencl (>=2013.1) Requires-Dist: pyyaml Requires-Dist: scipy (>=0.12.1) Requires-Dist: six ############################ Maastricht Diffusion Toolbox ############################ The Maastricht Diffusion Toolbox, MDT, is a framework and library for GPU (graphics card) accelerated diffusion modeling. MDT's object oriented and modular design allows arbitrary user specification and combination of dMRI compartment models, diffusion microstructure models, likelihood functions and optimization algorithms. Many diffusion microstructure models are included, and new models can be added simply by adding Python script files. The GPU accelerated computations allow for ~60x faster model fitting; e.g. the 81 volume example NODDI dataset can be fitted whole brain in about 40 seconds, which makes MDT ideal for population studies. Additionally, MDT can be extended to other modalities and models such as quantitative MRI relaxometry. ******* Summary ******* * Free software: LGPL v3 license * Scriptable modeling * Full documentation: http://maastrichtdiffusiontoolbox.readthedocs.io * Project home: https://github.com/cbclab/MDT * Uses the `GitLab workflow `_ * Tags: diffusion, dMRI, MRI, optimization, parallel, opencl, python ************************ Quick installation guide ************************ The basic requirements for MDT are: * Python 3.x (recommended) or Python 2.7 * OpenCL 1.2 (or higher) support in GPU driver or CPU runtime **Linux** For Ubuntu >= 16 you can use: * ``sudo add-apt-repository ppa:robbert-harms/cbclab`` * ``sudo apt-get update`` * ``sudo apt-get install python3-mdt`` For Debian users and Ubuntu < 16 users, install MDT with: * ``sudo apt-get install python3 python3-pip python3-pyopencl python3-numpy python3-nibabel python3-pyqt5 python3-matplotlib python3-six python3-yaml python3-argcomplete libpng-dev libfreetype6-dev libxft-dev`` * ``sudo pip3 install mdt`` **Windows** The installation on Windows is a little bit more complex and the following is only a quick reference guide. For complete instructions please view the `complete documentation `_. 
* Install Anaconda Python 3.5 * Install MOT using the guide at https://mot.readthedocs.io * Open a Anaconda shell and type: ``pip install mdt`` For more information and installation instructions, please see: https://maastrichtdiffusiontoolbox.readthedocs.org PKIYRRmdt-0.9.5.dist-info/RECORDmdt/__init__.py,sha256=uM2b4C2FLSeJRzLV66Sicffrcuig80kotrmK8CUlTdc,28884 mdt/__version__.py,sha256=_y9ceGeC4u-BokX7pTrOCjunfWD5u9u6IbUlHOBt4ps,220 mdt/batch_utils.py,sha256=bP6_fdJxHMO6o5s_j6XyQhKbKXDNTwDQ_LHsxnYoP6U,26384 mdt/components_loader.py,sha256=2jgrT7dee71Ud7hS5zLK3XZYLh88f0M-vJkPWIm7qo0,29119 mdt/configuration.py,sha256=Wqr4XZrjuZtMiLTsFUys3TCQAgacgga0LRHKMr--M0M,23435 mdt/deferred_mappings.py,sha256=ddlqpYDcnzvPlAznd9cgXdRmxYC2W5rK-TSz0gNaxoM,4934 mdt/exceptions.py,sha256=mHSD_vyZCWrY9p5kClK0fFs5O0O0KPzNhdKMKJg8oQs,1145 mdt/log_handlers.py,sha256=_glE6DP3piRbC3YM9woEviA4Mnh84uePzAS4w6USAIg,4737 mdt/masking.py,sha256=6WtnTjMFbxUhtl56yUlZ6y4MRkbLgXsezidFOiG0xGI,7749 mdt/model_fitting.py,sha256=d-LfhhaYqXQOYAwEDC6GtQnhamk_j9sM_37tkYtjZ3M,20407 mdt/model_protocol_problem.py,sha256=l4z34EcUuFuWOTM8Ug-ccpCpTbNl1DoDueXR5bKBFV8,1894 mdt/model_sampling.py,sha256=iTkqnB7ZvMpeEHPHwcNczNl-rdoEpfNdn031a-PQH1E,2856 mdt/nifti.py,sha256=TJK9OUDYStP5i6n22i8HrKZ4MqZ3EuTyEgv1iE4xuM8,12187 mdt/processing_strategies.py,sha256=Bh1_JfnTV7vZJvX_bosFP4f_XaBFK97RiHvgBQRWp3I,21988 mdt/protocols.py,sha256=0gFIlDUgBxDflmISZf5Jz3KfQKJSXrsZx37FsgSJkFg,32860 mdt/shell_utils.py,sha256=6N7W1fnvu933iG_cLtIIOny722I5aGnOuSnqD4dclUc,3050 mdt/simulations.py,sha256=0sAbNx7v1iEbv1BQEd9syiC4QTtk_5nZg53_v7toCII,13118 mdt/user_script_info.py,sha256=rhWoIcTnbGcYD--fPgFcLhO95TFo_s05CjwgkF7cQ1U,3556 mdt/utils.py,sha256=ML9XfK7Qde4-AMQqAsJoVYcb5amSkzZ4giZHLNnK-2Q,58000 mdt/cl_routines/__init__.py,sha256=SG-oRt_3ljeStkQKAiMOjOEPgrP5jVLfNVnNPV-dCC8,161 mdt/cl_routines/mapping/__init__.py,sha256=SG-oRt_3ljeStkQKAiMOjOEPgrP5jVLfNVnNPV-dCC8,161 mdt/cl_routines/mapping/calculate_eigenvectors.py,sha256=eqOVhSJr7xh-8Cw6t2OTNXrgtuR6e05K3wreE74LiKs,6385 mdt/cl_routines/mapping/dti_measures.py,sha256=VIqSVgyq8JDDhf4hzIUNjDHoMPMZ4tn0lDT4D4oohM8,6216 mdt/cli_scripts/MDT.py,sha256=1e2AS5dct4M8GWYLXZYZ8MT2ClvIlGKYupbxtM6hjoc,348 mdt/cli_scripts/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 mdt/cli_scripts/mdt_apply_mask.py,sha256=Tfl6tR95IlHfn2_ciZLo85vJ1Mul7CxAnw0wVycoyFw,2015 mdt/cli_scripts/mdt_batch_fit.py,sha256=lulohUXDVngwvLUl6M7aEHvIYbaWknGDqxG8cNK5gG8,7035 mdt/cli_scripts/mdt_generate_bvec_bval.py,sha256=JizE3DzDW4RLwgEEW0HhJ7x7CM6PCaS9o3cCWsBnibI,2064 mdt/cli_scripts/mdt_generate_mask.py,sha256=8XCIISkkxDlYZmvuKaUEQSX-06X9oMG8W4cByRfkmWs,4662 mdt/cli_scripts/mdt_generate_protocol.py,sha256=8xG1RlYWLqNEmfUKIU-JltfK4gTdY4nO5yl-2LhDQ2M,6011 mdt/cli_scripts/mdt_generate_roi_slice.py,sha256=u8sT4wOO8YZzlNiO3VzMVVkKW5K7uDOY9UwTyvccBHo,3341 mdt/cli_scripts/mdt_gui.py,sha256=ERyK31y9QbIL54FFui_QOwH3jubcxZWaIz_KUiibRx0,1247 mdt/cli_scripts/mdt_info_img.py,sha256=37cO-qXoscxTJua5q4cmWFrm0QVlUZv8EKmoQvSfwKg,1819 mdt/cli_scripts/mdt_info_protocol.py,sha256=YFXbHbgbgTGG_v-_ulCDLCw2k-x4IPmBcA-k3JZnihw,2193 mdt/cli_scripts/mdt_init_user_settings.py,sha256=8pvs7LyuB-UTWB4V9y9HJCy5pfJw8-5uzuT5irMchak,1425 mdt/cli_scripts/mdt_list_devices.py,sha256=JdZRorzMMvEXtpxxIyKYzq7EZW2dVcOEFnwmofPjQ1A,1205 mdt/cli_scripts/mdt_list_models.py,sha256=C64xgW4T0h2Pj-Pqil2J5k09N03JO0vqhi3dgciRzs0,1243 mdt/cli_scripts/mdt_math_img.py,sha256=DNTtTNHnmEZ1BL-qDOYvtw6t94kNNegMkUxt_oI-koE,6583 
mdt/cli_scripts/mdt_math_protocol.py,sha256=1qryi7mj4pxW28qQwMxxX92SCwL70IqrdU4U8MbH3EA,3169 mdt/cli_scripts/mdt_model_fit.py,sha256=7HMajLN0gUO01fAmwJqfrIiR8DsRXSRUW-x4PMv3LTM,7694 mdt/cli_scripts/mdt_view_maps.py,sha256=W1NtqgcvOqW4EsMks3C-Mn7C7KgSEQq4NJVpghJH1fo,2086 mdt/cli_scripts/mdt_volume_merge.py,sha256=a4RuJQitokG0dclVZiNiPRtcGFp7vHHrk-LZh1kjYo8,3105 mdt/data/logo.svg,sha256=qWTN5E6-b9JsDkcSPIA6pe-q0Jzta0eFjyo8kgosviI,3479 mdt/data/logo_docs.png,sha256=JgPkNqc54AElE90k9tCh21r-dtBn8XbGbBvEnZc79sI,11983 mdt/data/logo_gui.png,sha256=afRu9XgEZBAQwrxJ68H65JbJlyFcH1adGWVhD08UNxk,2949 mdt/data/mdt.conf,sha256=T2rk4y3jqxDKaBs4LvP1bOj4SEpHQhrx0wzrD3Nlyuk,5059 mdt/data/components/standard/batch_profiles/DirPerSubject.py,sha256=rZuno9UNpdQVyFGj5fCA6igODXmIHvIxeaNzg866OPU,3892 mdt/data/components/standard/batch_profiles/HCP_MGH.py,sha256=YhlBi5S36nbRMFyS2D7Xxpeq1oXLBYv2zSKYDZhu5M4,3360 mdt/data/components/standard/batch_profiles/HCP_WUMINN.py,sha256=Xz0TlXHMd3dp45Spddzf2OQoyTM3z1WSbWs6NiXwSg4,3147 mdt/data/components/standard/batch_profiles/SingleDir.py,sha256=7qH-iCZTcsFcl8Mf47OuFbxtx91r6BWMb9idQjHSQmI,3996 mdt/data/components/standard/cascade_models/BallSticks.py,sha256=WkIEzsMkkbFwrEzTPmsdK8-9MA0mB7hZrQsT_2lU0ck,2095 mdt/data/components/standard/cascade_models/CHARMED.py,sha256=A2P6PxWuphU9t1zoztWtJcRwz0rfvW3saKuWVsRUu48,5148 mdt/data/components/standard/cascade_models/NODDI.py,sha256=DDSRoya8qnYK1Bx9VjsQxiKjtO232-8dfOdUqnl-yj8,1481 mdt/data/components/standard/cascade_models/Tensor.py,sha256=T1wJfeQyyaG2Voi0gfyNfNFAdPAPAe23L6vChFNlefA,1178 mdt/data/components/standard/compartment_models/AstroCylinders.py,sha256=yZiHkEYaPEOHWQGslxsar1z_sfu2V1nY4qi9hKrodV4,756 mdt/data/components/standard/compartment_models/AstroSticks.py,sha256=8w_h7Z0a28-5rVG9fpYjjCmXvJKn7xCy7xMrp6HwNS0,448 mdt/data/components/standard/compartment_models/Ball.py,sha256=o442KRo28Ssgb4HH_ohW0f9hHjjPrAr82uYLEPWP3ug,295 mdt/data/components/standard/compartment_models/CHARMEDRestricted.cl,sha256=pUcmRrYoM9e3OzLJlm89d4cEInWQqhx0wj4jEktPwiQ,2347 mdt/data/components/standard/compartment_models/CHARMEDRestricted.py,sha256=Bq4KmUrOnZiOYXqs81oNru5hizClpnz-nsQCOh8jTBE,640 mdt/data/components/standard/compartment_models/CylinderGPD.py,sha256=CZAXBrN5Lu221n-rWSegYOi87dikD0Ijy1D5NlhDrEQ,1146 mdt/data/components/standard/compartment_models/Dot.py,sha256=FLPjWNMod_RfyIOs9vVCxnfdpyFkpS0HcYIFDgD3Ebw,294 mdt/data/components/standard/compartment_models/ExpT1DecGRE.py,sha256=GUL98St5JBFo2zIiyARNr8foDWkygq55nO3r4I_KUtY,332 mdt/data/components/standard/compartment_models/ExpT1DecTM.py,sha256=l90ur6xr6qK_PMo5UE3ro6ZCLqxpn_yvY1VzIoXnvGM,425 mdt/data/components/standard/compartment_models/ExpT1DecTR.py,sha256=p8Fz1UJbWoCniSRzBeMY0zE_vB8XV8FEPj43U3t3rWM,314 mdt/data/components/standard/compartment_models/ExpT1ExpT2GRE.py,sha256=v2SX1vryVHp66qaxGITZquLrNkq7G5C5KIST7Z7uxDs,325 mdt/data/components/standard/compartment_models/ExpT1ExpT2STEAM.py,sha256=jfWKggGGZXpZ0nIAT0XElfv0lH8zBE2ZKnVIJtNl_2g,1408 mdt/data/components/standard/compartment_models/ExpT1ExpT2sGRE.py,sha256=iZvQ71fxJQbNk19KGx18XPKi_c-RdCMYgxZ80SGJtUU,328 mdt/data/components/standard/compartment_models/ExpT2Dec.py,sha256=MyWOCcxDKOZvJ9QGyAowQPoNV4zrZ94X4IiG4Ion98E,303 mdt/data/components/standard/compartment_models/ExpT2DecSTEAM.py,sha256=7yWuwpRV4WaUBnvFh5TXjdaFjSTRv__0zRn5SJOYDNE,446 mdt/data/components/standard/compartment_models/GDRCylinders.cl,sha256=8N5Ihb40mEFMnf0jp5P8GxB2JGfJOgTJ9UPYQWx9AyA,7492 
mdt/data/components/standard/compartment_models/GDRCylinders.py,sha256=WXt7FtAfUVry_rcOtQrojwKvtgvAZdfECwQSK7vokgk,722 mdt/data/components/standard/compartment_models/GDRCylindersFixedRadii.cl,sha256=RS3WOBr0WEdzYvrGtrYGK6K4pdlJhUs9TnrtbuXD-a4,1653 mdt/data/components/standard/compartment_models/GDRCylindersFixedRadii.py,sha256=4Mpc6vX8NkQW0S555U4yomD-q_UxjRkuinbTaEKLqOY,815 mdt/data/components/standard/compartment_models/LinMPM_Fit.py,sha256=-dWvTOs20MOLxQaGXvDl-SlVJTqnwrGyksdYp29XwOY,713 mdt/data/components/standard/compartment_models/LinT1GRE.py,sha256=jH-d2GYGFYl6SLtVGz9eHUZ-B90cDDXUW-_oYxZccwM,1109 mdt/data/components/standard/compartment_models/LinT2Dec.py,sha256=cwQnIpqIdY9V-FeFAqKN-W7X6Fbni0sRyauz5VKlF9U,192 mdt/data/components/standard/compartment_models/MPM_Fit.py,sha256=jiPGHKINlZ_Tda1jDcgBGzB7K5e0PFDaBygKDPiYq3w,704 mdt/data/components/standard/compartment_models/NODDI_EC.cl,sha256=1sBjhlgTc_h3ouoa7vh1j_mqWfPbTNDNGzCGi7QP-ug,1893 mdt/data/components/standard/compartment_models/NODDI_EC.py,sha256=t6c_NSb2PJb28Wr5GfOxOuMB3uU7hnxknM_kiAPPDcg,692 mdt/data/components/standard/compartment_models/NODDI_IC.cl,sha256=W3NzhNIQbYfH-U9x1ZrvjCvaH04mQGbd_HBzY03iJNU,11914 mdt/data/components/standard/compartment_models/NODDI_IC.py,sha256=6L_BQjR-6zCgB5dK_GOpCzMw0b6grc2ohDvIUD23SpE,872 mdt/data/components/standard/compartment_models/S0.py,sha256=jdPT6oAr4u3GGMPkJ9ppKS_KJbImW7dfV61O4Lqmd9E,281 mdt/data/components/standard/compartment_models/SphereGPD.cl,sha256=8ZFP3lWswX_nMnOCXrg7gFyFh7zEeq_K_IbJx0b70wg,1375 mdt/data/components/standard/compartment_models/SphereGPD.py,sha256=hJI-zJMKxa62p8E7l_elSSiL7nUY_Vdwr5WZY2k3M6M,322 mdt/data/components/standard/compartment_models/Stick.py,sha256=TAJDc1Y1yy5cY4O9mz271zY3hR4Rs_umIrivLdYXXVw,771 mdt/data/components/standard/compartment_models/Tensor.cl,sha256=bVlLAIn2iJFM17EPyThvvDo50pK-2QqmFFcZh-F-IAI,2194 mdt/data/components/standard/compartment_models/Tensor.py,sha256=CH8NmPRMYtNL6khzPAOV3a7sZcRFS1OqpUjCKnkwzVw,3120 mdt/data/components/standard/compartment_models/Zeppelin.py,sha256=kuZCTCHBRUWX7-OB7ybQo-0yOxC66opzB6S0Y7FCmxM,871 mdt/data/components/standard/composite_models/ActiveAx.py,sha256=5SAvX1eIGy5F31c6Yv8i44VD_3Gr1z4WRvO5jrE0eIc,669 mdt/data/components/standard/composite_models/BallSticks.py,sha256=FnVEILQjQOM7No1Wo5CJpu3b8JxBT--WOiq-v3jbdRE,2592 mdt/data/components/standard/composite_models/CHARMED.py,sha256=OxKxO7zQZeb-8D1cJXlUunyB0TfDTBwXuAjGFABSmBY,3624 mdt/data/components/standard/composite_models/NODDI.py,sha256=Q3suafIFb6YSfIW-SPyKK4Sg6KVFJiQMXSQGXBa0XPU,1870 mdt/data/components/standard/composite_models/Tensor.py,sha256=7Q8cCEbInAUSn48QTjo0xmUlImONpWtSHpbVv1k_o_Y,1064 mdt/data/components/standard/composite_models/s0s.py,sha256=IRfQ8n5TKW0By6vvCpmKf-O8p3Tq6NxqBkVAiTf_yDo,473 mdt/data/components/standard/library_functions/MRIConstants.cl,sha256=akLGvAsiLcEEiJDC84SAHvZeZdypGAqAxXrxla5gqQA,231 mdt/data/components/standard/library_functions/MRIConstants.h,sha256=ngO7BOYEJXG6UsELHkR9wKIByV58iqrCFHmaWFHiQyU,882 mdt/data/components/standard/library_functions/MRIConstants.py,sha256=_2V3_-QpFCrTHsSVofYKXVa0aCdc3hi5lXt_6VD5KKk,555 mdt/data/components/standard/library_functions/NeumannCylPerpPGSESum.cl,sha256=VEDggtkfgo3o8GQbTSW4pQ7X7VTqrDahnXkmyP5PTMk,1524 mdt/data/components/standard/library_functions/NeumannCylPerpPGSESum.h,sha256=okOoL2F9xO7SJCZZ_3TGzzuOJ3R8gREAu_PECRygIV4,841 mdt/data/components/standard/library_functions/NeumannCylPerpPGSESum.py,sha256=t16xaBlsIIbprCcbjRidNcu1BjSaexfr8TguA1EC_KA,1032 
mdt/data/components/standard/noise_std_estimators/AllUnweightedVolumes.py,sha256=kOu1DeG9QSinWzhznd1Owl-ywvRfAeZ9w8ZZQZ4-ulo,1235
mdt/data/components/standard/noise_std_estimators/AverageOfAir_DilatedMask.py,sha256=VRz5rx32g5BFYBDjYsd1tbxz4-ajxadiGtw3L2SYTGc,2074
mdt/data/components/standard/noise_std_estimators/AverageOfAir_ExtendedMask.py,sha256=iG_nRmYg6AblB_O4iDMHzkUqSVkh-I4obF8CI4AVamg,2366
mdt/data/components/standard/noise_std_estimators/TwoUnweightedVolumes.py,sha256=7nJ7P2PDi6trSnrz5WTDkcAtYSJdtCpvo57VIMSas54,1381
mdt/data/components/standard/parameters/free.py,sha256=FshMBp7ZEpLw5FqLdthwaGmFGwUptafmjeWtg2ll_TM,5796
mdt/data/components/standard/parameters/model_data.py,sha256=CVZesS0kOb5CCTtCFyuTVkJg9jj6TUErF-ByUPuqA0c,1305
mdt/data/components/standard/parameters/protocol.py,sha256=dhHVyOT03A2JXMfuI88_p_LhgX3ZACro6zQCReqaigg,1495
mdt/data/components/standard/parameters/static_maps.py,sha256=Qet6orlwLNcjY-Q6nRpVFx055jX2kkl51B6MKzct-aQ,1383
mdt/data/components/standard/processing_strategies/AllVoxelsAtOnce.py,sha256=DNHrjHabfE2hyQm8pcsXcbaeoqrdXrtNNlYGx5Q6szE,528
mdt/data/components/standard/processing_strategies/ProtocolDependent.py,sha256=JkqtqWt1pXRskjC1l6SG6-fSAMrtgJyRVWKy6p8aiVU,2357
mdt/data/components/standard/processing_strategies/VoxelRange.py,sha256=cE-6CsihXffzwKAcg0HumtFmaNY2hS-dbB5rsp0NNWw,1086
mdt/data/qt_designs/maps_visualizer/MainWindow.ui,sha256=LOtLYAQa3f9wMCMGibSJPBXiJFVB6f0sXw0SomLJ-48,11910
mdt/data/qt_designs/maps_visualizer/Makefile,sha256=JigZgijhKu8tZVm42HjwtI9V2wb2_R_QWGP7h5fPd_I,1087
mdt/data/qt_designs/maps_visualizer/MapSpecificOptions.ui,sha256=giJd2N-OTDb6Vg870v8cwdxXCgma8rpYX0FMvWlnzmI,20257
mdt/data/qt_designs/maps_visualizer/TabGeneral.ui,sha256=-4E1k1vnGt8im7kpCGsHfwo1MPlh2K6RH4tuqKBCnK0,33967
mdt/data/qt_designs/maps_visualizer/TabMapSpecific.ui,sha256=4Z37BkcsfdUWIRaojeQhLjcYeRRDpvdYgN9_xvE1OQc,3412
mdt/data/qt_designs/maps_visualizer/TabTextual.ui,sha256=PT8bMx3rPn-JDZ1WjPEKU3mKgWu4JDenjJVBzq2OiKo,1555
mdt/data/qt_designs/maps_visualizer/arrow_redo.png,sha256=kfboRovuJwrWRKMF0d0kW3VS6QrivAt6zTDU40obEbw,543
mdt/data/qt_designs/maps_visualizer/arrow_undo.png,sha256=1VxpF0CqBSJRnOLy22xcay_44zmRkBiegdXOZkUtRH0,565
mdt/data/qt_designs/maps_visualizer/main.qrc,sha256=K1yUqyCa8H2jy5VuIXo4zs2nBtgZP8kN09B5aw_pT4Y,169
mdt/data/qt_designs/maps_visualizer/save_image_dialog.ui,sha256=A355rmHUKOd9wGsdA0VNxh8TSbPGpK2kJ7kuD4uEqTM,6398
mdt/data/qt_designs/model_fit/Makefile,sha256=jLcbsfpmp937d_m6GQYQASg0Xe_HdwlTe0exWvnqbR0,1085
mdt/data/qt_designs/model_fit/about_dialog.ui,sha256=SsM9dWAO7Xj75bxDA7YeDGEtD7KKcjEMG4Nvm4JUVV0,7429
mdt/data/qt_designs/model_fit/fit_model_tab.ui,sha256=bQmr1kvIbVcCvCKzNu4Xgu-BgAaT1o0gOZXEF-cqdf8,10385
mdt/data/qt_designs/model_fit/generate_brain_mask_tab.ui,sha256=ZzRwugFaLPRMsxyavHg1WaeUGYp5uqy8bxcujf4CY9c,11617
mdt/data/qt_designs/model_fit/generate_protocol_load_gb_dialog.ui,sha256=AYk6cM6Dq3laplj9Q_yMq_VpwJEXzT6Pv5Gjt6FZ_Tg,5773
mdt/data/qt_designs/model_fit/generate_protocol_tab.ui,sha256=sMsvRWS_v93sDgq8b4imV2yUw-hDftzOgLg6YlzzjD0,9294
mdt/data/qt_designs/model_fit/generate_protocol_update_dialog.ui,sha256=Sw5Uo15zVcQzDECbxR1eOzgE9gZ0IOmK8D0LXWmglKA,7136
mdt/data/qt_designs/model_fit/generate_roi_mask_tab.ui,sha256=ZzqPp-1hRjQ4zwaYUYaztdGziSSwciKM8d7LuKUFPqA,9593
mdt/data/qt_designs/model_fit/icon_status_green.png,sha256=0DaO2npGGlnW3QFJOhslxa7So2b_jkalCcqLMzj5StQ,993
mdt/data/qt_designs/model_fit/icon_status_red.png,sha256=RKulvefIS1istruTj8YZsovc4NPpW7mIAqhxK7xyrUA,956
mdt/data/qt_designs/model_fit/main_gui.qrc,sha256=zxTUU9ZGtWL2Rb_9SmwEyEwCARhK0IhIBtayJf7cEGM,200
mdt/data/qt_designs/model_fit/main_gui.ui,sha256=Ga_C-fnuNinZNCxogvPl0c8bnztP4epawokARTfm3Zo,6587
mdt/data/qt_designs/model_fit/optimization_options_dialog.ui,sha256=XftIY-q2KaB6-JEQRV68cbT1jtsQt7rwv1l8Gw6Todg,11837
mdt/data/qt_designs/model_fit/runtime_settings_dialog.ui,sha256=zGwX39EyPouAsaw57JhIofpkdWAxIrlNMS5H48XD31M,3705
mdt/data/qt_designs/model_fit/view_results_tab.ui,sha256=_sS67TtS_DCYdepu4ix1EAoLxkymuYuBnFB5gvsW7Rg,9635
mdt/data_loaders/__init__.py,sha256=l2_mYDfWnSHMFib6aKv_N0lh9Uy2-SuwhZntzeJPIys,138
mdt/data_loaders/brain_mask.py,sha256=r1axyZMjtjUecJohLR-0nysvVLr3ZyKUwyWdAevRwQs,2133
mdt/data_loaders/noise_std.py,sha256=dIc_QWYVpKDvCiYn5CinUOPQmI-AS3DuspBdKhtUnxI,3296
mdt/data_loaders/protocol.py,sha256=_hTzfO8KZjMxRToPVeN-txQ7gwOjIqOjd7qyxTD6XMg,3550
mdt/gui/__init__.py,sha256=r-1EeFwJvqklzdg6McyT-BgGDcOXaZq4iVjFwMses2k,161
mdt/gui/utils.py,sha256=0mL11Me3nNoRMGaQv8C26fRcmI2L2c4P_3FdujWPxDw,8652
mdt/gui/maps_visualizer/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
mdt/gui/maps_visualizer/actions.py,sha256=x3pMwg5_Ob34IABzUALQQHPYPAm_2gY-HjLcvcJBW6g,3783
mdt/gui/maps_visualizer/base.py,sha256=TnhXNJzzePGDJwu92kOFjt7foLltF8Xu00F69hhUV9I,8513
mdt/gui/maps_visualizer/main.py,sha256=dBh01QdinabonvR0ddPjxQcA6r9nzwtjZYQTpYLiFsU,14648
mdt/gui/maps_visualizer/widgets.py,sha256=_KYaXBOlo4MBNDHita8MK1-8s8HxaRsgVPIGBnEn5Vo,2189
mdt/gui/maps_visualizer/config_tabs/__init__.py,sha256=C3xGg01QornJZ_dgDHFP2Qd2UZ-G1Oc26CXvQzmHie8,138
mdt/gui/maps_visualizer/config_tabs/tab_general.py,sha256=9LnqLmoWO63ORqbTJru22gyMAdYXNfDds7kumvn34Xo,16175
mdt/gui/maps_visualizer/config_tabs/tab_map_specific.py,sha256=ysILqg8canVHb50staAGZ5I_RTz_cP4i_W_rAbULqGE,14352
mdt/gui/maps_visualizer/config_tabs/tab_textual.py,sha256=f2inZQWyn69KbTFKm5Z-uXM6TO37BTQOXRAFKpQE57M,2307
mdt/gui/maps_visualizer/design/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
mdt/gui/maps_visualizer/design/main_rc.py,sha256=5nCbnMova9Vs9DaWRjlzT3YgMCRDGneWf9EgkfgS7Wc,18034
mdt/gui/maps_visualizer/design/ui_MainWindow.py,sha256=xwCROXoeM4pABn8moKkluJIqttnAjlvXrvmMV2Y0ZKc,11387
mdt/gui/maps_visualizer/design/ui_MapSpecificOptions.py,sha256=A8FN1ZEAuJGD3Qv-OAdbfzeWOpl-OYTORbilrCeQR8A,18045
mdt/gui/maps_visualizer/design/ui_TabGeneral.py,sha256=VvRxplMzDMMD-e3WYDoypt8jo1tBIYS2sxH3fGxwPcU,30147
mdt/gui/maps_visualizer/design/ui_TabMapSpecific.py,sha256=PGaMyY6c-rpyOMZJBpGpFLEg_-TmArDBWnfg7ZSs0Zc,2978
mdt/gui/maps_visualizer/design/ui_TabTextual.py,sha256=J78be5IxgFy-cK9MdiiiTPXLzmbOer43zIZA_pomFAE,1531
mdt/gui/maps_visualizer/design/ui_save_image_dialog.py,sha256=8zcrQ_7HY26748kYFeadR0kX_98tMSe5rzT2dd8mwUg,6913
mdt/gui/maps_visualizer/renderers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
mdt/gui/maps_visualizer/renderers/matplotlib_renderer.py,sha256=OFZXA7Vnpmap55-PGs-8X1M2799joTo1bgUMZyGudwY,5159
mdt/gui/model_fit/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
mdt/gui/model_fit/qt_main.py,sha256=YPnabEM_s_3bHGxU58uWun4iwGF1kgQ7X9kLUF4JEcE,8922
mdt/gui/model_fit/utils.py,sha256=kYS_T977n26PjPKr0vOiB7Vpl6hulBccCWgB515HbxA,1318
mdt/gui/model_fit/design/__init__.py,sha256=YO1sinIazVULwtE8ahmd-jeTsZWMI9d4yv2A_SlmDdU,138
mdt/gui/model_fit/design/main_gui_rc.py,sha256=7oM5kBcVSAkoJMXEHyhpEYoO_csrs0CQtwVjXI2EpIs,21644
mdt/gui/model_fit/design/ui_about_dialog.py,sha256=8nNqGbkcjJA5SIHSOOmVxRo5giAA_fDS4tjZWVjvHwI,6123
mdt/gui/model_fit/design/ui_fit_model_tab.py,sha256=jI-DtCt9oh-Tr7to5K_CVFU1NWE_yclGzUeXd8ODdp8,12343
mdt/gui/model_fit/design/ui_generate_brain_mask_tab.py,sha256=GUG2H-zYUsSjlwv3-80gzxW2X2pNLnzi6nKLE9U9bHQ,14056
mdt/gui/model_fit/design/ui_generate_protocol_load_gb_dialog.py,sha256=C_zWwjwrPYdlvh2t8aU5WDIdAA4pdyZzlNO0TPUaR-I,6518
mdt/gui/model_fit/design/ui_generate_protocol_tab.py,sha256=Y7mRj0XFqwKtHqGpFU7nlPHbAAW7uy0DrktWinmTxME,10636
mdt/gui/model_fit/design/ui_generate_protocol_update_dialog.py,sha256=7XLaN4Ri_XZ84oSrmAb7q3jSg45-OoNN3YE2Iz89ZWE,8578
mdt/gui/model_fit/design/ui_generate_roi_mask_tab.py,sha256=mpKgnlnEG2G2MYInRViVMPbFIoh5TKQaG4wvTV4LQH8,11234
mdt/gui/model_fit/design/ui_main_gui.py,sha256=5a6ljMKuCddogamTeOTzsgnSOeaOhz_jIBTUNUxMHSs,7911
mdt/gui/model_fit/design/ui_optimization_options_dialog.py,sha256=jX7UDxi1k8BHIqye0mANUc0XP31Qe41ekkslb9gpS0o,14162
mdt/gui/model_fit/design/ui_runtime_settings_dialog.py,sha256=BH1t670w1LtW8gCF0tc1LyYsxxRQLM6e17FFVswxcTQ,4140
mdt/gui/model_fit/design/ui_view_results_tab.py,sha256=wt2y5AYHHxtbxkdyl2O_dtIf9uGA7LHBrm8cjDF53TM,11462
mdt/gui/model_fit/tabs/__init__.py,sha256=-wawPV3GbKNqOH6aILwzTSd9ogSxa6_tTSwumUjVhuc,138
mdt/gui/model_fit/tabs/fit_model_tab.py,sha256=hcdNKFqor6ttFsla8PWnPfz4d397IFXVU9seC03FGGo,12941
mdt/gui/model_fit/tabs/generate_brain_mask_tab.py,sha256=tOCZ2lMX5k-CeD-16dSDQGatrEG5H1WnxH5vbd4TVZE,6131
mdt/gui/model_fit/tabs/generate_protocol_tab.py,sha256=xP65fpVeTLsUl5Mt17qn_2ZMZyKR8sLpp6CfLGtOrzw,14404
mdt/gui/model_fit/tabs/generate_roi_mask_tab.py,sha256=v7Ecy6sIR4YWDMaPQeCoKR-LO6LYFYstQ1Hqu9Ybl6c,7202
mdt/gui/model_fit/tabs/view_results_tab.py,sha256=PLFBfi9ZkLtjmJ2_Ihr3_sg5YnjAZLiebargMw7IpSg,4413
mdt/models/__init__.py,sha256=ihLS8SJph07QeefSML5TjnCFWU8SWx6olkG85EqX9Ow,138
mdt/models/base.py,sha256=TUOdNcyCUgnYcv_fzyfo0YvQNlJ_Vl6AcvFqT4Agf6s,2667
mdt/models/cascade.py,sha256=bs_bsW0t7RHGMiKvI1MrbNH6AGc1G-z3QnLduarSVks,12073
mdt/models/compartments.py,sha256=0USt1vAEUvTGbkukBS1-aLezpebrQPa4m2QrDebtzMU,12090
mdt/models/composite.py,sha256=aNGsUyV3Jn5oxftLLA5jjtPmsLv6pbwqf7DTJf23Uto,21443
mdt/models/parameters.py,sha256=IITCJl46JSo-AVd2nghbDLSUrt25pn4cF-Kp5RcKLHE,5148
mdt/models/parsers/CompositeModelExpression.py,sha256=AkdeCwgtT_MrAfpgdzLOP6N2dwYpgGXyGXuyZDLsuXA,3379
mdt/models/parsers/CompositeModelExpressionParser.py,sha256=T6-Vhs5Y7_IZ6v95QRkiegNK-rEt9zWC1RYUCn2DEBA,1701
mdt/models/parsers/__init__.py,sha256=2I3J4yc3ugVC4uFSzsizmnniwLmq6oKltk2RuBflLMo,138
mdt/visualization/__init__.py,sha256=lzhmErypOVXQ2mYJYXjqVFIa22_vUgTeKJHZya0C3lg,138
mdt/visualization/dict_conversion.py,sha256=cw52exj8l5FwwXhAtBcYPyX1bxZ-B1Ez9jz9hkakyQs,7100
mdt/visualization/layouts.py,sha256=qjBXYgQFN1DxBRSPU0FRMTWxqQLkBBjsMXFvznVCD2c,6242
mdt/visualization/samples.py,sha256=eX4_GtzHxhpvox4dsdO6rkqRha7cO8mRAYujsO35o_I,7424
mdt/visualization/utils.py,sha256=UeQKsR7AOeGRDF4cl2BCXaJMjeicUaSGKj2A8uq-xUM,1040
mdt/visualization/maps/__init__.py,sha256=lzhmErypOVXQ2mYJYXjqVFIa22_vUgTeKJHZya0C3lg,138
mdt/visualization/maps/base.py,sha256=qlzi6d3V_WG5nZOvPnfpRO0che8PIy1Sz34RaN777Bc,43044
mdt/visualization/maps/matplotlib_renderer.py,sha256=LLvbn8F95nAXDWBELcJUigqyugDa9uvTG2MH7-SrO50,11500
mdt-0.9.5.dist-info/DESCRIPTION.rst,sha256=y0Q7SuccvHau6VBL2pdnOn3obkRYhLCSzdl1FNdTC6s,2378
mdt-0.9.5.dist-info/METADATA,sha256=EwQP6SsQc5rBMhuBgwluNn1YHmzX5kawh0OTjs9Ivgw,3798
mdt-0.9.5.dist-info/RECORD,,
mdt-0.9.5.dist-info/WHEEL,sha256=GrqQvamwgBV4nLoJe0vhYRSWzWsx7xjlt74FT0SWYfE,110
mdt-0.9.5.dist-info/entry_points.txt,sha256=FH7tdSL2enHHcEoArRN2oiGC2PgsLEeuG0NJ9PciTP4,1400
mdt-0.9.5.dist-info/metadata.json,sha256=a6fVYmlE3I-sJADwSUmm377imSCCcdKst-xhg_2v0ts,4446
mdt-0.9.5.dist-info/top_level.txt,sha256=WHSD8V0nXgCTT0L1cSXPklFvQjKdZG1oeuWvO-C0PnQ,10
tests/__init__.py,sha256=IdOAwaxVUNgXhaYH0xuLRZIycRf1PM8QA0zcbcDF6VA,161