################################################################################
# File: nilearn/__init__.py
################################################################################
"""
Machine Learning module for NeuroImaging in python
--------------------------------------------------

Documentation is available in the docstrings and online at
http://nilearn.github.io.

Contents
--------
Nilearn aims at simplifying the use of the scikit-learn package in the context
of neuroimaging. It provides specific input/output functions, algorithms and
visualization tools.

Submodules
---------
datasets                --- Utilities to download NeuroImaging datasets
decoding                --- Decoding tools and algorithms
decomposition           --- Includes a subject level variant of the ICA
                            algorithm called Canonical ICA
connectome              --- Set of tools for computing functional connectivity
                            matrices and for sparse multi-subjects learning of
                            Gaussian graphical models
image                   --- Set of functions defining mathematical operations
                            working on Niimg-like objects
input_data              --- Includes scikit-learn transformers and tools to
                            preprocess neuro-imaging data
masking                 --- Utilities to compute and operate on brain masks
mass_univariate         --- Defines a Massively Univariate Linear Model
                            estimated with OLS and permutation test
plotting                --- Plotting code for nilearn
region                  --- Set of functions for extracting region-defined
                            signals
signal                  --- Set of preprocessing functions for time series
"""

import gzip

from .version import _check_module_dependencies, __version__

_check_module_dependencies()

# Monkey-patch gzip to have faster reads on large gzip files
if hasattr(gzip.GzipFile, 'max_read_chunk'):
    gzip.GzipFile.max_read_chunk = 100 * 1024 * 1024  # 100Mb

# Boolean controlling the default globbing technique when using check_niimg
# and the os.path.expanduser usage in CacheMixin.
# Default value is True, set it to False to completely deactivate this
# behavior.
EXPAND_PATH_WILDCARDS = True

# Boolean controlling whether the joblib caches should be
# flushed if the version of certain modules changes (eg nibabel, as it
# does not respect the backward compatibility in some of its internal
# structures). This is used in nilearn._utils.cache_mixin
CHECK_CACHE_VERSION = True

# list all submodules available in nilearn and version
__all__ = ['datasets', 'decoding', 'decomposition', 'connectome',
           'image', 'input_data', 'masking', 'mass_univariate', 'plotting',
           'region', 'signal', '__version__']
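# A minimal usage sketch of the module-level switches defined above
# (assumes nilearn and its dependencies are installed):
#
#     >>> import nilearn
#     >>> nilearn.__version__                        # doctest: +SKIP
#     '0.2.5'
#     >>> nilearn.EXPAND_PATH_WILDCARDS = False      # disable glob expansion
#     >>> nilearn.CHECK_CACHE_VERSION = False        # keep caches across upgrades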
""" if detrend: signals = _detrend(signals, inplace=False) else: signals = signals.copy() if normalize: if signals.shape[0] == 1: warnings.warn('Standardization of 3D signal has been requested but ' 'would lead to zero values. Skipping.') return signals if not detrend: # remove mean if not already detrended signals = signals - signals.mean(axis=0) std = np.sqrt((signals ** 2).sum(axis=0)) std[std < np.finfo(np.float).eps] = 1. # avoid numerical problems signals /= std return signals def _mean_of_squares(signals, n_batches=20): """Compute mean of squares for each signal. This function is equivalent to var = np.copy(signals) var **= 2 var = var.mean(axis=0) but uses a lot less memory. Parameters ---------- signals : numpy.ndarray, shape (n_samples, n_features) signal whose mean of squares must be computed. n_batches : int, optional number of batches to use in the computation. Tweaking this value can lead to variation of memory usage and computation time. The higher the value, the lower the memory consumption. """ # No batching for small arrays if signals.shape[1] < 500: n_batches = 1 # Fastest for C order var = np.empty(signals.shape[1]) for batch in gen_even_slices(signals.shape[1], n_batches): tvar = np.copy(signals[:, batch]) tvar **= 2 var[batch] = tvar.mean(axis=0) return var def _detrend(signals, inplace=False, type="linear", n_batches=10): """Detrend columns of input array. Signals are supposed to be columns of `signals`. This function is significantly faster than scipy.signal.detrend on this case and uses a lot less memory. Parameters ---------- signals : numpy.ndarray This parameter must be two-dimensional. Signals to detrend. A signal is a column. inplace : bool, optional Tells if the computation must be made inplace or not (default False). type : str, optional Detrending type ("linear" or "constant"). See also scipy.signal.detrend. n_batches : int, optional number of batches to use in the computation. Tweaking this value can lead to variation of memory usage and computation time. The higher the value, the lower the memory consumption. Returns ------- detrended_signals: numpy.ndarray Detrended signals. The shape is that of 'signals'. Notes ----- If a signal of lenght 1 is given, it is returned unchanged. """ signals = as_float_array(signals, copy=not inplace) if signals.shape[0] == 1: warnings.warn('Detrending of 3D signal has been requested but ' 'would lead to zero values. Skipping.') return signals signals -= np.mean(signals, axis=0) if type == "linear": # Keeping "signals" dtype avoids some type conversion further down, # and can save a lot of memory if dtype is single-precision. regressor = np.arange(signals.shape[0], dtype=signals.dtype) regressor -= regressor.mean() std = np.sqrt((regressor ** 2).sum()) # avoid numerical problems if not std < np.finfo(np.float).eps: regressor /= std regressor = regressor[:, np.newaxis] # No batching for small arrays if signals.shape[1] < 500: n_batches = 1 # This is fastest for C order. for batch in gen_even_slices(signals.shape[1], n_batches): signals[:, batch] -= np.dot(regressor[:, 0], signals[:, batch] ) * regressor return signals def _check_wn(btype, freq, nyq): wn = freq / float(nyq) if wn > 1.: warnings.warn( 'The frequency specified for the %s pass filter is ' 'too high to be handled by a digital filter (superior to ' 'nyquist frequency). It has been lowered to %.2f (nyquist ' 'frequency).' % (btype, nyq)) wn = 1. 
def _check_wn(btype, freq, nyq):
    wn = freq / float(nyq)
    if wn > 1.:
        warnings.warn(
            'The frequency specified for the %s pass filter is '
            'too high to be handled by a digital filter (superior to '
            'nyquist frequency). It has been lowered to %.2f (nyquist '
            'frequency).' % (btype, nyq))
        wn = 1.
    return wn


def butterworth(signals, sampling_rate, low_pass=None, high_pass=None,
                order=5, copy=False, save_memory=False):
    """ Apply a low-pass, high-pass or band-pass Butterworth filter

    Apply a filter to remove signal below the `low` frequency and above the
    `high` frequency.

    Parameters
    ----------
    signals: numpy.ndarray (1D sequence or n_samples x n_sources)
        Signals to be filtered. A signal is assumed to be a column
        of `signals`.

    sampling_rate: float
        Number of samples per time unit (sample frequency)

    low_pass: float, optional
        If specified, signals above this frequency will be filtered out
        (low pass). This is -3dB cutoff frequency.

    high_pass: float, optional
        If specified, signals below this frequency will be filtered out
        (high pass). This is -3dB cutoff frequency.

    order: integer, optional
        Order of the Butterworth filter. When filtering signals, the
        filter has a decay to avoid ringing. Increasing the order
        sharpens this decay. Be aware that very high orders could lead
        to numerical instability.

    copy: bool, optional
        If False, `signals` is modified inplace, and memory consumption is
        lower than for copy=True, though computation time is higher.

    Returns
    -------
    filtered_signals: numpy.ndarray
        Signals filtered according to the parameters
    """
    if low_pass is None and high_pass is None:
        # Nothing to filter: return the input array `signals`
        # (not the scipy.signal module).
        if copy:
            return signals.copy()
        else:
            return signals

    if low_pass is not None and high_pass is not None \
            and high_pass >= low_pass:
        raise ValueError(
            "High pass cutoff frequency (%f) is greater than or equal to "
            "low pass filter frequency (%f). This case is not handled "
            "by this function." % (high_pass, low_pass))

    nyq = sampling_rate * 0.5

    critical_freq = []
    if high_pass is not None:
        btype = 'high'
        critical_freq.append(_check_wn(btype, high_pass, nyq))

    if low_pass is not None:
        btype = 'low'
        critical_freq.append(_check_wn(btype, low_pass, nyq))

    if len(critical_freq) == 2:
        btype = 'band'
    else:
        critical_freq = critical_freq[0]

    b, a = signal.butter(order, critical_freq, btype=btype)
    if signals.ndim == 1:
        # 1D case
        output = signal.filtfilt(b, a, signals)
        if copy:  # filtfilt does a copy in all cases.
            signals = output
        else:
            signals[...] = output
    else:
        if copy:
            if (LooseVersion(scipy.__version__) < LooseVersion('0.10.0')):
                # filtfilt is 1D only in scipy 0.9.0
                signals = signals.copy()
                for timeseries in signals.T:
                    timeseries[:] = signal.filtfilt(b, a, timeseries)
            else:
                # No way to save memory when a copy has been requested,
                # because filtfilt does out-of-place processing
                signals = signal.filtfilt(b, a, signals, axis=0)
        else:
            # Lesser memory consumption, slower.
            for timeseries in signals.T:
                timeseries[:] = signal.filtfilt(b, a, timeseries)

    return signals
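# Usage sketch for `butterworth`: band-pass filter three noisy signals
# sampled at 0.5 Hz (i.e. t_r = 2 s). The shape is preserved:
#
#     >>> import numpy as np
#     >>> ts = np.random.randn(100, 3)
#     >>> filtered = butterworth(ts, sampling_rate=0.5,
#     ...                        low_pass=0.1, high_pass=0.01, copy=True)
#     >>> filtered.shape
#     (100, 3)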
def high_variance_confounds(series, n_confounds=5, percentile=2.,
                            detrend=True):
    """ Return confounds time series extracted from series with highest
        variance.

        Parameters
        ----------
        series: numpy.ndarray
            Timeseries. A timeseries is a column in the "series" array.
            shape (sample number, feature number)

        n_confounds: int, optional
            Number of confounds to return

        percentile: float, optional
            Highest-variance series percentile to keep before computing the
            singular value decomposition, 0. <= `percentile` <= 100.
            series.shape[0] * percentile / 100 must be greater than
            n_confounds

        detrend: bool, optional
            If True, detrend timeseries before processing.

        Returns
        -------
        v: numpy.ndarray
            highest variance confounds. Shape: (samples, n_confounds)

        Notes
        -----
        This method is related to what has been published in the literature
        as 'CompCor' (Behzadi NeuroImage 2007).

        The implemented algorithm does the following:

        - compute sum of squares for each time series (no mean removal)
        - keep a given percentile of series with highest variances
          (percentile)
        - compute an svd of the extracted series
        - return a given number (n_confounds) of series from the svd with
          highest singular values.

        See also
        --------
        nilearn.image.high_variance_confounds
    """
    if detrend:
        series = _detrend(series)  # copy

    # Retrieve the voxels|features with highest variance
    # Compute variance without mean removal.
    var = _mean_of_squares(series)
    var_thr = stats.scoreatpercentile(var, 100. - percentile)
    series = series[:, var > var_thr]  # extract columns (i.e. features)

    # Return the singular vectors with largest singular values
    # We solve the symmetric eigenvalue problem here, increasing stability
    s, u = linalg.eigh(series.dot(series.T) / series.shape[0])
    ix_ = np.argsort(s)[::-1]
    u = u[:, ix_[:n_confounds]].copy()
    return u
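# Usage sketch for `high_variance_confounds`, in the spirit of CompCor:
#
#     >>> import numpy as np
#     >>> series = np.random.randn(50, 1000)   # 50 scans, 1000 voxels
#     >>> confounds = high_variance_confounds(series, n_confounds=5)
#     >>> confounds.shape
#     (50, 5)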
def _ensure_float(data):
    "Make sure that data is a float type"
    if not data.dtype.kind == 'f':
        if data.dtype.itemsize == 8:  # compare as int, not as the string '8'
            data = data.astype(np.float64)
        else:
            data = data.astype(np.float32)
    return data


def clean(signals, sessions=None, detrend=True, standardize=True,
          confounds=None, low_pass=None, high_pass=None, t_r=2.5):
    """Improve SNR on masked fMRI signals.

    This function can do several things on the input signals, in
    the following order:

    - detrend
    - standardize
    - remove confounds
    - low- and high-pass filter

    Low-pass filtering improves specificity.

    High-pass filtering should be kept small, to keep some
    sensitivity.

    Filtering is only meaningful on evenly-sampled signals.

    Parameters
    ----------
    signals: numpy.ndarray
        Timeseries. Must have shape (instant number, features number).
        This array is not modified.

    sessions : numpy array, optional
        Add a session level to the cleaning process. Each session will be
        cleaned independently. Must be a 1D array of n_samples elements.

    confounds: numpy.ndarray, str or list of
        Confounds timeseries. Shape must be
        (instant number, confound number), or just (instant number,)
        The number of time instants in signals and confounds must be
        identical (i.e. signals.shape[0] == confounds.shape[0]).
        If a string is provided, it is assumed to be the name of a csv file
        containing signals as columns, with an optional one-line header.
        If a list is provided, all confounds are removed from the input
        signal, as if all were in the same array.

    t_r: float
        Repetition time, in seconds (sampling period).

    low_pass, high_pass: float
        Respectively low and high cutoff frequencies, in Hertz.

    detrend: bool
        If detrending should be applied on timeseries (before
        confound removal)

    standardize: bool
        If True, returned signals are set to unit variance.

    Returns
    -------
    cleaned_signals: numpy.ndarray
        Input signals, cleaned. Same shape as `signals`.

    Notes
    -----
    Confounds removal is based on a projection on the orthogonal
    of the signal space. See `Friston, K. J., A. P. Holmes,
    K. J. Worsley, J.-P. Poline, C. D. Frith, et R. S. J. Frackowiak.
    "Statistical Parametric Maps in Functional Imaging: A General
    Linear Approach". Human Brain Mapping 2, no 4 (1994): 189-210.`_

    See Also
    --------
    nilearn.image.clean_img
    """
    if not isinstance(confounds,
                      (list, tuple, _basestring, np.ndarray, type(None))):
        raise TypeError("confounds keyword has an unhandled type: %s"
                        % confounds.__class__)

    # Read confounds
    if confounds is not None:
        if not isinstance(confounds, (list, tuple)):
            confounds = (confounds, )

        all_confounds = []
        for confound in confounds:
            if isinstance(confound, _basestring):
                filename = confound
                confound = csv_to_array(filename)
                if np.isnan(confound.flat[0]):
                    # There may be a header
                    if NP_VERSION >= [1, 4, 0]:
                        confound = csv_to_array(filename, skip_header=1)
                    else:
                        confound = csv_to_array(filename, skiprows=1)
                if confound.shape[0] != signals.shape[0]:
                    raise ValueError("Confound signal has an incorrect "
                                     "length")

            elif isinstance(confound, np.ndarray):
                if confound.ndim == 1:
                    confound = np.atleast_2d(confound).T
                elif confound.ndim != 2:
                    raise ValueError("confound array has an incorrect number "
                                     "of dimensions: %d" % confound.ndim)

                if confound.shape[0] != signals.shape[0]:
                    raise ValueError("Confound signal has an incorrect "
                                     "length")
            else:
                raise TypeError("confound has an unhandled type: %s"
                                % confound.__class__)
            all_confounds.append(confound)

        # Restrict the signal to the orthogonal of the confounds
        confounds = np.hstack(all_confounds)
        del all_confounds

    if sessions is not None:
        if not len(sessions) == len(signals):
            raise ValueError(('The length of the session vector (%i) '
                              'does not match the length of the signals (%i)')
                             % (len(sessions), len(signals)))
        for s in np.unique(sessions):
            session_confounds = None
            if confounds is not None:
                session_confounds = confounds[sessions == s]
            # Propagate the caller's t_r instead of the hard-coded 2.5
            signals[sessions == s, :] = \
                clean(signals[sessions == s],
                      detrend=detrend, standardize=standardize,
                      confounds=session_confounds, low_pass=low_pass,
                      high_pass=high_pass, t_r=t_r)

    # detrend
    signals = _ensure_float(signals)
    signals = _standardize(signals, normalize=False, detrend=detrend)

    # Remove confounds
    if confounds is not None:
        confounds = _ensure_float(confounds)
        confounds = _standardize(confounds, normalize=standardize,
                                 detrend=detrend)
        if not standardize:
            # Improve numerical stability by controlling the range of
            # confounds. We don't rely on _standardize as it removes any
            # constant contribution to confounds.
            confound_max = np.max(np.abs(confounds), axis=0)
            confound_max[confound_max == 0] = 1
            confounds /= confound_max

        if (LooseVersion(scipy.__version__) > LooseVersion('0.9.0')):
            # Pivoting in qr decomposition was added in scipy 0.10
            Q, R, _ = linalg.qr(confounds, mode='economic', pivoting=True)
            Q = Q[:, np.abs(np.diag(R)) > np.finfo(np.float).eps * 100.]
            signals -= Q.dot(Q.T).dot(signals)
        else:
            Q, R = linalg.qr(confounds, mode='economic')
            non_null_diag = np.abs(np.diag(R)) > np.finfo(np.float).eps * 100.
            if np.all(non_null_diag):
                signals -= Q.dot(Q.T).dot(signals)
            elif np.any(non_null_diag):
                R = R[:, non_null_diag]
                confounds = confounds[:, non_null_diag]
                inv = scipy.linalg.inv(np.dot(R.T, R))
                signals -= confounds.dot(inv).dot(confounds.T).dot(signals)

    if low_pass is not None or high_pass is not None:
        signals = butterworth(signals, sampling_rate=1. / t_r,
                              low_pass=low_pass, high_pass=high_pass)

    if standardize:
        signals = _standardize(signals, normalize=True, detrend=False)
        signals *= np.sqrt(signals.shape[0])  # for unit variance

    return signals
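# Typical call to `clean`: detrend, regress out confounds and band-pass
# filter in one pass (signals and confounds are synthetic here):
#
#     >>> import numpy as np
#     >>> signals = np.random.randn(100, 200)
#     >>> confounds = np.random.randn(100, 6)   # e.g. motion parameters
#     >>> cleaned = clean(signals, confounds=confounds, t_r=2.5,
#     ...                 low_pass=0.1, high_pass=0.01)
#     >>> cleaned.shape
#     (100, 200)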
################################################################################
# File: nilearn/version.py
################################################################################
# *- encoding: utf-8 -*-
"""
nilearn version, required package versions, and utilities for checking
"""
# Author: Loïc Estève, Ben Cipollini
# License: simplified BSD

# PEP0440 compatible formatted version, see:
# https://www.python.org/dev/peps/pep-0440/
#
# Generic release markers:
#   X.Y
#   X.Y.Z   # For bugfix releases
#
# Admissible pre-release markers:
#   X.YaN   # Alpha release
#   X.YbN   # Beta release
#   X.YrcN  # Release Candidate
#   X.Y     # Final release
#
# Dev branch marker is: 'X.Y.dev' or 'X.Y.devN' where N is an integer.
# 'X.Y.dev0' is the canonical version of 'X.Y.dev'
#
__version__ = '0.2.5'

_NILEARN_INSTALL_MSG = 'See %s for installation information.' % (
    'http://nilearn.github.io/introduction.html#installation')

# This is a tuple to preserve order, so that dependencies are checked
# in some meaningful order (more => less 'core'). We avoid using
# collections.OrderedDict to preserve Python 2.6 compatibility.
REQUIRED_MODULE_METADATA = (
    ('numpy', {
        'min_version': '1.6.1',
        'required_at_installation': True,
        'install_info': _NILEARN_INSTALL_MSG}),
    ('scipy', {
        'min_version': '0.9.0',
        'required_at_installation': True,
        'install_info': _NILEARN_INSTALL_MSG}),
    ('sklearn', {
        'min_version': '0.13',
        'required_at_installation': True,
        'install_info': _NILEARN_INSTALL_MSG}),
    ('nibabel', {
        'min_version': '1.1.0',
        'required_at_installation': False}))

OPTIONAL_MATPLOTLIB_MIN_VERSION = '1.1.1'


def _import_module_with_version_check(
        module_name,
        minimum_version,
        install_info=None):
    """Check that module is installed with a recent enough version
    """
    from distutils.version import LooseVersion

    try:
        module = __import__(module_name)
    except ImportError as exc:
        user_friendly_info = ('Module "{0}" could not be found. {1}').format(
            module_name,
            install_info or 'Please install it properly to use nilearn.')
        exc.args += (user_friendly_info,)
        raise

    # Avoid choking on modules with no __version__ attribute
    module_version = getattr(module, '__version__', '0.0.0')

    version_too_old = (not LooseVersion(module_version) >=
                       LooseVersion(minimum_version))

    if version_too_old:
        message = (
            'A {module_name} version of at least {minimum_version} '
            'is required to use nilearn. {module_version} was found. '
            'Please upgrade {module_name}').format(
                module_name=module_name,
                minimum_version=minimum_version,
                module_version=module_version)
        raise ImportError(message)

    return module


def _check_module_dependencies(is_nilearn_installing=False):
    """Throw an exception if nilearn dependencies are not installed.

    Parameters
    ----------
    is_nilearn_installing: boolean
        if True, only error on missing packages that cannot be
        auto-installed.
        if False, error on any missing package.

    Raises
    ------
    ImportError
    """
    for (module_name, module_metadata) in REQUIRED_MODULE_METADATA:
        if not (is_nilearn_installing and
                not module_metadata['required_at_installation']):
            # Skip check only when installing and it's a module that
            # will be auto-installed.
            _import_module_with_version_check(
                module_name=module_name,
                minimum_version=module_metadata['min_version'],
                install_info=module_metadata.get('install_info'))
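# Sketch of how the private version check is used elsewhere in nilearn
# (the same pattern as the matplotlib check in nilearn.plotting below):
#
#     >>> from nilearn.version import _import_module_with_version_check
#     >>> np_module = _import_module_with_version_check('numpy', '1.6.1')
#     >>> np_module.__name__
#     'numpy'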
################################################################################
# File: nilearn/masking.py
################################################################################
"""
Utilities to compute and operate on brain masks
"""
# Author: Gael Varoquaux, Alexandre Abraham, Philippe Gervais
# License: simplified BSD
import warnings

import numpy as np
from scipy import ndimage
from sklearn.externals.joblib import Parallel, delayed

from . import _utils
from .image import new_img_like
from ._utils.cache_mixin import cache
from ._utils.ndimage import largest_connected_component, get_border_data
from ._utils.niimg import _safe_get_data


class MaskWarning(UserWarning):
    "A class to always raise warnings"


warnings.simplefilter("always", MaskWarning)


def _load_mask_img(mask_img, allow_empty=False):
    """Check that a mask is valid, ie with two values including 0 and load it.

    Parameters
    ----------
    mask_img: Niimg-like object
        See http://nilearn.github.io/manipulating_images/input_output.html.
        The mask to check

    allow_empty: boolean, optional
        Allow loading an empty mask (full of 0 values)

    Returns
    -------
    mask: numpy.ndarray
        boolean version of the mask
    """
    mask_img = _utils.check_niimg_3d(mask_img)
    mask = mask_img.get_data()
    values = np.unique(mask)

    if len(values) == 1:
        # We accept a single value if it is not 0 (full true mask).
        if values[0] == 0 and not allow_empty:
            raise ValueError(
                'The mask is invalid as it is empty: it masks all data.')
    elif len(values) == 2:
        # If there are 2 different values, one of them must be 0 (background)
        if not 0 in values:
            raise ValueError('Background of the mask must be represented '
                             'with 0. Given mask contains: %s.' % values)
    else:
        # If there are more than 2 values, the mask is invalid
        raise ValueError('Given mask is not made of 2 values: %s'
                         '. Cannot interpret as true or false' % values)

    mask = _utils.as_ndarray(mask, dtype=bool)
    return mask, mask_img.get_affine()
""" if iterations > 1: data, mask = _extrapolate_out_mask(data, mask, iterations=iterations - 1) new_mask = ndimage.binary_dilation(mask) larger_mask = np.zeros(np.array(mask.shape) + 2, dtype=np.bool) larger_mask[1:-1, 1:-1, 1:-1] = mask # Use nans as missing value: ugly masked_data = np.zeros(larger_mask.shape + data.shape[3:]) masked_data[1:-1, 1:-1, 1:-1] = data.copy() masked_data[np.logical_not(larger_mask)] = np.nan outer_shell = larger_mask.copy() outer_shell[1:-1, 1:-1, 1:-1] = new_mask - mask outer_shell_x, outer_shell_y, outer_shell_z = np.where(outer_shell) extrapolation = list() for i, j, k in [(0, 1, 0), (0, -1, 0), (1, 0, 0), (-1, 0, 0), (1, 0, 0), (-1, 0, 0)]: this_x = outer_shell_x + i this_y = outer_shell_y + j this_z = outer_shell_z + k extrapolation.append(masked_data[this_x, this_y, this_z]) extrapolation = np.array(extrapolation) extrapolation = (np.nansum(extrapolation, axis=0) / np.sum(np.isfinite(extrapolation), axis=0)) extrapolation[np.logical_not(np.isfinite(extrapolation))] = 0 new_data = np.zeros_like(masked_data) new_data[outer_shell] = extrapolation new_data[larger_mask] = masked_data[larger_mask] return new_data[1:-1, 1:-1, 1:-1], new_mask # # Utilities to compute masks # def intersect_masks(mask_imgs, threshold=0.5, connected=True): """ Compute intersection of several masks Given a list of input mask images, generate the output image which is the the threshold-level intersection of the inputs Parameters ---------- mask_imgs: list of Niimg-like objects See http://nilearn.github.io/manipulating_images/input_output.html. 3D individual masks with same shape and affine. threshold: float, optional Gives the level of the intersection, must be within [0, 1]. threshold=1 corresponds to keeping the intersection of all masks, whereas threshold=0 is the union of all masks. connected: bool, optional If true, extract the main connected component Returns ------- grp_mask: 3D nibabel.Nifti1Image intersection of all masks. """ if len(mask_imgs) == 0: raise ValueError('No mask provided for intersection') grp_mask = None first_mask, ref_affine = _load_mask_img(mask_imgs[0], allow_empty=True) ref_shape = first_mask.shape if threshold > 1: raise ValueError('The threshold should be smaller than 1') if threshold < 0: raise ValueError('The threshold should be greater than 0') threshold = min(threshold, 1 - 1.e-7) for this_mask in mask_imgs: mask, affine = _load_mask_img(this_mask, allow_empty=True) if np.any(affine != ref_affine): raise ValueError("All masks should have the same affine") if np.any(mask.shape != ref_shape): raise ValueError("All masks should have the same shape") if grp_mask is None: # We use int here because there may be a lot of masks to merge grp_mask = _utils.as_ndarray(mask, dtype=int) else: # If this_mask is floating point and grp_mask is integer, numpy 2 # casting rules raise an error for in-place addition. Hence we do # it long-hand. # XXX should the masks be coerced to int before addition? grp_mask += mask grp_mask = grp_mask > (threshold * len(list(mask_imgs))) if np.any(grp_mask > 0) and connected: grp_mask = largest_connected_component(grp_mask) grp_mask = _utils.as_ndarray(grp_mask, dtype=np.int8) return new_img_like(_utils.check_niimg_3d(mask_imgs[0]), grp_mask, ref_affine) def _post_process_mask(mask, affine, opening=2, connected=True, warning_msg=""): if opening: opening = int(opening) mask = ndimage.binary_erosion(mask, iterations=opening) mask_any = mask.any() if not mask_any: warnings.warn("Computed an empty mask. 
%s" % warning_msg, MaskWarning, stacklevel=2) if connected and mask_any: mask = largest_connected_component(mask) if opening: mask = ndimage.binary_dilation(mask, iterations=2 * opening) mask = ndimage.binary_erosion(mask, iterations=opening) return mask, affine def compute_epi_mask(epi_img, lower_cutoff=0.2, upper_cutoff=0.85, connected=True, opening=2, exclude_zeros=False, ensure_finite=True, target_affine=None, target_shape=None, memory=None, verbose=0,): """Compute a brain mask from fMRI data in 3D or 4D ndarrays. This is based on an heuristic proposed by T.Nichols: find the least dense point of the histogram, between fractions lower_cutoff and upper_cutoff of the total image histogram. In case of failure, it is usually advisable to increase lower_cutoff. Parameters ---------- epi_img: Niimg-like object See http://nilearn.github.io/manipulating_images/input_output.html. EPI image, used to compute the mask. 3D and 4D images are accepted. If a 3D image is given, we suggest to use the mean image lower_cutoff: float, optional lower fraction of the histogram to be discarded. upper_cutoff: float, optional upper fraction of the histogram to be discarded. connected: bool, optional if connected is True, only the largest connect component is kept. opening: bool or int, optional if opening is True, a morphological opening is performed, to keep only large structures. This step is useful to remove parts of the skull that might have been included. If opening is an integer `n`, it is performed via `n` erosions. After estimation of the largest connected constituent, 2`n` closing operations are performed followed by `n` erosions. This corresponds to 1 opening operation of order `n` followed by a closing operator of order `n`. Note that turning off opening (opening=False) will also prevent any smoothing applied to the image during the mask computation. ensure_finite: bool If ensure_finite is True, the non-finite values (NaNs and infs) found in the images will be replaced by zeros exclude_zeros: bool, optional Consider zeros as missing values for the computation of the threshold. This option is useful if the images have been resliced with a large padding of zeros. target_affine: 3x3 or 4x4 matrix, optional This parameter is passed to image.resample_img. Please see the related documentation for details. target_shape: 3-tuple of integers, optional This parameter is passed to image.resample_img. Please see the related documentation for details. memory: instance of joblib.Memory or string Used to cache the function call: if this is a string, it specifies the directory where the cache will be stored. 
def compute_epi_mask(epi_img, lower_cutoff=0.2, upper_cutoff=0.85,
                     connected=True, opening=2, exclude_zeros=False,
                     ensure_finite=True,
                     target_affine=None, target_shape=None,
                     memory=None, verbose=0,):
    """Compute a brain mask from fMRI data in 3D or 4D ndarrays.

    This is based on a heuristic proposed by T. Nichols:
    find the least dense point of the histogram, between fractions
    lower_cutoff and upper_cutoff of the total image histogram.

    In case of failure, it is usually advisable to increase lower_cutoff.

    Parameters
    ----------
    epi_img: Niimg-like object
        See http://nilearn.github.io/manipulating_images/input_output.html.
        EPI image, used to compute the mask. 3D and 4D images are accepted.
        If a 3D image is given, we suggest to use the mean image

    lower_cutoff: float, optional
        lower fraction of the histogram to be discarded.

    upper_cutoff: float, optional
        upper fraction of the histogram to be discarded.

    connected: bool, optional
        if connected is True, only the largest connected component is kept.

    opening: bool or int, optional
        if opening is True, a morphological opening is performed, to keep
        only large structures. This step is useful to remove parts of
        the skull that might have been included.
        If opening is an integer `n`, it is performed via `n` erosions.
        After estimation of the largest connected constituent, 2`n` closing
        operations are performed followed by `n` erosions. This corresponds
        to 1 opening operation of order `n` followed by a closing operator
        of order `n`.
        Note that turning off opening (opening=False) will also prevent
        any smoothing applied to the image during the mask computation.

    ensure_finite: bool
        If ensure_finite is True, the non-finite values (NaNs and infs)
        found in the images will be replaced by zeros

    exclude_zeros: bool, optional
        Consider zeros as missing values for the computation of the
        threshold. This option is useful if the images have been
        resliced with a large padding of zeros.

    target_affine: 3x3 or 4x4 matrix, optional
        This parameter is passed to image.resample_img. Please see the
        related documentation for details.

    target_shape: 3-tuple of integers, optional
        This parameter is passed to image.resample_img. Please see the
        related documentation for details.

    memory: instance of joblib.Memory or string
        Used to cache the function call: if this is a string, it specifies
        the directory where the cache will be stored.

    verbose: int, optional
        Controls the amount of verbosity: higher numbers give
        more messages

    Returns
    -------
    mask: nibabel.Nifti1Image
        The brain mask (3D image)
    """
    if verbose > 0:
        print("EPI mask computation")

    # Delayed import to avoid circular imports
    from .image.image import _compute_mean
    mean_epi, affine = cache(_compute_mean, memory)(
        epi_img,
        target_affine=target_affine,
        target_shape=target_shape,
        smooth=(1 if opening else False))

    if ensure_finite:
        # Get rid of memmapping
        mean_epi = _utils.as_ndarray(mean_epi)
        # SPM tends to put NaNs in the data outside the brain
        mean_epi[np.logical_not(np.isfinite(mean_epi))] = 0

    sorted_input = np.sort(np.ravel(mean_epi))
    if exclude_zeros:
        sorted_input = sorted_input[sorted_input != 0]
    lower_cutoff = int(np.floor(lower_cutoff * len(sorted_input)))
    upper_cutoff = min(int(np.floor(upper_cutoff * len(sorted_input))),
                       len(sorted_input) - 1)

    delta = sorted_input[lower_cutoff + 1:upper_cutoff + 1] \
        - sorted_input[lower_cutoff:upper_cutoff]
    ia = delta.argmax()
    threshold = 0.5 * (sorted_input[ia + lower_cutoff]
                       + sorted_input[ia + lower_cutoff + 1])

    mask = mean_epi >= threshold

    mask, affine = _post_process_mask(
        mask, affine, opening=opening, connected=connected,
        warning_msg="Are you sure that input data are EPI images "
                    "and not detrended?")
    return new_img_like(epi_img, mask, affine)
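# Usage sketch for `compute_epi_mask` (any 3D/4D EPI Niimg works; the mean
# image is used internally; 'epi.nii.gz' is a placeholder path):
#
#     >>> from nilearn.masking import compute_epi_mask
#     >>> mask_img = compute_epi_mask('epi.nii.gz')        # doctest: +SKIP
#     >>> mask_img.shape                                   # doctest: +SKIP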
""" if len(epi_imgs) == 0: raise TypeError('An empty object - %r - was passed instead of an ' 'image or a list of images' % epi_imgs) masks = Parallel(n_jobs=n_jobs, verbose=verbose)( delayed(compute_epi_mask)(epi_img, lower_cutoff=lower_cutoff, upper_cutoff=upper_cutoff, connected=connected, opening=opening, exclude_zeros=exclude_zeros, target_affine=target_affine, target_shape=target_shape, memory=memory) for epi_img in epi_imgs) mask = intersect_masks(masks, connected=connected, threshold=threshold) return mask def compute_background_mask(data_imgs, border_size=2, connected=False, opening=False, target_affine=None, target_shape=None, memory=None, verbose=0): """ Compute a brain mask for the images by guessing the value of the background from the border of the image. Parameters ---------- data_imgs: Niimg-like object See http://nilearn.github.io/manipulating_images/input_output.html. Images used to compute the mask. 3D and 4D images are accepted. If a 3D image is given, we suggest to use the mean image border_size: integer, optional The size, in voxel of the border used on the side of the image to determine the value of the background. connected: bool, optional if connected is True, only the largest connect component is kept. opening: bool or int, optional if opening is True, a morphological opening is performed, to keep only large structures. This step is useful to remove parts of the skull that might have been included. If opening is an integer `n`, it is performed via `n` erosions. After estimation of the largest connected constituent, 2`n` closing operations are performed followed by `n` erosions. This corresponds to 1 opening operation of order `n` followed by a closing operator of order `n`. target_affine: 3x3 or 4x4 matrix, optional This parameter is passed to image.resample_img. Please see the related documentation for details. target_shape: 3-tuple of integers, optional This parameter is passed to image.resample_img. Please see the related documentation for details. memory: instance of joblib.Memory or string Used to cache the function call. verbose: int, optional Returns ------- mask: nibabel.Nifti1Image The brain mask (3D image) """ if verbose > 0: print("Background mask computation") data_imgs = _utils.check_niimg(data_imgs) # Delayed import to avoid circular imports from .image.image import _compute_mean data, affine = cache(_compute_mean, memory)(data_imgs, target_affine=target_affine, target_shape=target_shape, smooth=False) background = np.median(get_border_data(data, border_size)) if np.isnan(background): # We absolutely need to catter for NaNs as a background: # SPM does that by default mask = np.logical_not(np.isnan(data)) else: mask = data != background mask, affine = _post_process_mask(mask, affine, opening=opening, connected=connected, warning_msg="Are you sure that input " "images have a homogeneous background.") return new_img_like(data_imgs, mask, affine) def compute_multi_background_mask(data_imgs, border_size=2, upper_cutoff=0.85, connected=True, opening=2, threshold=0.5, target_affine=None, target_shape=None, exclude_zeros=False, n_jobs=1, memory=None, verbose=0): """ Compute a common mask for several sessions or subjects of data. Uses the mask-finding algorithms to extract masks for each session or subject, and then keep only the main connected component of the a given fraction of the intersection of all the masks. Parameters ---------- data_imgs: list of Niimg-like objects See http://nilearn.github.io/manipulating_images/input_output.html. 
def compute_multi_background_mask(data_imgs, border_size=2, upper_cutoff=0.85,
                                  connected=True, opening=2, threshold=0.5,
                                  target_affine=None, target_shape=None,
                                  exclude_zeros=False, n_jobs=1,
                                  memory=None, verbose=0):
    """ Compute a common mask for several sessions or subjects of data.

    Uses the mask-finding algorithms to extract masks for each session
    or subject, and then keep only the main connected component of a
    given fraction of the intersection of all the masks.

    Parameters
    ----------
    data_imgs: list of Niimg-like objects
        See http://nilearn.github.io/manipulating_images/input_output.html.
        A list of arrays, each item being a subject or a session.
        3D and 4D images are accepted.
        If 3D images are given, we suggest to use the mean image of each
        session

    threshold: float, optional
        the inter-session threshold: the fraction of the total number of
        sessions for which a voxel must be in the mask to be kept in the
        common mask.
        threshold=1 corresponds to keeping the intersection of all
        masks, whereas threshold=0 is the union of all masks.

    border_size: integer, optional
        The size, in voxel of the border used on the side of the image
        to determine the value of the background.

    connected: boolean, optional
        if connected is True, only the largest connected component is kept.

    target_affine: 3x3 or 4x4 matrix, optional
        This parameter is passed to image.resample_img. Please see the
        related documentation for details.

    target_shape: 3-tuple of integers, optional
        This parameter is passed to image.resample_img. Please see the
        related documentation for details.

    memory: instance of joblib.Memory or string
        Used to cache the function call.

    n_jobs: integer, optional
        The number of CPUs to use to do the computation. -1 means
        'all CPUs'.

    Returns
    -------
    mask : 3D nibabel.Nifti1Image
        The brain mask.
    """
    if len(data_imgs) == 0:
        raise TypeError('An empty object - %r - was passed instead of an '
                        'image or a list of images' % data_imgs)
    masks = Parallel(n_jobs=n_jobs, verbose=verbose)(
        delayed(compute_background_mask)(img,
                                         border_size=border_size,
                                         connected=connected,
                                         opening=opening,
                                         target_affine=target_affine,
                                         target_shape=target_shape,
                                         memory=memory)
        for img in data_imgs)

    mask = intersect_masks(masks, connected=connected, threshold=threshold)
    return mask


#
# Time series extraction
#

def apply_mask(imgs, mask_img, dtype='f',
               smoothing_fwhm=None, ensure_finite=True):
    """Extract signals from images using specified mask.

    Read the time series from the given Niimg-like object, using the mask.

    Parameters
    -----------
    imgs: list of 4D Niimg-like objects
        See http://nilearn.github.io/manipulating_images/input_output.html.
        Images to be masked. list of lists of 3D images are also accepted.

    mask_img: Niimg-like object
        See http://nilearn.github.io/manipulating_images/input_output.html.
        3D mask array: True where a voxel should be used.

    dtype: numpy dtype or 'f'
        The dtype of the output, if 'f', any float output is acceptable
        and if the data is stored on the disk as floats the data type
        will not be changed.

    smoothing_fwhm: float (optional)
        Gives the size of the spatial smoothing to apply to the signal,
        in voxels. Implies ensure_finite=True.

    ensure_finite: bool
        If ensure_finite is True (default), the non-finite values (NaNs and
        infs) found in the images will be replaced by zeros.

    Returns
    --------
    session_series: numpy.ndarray
        2D array of series with shape (image number, voxel number)

    Notes
    -----
    When using smoothing, ensure_finite is set to True, as non-finite
    values would spread across the image.
    """
    mask_img = _utils.check_niimg_3d(mask_img)
    mask, mask_affine = _load_mask_img(mask_img)
    mask_img = new_img_like(mask_img, mask, mask_affine)
    return _apply_mask_fmri(imgs, mask_img, dtype=dtype,
                            smoothing_fwhm=smoothing_fwhm,
                            ensure_finite=ensure_finite)
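# Usage sketch for `apply_mask`: turn a 4D image plus a 3D mask into a
# (n_scans, n_voxels) array (a synthetic example built with nibabel):
#
#     >>> import numpy as np, nibabel
#     >>> affine = np.eye(4)
#     >>> imgs = nibabel.Nifti1Image(np.random.randn(4, 4, 4, 10), affine)
#     >>> mask = nibabel.Nifti1Image(np.ones((4, 4, 4), dtype=np.int8),
#     ...                            affine)
#     >>> series = apply_mask(imgs, mask)
#     >>> series.shape
#     (10, 64)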
""" mask_img = _utils.check_niimg_3d(mask_img) mask_affine = mask_img.get_affine() mask_data = _utils.as_ndarray(mask_img.get_data(), dtype=np.bool) if smoothing_fwhm is not None: ensure_finite = True imgs_img = _utils.check_niimg(imgs) affine = imgs_img.get_affine()[:3, :3] if not np.allclose(mask_affine, imgs_img.get_affine()): raise ValueError('Mask affine: \n%s\n is different from img affine:' '\n%s' % (str(mask_affine), str(imgs_img.get_affine()))) if not mask_data.shape == imgs_img.shape[:3]: raise ValueError('Mask shape: %s is different from img shape:%s' % (str(mask_data.shape), str(imgs_img.shape[:3]))) # All the following has been optimized for C order. # Time that may be lost in conversion here is regained multiple times # afterward, especially if smoothing is applied. series = _safe_get_data(imgs_img) if dtype == 'f': if series.dtype.kind == 'f': dtype = series.dtype else: dtype = np.float32 series = _utils.as_ndarray(series, dtype=dtype, order="C", copy=True) del imgs_img # frees a lot of memory # Delayed import to avoid circular imports from .image.image import _smooth_array _smooth_array(series, affine, fwhm=smoothing_fwhm, ensure_finite=ensure_finite, copy=False) return series[mask_data].T def _unmask_3d(X, mask, order="C"): """Take masked data and bring them back to 3D (space only). Parameters ---------- X: numpy.ndarray Masked data. shape: (features,) mask: Niimg-like object See http://nilearn.github.io/manipulating_images/input_output.html. Mask. mask.ndim must be equal to 3, and dtype *must* be bool. """ if mask.dtype != np.bool: raise TypeError("mask must be a boolean array") if X.ndim != 1: raise TypeError("X must be a 1-dimensional array") n_features = mask.sum() if X.shape[0] != n_features: raise TypeError('X must be of shape (samples, %d).' % n_features) data = np.zeros( (mask.shape[0], mask.shape[1], mask.shape[2]), dtype=X.dtype, order=order) data[mask] = X return data def _unmask_4d(X, mask, order="C"): """Take masked data and bring them back to 4D. Parameters ---------- X: numpy.ndarray Masked data. shape: (samples, features) mask: numpy.ndarray Mask. mask.ndim must be equal to 4, and dtype *must* be bool. Returns ------- data: numpy.ndarray Unmasked data. Shape: (mask.shape[0], mask.shape[1], mask.shape[2], X.shape[0]) """ if mask.dtype != np.bool: raise TypeError("mask must be a boolean array") if X.ndim != 2: raise TypeError("X must be a 2-dimensional array") n_features = mask.sum() if X.shape[1] != n_features: raise TypeError('X must be of shape (samples, %d).' % n_features) data = np.zeros(mask.shape + (X.shape[0],), dtype=X.dtype, order=order) data[mask, :] = X.T return data def unmask(X, mask_img, order="F"): """Take masked data and bring them back into 3D/4D This function can be applied to a list of masked data. Parameters ---------- X: numpy.ndarray (or list of) Masked data. shape: (samples #, features #). If X is one-dimensional, it is assumed that samples# == 1. mask_img: niimg: Niimg-like object See http://nilearn.github.io/manipulating_images/input_output.html. Must be 3-dimensional. Returns ------- data: nibabel.Nift1Image object Unmasked data. 
def unmask(X, mask_img, order="F"):
    """Take masked data and bring them back into 3D/4D

    This function can be applied to a list of masked data.

    Parameters
    ----------
    X: numpy.ndarray (or list of)
        Masked data. shape: (samples #, features #).
        If X is one-dimensional, it is assumed that samples# == 1.

    mask_img: niimg: Niimg-like object
        See http://nilearn.github.io/manipulating_images/input_output.html.
        Must be 3-dimensional.

    Returns
    -------
    data: nibabel.Nifti1Image object
        Unmasked data. Depending on the shape of X, data can have
        different shapes:

        - X.ndim == 2:
          Shape: (mask.shape[0], mask.shape[1], mask.shape[2], X.shape[0])
        - X.ndim == 1:
          Shape: (mask.shape[0], mask.shape[1], mask.shape[2])
    """
    if isinstance(X, list):
        ret = []
        for x in X:
            ret.append(unmask(x, mask_img, order=order))  # 1-level recursion
        return ret

    mask_img = _utils.check_niimg_3d(mask_img)
    mask, affine = _load_mask_img(mask_img)

    if X.ndim == 2:
        unmasked = _unmask_4d(X, mask, order=order)
    elif X.ndim == 1:
        unmasked = _unmask_3d(X, mask, order=order)
    else:
        raise TypeError("Masked data X must be 2D or 1D array; "
                        "got shape: %s" % str(X.shape))

    return new_img_like(mask_img, unmasked, affine)
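# `unmask` is the inverse of `apply_mask`: it maps a (n_samples, n_features)
# array back into image space. A sketch reusing the synthetic `series` and
# `mask` from the apply_mask example above:
#
#     >>> img = unmask(series, mask)                       # doctest: +SKIP
#     >>> img.shape                                        # doctest: +SKIP
#     (4, 4, 4, 10)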
################################################################################
# File: nilearn/plotting/cm.py
################################################################################
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""
Matplotlib colormaps useful for neuroimaging.
"""

import numpy as _np

from matplotlib import cm as _cm
from matplotlib import colors as _colors

################################################################################
# Custom colormaps for two-tailed symmetric statistics
################################################################################

################################################################################
# Helper functions


def _rotate_cmap(cmap, swap_order=('green', 'red', 'blue')):
    """ Utility function to swap the colors of a colormap.
    """
    orig_cdict = cmap._segmentdata.copy()

    cdict = dict()
    cdict['green'] = [(p, c1, c2)
                      for (p, c1, c2) in orig_cdict[swap_order[0]]]
    cdict['blue'] = [(p, c1, c2)
                     for (p, c1, c2) in orig_cdict[swap_order[1]]]
    cdict['red'] = [(p, c1, c2)
                    for (p, c1, c2) in orig_cdict[swap_order[2]]]

    return cdict


def _pigtailed_cmap(cmap, swap_order=('green', 'red', 'blue')):
    """ Utility function to make a new colormap by concatenating a
        colormap with its reverse.
    """
    orig_cdict = cmap._segmentdata.copy()

    cdict = dict()
    cdict['green'] = [(0.5 * (1 - p), c1, c2)
                      for (p, c1, c2) in reversed(orig_cdict[swap_order[0]])]
    cdict['blue'] = [(0.5 * (1 - p), c1, c2)
                     for (p, c1, c2) in reversed(orig_cdict[swap_order[1]])]
    cdict['red'] = [(0.5 * (1 - p), c1, c2)
                    for (p, c1, c2) in reversed(orig_cdict[swap_order[2]])]

    for color in ('red', 'green', 'blue'):
        cdict[color].extend([(0.5 * (1 + p), c1, c2)
                             for (p, c1, c2) in orig_cdict[color]])

    return cdict


def _concat_cmap(cmap1, cmap2):
    """ Utility function to make a new colormap by concatenating
        two colormaps.
    """
    cdict = dict()

    cdict1 = cmap1._segmentdata.copy()
    cdict2 = cmap2._segmentdata.copy()
    if not hasattr(cdict1['red'], '__call__'):
        for c in ['red', 'green', 'blue']:
            cdict[c] = [(0.5 * p, c1, c2) for (p, c1, c2) in cdict1[c]]
    else:
        for c in ['red', 'green', 'blue']:
            cdict[c] = []
        ps = _np.linspace(0, 1, 10)
        colors = cmap1(ps)
        for p, (r, g, b, a) in zip(ps, colors):
            cdict['red'].append((.5 * p, r, r))
            cdict['green'].append((.5 * p, g, g))
            cdict['blue'].append((.5 * p, b, b))
    if not hasattr(cdict2['red'], '__call__'):
        for c in ['red', 'green', 'blue']:
            cdict[c].extend([(0.5 * (1 + p), c1, c2)
                             for (p, c1, c2) in cdict2[c]])
    else:
        ps = _np.linspace(0, 1, 10)
        colors = cmap2(ps)
        for p, (r, g, b, a) in zip(ps, colors):
            cdict['red'].append((.5 * (1 + p), r, r))
            cdict['green'].append((.5 * (1 + p), g, g))
            cdict['blue'].append((.5 * (1 + p), b, b))

    return cdict


def alpha_cmap(color, name='', alpha_min=0.5, alpha_max=1.):
    """ Return a colormap with the given color, and alpha going from
        zero to 1.

        Parameters
        ----------
        color: (r, g, b), or a string
            A triplet of floats ranging from 0 to 1, or a matplotlib
            color string
    """
    red, green, blue = _colors.colorConverter.to_rgb(color)
    if name == '' and hasattr(color, 'startswith'):
        name = color
    cmapspec = [(red, green, blue, 1.),
                (red, green, blue, 1.),
                ]
    cmap = _colors.LinearSegmentedColormap.from_list(
        '%s_transparent' % name, cmapspec, _cm.LUTSIZE)
    cmap._init()
    cmap._lut[:, -1] = _np.linspace(alpha_min, alpha_max, cmap._lut.shape[0])
    cmap._lut[-1, -1] = 0
    return cmap


################################################################################
# Our colormaps definition
_cmaps_data = dict(
    cold_hot=_pigtailed_cmap(_cm.hot),
    cold_white_hot=_pigtailed_cmap(_cm.hot_r),
    brown_blue=_pigtailed_cmap(_cm.bone),
    cyan_copper=_pigtailed_cmap(_cm.copper),
    cyan_orange=_pigtailed_cmap(_cm.YlOrBr_r),
    blue_red=_pigtailed_cmap(_cm.Reds_r),
    brown_cyan=_pigtailed_cmap(_cm.Blues_r),
    purple_green=_pigtailed_cmap(_cm.Greens_r,
                                 swap_order=('red', 'blue', 'green')),
    purple_blue=_pigtailed_cmap(_cm.Blues_r,
                                swap_order=('red', 'blue', 'green')),
    blue_orange=_pigtailed_cmap(_cm.Oranges_r,
                                swap_order=('green', 'red', 'blue')),
    black_blue=_rotate_cmap(_cm.hot),
    black_purple=_rotate_cmap(_cm.hot,
                              swap_order=('blue', 'red', 'green')),
    black_pink=_rotate_cmap(_cm.hot,
                            swap_order=('blue', 'green', 'red')),
    black_green=_rotate_cmap(_cm.hot,
                             swap_order=('red', 'blue', 'green')),
    black_red=_cm.hot._segmentdata.copy(),
)

if hasattr(_cm, 'ocean'):
    # MPL 0.99 doesn't have Ocean
    _cmaps_data['ocean_hot'] = _concat_cmap(_cm.ocean, _cm.hot_r)

if hasattr(_cm, 'afmhot'):  # or afmhot
    _cmaps_data['hot_white_bone'] = _concat_cmap(_cm.afmhot, _cm.bone_r)
    _cmaps_data['hot_black_bone'] = _concat_cmap(_cm.afmhot_r, _cm.bone)

# Copied from matplotlib 1.2.0 for matplotlib 0.99 compatibility.
_bwr_data = ((0.0, 0.0, 1.0), (1.0, 1.0, 1.0), (1.0, 0.0, 0.0))
_cmaps_data['bwr'] = _colors.LinearSegmentedColormap.from_list(
    'bwr', _bwr_data)._segmentdata.copy()


################################################################################
# Build colormaps and their reverse.
_cmap_d = dict()
for _cmapname in list(_cmaps_data.keys()):  # needed as dict changes in loop
    _cmapname_r = _cmapname + '_r'
    _cmapspec = _cmaps_data[_cmapname]
    _cmaps_data[_cmapname_r] = _cm.revcmap(_cmapspec)
    _cmap_d[_cmapname] = _colors.LinearSegmentedColormap(
        _cmapname, _cmapspec, _cm.LUTSIZE)
    _cmap_d[_cmapname_r] = _colors.LinearSegmentedColormap(
        _cmapname_r, _cmaps_data[_cmapname_r], _cm.LUTSIZE)

################################################################################
# A few transparent colormaps
for color, name in (((1, 0, 0), 'red'),
                    ((0, 1, 0), 'blue'),
                    ((0, 0, 1), 'green'),
                    ):
    _cmap_d['%s_transparent' % name] = alpha_cmap(color, name=name)
    _cmap_d['%s_transparent_full_alpha_range' % name] = alpha_cmap(
        color, alpha_min=0, alpha_max=1, name=name)

locals().update(_cmap_d)
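# Usage sketch: the colormaps built above are injected into this module's
# namespace, so they can be used directly, and `alpha_cmap` builds new
# transparent maps from any matplotlib color:
#
#     >>> from nilearn.plotting import cm
#     >>> cm.cold_hot.name
#     'cold_hot'
#     >>> red_cmap = cm.alpha_cmap((1., 0., 0.), name='red')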
################################################################################
# Utility to replace a colormap by another in an interval
################################################################################
def dim_cmap(cmap, factor=.3, to_white=True):
    """ Dim a colormap to white, or to black.
    """
    assert factor >= 0 and factor <= 1, ValueError(
        'Dimming factor must be larger than 0 and smaller than 1, '
        '%s was passed.' % factor)
    if to_white:
        dimmer = lambda c: 1 - factor * (1 - c)
    else:
        dimmer = lambda c: factor * c
    cdict = cmap._segmentdata.copy()
    for c_index, color in enumerate(('red', 'green', 'blue')):
        color_lst = list()
        for value, c1, c2 in cdict[color]:
            color_lst.append((value, dimmer(c1), dimmer(c2)))
        cdict[color] = color_lst

    return _colors.LinearSegmentedColormap(
        '%s_dimmed' % cmap.name, cdict, _cm.LUTSIZE)


def replace_inside(outer_cmap, inner_cmap, vmin, vmax):
    """ Replace a colormap by another inside a pair of values.
    """
    assert vmin < vmax, ValueError('vmin must be smaller than vmax')
    assert vmin >= 0, ValueError('vmin must be larger than 0, '
                                 '%s was passed.' % vmin)
    assert vmax <= 1, ValueError('vmax must be smaller than 1, '
                                 '%s was passed.' % vmax)
    outer_cdict = outer_cmap._segmentdata.copy()
    inner_cdict = inner_cmap._segmentdata.copy()

    cdict = dict()
    for this_cdict, cmap in [(outer_cdict, outer_cmap),
                             (inner_cdict, inner_cmap)]:
        if hasattr(this_cdict['red'], '__call__'):
            ps = _np.linspace(0, 1, 25)
            colors = cmap(ps)
            this_cdict['red'] = list()
            this_cdict['green'] = list()
            this_cdict['blue'] = list()
            for p, (r, g, b, a) in zip(ps, colors):
                this_cdict['red'].append((p, r, r))
                this_cdict['green'].append((p, g, g))
                this_cdict['blue'].append((p, b, b))

    for c_index, color in enumerate(('red', 'green', 'blue')):
        color_lst = list()

        for value, c1, c2 in outer_cdict[color]:
            if value >= vmin:
                break
            color_lst.append((value, c1, c2))

        color_lst.append((vmin, outer_cmap(vmin)[c_index],
                          inner_cmap(vmin)[c_index]))

        for value, c1, c2 in inner_cdict[color]:
            if value <= vmin:
                continue
            if value >= vmax:
                break
            color_lst.append((value, c1, c2))

        color_lst.append((vmax, inner_cmap(vmax)[c_index],
                          outer_cmap(vmax)[c_index]))

        for value, c1, c2 in outer_cdict[color]:
            if value <= vmax:
                continue
            color_lst.append((value, c1, c2))

        cdict[color] = color_lst

    return _colors.LinearSegmentedColormap(
        '%s_inside_%s' % (inner_cmap.name, outer_cmap.name),
        cdict, _cm.LUTSIZE)
################################################################################
# File: nilearn/plotting/__init__.py
################################################################################
"""
Plotting code for nilearn
"""
# Authors: Chris Filo Gorgolewski, Gael Varoquaux


###############################################################################
# Make sure that we don't get DISPLAY problems when running without X on
# unices
def _set_mpl_backend():
    try:
        # We are doing local imports here to avoid polluting our namespace
        import matplotlib
        import os
        import sys
        # Set the backend to a non-interactive one for unices without X
        if (os.name == 'posix' and 'DISPLAY' not in os.environ
                and not (sys.platform == 'darwin'
                         and matplotlib.get_backend() == 'MacOSX')):
            matplotlib.use('Agg')
    except ImportError:
        from .._utils.testing import skip_if_running_nose
        # No need to fail when running tests
        skip_if_running_nose('matplotlib not installed')
        raise
    else:
        from ..version import (_import_module_with_version_check,
                               OPTIONAL_MATPLOTLIB_MIN_VERSION)
        # When matplotlib was successfully imported we need to check
        # that the version is greater than the minimum required one
        _import_module_with_version_check('matplotlib',
                                          OPTIONAL_MATPLOTLIB_MIN_VERSION)

_set_mpl_backend()

###############################################################################
from . import cm
from .img_plotting import plot_img, plot_anat, plot_epi, \
    plot_roi, plot_stat_map, plot_glass_brain, plot_connectome, \
    plot_prob_atlas, show
from .find_cuts import find_xyz_cut_coords, find_cut_slices

__all__ = ['cm', 'plot_img', 'plot_anat', 'plot_epi',
           'plot_roi', 'plot_stat_map', 'plot_glass_brain',
           'plot_connectome', 'plot_prob_atlas',
           'find_xyz_cut_coords', 'find_cut_slices', 'show']
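# A typical plotting call once this package is imported ('stat.nii.gz' is
# a placeholder path for a statistical map):
#
#     >>> from nilearn import plotting
#     >>> display = plotting.plot_stat_map('stat.nii.gz',
#     ...                                  threshold=3.)   # doctest: +SKIP
#     >>> plotting.show()                                  # doctest: +SKIP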
################################################################################
# File: nilearn/plotting/find_cuts.py
################################################################################
"""
Tools to find activations and cut on maps
"""
# Author: Gael Varoquaux
# License: BSD

import warnings
import numbers
import numpy as np
from scipy import ndimage

# Local imports
from .._utils.ndimage import largest_connected_component
from ..image import new_img_like
from .._utils.extmath import fast_abs_percentile
from .._utils.numpy_conversions import as_ndarray
from .._utils import check_niimg_3d
from .._utils.niimg import _safe_get_data
from ..image.resampling import get_mask_bounds, coord_transform
from ..image.image import _smooth_array

################################################################################
# Functions for automatic choice of cuts coordinates
################################################################################


def find_xyz_cut_coords(img, mask=None, activation_threshold=None):
    """ Find the center of the largest activation connected component.

        Parameters
        -----------
        img : 3D Nifti1Image
            The brain map.
        mask : 3D ndarray, boolean, optional
            An optional brain mask.
        activation_threshold : float, optional
            The lower threshold to the positive activation. If None, the
            activation threshold is computed using the 80% percentile of
            the absolute value of the map.

        Returns
        -------
        x : float
            the x world coordinate.
        y : float
            the y world coordinate.
        z : float
            the z world coordinate.
    """
    # if a pseudo-4D image or several images were passed (cf. #922),
    # we reduce to a single 3D image to find the coordinates
    img = check_niimg_3d(img)
    data = _safe_get_data(img)

    # To speed up computations, we work with partial views of the array,
    # and keep track of the offset
    offset = np.zeros(3)

    # Deal with masked arrays:
    if hasattr(data, 'mask'):
        not_mask = np.logical_not(data.mask)
        if mask is None:
            mask = not_mask
        else:
            mask *= not_mask
        data = np.asarray(data)

    # Get rid of potential memmapping
    data = as_ndarray(data)
    my_map = data.copy()
    if mask is not None:
        # check against empty mask
        if mask.sum() == 0.:
            warnings.warn(
                "Provided mask is empty. Returning center of mass instead.")
            cut_coords = ndimage.center_of_mass(np.abs(my_map)) + offset
            x_map, y_map, z_map = cut_coords
            return np.asarray(coord_transform(x_map, y_map, z_map,
                                              img.get_affine())).tolist()
        slice_x, slice_y, slice_z = ndimage.find_objects(mask)[0]
        my_map = my_map[slice_x, slice_y, slice_z]
        mask = mask[slice_x, slice_y, slice_z]
        my_map *= mask
        offset += [slice_x.start, slice_y.start, slice_z.start]
    # Testing min and max is faster than np.all(my_map == 0)
    if (my_map.max() == 0) and (my_map.min() == 0):
        return .5 * np.array(data.shape)
    if activation_threshold is None:
        activation_threshold = fast_abs_percentile(
            my_map[my_map != 0].ravel(), 80)
    mask = np.abs(my_map) > activation_threshold - 1.e-15
    # mask may be zero everywhere in rare cases
    if mask.max() == 0:
        return .5 * np.array(data.shape)
    mask = largest_connected_component(mask)
    slice_x, slice_y, slice_z = ndimage.find_objects(mask)[0]
    my_map = my_map[slice_x, slice_y, slice_z]
    mask = mask[slice_x, slice_y, slice_z]
    my_map *= mask
    offset += [slice_x.start, slice_y.start, slice_z.start]

    # For the second threshold, we use a mean, as it is much faster,
    # although it is less robust
    second_threshold = np.abs(np.mean(my_map[mask]))
    second_mask = (np.abs(my_map) > second_threshold)
    if second_mask.sum() > 50:
        my_map *= largest_connected_component(second_mask)
    cut_coords = ndimage.center_of_mass(np.abs(my_map))
    x_map, y_map, z_map = cut_coords + offset

    # Return as a list of scalars
    return np.asarray(coord_transform(x_map, y_map, z_map,
                                      img.get_affine())).tolist()


def _get_auto_mask_bounds(img):
    """ Compute the bounds of the data with an automatically computed mask
    """
    data = _safe_get_data(img)
    affine = img.get_affine()
    if hasattr(data, 'mask'):
        # Masked array
        mask = np.logical_not(data.mask)
        data = np.asarray(data)
    else:
        # The mask will be anything that is fairly different
        # from the values in the corners
        edge_value = float(data[0, 0, 0] + data[0, -1, 0]
                           + data[-1, 0, 0] + data[0, 0, -1]
                           + data[-1, -1, 0] + data[-1, 0, -1]
                           + data[0, -1, -1] + data[-1, -1, -1])
        # average over the eight corner voxels summed above
        edge_value /= 8
        mask = np.abs(data - edge_value) > .005 * data.ptp()
    xmin, xmax, ymin, ymax, zmin, zmax = \
        get_mask_bounds(new_img_like(img, mask, affine))
    return (xmin, xmax), (ymin, ymax), (zmin, zmax)


def _transform_cut_coords(cut_coords, direction, affine):
    """Transforms cut_coords back in image space

    Parameters
    ----------
    cut_coords: 1D array of length n_cuts
        The coordinates to be transformed.

    direction: string, optional (default "z")
        sectional direction; possible values are "x", "y", or "z"

    affine: 2D array of shape (4, 4)
        The affine for the image.

    Returns
    -------
    cut_coords: 1D array of length n_cuts
        The original cut_coords transformed in image space.
    """
    # make kwargs
    axis = 'xyz'.index(direction)
    kwargs = {}
    for name in 'xyz':
        kwargs[name] = np.zeros(len(cut_coords))
    kwargs[direction] = cut_coords
    kwargs['affine'] = affine

    # We need atleast_1d to make sure that when n_cuts is 1 we do
    # get an iterable
    cut_coords = coord_transform(**kwargs)[axis]
    return np.atleast_1d(cut_coords)
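# Usage sketch for `find_xyz_cut_coords` on a toy activation blob (the
# identity affine makes voxel and world coordinates coincide):
#
#     >>> import numpy as np, nibabel
#     >>> data = np.zeros((10, 10, 10)); data[4:7, 4:7, 4:7] = 1.
#     >>> img = nibabel.Nifti1Image(data, np.eye(4))
#     >>> find_xyz_cut_coords(img)
#     [5.0, 5.0, 5.0]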
    Parameters
    ----------
    img: 3D Nifti1Image
        the brain map
    direction: string, optional (default "z")
        sectional direction; possible values are "x", "y", or "z"
    n_cuts: int, optional (default 7)
        number of cuts in the plot
    spacing: 'auto' or int, optional (default 'auto')
        minimum spacing between cuts (in voxels, not millimeters)
        if 'auto', the spacing is .5 / n_cuts * img_length

    Returns
    -------
    cut_coords: 1D array of length n_cuts
        the computed cut_coords

    Notes
    -----
    This code works by iteratively locating peak activations that are
    separated by a distance of at least 'spacing'. If n_cuts is very
    large and all the activated regions are covered, cuts with a spacing
    less than 'spacing' will be returned.
    """
    # misc
    if direction not in 'xyz':
        raise ValueError(
            "'direction' must be one of 'x', 'y', or 'z'. Got '%s'" % (
                direction))
    axis = 'xyz'.index(direction)
    affine = img.get_affine()
    orig_data = np.abs(_safe_get_data(img))
    this_shape = orig_data.shape[axis]

    if not isinstance(n_cuts, numbers.Number):
        raise ValueError("The number of cuts (n_cuts) must be an integer "
                         "greater than or equal to 1. "
                         "You provided a value of n_cuts=%s. " % n_cuts)

    # BF issue #575: Return all the slices along an axis if this axis
    # is the display mode and at least as many slices are requested
    # as there are slices.
    if n_cuts > this_shape:
        warnings.warn('Too many cuts requested for the data: '
                      'n_cuts=%i, data size=%i' % (n_cuts, this_shape))
        return _transform_cut_coords(np.arange(this_shape), direction, affine)

    data = orig_data.copy()
    if data.dtype.kind == 'i':
        data = data.astype(np.float)

    data = _smooth_array(data, affine, fwhm='fast')

    # Guard against floating-point issues in the user-provided "n_cuts"
    epsilon = np.finfo(np.float32).eps
    difference = abs(round(n_cuts) - n_cuts)
    if round(n_cuts) < 1. or difference > epsilon:
        message = ("Image has %d slices in direction %s. "
                   "Therefore, the number of cuts must be between 1 and %d. 
" "You provided n_cuts=%s " % ( this_shape, direction, this_shape, n_cuts)) raise ValueError(message) else: n_cuts = int(round(n_cuts)) if spacing == 'auto': spacing = max(int(.5 / n_cuts * data.shape[axis]), 1) slices = [slice(None, None), slice(None, None), slice(None, None)] cut_coords = list() for _ in range(n_cuts): # Find a peak max_along_axis = np.unravel_index(np.abs(data).argmax(), data.shape)[axis] # cancel out the surroundings of the peak start = max(0, max_along_axis - spacing) stop = max_along_axis + spacing slices[axis] = slice(start, stop) # We don't actually fully zero the neighborhood, to avoid ending # up with fully zeros if n_cuts is too big: we can do multiple # passes on the data data[slices] *= 1.e-3 cut_coords.append(max_along_axis) # We sometimes get duplicated cuts, so we add cuts at the beginning # and the end cut_coords = np.unique(cut_coords).tolist() while len(cut_coords) < n_cuts: # Candidates for new cuts: slice_below = min(cut_coords) - 2 slice_above = max(cut_coords) + 2 candidates = [slice_above] # One slice where there is the biggest gap in the existing # cut_coords if len(cut_coords) > 1: middle_idx = np.argmax(np.diff(cut_coords)) slice_middle = int(.5 * (cut_coords[middle_idx] + cut_coords[middle_idx + 1])) if not slice_middle in cut_coords: candidates.append(slice_middle) if slice_below >= 0: # We need positive slice to avoid having negative # indices, which would work, but not the way we think of them candidates.append(slice_below) best_weight = -10 for candidate in candidates: if candidate >= this_shape: this_weight = 0 else: this_weight = np.sum(np.rollaxis(orig_data, axis)[candidate]) if this_weight > best_weight: best_candidate = candidate best_weight = this_weight cut_coords.append(best_candidate) cut_coords = np.unique(cut_coords).tolist() cut_coords = np.array(cut_coords) cut_coords.sort() return _transform_cut_coords(cut_coords, direction, affine) PKHrrnilearn/plotting/glass_brain.py""" Brain schematics plotting for glass brain functionality """ import json import os from matplotlib.path import Path from matplotlib import patches from matplotlib import colors from matplotlib import transforms def _codes_bezier(pts): bezier_num = len(pts) # Next two lines are meant to handle both Bezier 3 and 4 path_attr = 'CURVE{0}'.format(bezier_num) codes = [getattr(Path, path_attr)] * (bezier_num - 1) return [Path.MOVETO] + codes def _codes_segment(pts): return [Path.MOVETO, Path.LINETO] def _codes(atype, pts): dispatch = {'bezier': _codes_bezier, 'segment': _codes_segment} return dispatch[atype](pts) def _invert_color(color): """Return inverted color If color is (R, G, B) it returns (1 - R, 1 - G, 1 - B). If 'color' can not be converted to a color it is returned unmodified. 
""" try: color_converter = colors.ColorConverter() color_rgb = color_converter.to_rgb(color) return tuple(1 - level for level in color_rgb) except ValueError: return color def _get_mpl_patches(json_content, transform=None, invert_color=False, **kwargs): """Walks over the json content and builds a list of matplotlib patches """ mpl_patches = [] kwargs_edgecolor = kwargs.pop('edgecolor', None) kwargs_linewidth = kwargs.pop('linewidth', None) for path in json_content['paths']: if kwargs_edgecolor is not None: edgecolor = kwargs_edgecolor else: edgecolor = path['edgecolor'] if invert_color: edgecolor = _invert_color(edgecolor) linewidth = kwargs_linewidth or path['linewidth'] path_id = path['id'] for item in path['items']: type = item['type'] pts = item['pts'] codes = _codes(type, pts) path = Path(pts, codes) patch = patches.PathPatch(path, edgecolor=edgecolor, linewidth=linewidth, facecolor='none', gid=path_id, transform=transform, **kwargs) mpl_patches.append(patch) return mpl_patches def _get_json_and_transform(direction): """Returns the json filename and and an affine transform, which has been tweaked by hand to fit the MNI template """ direction_to_view_name = {'x': 'side', 'y': 'front', 'z': 'top', 'l': 'side', 'r': 'side'} direction_to_transform_params = { 'x': [0.38, 0, 0, 0.38, -108, -70], 'y': [0.39, 0, 0, 0.39, -72, -73], 'z': [0.36, 0, 0, 0.37, -71, -107], 'l': [0.38, 0, 0, 0.38, -108, -70], 'r': [0.38, 0, 0, 0.38, -108, -70]} dirname = os.path.dirname(os.path.abspath(__file__)) dirname = os.path.join(dirname, 'glass_brain_files') direction_to_filename = dict([ (_direction, os.path.join( dirname, 'brain_schematics_{0}.json'.format(view_name))) for _direction, view_name in direction_to_view_name.items()]) direction_to_transforms = dict([ (_direction, transforms.Affine2D.from_values(*params)) for _direction, params in direction_to_transform_params.items()]) direction_to_json_and_transform = dict([ (_direction, (direction_to_filename[_direction], direction_to_transforms[_direction])) for _direction in direction_to_filename]) filename_and_transform = direction_to_json_and_transform.get(direction) if filename_and_transform is None: message = ("No glass brain view associated with direction '{0}'. " "Possible directions are {1}").format( direction, list(direction_to_json_and_transform.keys())) raise ValueError(message) return filename_and_transform def _get_object_bounds(json_content, transform): xmin, xmax, ymin, ymax = json_content['metadata']['bounds'] x0, y0 = transform.transform((xmin, ymin)) x1, y1 = transform.transform((xmax, ymax)) xmin, xmax = min(x0, x1), max(x0, x1) ymin, ymax = min(y0, y1), max(y0, y1) # A combination of a proportional factor (fraction of the drawing) # and a guestimate of the linewidth xmargin = (xmax - xmin) * 0.025 + .1 ymargin = (ymax - ymin) * 0.025 + .1 return xmin - xmargin, xmax + xmargin, ymin - ymargin, ymax + ymargin def plot_brain_schematics(ax, direction, **kwargs): """Creates matplotlib patches from a json custom format and plots them on a matplotlib Axes. 
    Parameters
    ----------
    ax: a MPL axes instance
        The axes in which the plots will be drawn
    direction: {'x', 'y', 'z', 'l', 'r'}
        The directions of the view
    **kwargs:
        Passed to the matplotlib patches constructor

    Returns
    -------
    object_bounds: (xmin, xmax, ymin, ymax) tuple
        Useful for the caller to be able to set axes limits
    """
    black_bg = colors.colorConverter.to_rgba(ax.get_axis_bgcolor()) \
        == colors.colorConverter.to_rgba('k')
    json_filename, transform = _get_json_and_transform(direction)
    with open(json_filename) as json_file:
        json_content = json.loads(json_file.read())
    mpl_patches = _get_mpl_patches(json_content,
                                   transform=transform + ax.transData,
                                   invert_color=black_bg, **kwargs)
    for mpl_patch in mpl_patches:
        ax.add_patch(mpl_patch)
    object_bounds = _get_object_bounds(json_content, transform)
    return object_bounds

nilearn/plotting/edge_detect.py
"""
Edge detection routines: this file provides a Canny filter
"""
import numpy as np
from scipy import ndimage, signal

from .._utils.extmath import fast_abs_percentile

# Author: Gael Varoquaux
# License: BSD

################################################################################
# Edge detection


def _orientation_kernel(t):
    """ structure elements for calculating the value of neighbors in several
        directions
    """
    sin = np.sin
    pi = np.pi
    t = pi * t
    arr = np.array([[sin(t),            sin(t + .5 * pi),  sin(t + pi)],
                    [sin(t + 1.5 * pi), 0,                 sin(t + 1.5 * pi)],
                    [sin(t + pi),       sin(t + .5 * pi),  sin(t)]])
    return np.round(.5 * ((1 + arr)) ** 2).astype(np.bool)


def _edge_detect(image, high_threshold=.75, low_threshold=.4):
    """ Edge detection for 2D images based on Canny filtering.

    Parameters
    ----------
    image: 2D array
        The image on which edge detection is applied
    high_threshold: float, optional
        The quantile defining the upper threshold of the hysteresis
        thresholding: decrease this to keep more edges
    low_threshold: float, optional
        The quantile defining the lower threshold of the hysteresis
        thresholding: decrease this to extract wider edges

    Returns
    -------
    grad_mag: 2D array of floats
        The magnitude of the gradient
    edge_mask: 2D array of booleans
        A mask of where edges have been detected

    Notes
    -----
    This function is based on a Canny filter, however it has been tailored
    for visualization purposes on brain images: don't use it in the
    general case.

    It computes the norm of the gradient, extracts the ridge by keeping
    only the local maxima in each direction, and performs hysteresis
    filtering to keep only edges with a high gradient magnitude.
    """
    # This code is loosely based on code by Stefan van der Walt
    # Convert to floats to avoid overflows
    np_err = np.seterr(all='ignore')
    # Replace NaNs by 0s to avoid meaningless outputs
    image = np.nan_to_num(image)
    img = signal.wiener(image.astype(np.float))
    np.seterr(**np_err)
    # Where the noise variance is 0, Wiener can create nans
    img[np.isnan(img)] = image[np.isnan(img)]
    img /= img.max()
    grad_x = ndimage.sobel(img, mode='constant', axis=0)
    grad_y = ndimage.sobel(img, mode='constant', axis=1)
    grad_mag = np.sqrt(grad_x ** 2 + grad_y ** 2)
    grad_angle = np.arctan2(grad_y, grad_x)
    # Scale the angles in the range [0, 2]
    grad_angle = (grad_angle + np.pi) / np.pi
    # Non-maximal suppression: an edge pixel is only good if its magnitude is
    # greater than its neighbors normal to the edge direction.
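    # The loop below sweeps 8 candidate orientations (t = 0, .25, ..., 1.75,
    # in units of pi). For each one, _orientation_kernel builds a 3x3
    # footprint selecting the two neighbors normal to that orientation (e.g.
    # for t == 0 it keeps the two vertical neighbors of the patch). A pixel
    # survives if its magnitude is above 85% of the local maximum over that
    # footprint and its scaled gradient angle matches the orientation
    # (modulo 2) within tolerance.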
thinner = np.zeros(grad_mag.shape, dtype=np.bool) for angle in np.arange(0, 2, .25): thinner = thinner | ( (grad_mag > .85 * ndimage.maximum_filter( grad_mag, footprint=_orientation_kernel(angle))) & (((grad_angle - angle) % 2) < .75) ) # Remove the edges next to the side of the image: they are not reliable thinner[0] = 0 thinner[-1] = 0 thinner[:, 0] = 0 thinner[:, -1] = 0 thinned_grad = thinner * grad_mag # Hysteresis thresholding: find seeds above a high threshold, then # expand out until we go below the low threshold grad_values = thinned_grad[thinner] high = thinned_grad > fast_abs_percentile(grad_values, 100 * high_threshold) low = thinned_grad > fast_abs_percentile(grad_values, 100 * low_threshold) edge_mask = ndimage.binary_dilation( high, structure=np.ones((3, 3)), iterations=-1, mask=low) return grad_mag, edge_mask def _edge_map(image): """ Return a maps of edges suitable for visualization. Parameters ---------- image: 2D array The image that the edges are extracted from. Returns -------- edge_mask: 2D masked array A mask of the edge as a masked array with parts without edges masked and the large extents detected with lower coefficients. """ edge_mask = _edge_detect(image)[-1] edge_mask = edge_mask.astype(np.float) edge_mask = -np.sqrt(ndimage.distance_transform_cdt(edge_mask)) edge_mask[edge_mask != 0] -= -.05 + edge_mask.min() edge_mask = np.ma.masked_less(edge_mask, .01) return edge_mask PKpHΤ99nilearn/plotting/displays.py""" The Slicer classes. The main purpose of these classes is to have auto adjust of axes size to the data with different layout of cuts. """ import collections import numbers import numpy as np from scipy import sparse, stats from ..image import new_img_like from .. import _utils import matplotlib.pyplot as plt from matplotlib import transforms, colors from matplotlib.colorbar import ColorbarBase from matplotlib import cm as mpl_cm from matplotlib import lines # Local imports from . 
import glass_brain, cm from .find_cuts import find_xyz_cut_coords, find_cut_slices from .edge_detect import _edge_map from ..image.resampling import (get_bounds, reorder_img, coord_transform, get_mask_bounds) ############################################################################### # class BaseAxes ############################################################################### class BaseAxes(object): """ An MPL axis-like object that displays a 2D view of 3D volumes """ def __init__(self, ax, direction, coord): """ An MPL axis-like object that displays a cut of 3D volumes Parameters ---------- ax: a MPL axes instance The axes in which the plots will be drawn direction: {'x', 'y', 'z'} The directions of the view coord: float The coordinate along the direction of the cut """ self.ax = ax self.direction = direction self.coord = coord self._object_bounds = list() def transform_to_2d(self, data, affine): raise NotImplementedError("'transform_to_2d' needs to be implemented " "in derived classes'") def add_object_bounds(self, bounds): """Ensures that axes get rescaled when adding object bounds """ old_object_bounds = self.get_object_bounds() self._object_bounds.append(bounds) new_object_bounds = self.get_object_bounds() if new_object_bounds != old_object_bounds: self.ax.axis(self.get_object_bounds()) def draw_2d(self, data_2d, data_bounds, bounding_box, type='imshow', **kwargs): # kwargs messaging kwargs['origin'] = 'upper' if self.direction == 'y': (xmin, xmax), (_, _), (zmin, zmax) = data_bounds (xmin_, xmax_), (_, _), (zmin_, zmax_) = bounding_box elif self.direction in 'xlr': (_, _), (xmin, xmax), (zmin, zmax) = data_bounds (_, _), (xmin_, xmax_), (zmin_, zmax_) = bounding_box elif self.direction == 'z': (xmin, xmax), (zmin, zmax), (_, _) = data_bounds (xmin_, xmax_), (zmin_, zmax_), (_, _) = bounding_box else: raise ValueError('Invalid value for direction %s' % self.direction) ax = self.ax # Here we need to do a copy to avoid having the image changing as # we change the data im = getattr(ax, type)(data_2d.copy(), extent=(xmin, xmax, zmin, zmax), **kwargs) self.add_object_bounds((xmin_, xmax_, zmin_, zmax_)) return im def get_object_bounds(self): """ Return the bounds of the objects on this axes. 
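    Bounds are returned as an (xmin, xmax, ymin, ymax) tuple in data
    coordinates; if nothing has been plotted yet, a small default box
    around the origin is returned.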
""" if len(self._object_bounds) == 0: # Nothing plotted yet return -.01, .01, -.01, .01 xmins, xmaxs, ymins, ymaxs = np.array(self._object_bounds).T xmax = max(xmaxs.max(), xmins.max()) xmin = min(xmins.min(), xmaxs.min()) ymax = max(ymaxs.max(), ymins.max()) ymin = min(ymins.min(), ymaxs.min()) return xmin, xmax, ymin, ymax def draw_left_right(self, size, bg_color, **kwargs): if self.direction in 'xlr': return ax = self.ax ax.text(.1, .95, 'L', transform=ax.transAxes, horizontalalignment='left', verticalalignment='top', size=size, bbox=dict(boxstyle="square,pad=0", ec=bg_color, fc=bg_color, alpha=1), **kwargs) ax.text(.9, .95, 'R', transform=ax.transAxes, horizontalalignment='right', verticalalignment='top', size=size, bbox=dict(boxstyle="square,pad=0", ec=bg_color, fc=bg_color), **kwargs) def draw_position(self, size, bg_color, **kwargs): raise NotImplementedError("'draw_position' should be implemented " "in derived classes") ############################################################################### # class CutAxes ############################################################################### class CutAxes(BaseAxes): """ An MPL axis-like object that displays a cut of 3D volumes """ def transform_to_2d(self, data, affine): """ Cut the 3D volume into a 2D slice Parameters ---------- data: 3D ndarray The 3D volume to cut affine: 4x4 ndarray The affine of the volume """ coords = [0, 0, 0] coords['xyz'.index(self.direction)] = self.coord x_map, y_map, z_map = [int(np.round(c)) for c in coord_transform(coords[0], coords[1], coords[2], np.linalg.inv(affine))] if self.direction == 'y': cut = np.rot90(data[:, y_map, :]) elif self.direction == 'x': cut = np.rot90(data[x_map, :, :]) elif self.direction == 'z': cut = np.rot90(data[:, :, z_map]) else: raise ValueError('Invalid value for direction %s' % self.direction) return cut def draw_position(self, size, bg_color, **kwargs): ax = self.ax ax.text(0, 0, '%s=%i' % (self.direction, self.coord), transform=ax.transAxes, horizontalalignment='left', verticalalignment='bottom', size=size, bbox=dict(boxstyle="square,pad=0", ec=bg_color, fc=bg_color, alpha=1), **kwargs) def _get_index_from_direction(direction): """Returns numerical index from direction """ directions = ['x', 'y', 'z'] try: # l and r are subcases of x if direction in 'lr': index = 0 else: index = directions.index(direction) except ValueError: message = ( '{0} is not a valid direction. ' "Allowed values are 'l', 'r', 'x', 'y' and 'z'").format(direction) raise ValueError(message) return index def _coords_3d_to_2d(coords_3d, direction, return_direction=False): """Project 3d coordinates into 2d ones given the direction of a cut """ index = _get_index_from_direction(direction) dimensions = [0, 1, 2] dimensions.pop(index) if return_direction: return coords_3d[:, dimensions], coords_3d[:, index] return coords_3d[:, dimensions] ############################################################################### # class GlassBrainAxes ############################################################################### class GlassBrainAxes(BaseAxes): """An MPL axis-like object that displays a 2D projection of 3D volumes with a schematic view of the brain. 
""" def __init__(self, ax, direction, coord, plot_abs=True, **kwargs): super(GlassBrainAxes, self).__init__(ax, direction, coord) self._plot_abs = plot_abs if ax is not None: object_bounds = glass_brain.plot_brain_schematics(ax, direction, **kwargs) self.add_object_bounds(object_bounds) def transform_to_2d(self, data, affine): """ Returns the maximum of the absolute value of the 3D volume along an axis. Parameters ---------- data: 3D ndarray The 3D volume affine: 4x4 ndarray The affine of the volume """ if self.direction in 'xlr': max_axis = 0 else: max_axis = '.yz'.index(self.direction) # set unselected brain hemisphere activations to 0 if self.direction == 'l': x_center, _, _, _ = np.dot(np.linalg.inv(affine), np.array([0, 0, 0, 1])) data_selection = data[int(x_center):, :, :] elif self.direction == 'r': x_center, _, _, _ = np.dot(np.linalg.inv(affine), np.array([0, 0, 0, 1])) data_selection = data[:int(x_center), :, :] else: data_selection = data # We need to make sure data_selection is not empty in the x axis # This should be the case since we expect images in MNI space if data_selection.shape[0] == 0: data_selection = data if not self._plot_abs: # get the shape of the array we are projecting to new_shape = list(data.shape) del new_shape[max_axis] # generate a 3D indexing array that points to max abs value in the # current projection a1, a2 = np.indices(new_shape) inds = [a1, a2] inds.insert(max_axis, np.abs(data_selection).argmax(axis=max_axis)) # take the values where the absolute value of the projection # is the highest maximum_intensity_data = data_selection[inds] else: maximum_intensity_data = np.abs(data_selection).max(axis=max_axis) return np.rot90(maximum_intensity_data) def draw_position(self, size, bg_color, **kwargs): # It does not make sense to draw crosses for the position of # the cuts since we are taking the max along one axis pass def _add_markers(self, marker_coords, marker_color, marker_size, **kwargs): """Plot markers In the case of 'l' and 'r' directions (for hemispheric projections), markers in the coordinate x == 0 are included in both hemispheres. """ marker_coords_2d = _coords_3d_to_2d(marker_coords, self.direction) xdata, ydata = marker_coords_2d.T # Allow markers only in their respective hemisphere when appropriate if self.direction in 'lr': relevant_coords = [] xcoords, ycoords, zcoords = marker_coords.T for cidx, xc in enumerate(xcoords): if self.direction == 'r' and xc >= 0: relevant_coords.append(cidx) elif self.direction == 'l' and xc <= 0: relevant_coords.append(cidx) xdata = xdata[relevant_coords] ydata = ydata[relevant_coords] marker_color = marker_color[relevant_coords] defaults = {'marker': 'o', 'zorder': 1000} for k, v in defaults.items(): kwargs.setdefault(k, v) self.ax.scatter(xdata, ydata, s=marker_size, c=marker_color, **kwargs) def _add_lines(self, line_coords, line_values, cmap, vmin=None, vmax=None, **kwargs): """Plot lines Parameters ---------- line_coords: list of numpy arrays of shape (2, 3) 3d coordinates of lines start points and end points. line_values: array_like values of the lines. cmap: colormap colormap used to map line_values to a color. vmin: float, optional, default: None vmax: float, optional, default: None If not None, either or both of these values will be used to as the minimum and maximum values to color lines. If None are supplied the maximum absolute value within the given threshold will be used as minimum (multiplied by -1) and maximum coloring levels. kwargs: dict additional arguments to pass to matplotlib Line2D. 
""" # colormap for colorbar self.cmap = cmap if vmin is None and vmax is None: abs_line_values_max = np.abs(line_values).max() vmin = -abs_line_values_max vmax = abs_line_values_max elif vmin is None: if vmax > 0: vmin = -vmax else: raise ValueError( "If vmax is set to a non-positive number " "then vmin needs to be specified" ) elif vmax is None: if vmin < 0: vmin = -vmax else: raise ValueError( "If vmin is set to a non-negative number " "then vmax needs to be specified" ) norm = colors.Normalize(vmin=vmin, vmax=vmax) # normalization useful for colorbar self.norm = norm abs_norm = colors.Normalize(vmin=0, vmax=vmax) value_to_color = plt.cm.ScalarMappable(norm=norm, cmap=cmap).to_rgba # Allow lines only in their respective hemisphere when appropriate if self.direction in 'lr': relevant_lines = [] for lidx, line in enumerate(line_coords): if self.direction == 'r': if line[0, 0] >= 0 and line[1, 0] >= 0: relevant_lines.append(lidx) elif self.direction == 'l': if line[0, 0] < 0 and line[1, 0] < 0: relevant_lines.append(lidx) line_coords = np.array(line_coords)[relevant_lines] line_values = line_values[relevant_lines] for start_end_point_3d, line_value in zip( line_coords, line_values): start_end_point_2d = _coords_3d_to_2d(start_end_point_3d, self.direction) color = value_to_color(line_value) abs_line_value = abs(line_value) linewidth = 1 + 2 * abs_norm(abs_line_value) # Hacky way to put the strongest connections on top of the weakest # note sign does not matter hence using 'abs' zorder = 10 + 10 * abs_norm(abs_line_value) this_kwargs = {'color': color, 'linewidth': linewidth, 'zorder': zorder} # kwargs should have priority over this_kwargs so that the # user can override the default logic this_kwargs.update(kwargs) xdata, ydata = start_end_point_2d.T line = lines.Line2D(xdata, ydata, **this_kwargs) self.ax.add_line(line) ############################################################################### # class BaseSlicer ############################################################################### class BaseSlicer(object): """ The main purpose of these class is to have auto adjust of axes size to the data with different layout of cuts. """ # This actually encodes the figsize for only one axe _default_figsize = [2.2, 2.6] _axes_class = CutAxes def __init__(self, cut_coords, axes=None, black_bg=False, **kwargs): """ Create 3 linked axes for plotting orthogonal cuts. Parameters ---------- cut_coords: 3 tuple of ints The cut position, in world space. axes: matplotlib axes object, optional The axes that will be subdivided in 3. black_bg: boolean, optional If True, the background of the figure will be put to black. If you wish to save figures with a black background, you will need to pass "facecolor='k', edgecolor='k'" to matplotlib.pyplot.savefig. 
""" self.cut_coords = cut_coords if axes is None: axes = plt.axes((0., 0., 1., 1.)) axes.axis('off') self.frame_axes = axes axes.set_zorder(1) bb = axes.get_position() self.rect = (bb.x0, bb.y0, bb.x1, bb.y1) self._black_bg = black_bg self._colorbar = False self._colorbar_width = 0.05 * bb.width self._colorbar_margin = dict(left=0.25 * bb.width, right=0.02 * bb.width, top=0.05 * bb.height, bottom=0.05 * bb.height) self._init_axes(**kwargs) @staticmethod def find_cut_coords(img=None, threshold=None, cut_coords=None): # Implement this as a staticmethod or a classmethod when # subclassing raise NotImplementedError @classmethod def init_with_figure(cls, img, threshold=None, cut_coords=None, figure=None, axes=None, black_bg=False, leave_space=False, colorbar=False, **kwargs): "Initialize the slicer with an image" # deal with "fake" 4D images if img is not None and img is not False: img = _utils.check_niimg_3d(img) cut_coords = cls.find_cut_coords(img, threshold, cut_coords) if isinstance(axes, plt.Axes) and figure is None: figure = axes.figure if not isinstance(figure, plt.Figure): # Make sure that we have a figure figsize = cls._default_figsize[:] # Adjust for the number of axes figsize[0] *= len(cut_coords) # Make space for the colorbar if colorbar: figsize[0] += .7 facecolor = 'k' if black_bg else 'w' if leave_space: figsize[0] += 3.4 figure = plt.figure(figure, figsize=figsize, facecolor=facecolor) if isinstance(axes, plt.Axes): assert axes.figure is figure, ("The axes passed are not " "in the figure") if axes is None: axes = [0., 0., 1., 1.] if leave_space: axes = [0.3, 0, .7, 1.] if isinstance(axes, collections.Sequence): axes = figure.add_axes(axes) # People forget to turn their axis off, or to set the zorder, and # then they cannot see their slicer axes.axis('off') return cls(cut_coords, axes, black_bg, **kwargs) def title(self, text, x=0.01, y=0.99, size=15, color=None, bgcolor=None, alpha=1, **kwargs): """ Write a title to the view. Parameters ---------- text: string The text of the title x: float, optional The horizontal position of the title on the frame in fraction of the frame width. y: float, optional The vertical position of the title on the frame in fraction of the frame height. size: integer, optional The size of the title text. color: matplotlib color specifier, optional The color of the font of the title. bgcolor: matplotlib color specifier, optional The color of the background of the title. alpha: float, optional The alpha value for the background. kwargs: Extra keyword arguments are passed to matplotlib's text function. """ if color is None: color = 'k' if self._black_bg else 'w' if bgcolor is None: bgcolor = 'w' if self._black_bg else 'k' if hasattr(self, '_cut_displayed'): first_axe = self._cut_displayed[0] else: first_axe = self.cut_coords[0] ax = self.axes[first_axe].ax ax.text(x, y, text, transform=self.frame_axes.transAxes, horizontalalignment='left', verticalalignment='top', size=size, color=color, bbox=dict(boxstyle="square,pad=.3", ec=bgcolor, fc=bgcolor, alpha=alpha), zorder=1000, **kwargs) ax.set_zorder(1000) def add_overlay(self, img, threshold=1e-6, colorbar=False, **kwargs): """ Plot a 3D map in all the views. Parameters ----------- img: Niimg-like object See http://nilearn.github.io/manipulating_images/input_output.html. If it is a masked array, only the non-masked part will be plotted. threshold : a number, None If None is given, the maps are not thresholded. 
If a number is given, it is used to threshold the maps: values below the threshold (in absolute value) are plotted as transparent. colorbar: boolean, optional If True, display a colorbar on the right of the plots. kwargs: Extra keyword arguments are passed to imshow. """ if colorbar and self._colorbar: raise ValueError("This figure already has an overlay with a " "colorbar.") else: self._colorbar = colorbar img = _utils.check_niimg_3d(img) # Make sure that add_overlay shows consistent default behavior # with plot_stat_map kwargs.setdefault('interpolation', 'nearest') ims = self._map_show(img, type='imshow', threshold=threshold, **kwargs) if colorbar: self._show_colorbar(ims[0].cmap, ims[0].norm, threshold) plt.draw_if_interactive() def add_contours(self, img, filled=False, **kwargs): """ Contour a 3D map in all the views. Parameters ----------- img: Niimg-like object See http://nilearn.github.io/manipulating_images/input_output.html. Provides image to plot. filled: boolean, optional If filled=True, contours are displayed with color fillings. kwargs: Extra keyword arguments are passed to contour, see the documentation of pylab.contour Useful, arguments are typical "levels", which is a list of values to use for plotting a contour, and "colors", which is one color or a list of colors for these contours. """ self._map_show(img, type='contour', **kwargs) if filled: colors = kwargs['colors'] levels = kwargs['levels'] if len(levels) <= 1: # contour fillings levels should be given as (lower, upper). levels.append(np.inf) alpha = kwargs['alpha'] self._map_show(img, type='contourf', levels=levels, alpha=alpha, colors=colors[:3]) plt.draw_if_interactive() def _map_show(self, img, type='imshow', resampling_interpolation='continuous', threshold=None, **kwargs): img = reorder_img(img, resample=resampling_interpolation) threshold = float(threshold) if threshold is not None else None if threshold is not None: data = img.get_data() if threshold == 0: data = np.ma.masked_equal(data, 0, copy=False) else: data = np.ma.masked_inside(data, -threshold, threshold, copy=False) img = new_img_like(img, data, img.get_affine()) affine = img.get_affine() data = img.get_data() data_bounds = get_bounds(data.shape, affine) (xmin, xmax), (ymin, ymax), (zmin, zmax) = data_bounds xmin_, xmax_, ymin_, ymax_, zmin_, zmax_ = \ xmin, xmax, ymin, ymax, zmin, zmax if hasattr(data, 'mask') and isinstance(data.mask, np.ndarray): not_mask = np.logical_not(data.mask) xmin_, xmax_, ymin_, ymax_, zmin_, zmax_ = \ get_mask_bounds(new_img_like(img, not_mask, affine)) data_2d_list = [] for display_ax in self.axes.values(): try: data_2d = display_ax.transform_to_2d(data, affine) # To obtain the brain left view, we simply invert the x axis if display_ax.direction == 'l': display_ax.ax.invert_xaxis() except IndexError: # We are cutting outside the indices of the data data_2d = None data_2d_list.append(data_2d) if kwargs.get('vmin') is None: kwargs['vmin'] = np.ma.min([d.min() for d in data_2d_list if d is not None]) if kwargs.get('vmax') is None: kwargs['vmax'] = np.ma.max([d.max() for d in data_2d_list if d is not None]) bounding_box = (xmin_, xmax_), (ymin_, ymax_), (zmin_, zmax_) ims = [] to_iterate_over = zip(self.axes.values(), data_2d_list) for display_ax, data_2d in to_iterate_over: if data_2d is not None: im = display_ax.draw_2d(data_2d, data_bounds, bounding_box, type=type, **kwargs) ims.append(im) return ims def _show_colorbar(self, cmap, norm, threshold=None): """ Parameters ---------- cmap: a matplotlib colormap The colormap used 
norm: a matplotlib.colors.Normalize object This object is typically found as the 'norm' attribute of an matplotlib.image.AxesImage threshold: float or None The absolute value at which the colorbar is thresholded """ if threshold is None: offset = 0 else: offset = threshold if offset > norm.vmax: offset = norm.vmax # create new axis for the colorbar figure = self.frame_axes.figure _, y0, x1, y1 = self.rect height = y1 - y0 x_adjusted_width = self._colorbar_width / len(self.axes) x_adjusted_margin = self._colorbar_margin['right'] / len(self.axes) lt_wid_top_ht = [x1 - (x_adjusted_width + x_adjusted_margin), y0 + self._colorbar_margin['top'], x_adjusted_width, height - (self._colorbar_margin['top'] + self._colorbar_margin['bottom'])] self._colorbar_ax = figure.add_axes(lt_wid_top_ht, axis_bgcolor='w') our_cmap = mpl_cm.get_cmap(cmap) # edge case where the data has a single value # yields a cryptic matplotlib error message # when trying to plot the color bar nb_ticks = 5 if norm.vmin != norm.vmax else 1 ticks = np.linspace(norm.vmin, norm.vmax, nb_ticks) bounds = np.linspace(norm.vmin, norm.vmax, our_cmap.N) # some colormap hacking cmaplist = [our_cmap(i) for i in range(our_cmap.N)] istart = int(norm(-offset, clip=True) * (our_cmap.N - 1)) istop = int(norm(offset, clip=True) * (our_cmap.N - 1)) for i in range(istart, istop): cmaplist[i] = (0.5, 0.5, 0.5, 1.) # just an average gray color if norm.vmin == norm.vmax: # len(np.unique(data)) == 1 ? return else: our_cmap = our_cmap.from_list('Custom cmap', cmaplist, our_cmap.N) self._cbar = ColorbarBase( self._colorbar_ax, ticks=ticks, norm=norm, orientation='vertical', cmap=our_cmap, boundaries=bounds, spacing='proportional', format='%.2g') self._colorbar_ax.yaxis.tick_left() tick_color = 'w' if self._black_bg else 'k' for tick in self._colorbar_ax.yaxis.get_ticklabels(): tick.set_color(tick_color) self._colorbar_ax.yaxis.set_tick_params(width=0) def add_edges(self, img, color='r'): """ Plot the edges of a 3D map in all the views. Parameters ---------- map: 3D ndarray The 3D map to be plotted. If it is a masked array, only the non-masked part will be plotted. affine: 4x4 ndarray The affine matrix giving the transformation from voxel indices to world space. color: matplotlib color: string or (r, g, b) value The color used to display the edge map """ img = reorder_img(img, resample='continuous') data = img.get_data() affine = img.get_affine() single_color_cmap = colors.ListedColormap([color]) data_bounds = get_bounds(data.shape, img.get_affine()) # For each ax, cut the data and plot it for display_ax in self.axes.values(): try: data_2d = display_ax.transform_to_2d(data, affine) edge_mask = _edge_map(data_2d) except IndexError: # We are cutting outside the indices of the data continue display_ax.draw_2d(edge_mask, data_bounds, data_bounds, type='imshow', cmap=single_color_cmap) plt.draw_if_interactive() def add_markers(self, marker_coords, marker_color='r', marker_size=30, **kwargs): """Add markers to the plot. Parameters ---------- marker_coords: array of size (n_markers, 3) Coordinates of the markers to plot. For each slice, only markers that are 2 millimeters away from the slice are plotted. 
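            That is, markers farther than 2 millimeters from every slice
            are not drawn; coordinates are expected in world space.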
marker_color: pyplot compatible color or list of shape (n_markers,) List of colors for each marker that can be string or matplotlib colors marker_size: single float or list of shape (n_markers,) Size in pixel for each marker """ defaults = {'marker': 'o', 'zorder': 1000} marker_coords = np.asanyarray(marker_coords) for k, v in defaults.items(): kwargs.setdefault(k, v) for display_ax in self.axes.values(): direction = display_ax.direction coord = display_ax.coord marker_coords_2d, third_d = _coords_3d_to_2d( marker_coords, direction, return_direction=True) # Heuristic that plots only markers that are 2mm away from the # current slice. # XXX: should we keep this heuristic? mask = np.abs(third_d - coord) <= 2. xdata, ydata = marker_coords_2d.T display_ax.ax.scatter(xdata[mask], ydata[mask], s=marker_size, c=marker_color, **kwargs) def annotate(self, left_right=True, positions=True, size=12, **kwargs): """ Add annotations to the plot. Parameters ---------- left_right: boolean, optional If left_right is True, annotations indicating which side is left and which side is right are drawn. positions: boolean, optional If positions is True, annotations indicating the positions of the cuts are drawn. size: integer, optional The size of the text used. kwargs: Extra keyword arguments are passed to matplotlib's text function. """ kwargs = kwargs.copy() if 'color' not in kwargs: if self._black_bg: kwargs['color'] = 'w' else: kwargs['color'] = 'k' bg_color = ('k' if self._black_bg else 'w') if left_right: for display_ax in self.axes.values(): display_ax.draw_left_right(size=size, bg_color=bg_color, **kwargs) if positions: for display_ax in self.axes.values(): display_ax.draw_position(size=size, bg_color=bg_color, **kwargs) def close(self): """ Close the figure. This is necessary to avoid leaking memory. """ plt.close(self.frame_axes.figure.number) def savefig(self, filename, dpi=None): """ Save the figure to a file Parameters ---------- filename: string The file name to save to. It's extension determines the file type, typically '.png', '.svg' or '.pdf'. dpi: None or scalar The resolution in dots per inch. """ facecolor = edgecolor = 'k' if self._black_bg else 'w' self.frame_axes.figure.savefig(filename, dpi=dpi, facecolor=facecolor, edgecolor=edgecolor) ############################################################################### # class OrthoSlicer ############################################################################### class OrthoSlicer(BaseSlicer): """ A class to create 3 linked axes for plotting orthogonal cuts of 3D maps. Attributes ---------- axes: dictionnary of axes The 3 axes used to plot each view. frame_axes: axes The axes framing the whole set of views. Notes ----- The extent of the different axes are adjusted to fit the data best in the viewing area. 
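    Examples
    --------
    A minimal sketch (the image path is illustrative)::

        from nilearn import plotting
        # plot_img with display_mode='ortho' returns an OrthoSlicer
        display = plotting.plot_img('stat_map.nii.gz', display_mode='ortho')
        display.title('my map')
        display.savefig('ortho_cuts.png')
        display.close()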
""" _cut_displayed = 'yxz' _axes_class = CutAxes @classmethod def find_cut_coords(self, img=None, threshold=None, cut_coords=None): "Instanciate the slicer and find cut coordinates" if cut_coords is None: if img is None or img is False: cut_coords = (0, 0, 0) else: cut_coords = find_xyz_cut_coords( img, activation_threshold=threshold) cut_coords = [cut_coords['xyz'.find(c)] for c in sorted(self._cut_displayed)] return cut_coords def _init_axes(self, **kwargs): cut_coords = self.cut_coords if len(cut_coords) != len(self._cut_displayed): raise ValueError('The number cut_coords passed does not' 'match the display_mode') x0, y0, x1, y1 = self.rect axisbg = 'k' if self._black_bg else 'w' # Create our axes: self.axes = dict() for index, direction in enumerate(self._cut_displayed): fh = self.frame_axes.get_figure() ax = fh.add_axes([0.3 * index * (x1 - x0) + x0, y0, .3 * (x1 - x0), y1 - y0], axisbg=axisbg, aspect='equal') ax.axis('off') coord = self.cut_coords[ sorted(self._cut_displayed).index(direction)] display_ax = self._axes_class(ax, direction, coord, **kwargs) self.axes[direction] = display_ax ax.set_axes_locator(self._locator) if self._black_bg: for ax in self.axes.values(): ax.ax.imshow(np.zeros((2, 2, 3)), extent=[-5000, 5000, -5000, 5000], zorder=-500, aspect='equal') # To have a black background in PDF, we need to create a # patch in black for the background self.frame_axes.imshow(np.zeros((2, 2, 3)), extent=[-5000, 5000, -5000, 5000], zorder=-500, aspect='auto') self.frame_axes.set_zorder(-1000) def _locator(self, axes, renderer): """ The locator function used by matplotlib to position axes. Here we put the logic used to adjust the size of the axes. """ x0, y0, x1, y1 = self.rect width_dict = dict() # A dummy axes, for the situation in which we are not plotting # all three (x, y, z) cuts dummy_ax = self._axes_class(None, None, None) width_dict[dummy_ax.ax] = 0 display_ax_dict = self.axes if self._colorbar: adjusted_width = self._colorbar_width / len(self.axes) right_margin = self._colorbar_margin['right'] / len(self.axes) ticks_margin = self._colorbar_margin['left'] / len(self.axes) x1 = x1 - (adjusted_width + ticks_margin + right_margin) for display_ax in display_ax_dict.values(): bounds = display_ax.get_object_bounds() if not bounds: # This happens if the call to _map_show was not # succesful. As it happens asyncroniously (during a # refresh of the figure) we capture the problem and # ignore it: it only adds a non informative traceback bounds = [0, 1, 0, 1] xmin, xmax, ymin, ymax = bounds width_dict[display_ax.ax] = (xmax - xmin) total_width = float(sum(width_dict.values())) for ax, width in width_dict.items(): width_dict[ax] = width / total_width * (x1 - x0) direction_ax = [] for d in self._cut_displayed: direction_ax.append(display_ax_dict.get(d, dummy_ax).ax) left_dict = dict() for idx, ax in enumerate(direction_ax): left_dict[ax] = x0 for prev_ax in direction_ax[:idx]: left_dict[ax] += width_dict[prev_ax] return transforms.Bbox([[left_dict[axes], y0], [left_dict[axes] + width_dict[axes], y1]]) def draw_cross(self, cut_coords=None, **kwargs): """ Draw a crossbar on the plot to show where the cut is performed. Parameters ---------- cut_coords: 3-tuple of floats, optional The position of the cross to draw. If none is passed, the ortho_slicer's cut coordinates are used. 
kwargs: Extra keyword arguments are passed to axhline """ if cut_coords is None: cut_coords = self.cut_coords coords = dict() for direction in 'xyz': coord = None if direction in self._cut_displayed: coord = cut_coords[ sorted(self._cut_displayed).index(direction)] coords[direction] = coord x, y, z = coords['x'], coords['y'], coords['z'] kwargs = kwargs.copy() if 'color' not in kwargs: if self._black_bg: kwargs['color'] = '.8' else: kwargs['color'] = 'k' if 'y' in self.axes: ax = self.axes['y'].ax if x is not None: ax.axvline(x, ymin=.05, ymax=.95, **kwargs) if z is not None: ax.axhline(z, **kwargs) if 'x' in self.axes: ax = self.axes['x'].ax if y is not None: ax.axvline(y, ymin=.05, ymax=.95, **kwargs) if z is not None: ax.axhline(z, xmax=.95, **kwargs) if 'z' in self.axes: ax = self.axes['z'].ax if x is not None: ax.axvline(x, ymin=.05, ymax=.95, **kwargs) if y is not None: ax.axhline(y, **kwargs) ############################################################################### # class BaseStackedSlicer ############################################################################### class BaseStackedSlicer(BaseSlicer): """ A class to create linked axes for plotting stacked cuts of 2D maps. Attributes ---------- axes: dictionnary of axes The axes used to plot each view. frame_axes: axes The axes framing the whole set of views. Notes ----- The extent of the different axes are adjusted to fit the data best in the viewing area. """ @classmethod def find_cut_coords(cls, img=None, threshold=None, cut_coords=None): "Instanciate the slicer and find cut coordinates" if cut_coords is None: cut_coords = 7 if img is None or img is False: bounds = ((-40, 40), (-30, 30), (-30, 75)) lower, upper = bounds['xyz'.index(cls._direction)] cut_coords = np.linspace(lower, upper, cut_coords).tolist() else: if (not isinstance(cut_coords, collections.Sequence) and isinstance(cut_coords, numbers.Number)): cut_coords = find_cut_slices(img, direction=cls._direction, n_cuts=cut_coords) return cut_coords def _init_axes(self, **kwargs): x0, y0, x1, y1 = self.rect # Create our axes: self.axes = dict() fraction = 1. / len(self.cut_coords) for index, coord in enumerate(self.cut_coords): coord = float(coord) fh = self.frame_axes.get_figure() ax = fh.add_axes([fraction * index * (x1 - x0) + x0, y0, fraction * (x1 - x0), y1 - y0]) ax.axis('off') display_ax = self._axes_class(ax, self._direction, coord, **kwargs) self.axes[coord] = display_ax ax.set_axes_locator(self._locator) if self._black_bg: for ax in self.axes.values(): ax.ax.imshow(np.zeros((2, 2, 3)), extent=[-5000, 5000, -5000, 5000], zorder=-500, aspect='equal') # To have a black background in PDF, we need to create a # patch in black for the background self.frame_axes.imshow(np.zeros((2, 2, 3)), extent=[-5000, 5000, -5000, 5000], zorder=-500, aspect='auto') self.frame_axes.set_zorder(-1000) def _locator(self, axes, renderer): """ The locator function used by matplotlib to position axes. Here we put the logic used to adjust the size of the axes. """ x0, y0, x1, y1 = self.rect width_dict = dict() display_ax_dict = self.axes if self._colorbar: adjusted_width = self._colorbar_width / len(self.axes) right_margin = self._colorbar_margin['right'] / len(self.axes) ticks_margin = self._colorbar_margin['left'] / len(self.axes) x1 = x1 - (adjusted_width + right_margin + ticks_margin) for display_ax in display_ax_dict.values(): bounds = display_ax.get_object_bounds() if not bounds: # This happens if the call to _map_show was not # succesful. 
As it happens asyncroniously (during a # refresh of the figure) we capture the problem and # ignore it: it only adds a non informative traceback bounds = [0, 1, 0, 1] xmin, xmax, ymin, ymax = bounds width_dict[display_ax.ax] = (xmax - xmin) total_width = float(sum(width_dict.values())) for ax, width in width_dict.items(): width_dict[ax] = width / total_width * (x1 - x0) left_dict = dict() left = float(x0) for coord, display_ax in sorted(display_ax_dict.items()): left_dict[display_ax.ax] = left this_width = width_dict[display_ax.ax] left += this_width return transforms.Bbox([[left_dict[axes], y0], [left_dict[axes] + width_dict[axes], y1]]) def draw_cross(self, cut_coords=None, **kwargs): """ Draw a crossbar on the plot to show where the cut is performed. Parameters ---------- cut_coords: 3-tuple of floats, optional The position of the cross to draw. If none is passed, the ortho_slicer's cut coordinates are used. kwargs: Extra keyword arguments are passed to axhline """ return class XSlicer(BaseStackedSlicer): _direction = 'x' _default_figsize = [2.6, 2.3] class YSlicer(BaseStackedSlicer): _direction = 'y' _default_figsize = [2.2, 2.3] class ZSlicer(BaseStackedSlicer): _direction = 'z' _default_figsize = [2.2, 2.3] class XZSlicer(OrthoSlicer): _cut_displayed = 'xz' class YXSlicer(OrthoSlicer): _cut_displayed = 'yx' class YZSlicer(OrthoSlicer): _cut_displayed = 'yz' SLICERS = dict(ortho=OrthoSlicer, xz=XZSlicer, yz=YZSlicer, yx=YXSlicer, x=XSlicer, y=YSlicer, z=ZSlicer) class OrthoProjector(OrthoSlicer): """A class to create linked axes for plotting orthogonal projections of 3D maps. """ _axes_class = GlassBrainAxes @classmethod def find_cut_coords(cls, img=None, threshold=None, cut_coords=None): return (None, ) * len(cls._cut_displayed) def draw_cross(self, cut_coords=None, **kwargs): # It does not make sense to draw crosses for the position of # the cuts since we are taking the max along one axis pass def add_graph(self, adjacency_matrix, node_coords, node_color='auto', node_size=50, edge_cmap=cm.bwr, edge_vmin=None, edge_vmax=None, edge_threshold=None, edge_kwargs=None, node_kwargs=None, colorbar=False, ): """Plot undirected graph on each of the axes Parameters ---------- adjacency_matrix: numpy array of shape (n, n) represents the edges strengths of the graph. Assumed to be a symmetric matrix. node_coords: numpy array_like of shape (n, 3) 3d coordinates of the graph nodes in world space. node_color: color or sequence of colors color(s) of the nodes. node_size: scalar or array_like size(s) of the nodes in points^2. edge_cmap: colormap colormap used for representing the strength of the edges. edge_vmin: float, optional, default: None edge_vmax: float, optional, default: None If not None, either or both of these values will be used to as the minimum and maximum values to color edges. If None are supplied the maximum absolute value within the given threshold will be used as minimum (multiplied by -1) and maximum coloring levels. edge_threshold: str or number If it is a number only the edges with a value greater than edge_threshold will be shown. If it is a string it must finish with a percent sign, e.g. "25.3%", and only the edges with a abs(value) above the given percentile will be shown. edge_kwargs: dict will be passed as kwargs for each edge matlotlib Line2D. node_kwargs: dict will be passed as kwargs to the plt.scatter call that plots all the nodes in one go. 
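        Examples
        --------
        A minimal sketch, with illustrative coordinates and weights::

            import numpy as np
            from nilearn import plotting

            node_coords = np.array([[-40., -20., 30.],
                                    [40., -20., 30.],
                                    [0., 50., 0.]])
            adjacency = np.array([[0., .6, 0.],
                                  [.6, 0., -.4],
                                  [0., -.4, 0.]])  # must be symmetric
            # plot_connectome builds a glass-brain projector and calls
            # add_graph internally
            display = plotting.plot_connectome(adjacency, node_coords)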
""" # set defaults if edge_kwargs is None: edge_kwargs = {} if node_kwargs is None: node_kwargs = {} if node_color == 'auto': nb_nodes = len(node_coords) node_color = mpl_cm.Set2(np.linspace(0, 1, nb_nodes)) node_coords = np.asarray(node_coords) # decompress input matrix if sparse if sparse.issparse(adjacency_matrix): adjacency_matrix = adjacency_matrix.toarray() # make the lines below well-behaved adjacency_matrix = np.nan_to_num(adjacency_matrix) # safety checks if 's' in node_kwargs: raise ValueError("Please use 'node_size' and not 'node_kwargs' " "to specify node sizes") if 'c' in node_kwargs: raise ValueError("Please use 'node_color' and not 'node_kwargs' " "to specify node colors") adjacency_matrix_shape = adjacency_matrix.shape if (len(adjacency_matrix_shape) != 2 or adjacency_matrix_shape[0] != adjacency_matrix_shape[1]): raise ValueError( "'adjacency_matrix' is supposed to have shape (n, n)." ' Its shape was {0}'.format(adjacency_matrix_shape)) node_coords_shape = node_coords.shape if len(node_coords_shape) != 2 or node_coords_shape[1] != 3: message = ( "Invalid shape for 'node_coords'. You passed an " "'adjacency_matrix' of shape {0} therefore " "'node_coords' should be a array with shape ({0[0]}, 3) " 'while its shape was {1}').format(adjacency_matrix_shape, node_coords_shape) raise ValueError(message) if node_coords_shape[0] != adjacency_matrix_shape[0]: raise ValueError( "Shape mismatch between 'adjacency_matrix' " "and 'node_coords'" "'adjacency_matrix' shape is {0}, 'node_coords' shape is {1}" .format(adjacency_matrix_shape, node_coords_shape)) if not np.allclose(adjacency_matrix, adjacency_matrix.T, rtol=1e-3): raise ValueError("'adjacency_matrix' should be symmetric") # For a masked array, masked values are replaced with zeros if hasattr(adjacency_matrix, 'mask'): if not (adjacency_matrix.mask == adjacency_matrix.mask.T).all(): raise ValueError( "'adjacency_matrix' was masked with a non symmetric mask") adjacency_matrix = adjacency_matrix.filled(0) if edge_threshold is not None: # Keep a percentile of edges with the highest absolute # values, so only need to look at the covariance # coefficients below the diagonal lower_diagonal_indices = np.tril_indices_from(adjacency_matrix, k=-1) lower_diagonal_values = adjacency_matrix[ lower_diagonal_indices] edge_threshold = _utils.param_validation.check_threshold( edge_threshold, np.abs(lower_diagonal_values), stats.scoreatpercentile, 'edge_threshold') adjacency_matrix = adjacency_matrix.copy() threshold_mask = np.abs(adjacency_matrix) < edge_threshold adjacency_matrix[threshold_mask] = 0 lower_triangular_adjacency_matrix = np.tril(adjacency_matrix, k=-1) non_zero_indices = lower_triangular_adjacency_matrix.nonzero() line_coords = [node_coords[list(index)] for index in zip(*non_zero_indices)] adjacency_matrix_values = adjacency_matrix[non_zero_indices] for ax in self.axes.values(): ax._add_markers(node_coords, node_color, node_size, **node_kwargs) if line_coords: ax._add_lines(line_coords, adjacency_matrix_values, edge_cmap, vmin=edge_vmin, vmax=edge_vmax, **edge_kwargs) # To obtain the brain left view, we simply invert the x axis if ax.direction == 'l': ax.ax.invert_xaxis() if colorbar: self._colorbar = colorbar self._show_colorbar(ax.cmap, ax.norm, threshold=edge_threshold) plt.draw_if_interactive() class XProjector(OrthoProjector): _cut_displayed = 'x' _default_figsize = [2.6, 2.3] class YProjector(OrthoProjector): _cut_displayed = 'y' _default_figsize = [2.2, 2.3] class ZProjector(OrthoProjector): _cut_displayed = 'z' 
_default_figsize = [2.2, 2.3] class XZProjector(OrthoProjector): _cut_displayed = 'xz' class YXProjector(OrthoProjector): _cut_displayed = 'yx' class YZProjector(OrthoProjector): _cut_displayed = 'yz' class LYRZProjector(OrthoProjector): _cut_displayed = 'lyrz' class LZRYProjector(OrthoProjector): _cut_displayed = 'lzry' class LZRProjector(OrthoProjector): _cut_displayed = 'lzr' class LYRProjector(OrthoProjector): _cut_displayed = 'lyr' class LRProjector(OrthoProjector): _cut_displayed = 'lr' class LProjector(OrthoProjector): _cut_displayed = 'l' _default_figsize = [2.6, 2.3] class RProjector(OrthoProjector): _cut_displayed = 'r' _default_figsize = [2.6, 2.3] PROJECTORS = dict(ortho=OrthoProjector, xz=XZProjector, yz=YZProjector, yx=YXProjector, x=XProjector, y=YProjector, z=ZProjector, lzry=LZRYProjector, lyrz=LYRZProjector, lyr=LYRProjector, lzr=LZRProjector, lr=LRProjector, l=LProjector, r=RProjector) def get_create_display_fun(display_mode, class_dict): try: return class_dict[display_mode].init_with_figure except KeyError: message = ('{0} is not a valid display_mode. ' 'Valid options are {1}').format( display_mode, sorted(class_dict.keys())) raise ValueError(message) def get_slicer(display_mode): "Internal function to retrieve a slicer" return get_create_display_fun(display_mode, SLICERS) def get_projector(display_mode): "Internal function to retrieve a projector" return get_create_display_fun(display_mode, PROJECTORS) PKpH!QQ nilearn/plotting/img_plotting.py""" Functions to do automatic visualization of Niimg-like objects See http://nilearn.github.io/manipulating_images/input_output.html Only matplotlib is required. """ # Author: Gael Varoquaux, Chris Filo Gorgolewski # License: BSD # Standard library imports import collections import functools import numbers import warnings # Standard scientific libraries imports (more specific imports are # delayed, so that the part module can be used without them). import numpy as np from scipy import ndimage from nibabel.spatialimages import SpatialImage from .._utils.numpy_conversions import as_ndarray from .._utils.compat import _basestring from .._utils.niimg import _safe_get_data import matplotlib import matplotlib.pyplot as plt from .. import _utils from ..image import new_img_like from .._utils.extmath import fast_abs_percentile from .._utils.param_validation import check_threshold from .._utils.fixes.matplotlib_backports import (cbar_outline_get_xy, cbar_outline_set_xy) from .._utils.ndimage import get_border_data from ..datasets import load_mni152_template from ..image import iter_img from .displays import get_slicer, get_projector from . import cm def show(): """Show all the figures generated by nilearn and/or matplotlib This function is equivalent to :func:`matplotlib.pyplot.show`. """ plt.show() ############################################################################### # Core, usage-agnostic functions def _get_colorbar_and_data_ranges(stat_map_data, vmax, symmetric_cbar, kwargs, force_min_stat_map_value=None): """ Internal function for setting colormap and colorbar limits Used by for plot_stat_map and plot_glass_brain. The limits for the colormap will always be set to range from -vmax to vmax. The limits for the colorbar depend on the symmetric_cbar argument, please refer to docstring of plot_stat_map. """ if 'vmin' in kwargs: raise ValueError('this function does not accept a "vmin" ' 'argument, as it uses a symmetrical range ' 'defined via the vmax argument. 
To threshold ' 'the plotted map, use the "threshold" argument') # make sure that the color range is symmetrical if vmax is None or symmetric_cbar in ['auto', False]: # Avoid dealing with masked_array: if hasattr(stat_map_data, '_mask'): stat_map_data = np.asarray( stat_map_data[np.logical_not(stat_map_data._mask)]) stat_map_max = np.nanmax(stat_map_data) if force_min_stat_map_value is None: stat_map_min = np.nanmin(stat_map_data) else: stat_map_min = force_min_stat_map_value if symmetric_cbar == 'auto': symmetric_cbar = stat_map_min < 0 and stat_map_max > 0 if vmax is None: vmax = max(-stat_map_min, stat_map_max) vmin = -vmax if not symmetric_cbar: negative_range = stat_map_max <= 0 positive_range = stat_map_min >= 0 if positive_range: cbar_vmin = 0 cbar_vmax = None elif negative_range: cbar_vmax = 0 cbar_vmin = None else: cbar_vmin = stat_map_min cbar_vmax = stat_map_max else: cbar_vmin, cbar_vmax = None, None return cbar_vmin, cbar_vmax, vmin, vmax def _plot_img_with_bg(img, bg_img=None, cut_coords=None, output_file=None, display_mode='ortho', colorbar=False, figure=None, axes=None, title=None, threshold=None, annotate=True, draw_cross=True, black_bg=False, vmin=None, vmax=None, bg_vmin=None, bg_vmax=None, interpolation="nearest", display_factory=get_slicer, cbar_vmin=None, cbar_vmax=None, **kwargs): """ Internal function, please refer to the docstring of plot_img for parameters not listed below. Parameters ---------- bg_vmin: float vmin for bg_img bg_vmax: float vmax for bg_img interpolation: string passed to the add_overlay calls display_factory: function takes a display_mode argument and return a display class """ show_nan_msg = False if vmax is not None and np.isnan(vmax): vmax = None show_nan_msg = True if vmin is not None and np.isnan(vmin): vmin = None show_nan_msg = True if show_nan_msg: nan_msg = ('NaN is not permitted for the vmax and vmin arguments.\n' 'Tip: Use np.nan_max() instead of np.max().') warnings.warn(nan_msg) if isinstance(cut_coords, numbers.Number) and display_mode == 'ortho': raise ValueError( "The input given for display_mode='ortho' needs to be " "a list of 3d world coordinates in (x, y, z). 
" "You provided single cut, cut_coords={0}".format(cut_coords)) if img is not False and img is not None: img = _utils.check_niimg_3d(img, dtype='auto') data = _safe_get_data(img) affine = img.get_affine() if np.isnan(np.sum(data)): data = np.nan_to_num(data) # Deal with automatic settings of plot parameters if threshold == 'auto': # Threshold epsilon below a percentile value, to be sure that some # voxels pass the threshold threshold = fast_abs_percentile(data) - 1e-5 img = new_img_like(img, as_ndarray(data), affine) display = display_factory(display_mode)( img, threshold=threshold, cut_coords=cut_coords, figure=figure, axes=axes, black_bg=black_bg, colorbar=colorbar) if bg_img is not None: bg_img = _utils.check_niimg_3d(bg_img) display.add_overlay(bg_img, vmin=bg_vmin, vmax=bg_vmax, cmap=plt.cm.gray, interpolation=interpolation) if img is not None and img is not False: display.add_overlay(new_img_like(img, data, affine), threshold=threshold, interpolation=interpolation, colorbar=colorbar, vmin=vmin, vmax=vmax, **kwargs) if annotate: display.annotate() if draw_cross: display.draw_cross() if title is not None and not title == '': display.title(title) if (cbar_vmax is not None) or (cbar_vmin is not None): if hasattr(display, '_cbar'): cbar = display._cbar cbar_tick_locs = cbar.locator.locs if cbar_vmax is None: cbar_vmax = cbar_tick_locs.max() if cbar_vmin is None: cbar_vmin = cbar_tick_locs.min() new_tick_locs = np.linspace(cbar_vmin, cbar_vmax, len(cbar_tick_locs)) cbar.ax.set_ylim(cbar.norm(cbar_vmin), cbar.norm(cbar_vmax)) outline = cbar_outline_get_xy(cbar.outline) outline[:2, 1] += cbar.norm(cbar_vmin) outline[2:6, 1] -= (1. - cbar.norm(cbar_vmax)) outline[6:, 1] += cbar.norm(cbar_vmin) cbar_outline_set_xy(cbar.outline, outline) cbar.set_ticks(new_tick_locs, update_ticks=True) if output_file is not None: display.savefig(output_file) display.close() display = None return display def plot_img(img, cut_coords=None, output_file=None, display_mode='ortho', figure=None, axes=None, title=None, threshold=None, annotate=True, draw_cross=True, black_bg=False, colorbar=False, **kwargs): """ Plot cuts of a given image (by default Frontal, Axial, and Lateral) Parameters ---------- img: Niimg-like object See http://nilearn.github.io/manipulating_images/input_output.html cut_coords: None, a tuple of floats, or an integer The MNI coordinates of the point where the cut is performed If display_mode is 'ortho', this should be a 3-tuple: (x, y, z) For display_mode == 'x', 'y', or 'z', then these are the coordinates of each cut in the corresponding direction. If None is given, the cuts is calculated automaticaly. If display_mode is 'x', 'y' or 'z', cut_coords can be an integer, in which case it specifies the number of cuts to perform output_file: string, or None, optional The name of an image file to export the plot to. Valid extensions are .png, .pdf, .svg. If output_file is not None, the plot is saved to a file, and the display is closed. display_mode : {'ortho', 'x', 'y', 'z', 'xy', 'xz', 'yz'} Choose the direction of the cuts: 'x' - sagittal, 'y' - coronal, 'z' - axial, 'ortho' - three cuts are performed in orthogonal directions. figure : integer or matplotlib figure, optional Matplotlib figure used or its number. If None is given, a new figure is created. axes : matplotlib axes or 4 tuple of float: (xmin, ymin, width, height), optional The axes, or the coordinates, in matplotlib figure space, of the axes used to display the plot. If None, the complete figure is used. 
def plot_img(img, cut_coords=None, output_file=None, display_mode='ortho',
             figure=None, axes=None, title=None, threshold=None,
             annotate=True, draw_cross=True, black_bg=False, colorbar=False,
             **kwargs):
    """ Plot cuts of a given image (by default Frontal, Axial, and Lateral)

    Parameters
    ----------
    img: Niimg-like object
        See http://nilearn.github.io/manipulating_images/input_output.html
    cut_coords: None, a tuple of floats, or an integer
        The MNI coordinates of the point where the cut is performed.
        If display_mode is 'ortho', this should be a 3-tuple: (x, y, z).
        For display_mode == 'x', 'y', or 'z', then these are the
        coordinates of each cut in the corresponding direction.
        If None is given, the cuts are calculated automatically.
        If display_mode is 'x', 'y' or 'z', cut_coords can be an integer,
        in which case it specifies the number of cuts to perform.
    output_file: string, or None, optional
        The name of an image file to export the plot to. Valid extensions
        are .png, .pdf, .svg. If output_file is not None, the plot
        is saved to a file, and the display is closed.
    display_mode : {'ortho', 'x', 'y', 'z', 'xy', 'xz', 'yz'}
        Choose the direction of the cuts: 'x' - sagittal, 'y' - coronal,
        'z' - axial, 'ortho' - three cuts are performed in orthogonal
        directions.
    figure : integer or matplotlib figure, optional
        Matplotlib figure used or its number. If None is given, a
        new figure is created.
    axes : matplotlib axes or 4 tuple of float: (xmin, ymin, width, height), optional
        The axes, or the coordinates, in matplotlib figure space,
        of the axes used to display the plot. If None, the complete
        figure is used.
    title : string, optional
        The title displayed on the figure.
    threshold : a number, None, or 'auto'
        If None is given, the image is not thresholded.
        If a number is given, it is used to threshold the image:
        values below the threshold (in absolute value) are plotted
        as transparent. If auto is given, the threshold is determined
        magically by analysis of the image.
    annotate: boolean, optional
        If annotate is True, positions and left/right annotation
        are added to the plot.
    draw_cross: boolean, optional
        If draw_cross is True, a cross is drawn on the plot to
        indicate the cut position.
    black_bg: boolean, optional
        If True, the background of the image is set to be black. If
        you wish to save figures with a black background, you
        will need to pass "facecolor='k', edgecolor='k'"
        to matplotlib.pyplot.savefig.
    colorbar: boolean, optional
        If True, display a colorbar on the right of the plots.
    kwargs: extra keyword arguments, optional
        Extra keyword arguments passed to matplotlib.pyplot.imshow
    """
    display = _plot_img_with_bg(
        img, cut_coords=cut_coords,
        output_file=output_file, display_mode=display_mode,
        figure=figure, axes=axes, title=title,
        threshold=threshold, annotate=annotate,
        draw_cross=draw_cross, resampling_interpolation='continuous',
        black_bg=black_bg, colorbar=colorbar, **kwargs)
    return display
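# Example usage (a minimal sketch; 'my_img.nii.gz' is a hypothetical path):
#
#     >>> from nilearn import plotting                    # doctest: +SKIP
#     >>> display = plotting.plot_img('my_img.nii.gz',    # doctest: +SKIP
#     ...                             display_mode='z', cut_coords=5,
#     ...                             threshold='auto', colorbar=True)
#     >>> plotting.show()                                 # doctest: +SKIP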
###############################################################################
# Anatomy image for background


# A constant class to serve as a sentinel for the default MNI template
class _MNI152Template(SpatialImage):
    """ This class is a constant pointing to the MNI152 Template
        provided by nilearn
    """
    data = None
    affine = None
    vmax = None
    _shape = None

    def __init__(self, data=None, affine=None, header=None):
        # Comply with spatial image requirements while allowing empty init
        pass

    def load(self):
        if self.data is None:
            anat_img = load_mni152_template()
            data = anat_img.get_data()
            data = data.astype(np.float)
            anat_mask = ndimage.morphology.binary_fill_holes(data > 0)
            data = np.ma.masked_array(data, np.logical_not(anat_mask))
            self.affine = anat_img.get_affine()
            self.data = data
            self.vmax = data.max()
            self._shape = anat_img.shape

    def get_data(self):
        self.load()
        return self.data

    def get_affine(self):
        self.load()
        return self.affine

    @property
    def shape(self):
        self.load()
        return self._shape

    def get_shape(self):
        self.load()
        return self._shape

    def __str__(self):
        return "<MNI152Template>"

    def __repr__(self):
        return "<MNI152Template>"


# The constant that we use as a default in functions
MNI152TEMPLATE = _MNI152Template()


def _load_anat(anat_img=MNI152TEMPLATE, dim=False, black_bg='auto'):
    """ Internal function used to load anatomy, for optional dimming
    """
    vmin = None
    vmax = None
    if anat_img is False or anat_img is None:
        if black_bg == 'auto':
            # No anatomy given: no need to turn black_bg on
            black_bg = False
        return anat_img, black_bg, vmin, vmax

    if anat_img is MNI152TEMPLATE:
        anat_img.load()
        # We special-case the 'canonical anat', as we don't need
        # to apply any transforms to it.
        vmin = 0
        vmax = anat_img.vmax
        if black_bg == 'auto':
            black_bg = False
    else:
        anat_img = _utils.check_niimg_3d(anat_img)
        if dim or black_bg == 'auto':
            # We need to inspect the values of the image
            data = anat_img.get_data()
            vmin = data.min()
            vmax = data.max()
        if black_bg == 'auto':
            # Guess if the background is rather black or light based on
            # the values of voxels near the border
            background = np.median(get_border_data(data, 2))
            if background > .5 * (vmin + vmax):
                black_bg = False
            else:
                black_bg = True
    if dim:
        vmean = .5 * (vmin + vmax)
        ptp = .5 * (vmax - vmin)
        if black_bg:
            if not isinstance(dim, numbers.Number):
                dim = .8
            vmax = vmean + (1 + dim) * ptp
        else:
            if not isinstance(dim, numbers.Number):
                dim = .6
            vmin = .5 * (2 - dim) * vmean - (1 + dim) * ptp
    return anat_img, black_bg, vmin, vmax
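# Worked example (illustrative) of the dimming arithmetic above: for an
# anatomical image spanning [0, 100] on a light background (black_bg=False)
# with the default dim=.6, vmean = 50 and ptp = 50, so the lower display
# bound becomes vmin = .5 * (2 - .6) * 50 - (1 + .6) * 50 = 35 - 80 = -45,
# which stretches the gray colormap downwards and dims the anatomy.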
###############################################################################
# Usage-specific functions


def plot_anat(anat_img=MNI152TEMPLATE, cut_coords=None,
              output_file=None, display_mode='ortho', figure=None,
              axes=None, title=None, annotate=True, threshold=None,
              draw_cross=True, black_bg='auto', dim=False, cmap=plt.cm.gray,
              vmin=None, vmax=None, **kwargs):
    """ Plot cuts of an anatomical image (by default 3 cuts:
        Frontal, Axial, and Lateral)

    Parameters
    ----------
    anat_img : Niimg-like object
        See http://nilearn.github.io/manipulating_images/input_output.html
        The anatomical image to be used as a background. If None is
        given, nilearn tries to find a T1 template.
    cut_coords : None, a tuple of floats, or an integer
        The MNI coordinates of the point where the cut is performed.
        If display_mode is 'ortho', this should be a 3-tuple: (x, y, z).
        For display_mode == 'x', 'y', or 'z', then these are the
        coordinates of each cut in the corresponding direction.
        If None is given, the cuts are calculated automatically.
        If display_mode is 'x', 'y' or 'z', cut_coords can be an integer,
        in which case it specifies the number of cuts to perform.
    output_file : string, or None, optional
        The name of an image file to export the plot to. Valid extensions
        are .png, .pdf, .svg. If output_file is not None, the plot
        is saved to a file, and the display is closed.
    display_mode : {'ortho', 'x', 'y', 'z', 'yx', 'xz', 'yz'}
        Choose the direction of the cuts: 'x' - sagittal, 'y' - coronal,
        'z' - axial, 'ortho' - three cuts are performed in orthogonal
        directions.
    figure : integer or matplotlib figure, optional
        Matplotlib figure used or its number. If None is given, a
        new figure is created.
    axes : matplotlib axes or 4 tuple of float: (xmin, ymin, width, height), optional
        The axes, or the coordinates, in matplotlib figure space,
        of the axes used to display the plot. If None, the complete
        figure is used.
    title : string, optional
        The title displayed on the figure.
    annotate : boolean, optional
        If annotate is True, positions and left/right annotation
        are added to the plot.
    threshold : a number, None, or 'auto', optional
        If None is given, the image is not thresholded.
        If a number is given, it is used to threshold the image:
        values below the threshold (in absolute value) are plotted
        as transparent. If auto is given, the threshold is determined
        magically by analysis of the image.
    draw_cross : boolean, optional
        If draw_cross is True, a cross is drawn on the plot to
        indicate the cut position.
    black_bg : boolean, optional
        If True, the background of the image is set to be black. If
        you wish to save figures with a black background, you
        will need to pass "facecolor='k', edgecolor='k'"
        to matplotlib.pyplot.savefig.
    dim : boolean or float, optional
        Dimming factor applied to background image. If True, automatic
        heuristics are applied. Accepted float values, where a typical span
        is -1 to 1 (-1 = increase contrast; 1 = decrease contrast), but
        larger values can be used for a more pronounced effect.
    cmap : matplotlib colormap, optional
        The colormap for the anat
    vmin : float
        Lower bound for plotting, passed to matplotlib.pyplot.imshow
    vmax : float
        Upper bound for plotting, passed to matplotlib.pyplot.imshow

    Notes
    -----
    Arrays should be passed in numpy convention: (x, y, z) ordered.
    """
    anat_img, black_bg, anat_vmin, anat_vmax = _load_anat(
        anat_img, dim=dim, black_bg=black_bg)

    if vmin is None:
        vmin = anat_vmin
    if vmax is None:
        vmax = anat_vmax

    display = plot_img(anat_img, cut_coords=cut_coords,
                       output_file=output_file, display_mode=display_mode,
                       figure=figure, axes=axes, title=title,
                       threshold=threshold, annotate=annotate,
                       draw_cross=draw_cross, black_bg=black_bg,
                       vmin=vmin, vmax=vmax, cmap=cmap, **kwargs)
    return display
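# Example usage (a minimal sketch; with no input image, the bundled MNI152
# template is plotted):
#
#     >>> from nilearn import plotting                    # doctest: +SKIP
#     >>> plotting.plot_anat(title='MNI152 template')     # doctest: +SKIP
#     >>> plotting.show()                                 # doctest: +SKIP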
def plot_epi(epi_img=None, cut_coords=None, output_file=None,
             display_mode='ortho', figure=None, axes=None, title=None,
             annotate=True, draw_cross=True, black_bg=True,
             cmap=plt.cm.spectral, vmin=None, vmax=None, **kwargs):
    """ Plot cuts of an EPI image (by default 3 cuts:
        Frontal, Axial, and Lateral)

    Parameters
    ----------
    epi_img : a nifti-image like object or a filename
        The EPI (T2*) image
    cut_coords : None, a tuple of floats, or an integer
        The MNI coordinates of the point where the cut is performed.
        If display_mode is 'ortho', this should be a 3-tuple: (x, y, z).
        For display_mode == 'x', 'y', or 'z', then these are the
        coordinates of each cut in the corresponding direction.
        If None is given, the cuts are calculated automatically.
        If display_mode is 'x', 'y' or 'z', cut_coords can be an integer,
        in which case it specifies the number of cuts to perform.
    output_file : string, or None, optional
        The name of an image file to export the plot to. Valid extensions
        are .png, .pdf, .svg. If output_file is not None, the plot
        is saved to a file, and the display is closed.
    display_mode : {'ortho', 'x', 'y', 'z', 'yx', 'xz', 'yz'}
        Choose the direction of the cuts: 'x' - sagittal, 'y' - coronal,
        'z' - axial, 'ortho' - three cuts are performed in orthogonal
        directions.
    figure : integer or matplotlib figure, optional
        Matplotlib figure used or its number. If None is given, a
        new figure is created.
    axes : matplotlib axes or 4 tuple of float: (xmin, ymin, width, height), optional
        The axes, or the coordinates, in matplotlib figure space,
        of the axes used to display the plot. If None, the complete
        figure is used.
    title : string, optional
        The title displayed on the figure.
    annotate : boolean, optional
        If annotate is True, positions and left/right annotation
        are added to the plot.
    draw_cross : boolean, optional
        If draw_cross is True, a cross is drawn on the plot to
        indicate the cut position.
    black_bg : boolean, optional
        If True, the background of the image is set to be black. If
        you wish to save figures with a black background, you
        will need to pass "facecolor='k', edgecolor='k'"
        to matplotlib.pyplot.savefig.
    cmap : matplotlib colormap, optional
        The colormap for specified image
    vmin : float
        Lower bound for plotting, passed to matplotlib.pyplot.imshow
    vmax : float
        Upper bound for plotting, passed to matplotlib.pyplot.imshow

    Notes
    -----
    Arrays should be passed in numpy convention: (x, y, z) ordered.
    """
    display = plot_img(epi_img, cut_coords=cut_coords,
                       output_file=output_file, display_mode=display_mode,
                       figure=figure, axes=axes, title=title,
                       threshold=None, annotate=annotate,
                       draw_cross=draw_cross, black_bg=black_bg,
                       cmap=cmap, vmin=vmin, vmax=vmax, **kwargs)
    return display
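# Example usage (a minimal sketch; 'mean_epi.nii.gz' is a hypothetical path,
# e.g. a mean functional volume computed with nilearn.image.mean_img):
#
#     >>> from nilearn import plotting                    # doctest: +SKIP
#     >>> plotting.plot_epi('mean_epi.nii.gz',            # doctest: +SKIP
#     ...                   display_mode='z', cut_coords=7)
#     >>> plotting.show()                                 # doctest: +SKIP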
def plot_roi(roi_img, bg_img=MNI152TEMPLATE, cut_coords=None,
             output_file=None, display_mode='ortho', figure=None, axes=None,
             title=None, annotate=True, draw_cross=True, black_bg='auto',
             alpha=0.7, cmap=plt.cm.gist_ncar, dim=True, vmin=None,
             vmax=None, **kwargs):
    """ Plot cuts of an ROI/mask image (by default 3 cuts: Frontal, Axial,
        and Lateral)

    Parameters
    ----------
    roi_img : Niimg-like object
        See http://nilearn.github.io/manipulating_images/input_output.html
        The ROI/mask image; it can be a binary mask or an atlas of ROIs
        with integer values.
    bg_img : Niimg-like object
        See http://nilearn.github.io/manipulating_images/input_output.html
        The background image that the ROI/mask will be plotted on top of.
        If nothing is specified, the MNI152 template will be used.
        To turn off background image, just pass "bg_img=False".
    cut_coords : None, or a tuple of floats
        The MNI coordinates of the point where the cut is performed, in
        MNI coordinates and order. If display_mode is 'ortho', this should
        be a 3-tuple: (x, y, z). For display_mode == 'x', 'y', or 'z', then
        these are the coordinates of each cut in the corresponding
        direction. If None is given, the cuts are calculated automatically.
    output_file : string, or None, optional
        The name of an image file to export the plot to. Valid extensions
        are .png, .pdf, .svg. If output_file is not None, the plot
        is saved to a file, and the display is closed.
    display_mode : {'ortho', 'x', 'y', 'z', 'yx', 'xz', 'yz'}
        Choose the direction of the cuts: 'x' - sagittal, 'y' - coronal,
        'z' - axial, 'ortho' - three cuts are performed in orthogonal
        directions.
    figure : integer or matplotlib figure, optional
        Matplotlib figure used or its number. If None is given, a
        new figure is created.
    axes : matplotlib axes or 4 tuple of float: (xmin, ymin, width, height), optional
        The axes, or the coordinates, in matplotlib figure space,
        of the axes used to display the plot. If None, the complete
        figure is used.
    title : string, optional
        The title displayed on the figure.
    annotate : boolean, optional
        If annotate is True, positions and left/right annotation
        are added to the plot.
    draw_cross : boolean, optional
        If draw_cross is True, a cross is drawn on the plot to
        indicate the cut position.
    black_bg : boolean, optional
        If True, the background of the image is set to be black. If
        you wish to save figures with a black background, you
        will need to pass "facecolor='k', edgecolor='k'"
        to matplotlib.pyplot.savefig.
    dim : boolean or float, optional
        Dimming factor applied to background image. If True, automatic
        heuristics are applied. Accepted float values, where a typical span
        is -1 to 1 (-1 = increase contrast; 1 = decrease contrast), but
        larger values can be used for a more pronounced effect.
    vmin : float
        Lower bound for plotting, passed to matplotlib.pyplot.imshow
    vmax : float
        Upper bound for plotting, passed to matplotlib.pyplot.imshow

    See Also
    --------
    nilearn.plotting.plot_prob_atlas : To simply plot probabilistic atlases
        (4D images)
    """
    bg_img, black_bg, bg_vmin, bg_vmax = _load_anat(bg_img, dim=dim,
                                                    black_bg=black_bg)

    display = _plot_img_with_bg(img=roi_img, bg_img=bg_img,
                                cut_coords=cut_coords,
                                output_file=output_file,
                                display_mode=display_mode,
                                figure=figure, axes=axes, title=title,
                                annotate=annotate, draw_cross=draw_cross,
                                black_bg=black_bg, threshold=0.5,
                                bg_vmin=bg_vmin, bg_vmax=bg_vmax,
                                resampling_interpolation='nearest',
                                alpha=alpha, cmap=cmap,
                                vmin=vmin, vmax=vmax, **kwargs)
    return display
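# Example usage (a minimal sketch; the atlas path is hypothetical, e.g. the
# max-probability image of a fetched atlas):
#
#     >>> from nilearn import plotting                    # doctest: +SKIP
#     >>> plotting.plot_roi('atlas_maxprob.nii.gz',       # doctest: +SKIP
#     ...                   title='atlas', alpha=0.5)
#     >>> plotting.show()                                 # doctest: +SKIP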
def plot_prob_atlas(maps_img, anat_img=MNI152TEMPLATE, view_type='auto',
                    threshold='auto', linewidths=2.5, cut_coords=None,
                    output_file=None, display_mode='ortho',
                    figure=None, axes=None, title=None, annotate=True,
                    draw_cross=True, black_bg='auto', dim=False,
                    colorbar=False, cmap=plt.cm.gist_rainbow,
                    vmin=None, vmax=None, alpha=0.5, **kwargs):
    """ Plot probabilistic atlases onto the anatomical image
        (by default the MNI template)

    Parameters
    ----------
    maps_img : Niimg-like object or the filename
        4D image of the probabilistic atlas maps
    anat_img : Niimg-like object
        See http://nilearn.github.io/manipulating_images/input_output.html
        The anatomical image to be used as a background.
        If nothing is specified, the MNI152 template will be used.
        To turn off background image, just pass "anat_img=False".
    view_type : {'auto', 'contours', 'filled_contours', 'continuous'}, optional
        By default view_type == 'auto', meaning that the maps will be
        displayed automatically using one of the three view types. The
        automatic selection of the view type depends on the total number
        of maps.
        If view_type == 'contours', maps are overlaid as contours.
        If view_type == 'filled_contours', maps are overlaid as contours
        along with color fillings inside the contours.
        If view_type == 'continuous', maps are overlaid as continuous
        colors irrespective of the number of maps.
    threshold : a str or a number, list of str or numbers, None
        This parameter is optional and is used to threshold the maps image
        using the given value or an automatically selected value. The
        values in the image above the threshold level will be visualized.
        The default strategy computes a threshold level that seeks to
        minimize (yet not eliminate completely) the overlap between
        several maps for a better visualization.
        The threshold can also be expressed as a percentile over the
        values of the whole atlas. In that case, the value must be
        specified as a string ending with a percent sign, e.g., "25.3%".
        If a single string is provided, the same percentile will be applied
        over the whole atlas. Otherwise, if a list of percentiles is
        provided, each 3D map is thresholded with the corresponding
        percentile, in sequence. The number of percentiles given should
        match the number of 3D maps along the time (4th) dimension.
        If a number or a list of numbers, the given value will be used
        directly to threshold the maps without any percentile calculation.
        If None, a very small threshold is applied to remove numerical
        noise from the maps background.
    linewidths : float, optional
        This option can be used to set the boundary thickness of the
        contours.
    cut_coords : None, a tuple of floats, or an integer
        The MNI coordinates of the point where the cut is performed.
        If display_mode is 'ortho', this should be a 3-tuple: (x, y, z).
        For display_mode == 'x', 'y', or 'z', then these are the
        coordinates of each cut in the corresponding direction.
        If None is given, the cuts are calculated automatically.
        If display_mode is 'x', 'y' or 'z', cut_coords can be an integer,
        in which case it specifies the number of cuts to perform.
    output_file : string, or None, optional
        The name of an image file to export the plot to. Valid extensions
        are .png, .pdf, .svg. If output_file is not None, the plot
        is saved to a file, and the display is closed.
    display_mode : {'ortho', 'x', 'y', 'z', 'yx', 'xz', 'yz'}
        Choose the direction of the cuts: 'x' - sagittal, 'y' - coronal,
        'z' - axial, 'ortho' - three cuts are performed in orthogonal
        directions.
    figure : integer or matplotlib figure, optional
        Matplotlib figure used or its number. If None is given, a
        new figure is created.
    axes : matplotlib axes or 4 tuple of float: (xmin, ymin, width, height), optional
        The axes, or the coordinates, in matplotlib figure space,
        of the axes used to display the plot. If None, the complete
        figure is used.
    title : string, optional
        The title displayed on the figure.
    annotate : boolean, optional
        If annotate is True, positions and left/right annotation
        are added to the plot.
    draw_cross : boolean, optional
        If draw_cross is True, a cross is drawn on the plot to
        indicate the cut position.
    black_bg: boolean, optional
        If True, the background of the image is set to be black. If
        you wish to save figures with a black background, you
        will need to pass "facecolor='k', edgecolor='k'"
        to matplotlib.pyplot.savefig.
    dim : boolean or float, optional
        Dimming factor applied to background image. If True, automatic
        heuristics are applied. Accepted float values, where a typical span
        is -1 to 1 (-1 = increase contrast; 1 = decrease contrast), but
        larger values can be used for a more pronounced effect.
    cmap : matplotlib colormap, optional
        The colormap for the atlas maps
    colorbar : boolean, optional
        If True, display a colorbar on the right of the plots.
    vmin : float
        Lower bound for plotting, passed to matplotlib.pyplot.imshow
    vmax : float
        Upper bound for plotting, passed to matplotlib.pyplot.imshow
    alpha : float between 0 and 1
        Alpha sets the transparency of the color inside the filled
        contours.

    See Also
    --------
    nilearn.plotting.plot_roi : To simply plot max-prob atlases (3D images)
    """
    display = plot_anat(anat_img, cut_coords=cut_coords,
                        display_mode=display_mode,
                        figure=figure, axes=axes, title=title,
                        annotate=annotate, draw_cross=draw_cross,
                        black_bg=black_bg, **kwargs)

    maps_img = _utils.check_niimg_4d(maps_img)
    n_maps = maps_img.shape[3]

    valid_view_types = ['auto', 'contours', 'filled_contours', 'continuous']
    if view_type not in valid_view_types:
        raise ValueError(
            'Unknown view type: %s. 
Valid view types are %s' % (str(view_type), str(valid_view_types)) ) cmap = plt.cm.get_cmap(cmap) color_list = cmap(np.linspace(0, 1, n_maps)) if view_type == 'auto': if n_maps > 20: view_type = 'contours' elif n_maps > 10: view_type = 'filled_contours' else: view_type = 'continuous' if threshold is None: threshold = 1e-6 elif threshold == 'auto': # it will use default percentage, # strategy is to avoid maximum overlaps as possible if view_type == 'contours': correction_factor = 1 elif view_type == 'filled_contours': correction_factor = .8 else: correction_factor = .5 threshold = "%f%%" % (100 * (1 - .2 * correction_factor / n_maps)) if (isinstance(threshold, collections.Iterable) and not isinstance(threshold, _basestring)): threshold = [thr for thr in threshold] if len(threshold) != n_maps: raise TypeError('The list of values to threshold ' 'should be equal to number of maps') else: threshold = [threshold] * n_maps filled = view_type.startswith('filled') for (map_img, color, thr) in zip(iter_img(maps_img), color_list, threshold): data = map_img.get_data() # To threshold or choose the level of the contours thr = check_threshold(thr, data, percentile_func=fast_abs_percentile, name='threshold') # Get rid of background values in all cases thr = max(thr, 1e-6) if view_type == 'continuous': display.add_overlay(map_img, threshold=thr, cmap=cm.alpha_cmap(color)) else: display.add_contours(map_img, levels=[thr], linewidths=linewidths, colors=[color], filled=filled, alpha=alpha, linestyles='solid', ) if colorbar: display._colorbar = True # Create a colormap from color list to feed display cmap = matplotlib.colors.LinearSegmentedColormap.from_list( 'segmented colors', color_list, n_maps + 1) display._show_colorbar(cmap, matplotlib.colors.Normalize(1, n_maps + 1)) tick_locator = matplotlib.ticker.MaxNLocator(nbins=10) display.locator = tick_locator display._cbar.update_ticks() tick_location = np.round(np.linspace(1, n_maps, min(n_maps, 10))).astype('int') display._cbar.set_ticks(tick_location + .5) display._cbar.set_ticklabels(tick_location) left, bottom, width, height = display._colorbar_ax.\ get_position().bounds display._colorbar_ax.set_position([left, bottom, width, height * 0.95]) display._colorbar_ax.annotate('Map #', xy=(1, 1.03), ha='right', va='bottom', xycoords='axes fraction') if output_file is not None: display.savefig(output_file) display.close() display = None return display def plot_stat_map(stat_map_img, bg_img=MNI152TEMPLATE, cut_coords=None, output_file=None, display_mode='ortho', colorbar=True, figure=None, axes=None, title=None, threshold=1e-6, annotate=True, draw_cross=True, black_bg='auto', cmap=cm.cold_hot, symmetric_cbar="auto", dim=True, vmax=None, **kwargs): """ Plot cuts of an ROI/mask image (by default 3 cuts: Frontal, Axial, and Lateral) Parameters ---------- stat_map_img : Niimg-like object See http://nilearn.github.io/manipulating_images/input_output.html The statistical map image bg_img : Niimg-like object See http://nilearn.github.io/manipulating_images/input_output.html The background image that the ROI/mask will be plotted on top of. If nothing is specified, the MNI152 template will be used. To turn off background image, just pass "bg_img=False". cut_coords : None, a tuple of floats, or an integer The MNI coordinates of the point where the cut is performed If display_mode is 'ortho', this should be a 3-tuple: (x, y, z) For display_mode == 'x', 'y', or 'z', then these are the coordinates of each cut in the corresponding direction. 
If None is given, the cuts are calculated automatically.
        If display_mode is 'x', 'y' or 'z', cut_coords can be an integer,
        in which case it specifies the number of cuts to perform.
    output_file : string, or None, optional
        The name of an image file to export the plot to. Valid extensions
        are .png, .pdf, .svg. If output_file is not None, the plot
        is saved to a file, and the display is closed.
    display_mode : {'ortho', 'x', 'y', 'z', 'yx', 'xz', 'yz'}
        Choose the direction of the cuts: 'x' - sagittal, 'y' - coronal,
        'z' - axial, 'ortho' - three cuts are performed in orthogonal
        directions.
    colorbar : boolean, optional
        If True, display a colorbar on the right of the plots.
    figure : integer or matplotlib figure, optional
        Matplotlib figure used or its number. If None is given, a
        new figure is created.
    axes : matplotlib axes or 4 tuple of float: (xmin, ymin, width, height), optional
        The axes, or the coordinates, in matplotlib figure space,
        of the axes used to display the plot. If None, the complete
        figure is used.
    title : string, optional
        The title displayed on the figure.
    threshold : a number, None, or 'auto'
        If None is given, the image is not thresholded.
        If a number is given, it is used to threshold the image:
        values below the threshold (in absolute value) are plotted
        as transparent. If auto is given, the threshold is determined
        magically by analysis of the image.
    annotate : boolean, optional
        If annotate is True, positions and left/right annotation
        are added to the plot.
    draw_cross : boolean, optional
        If draw_cross is True, a cross is drawn on the plot to
        indicate the cut position.
    black_bg : boolean, optional
        If True, the background of the image is set to be black. If
        you wish to save figures with a black background, you
        will need to pass "facecolor='k', edgecolor='k'"
        to matplotlib.pyplot.savefig.
    cmap : matplotlib colormap, optional
        The colormap for specified image. The colormap *must* be
        symmetrical.
    symmetric_cbar : boolean or 'auto', optional, default 'auto'
        Specifies whether the colorbar should range from -vmax to vmax
        or from vmin to vmax. Setting to 'auto' will select the latter
        if the range of the whole image is either positive or negative.
        Note: The colormap will always be set to range from -vmax to vmax.
    vmax : float
        Upper bound for plotting, passed to matplotlib.pyplot.imshow

    Notes
    -----
    Arrays should be passed in numpy convention: (x, y, z) ordered.
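    Examples
    --------
    A minimal sketch ('zmap.nii.gz' is an illustrative path); by default
    the map is overlaid on the MNI152 template:

    >>> from nilearn import plotting                        # doctest: +SKIP
    >>> plotting.plot_stat_map('zmap.nii.gz', threshold=3,  # doctest: +SKIP
    ...                        display_mode='z', cut_coords=5)
    >>> plotting.show()                                     # doctest: +SKIP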
See Also -------- nilearn.plotting.plot_anat : To simply plot anatomical images nilearn.plotting.plot_epi : To simply plot raw EPI images nilearn.plotting.plot_glass_brain : To plot maps in a glass brain """ # dim the background bg_img, black_bg, bg_vmin, bg_vmax = _load_anat(bg_img, dim=dim, black_bg=black_bg) stat_map_img = _utils.check_niimg_3d(stat_map_img, dtype='auto') cbar_vmin, cbar_vmax, vmin, vmax = _get_colorbar_and_data_ranges( _safe_get_data(stat_map_img), vmax, symmetric_cbar, kwargs) display = _plot_img_with_bg(img=stat_map_img, bg_img=bg_img, cut_coords=cut_coords, output_file=output_file, display_mode=display_mode, figure=figure, axes=axes, title=title, annotate=annotate, draw_cross=draw_cross, black_bg=black_bg, threshold=threshold, bg_vmin=bg_vmin, bg_vmax=bg_vmax, cmap=cmap, vmin=vmin, vmax=vmax, colorbar=colorbar, cbar_vmin=cbar_vmin, cbar_vmax=cbar_vmax, resampling_interpolation='continuous', **kwargs) return display def plot_glass_brain(stat_map_img, output_file=None, display_mode='ortho', colorbar=False, figure=None, axes=None, title=None, threshold='auto', annotate=True, black_bg=False, cmap=None, alpha=0.7, vmin=None, vmax=None, plot_abs=True, symmetric_cbar="auto", **kwargs): """Plot 2d projections of an ROI/mask image (by default 3 projections: Frontal, Axial, and Lateral). The brain glass schematics are added on top of the image. The plotted image should be in MNI space for this function to work properly. Parameters ---------- stat_map_img : Niimg-like object See http://nilearn.github.io/manipulating_images/input_output.html The statistical map image. It needs to be in MNI space in order to align with the brain schematics. output_file : string, or None, optional The name of an image file to export the plot to. Valid extensions are .png, .pdf, .svg. If output_file is not None, the plot is saved to a file, and the display is closed. display_mode : string, optional. Default is 'ortho' Choose the direction of the cuts: 'x' - sagittal, 'y' - coronal, 'z' - axial, 'l' - sagittal left hemisphere only, 'r' - sagittal right hemisphere only, 'ortho' - three cuts are performed in orthogonal directions. Possible values are: 'ortho', 'x', 'y', 'z', 'xz', 'yx', 'yz', 'l', 'r', 'lr', 'lzr', 'lyr', 'lzry', 'lyrz'. colorbar : boolean, optional If True, display a colorbar on the right of the plots. figure : integer or matplotlib figure, optional Matplotlib figure used or its number. If None is given, a new figure is created. axes : matplotlib axes or 4 tuple of float: (xmin, ymin, width, height), optional The axes, or the coordinates, in matplotlib figure space, of the axes used to display the plot. If None, the complete figure is used. title : string, optional The title displayed on the figure. threshold : a number, None, or 'auto' If None is given, the image is not thresholded. If a number is given, it is used to threshold the image: values below the threshold (in absolute value) are plotted as transparent. If auto is given, the threshold is determined magically by analysis of the image. annotate : boolean, optional If annotate is True, positions and left/right annotation are added to the plot. black_bg : boolean, optional If True, the background of the image is set to be black. If you wish to save figures with a black background, you will need to pass "facecolor='k', edgecolor='k'" to matplotlib.pyplot.savefig. 
cmap : matplotlib colormap, optional
        The colormap for specified image
    alpha : float between 0 and 1
        Alpha transparency for the brain schematics
    vmin : float
        Lower bound for plotting, passed to matplotlib.pyplot.imshow
    vmax : float
        Upper bound for plotting, passed to matplotlib.pyplot.imshow
    plot_abs : boolean, optional
        If set to True (default), the maximum intensity projection of the
        absolute value will be used (rendering positive and negative
        values in the same manner). If set to False, the sign of the
        maximum intensity will be represented with different colors.
        See http://nilearn.github.io/auto_examples/01_plotting/plot_demo_glass_brain_extensive.html
        for examples.
    symmetric_cbar : boolean or 'auto', optional, default 'auto'
        Specifies whether the colorbar should range from -vmax to vmax
        or from vmin to vmax. Setting to 'auto' will select the latter
        if the range of the whole image is either positive or negative.
        Note: The colormap will always be set to range from -vmax to vmax.

    Notes
    -----
    Arrays should be passed in numpy convention: (x, y, z) ordered.
    """
    if cmap is None:
        cmap = cm.cold_hot if black_bg else cm.cold_white_hot

    if stat_map_img:
        stat_map_img = _utils.check_niimg_3d(stat_map_img, dtype='auto')
        if plot_abs:
            cbar_vmin, cbar_vmax, vmin, vmax = _get_colorbar_and_data_ranges(
                _safe_get_data(stat_map_img), vmax, symmetric_cbar, kwargs,
                0)
        else:
            cbar_vmin, cbar_vmax, vmin, vmax = _get_colorbar_and_data_ranges(
                _safe_get_data(stat_map_img), vmax, symmetric_cbar, kwargs)
    else:
        cbar_vmin, cbar_vmax = None, None

    def display_factory(display_mode):
        return functools.partial(get_projector(display_mode),
                                 alpha=alpha, plot_abs=plot_abs)

    display = _plot_img_with_bg(img=stat_map_img,
                                output_file=output_file,
                                display_mode=display_mode,
                                figure=figure, axes=axes, title=title,
                                annotate=annotate,
                                black_bg=black_bg, threshold=threshold,
                                cmap=cmap, colorbar=colorbar,
                                display_factory=display_factory,
                                resampling_interpolation='continuous',
                                vmin=vmin, vmax=vmax,
                                cbar_vmin=cbar_vmin, cbar_vmax=cbar_vmax,
                                **kwargs)

    return display
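# Example usage (a minimal sketch; 'zmap.nii.gz' is a hypothetical
# statistical map in MNI space):
#
#     >>> from nilearn import plotting                    # doctest: +SKIP
#     >>> plotting.plot_glass_brain('zmap.nii.gz',        # doctest: +SKIP
#     ...                           display_mode='lyrz', threshold=3,
#     ...                           colorbar=True)
#     >>> plotting.show()                                 # doctest: +SKIP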
"25.3%", and only the edges with a abs(value) above the given percentile will be shown. output_file : string, or None, optional The name of an image file to export the plot to. Valid extensions are .png, .pdf, .svg. If output_file is not None, the plot is saved to a file, and the display is closed. display_mode : string, optional. Default is 'ortho'. Choose the direction of the cuts: 'x' - sagittal, 'y' - coronal, 'z' - axial, 'l' - sagittal left hemisphere only, 'r' - sagittal right hemisphere only, 'ortho' - three cuts are performed in orthogonal directions. Possible values are: 'ortho', 'x', 'y', 'z', 'xz', 'yx', 'yz', 'l', 'r', 'lr', 'lzr', 'lyr', 'lzry', 'lyrz'. figure : integer or matplotlib figure, optional Matplotlib figure used or its number. If None is given, a new figure is created. axes : matplotlib axes or 4 tuple of float: (xmin, ymin, width, height), optional The axes, or the coordinates, in matplotlib figure space, of the axes used to display the plot. If None, the complete figure is used. title : string, optional The title displayed on the figure. annotate : boolean, optional If annotate is True, positions and left/right annotation are added to the plot. black_bg : boolean, optional If True, the background of the image is set to be black. If you wish to save figures with a black background, you will need to pass "facecolor='k', edgecolor='k'" to matplotlib.pyplot.savefig. alpha : float between 0 and 1 Alpha transparency for the brain schematics. edge_kwargs : dict will be passed as kwargs for each edge matlotlib Line2D. node_kwargs : dict will be passed as kwargs to the plt.scatter call that plots all the nodes in one go colorbar : bool, optional If True, display a colorbar on the right of the plots. By default it is False. """ display = plot_glass_brain(None, display_mode=display_mode, figure=figure, axes=axes, title=title, annotate=annotate, black_bg=black_bg) display.add_graph(adjacency_matrix, node_coords, node_color=node_color, node_size=node_size, edge_cmap=edge_cmap, edge_vmin=edge_vmin, edge_vmax=edge_vmax, edge_threshold=edge_threshold, edge_kwargs=edge_kwargs, node_kwargs=node_kwargs, colorbar=colorbar) if output_file is not None: display.savefig(output_file) display.close() display = None return display PKH.nilearn/plotting/glass_brain_files/__init__.pyPKH7QHH;nilearn/plotting/glass_brain_files/svg_to_json_converter.py"""SVG to JSON converter. The main assumption is that the SVG only contains Bezier curves and segments. The output JSON is used for plotting glass brain schematics. 
""" import re import sys import json class SVGToJSONConverter(object): """Reads an svg file and exports paths to a JSON format Only segments and Bezier curves are supported """ def __init__(self, filename): self.filename = filename self.svg = svg.parse(filename) self.paths = self.svg.flatten() def _get_style_attr(self, style, attr): pat = r'{}:([^;]+)'.format(attr) match = re.search(pat, style) return match.group(1) if match is not None else None def _type_and_pts(self, obj): if isinstance(obj, svg.Bezier): my_type = 'bezier' pts = [p.coord() for p in obj.pts] elif isinstance(obj, svg.Segment): my_type = 'segment' pts = [p.coord() for p in (obj.start, obj.end)] else: msg = '{0} is not a supported class'.format(obj.__class__) raise TypeError(msg) # svg has its origin in the top left whereas # matplotlib has its origin at the bottom left # need to apply a mirror symmetry pt_min, pt_max = self.svg.bbox() y_range = pt_max.y - pt_min.y pts = [(x, y_range - y) for x, y in pts] return {'type': my_type, 'pts': pts} def _get_paths(self): result = [] for path in self.paths: style = path.style edgecolor = self._get_style_attr(style, 'stroke') linewidth = float(self._get_style_attr(style, 'stroke-width')) path_id = path.id path_dict = {'edgecolor': edgecolor, 'linewidth': linewidth, 'id': path_id, 'items': []} # svg.MoveTo instances do not hold any information since they # just contain the first point of the next item filtered_items = [i for i in path.items if not isinstance(i, svg.MoveTo)] for geom in filtered_items: path_dict['items'].append(self._type_and_pts(geom)) result.append(path_dict) return result def _get_bounds(self, paths): points = [pt for path in paths for item in path['items'] for pt in item['pts']] x_coords = [pt[0] for pt in points] y_coords = [pt[1] for pt in points] xmin, xmax = min(x_coords), max(x_coords) ymin, ymax = min(y_coords), max(y_coords) return xmin, xmax, ymin, ymax def to_json(self): """Exports the svg paths into json. The json format looks like this: { "paths": [ { "edgecolor": "#b3b3b3", "linewidth": 3.03045774, "id": "path3943", "items": [ { "pts": [ [ 571.83955, 751.5887290000001 ], [ 571.57463, 750.8480390000001 ], [ 571.44965, 747.969189 ], [ 571.56178, 745.191269 ] ], "type": "bezier" }, { "pts": [ [ 566.41278, 705.415739 ], [ 566.7642900000001, 696.532339 ] ], "type": "segment" }, . . . ] }, . . . ], "metadata": { "bounds": [ 1.3884929999999542, 398.60061299999995, -0.9977599999999711, 490.82066700000007 ] } } """ paths = self._get_paths() bounds = self._get_bounds(paths) metadata = {'bounds': bounds} result = {'metadata': metadata, 'paths': paths} return json.dumps(result, indent=2) def save_json(self, filename): json_content = self.to_json() with open(filename, 'w') as f: f.write(json_content) def _import_svg(): try: import svg return svg except ImportError as exc: exc.args += ('Could not import svg (https://github.com/cjlano/svg)' ' which is required to parse the svg file', ) raise if __name__ == '__main__': svg = _import_svg() svg_filename = sys.argv[1] json_filename = sys.argv[2] converter = SVGToJSONConverter(svg_filename) converter.save_json(json_filename) PKHJm4nilearn/plotting/glass_brain_files/plot_align_svg.py"""The goal of this script is to align the glass brain SVGs on top of the anatomy. This is only useful for internal purposes especially when the SVG is modified. 
""" from nilearn import plotting from nilearn.plotting import img_plotting, glass_brain, show # plotting anat for coarse alignment bg_img, _, _, _ = img_plotting._load_anat() display = img_plotting.plot_glass_brain(bg_img, threshold=0, black_bg=True, title='anat', alpha=1) display = img_plotting.plot_glass_brain(bg_img, threshold=0, black_bg=True, title='anat', alpha=1, display_mode='ortho') display = img_plotting.plot_glass_brain(bg_img, threshold=0, title='anat', alpha=1) # checking hemispheres plotting display = img_plotting.plot_glass_brain(bg_img, threshold=0, black_bg=True, title='anat', alpha=1, display_mode='lyrz') # plotting slices for finer alignment # e.g. parieto-occipital sulcus def add_brain_schematics(display): for axes in display.axes.values(): kwargs = {'alpha': 0.5, 'linewidth': 1, 'edgecolor': 'orange'} object_bounds = glass_brain.plot_brain_schematics(axes.ax, axes.direction, **kwargs) axes.add_object_bounds(object_bounds) # side display = plotting.plot_anat(display_mode='x', cut_coords=[-2]) add_brain_schematics(display) # top display = plotting.plot_anat(display_mode='z', cut_coords=[20]) add_brain_schematics(display) # front display = plotting.plot_anat(display_mode='y', cut_coords=[-20]) add_brain_schematics(display) # all in one display = plotting.plot_anat(display_mode='ortho', cut_coords=(-2, -20, 20)) add_brain_schematics(display) # Plot multiple slices display = plotting.plot_anat(display_mode='x') add_brain_schematics(display) display = plotting.plot_anat(display_mode='y') add_brain_schematics(display) display = plotting.plot_anat(display_mode='z') add_brain_schematics(display) show() PKHl++>nilearn/plotting/glass_brain_files/brain_schematics_front.json{ "paths": [ { "edgecolor": "#a6a6a6", "linewidth": 1.7, "id": "path4189-1", "items": [ { "pts": [ [ 69.89360999999997, 177.3992100000001 ], [ 67.53782999999999, 179.42993000000007 ], [ 65.96706999999998, 184.42228000000011 ], [ 65.95015999999998, 189.93283000000008 ] ], "type": "bezier" }, { "pts": [ [ 65.95015999999998, 189.93283000000008 ], [ 65.93455999999998, 195.0336000000001 ], [ 65.87235999999996, 195.2131700000001 ], [ 64.26596, 194.8010700000001 ] ], "type": "bezier" }, { "pts": [ [ 64.26596, 194.8010700000001 ], [ 63.34861999999998, 194.5657200000001 ], [ 60.060569999999984, 194.1371400000001 ], [ 56.95932000000005, 193.84867000000008 ] ], "type": "bezier" }, { "pts": [ [ 56.95932000000005, 193.84867000000008 ], [ 50.447860000000105, 193.24299000000008 ], [ 47.01378, 194.1266700000001 ], [ 44.359620000000064, 197.0909000000001 ] ], "type": "bezier" }, { "pts": [ [ 44.359620000000064, 197.0909000000001 ], [ 41.4848300000001, 200.3015000000001 ], [ 35.986810000000105, 202.7540600000001 ], [ 29.683020000000056, 203.63778000000008 ] ], "type": "bezier" }, { "pts": [ [ 29.683020000000056, 203.63778000000008 ], [ 24.70963000000006, 204.3350000000001 ], [ 23.50201000000004, 204.2721600000001 ], [ 21.53771000000006, 203.21406000000007 ] ], "type": "bezier" }, { "pts": [ [ 21.53771000000006, 203.21406000000007 ], [ 18.864670000000046, 201.77418000000006 ], [ 18.887190000000032, 201.85592000000008 ], [ 20.351150000000075, 198.9071100000001 ] ], "type": "bezier" }, { "pts": [ [ 20.351150000000075, 198.9071100000001 ], [ 21.800170000000094, 195.9881900000001 ], [ 22.95019000000002, 195.3979700000001 ], [ 31.641410000000064, 193.11259000000013 ] ], "type": "bezier" }, { "pts": [ [ 31.641410000000064, 193.11259000000013 ], [ 39.76701000000003, 190.97593000000012 ], [ 42.592820000000074, 189.60647000000012 ], [ 
57.530020000000036, 180.56629000000015 ] ], "type": "bezier" }, { "pts": [ [ 57.530020000000036, 180.56629000000015 ], [ 62.282560000000046, 177.52081000000015 ], [ 68.69086000000004, 176.51202000000018 ], [ 72.16303000000005, 171.92633000000018 ] ], "type": "bezier" }, { "pts": [ [ 72.16303000000005, 171.92633000000018 ], [ 73.88147000000004, 169.65686000000017 ], [ 74.74028000000004, 167.31025000000017 ], [ 74.46795000000003, 163.59400000000016 ] ], "type": "bezier" }, { "pts": [ [ 74.46795000000003, 163.59400000000016 ], [ 73.85926, 155.29093000000017 ], [ 73.70266000000004, 154.69263000000018 ], [ 71.38788000000005, 151.82692000000014 ] ], "type": "bezier" }, { "pts": [ [ 71.38788000000005, 151.82692000000014 ], [ 68.45289000000002, 148.19357000000014 ], [ 62.26783000000006, 146.58689000000015 ], [ 51.10823000000005, 146.55892000000017 ] ], "type": "bezier" }, { "pts": [ [ 51.10823000000005, 146.55892000000017 ], [ 40.5771400000001, 146.53252000000015 ], [ 37.394010000000094, 145.86057000000017 ], [ 37.394010000000094, 143.66381000000018 ] ], "type": "bezier" }, { "pts": [ [ 37.394010000000094, 143.66381000000018 ], [ 37.394010000000094, 142.59645000000017 ], [ 38.90592000000004, 141.40348000000017 ], [ 42.825190000000134, 139.37826000000018 ] ], "type": "bezier" }, { "pts": [ [ 42.825190000000134, 139.37826000000018 ], [ 54.6225300000001, 133.2821600000002 ], [ 58.3289400000001, 126.02981000000017 ], [ 55.17746000000011, 115.20819000000017 ] ], "type": "bezier" }, { "pts": [ [ 55.17746000000011, 115.20819000000017 ], [ 54.57023000000015, 113.12329000000017 ], [ 54.073520000000144, 110.9514500000002 ], [ 54.073520000000144, 110.38187000000016 ] ], "type": "bezier" }, { "pts": [ [ 54.073520000000144, 110.38187000000016 ], [ 54.073520000000144, 108.68374000000017 ], [ 55.892430000000104, 109.20173000000017 ], [ 57.893990000000144, 111.46987000000018 ] ], "type": "bezier" }, { "pts": [ [ 57.893990000000144, 111.46987000000018 ], [ 57.893990000000144, 111.46987000000018 ] ], "type": "segment" }, { "pts": [ [ 57.893990000000144, 111.46987000000018 ], [ 57.89347000000009, 111.46987000000018 ] ], "type": "segment" }, { "pts": [ [ 57.89347000000009, 111.46987000000018 ], [ 58.92414000000008, 112.63783000000018 ], [ 60.55187000000012, 114.95878000000016 ], [ 61.5105200000001, 116.6275100000002 ] ], "type": "bezier" }, { "pts": [ [ 61.5105200000001, 116.6275100000002 ], [ 64.7745000000001, 122.30880000000019 ], [ 70.2420100000001, 128.44351000000017 ], [ 73.4854400000001, 130.06369000000018 ] ], "type": "bezier" }, { "pts": [ [ 73.4854400000001, 130.06369000000018 ], [ 75.2440400000001, 130.9421700000002 ], [ 78.35079000000007, 131.88622000000015 ], [ 80.38941000000011, 132.16157000000015 ] ], "type": "bezier" }, { "pts": [ [ 80.38941000000011, 132.16157000000015 ], [ 84.09592000000009, 132.66223000000014 ] ], "type": "segment" }, { "pts": [ [ 84.09592000000009, 132.66223000000014 ], [ 84.1364200000001, 137.41372000000013 ] ], "type": "segment" }, { "pts": [ [ 84.1364200000001, 137.41372000000013 ], [ 84.21112000000011, 146.2786000000001 ], [ 90.35988000000009, 154.1454700000001 ], [ 99.29333000000008, 156.80452000000014 ] ], "type": "bezier" }, { "pts": [ [ 99.29333000000008, 156.80452000000014 ], [ 100.31259000000006, 157.10791000000012 ], [ 104.69317000000007, 159.07890000000015 ], [ 109.0279900000001, 161.18449000000015 ] ], "type": "bezier" }, { "pts": [ [ 109.0279900000001, 161.18449000000015 ], [ 118.4255500000001, 165.74931000000015 ], [ 121.28851000000009, 166.38061000000016 ], [ 
124.77552000000009, 164.65697000000017 ] ], "type": "bezier" }, { "pts": [ [ 124.77552000000009, 164.65697000000017 ], [ 127.55525000000006, 163.28287000000017 ], [ 133.12312000000009, 163.00239000000016 ], [ 134.8636600000001, 164.14877000000018 ] ], "type": "bezier" }, { "pts": [ [ 134.8636600000001, 164.14877000000018 ], [ 136.65465000000012, 165.3284100000002 ], [ 134.10376000000008, 178.7100700000002 ], [ 130.09514000000007, 189.16467000000017 ] ], "type": "bezier" }, { "pts": [ [ 130.09514000000007, 189.16467000000017 ], [ 126.22890000000007, 199.24777000000017 ], [ 125.6872600000001, 201.3343500000002 ], [ 124.21053000000006, 211.83402000000018 ] ], "type": "bezier" }, { "pts": [ [ 124.21053000000006, 211.83402000000018 ], [ 122.64195000000007, 222.9875200000002 ], [ 124.19603000000006, 237.53303000000017 ], [ 126.95611000000008, 237.53303000000017 ] ], "type": "bezier" }, { "pts": [ [ 126.95611000000008, 237.53303000000017 ], [ 128.4155100000001, 237.53303000000017 ], [ 132.45164000000005, 233.72112000000016 ], [ 133.24288000000007, 231.5954300000002 ] ], "type": "bezier" }, { "pts": [ [ 133.24288000000007, 231.5954300000002 ], [ 133.68728000000004, 230.4016000000002 ], [ 134.42798000000005, 227.42206000000022 ], [ 134.88878000000005, 224.9742300000002 ] ], "type": "bezier" }, { "pts": [ [ 134.88878000000005, 224.9742300000002 ], [ 135.34968000000003, 222.5264000000002 ], [ 136.03174000000007, 220.72001000000023 ], [ 136.40453000000008, 220.9600300000002 ] ], "type": "bezier" }, { "pts": [ [ 136.40453000000008, 220.9600300000002 ], [ 137.29458000000005, 221.53305000000017 ], [ 140.4287300000001, 227.13196000000016 ], [ 141.86488000000008, 230.71477000000021 ] ], "type": "bezier" }, { "pts": [ [ 141.86488000000008, 230.71477000000021 ], [ 143.0010900000001, 233.54910000000024 ] ], "type": "segment" }, { "pts": [ [ 143.0010900000001, 233.54910000000024 ], [ 139.25598000000008, 236.35189000000025 ] ], "type": "segment" }, { "pts": [ [ 139.25598000000008, 236.35189000000025 ], [ 134.18866000000008, 240.14416000000023 ], [ 133.27371000000005, 242.61184000000026 ], [ 134.21481000000006, 249.94907000000023 ] ], "type": "bezier" }, { "pts": [ [ 134.21481000000006, 249.94907000000023 ], [ 135.07123000000007, 256.62578000000025 ], [ 137.89216000000005, 263.28329000000025 ], [ 140.66348000000005, 265.16809000000023 ] ], "type": "bezier" }, { "pts": [ [ 140.66348000000005, 265.16809000000023 ], [ 143.37855000000008, 267.01463000000024 ], [ 152.57883000000004, 266.66110000000026 ], [ 156.76658000000003, 264.54932000000025 ] ], "type": "bezier" }, { "pts": [ [ 156.76658000000003, 264.54932000000025 ], [ 160.71128000000004, 262.56011000000024 ], [ 160.89508, 262.58406000000025 ], [ 163.46309000000002, 265.42209000000025 ] ], "type": "bezier" }, { "pts": [ [ 163.46309000000002, 265.42209000000025 ], [ 164.66064, 266.74549000000025 ], [ 167.39180000000005, 269.18891000000025 ], [ 169.53233, 270.85193000000027 ] ], "type": "bezier" }, { "pts": [ [ 169.53233, 270.85193000000027 ], [ 172.93974000000003, 273.4992100000003 ], [ 173.42419999999998, 274.21376000000026 ], [ 173.42419999999998, 276.59230000000025 ] ], "type": "bezier" }, { "pts": [ [ 173.42419999999998, 276.59230000000025 ], [ 173.42419999999998, 280.02975000000026 ], [ 171.55298999999997, 283.8686300000003 ], [ 169.88394999999997, 283.85563000000025 ] ], "type": "bezier" }, { "pts": [ [ 169.88394999999997, 283.85563000000025 ], [ 169.18080999999995, 283.85063000000025 ], [ 166.49095999999997, 283.1551500000003 ], [ 163.90644999999995, 
282.3112100000003 ] ], "type": "bezier" }, { "pts": [ [ 163.90644999999995, 282.3112100000003 ], [ 158.27226999999993, 280.47145000000023 ], [ 153.59008999999998, 280.3443500000003 ], [ 151.36103999999995, 281.9706600000003 ] ], "type": "bezier" }, { "pts": [ [ 151.36103999999995, 281.9706600000003 ], [ 150.46112999999997, 282.6273200000003 ], [ 148.84429999999992, 284.72963000000027 ], [ 147.76805999999993, 286.6424900000003 ] ], "type": "bezier" }, { "pts": [ [ 147.76805999999993, 286.6424900000003 ], [ 145.97333999999995, 289.83259000000027 ], [ 145.85367999999994, 290.5666600000003 ], [ 146.32163999999995, 295.51318000000026 ] ], "type": "bezier" }, { "pts": [ [ 146.32163999999995, 295.51318000000026 ], [ 147.53008999999997, 308.28824000000026 ], [ 151.08808999999997, 315.52287000000024 ], [ 161.29131999999993, 325.95100000000025 ] ], "type": "bezier" }, { "pts": [ [ 161.29131999999993, 325.95100000000025 ], [ 169.86567999999994, 334.7143900000002 ], [ 171.13826999999992, 337.3039900000002 ], [ 171.75421999999992, 347.2417400000003 ] ], "type": "bezier" }, { "pts": [ [ 171.75421999999992, 347.2417400000003 ], [ 172.03640999999993, 351.7940000000002 ], [ 171.83721999999995, 353.6521100000002 ], [ 170.85234999999994, 355.63512000000026 ] ], "type": "bezier" }, { "pts": [ [ 170.85234999999994, 355.63512000000026 ], [ 168.46761999999995, 360.43900000000025 ], [ 164.53111999999993, 365.51269000000025 ], [ 161.64532999999994, 367.50201000000027 ] ], "type": "bezier" }, { "pts": [ [ 161.64532999999994, 367.50201000000027 ], [ 159.84924999999993, 368.74017000000026 ], [ 157.51786999999996, 371.6257800000003 ], [ 155.47811999999993, 375.1352900000003 ] ], "type": "bezier" }, { "pts": [ [ 155.47811999999993, 375.1352900000003 ], [ 152.32496999999995, 380.5607600000003 ], [ 152.08004999999991, 380.7869500000003 ], [ 149.1148599999999, 381.01440000000025 ] ], "type": "bezier" }, { "pts": [ [ 149.1148599999999, 381.01440000000025 ], [ 147.28775999999993, 381.1545600000003 ], [ 142.02096999999992, 380.15576000000027 ], [ 136.1910499999999, 378.56352000000027 ] ], "type": "bezier" }, { "pts": [ [ 136.1910499999999, 378.56352000000027 ], [ 130.77906999999993, 377.0854200000002 ], [ 122.74417999999991, 375.14309000000026 ], [ 118.33587999999992, 374.2472100000003 ] ], "type": "bezier" }, { "pts": [ [ 118.33587999999992, 374.2472100000003 ], [ 102.00642999999991, 370.9287000000003 ], [ 89.99103999999994, 364.4950900000003 ], [ 86.2571299999999, 357.07077000000027 ] ], "type": "bezier" }, { "pts": [ [ 86.2571299999999, 357.07077000000027 ], [ 84.78475999999989, 354.14309000000026 ], [ 84.85324999999989, 340.21874000000025 ], [ 86.36619999999988, 334.9119000000003 ] ], "type": "bezier" }, { "pts": [ [ 86.36619999999988, 334.9119000000003 ], [ 87.95646999999985, 329.3334200000003 ], [ 89.3054499999999, 327.2346500000003 ], [ 96.56715999999989, 319.04094000000026 ] ], "type": "bezier" }, { "pts": [ [ 96.56715999999989, 319.04094000000026 ], [ 104.8152199999999, 309.7341200000003 ], [ 105.95880999999991, 307.70974000000024 ], [ 105.9623299999999, 302.4096900000003 ] ], "type": "bezier" }, { "pts": [ [ 105.9623299999999, 302.4096900000003 ], [ 105.9623299999999, 296.6811700000003 ], [ 103.13662999999991, 290.98203000000024 ], [ 97.85375999999991, 286.0771800000003 ] ], "type": "bezier" }, { "pts": [ [ 97.85375999999991, 286.0771800000003 ], [ 89.99092999999993, 278.7770400000003 ], [ 83.55398999999989, 277.2923300000003 ], [ 77.98529999999994, 281.49441000000024 ] ], "type": "bezier" }, { "pts": [ [ 
77.98529999999994, 281.49441000000024 ], [ 76.33659999999992, 282.7385400000003 ], [ 74.45178999999996, 284.5533300000003 ], [ 73.79691999999994, 285.52728000000025 ] ], "type": "bezier" }, { "pts": [ [ 73.79691999999994, 285.52728000000025 ], [ 72.09974999999991, 288.05128000000025 ], [ 72.24753999999996, 295.22113000000024 ], [ 74.08885999999995, 299.6958800000003 ] ], "type": "bezier" }, { "pts": [ [ 74.08885999999995, 299.6958800000003 ], [ 76.31054999999998, 305.09509000000025 ], [ 76.00210999999996, 307.4491300000003 ], [ 72.16388999999998, 314.38656000000026 ] ], "type": "bezier" }, { "pts": [ [ 72.16388999999998, 314.38656000000026 ], [ 67.57085999999998, 322.6881500000003 ], [ 66.19002999999998, 326.12531000000024 ], [ 65.13351999999998, 331.88638000000026 ] ], "type": "bezier" }, { "pts": [ [ 65.13351999999998, 331.88638000000026 ], [ 63.99086, 338.11729000000025 ], [ 63.29976999999997, 339.47038000000026 ], [ 61.27517, 339.44010000000026 ] ], "type": "bezier" }, { "pts": [ [ 61.27517, 339.44010000000026 ], [ 59.11752000000001, 339.4078000000003 ], [ 57.542820000000006, 338.09092000000027 ], [ 56.04066, 335.06281000000024 ] ], "type": "bezier" }, { "pts": [ [ 56.04066, 335.06281000000024 ], [ 54.468650000000025, 331.89359000000024 ], [ 54.50590999999997, 330.37729000000024 ], [ 56.34018000000003, 322.86215000000027 ] ], "type": "bezier" }, { "pts": [ [ 56.34018000000003, 322.86215000000027 ], [ 57.16951000000006, 319.4644400000003 ], [ 58.309979999999996, 312.3408000000003 ], [ 58.87456000000009, 307.03187000000025 ] ], "type": "bezier" }, { "pts": [ [ 58.87456000000009, 307.03187000000025 ], [ 59.657710000000066, 299.6664900000003 ], [ 60.43432000000007, 296.0768600000003 ], [ 62.15193000000011, 291.8829000000003 ] ], "type": "bezier" }, { "pts": [ [ 62.15193000000011, 291.8829000000003 ], [ 64.69690000000008, 285.6687700000003 ], [ 65.60417000000012, 281.0434500000003 ], [ 64.87323000000009, 278.0097400000003 ] ], "type": "bezier" }, { "pts": [ [ 64.87323000000009, 278.0097400000003 ], [ 64.08001000000007, 274.7176800000003 ], [ 48.68222000000014, 258.86519000000027 ], [ 40.729940000000056, 253.15356000000025 ] ], "type": "bezier" }, { "pts": [ [ 40.729940000000056, 253.15356000000025 ], [ 32.8596500000001, 247.50078000000025 ], [ 28.86898000000008, 243.89221000000026 ], [ 28.86898000000008, 242.42809000000028 ] ], "type": "bezier" }, { "pts": [ [ 28.86898000000008, 242.42809000000028 ], [ 28.86898000000008, 240.30229000000026 ], [ 32.00562000000002, 239.6270700000003 ], [ 39.75635000000011, 240.0842900000003 ] ], "type": "bezier" }, { "pts": [ [ 39.75635000000011, 240.0842900000003 ], [ 46.269260000000145, 240.46849000000032 ], [ 48.34181000000012, 240.94638000000032 ], [ 53.881860000000074, 243.3414700000003 ] ], "type": "bezier" }, { "pts": [ [ 53.881860000000074, 243.3414700000003 ], [ 68.01474000000007, 249.45133000000033 ], [ 67.32718000000006, 249.30814000000032 ], [ 80.01934000000006, 248.78520000000032 ] ], "type": "bezier" }, { "pts": [ [ 80.01934000000006, 248.78520000000032 ], [ 93.24703000000005, 248.24017000000032 ], [ 97.10122000000007, 247.49584000000033 ], [ 100.93020000000007, 244.7468000000003 ] ], "type": "bezier" }, { "pts": [ [ 100.93020000000007, 244.7468000000003 ], [ 105.33216000000004, 241.58642000000032 ], [ 106.27296000000007, 237.9459800000003 ], [ 105.64673000000005, 226.49525000000028 ] ], "type": "bezier" }, { "pts": [ [ 105.64673000000005, 226.49525000000028 ], [ 105.07260000000002, 215.9979000000003 ], [ 106.11106000000007, 206.6446700000003 ], 
[ 107.85067000000004, 206.6446700000003 ] ], "type": "bezier" }, { "pts": [ [ 107.85067000000004, 206.6446700000003 ], [ 108.39636000000002, 206.6446700000003 ], [ 109.04895000000005, 206.9921600000003 ], [ 109.30094000000003, 207.4168700000003 ] ], "type": "bezier" }, { "pts": [ [ 109.30094000000003, 207.4168700000003 ], [ 110.22948000000002, 208.9819100000003 ], [ 113.97481000000005, 208.23823000000033 ], [ 115.52886000000001, 206.1802200000003 ] ], "type": "bezier" }, { "pts": [ [ 115.52886000000001, 206.1802200000003 ], [ 117.67593, 203.3368100000003 ], [ 118.15084000000002, 199.60083000000031 ], [ 117.15981, 193.34952000000033 ] ], "type": "bezier" }, { "pts": [ [ 117.15981, 193.34952000000033 ], [ 115.31194999999997, 181.69437000000033 ], [ 110.63475999999997, 171.9475100000003 ], [ 105.53546999999998, 169.1257800000003 ] ], "type": "bezier" }, { "pts": [ [ 105.53546999999998, 169.1257800000003 ], [ 102.86502999999999, 167.64810000000028 ] ], "type": "segment" }, { "pts": [ [ 102.86502999999999, 167.64810000000028 ], [ 100.76799, 170.2474500000003 ] ], "type": "segment" }, { "pts": [ [ 100.76799, 170.2474500000003 ], [ 98.67093999999997, 172.84682000000032 ] ], "type": "segment" }, { "pts": [ [ 98.67093999999997, 172.84682000000032 ], [ 93.02337999999997, 170.05440000000033 ] ], "type": "segment" }, { "pts": [ [ 93.02337999999997, 170.05440000000033 ], [ 86.31877999999995, 166.7393200000003 ], [ 80.87939999999998, 166.29354000000035 ], [ 77.79536999999999, 168.8064100000003 ] ], "type": "bezier" }, { "pts": [ [ 77.79536999999999, 168.8064100000003 ], [ 76.77611999999999, 169.6369500000003 ], [ 74.54219, 172.72103000000033 ], [ 72.80133000000001, 174.5662500000003 ] ], "type": "bezier" }, { "pts": [ [ 72.80133000000001, 174.5662500000003 ], [ 71.87112000000002, 175.5522500000003 ], [ 70.56977999999998, 176.8163200000003 ], [ 69.89364, 177.39921000000032 ] ], "type": "bezier" }, { "pts": [ [ 69.89364, 177.39921000000032 ], [ 69.89360999999997, 177.3992100000001 ] ], "type": "segment" } ] }, { "edgecolor": "#a6a6a6", "linewidth": 1.7, "id": "path4189", "items": [ { "pts": [ [ 306.89248999999995, 177.3992100000001 ], [ 309.24826999999993, 179.42993000000007 ], [ 310.81902999999994, 184.42228000000011 ], [ 310.83593999999994, 189.93283000000008 ] ], "type": "bezier" }, { "pts": [ [ 310.83593999999994, 189.93283000000008 ], [ 310.85153999999994, 195.0336000000001 ], [ 310.91373999999996, 195.2131700000001 ], [ 312.5201399999999, 194.8010700000001 ] ], "type": "bezier" }, { "pts": [ [ 312.5201399999999, 194.8010700000001 ], [ 313.43747999999994, 194.5657200000001 ], [ 316.72552999999994, 194.1371400000001 ], [ 319.82677999999987, 193.84867000000008 ] ], "type": "bezier" }, { "pts": [ [ 319.82677999999987, 193.84867000000008 ], [ 326.3382399999999, 193.24299000000008 ], [ 329.7723199999999, 194.1266700000001 ], [ 332.4264799999999, 197.0909000000001 ] ], "type": "bezier" }, { "pts": [ [ 332.4264799999999, 197.0909000000001 ], [ 335.30126999999993, 200.3015000000001 ], [ 340.7992899999999, 202.7540600000001 ], [ 347.1030799999999, 203.63778000000008 ] ], "type": "bezier" }, { "pts": [ [ 347.1030799999999, 203.63778000000008 ], [ 352.0764699999999, 204.3350000000001 ], [ 353.2840899999999, 204.2721600000001 ], [ 355.2483899999999, 203.21406000000007 ] ], "type": "bezier" }, { "pts": [ [ 355.2483899999999, 203.21406000000007 ], [ 357.9214299999999, 201.77418000000006 ], [ 357.8989099999999, 201.85592000000008 ], [ 356.4349499999999, 198.9071100000001 ] ], "type": "bezier" }, { "pts": [ [ 
356.4349499999999, 198.9071100000001 ], [ 354.98592999999994, 195.9881900000001 ], [ 353.8359099999999, 195.3979700000001 ], [ 345.1446899999999, 193.11259000000013 ] ], "type": "bezier" }, { "pts": [ [ 345.1446899999999, 193.11259000000013 ], [ 337.0190899999999, 190.97593000000012 ], [ 334.1932799999999, 189.60647000000012 ], [ 319.2560799999999, 180.56629000000015 ] ], "type": "bezier" }, { "pts": [ [ 319.2560799999999, 180.56629000000015 ], [ 314.50353999999993, 177.52081000000015 ], [ 308.09523999999993, 176.51202000000018 ], [ 304.6230699999999, 171.92633000000018 ] ], "type": "bezier" }, { "pts": [ [ 304.6230699999999, 171.92633000000018 ], [ 302.90462999999994, 169.65686000000017 ], [ 302.04581999999994, 167.31025000000017 ], [ 302.31814999999995, 163.59400000000016 ] ], "type": "bezier" }, { "pts": [ [ 302.31814999999995, 163.59400000000016 ], [ 302.92683999999997, 155.29093000000017 ], [ 303.08343999999994, 154.69263000000018 ], [ 305.3982199999999, 151.82692000000014 ] ], "type": "bezier" }, { "pts": [ [ 305.3982199999999, 151.82692000000014 ], [ 308.33320999999995, 148.19357000000014 ], [ 314.5182699999999, 146.58689000000015 ], [ 325.6778699999999, 146.55892000000017 ] ], "type": "bezier" }, { "pts": [ [ 325.6778699999999, 146.55892000000017 ], [ 336.20895999999993, 146.53252000000015 ], [ 339.39208999999994, 145.86057000000017 ], [ 339.39208999999994, 143.66381000000018 ] ], "type": "bezier" }, { "pts": [ [ 339.39208999999994, 143.66381000000018 ], [ 339.39208999999994, 142.59645000000017 ], [ 337.88017999999994, 141.40348000000017 ], [ 333.9609099999999, 139.37826000000018 ] ], "type": "bezier" }, { "pts": [ [ 333.9609099999999, 139.37826000000018 ], [ 322.16356999999994, 133.2821600000002 ], [ 318.45715999999993, 126.02981000000017 ], [ 321.6086399999999, 115.20819000000017 ] ], "type": "bezier" }, { "pts": [ [ 321.6086399999999, 115.20819000000017 ], [ 322.2158699999999, 113.12329000000017 ], [ 322.7125799999999, 110.9514500000002 ], [ 322.7125799999999, 110.38187000000016 ] ], "type": "bezier" }, { "pts": [ [ 322.7125799999999, 110.38187000000016 ], [ 322.7125799999999, 108.68374000000017 ], [ 320.89366999999993, 109.20173000000017 ], [ 318.8921099999999, 111.46987000000018 ] ], "type": "bezier" }, { "pts": [ [ 318.8921099999999, 111.46987000000018 ], [ 318.8921099999999, 111.46987000000018 ] ], "type": "segment" }, { "pts": [ [ 318.8921099999999, 111.46987000000018 ], [ 318.89262999999994, 111.46987000000018 ] ], "type": "segment" }, { "pts": [ [ 318.89262999999994, 111.46987000000018 ], [ 317.86195999999995, 112.63783000000018 ], [ 316.2342299999999, 114.95878000000016 ], [ 315.27557999999993, 116.6275100000002 ] ], "type": "bezier" }, { "pts": [ [ 315.27557999999993, 116.6275100000002 ], [ 312.01159999999993, 122.30880000000019 ], [ 306.5440899999999, 128.44351000000017 ], [ 303.30065999999994, 130.06369000000018 ] ], "type": "bezier" }, { "pts": [ [ 303.30065999999994, 130.06369000000018 ], [ 301.54205999999994, 130.9421700000002 ], [ 298.43530999999996, 131.88622000000015 ], [ 296.3966899999999, 132.16157000000015 ] ], "type": "bezier" }, { "pts": [ [ 296.3966899999999, 132.16157000000015 ], [ 292.69017999999994, 132.66223000000014 ] ], "type": "segment" }, { "pts": [ [ 292.69017999999994, 132.66223000000014 ], [ 292.64967999999993, 137.41372000000013 ] ], "type": "segment" }, { "pts": [ [ 292.64967999999993, 137.41372000000013 ], [ 292.5749799999999, 146.2786000000001 ], [ 286.42621999999994, 154.1454700000001 ], [ 277.49276999999995, 156.80452000000014 ] ], "type": 
"bezier" }, { "pts": [ [ 277.49276999999995, 156.80452000000014 ], [ 276.47351, 157.10791000000012 ], [ 272.09292999999997, 159.07890000000015 ], [ 267.75810999999993, 161.18449000000015 ] ], "type": "bezier" }, { "pts": [ [ 267.75810999999993, 161.18449000000015 ], [ 258.36054999999993, 165.74931000000015 ], [ 255.49758999999995, 166.38061000000016 ], [ 252.01057999999995, 164.65697000000017 ] ], "type": "bezier" }, { "pts": [ [ 252.01057999999995, 164.65697000000017 ], [ 249.23084999999998, 163.28287000000017 ], [ 243.66297999999995, 163.00239000000016 ], [ 241.92243999999994, 164.14877000000018 ] ], "type": "bezier" }, { "pts": [ [ 241.92243999999994, 164.14877000000018 ], [ 240.13144999999992, 165.3284100000002 ], [ 242.68233999999995, 178.7100700000002 ], [ 246.69095999999996, 189.16467000000017 ] ], "type": "bezier" }, { "pts": [ [ 246.69095999999996, 189.16467000000017 ], [ 250.55719999999997, 199.24777000000017 ], [ 251.09883999999994, 201.3343500000002 ], [ 252.57556999999997, 211.83402000000018 ] ], "type": "bezier" }, { "pts": [ [ 252.57556999999997, 211.83402000000018 ], [ 254.14414999999997, 222.9875200000002 ], [ 252.59006999999997, 237.53303000000017 ], [ 249.82998999999995, 237.53303000000017 ] ], "type": "bezier" }, { "pts": [ [ 249.82998999999995, 237.53303000000017 ], [ 248.37058999999994, 237.53303000000017 ], [ 244.33445999999998, 233.72112000000016 ], [ 243.54321999999996, 231.5954300000002 ] ], "type": "bezier" }, { "pts": [ [ 243.54321999999996, 231.5954300000002 ], [ 243.09882, 230.4016000000002 ], [ 242.35811999999999, 227.42206000000022 ], [ 241.89731999999998, 224.9742300000002 ] ], "type": "bezier" }, { "pts": [ [ 241.89731999999998, 224.9742300000002 ], [ 241.43642, 222.5264000000002 ], [ 240.75435999999996, 220.72001000000023 ], [ 240.38156999999995, 220.9600300000002 ] ], "type": "bezier" }, { "pts": [ [ 240.38156999999995, 220.9600300000002 ], [ 239.49151999999998, 221.53305000000017 ], [ 236.35736999999995, 227.13196000000016 ], [ 234.92121999999995, 230.71477000000021 ] ], "type": "bezier" }, { "pts": [ [ 234.92121999999995, 230.71477000000021 ], [ 233.78500999999994, 233.54910000000024 ] ], "type": "segment" }, { "pts": [ [ 233.78500999999994, 233.54910000000024 ], [ 237.53011999999995, 236.35189000000025 ] ], "type": "segment" }, { "pts": [ [ 237.53011999999995, 236.35189000000025 ], [ 242.59743999999995, 240.14416000000023 ], [ 243.51238999999998, 242.61184000000026 ], [ 242.57128999999998, 249.94907000000023 ] ], "type": "bezier" }, { "pts": [ [ 242.57128999999998, 249.94907000000023 ], [ 241.71486999999996, 256.62578000000025 ], [ 238.89394, 263.28329000000025 ], [ 236.12261999999998, 265.16809000000023 ] ], "type": "bezier" }, { "pts": [ [ 236.12261999999998, 265.16809000000023 ], [ 233.40754999999996, 267.01463000000024 ], [ 224.20727, 266.66110000000026 ], [ 220.01952, 264.54932000000025 ] ], "type": "bezier" }, { "pts": [ [ 220.01952, 264.54932000000025 ], [ 216.07482, 262.56011000000024 ], [ 215.89102000000003, 262.58406000000025 ], [ 213.32301, 265.42209000000025 ] ], "type": "bezier" }, { "pts": [ [ 213.32301, 265.42209000000025 ], [ 212.12546000000003, 266.74549000000025 ], [ 209.3943, 269.18891000000025 ], [ 207.25377000000003, 270.85193000000027 ] ], "type": "bezier" }, { "pts": [ [ 207.25377000000003, 270.85193000000027 ], [ 203.84636, 273.4992100000003 ], [ 203.36190000000005, 274.21376000000026 ], [ 203.36190000000005, 276.59230000000025 ] ], "type": "bezier" }, { "pts": [ [ 203.36190000000005, 276.59230000000025 ], [ 203.36190000000005, 
280.02975000000026 ], [ 205.23311000000007, 283.8686300000003 ], [ 206.90215000000006, 283.85563000000025 ] ], "type": "bezier" }, { "pts": [ [ 206.90215000000006, 283.85563000000025 ], [ 207.60529000000008, 283.85063000000025 ], [ 210.29514000000006, 283.1551500000003 ], [ 212.87965000000008, 282.3112100000003 ] ], "type": "bezier" }, { "pts": [ [ 212.87965000000008, 282.3112100000003 ], [ 218.5138300000001, 280.47145000000023 ], [ 223.19601000000006, 280.3443500000003 ], [ 225.4250600000001, 281.9706600000003 ] ], "type": "bezier" }, { "pts": [ [ 225.4250600000001, 281.9706600000003 ], [ 226.32497000000006, 282.6273200000003 ], [ 227.94180000000011, 284.72963000000027 ], [ 229.0180400000001, 286.6424900000003 ] ], "type": "bezier" }, { "pts": [ [ 229.0180400000001, 286.6424900000003 ], [ 230.81276000000008, 289.83259000000027 ], [ 230.9324200000001, 290.5666600000003 ], [ 230.4644600000001, 295.51318000000026 ] ], "type": "bezier" }, { "pts": [ [ 230.4644600000001, 295.51318000000026 ], [ 229.25601000000006, 308.28824000000026 ], [ 225.69801000000007, 315.52287000000024 ], [ 215.4947800000001, 325.95100000000025 ] ], "type": "bezier" }, { "pts": [ [ 215.4947800000001, 325.95100000000025 ], [ 206.9204200000001, 334.7143900000002 ], [ 205.6478300000001, 337.3039900000002 ], [ 205.03188000000011, 347.2417400000003 ] ], "type": "bezier" }, { "pts": [ [ 205.03188000000011, 347.2417400000003 ], [ 204.7496900000001, 351.7940000000002 ], [ 204.9488800000001, 353.6521100000002 ], [ 205.9337500000001, 355.63512000000026 ] ], "type": "bezier" }, { "pts": [ [ 205.9337500000001, 355.63512000000026 ], [ 208.31848000000008, 360.43900000000025 ], [ 212.2549800000001, 365.51269000000025 ], [ 215.1407700000001, 367.50201000000027 ] ], "type": "bezier" }, { "pts": [ [ 215.1407700000001, 367.50201000000027 ], [ 216.9368500000001, 368.74017000000026 ], [ 219.26823000000007, 371.6257800000003 ], [ 221.3079800000001, 375.1352900000003 ] ], "type": "bezier" }, { "pts": [ [ 221.3079800000001, 375.1352900000003 ], [ 224.46113000000008, 380.5607600000003 ], [ 224.70605000000012, 380.7869500000003 ], [ 227.67124000000013, 381.01440000000025 ] ], "type": "bezier" }, { "pts": [ [ 227.67124000000013, 381.01440000000025 ], [ 229.4983400000001, 381.1545600000003 ], [ 234.7651300000001, 380.15576000000027 ], [ 240.59505000000013, 378.56352000000027 ] ], "type": "bezier" }, { "pts": [ [ 240.59505000000013, 378.56352000000027 ], [ 246.0070300000001, 377.0854200000002 ], [ 254.04192000000012, 375.14309000000026 ], [ 258.4502200000001, 374.2472100000003 ] ], "type": "bezier" }, { "pts": [ [ 258.4502200000001, 374.2472100000003 ], [ 274.7796700000001, 370.9287000000003 ], [ 286.7950600000001, 364.4950900000003 ], [ 290.52897000000013, 357.07077000000027 ] ], "type": "bezier" }, { "pts": [ [ 290.52897000000013, 357.07077000000027 ], [ 292.00134000000014, 354.14309000000026 ], [ 291.93285000000014, 340.21874000000025 ], [ 290.41990000000015, 334.9119000000003 ] ], "type": "bezier" }, { "pts": [ [ 290.41990000000015, 334.9119000000003 ], [ 288.8296300000002, 329.3334200000003 ], [ 287.48065000000014, 327.2346500000003 ], [ 280.21894000000015, 319.04094000000026 ] ], "type": "bezier" }, { "pts": [ [ 280.21894000000015, 319.04094000000026 ], [ 271.97088000000014, 309.7341200000003 ], [ 270.8272900000001, 307.70974000000024 ], [ 270.82377000000014, 302.4096900000003 ] ], "type": "bezier" }, { "pts": [ [ 270.82377000000014, 302.4096900000003 ], [ 270.82377000000014, 296.6811700000003 ], [ 273.6494700000001, 290.98203000000024 ], [ 
278.9323400000001, 286.0771800000003 ] ], "type": "bezier" }, { "pts": [ [ 278.9323400000001, 286.0771800000003 ], [ 286.7951700000001, 278.7770400000003 ], [ 293.23211000000015, 277.2923300000003 ], [ 298.8008000000001, 281.49441000000024 ] ], "type": "bezier" }, { "pts": [ [ 298.8008000000001, 281.49441000000024 ], [ 300.4495000000001, 282.7385400000003 ], [ 302.3343100000001, 284.5533300000003 ], [ 302.9891800000001, 285.52728000000025 ] ], "type": "bezier" }, { "pts": [ [ 302.9891800000001, 285.52728000000025 ], [ 304.6863500000001, 288.05128000000025 ], [ 304.5385600000001, 295.22113000000024 ], [ 302.6972400000001, 299.6958800000003 ] ], "type": "bezier" }, { "pts": [ [ 302.6972400000001, 299.6958800000003 ], [ 300.47555000000006, 305.09509000000025 ], [ 300.7839900000001, 307.4491300000003 ], [ 304.62221000000005, 314.38656000000026 ] ], "type": "bezier" }, { "pts": [ [ 304.62221000000005, 314.38656000000026 ], [ 309.21524000000005, 322.6881500000003 ], [ 310.59607000000005, 326.12531000000024 ], [ 311.65258000000006, 331.88638000000026 ] ], "type": "bezier" }, { "pts": [ [ 311.65258000000006, 331.88638000000026 ], [ 312.79524000000004, 338.11729000000025 ], [ 313.48633000000007, 339.47038000000026 ], [ 315.51093000000003, 339.44010000000026 ] ], "type": "bezier" }, { "pts": [ [ 315.51093000000003, 339.44010000000026 ], [ 317.66858, 339.4078000000003 ], [ 319.24328, 338.09092000000027 ], [ 320.74544000000003, 335.06281000000024 ] ], "type": "bezier" }, { "pts": [ [ 320.74544000000003, 335.06281000000024 ], [ 322.31745, 331.89359000000024 ], [ 322.28019000000006, 330.37729000000024 ], [ 320.44592, 322.86215000000027 ] ], "type": "bezier" }, { "pts": [ [ 320.44592, 322.86215000000027 ], [ 319.61659000000003, 319.4644400000003 ], [ 318.47612000000004, 312.3408000000003 ], [ 317.91154000000006, 307.03187000000025 ] ], "type": "bezier" }, { "pts": [ [ 317.91154000000006, 307.03187000000025 ], [ 317.12839, 299.6664900000003 ], [ 316.35178, 296.0768600000003 ], [ 314.63417000000004, 291.8829000000003 ] ], "type": "bezier" }, { "pts": [ [ 314.63417000000004, 291.8829000000003 ], [ 312.08920000000006, 285.6687700000003 ], [ 311.18193, 281.0434500000003 ], [ 311.91287000000005, 278.0097400000003 ] ], "type": "bezier" }, { "pts": [ [ 311.91287000000005, 278.0097400000003 ], [ 312.7060900000001, 274.7176800000003 ], [ 328.10388000000006, 258.86519000000027 ], [ 336.0561600000001, 253.15356000000025 ] ], "type": "bezier" }, { "pts": [ [ 336.0561600000001, 253.15356000000025 ], [ 343.92645000000005, 247.50078000000025 ], [ 347.91712000000007, 243.89221000000026 ], [ 347.91712000000007, 242.42809000000028 ] ], "type": "bezier" }, { "pts": [ [ 347.91712000000007, 242.42809000000028 ], [ 347.91712000000007, 240.30229000000026 ], [ 344.78048000000007, 239.6270700000003 ], [ 337.02975000000004, 240.0842900000003 ] ], "type": "bezier" }, { "pts": [ [ 337.02975000000004, 240.0842900000003 ], [ 330.51684000000006, 240.46849000000032 ], [ 328.44429, 240.94638000000032 ], [ 322.9042400000001, 243.3414700000003 ] ], "type": "bezier" }, { "pts": [ [ 322.9042400000001, 243.3414700000003 ], [ 308.7713600000001, 249.45133000000033 ], [ 309.4589200000001, 249.30814000000032 ], [ 296.7667600000001, 248.78520000000032 ] ], "type": "bezier" }, { "pts": [ [ 296.7667600000001, 248.78520000000032 ], [ 283.5390700000001, 248.24017000000032 ], [ 279.6848800000001, 247.49584000000033 ], [ 275.8559000000001, 244.7468000000003 ] ], "type": "bezier" }, { "pts": [ [ 275.8559000000001, 244.7468000000003 ], [ 
271.4539400000001, 241.58642000000032 ], [ 270.5131400000001, 237.9459800000003 ], [ 271.1393700000001, 226.49525000000028 ] ], "type": "bezier" }, { "pts": [ [ 271.1393700000001, 226.49525000000028 ], [ 271.7135000000001, 215.9979000000003 ], [ 270.6750400000001, 206.6446700000003 ], [ 268.9354300000001, 206.6446700000003 ] ], "type": "bezier" }, { "pts": [ [ 268.9354300000001, 206.6446700000003 ], [ 268.38974000000013, 206.6446700000003 ], [ 267.7371500000001, 206.9921600000003 ], [ 267.4851600000001, 207.4168700000003 ] ], "type": "bezier" }, { "pts": [ [ 267.4851600000001, 207.4168700000003 ], [ 266.5566200000001, 208.9819100000003 ], [ 262.8112900000001, 208.23823000000033 ], [ 261.25724000000014, 206.1802200000003 ] ], "type": "bezier" }, { "pts": [ [ 261.25724000000014, 206.1802200000003 ], [ 259.11017000000015, 203.3368100000003 ], [ 258.63526000000013, 199.60083000000031 ], [ 259.62629000000015, 193.34952000000033 ] ], "type": "bezier" }, { "pts": [ [ 259.62629000000015, 193.34952000000033 ], [ 261.4741500000002, 181.69437000000033 ], [ 266.1513400000002, 171.9475100000003 ], [ 271.25063000000017, 169.1257800000003 ] ], "type": "bezier" }, { "pts": [ [ 271.25063000000017, 169.1257800000003 ], [ 273.92107000000016, 167.64810000000028 ] ], "type": "segment" }, { "pts": [ [ 273.92107000000016, 167.64810000000028 ], [ 276.01811000000015, 170.2474500000003 ] ], "type": "segment" }, { "pts": [ [ 276.01811000000015, 170.2474500000003 ], [ 278.1151600000002, 172.84682000000032 ] ], "type": "segment" }, { "pts": [ [ 278.1151600000002, 172.84682000000032 ], [ 283.7627200000002, 170.05440000000033 ] ], "type": "segment" }, { "pts": [ [ 283.7627200000002, 170.05440000000033 ], [ 290.4673200000002, 166.7393200000003 ], [ 295.90670000000017, 166.29354000000035 ], [ 298.99073000000016, 168.8064100000003 ] ], "type": "bezier" }, { "pts": [ [ 298.99073000000016, 168.8064100000003 ], [ 300.00998000000016, 169.6369500000003 ], [ 302.24391000000014, 172.72103000000033 ], [ 303.98477000000014, 174.5662500000003 ] ], "type": "bezier" }, { "pts": [ [ 303.98477000000014, 174.5662500000003 ], [ 304.9149800000001, 175.5522500000003 ], [ 306.21632000000017, 176.8163200000003 ], [ 306.89246000000014, 177.39921000000032 ] ], "type": "bezier" }, { "pts": [ [ 306.89246000000014, 177.39921000000032 ], [ 306.89248999999995, 177.3992100000001 ] ], "type": "segment" } ] }, { "edgecolor": "#a6a6a6", "linewidth": 1.7, "id": "path4114", "items": [ { "pts": [ [ 38.13621999999998, 96.75529000000012 ], [ 81.11926999999997, 82.55840000000012 ], [ 111.77483999999998, 86.9206200000001 ], [ 135.11028, 110.67918000000009 ] ], "type": "bezier" }, { "pts": [ [ 135.11028, 110.67918000000009 ], [ 149.27927999999997, 125.10511000000008 ], [ 133.70141999999998, 133.75634000000008 ], [ 142.18674, 136.8888300000001 ] ], "type": "bezier" }, { "pts": [ [ 142.18674, 136.8888300000001 ], [ 148.97364, 139.3943400000001 ], [ 160.00900000000001, 129.5173600000001 ], [ 154.76711999999998, 118.5966800000001 ] ], "type": "bezier" }, { "pts": [ [ 154.76711999999998, 118.5966800000001 ], [ 149.52535, 107.67599000000013 ], [ 149.00113999999996, 103.3077100000001 ], [ 152.67039, 94.0251300000001 ] ], "type": "bezier" }, { "pts": [ [ 152.67039, 94.0251300000001 ], [ 160.93683, 77.0438200000001 ], [ 173.2289, 71.59232000000009 ], [ 186.7423, 71.09169000000009 ] ], "type": "bezier" }, { "pts": [ [ 186.7423, 71.09169000000009 ], [ 211.15397000000002, 70.18731000000008 ], [ 215.44510000000002, 82.70901000000009 ], [ 223.17320999999998, 
95.39022000000011 ] ], "type": "bezier" }, { "pts": [ [ 223.17320999999998, 95.39022000000011 ], [ 232.17494, 110.16125000000011 ], [ 211.57844999999998, 130.44844000000012 ], [ 232.34638999999999, 137.16185000000013 ] ], "type": "bezier" }, { "pts": [ [ 232.34638999999999, 137.16185000000013 ], [ 235.78659999999996, 138.27394000000015 ], [ 240.58429, 131.87216000000012 ], [ 239.1608, 128.4253000000001 ] ], "type": "bezier" }, { "pts": [ [ 239.1608, 128.4253000000001 ], [ 232.98705, 113.4758900000001 ], [ 252.31468999999998, 102.42569000000009 ], [ 260.91447, 98.9394400000001 ] ], "type": "bezier" }, { "pts": [ [ 260.91447, 98.9394400000001 ], [ 285.35967, 89.02965000000012 ], [ 303.68041, 79.85491000000013 ], [ 334.30028, 97.02832000000012 ] ], "type": "bezier" } ] }, { "edgecolor": "#a6a6a6", "linewidth": 1.7, "id": "path4097", "items": [ { "pts": [ [ 155.98293999999999, 242.03422000000012 ], [ 153.38214, 242.48581000000013 ], [ 150.8519, 243.77131000000014 ], [ 149.94099999999997, 245.10397000000012 ] ], "type": "bezier" }, { "pts": [ [ 149.94099999999997, 245.10397000000012 ], [ 149.07898, 246.3650300000001 ], [ 147.73208, 251.51161000000013 ], [ 147.73290999999995, 253.54117000000014 ] ], "type": "bezier" }, { "pts": [ [ 147.73290999999995, 253.54117000000014 ], [ 147.73362999999995, 255.25436000000013 ], [ 148.80830999999995, 257.58480000000014 ], [ 149.67541999999997, 257.7531500000001 ] ], "type": "bezier" }, { "pts": [ [ 149.67541999999997, 257.7531500000001 ], [ 150.03055999999998, 257.82215000000014 ], [ 151.69845999999995, 257.0166900000001 ], [ 153.38192999999995, 255.96332000000012 ] ], "type": "bezier" }, { "pts": [ [ 153.38192999999995, 255.96332000000012 ], [ 155.06528999999995, 254.90993000000014 ], [ 157.36003999999997, 253.64823000000013 ], [ 158.48131999999998, 253.1595200000001 ] ], "type": "bezier" }, { "pts": [ [ 158.48131999999998, 253.1595200000001 ], [ 160.89738999999997, 252.10642000000013 ], [ 161.81545, 251.5434400000001 ], [ 164.04107999999997, 249.7503400000001 ] ], "type": "bezier" }, { "pts": [ [ 164.04107999999997, 249.7503400000001 ], [ 167.92007999999998, 246.62515000000013 ], [ 169.90171999999995, 244.13861000000009 ], [ 169.46291999999994, 242.9472500000001 ] ], "type": "bezier" }, { "pts": [ [ 169.46291999999994, 242.9472500000001 ], [ 169.35747999999995, 242.6611000000001 ], [ 168.63649999999996, 242.24338000000012 ], [ 167.86081999999993, 242.0189900000001 ] ], "type": "bezier" }, { "pts": [ [ 167.86081999999993, 242.0189900000001 ], [ 166.20443999999992, 241.53985000000011 ], [ 158.7752299999999, 241.54939000000007 ], [ 155.98293999999993, 242.03419000000008 ] ], "type": "bezier" }, { "pts": [ [ 155.98293999999993, 242.03419000000008 ], [ 155.98293999999993, 242.03419000000008 ] ], "type": "segment" }, { "pts": [ [ 155.98293999999993, 242.03419000000008 ], [ 155.98293999999999, 242.03422000000012 ] ], "type": "segment" } ] }, { "edgecolor": "#a6a6a6", "linewidth": 1.7, "id": "path4099", "items": [ { "pts": [ [ 213.82648999999998, 242.5048000000001 ], [ 212.22501999999997, 243.1201700000001 ], [ 210.74350999999996, 244.52933000000013 ], [ 210.74350999999996, 245.43716000000012 ] ], "type": "bezier" }, { "pts": [ [ 210.74350999999996, 245.43716000000012 ], [ 210.74350999999996, 246.28588000000013 ], [ 212.28572999999994, 247.68831000000011 ], [ 213.59557999999998, 248.03077000000013 ] ], "type": "bezier" }, { "pts": [ [ 213.59557999999998, 248.03077000000013 ], [ 214.07858, 248.15707000000015 ], [ 214.98294999999996, 249.03122000000013 ], [ 
215.60523, 249.97334000000012 ] ], "type": "bezier" }, { "pts": [ [ 215.60523, 249.97334000000012 ], [ 216.39222, 251.1649200000001 ], [ 217.34537, 251.9865400000001 ], [ 218.73667999999998, 252.6727600000001 ] ], "type": "bezier" }, { "pts": [ [ 218.73667999999998, 252.6727600000001 ], [ 220.27360999999996, 253.43076000000008 ], [ 220.99521, 254.09674000000007 ], [ 221.85256999999996, 255.54811000000012 ] ], "type": "bezier" }, { "pts": [ [ 221.85256999999996, 255.54811000000012 ], [ 223.14663999999993, 257.7389700000001 ], [ 223.77472999999998, 257.87704000000014 ], [ 226.31098999999995, 256.52811000000014 ] ], "type": "bezier" }, { "pts": [ [ 226.31098999999995, 256.52811000000014 ], [ 228.25982999999997, 255.49162000000013 ], [ 229.94193999999993, 253.20230000000015 ], [ 230.26138999999995, 251.15172000000013 ] ], "type": "bezier" }, { "pts": [ [ 230.26138999999995, 251.15172000000013 ], [ 230.46874999999994, 249.82024000000013 ], [ 230.34228999999993, 249.54147000000012 ], [ 228.67682999999994, 247.65278000000012 ] ], "type": "bezier" }, { "pts": [ [ 228.67682999999994, 247.65278000000012 ], [ 227.68143999999995, 246.52408000000014 ], [ 226.44994999999994, 245.03352000000012 ], [ 225.94037999999995, 244.34043000000014 ] ], "type": "bezier" }, { "pts": [ [ 225.94037999999995, 244.34043000000014 ], [ 225.24990999999994, 243.4015000000001 ], [ 224.49705999999992, 242.94168000000013 ], [ 222.98701999999997, 242.53654000000012 ] ], "type": "bezier" }, { "pts": [ [ 222.98701999999997, 242.53654000000012 ], [ 220.56917999999996, 241.88787000000013 ], [ 215.47788999999995, 241.87022000000013 ], [ 213.82648999999998, 242.50474000000014 ] ], "type": "bezier" }, { "pts": [ [ 213.82648999999998, 242.50474000000014 ], [ 213.82648999999998, 242.50474000000014 ] ], "type": "segment" }, { "pts": [ [ 213.82648999999998, 242.50474000000014 ], [ 213.82648999999998, 242.5048000000001 ] ], "type": "segment" } ] }, { "edgecolor": "#000000", "linewidth": 2.0, "id": "path3906", "items": [ { "pts": [ [ 197.33150999999998, 3.356960000000072 ], [ 201.36410999999998, 4.772150000000124 ], [ 206.60422, 8.609180000000038 ], [ 209.20263999999997, 9.052260000000047 ] ], "type": "bezier" }, { "pts": [ [ 209.20263999999997, 9.052260000000047 ], [ 214.51851999999997, 9.958690000000047 ], [ 231.98605999999995, 8.811320000000023 ], [ 250.45416999999998, 9.514049999999997 ] ], "type": "bezier" }, { "pts": [ [ 250.45416999999998, 9.514049999999997 ], [ 267.96218, 10.180230000000051 ], [ 274.92915, 11.10441000000003 ], [ 282.42643999999996, 13.755209999999977 ] ], "type": "bezier" }, { "pts": [ [ 282.42643999999996, 13.755209999999977 ], [ 305.80172999999996, 22.01977999999997 ], [ 323.7240499999999, 41.62241999999998 ], [ 332.30878999999993, 68.31419 ] ], "type": "bezier" }, { "pts": [ [ 332.30878999999993, 68.31419 ], [ 334.25980999999996, 74.38042999999999 ], [ 334.56762999999995, 76.54248000000001 ], [ 334.91716999999994, 86.63378 ] ], "type": "bezier" }, { "pts": [ [ 334.91716999999994, 86.63378 ], [ 335.31352, 98.07539000000003 ] ], "type": "segment" }, { "pts": [ [ 335.31352, 98.07539000000003 ], [ 342.35603, 105.64672000000002 ] ], "type": "segment" }, { "pts": [ [ 342.35603, 105.64672000000002 ], [ 357.04788999999994, 121.44190000000003 ], [ 361.72436, 130.07623 ], [ 367.53828999999996, 152.14120000000003 ] ], "type": "bezier" }, { "pts": [ [ 367.53828999999996, 152.14120000000003 ], [ 373.04896999999994, 173.05499000000003 ], [ 373.49627999999996, 182.84130000000005 ], [ 370.50037, 216.9452 ] ], "type": "bezier" }, 
{ "pts": [ [ 370.50037, 216.9452 ], [ 369.43938999999995, 229.02146 ], [ 368.22450999999995, 243.33128 ], [ 367.80054999999993, 248.74477000000002 ] ], "type": "bezier" }, { "pts": [ [ 367.80054999999993, 248.74477000000002 ], [ 365.79659999999996, 274.33071 ], [ 360.97515, 294.57489 ], [ 352.90891, 311.27234 ] ], "type": "bezier" }, { "pts": [ [ 352.90891, 311.27234 ], [ 346.75280999999995, 324.01571 ], [ 339.18721999999997, 332.91801 ], [ 328.22553999999997, 340.31672000000003 ] ], "type": "bezier" }, { "pts": [ [ 328.22553999999997, 340.31672000000003 ], [ 322.26174, 344.34203 ], [ 302.44320999999997, 360.44696999999996 ], [ 288.46299, 372.62845 ] ], "type": "bezier" }, { "pts": [ [ 288.46299, 372.62845 ], [ 281.67464, 378.5434 ], [ 274.28237, 382.7286 ], [ 263.17373, 386.94611 ] ], "type": "bezier" }, { "pts": [ [ 263.17373, 386.94611 ], [ 258.57665, 388.69151999999997 ], [ 250.04807, 392.47229 ], [ 244.22146999999995, 395.34785999999997 ] ], "type": "bezier" }, { "pts": [ [ 244.22146999999995, 395.34785999999997 ], [ 225.30584999999996, 404.68314999999996 ], [ 221.33893999999998, 404.72245999999996 ], [ 199.93915999999996, 395.7867 ] ], "type": "bezier" }, { "pts": [ [ 199.93915999999996, 395.7867 ], [ 192.80406999999997, 392.80737 ], [ 191.85797999999994, 392.60206999999997 ], [ 185.40256999999997, 392.63249999999994 ] ], "type": "bezier" }, { "pts": [ [ 185.40256999999997, 392.63249999999994 ], [ 179.22787999999997, 392.6617 ], [ 177.49812999999995, 393.01406 ], [ 169.04861999999997, 395.96461 ] ], "type": "bezier" }, { "pts": [ [ 169.04861999999997, 395.96461 ], [ 158.40679999999998, 399.6808 ], [ 151.58067, 400.56206 ], [ 145.32077999999996, 399.02788999999996 ] ], "type": "bezier" }, { "pts": [ [ 145.32077999999996, 399.02788999999996 ], [ 141.82234999999997, 398.17049 ], [ 123.24993999999998, 391.07859999999994 ], [ 104.72380999999996, 383.52594999999997 ] ], "type": "bezier" }, { "pts": [ [ 104.72380999999996, 383.52594999999997 ], [ 83.11272999999994, 374.71560999999997 ], [ 70.54842999999994, 366.53601999999995 ], [ 58.7891699999999, 353.62176999999997 ] ], "type": "bezier" }, { "pts": [ [ 58.7891699999999, 353.62176999999997 ], [ 54.2260399999999, 348.61035999999996 ], [ 48.049069999999915, 342.55372 ], [ 45.062709999999925, 340.16256999999996 ] ], "type": "bezier" }, { "pts": [ [ 45.062709999999925, 340.16256999999996 ], [ 32.91062999999997, 330.43278999999995 ], [ 28.49673999999993, 325.58896999999996 ], [ 22.655199999999923, 315.57268 ] ], "type": "bezier" }, { "pts": [ [ 22.655199999999923, 315.57268 ], [ 13.648059999999873, 300.12871999999993 ], [ 7.3133499999999, 275.22274999999996 ], [ 5.176239999999893, 246.85193999999996 ] ], "type": "bezier" }, { "pts": [ [ 5.176239999999893, 246.85193999999996 ], [ -0.07809000000008837, 177.09676999999994 ], [ 0.09252999999989697, 167.51085999999998 ], [ 6.996079999999893, 144.63904999999994 ] ], "type": "bezier" }, { "pts": [ [ 6.996079999999893, 144.63904999999994 ], [ 11.951309999999921, 128.22232999999994 ], [ 16.1562899999999, 120.80725999999993 ], [ 28.29487999999992, 107.08050999999995 ] ], "type": "bezier" }, { "pts": [ [ 28.29487999999992, 107.08050999999995 ], [ 37.65102999999988, 96.50023999999996 ] ], "type": "segment" }, { "pts": [ [ 37.65102999999988, 96.50023999999996 ], [ 38.03554999999983, 86.68796999999995 ] ], "type": "segment" }, { "pts": [ [ 38.03554999999983, 86.68796999999995 ], [ 38.49104999999986, 75.06029999999993 ], [ 39.68507999999986, 70.52535999999998 ], [ 45.82831999999985, 57.091669999999965 ] ], 
"type": "bezier" }, { "pts": [ [ 45.82831999999985, 57.091669999999965 ], [ 57.14130999999986, 32.352529999999945 ], [ 68.77861999999982, 20.942709999999977 ], [ 90.10107999999985, 13.684409999999957 ] ], "type": "bezier" }, { "pts": [ [ 90.10107999999985, 13.684409999999957 ], [ 100.68591999999984, 10.08123999999998 ], [ 105.85722999999984, 9.59692999999993 ], [ 125.80196999999987, 10.34089999999992 ] ], "type": "bezier" }, { "pts": [ [ 125.80196999999987, 10.34089999999992 ], [ 135.5960899999999, 10.706239999999866 ], [ 148.52393999999987, 11.005159999999933 ], [ 154.53059999999988, 11.005159999999933 ] ], "type": "bezier" }, { "pts": [ [ 154.53059999999988, 11.005159999999933 ], [ 162.28019999999987, 12.130759999999896 ], [ 166.1589899999999, 10.530179999999973 ], [ 172.3380299999999, 6.380629999999883 ] ], "type": "bezier" }, { "pts": [ [ 172.3380299999999, 6.380629999999883 ], [ 180.6533399999999, 0.7964899999998352 ], [ 180.36025999999987, 0.9179399999999305 ], [ 185.7020799999999, 0.8436399999999367 ] ], "type": "bezier" }, { "pts": [ [ 185.7020799999999, 0.8436399999999367 ], [ 188.78215999999992, 0.8008399999998801 ], [ 192.0761399999999, 1.5126899999999068 ], [ 197.33150999999992, 3.356959999999958 ] ], "type": "bezier" }, { "pts": [ [ 197.33150999999992, 3.356959999999958 ], [ 197.33150999999998, 3.356960000000072 ] ], "type": "segment" } ] }, { "edgecolor": "#000000", "linewidth": 2.0, "id": "path3932", "items": [ { "pts": [ [ 335.03454999999997, 97.5731300000001 ], [ 335.03454999999997, 97.5731300000001 ], [ 336.05618999999996, 89.43700000000013 ], [ 334.37293, 85.98907000000008 ] ], "type": "bezier" }, { "pts": [ [ 334.37293, 85.98907000000008 ], [ 330.21495999999996, 77.47212000000007 ], [ 316.47625, 66.8119000000001 ], [ 304.80033, 62.43576000000007 ] ], "type": "bezier" }, { "pts": [ [ 304.80033, 62.43576000000007 ], [ 296.51117, 59.32897000000008 ], [ 283.69156, 61.184510000000046 ], [ 278.39124999999996, 61.39012000000008 ] ], "type": "bezier" }, { "pts": [ [ 278.39124999999996, 61.39012000000008 ], [ 265.8916, 61.87502000000006 ], [ 263.08717999999993, 62.59150000000005 ], [ 257.07865, 66.8352700000001 ] ], "type": "bezier" }, { "pts": [ [ 257.07865, 66.8352700000001 ], [ 252.7785, 69.87239000000011 ], [ 251.09118999999998, 72.12513000000007 ], [ 247.12428999999997, 80.12552000000011 ] ], "type": "bezier" }, { "pts": [ [ 247.12428999999997, 80.12552000000011 ], [ 245.65783, 83.08311000000009 ], [ 243.87140999999997, 86.24850000000009 ], [ 243.15467999999998, 87.15973000000008 ] ], "type": "bezier" }, { "pts": [ [ 243.15467999999998, 87.15973000000008 ], [ 241.53868, 89.21401000000009 ], [ 240.33821999999998, 92.00227000000007 ], [ 239.07018999999997, 96.64660000000009 ] ], "type": "bezier" }, { "pts": [ [ 239.07018999999997, 96.64660000000009 ], [ 237.75381, 101.46827000000008 ], [ 235.84958999999998, 105.1839500000001 ], [ 233.3671, 107.7751300000001 ] ], "type": "bezier" }, { "pts": [ [ 233.3671, 107.7751300000001 ], [ 229.95679, 111.3347500000001 ], [ 227.35868, 111.61818000000011 ], [ 217.29223000000002, 109.52876000000009 ] ], "type": "bezier" }, { "pts": [ [ 217.29223000000002, 109.52876000000009 ], [ 209.01428000000004, 107.8105700000001 ], [ 202.65403000000003, 107.3105000000001 ], [ 189.06362000000001, 107.30929000000009 ] ], "type": "bezier" }, { "pts": [ [ 189.06362000000001, 107.30929000000009 ], [ 175.16467, 107.30829000000011 ], [ 168.61304, 107.81458000000009 ], [ 160.48028, 109.51918000000006 ] ], "type": "bezier" }, { "pts": [ [ 160.48028, 
109.51918000000006 ], [ 153.70189, 110.93990000000008 ], [ 149.61802999999998, 111.19656000000009 ], [ 147.6311, 110.32672000000008 ] ], "type": "bezier" }, { "pts": [ [ 147.6311, 110.32672000000008 ], [ 145.09931, 109.21836000000008 ], [ 141.81623000000002, 104.2451400000001 ], [ 140.17926, 99.0389100000001 ] ], "type": "bezier" }, { "pts": [ [ 140.17926, 99.0389100000001 ], [ 139.83958, 97.95854000000008 ], [ 139.25403, 95.17858000000012 ], [ 138.87802, 92.86123000000009 ] ], "type": "bezier" }, { "pts": [ [ 138.87802, 92.86123000000009 ], [ 138.13918999999999, 88.3078000000001 ], [ 137.63853999999998, 86.92275000000006 ], [ 135.72487999999998, 84.1384900000001 ] ], "type": "bezier" }, { "pts": [ [ 135.72487999999998, 84.1384900000001 ], [ 134.24129999999997, 81.97998000000013 ], [ 132.49079, 77.72286000000008 ], [ 131.49261, 73.84588000000008 ] ], "type": "bezier" }, { "pts": [ [ 131.49261, 73.84588000000008 ], [ 129.82045, 67.88159000000007 ], [ 123.28336000000002, 64.78068000000007 ], [ 118.26866000000001, 62.184580000000096 ] ], "type": "bezier" }, { "pts": [ [ 118.26866000000001, 62.184580000000096 ], [ 105.37443000000002, 56.949770000000115 ], [ 79.61898000000002, 59.986980000000074 ], [ 69.56986, 64.45407000000012 ] ], "type": "bezier" }, { "pts": [ [ 69.56986, 64.45407000000012 ], [ 62.31708000000003, 66.80784000000011 ], [ 55.16890999999998, 74.26103000000012 ], [ 49.15395000000001, 80.5754500000001 ] ], "type": "bezier" }, { "pts": [ [ 49.15395000000001, 80.5754500000001 ], [ 44.64042000000006, 85.31357000000008 ], [ 37.659959999999955, 96.6265800000001 ], [ 37.659959999999955, 96.6265800000001 ] ], "type": "bezier" } ] }, { "edgecolor": "#000000", "linewidth": 2.0, "id": "path3934", "items": [ { "pts": [ [ 228.77282999999994, 110.6658000000001 ], [ 283.3600799999999, 112.00488000000013 ], [ 301.03102999999993, 122.80088000000012 ], [ 326.43745999999993, 147.26348000000007 ] ], "type": "bezier" }, { "pts": [ [ 326.43745999999993, 147.26348000000007 ], [ 347.5813099999999, 167.62185000000005 ], [ 358.15629999999993, 209.98714000000007 ], [ 361.9688699999999, 229.2770200000001 ] ], "type": "bezier" } ] }, { "edgecolor": "#000000", "linewidth": 2.0, "id": "path3934-3", "items": [ { "pts": [ [ 147.89750999999995, 110.54101000000014 ], [ 93.31025999999997, 111.88008000000013 ], [ 74.23293999999999, 120.10832000000016 ], [ 48.90569999999991, 144.94685000000015 ] ], "type": "bezier" }, { "pts": [ [ 48.90569999999991, 144.94685000000015 ], [ 26.439229999999952, 166.97985000000017 ], [ 18.182559999999853, 215.12291000000016 ], [ 14.369979999999941, 234.41280000000017 ] ], "type": "bezier" } ] }, { "edgecolor": "#000000", "linewidth": 2.0, "id": "path3961", "items": [ { "pts": [ [ 206.10172999999998, 398.06479000000013 ], [ 199.02527999999995, 390.1472900000001 ], [ 196.4398, 374.6868200000001 ], [ 196.17774999999995, 371.41061000000013 ] ], "type": "bezier" }, { "pts": [ [ 196.17774999999995, 371.41061000000013 ], [ 195.91558999999995, 368.13440000000014 ], [ 195.02346999999997, 325.3672300000001 ], [ 195.80973999999992, 317.7227400000001 ] ], "type": "bezier" }, { "pts": [ [ 195.80973999999992, 317.7227400000001 ], [ 196.59599999999995, 310.0782700000001 ], [ 197.75026999999994, 266.0259700000001 ], [ 198.27447999999993, 255.65131000000008 ] ], "type": "bezier" }, { "pts": [ [ 198.27447999999993, 255.65131000000008 ], [ 198.79858999999993, 245.2766600000001 ], [ 199.5848499999999, 225.8924300000001 ], [ 198.5365299999999, 216.88288000000006 ] ], "type": "bezier" }, { "pts": [ [ 
198.5365299999999, 216.88288000000006 ], [ 197.48811999999992, 207.87331000000006 ], [ 193.45956999999993, 158.52004000000005 ], [ 200.17536999999993, 130.6260000000001 ] ], "type": "bezier" }, { "pts": [ [ 200.17536999999993, 130.6260000000001 ], [ 202.11621999999994, 122.56454000000008 ], [ 202.68982999999992, 108.34649000000007 ], [ 211.0511199999999, 108.41507000000007 ] ], "type": "bezier" } ] }, { "edgecolor": "#000000", "linewidth": 2.0, "id": "path3961-6", "items": [ { "pts": [ [ 171.38831, 395.38242000000014 ], [ 178.46477, 387.4649200000001 ], [ 180.43322, 374.6868200000001 ], [ 180.69527, 371.41061000000013 ] ], "type": "bezier" }, { "pts": [ [ 180.69527, 371.41061000000013 ], [ 180.95743, 368.13440000000014 ], [ 181.84955000000002, 325.3672300000001 ], [ 181.06329, 317.7227400000001 ] ], "type": "bezier" }, { "pts": [ [ 181.06329, 317.7227400000001 ], [ 180.27702999999997, 310.0782700000001 ], [ 179.12275, 266.0259700000001 ], [ 178.59854, 255.65131000000008 ] ], "type": "bezier" }, { "pts": [ [ 178.59854, 255.65131000000008 ], [ 178.07444000000004, 245.2766600000001 ], [ 177.28818, 225.8924300000001 ], [ 178.33649000000003, 216.88288000000006 ] ], "type": "bezier" }, { "pts": [ [ 178.33649000000003, 216.88288000000006 ], [ 179.38491000000005, 207.87331000000006 ], [ 183.55575000000005, 159.01631000000003 ], [ 175.67839000000004, 131.88084000000003 ] ], "type": "bezier" }, { "pts": [ [ 175.67839000000004, 131.88084000000003 ], [ 173.20607, 123.36452000000003 ], [ 170.54300000000006, 109.12355000000002 ], [ 162.18170000000003, 109.19213000000002 ] ], "type": "bezier" } ] }, { "edgecolor": "#000000", "linewidth": 0.8, "id": "path3934-1", "items": [ { "pts": [ [ 307.30472, 87.8019900000001 ], [ 337.21710999999993, 121.95420000000013 ], [ 349.24515999999994, 159.53521000000012 ], [ 363.80355, 188.92673000000013 ] ], "type": "bezier" } ] }, { "edgecolor": "#000000", "linewidth": 0.8, "id": "path3934-1-4", "items": [ { "pts": [ [ 69.05934999999994, 87.80200000000013 ], [ 39.14695999999992, 121.9542100000001 ], [ 28.69142999999997, 156.80504000000013 ], [ 10.987999999999943, 185.65052000000014 ] ], "type": "bezier" } ] }, { "edgecolor": "#000000", "linewidth": 0.8, "id": "path4042", "items": [ { "pts": [ [ 164.20233999999994, 142.62218000000013 ], [ 122.45555999999993, 162.7305500000001 ], [ 95.01290999999992, 183.44360000000012 ], [ 88.71990999999991, 227.2575200000001 ] ], "type": "bezier" }, { "pts": [ [ 88.71990999999991, 227.2575200000001 ], [ 86.0989699999999, 245.2766600000001 ], [ 100.77598999999992, 275.3085500000001 ], [ 90.81653999999992, 282.4070000000001 ] ], "type": "bezier" }, { "pts": [ [ 90.81653999999992, 282.4070000000001 ], [ 79.69409999999993, 290.3343100000001 ], [ 66.17986999999994, 279.1307900000001 ], [ 64.60733999999991, 291.1435500000001 ] ], "type": "bezier" }, { "pts": [ [ 64.60733999999991, 291.1435500000001 ], [ 63.03481999999991, 303.1563100000001 ], [ 90.38801999999993, 318.7157700000001 ], [ 96.58262999999988, 324.4516500000001 ] ], "type": "bezier" }, { "pts": [ [ 96.58262999999988, 324.4516500000001 ], [ 106.0178699999999, 333.18820000000005 ], [ 115.86563999999987, 341.5448600000001 ], [ 108.63880999999986, 352.2994000000001 ] ], "type": "bezier" }, { "pts": [ [ 108.63880999999986, 352.2994000000001 ], [ 101.30019999999985, 363.2200900000001 ], [ 107.16715999999985, 371.61503000000005 ], [ 118.39203999999984, 379.5405600000001 ] ], "type": "bezier" } ] }, { "edgecolor": "#000000", "linewidth": 0.8, "id": "path4044", "items": [ { "pts": [ [ 
100.68070999999998, 328.14708000000013 ], [ 121.12392, 321.5946700000001 ], [ 139.56566999999995, 305.8864800000001 ], [ 136.94473, 291.1435500000001 ] ], "type": "bezier" } ] }, { "edgecolor": "#000000", "linewidth": 0.8, "id": "path4042-8", "items": [ { "pts": [ [ 225.56426999999996, 157.3651200000001 ], [ 267.31104999999997, 177.4734800000001 ], [ 273.14905999999996, 196.91707000000008 ], [ 280.07928, 225.6194200000001 ] ], "type": "bezier" }, { "pts": [ [ 280.07928, 225.6194200000001 ], [ 286.10420999999997, 250.57224000000008 ], [ 256.79823, 286.4326900000001 ], [ 265.27513, 301.2532300000001 ] ], "type": "bezier" }, { "pts": [ [ 265.27513, 301.2532300000001 ], [ 272.17307999999997, 313.3133900000001 ], [ 287.418, 308.0706200000001 ], [ 288.99052, 320.0833800000001 ] ], "type": "bezier" }, { "pts": [ [ 288.99052, 320.0833800000001 ], [ 286.50087, 330.0638000000001 ], [ 253.74638, 334.9046900000001 ], [ 263.82974, 351.2073400000001 ] ], "type": "bezier" }, { "pts": [ [ 263.82974, 351.2073400000001 ], [ 271.16836, 362.1280300000001 ], [ 261.63203000000004, 377.6214100000001 ], [ 250.40716000000003, 385.54694000000006 ] ], "type": "bezier" } ] }, { "edgecolor": "#000000", "linewidth": 0.8, "id": "path4044-1", "items": [ { "pts": [ [ 262.35249, 342.3439800000001 ], [ 241.90929, 335.7915600000001 ], [ 226.61268, 328.27389000000016 ], [ 229.23361999999997, 313.53096000000016 ] ], "type": "bezier" } ] } ], "metadata": { "bounds": [ -0.07809000000008837, 373.49627999999996, 0.7964899999998352, 404.72245999999996 ] } }
nilearn/plotting/glass_brain_files/brain_schematics_side.json
{ "paths": [ { "edgecolor": "#a6a6a6", "linewidth": 1.7, "id": "path3188", "items": [ { "pts": [ [ 285.05676600000004, 170.044956 ], [ 281.62491600000004, 173.47679600000004 ], [ 281.00675600000005, 175.614416 ], [ 280.96690600000005, 184.18773600000003 ] ], "type": "bezier" }, { "pts": [ [ 280.96690600000005, 184.18773600000003 ], [ 281.13342600000004, 194.936956 ], [ 274.47422600000004, 195.80748600000004 ], [ 271.50339600000007, 205.42566600000004 ] ], "type": "bezier" }, { "pts": [ [ 271.50339600000007, 205.42566600000004 ], [ 269.61667600000004, 211.88584600000002 ], [ 258.37243600000005, 220.77295600000002 ], [ 241.38229600000005, 229.23246600000004 ] ], "type": "bezier" }, { "pts": [ [ 241.38229600000005, 229.23246600000004 ], [ 227.44955600000003, 235.31662600000004 ], [ 212.23987600000004, 238.45524600000005 ], [ 201.96564600000005, 229.13816600000007 ] ], "type": "bezier" }, { "pts": [ [ 201.96564600000005, 229.13816600000007 ], [ 197.18688600000004, 224.77667600000007 ], [ 196.27333600000006, 223.5005260000001 ], [ 195.17738600000004, 219.65554600000007 ] ], "type": "bezier" }, { "pts": [ [ 195.17738600000004, 219.65554600000007 ], [ 193.63062600000003, 214.22903600000006 ], [ 189.21417600000004, 207.2876960000001 ], [ 186.83443600000004, 206.54292600000008 ] ], "type": "bezier" }, { "pts": [ [ 186.83443600000004, 206.54292600000008 ], [ 178.42024600000005, 201.78997600000008 ], [ 172.31912600000004, 202.20584600000007 ], [ 166.50378600000005, 208.41763600000007 ] ], "type": "bezier" }, { "pts": [ [ 166.50378600000005, 208.41763600000007 ], [ 158.88090600000004, 217.41960600000004 ], [ 161.32788600000003, 228.35945600000008 ], [ 167.72833600000004, 237.37320600000007 ] ], "type": "bezier" }, { "pts": [ [ 167.72833600000004, 237.37320600000007 ], [ 175.33981600000004, 247.72795600000006 ], [ 177.80062600000005, 249.95509600000005 ], [ 186.25587600000003, 254.14136600000006 ] ], "type": "bezier" }, {
"pts": [ [ 186.25587600000003, 254.14136600000006 ], [ 204.26678600000002, 260.4015260000001 ], [ 222.35850600000003, 255.95884600000005 ], [ 242.629366, 262.15335600000003 ] ], "type": "bezier" }, { "pts": [ [ 242.629366, 262.15335600000003 ], [ 250.418406, 264.568976 ], [ 263.961756, 266.0407160000001 ], [ 272.229016, 265.36990600000007 ] ], "type": "bezier" }, { "pts": [ [ 272.229016, 265.36990600000007 ], [ 304.981826, 254.44256600000008 ], [ 330.932066, 250.02618600000008 ], [ 355.260596, 232.97461600000008 ] ], "type": "bezier" }, { "pts": [ [ 355.260596, 232.97461600000008 ], [ 363.51184600000005, 227.1914260000001 ], [ 372.36210600000004, 219.07057600000007 ], [ 375.196026, 210.2516460000001 ] ], "type": "bezier" }, { "pts": [ [ 375.196026, 210.2516460000001 ], [ 371.858846, 205.1332660000001 ], [ 368.812976, 199.13838600000008 ], [ 367.508636, 193.7316960000001 ] ], "type": "bezier" }, { "pts": [ [ 367.508636, 193.7316960000001 ], [ 366.876436, 190.7794060000001 ], [ 360.422416, 183.74292600000012 ], [ 356.502446, 181.73220600000013 ] ], "type": "bezier" }, { "pts": [ [ 356.502446, 181.73220600000013 ], [ 352.556396, 179.70813600000014 ], [ 334.53569600000003, 176.23317600000013 ], [ 330.85075600000005, 176.78576600000014 ] ], "type": "bezier" }, { "pts": [ [ 330.85075600000005, 176.78576600000014 ], [ 329.03911600000004, 177.0574360000001 ], [ 324.74364600000007, 178.92503600000015 ], [ 321.3052660000001, 180.93600600000013 ] ], "type": "bezier" }, { "pts": [ [ 321.3052660000001, 180.93600600000013 ], [ 310.3370560000001, 187.35081600000012 ], [ 305.31727600000005, 186.04362600000013 ], [ 295.54088600000006, 174.22673600000013 ] ], "type": "bezier" }, { "pts": [ [ 295.54088600000006, 174.22673600000013 ], [ 289.11577600000004, 166.46058600000015 ], [ 288.77641600000004, 166.32521600000013 ], [ 285.05676600000004, 170.04487600000016 ] ], "type": "bezier" }, { "pts": [ [ 285.05676600000004, 170.04487600000016 ], [ 285.05676600000004, 170.044956 ] ], "type": "segment" }, { "pts": [ [ 320.096896, 194.20201600000019 ], [ 329.665496, 197.75096600000018 ], [ 332.971516, 201.85673600000018 ], [ 330.678836, 207.3438560000002 ] ], "type": "bezier" }, { "pts": [ [ 330.678836, 207.3438560000002 ], [ 328.976716, 211.4176260000002 ], [ 321.753496, 218.8485060000002 ], [ 315.371576, 223.09120600000017 ] ], "type": "bezier" }, { "pts": [ [ 315.371576, 223.09120600000017 ], [ 302.799676, 231.44898600000016 ], [ 283.386916, 239.78312600000018 ], [ 273.488456, 241.07214600000017 ] ], "type": "bezier" }, { "pts": [ [ 273.488456, 241.07214600000017 ], [ 269.319166, 241.61508600000016 ], [ 265.335356, 240.54591600000018 ], [ 265.335356, 238.88400600000017 ] ], "type": "bezier" }, { "pts": [ [ 265.335356, 238.88400600000017 ], [ 265.335356, 237.63738600000016 ], [ 269.79472599999997, 233.33758600000016 ], [ 278.409406, 226.2777960000002 ] ], "type": "bezier" }, { "pts": [ [ 278.409406, 226.2777960000002 ], [ 284.94505599999997, 220.9217960000002 ], [ 289.81577599999997, 215.3205060000002 ], [ 295.861036, 206.20854600000018 ] ], "type": "bezier" }, { "pts": [ [ 295.861036, 206.20854600000018 ], [ 301.40639600000003, 197.8501060000002 ], [ 304.569876, 194.3490060000002 ], [ 307.943556, 192.8365160000002 ] ], "type": "bezier" }, { "pts": [ [ 307.943556, 192.8365160000002 ], [ 311.455056, 191.2622760000002 ], [ 312.481846, 191.37763600000017 ], [ 320.096896, 194.20201600000019 ] ], "type": "bezier" }, { "pts": [ [ 320.096896, 194.20201600000019 ], [ 320.096896, 194.20201600000019 ] ], "type": "segment" } ] 
}, { "edgecolor": "#a6a6a6", "linewidth": 1.7, "id": "path3192", "items": [ { "pts": [ [ 167.856186, 2.2470559999999864 ], [ 165.703036, 2.8368059999999673 ], [ 162.355846, 3.909556000000009 ], [ 160.418016, 4.630966000000001 ] ], "type": "bezier" }, { "pts": [ [ 160.418016, 4.630966000000001 ], [ 158.480176, 5.3523759999999925 ], [ 154.156986, 6.204376000000025 ], [ 150.810936, 6.524315999999999 ] ], "type": "bezier" }, { "pts": [ [ 150.810936, 6.524315999999999 ], [ 147.464876, 6.8442360000000235 ], [ 144.523676, 7.309515999999974 ], [ 144.274916, 7.558275999999978 ] ], "type": "bezier" }, { "pts": [ [ 144.274916, 7.558275999999978 ], [ 143.764786, 8.068405999999982 ], [ 146.75385599999998, 21.776476000000002 ], [ 148.387066, 26.41683599999999 ] ], "type": "bezier" }, { "pts": [ [ 148.387066, 26.41683599999999 ], [ 148.973716, 28.083655999999962 ], [ 151.52231600000002, 31.658785999999964 ], [ 154.050616, 34.361545999999976 ] ], "type": "bezier" }, { "pts": [ [ 154.050616, 34.361545999999976 ], [ 157.333146, 37.870566 ], [ 158.808356, 40.132915999999966 ], [ 159.209986, 42.27375599999999 ] ], "type": "bezier" }, { "pts": [ [ 159.209986, 42.27375599999999 ], [ 159.51932599999998, 43.92269599999997 ], [ 160.609296, 47.92275599999999 ], [ 161.63213599999997, 51.162796000000014 ] ], "type": "bezier" }, { "pts": [ [ 161.63213599999997, 51.162796000000014 ], [ 162.65497599999998, 54.402826000000005 ], [ 164.03688599999998, 60.68604600000003 ], [ 164.70304599999997, 65.125516 ] ], "type": "bezier" }, { "pts": [ [ 164.70304599999997, 65.125516 ], [ 165.36920599999996, 69.564976 ], [ 166.56760599999998, 74.497656 ], [ 167.36613599999998, 76.08702599999998 ] ], "type": "bezier" }, { "pts": [ [ 167.36613599999998, 76.08702599999998 ], [ 168.164686, 77.67639599999995 ], [ 169.26021599999999, 80.91461599999997 ], [ 169.80064599999997, 83.283096 ] ], "type": "bezier" }, { "pts": [ [ 169.80064599999997, 83.283096 ], [ 170.34109599999996, 85.651566 ], [ 171.76093599999996, 89.87956600000001 ], [ 172.95587599999996, 92.678676 ] ], "type": "bezier" }, { "pts": [ [ 172.95587599999996, 92.678676 ], [ 174.15080599999996, 95.477776 ], [ 175.38538599999995, 99.00111600000002 ], [ 175.69937599999997, 100.50832600000001 ] ], "type": "bezier" }, { "pts": [ [ 175.69937599999997, 100.50832600000001 ], [ 176.01335599999996, 102.015536 ], [ 177.09326599999997, 104.88210600000002 ], [ 178.09914599999996, 106.87850600000002 ] ], "type": "bezier" }, { "pts": [ [ 178.09914599999996, 106.87850600000002 ], [ 179.10504599999996, 108.87489600000004 ], [ 180.51607599999997, 113.98375600000003 ], [ 181.23478599999996, 118.23150600000002 ] ], "type": "bezier" }, { "pts": [ [ 181.23478599999996, 118.23150600000002 ], [ 182.83395599999994, 127.68304600000005 ], [ 186.29300599999996, 139.19160600000004 ], [ 189.40856599999995, 145.42638600000004 ] ], "type": "bezier" }, { "pts": [ [ 189.40856599999995, 145.42638600000004 ], [ 190.79815599999995, 148.20721600000002 ], [ 191.73662599999994, 151.31807600000002 ], [ 191.73662599999994, 153.143526 ] ], "type": "bezier" }, { "pts": [ [ 191.73662599999994, 153.143526 ], [ 191.73662599999994, 156.56922600000001 ], [ 192.50588599999995, 158.310816 ], [ 195.87758599999995, 162.518506 ] ], "type": "bezier" }, { "pts": [ [ 195.87758599999995, 162.518506 ], [ 198.44791599999996, 165.726136 ], [ 198.28906599999996, 167.85019599999998 ], [ 195.48028599999995, 167.831346 ] ], "type": "bezier" }, { "pts": [ [ 195.48028599999995, 167.831346 ], [ 193.17667599999996, 167.815946 ], [ 
192.21453599999995, 167.142026 ], [ 190.87173599999994, 164.603636 ] ], "type": "bezier" }, { "pts": [ [ 190.87173599999994, 164.603636 ], [ 189.76906599999995, 162.519226 ], [ 182.97547599999993, 156.88181600000001 ], [ 181.56621599999994, 156.88181600000001 ] ], "type": "bezier" }, { "pts": [ [ 181.56621599999994, 156.88181600000001 ], [ 180.53438599999993, 156.88181600000001 ], [ 179.73715599999994, 160.325736 ], [ 180.33381599999993, 162.20563600000003 ] ], "type": "bezier" }, { "pts": [ [ 180.33381599999993, 162.20563600000003 ], [ 180.87841599999993, 163.921536 ], [ 186.74858599999993, 170.23408600000005 ], [ 192.13413599999993, 174.89524600000004 ] ], "type": "bezier" }, { "pts": [ [ 192.13413599999993, 174.89524600000004 ], [ 194.55278599999994, 176.98856600000005 ], [ 197.47674599999993, 178.53122600000006 ], [ 201.12761599999993, 179.64014600000002 ] ], "type": "bezier" }, { "pts": [ [ 201.12761599999993, 179.64014600000002 ], [ 206.86967599999994, 181.38424600000002 ], [ 209.74482599999993, 181.136816 ], [ 209.74482599999993, 178.898576 ] ], "type": "bezier" }, { "pts": [ [ 209.74482599999993, 178.898576 ], [ 209.74482599999993, 178.146906 ], [ 210.71374599999996, 176.61622599999998 ], [ 211.89797599999991, 175.49708599999997 ] ], "type": "bezier" }, { "pts": [ [ 211.89797599999991, 175.49708599999997 ], [ 213.84503599999994, 173.65705599999995 ], [ 214.9335359999999, 173.392766 ], [ 223.2700959999999, 172.73581599999994 ] ], "type": "bezier" }, { "pts": [ [ 223.2700959999999, 172.73581599999994 ], [ 231.9962559999999, 172.04816599999992 ], [ 232.8751759999999, 171.81630599999994 ], [ 239.71236599999992, 168.39826599999992 ] ], "type": "bezier" }, { "pts": [ [ 239.71236599999992, 168.39826599999992 ], [ 251.04084599999993, 162.73494599999992 ], [ 256.1851059999999, 159.3901659999999 ], [ 258.1179259999999, 156.43102599999992 ] ], "type": "bezier" }, { "pts": [ [ 258.1179259999999, 156.43102599999992 ], [ 260.8092659999999, 152.31059599999992 ], [ 261.0603459999999, 149.80084599999992 ], [ 259.03841599999987, 147.23035599999992 ] ], "type": "bezier" }, { "pts": [ [ 259.03841599999987, 147.23035599999992 ], [ 257.31687599999987, 145.0417759999999 ] ], "type": "segment" }, { "pts": [ [ 257.31687599999987, 145.0417759999999 ], [ 255.34812599999987, 146.85123599999991 ] ], "type": "segment" }, { "pts": [ [ 255.34812599999987, 146.85123599999991 ], [ 254.26532599999985, 147.8464259999999 ], [ 252.56537599999984, 150.4769859999999 ], [ 251.57047599999987, 152.6969059999999 ] ], "type": "bezier" }, { "pts": [ [ 251.57047599999987, 152.6969059999999 ], [ 248.6879259999999, 159.1287059999999 ], [ 244.90499599999987, 160.7679659999999 ], [ 239.98139599999985, 157.71882599999992 ] ], "type": "bezier" }, { "pts": [ [ 239.98139599999985, 157.71882599999992 ], [ 236.65158599999984, 155.65670599999993 ], [ 222.41700599999984, 140.92219599999993 ], [ 221.54893599999986, 138.63898599999993 ] ], "type": "bezier" }, { "pts": [ [ 221.54893599999986, 138.63898599999993 ], [ 221.16368599999987, 137.62571599999995 ], [ 221.09445599999987, 135.59407599999992 ], [ 221.39507599999985, 134.12421599999993 ] ], "type": "bezier" }, { "pts": [ [ 221.39507599999985, 134.12421599999993 ], [ 221.81843599999985, 132.05416599999995 ], [ 222.46425599999986, 131.29604599999993 ], [ 224.26011599999987, 130.7609759999999 ] ], "type": "bezier" }, { "pts": [ [ 224.26011599999987, 130.7609759999999 ], [ 231.06814599999984, 128.7325059999999 ], [ 233.54873599999985, 127.47667599999994 ], [ 237.15915599999988, 
124.23059599999993 ] ], "type": "bezier" }, { "pts": [ [ 237.15915599999988, 124.23059599999993 ], [ 242.9417959999999, 119.03149599999995 ], [ 243.7178459999999, 117.13146599999993 ], [ 243.0974159999999, 109.69186599999995 ] ], "type": "bezier" }, { "pts": [ [ 243.0974159999999, 109.69186599999995 ], [ 242.81162599999988, 106.26503599999995 ], [ 241.5494859999999, 100.41312599999992 ], [ 240.25730599999991, 96.52378599999997 ] ], "type": "bezier" }, { "pts": [ [ 240.25730599999991, 96.52378599999997 ], [ 238.9781559999999, 92.67363599999999 ], [ 237.93156599999992, 89.03607599999998 ], [ 237.93156599999992, 88.44030599999996 ] ], "type": "bezier" }, { "pts": [ [ 237.93156599999992, 88.44030599999996 ], [ 237.93156599999992, 87.84454599999998 ], [ 236.3646859999999, 84.23835599999995 ], [ 234.44959599999993, 80.42655599999995 ] ], "type": "bezier" }, { "pts": [ [ 234.44959599999993, 80.42655599999995 ], [ 227.65487599999994, 66.90230599999995 ], [ 218.0258859999999, 59.92198599999995 ], [ 204.29877599999992, 58.56935599999997 ] ], "type": "bezier" }, { "pts": [ [ 204.29877599999992, 58.56935599999997 ], [ 201.27962599999992, 58.27184599999998 ], [ 198.27129599999992, 57.63296599999995 ], [ 197.6135859999999, 57.149625999999955 ] ], "type": "bezier" }, { "pts": [ [ 197.6135859999999, 57.149625999999955 ], [ 196.9181159999999, 56.63850599999995 ], [ 195.9132759999999, 53.48617599999994 ], [ 195.21203599999993, 49.61558599999995 ] ], "type": "bezier" }, { "pts": [ [ 195.21203599999993, 49.61558599999995 ], [ 194.54888599999992, 45.95523599999996 ], [ 193.61426599999993, 41.903385999999955 ], [ 193.13513599999993, 40.61149599999993 ] ], "type": "bezier" }, { "pts": [ [ 193.13513599999993, 40.61149599999993 ], [ 191.05412599999994, 35.00051599999995 ], [ 183.58817599999992, 20.83764599999995 ], [ 181.57842599999992, 18.68846599999995 ] ], "type": "bezier" }, { "pts": [ [ 181.57842599999992, 18.68846599999995 ], [ 180.37035599999993, 17.39656599999995 ], [ 179.1291659999999, 15.45873599999993 ], [ 178.8202359999999, 14.38214599999992 ] ], "type": "bezier" }, { "pts": [ [ 178.8202359999999, 14.38214599999992 ], [ 178.5113259999999, 13.305575999999917 ], [ 177.26421599999992, 10.839235999999914 ], [ 176.0489259999999, 8.90139599999992 ] ], "type": "bezier" }, { "pts": [ [ 176.0489259999999, 8.90139599999992 ], [ 174.8336259999999, 6.963565999999901 ], [ 173.5999759999999, 4.409135999999933 ], [ 173.3074859999999, 3.2248959999999443 ] ], "type": "bezier" }, { "pts": [ [ 173.3074859999999, 3.2248959999999443 ], [ 173.0149959999999, 2.0406659999999306 ], [ 172.5496359999999, 1.0949359999999615 ], [ 172.2733559999999, 1.1232659999999441 ] ], "type": "bezier" }, { "pts": [ [ 172.2733559999999, 1.1232659999999441 ], [ 171.9970659999999, 1.1516659999999206 ], [ 170.00934599999988, 1.6573159999999234 ], [ 167.8561859999999, 2.2470559999999296 ] ], "type": "bezier" }, { "pts": [ [ 167.8561859999999, 2.2470559999999296 ], [ 167.856186, 2.2470559999999864 ] ], "type": "segment" } ] }, { "edgecolor": "#a6a6a6", "linewidth": 1.7, "id": "path3194", "items": [ { "pts": [ [ 54.915596, 307.54319599999997 ], [ 71.357876, 278.573486 ], [ 76.838626, 265.263076 ], [ 94.06386600000002, 242.557096 ] ], "type": "bezier" }, { "pts": [ [ 94.06386600000002, 242.557096 ], [ 111.28909600000003, 219.85109599999998 ], [ 115.98689600000003, 214.370336 ], [ 115.98689600000003, 214.370336 ] ], "type": "bezier" }, { "pts": [ [ 115.98689600000003, 214.370336 ], [ 115.98689600000003, 214.370336 ], [ 101.89351600000003, 
194.01324599999998 ], [ 86.23420600000003, 178.353946 ] ], "type": "bezier" }, { "pts": [ [ 86.23420600000003, 178.353946 ], [ 70.57490600000003, 162.694636 ], [ 54.915596000000015, 141.554576 ], [ 40.03927100000002, 136.07381600000002 ] ], "type": "bezier" }, { "pts": [ [ 40.03927100000002, 136.07381600000002 ], [ 25.162930000000024, 130.59306600000002 ], [ 25.162930000000024, 130.59306600000002 ], [ 25.162930000000024, 130.59306600000002 ] ], "type": "bezier" } ] }, { "edgecolor": "#000000", "linewidth": 1.7, "id": "path3196", "items": [ { "pts": [ [ 25.906538000000005, 130.81424599999997 ], [ 54.859956000000004, 140.874756 ], [ 73.70677600000002, 141.554576 ], [ 115.986896, 132.15899599999995 ] ], "type": "bezier" }, { "pts": [ [ 115.986896, 132.15899599999995 ], [ 154.24213600000002, 121.25725599999993 ], [ 218.505446, 89.80412599999994 ], [ 242.854066, 70.22442599999994 ] ], "type": "bezier" } ] }, { "edgecolor": "#000000", "linewidth": 2.0, "id": "path3198", "items": [ { "pts": [ [ 363.680716, 105.39568600000001 ], [ 360.94034600000003, 129.667616 ], [ 348.52756600000004, 158.388326 ], [ 343.04679600000003, 163.47760599999998 ] ], "type": "bezier" }, { "pts": [ [ 343.04679600000003, 163.47760599999998 ], [ 311.457846, 192.81016599999998 ], [ 288.935676, 196.85853600000002 ], [ 260.443976, 210.846996 ] ], "type": "bezier" }, { "pts": [ [ 260.443976, 210.846996 ], [ 223.910576, 228.78364599999998 ], [ 186.26720600000002, 237.318826 ], [ 147.613526, 257.50537599999996 ] ], "type": "bezier" } ] }, { "edgecolor": "#000000", "linewidth": 2.0, "id": "path3144-6", "items": [ { "pts": [ [ 181.713726, 4.255275999999981 ], [ 186.744256, 7.480325999999991 ], [ 191.725906, 13.222576000000004 ], [ 200.86266600000002, 26.32789600000001 ] ], "type": "bezier" }, { "pts": [ [ 200.86266600000002, 26.32789600000001 ], [ 210.14104600000002, 39.63633600000003 ], [ 214.74096600000001, 44.71784600000001 ], [ 224.947836, 52.93462599999998 ] ], "type": "bezier" }, { "pts": [ [ 224.947836, 52.93462599999998 ], [ 229.25473599999998, 56.401775999999984 ], [ 235.148576, 61.50685599999997 ], [ 238.04524600000002, 64.27920599999999 ] ], "type": "bezier" }, { "pts": [ [ 238.04524600000002, 64.27920599999999 ], [ 243.311936, 69.319886 ] ], "type": "segment" }, { "pts": [ [ 243.311936, 69.319886 ], [ 247.700406, 65.490026 ] ], "type": "segment" }, { "pts": [ [ 247.700406, 65.490026 ], [ 265.974496, 49.54207600000001 ], [ 271.824486, 45.58971600000001 ], [ 281.05143599999997, 42.95745599999998 ] ], "type": "bezier" }, { "pts": [ [ 281.05143599999997, 42.95745599999998 ], [ 292.286296, 39.752365999999995 ], [ 309.95435599999996, 42.85496599999999 ], [ 331.82417599999997, 51.873335999999995 ] ], "type": "bezier" }, { "pts": [ [ 331.82417599999997, 51.873335999999995 ], [ 353.77746599999995, 60.926135999999985 ], [ 359.72048599999994, 69.02982600000001 ], [ 362.89850599999994, 94.24515600000001 ] ], "type": "bezier" }, { "pts": [ [ 362.89850599999994, 94.24515600000001 ], [ 364.2422859999999, 104.907106 ] ], "type": "segment" }, { "pts": [ [ 364.2422859999999, 104.907106 ], [ 370.5432159999999, 105.39770599999997 ] ], "type": "segment" }, { "pts": [ [ 370.5432159999999, 105.39770599999997 ], [ 374.00873599999994, 105.66755599999999 ], [ 381.9530059999999, 105.89963599999999 ], [ 388.1971559999999, 105.91344599999996 ] ], "type": "bezier" }, { "pts": [ [ 388.1971559999999, 105.91344599999996 ], [ 404.2507059999999, 105.94894599999998 ], [ 422.2188859999999, 107.17262599999998 ], [ 428.9988659999999, 108.69209599999999 ] ], 
"type": "bezier" }, { "pts": [ [ 428.9988659999999, 108.69209599999999 ], [ 441.5447159999999, 111.50375600000001 ], [ 456.7995459999999, 119.386866 ], [ 465.84561599999995, 127.73310600000002 ] ], "type": "bezier" }, { "pts": [ [ 465.84561599999995, 127.73310600000002 ], [ 482.1868159999999, 142.810116 ], [ 491.7203659999999, 168.23389600000002 ], [ 490.5238559999999, 193.54429600000003 ] ], "type": "bezier" }, { "pts": [ [ 490.5238559999999, 193.54429600000003 ], [ 489.6047259999999, 212.98720600000001 ], [ 486.2867759999999, 225.70824600000003 ], [ 477.54287599999986, 243.31362600000003 ] ], "type": "bezier" }, { "pts": [ [ 477.54287599999986, 243.31362600000003 ], [ 469.05161599999985, 260.41028600000004 ], [ 460.73882599999985, 273.52186600000005 ], [ 448.0104259999999, 289.894536 ] ], "type": "bezier" }, { "pts": [ [ 448.0104259999999, 289.894536 ], [ 433.3216159999999, 308.788896 ], [ 426.4308059999999, 315.458216 ], [ 404.7608559999999, 331.754056 ] ], "type": "bezier" }, { "pts": [ [ 404.7608559999999, 331.754056 ], [ 385.52063599999985, 346.22274600000003 ], [ 371.5834559999999, 354.135956 ], [ 338.08738599999987, 369.609756 ] ], "type": "bezier" }, { "pts": [ [ 338.08738599999987, 369.609756 ], [ 309.29215599999986, 382.91195600000003 ], [ 303.5687859999999, 384.94184600000006 ], [ 287.0757659999999, 387.70189600000003 ] ], "type": "bezier" }, { "pts": [ [ 287.0757659999999, 387.70189600000003 ], [ 279.1744659999999, 389.02414300000004 ], [ 269.18643599999984, 391.163593 ], [ 264.88012599999985, 392.456203 ] ], "type": "bezier" }, { "pts": [ [ 264.88012599999985, 392.456203 ], [ 251.89822599999985, 396.352953 ], [ 247.63734599999987, 396.998533 ], [ 228.86373599999985, 397.913233 ] ], "type": "bezier" }, { "pts": [ [ 228.86373599999985, 397.913233 ], [ 218.95922599999983, 398.395803 ], [ 207.38293599999986, 399.12858300000005 ], [ 203.13866599999986, 399.54163300000005 ] ], "type": "bezier" }, { "pts": [ [ 203.13866599999986, 399.54163300000005 ], [ 182.67672599999986, 401.53294300000005 ], [ 176.30652599999985, 400.393163 ], [ 148.21831599999985, 389.715053 ] ], "type": "bezier" }, { "pts": [ [ 148.21831599999985, 389.715053 ], [ 105.14435599999985, 373.33990600000004 ], [ 97.47346599999986, 367.458176 ], [ 64.08522599999984, 325.205106 ] ], "type": "bezier" }, { "pts": [ [ 64.08522599999984, 325.205106 ], [ 43.13371699999984, 298.690826 ], [ 35.822661999999845, 286.364036 ], [ 17.892870999999836, 247.322516 ] ], "type": "bezier" }, { "pts": [ [ 17.892870999999836, 247.322516 ], [ 3.6725989999998347, 216.35834599999998 ], [ 0.8099779999998376, 204.69662600000004 ], [ 1.6279149999998381, 181.062366 ] ], "type": "bezier" }, { "pts": [ [ 1.6279149999998381, 181.062366 ], [ 2.0940639999998396, 167.593276 ], [ 3.3010369999998375, 162.35695599999997 ], [ 8.21522499999984, 152.484146 ] ], "type": "bezier" }, { "pts": [ [ 8.21522499999984, 152.484146 ], [ 11.820148999999837, 145.24170600000002 ], [ 13.446409999999837, 143.16269599999998 ], [ 23.862313999999834, 132.481086 ] ], "type": "bezier" }, { "pts": [ [ 23.862313999999834, 132.481086 ], [ 29.348156999999837, 126.85530599999998 ], [ 33.26179999999983, 122.12192600000003 ], [ 34.22173299999984, 119.95182599999998 ] ], "type": "bezier" }, { "pts": [ [ 34.22173299999984, 119.95182599999998 ], [ 35.46665399999984, 117.13748599999997 ], [ 35.930818999999836, 113.36546599999997 ], [ 36.625833999999834, 100.41512599999999 ] ], "type": "bezier" }, { "pts": [ [ 36.625833999999834, 100.41512599999999 ], [ 37.58909899999984, 
82.46599599999996 ], [ 38.56066199999983, 77.20167599999996 ], [ 42.692537999999836, 67.54301599999997 ] ], "type": "bezier" }, { "pts": [ [ 42.692537999999836, 67.54301599999997 ], [ 54.77822599999984, 39.29149599999994 ], [ 76.87204599999984, 18.225175999999976 ], [ 103.98078599999985, 9.105015999999978 ] ], "type": "bezier" }, { "pts": [ [ 103.98078599999985, 9.105015999999978 ], [ 110.62866599999984, 6.868485999999962 ], [ 110.65923599999985, 6.865965999999958 ], [ 131.77604599999984, 6.810535999999956 ] ], "type": "bezier" }, { "pts": [ [ 131.77604599999984, 6.810535999999956 ], [ 152.83738599999984, 6.755335999999943 ], [ 152.93797599999985, 6.74703599999998 ], [ 158.78834599999985, 4.598825999999974 ] ], "type": "bezier" }, { "pts": [ [ 158.78834599999985, 4.598825999999974 ], [ 171.98418599999985, -0.24658400000004121 ], [ 174.62975599999984, -0.28622400000000425 ], [ 181.71372599999984, 4.255275999999981 ] ], "type": "bezier" }, { "pts": [ [ 181.71372599999984, 4.255275999999981 ], [ 181.713726, 4.255275999999981 ] ], "type": "segment" } ] }, { "edgecolor": "#a6a6a6", "linewidth": 1.7, "id": "path4040", "items": [ { "pts": [ [ 135.071886, 371.328006 ], [ 141.161926, 350.843326 ], [ 152.234736, 319.83948599999997 ], [ 183.23857600000002, 313.74944600000003 ] ], "type": "bezier" }, { "pts": [ [ 183.23857600000002, 313.74944600000003 ], [ 214.24241600000005, 307.659406 ], [ 240.81714600000004, 314.856726 ], [ 261.855456, 312.642166 ] ], "type": "bezier" }, { "pts": [ [ 261.855456, 312.642166 ], [ 301.206846, 308.499936 ], [ 348.776946, 284.406526 ], [ 370.922546, 264.475486 ] ], "type": "bezier" }, { "pts": [ [ 370.922546, 264.475486 ], [ 393.068146, 244.544436 ], [ 395.282696, 232.36436600000002 ], [ 395.282696, 232.36436600000002 ] ], "type": "bezier" } ] }, { "edgecolor": "#000000", "linewidth": 0.8, "id": "path3968", "items": [ { "pts": [ [ 221.993376, 397.349073 ], [ 221.993376, 397.349073 ], [ 216.53245600000002, 384.100756 ], [ 220.88608600000003, 379.078966 ] ], "type": "bezier" }, { "pts": [ [ 220.88608600000003, 379.078966 ], [ 230.47676600000005, 368.016396 ], [ 265.177296, 354.165156 ], [ 267.391856, 346.414206 ] ], "type": "bezier" }, { "pts": [ [ 267.391856, 346.414206 ], [ 269.606416, 338.66324599999996 ], [ 266.83821600000005, 329.805006 ], [ 272.374616, 320.94676599999997 ] ], "type": "bezier" }, { "pts": [ [ 272.374616, 320.94676599999997 ], [ 293.959306, 294.489826 ], [ 295.119636, 262.04576599999996 ], [ 291.752016, 232.36436599999996 ] ], "type": "bezier" } ] }, { "edgecolor": "#000000", "linewidth": 0.8, "id": "path3970", "items": [ { "pts": [ [ 108.49717600000002, 368.81694600000003 ], [ 134.28892600000003, 367.883346 ], [ 133.62735600000002, 356.013376 ], [ 147.23568600000002, 352.89572599999997 ] ], "type": "bezier" }, { "pts": [ [ 147.23568600000002, 352.89572599999997 ], [ 161.24503600000003, 349.68620599999997 ], [ 176.66999600000003, 354.92661599999997 ], [ 190.273726, 350.289676 ] ], "type": "bezier" }, { "pts": [ [ 190.273726, 350.289676 ], [ 204.667886, 345.383316 ], [ 216.96957600000002, 339.029986 ], [ 228.01624600000002, 324.660086 ] ], "type": "bezier" }, { "pts": [ [ 228.01624600000002, 324.660086 ], [ 246.616826, 296.16905599999996 ], [ 239.52445600000004, 271.31576599999994 ], [ 221.439746, 250.08084599999998 ] ], "type": "bezier" } ] }, { "edgecolor": "#000000", "linewidth": 0.8, "id": "path3972", "items": [ { "pts": [ [ 323.309496, 340.877796 ], [ 323.309496, 340.877796 ], [ 317.294776, 305.539826 ], [ 332.017106, 292.240946 ] ], "type": 
"bezier" }, { "pts": [ [ 332.017106, 292.240946 ], [ 350.75428600000004, 275.31540599999994 ], [ 416.22602600000005, 234.21525599999998 ], [ 416.941826, 221.83367599999997 ] ], "type": "bezier" } ] }, { "edgecolor": "#000000", "linewidth": 0.8, "id": "path3974", "items": [ { "pts": [ [ 345.455096, 159.28387600000002 ], [ 362.61793600000004, 198.03868600000004 ], [ 365.93977600000005, 206.343276 ], [ 365.93977600000005, 206.343276 ] ], "type": "bezier" } ] }, { "edgecolor": "#000000", "linewidth": 0.8, "id": "path3976", "items": [ { "pts": [ [ 317.773106, 184.197676 ], [ 326.077696, 211.87967600000002 ], [ 328.292266, 247.866276 ], [ 328.292266, 247.866276 ] ], "type": "bezier" } ] }, { "edgecolor": "#000000", "linewidth": 0.8, "id": "path3978", "items": [ { "pts": [ [ 401.37273600000003, 308.76667599999996 ], [ 401.37273600000003, 308.76667599999996 ], [ 405.801866, 270.011876 ], [ 420.75013600000005, 273.33372599999996 ] ], "type": "bezier" }, { "pts": [ [ 420.75013600000005, 273.33372599999996 ], [ 435.69841600000007, 276.655566 ], [ 448.430916, 263.366976 ], [ 450.09305600000005, 260.600006 ] ], "type": "bezier" }, { "pts": [ [ 450.09305600000005, 260.600006 ], [ 458.15526600000004, 247.17884599999996 ], [ 478.47382600000003, 223.577476 ], [ 479.43598600000007, 170.91031599999997 ] ], "type": "bezier" } ] }, { "edgecolor": "#000000", "linewidth": 0.8, "id": "path4038", "items": [ { "pts": [ [ 346.56237600000003, 81.77427599999999 ], [ 333.27501600000005, 117.760876 ], [ 304.927996, 135.738626 ], [ 277.35737600000004, 154.85475599999995 ] ], "type": "bezier" }, { "pts": [ [ 277.35737600000004, 154.85475599999995 ], [ 233.72577600000005, 185.10680599999995 ], [ 163.67792600000004, 193.50341599999996 ], [ 130.64277600000005, 216.86243599999995 ] ], "type": "bezier" } ] }, { "edgecolor": "#000000", "linewidth": 0.8, "id": "path4038-6", "items": [ { "pts": [ [ 293.970006, 64.07543599999997 ], [ 280.68264600000003, 100.06203599999998 ], [ 252.063666, 117.65339599999999 ], [ 224.76500600000003, 137.155916 ] ], "type": "bezier" }, { "pts": [ [ 224.76500600000003, 137.155916 ], [ 192.168386, 160.44336599999997 ], [ 148.461196, 163.682746 ], [ 115.42604600000001, 187.041766 ] ], "type": "bezier" } ] } ], "metadata": { "bounds": [ 0.8099779999998376, 491.7203659999999, -0.28622400000000425, 401.53294300000005 ] } }PKHE__<nilearn/plotting/glass_brain_files/brain_schematics_top.json{ "paths": [ { "edgecolor": "#a6a6a6", "linewidth": 1.7, "id": "path3943", "items": [ { "pts": [ [ 386.568843, 265.48969 ], [ 386.127613, 265.33187000000004 ], [ 384.412663, 265.25742 ], [ 382.75784300000004, 265.32422 ] ], "type": "bezier" }, { "pts": [ [ 382.75784300000004, 265.32422 ], [ 381.10302300000006, 265.39102 ], [ 376.285473, 264.68109000000004 ], [ 372.05217300000004, 263.74661000000003 ] ], "type": "bezier" }, { "pts": [ [ 372.05217300000004, 263.74661000000003 ], [ 366.00599300000005, 262.41194 ], [ 363.22031300000003, 262.09245000000004 ], [ 359.06336300000004, 262.25693 ] ], "type": "bezier" }, { "pts": [ [ 359.06336300000004, 262.25693 ], [ 353.77148300000005, 262.46633 ] ], "type": "segment" }, { "pts": [ [ 353.77148300000005, 262.46633 ], [ 349.48376300000007, 265.97515000000004 ] ], "type": "segment" }, { "pts": [ [ 349.48376300000007, 265.97515000000004 ], [ 343.9563530000001, 270.49846 ], [ 340.54773300000005, 271.76623000000006 ], [ 332.2218930000001, 272.3954 ] ], "type": "bezier" }, { "pts": [ [ 332.2218930000001, 272.3954 ], [ 324.59627300000005, 272.97164000000004 ], [ 322.2816930000001, 
273.44469000000004 ], [ 322.2816930000001, 274.42693 ] ], "type": "bezier" }, { "pts": [ [ 322.2816930000001, 274.42693 ], [ 322.2816930000001, 274.81068000000005 ], [ 323.0263530000001, 276.20296 ], [ 323.93651300000005, 277.52090000000004 ] ], "type": "bezier" }, { "pts": [ [ 323.93651300000005, 277.52090000000004 ], [ 325.30313300000006, 279.49984 ], [ 325.591333, 280.56241 ], [ 325.591333, 283.62226000000004 ] ], "type": "bezier" }, { "pts": [ [ 325.591333, 283.62226000000004 ], [ 325.591333, 286.86603 ], [ 325.359253, 287.61529 ], [ 323.727483, 289.63979 ] ], "type": "bezier" }, { "pts": [ [ 323.727483, 289.63979 ], [ 322.702363, 290.91163 ], [ 320.557293, 292.76458 ], [ 318.960653, 293.75747 ] ], "type": "bezier" }, { "pts": [ [ 318.960653, 293.75747 ], [ 316.431513, 295.33025 ], [ 315.49598299999997, 295.56273 ], [ 311.696243, 295.56273 ] ], "type": "bezier" }, { "pts": [ [ 311.696243, 295.56273 ], [ 307.960983, 295.56273 ], [ 307.121223, 295.36206000000004 ], [ 305.846993, 294.16498 ] ], "type": "bezier" }, { "pts": [ [ 305.846993, 294.16498 ], [ 303.936193, 292.36988 ], [ 302.806123, 289.24686 ], [ 301.263573, 281.49845000000005 ] ], "type": "bezier" }, { "pts": [ [ 301.263573, 281.49845000000005 ], [ 300.445653, 277.38995 ], [ 299.397303, 274.16681000000005 ], [ 298.195933, 272.06704 ] ], "type": "bezier" }, { "pts": [ [ 298.195933, 272.06704 ], [ 295.780323, 267.84497 ], [ 295.07813300000004, 262.3045 ], [ 296.220233, 256.47811 ] ], "type": "bezier" }, { "pts": [ [ 296.220233, 256.47811 ], [ 297.237503, 251.28854 ], [ 297.158093, 250.84235 ], [ 293.454653, 240.93795000000003 ] ], "type": "bezier" }, { "pts": [ [ 293.454653, 240.93795000000003 ], [ 290.40234300000003, 232.77493000000004 ], [ 287.799943, 224.06897000000004 ], [ 287.09396300000003, 219.65921000000003 ] ], "type": "bezier" }, { "pts": [ [ 287.09396300000003, 219.65921000000003 ], [ 286.24241300000006, 214.34011000000004 ], [ 289.291263, 208.79560000000004 ], [ 298.03065300000003, 199.77023000000003 ] ], "type": "bezier" }, { "pts": [ [ 298.03065300000003, 199.77023000000003 ], [ 303.786743, 193.82580000000002 ], [ 308.10683300000005, 190.76759000000004 ], [ 313.255393, 188.99263000000002 ] ], "type": "bezier" }, { "pts": [ [ 313.255393, 188.99263000000002 ], [ 317.816303, 187.42025 ], [ 329.72362300000003, 187.54756000000003 ], [ 342.13953300000003, 189.30146000000002 ] ], "type": "bezier" }, { "pts": [ [ 342.13953300000003, 189.30146000000002 ], [ 358.07594300000005, 191.55268 ], [ 374.032433, 191.6137 ], [ 374.032433, 189.42343000000005 ] ], "type": "bezier" }, { "pts": [ [ 374.032433, 189.42343000000005 ], [ 374.032433, 188.79502000000002 ], [ 372.308733, 187.29908 ], [ 370.51288300000004, 186.36892 ] ], "type": "bezier" }, { "pts": [ [ 370.51288300000004, 186.36892 ], [ 369.73550300000005, 185.96628000000004 ], [ 363.236573, 184.28086000000002 ], [ 356.07081300000004, 182.62353000000002 ] ], "type": "bezier" }, { "pts": [ [ 356.07081300000004, 182.62353000000002 ], [ 329.3939730000001, 176.45362 ], [ 322.92993300000006, 173.08875 ], [ 323.716493, 165.78147 ] ], "type": "bezier" }, { "pts": [ [ 323.716493, 165.78147 ], [ 323.882953, 164.23520000000002 ], [ 324.711363, 161.60401000000002 ], [ 325.557413, 159.93439 ] ], "type": "bezier" }, { "pts": [ [ 325.557413, 159.93439 ], [ 326.403483, 158.26477 ], [ 327.095713, 156.63261 ], [ 327.095713, 156.30737000000005 ] ], "type": "bezier" }, { "pts": [ [ 327.095713, 156.30737000000005 ], [ 327.095713, 155.98212 ], [ 325.354813, 154.47374000000002 ], [ 323.227053, 
152.95539000000002 ] ], "type": "bezier" }, { "pts": [ [ 323.227053, 152.95539000000002 ], [ 316.025033, 147.81615000000005 ], [ 313.581303, 143.50647000000004 ], [ 315.063673, 138.55879000000004 ] ], "type": "bezier" }, { "pts": [ [ 315.063673, 138.55879000000004 ], [ 316.83419299999997, 132.64928000000003 ], [ 321.912163, 128.08004000000005 ], [ 334.539023, 121.03457000000003 ] ], "type": "bezier" }, { "pts": [ [ 334.539023, 121.03457000000003 ], [ 338.188103, 118.99846000000002 ], [ 341.271863, 117.03815000000003 ], [ 341.391803, 116.67832000000004 ] ], "type": "bezier" }, { "pts": [ [ 341.391803, 116.67832000000004 ], [ 341.792753, 115.47544300000004 ], [ 336.507503, 115.25985300000002 ], [ 329.84928299999996, 116.20745300000004 ] ], "type": "bezier" }, { "pts": [ [ 329.84928299999996, 116.20745300000004 ], [ 321.38125299999996, 117.41265000000004 ], [ 318.93222299999996, 117.37172000000004 ], [ 315.85264299999994, 115.97354300000006 ] ], "type": "bezier" }, { "pts": [ [ 315.85264299999994, 115.97354300000006 ], [ 312.00736299999994, 114.22772300000003 ], [ 310.09987299999995, 110.92992300000003 ], [ 310.17155299999996, 106.1517 ] ], "type": "bezier" }, { "pts": [ [ 310.17155299999996, 106.1517 ], [ 310.23725299999995, 101.77185000000003 ], [ 311.170083, 98.83423000000005 ], [ 315.94882299999995, 87.95801000000006 ] ], "type": "bezier" }, { "pts": [ [ 315.94882299999995, 87.95801000000006 ], [ 320.03216299999997, 78.66447000000005 ], [ 322.28169299999996, 72.62267000000003 ], [ 322.28169299999996, 70.94906000000003 ] ], "type": "bezier" }, { "pts": [ [ 322.28169299999996, 70.94906000000003 ], [ 322.28169299999996, 69.84986000000004 ], [ 322.06195299999996, 69.78231000000005 ], [ 320.025113, 70.25531000000001 ] ], "type": "bezier" }, { "pts": [ [ 320.025113, 70.25531000000001 ], [ 318.78399299999995, 70.54353000000003 ], [ 314.75096299999996, 70.7858 ], [ 311.06281299999995, 70.79369000000003 ] ], "type": "bezier" }, { "pts": [ [ 311.06281299999995, 70.79369000000003 ], [ 304.35708299999993, 70.80808999999999 ] ], "type": "segment" }, { "pts": [ [ 304.35708299999993, 70.80808999999999 ], [ 302.73710299999993, 74.86991999999998 ] ], "type": "segment" }, { "pts": [ [ 302.73710299999993, 74.86991999999998 ], [ 300.6421929999999, 80.12255000000005 ], [ 298.74054299999995, 82.57524000000001 ], [ 296.08285299999994, 83.45236 ] ], "type": "bezier" }, { "pts": [ [ 296.08285299999994, 83.45236 ], [ 290.99072299999995, 85.13291000000004 ], [ 284.44906299999997, 79.26747999999998 ], [ 271.91390299999995, 61.781799999999976 ] ], "type": "bezier" }, { "pts": [ [ 271.91390299999995, 61.781799999999976 ], [ 267.88553299999995, 56.162480000000016 ], [ 266.92890299999993, 55.327049999999986 ], [ 260.30114299999997, 51.64026000000001 ] ], "type": "bezier" }, { "pts": [ [ 260.30114299999997, 51.64026000000001 ], [ 254.69129299999994, 48.519679999999994 ], [ 253.24811299999996, 47.31599 ], [ 252.75341299999994, 45.344960000000015 ] ], "type": "bezier" }, { "pts": [ [ 252.75341299999994, 45.344960000000015 ], [ 251.86927299999994, 41.82224000000002 ], [ 255.68493299999992, 35.09408000000002 ], [ 260.18554299999994, 32.239869999999996 ] ], "type": "bezier" }, { "pts": [ [ 260.18554299999994, 32.239869999999996 ], [ 263.48165299999994, 30.149550000000033 ], [ 267.49883299999993, 29.929470000000038 ], [ 274.32355299999995, 31.465340000000026 ] ], "type": "bezier" }, { "pts": [ [ 274.32355299999995, 31.465340000000026 ], [ 281.5212829999999, 33.08516000000003 ], [ 281.40827299999995, 32.869830000000036 ], [ 
280.0373829999999, 42.35138000000006 ] ], "type": "bezier" }, { "pts": [ [ 280.0373829999999, 42.35138000000006 ], [ 279.17559299999994, 48.31180000000006 ], [ 279.5982829999999, 49.06805000000003 ], [ 284.1801229999999, 49.76340000000005 ] ], "type": "bezier" }, { "pts": [ [ 284.1801229999999, 49.76340000000005 ], [ 289.12808299999995, 50.51431000000002 ], [ 295.20281299999994, 48.127110000000016 ], [ 295.20281299999994, 45.431790000000035 ] ], "type": "bezier" }, { "pts": [ [ 295.20281299999994, 45.431790000000035 ], [ 295.20281299999994, 44.24984000000006 ], [ 288.94608299999993, 38.30501000000004 ], [ 286.41003299999994, 37.077330000000075 ] ], "type": "bezier" }, { "pts": [ [ 286.41003299999994, 37.077330000000075 ], [ 284.95774299999994, 36.37429000000009 ], [ 283.75026299999996, 35.75553000000002 ], [ 283.72674299999994, 35.70231000000001 ] ], "type": "bezier" }, { "pts": [ [ 283.72674299999994, 35.70231000000001 ], [ 283.70324299999993, 35.649110000000064 ], [ 283.47197299999993, 33.18025 ], [ 283.21287299999995, 30.216000000000008 ] ], "type": "bezier" }, { "pts": [ [ 283.21287299999995, 30.216000000000008 ], [ 282.74344299999996, 24.845570000000066 ], [ 282.73282299999994, 24.818730000000073 ], [ 280.21989299999996, 22.654440000000022 ] ], "type": "bezier" }, { "pts": [ [ 280.21989299999996, 22.654440000000022 ], [ 278.83285299999994, 21.45983000000001 ], [ 276.16489299999995, 19.552509999999984 ], [ 274.29109299999993, 18.41595000000001 ] ], "type": "bezier" }, { "pts": [ [ 274.29109299999993, 18.41595000000001 ], [ 271.11718299999995, 16.490800000000036 ], [ 270.53410299999996, 16.349460000000022 ], [ 265.76576299999994, 16.349460000000022 ] ], "type": "bezier" }, { "pts": [ [ 265.76576299999994, 16.349460000000022 ], [ 260.6473629999999, 16.349460000000022 ] ], "type": "segment" }, { "pts": [ [ 260.6473629999999, 16.349460000000022 ], [ 257.87833299999994, 21.314580000000035 ] ], "type": "segment" }, { "pts": [ [ 257.87833299999994, 21.314580000000035 ], [ 256.3553529999999, 24.045389999999998 ], [ 254.3552129999999, 27.06677000000002 ], [ 253.43357299999994, 28.02877000000001 ] ], "type": "bezier" }, { "pts": [ [ 253.43357299999994, 28.02877000000001 ], [ 251.1664729999999, 30.395110000000045 ], [ 245.36965299999994, 32.90407000000005 ], [ 240.52080299999992, 33.61763000000008 ] ], "type": "bezier" }, { "pts": [ [ 240.52080299999992, 33.61763000000008 ], [ 231.59358299999994, 34.93136000000004 ], [ 230.7079929999999, 35.63022000000001 ], [ 230.7353029999999, 41.33976000000007 ] ], "type": "bezier" }, { "pts": [ [ 230.7353029999999, 41.33976000000007 ], [ 230.74700299999992, 43.77907000000005 ], [ 231.30803299999988, 46.24861000000004 ], [ 232.5993829999999, 49.54464000000007 ] ], "type": "bezier" }, { "pts": [ [ 232.5993829999999, 49.54464000000007 ], [ 235.0395629999999, 55.772910000000024 ], [ 235.49270299999992, 60.07075000000003 ], [ 234.1389429999999, 64.14655000000005 ] ], "type": "bezier" }, { "pts": [ [ 234.1389429999999, 64.14655000000005 ], [ 233.09899299999992, 67.27754000000004 ], [ 233.1001029999999, 67.29157000000004 ], [ 234.6639129999999, 70.77620000000002 ] ], "type": "bezier" }, { "pts": [ [ 234.6639129999999, 70.77620000000002 ], [ 235.52582299999992, 72.69681000000003 ], [ 236.23103299999988, 74.72330000000005 ], [ 236.23103299999988, 75.27949000000001 ] ], "type": "bezier" }, { "pts": [ [ 236.23103299999988, 75.27949000000001 ], [ 236.23103299999988, 75.83570000000003 ], [ 235.24661299999988, 78.55736000000002 ], [ 234.0434129999999, 81.32765000000006 ] 
], "type": "bezier" }, { "pts": [ [ 234.0434129999999, 81.32765000000006 ], [ 232.84023299999993, 84.09793000000002 ], [ 232.0065829999999, 86.75746000000004 ], [ 232.19087299999993, 87.23771000000005 ] ], "type": "bezier" }, { "pts": [ [ 232.19087299999993, 87.23771000000005 ], [ 232.62602299999995, 88.37170000000003 ], [ 242.45408299999994, 91.93405000000007 ], [ 245.51527299999995, 92.06736000000001 ] ], "type": "bezier" }, { "pts": [ [ 245.51527299999995, 92.06736000000001 ], [ 247.79716299999993, 92.16676000000007 ], [ 248.00193299999992, 92.00816000000003 ], [ 251.40530299999998, 87.50558000000001 ] ], "type": "bezier" }, { "pts": [ [ 251.40530299999998, 87.50558000000001 ], [ 256.01874300000003, 81.40211000000005 ], [ 257.184613, 80.63481000000002 ], [ 260.387173, 81.59432000000004 ] ], "type": "bezier" }, { "pts": [ [ 260.387173, 81.59432000000004 ], [ 266.787663, 83.51196000000004 ], [ 273.84087300000004, 94.62351000000001 ], [ 277.786313, 109.00477000000001 ] ], "type": "bezier" }, { "pts": [ [ 277.786313, 109.00477000000001 ], [ 281.18064300000003, 121.37721000000005 ], [ 277.008353, 131.09045000000003 ], [ 264.874363, 139.0643 ] ], "type": "bezier" }, { "pts": [ [ 264.874363, 139.0643 ], [ 258.068503, 143.53677000000005 ], [ 256.853623, 143.77684000000005 ], [ 242.254603, 143.53422 ] ], "type": "bezier" }, { "pts": [ [ 242.254603, 143.53422 ], [ 229.32294299999998, 143.31931000000003 ] ], "type": "segment" }, { "pts": [ [ 229.32294299999998, 143.31931000000003 ], [ 228.20860299999995, 145.68315 ] ], "type": "segment" }, { "pts": [ [ 228.20860299999995, 145.68315 ], [ 226.27705299999997, 149.78051000000005 ], [ 226.25526299999996, 151.66913 ], [ 228.11140299999997, 154.10270000000003 ] ], "type": "bezier" }, { "pts": [ [ 228.11140299999997, 154.10270000000003 ], [ 229.11886299999995, 155.42355000000003 ], [ 229.87829299999996, 157.31038 ], [ 230.06337299999998, 158.95247 ] ], "type": "bezier" }, { "pts": [ [ 230.06337299999998, 158.95247 ], [ 230.33855299999996, 161.39377000000002 ], [ 230.17996299999996, 161.82531000000006 ], [ 228.334213, 163.65795000000003 ] ], "type": "bezier" }, { "pts": [ [ 228.334213, 163.65795000000003 ], [ 225.816633, 166.15762 ], [ 218.959633, 170.19274000000001 ], [ 212.005113, 173.26708000000002 ] ], "type": "bezier" }, { "pts": [ [ 212.005113, 173.26708000000002 ], [ 207.480853, 175.26708000000002 ], [ 206.130203, 175.59514000000001 ], [ 202.467703, 175.58356000000003 ] ], "type": "bezier" }, { "pts": [ [ 202.467703, 175.58356000000003 ], [ 197.349333, 175.56736 ], [ 193.575763, 174.40365000000003 ], [ 188.880963, 171.39350000000002 ] ], "type": "bezier" }, { "pts": [ [ 188.880963, 171.39350000000002 ], [ 186.95702300000002, 170.15994 ], [ 183.893563, 168.42847 ], [ 182.073253, 167.54578000000004 ] ], "type": "bezier" }, { "pts": [ [ 182.073253, 167.54578000000004 ], [ 180.252953, 166.66311000000002 ], [ 177.553013, 164.70636000000002 ], [ 176.073383, 163.19745 ] ], "type": "bezier" }, { "pts": [ [ 176.073383, 163.19745 ], [ 173.38314300000002, 160.45399000000003 ] ], "type": "segment" }, { "pts": [ [ 173.38314300000002, 160.45399000000003 ], [ 172.91093300000003, 150.38227 ] ], "type": "segment" }, { "pts": [ [ 172.91093300000003, 150.38227 ], [ 172.65122300000002, 144.84283000000005 ], [ 172.22741300000004, 139.36970000000002 ], [ 171.96912300000002, 138.21978000000001 ] ], "type": "bezier" }, { "pts": [ [ 171.96912300000002, 138.21978000000001 ], [ 171.499523, 136.12900000000002 ] ], "type": "segment" }, { "pts": [ [ 171.499523, 
136.12900000000002 ], [ 164.300023, 136.29333000000003 ] ], "type": "segment" }, { "pts": [ [ 164.300023, 136.29333000000003 ], [ 155.891043, 136.48526000000004 ], [ 152.15545300000002, 135.68903000000006 ], [ 145.72269300000002, 132.33360000000005 ] ], "type": "bezier" }, { "pts": [ [ 145.72269300000002, 132.33360000000005 ], [ 140.610373, 129.66694000000007 ], [ 135.88677300000003, 125.15654000000006 ], [ 133.542583, 120.70326000000006 ] ], "type": "bezier" }, { "pts": [ [ 133.542583, 120.70326000000006 ], [ 131.916733, 117.61462000000006 ], [ 131.828033, 117.05005000000006 ], [ 131.848503, 109.92203000000006 ] ], "type": "bezier" }, { "pts": [ [ 131.848503, 109.92203000000006 ], [ 131.871903, 101.76790000000005 ], [ 132.672213, 97.64754000000005 ], [ 135.851223, 89.31400000000002 ] ], "type": "bezier" }, { "pts": [ [ 135.851223, 89.31400000000002 ], [ 140.884203, 76.12038000000007 ], [ 148.829163, 67.19758000000002 ], [ 155.543903, 67.19758000000002 ] ], "type": "bezier" }, { "pts": [ [ 155.543903, 67.19758000000002 ], [ 158.15632300000001, 67.19758000000002 ], [ 158.447393, 67.38765000000001 ], [ 162.843683, 71.96476000000007 ] ], "type": "bezier" }, { "pts": [ [ 162.843683, 71.96476000000007 ], [ 167.737633, 77.06000000000006 ], [ 169.675203, 77.97667000000001 ], [ 173.752113, 77.12557000000004 ] ], "type": "bezier" }, { "pts": [ [ 173.752113, 77.12557000000004 ], [ 179.228543, 75.98232000000007 ], [ 179.641003, 73.88252 ], [ 175.98172300000002, 65.77462000000003 ] ], "type": "bezier" }, { "pts": [ [ 175.98172300000002, 65.77462000000003 ], [ 173.95451300000002, 61.28290000000004 ], [ 173.486723, 59.56041000000005 ], [ 173.42427300000003, 56.35739000000001 ] ], "type": "bezier" }, { "pts": [ [ 173.42427300000003, 56.35739000000001 ], [ 173.35677300000003, 52.89286000000004 ], [ 173.57509300000004, 52.08263000000005 ], [ 175.30353300000002, 49.38455000000005 ] ], "type": "bezier" }, { "pts": [ [ 175.30353300000002, 49.38455000000005 ], [ 176.379173, 47.70551000000006 ], [ 177.25923300000002, 45.64118000000008 ], [ 177.25923300000002, 44.797160000000076 ] ], "type": "bezier" }, { "pts": [ [ 177.25923300000002, 44.797160000000076 ], [ 177.25923300000002, 43.953140000000076 ], [ 176.37917300000004, 41.404360000000054 ], [ 175.30353300000002, 39.13319000000001 ] ], "type": "bezier" }, { "pts": [ [ 175.30353300000002, 39.13319000000001 ], [ 173.347833, 35.00380000000007 ] ], "type": "segment" }, { "pts": [ [ 173.347833, 35.00380000000007 ], [ 173.51127300000002, 24.623560000000055 ] ], "type": "segment" }, { "pts": [ [ 173.51127300000002, 24.623560000000055 ], [ 173.70141300000003, 12.547620000000052 ], [ 173.53877300000002, 12.287280000000067 ], [ 168.85592300000002, 17.171410000000037 ] ], "type": "bezier" }, { "pts": [ [ 168.85592300000002, 17.171410000000037 ], [ 165.64737300000002, 20.51786000000004 ], [ 158.86374300000003, 25.676630000000046 ], [ 157.67181300000001, 25.676630000000046 ] ], "type": "bezier" }, { "pts": [ [ 157.67181300000001, 25.676630000000046 ], [ 157.231433, 25.676630000000046 ], [ 154.826793, 23.78111000000007 ], [ 152.32818300000002, 21.464360000000056 ] ], "type": "bezier" }, { "pts": [ [ 152.32818300000002, 21.464360000000056 ], [ 149.82956300000004, 19.147620000000074 ], [ 147.41835300000002, 17.252090000000067 ], [ 146.96992300000002, 17.252090000000067 ] ], "type": "bezier" }, { "pts": [ [ 146.96992300000002, 17.252090000000067 ], [ 145.856703, 17.252090000000067 ], [ 144.77998300000002, 18.596370000000093 ], [ 141.725173, 23.800080000000094 ] ], "type": 
"bezier" }, { "pts": [ [ 141.725173, 23.800080000000094 ], [ 138.842343, 28.7108300000001 ], [ 136.98765300000002, 30.166670000000067 ], [ 132.81777300000002, 30.79192000000012 ] ], "type": "bezier" }, { "pts": [ [ 132.81777300000002, 30.79192000000012 ], [ 126.82872300000002, 31.689950000000067 ], [ 125.40277300000002, 33.65642000000008 ], [ 128.26986300000002, 37.06377000000009 ] ], "type": "bezier" }, { "pts": [ [ 128.26986300000002, 37.06377000000009 ], [ 130.64338300000003, 39.8845300000001 ], [ 133.085833, 39.62114000000008 ], [ 138.712173, 35.93770000000006 ] ], "type": "bezier" }, { "pts": [ [ 138.712173, 35.93770000000006 ], [ 143.758203, 32.63418000000013 ], [ 152.047693, 29.670550000000105 ], [ 155.864383, 29.805510000000083 ] ], "type": "bezier" }, { "pts": [ [ 155.864383, 29.805510000000083 ], [ 160.654733, 29.974900000000048 ], [ 163.834053, 35.47714000000008 ], [ 161.96677300000002, 40.3665400000001 ] ], "type": "bezier" }, { "pts": [ [ 161.96677300000002, 40.3665400000001 ], [ 160.49900300000002, 44.20983000000007 ], [ 158.09046300000003, 46.68781000000007 ], [ 149.45328300000003, 53.24075000000005 ] ], "type": "bezier" }, { "pts": [ [ 149.45328300000003, 53.24075000000005 ], [ 145.05419300000003, 56.57830000000007 ], [ 137.12232300000002, 63.149620000000084 ], [ 131.82690300000002, 67.84367000000009 ] ], "type": "bezier" }, { "pts": [ [ 131.82690300000002, 67.84367000000009 ], [ 120.78312300000002, 77.63328000000013 ], [ 119.26698300000001, 78.58087000000012 ], [ 116.48871300000002, 77.43007000000011 ] ], "type": "bezier" }, { "pts": [ [ 116.48871300000002, 77.43007000000011 ], [ 112.64592300000001, 75.83834000000007 ], [ 109.837073, 70.3545400000001 ], [ 105.08308300000002, 55.162520000000086 ] ], "type": "bezier" }, { "pts": [ [ 105.08308300000002, 55.162520000000086 ], [ 103.37421300000003, 49.70161000000007 ], [ 101.72573300000002, 44.9824000000001 ], [ 101.41979300000003, 44.675380000000075 ] ], "type": "bezier" }, { "pts": [ [ 101.41979300000003, 44.675380000000075 ], [ 101.07865300000003, 44.33305000000007 ], [ 100.34169300000002, 45.26393000000007 ], [ 99.51421300000004, 47.08239000000009 ] ], "type": "bezier" }, { "pts": [ [ 99.51421300000004, 47.08239000000009 ], [ 98.28346300000004, 49.78705000000008 ], [ 98.14456300000003, 51.158100000000104 ], [ 97.93356300000004, 62.68443000000008 ] ], "type": "bezier" }, { "pts": [ [ 97.93356300000004, 62.68443000000008 ], [ 97.68147300000004, 76.45569000000012 ], [ 97.61273300000003, 76.76821000000007 ], [ 93.81390300000004, 81.41752000000008 ] ], "type": "bezier" }, { "pts": [ [ 93.81390300000004, 81.41752000000008 ], [ 91.78284300000004, 83.90328000000011 ] ], "type": "segment" }, { "pts": [ [ 91.78284300000004, 83.90328000000011 ], [ 94.15980300000004, 88.33767000000006 ] ], "type": "segment" }, { "pts": [ [ 94.15980300000004, 88.33767000000006 ], [ 97.82616300000004, 95.1775100000001 ], [ 99.29275300000003, 100.31431000000009 ], [ 99.29148300000003, 106.31151000000006 ] ], "type": "bezier" }, { "pts": [ [ 99.29148300000003, 106.31151000000006 ], [ 99.28948300000002, 113.77654300000006 ], [ 97.90981300000003, 117.20397000000008 ], [ 92.14669300000003, 124.05540000000008 ] ], "type": "bezier" }, { "pts": [ [ 92.14669300000003, 124.05540000000008 ], [ 89.77516300000002, 126.8747800000001 ] ], "type": "segment" }, { "pts": [ [ 89.77516300000002, 126.8747800000001 ], [ 89.73966300000002, 133.00378000000006 ] ], "type": "segment" }, { "pts": [ [ 89.73966300000002, 133.00378000000006 ], [ 89.70676300000002, 138.68209000000007 
], [ 89.55851300000002, 139.42869000000007 ], [ 87.72322300000002, 143.15649000000008 ] ], "type": "bezier" }, { "pts": [ [ 87.72322300000002, 143.15649000000008 ], [ 85.39025300000002, 147.89515000000006 ], [ 82.27624300000002, 150.82593000000008 ], [ 76.76650300000001, 153.4685100000001 ] ], "type": "bezier" }, { "pts": [ [ 76.76650300000001, 153.4685100000001 ], [ 72.855113, 155.34449 ] ], "type": "segment" }, { "pts": [ [ 72.855113, 155.34449 ], [ 52.59487299999999, 155.65031000000005 ] ], "type": "segment" }, { "pts": [ [ 52.59487299999999, 155.65031000000005 ], [ 41.45174299999999, 155.81850000000003 ], [ 32.109533, 156.18626 ], [ 31.834403, 156.46755000000002 ] ], "type": "bezier" }, { "pts": [ [ 31.834403, 156.46755000000002 ], [ 31.116863, 157.20114 ], [ 35.372913, 160.70551 ], [ 39.15791299999999, 162.49762000000004 ] ], "type": "bezier" }, { "pts": [ [ 39.15791299999999, 162.49762000000004 ], [ 44.904393, 165.21845000000002 ], [ 52.509502999999995, 166.68867 ], [ 70.74897299999999, 168.60479000000004 ] ], "type": "bezier" }, { "pts": [ [ 70.74897299999999, 168.60479000000004 ], [ 90.04475299999999, 170.63187000000005 ], [ 91.348823, 171.22747000000004 ], [ 103.60492299999999, 183.61082000000005 ] ], "type": "bezier" }, { "pts": [ [ 103.60492299999999, 183.61082000000005 ], [ 112.72813299999999, 192.82877000000002 ], [ 117.41341299999999, 198.74430000000007 ], [ 118.352083, 202.23028000000005 ] ], "type": "bezier" }, { "pts": [ [ 118.352083, 202.23028000000005 ], [ 119.680613, 207.16404000000006 ], [ 118.16582299999999, 215.14879000000002 ], [ 114.16679299999998, 224.29184000000004 ] ], "type": "bezier" }, { "pts": [ [ 114.16679299999998, 224.29184000000004 ], [ 109.40301299999999, 235.18331000000006 ], [ 107.38447299999999, 244.15658000000005 ], [ 107.60989299999997, 253.44008000000002 ] ], "type": "bezier" }, { "pts": [ [ 107.60989299999997, 253.44008000000002 ], [ 107.72202299999998, 258.05845000000005 ], [ 107.50115299999996, 259.49714000000006 ], [ 106.10981299999997, 263.21131 ] ], "type": "bezier" }, { "pts": [ [ 106.10981299999997, 263.21131 ], [ 105.21244299999998, 265.60681000000005 ], [ 103.53157299999998, 271.49364 ], [ 102.37453299999999, 276.29316000000006 ] ], "type": "bezier" }, { "pts": [ [ 102.37453299999999, 276.29316000000006 ], [ 100.074773, 285.83282 ], [ 99.27145299999998, 287.54614000000004 ], [ 96.57470299999999, 288.66318 ] ], "type": "bezier" }, { "pts": [ [ 96.57470299999999, 288.66318 ], [ 91.09113299999999, 290.93455000000006 ], [ 80.00025299999999, 285.50317000000007 ], [ 77.67176299999998, 279.40610000000004 ] ], "type": "bezier" }, { "pts": [ [ 77.67176299999998, 279.40610000000004 ], [ 76.52406299999998, 276.40090000000004 ], [ 76.99734299999999, 273.67298000000005 ], [ 79.53537299999998, 268.66442000000006 ] ], "type": "bezier" }, { "pts": [ [ 79.53537299999998, 268.66442000000006 ], [ 80.75655299999998, 266.25455000000005 ], [ 81.38559299999997, 263.97095 ], [ 80.82833299999997, 263.97068 ] ], "type": "bezier" }, { "pts": [ [ 80.82833299999997, 263.97068 ], [ 77.83307299999997, 263.96968000000004 ], [ 60.86076299999998, 261.23133000000007 ], [ 58.65768299999998, 260.39423000000005 ] ], "type": "bezier" }, { "pts": [ [ 58.65768299999998, 260.39423000000005 ], [ 53.79710299999998, 258.54735000000005 ], [ 53.29962299999998, 257.85251000000005 ], [ 53.28892299999998, 252.89550000000003 ] ], "type": "bezier" }, { "pts": [ [ 53.28892299999998, 252.89550000000003 ], [ 53.27882299999999, 248.20252000000005 ], [ 53.015422999999984, 247.47718000000003 ], 
[ 51.10527299999998, 246.88157000000004 ] ], "type": "bezier" }, { "pts": [ [ 51.10527299999998, 246.88157000000004 ], [ 48.70666299999998, 246.13366000000002 ], [ 23.623482999999982, 250.46712000000002 ], [ 20.248172999999984, 252.21256000000002 ] ], "type": "bezier" }, { "pts": [ [ 20.248172999999984, 252.21256000000002 ], [ 17.681962999999985, 253.53960000000004 ], [ 17.075312999999984, 254.66858000000002 ], [ 17.10165299999998, 258.06829000000005 ] ], "type": "bezier" }, { "pts": [ [ 17.10165299999998, 258.06829000000005 ], [ 17.13225299999998, 262.01880000000006 ], [ 18.19613299999998, 263.01771 ], [ 21.70612299999998, 262.39153000000005 ] ], "type": "bezier" }, { "pts": [ [ 21.70612299999998, 262.39153000000005 ], [ 32.075952999999984, 260.54156 ], [ 41.52883299999998, 265.61528000000004 ], [ 43.33350299999998, 273.99975000000006 ] ], "type": "bezier" }, { "pts": [ [ 43.33350299999998, 273.99975000000006 ], [ 44.16154299999998, 277.84686000000005 ], [ 42.791582999999974, 280.46048 ], [ 36.44409299999998, 287.14347000000004 ] ], "type": "bezier" }, { "pts": [ [ 36.44409299999998, 287.14347000000004 ], [ 29.017972999999987, 294.96208 ], [ 28.35937299999998, 296.1538800000001 ], [ 28.853302999999986, 300.87957000000006 ] ], "type": "bezier" }, { "pts": [ [ 28.853302999999986, 300.87957000000006 ], [ 29.059472999999986, 302.85205 ], [ 29.532062999999983, 304.8321 ], [ 29.90352299999999, 305.27967 ] ], "type": "bezier" }, { "pts": [ [ 29.90352299999999, 305.27967 ], [ 31.017632999999986, 306.62209 ], [ 33.38815299999999, 306.21115000000003 ], [ 36.97210299999999, 304.05432 ] ], "type": "bezier" }, { "pts": [ [ 36.97210299999999, 304.05432 ], [ 46.630442999999985, 298.24191 ], [ 54.786902999999995, 295.2619 ], [ 61.037442999999996, 295.2619 ] ], "type": "bezier" }, { "pts": [ [ 61.037442999999996, 295.2619 ], [ 65.72788299999999, 295.2619 ], [ 67.564343, 295.79052 ], [ 70.937483, 298.11162 ] ], "type": "bezier" }, { "pts": [ [ 70.937483, 298.11162 ], [ 75.503293, 301.25342 ], [ 77.828943, 308.08501 ], [ 77.137173, 316.32324000000006 ] ], "type": "bezier" }, { "pts": [ [ 77.137173, 316.32324000000006 ], [ 76.182213, 327.69587 ], [ 74.09846300000001, 329.75117 ], [ 55.830163, 337.33937000000003 ] ], "type": "bezier" }, { "pts": [ [ 55.830163, 337.33937000000003 ], [ 44.561443, 342.02007000000003 ], [ 43.068372999999994, 342.77997000000005 ], [ 43.068372999999994, 343.83487 ] ], "type": "bezier" }, { "pts": [ [ 43.068372999999994, 343.83487 ], [ 43.068372999999994, 344.37287000000003 ], [ 44.37634299999999, 346.32347000000004 ], [ 45.97497299999999, 348.16937 ] ], "type": "bezier" }, { "pts": [ [ 45.97497299999999, 348.16937 ], [ 51.132412999999985, 354.12487 ], [ 51.14401299999999, 354.15567 ], [ 50.28940299999999, 359.64937 ] ], "type": "bezier" }, { "pts": [ [ 50.28940299999999, 359.64937 ], [ 49.62731299999999, 363.90547000000004 ], [ 49.654883, 364.62467000000004 ], [ 50.53142299999999, 365.96247 ] ], "type": "bezier" }, { "pts": [ [ 50.53142299999999, 365.96247 ], [ 51.09008299999999, 366.81507 ], [ 52.103922999999995, 367.47776999999996 ], [ 52.86072299999999, 367.48497 ] ], "type": "bezier" }, { "pts": [ [ 52.86072299999999, 367.48497 ], [ 53.597762999999986, 367.49496999999997 ], [ 57.772223, 365.68787 ], [ 62.13730299999999, 363.47577 ] ], "type": "bezier" }, { "pts": [ [ 62.13730299999999, 363.47577 ], [ 71.248353, 358.85837000000004 ], [ 74.56669299999999, 358.18957 ], [ 79.82747299999998, 359.91027 ] ], "type": "bezier" }, { "pts": [ [ 79.82747299999998, 359.91027 ], [ 
84.40674299999998, 361.40807000000007 ], [ 87.30057299999999, 363.47697000000005 ], [ 90.40116299999998, 367.46967000000006 ] ], "type": "bezier" }, { "pts": [ [ 90.40116299999998, 367.46967000000006 ], [ 92.00620299999999, 369.53647 ], [ 95.05669299999998, 372.34457000000003 ], [ 97.62723299999999, 374.12167 ] ], "type": "bezier" }, { "pts": [ [ 97.62723299999999, 374.12167 ], [ 102.857203, 377.73707 ], [ 103.93246299999998, 379.17657 ], [ 104.305823, 383.06207000000006 ] ], "type": "bezier" }, { "pts": [ [ 104.305823, 383.06207000000006 ], [ 104.797863, 388.18297000000007 ], [ 103.20716300000001, 390.59197000000006 ], [ 95.02862300000001, 397.11207 ] ], "type": "bezier" }, { "pts": [ [ 95.02862300000001, 397.11207 ], [ 84.78939300000002, 405.27497000000005 ], [ 85.265623, 404.52347000000003 ], [ 87.202673, 409.46077 ] ], "type": "bezier" }, { "pts": [ [ 87.202673, 409.46077 ], [ 89.17475300000001, 414.4872700000001 ], [ 89.17493300000001, 414.7712700000001 ], [ 87.208673, 419.39147 ] ], "type": "bezier" }, { "pts": [ [ 87.208673, 419.39147 ], [ 82.73868300000001, 429.89677000000006 ], [ 83.513833, 431.60177000000004 ], [ 91.50914300000001, 428.8509700000001 ] ], "type": "bezier" }, { "pts": [ [ 91.50914300000001, 428.8509700000001 ], [ 96.66302300000001, 427.07777000000004 ], [ 98.75274300000001, 427.24877000000004 ], [ 101.14344300000002, 429.6394700000001 ] ], "type": "bezier" }, { "pts": [ [ 101.14344300000002, 429.6394700000001 ], [ 103.43905300000003, 431.9351700000001 ], [ 104.17406300000002, 434.35027 ], [ 104.74601300000003, 441.47697000000005 ] ], "type": "bezier" }, { "pts": [ [ 104.74601300000003, 441.47697000000005 ], [ 105.18625300000002, 446.96247000000005 ], [ 105.77612300000004, 448.70887000000005 ], [ 107.18869300000003, 448.70887000000005 ] ], "type": "bezier" }, { "pts": [ [ 107.18869300000003, 448.70887000000005 ], [ 107.68054300000003, 448.70887000000005 ], [ 111.23095300000003, 445.6471700000001 ], [ 115.07850300000004, 441.90507 ] ], "type": "bezier" }, { "pts": [ [ 115.07850300000004, 441.90507 ], [ 122.07404300000005, 435.1013700000001 ] ], "type": "segment" }, { "pts": [ [ 122.07404300000005, 435.1013700000001 ], [ 120.37494300000006, 430.02047000000005 ] ], "type": "segment" }, { "pts": [ [ 120.37494300000006, 430.02047000000005 ], [ 117.99957300000005, 422.91727000000003 ], [ 118.02363300000005, 418.3269700000001 ], [ 120.45444300000005, 414.85857000000004 ] ], "type": "bezier" }, { "pts": [ [ 120.45444300000005, 414.85857000000004 ], [ 122.76387300000005, 411.56327000000005 ], [ 126.18627300000006, 410.18547 ], [ 129.08234300000007, 411.38507000000004 ] ], "type": "bezier" }, { "pts": [ [ 129.08234300000007, 411.38507000000004 ], [ 134.73186300000006, 413.72517000000005 ], [ 142.80726300000006, 431.33457000000004 ], [ 143.87240300000008, 443.63647000000003 ] ], "type": "bezier" }, { "pts": [ [ 143.87240300000008, 443.63647000000003 ], [ 144.48513300000008, 450.71337000000005 ], [ 143.5670130000001, 454.50547000000006 ], [ 139.65272300000007, 461.06517 ] ], "type": "bezier" }, { "pts": [ [ 139.65272300000007, 461.06517 ], [ 134.66903300000007, 469.41697000000005 ], [ 135.07475300000007, 469.95307 ], [ 143.73603300000008, 466.46057 ] ], "type": "bezier" }, { "pts": [ [ 143.73603300000008, 466.46057 ], [ 152.3844330000001, 462.97317 ], [ 155.0386230000001, 463.61027 ], [ 161.34777300000007, 470.68757 ] ], "type": "bezier" }, { "pts": [ [ 161.34777300000007, 470.68757 ], [ 169.36099300000006, 479.67637 ], [ 169.01775300000008, 479.39817 ], [ 172.09457300000008, 
479.39817 ] ], "type": "bezier" }, { "pts": [ [ 172.09457300000008, 479.39817 ], [ 174.87331300000008, 479.39817 ] ], "type": "segment" }, { "pts": [ [ 174.87331300000008, 479.39817 ], [ 174.5522330000001, 472.62847 ] ], "type": "segment" }, { "pts": [ [ 174.5522330000001, 472.62847 ], [ 173.54264300000008, 451.34287 ], [ 173.89981300000008, 453.28207 ], [ 168.8169330000001, 441.48776999999995 ] ], "type": "bezier" }, { "pts": [ [ 168.8169330000001, 441.48776999999995 ], [ 165.1957230000001, 433.08507 ], [ 159.4186530000001, 427.35257 ], [ 159.4186530000001, 424.07376999999997 ] ], "type": "bezier" }, { "pts": [ [ 159.4186530000001, 424.07376999999997 ], [ 159.4186530000001, 423.03117 ], [ 159.0557530000001, 420.03432 ], [ 160.3989930000001, 418.27536999999995 ] ], "type": "bezier" }, { "pts": [ [ 160.3989930000001, 418.27536999999995 ], [ 161.37933300000012, 414.37266999999997 ] ], "type": "segment" }, { "pts": [ [ 161.37933300000012, 414.37266999999997 ], [ 159.39815300000012, 408.54067 ] ], "type": "segment" }, { "pts": [ [ 159.39815300000012, 408.54067 ], [ 157.21681300000012, 402.11947 ], [ 156.75452300000012, 396.09286999999995 ], [ 158.13186300000012, 392.03327 ] ], "type": "bezier" }, { "pts": [ [ 158.13186300000012, 392.03327 ], [ 159.57415300000014, 387.78247 ], [ 165.79912300000012, 383.48387 ], [ 171.9606230000001, 382.48416999999995 ] ], "type": "bezier" }, { "pts": [ [ 171.9606230000001, 382.48416999999995 ], [ 176.6953630000001, 381.71596999999997 ], [ 178.58959300000012, 380.81566999999995 ], [ 179.6057430000001, 378.85067 ] ], "type": "bezier" }, { "pts": [ [ 179.6057430000001, 378.85067 ], [ 180.8759730000001, 376.39427 ], [ 180.6830030000001, 373.09387 ], [ 178.9756130000001, 368.07397 ] ], "type": "bezier" }, { "pts": [ [ 178.9756130000001, 368.07397 ], [ 176.8933330000001, 361.95177 ], [ 176.9774230000001, 358.70637 ], [ 179.3014630000001, 355.49816999999996 ] ], "type": "bezier" }, { "pts": [ [ 179.3014630000001, 355.49816999999996 ], [ 182.8626930000001, 350.58207 ], [ 190.6989530000001, 346.65187 ], [ 198.2317030000001, 346.00397 ] ], "type": "bezier" }, { "pts": [ [ 198.2317030000001, 346.00397 ], [ 202.2676330000001, 345.65686999999997 ], [ 204.92946300000008, 346.55457 ], [ 211.4438930000001, 350.45997 ] ], "type": "bezier" }, { "pts": [ [ 211.4438930000001, 350.45997 ], [ 220.11589300000009, 355.65877 ], [ 223.15187300000008, 359.51507 ], [ 223.63899300000008, 365.95007 ] ], "type": "bezier" }, { "pts": [ [ 223.63899300000008, 365.95007 ], [ 223.87502300000008, 369.06817 ], [ 223.60989300000008, 370.35787 ], [ 221.75378300000008, 375.12186999999994 ] ], "type": "bezier" }, { "pts": [ [ 221.75378300000008, 375.12186999999994 ], [ 219.28985300000008, 381.44587 ], [ 218.9912830000001, 384.55816999999996 ], [ 220.75788300000008, 385.50356999999997 ] ], "type": "bezier" }, { "pts": [ [ 220.75788300000008, 385.50356999999997 ], [ 221.3980530000001, 385.84617 ], [ 223.76744300000007, 386.12807 ], [ 226.02321300000008, 386.12987 ] ], "type": "bezier" }, { "pts": [ [ 226.02321300000008, 386.12987 ], [ 235.7619530000001, 386.13987 ], [ 238.7984930000001, 388.81256999999994 ], [ 242.1947630000001, 400.37456999999995 ] ], "type": "bezier" }, { "pts": [ [ 242.1947630000001, 400.37456999999995 ], [ 244.42420300000012, 407.9643699999999 ], [ 244.34796300000014, 411.5499699999999 ], [ 241.8492330000001, 416.62566999999996 ] ], "type": "bezier" }, { "pts": [ [ 241.8492330000001, 416.62566999999996 ], [ 239.1955530000001, 422.01606999999996 ], [ 235.11046300000012, 
430.24026999999995 ], [ 229.63158300000012, 433.12236999999993 ] ], "type": "bezier" }, { "pts": [ [ 229.63158300000012, 433.12236999999993 ], [ 227.4913930000001, 434.24826999999993 ], [ 224.7158130000001, 436.14286999999996 ], [ 223.46362300000013, 437.33277 ] ], "type": "bezier" }, { "pts": [ [ 223.46362300000013, 437.33277 ], [ 220.35180300000013, 440.28977 ], [ 220.4064530000001, 442.33047 ], [ 223.74437300000014, 447.81576999999993 ] ], "type": "bezier" }, { "pts": [ [ 223.74437300000014, 447.81576999999993 ], [ 226.30182300000013, 452.01847 ] ], "type": "segment" }, { "pts": [ [ 226.30182300000013, 452.01847 ], [ 226.30182300000013, 463.50956999999994 ] ], "type": "segment" }, { "pts": [ [ 226.30182300000013, 463.50956999999994 ], [ 226.30182300000013, 475.5141699999999 ], [ 226.59635300000014, 476.99116999999995 ], [ 228.99012300000012, 476.99116999999995 ] ], "type": "bezier" }, { "pts": [ [ 228.99012300000012, 476.99116999999995 ], [ 230.35055300000013, 476.99116999999995 ], [ 231.10560300000012, 476.04536999999993 ], [ 235.53318300000015, 468.79456999999996 ] ], "type": "bezier" }, { "pts": [ [ 235.53318300000015, 468.79456999999996 ], [ 239.99658300000013, 461.4851699999999 ], [ 243.39594300000013, 457.49326999999994 ], [ 246.50985300000016, 455.90466999999995 ] ], "type": "bezier" }, { "pts": [ [ 246.50985300000016, 455.90466999999995 ], [ 249.65523300000015, 454.30006999999995 ], [ 251.2103330000002, 454.41927 ], [ 254.33060300000014, 456.50406999999996 ] ], "type": "bezier" }, { "pts": [ [ 254.33060300000014, 456.50406999999996 ], [ 257.72476300000017, 458.77196999999995 ], [ 259.95412300000015, 461.61796999999996 ], [ 262.17737300000016, 466.52126999999996 ] ], "type": "bezier" }, { "pts": [ [ 262.17737300000016, 466.52126999999996 ], [ 264.14336300000014, 470.85716999999994 ], [ 265.1926730000002, 471.63436999999993 ], [ 266.94255300000015, 470.05076999999994 ] ], "type": "bezier" }, { "pts": [ [ 266.94255300000015, 470.05076999999994 ], [ 268.46511300000014, 468.67286999999993 ], [ 268.46315300000015, 466.11396999999994 ], [ 266.9324530000001, 456.83246999999994 ] ], "type": "bezier" }, { "pts": [ [ 266.9324530000001, 456.83246999999994 ], [ 265.41103300000015, 447.60726999999997 ], [ 265.4231130000001, 446.6029699999999 ], [ 267.17794300000014, 436.4244699999999 ] ], "type": "bezier" }, { "pts": [ [ 267.17794300000014, 436.4244699999999 ], [ 267.9816730000001, 431.7625699999999 ], [ 268.82251300000013, 426.0528699999999 ], [ 269.04646300000013, 423.7360699999999 ] ], "type": "bezier" }, { "pts": [ [ 269.04646300000013, 423.7360699999999 ], [ 269.5532930000001, 418.49316999999996 ], [ 270.5371730000001, 416.53986999999995 ], [ 273.4013630000001, 415.08996999999994 ] ], "type": "bezier" }, { "pts": [ [ 273.4013630000001, 415.08996999999994 ], [ 277.4408730000001, 413.04526999999996 ], [ 279.2161430000001, 412.9611699999999 ], [ 282.88494300000013, 414.64077 ] ], "type": "bezier" }, { "pts": [ [ 282.88494300000013, 414.64077 ], [ 287.53026300000016, 416.76746999999995 ], [ 295.17519300000015, 421.84376999999995 ], [ 302.3948330000001, 427.59536999999995 ] ], "type": "bezier" }, { "pts": [ [ 302.3948330000001, 427.59536999999995 ], [ 309.8990230000001, 433.57376999999997 ], [ 311.4131830000001, 434.38397 ], [ 314.6428530000001, 434.14926999999994 ] ], "type": "bezier" }, { "pts": [ [ 314.6428530000001, 434.14926999999994 ], [ 317.0323830000001, 433.9756699999999 ], [ 317.1768830000001, 433.85677 ], [ 317.3622530000001, 431.91337 ] ], "type": "bezier" }, { "pts": [ [ 
[... remaining glass-brain schematic drawing data omitted: a JSON list of path objects, each with "edgecolor", "linewidth", "id" and "items" made of "bezier"/"segment" pieces given as "pts" coordinate lists, closed by a "metadata" block with the drawing "bounds" [1.388, 398.601, -0.998, 490.821] ...]
"bounds": [ 1.3884930000000004, 398.60061299999995, -0.997760000000028, 490.82067 ] } }PKH"nilearn/plotting/tests/__init__.pyPKHnD!nilearn/plotting/tests/test_cm.py# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ Smoke testing the cm module """ import matplotlib.pyplot as plt from nilearn.plotting.cm import dim_cmap, replace_inside def test_dim_cmap(): # This is only a smoke test dim_cmap(plt.cm.jet) def test_replace_inside(): # This is only a smoke test replace_inside(plt.cm.jet, plt.cm.hsv, .2, .8) # We also test with gnuplot, which is defined using function if hasattr(plt.cm, 'gnuplot'): # gnuplot is only in recent version of MPL replace_inside(plt.cm.gnuplot, plt.cm.gnuplot2, .2, .8) PKHFF(nilearn/plotting/tests/test_find_cuts.pyimport numpy as np from nose.tools import assert_equal, assert_true import nibabel from nilearn.plotting.find_cuts import (find_xyz_cut_coords, find_cut_slices, _transform_cut_coords) from nilearn._utils.testing import assert_raises_regex, assert_warns def test_find_cut_coords(): data = np.zeros((100, 100, 100)) x_map, y_map, z_map = 50, 10, 40 data[x_map - 30:x_map + 30, y_map - 3:y_map + 3, z_map - 10:z_map + 10] = 1 # identity affine affine = np.eye(4) img = nibabel.Nifti1Image(data, affine) x, y, z = find_xyz_cut_coords(img, mask=np.ones(data.shape, np.bool)) np.testing.assert_allclose((x, y, z), (x_map, y_map, z_map), # Need such a high tolerance for the test to # pass. x, y, z = [49.5, 9.5, 39.5] rtol=6e-2) # non-trivial affine affine = np.diag([1. / 2, 1 / 3., 1 / 4., 1.]) img = nibabel.Nifti1Image(data, affine) x, y, z = find_xyz_cut_coords(img, mask=np.ones(data.shape, np.bool)) np.testing.assert_allclose((x, y, z), (x_map / 2., y_map / 3., z_map / 4.), # Need such a high tolerance for the test to # pass. x, y, z = [24.75, 3.17, 9.875] rtol=6e-2) # regression test (cf. #473) # test case: no data exceeds the activation threshold data = np.ones((36, 43, 36)) affine = np.eye(4) img = nibabel.Nifti1Image(data, affine) x, y, z = find_xyz_cut_coords(img, activation_threshold=1.1) np.testing.assert_array_equal( np.array([x, y, z]), 0.5 * np.array(data.shape).astype(np.float)) # regression test (cf. 
    # regression test (cf. #922)
    # pseudo-4D images as input (i.e., X, Y, Z, 1)
    # previously raised "ValueError: too many values to unpack"
    rng = np.random.RandomState(42)
    data_3d = rng.randn(10, 10, 10)
    data_4d = data_3d[..., np.newaxis]
    affine = np.eye(4)
    img_3d = nibabel.Nifti1Image(data_3d, affine)
    img_4d = nibabel.Nifti1Image(data_4d, affine)
    assert_equal(find_xyz_cut_coords(img_3d), find_xyz_cut_coords(img_4d))


def test_find_cut_slices():
    data = np.zeros((50, 50, 50))
    x_map, y_map, z_map = 25, 5, 20
    data[x_map - 15:x_map + 15, y_map - 3:y_map + 3,
         z_map - 10:z_map + 10] = 1
    img = nibabel.Nifti1Image(data, np.eye(4))
    for n_cuts in (2, 4):
        for direction in 'xz':
            cuts = find_cut_slices(img, direction=direction,
                                   n_cuts=n_cuts, spacing=2)
            # Test that we are indeed getting the right number of cuts
            assert_equal(len(cuts), n_cuts)
            # Test that we are not getting cuts that are separated by
            # less than the minimum spacing that we asked for
            assert_equal(np.diff(cuts).min(), 2)
            # Test that the cuts indeed go through the 'activated' part
            # of the data
            for cut in cuts:
                if direction == 'x':
                    cut_value = data[cut]
                elif direction == 'z':
                    cut_value = data[..., cut]
                assert_equal(cut_value.max(), 1)

    # Now ask for more cuts than it is possible to have with a given spacing
    n_cuts = 15
    for direction in 'xz':
        # Only a smoke test
        cuts = find_cut_slices(img, direction=direction,
                               n_cuts=n_cuts, spacing=2)


def test_validity_of_ncuts_error_in_find_cut_slices():
    data = np.zeros((50, 50, 50))
    affine = np.eye(4)
    x_map, y_map, z_map = 25, 5, 20
    data[x_map - 15:x_map + 15, y_map - 3:y_map + 3,
         z_map - 10:z_map + 10] = 1
    img = nibabel.Nifti1Image(data, affine)
    direction = 'z'
    for n_cuts in (0, -2, -10.00034, 0.999999, 0.4, 0.11111111):
        message = ("Image has %d slices in direction %s. Therefore, the "
                   "number of cuts must be between 1 and %d. You provided "
                   "n_cuts=%s " % (data.shape[0], direction,
                                   data.shape[0], n_cuts))
        assert_raises_regex(ValueError, message,
                            find_cut_slices, img, n_cuts=n_cuts)


def test_passing_of_ncuts_in_find_cut_slices():
    data = np.zeros((50, 50, 50))
    affine = np.eye(4)
    x_map, y_map, z_map = 25, 5, 20
    data[x_map - 15:x_map + 15, y_map - 3:y_map + 3,
         z_map - 10:z_map + 10] = 1
    img = nibabel.Nifti1Image(data, affine)
    # smoke test to check if it rounds the floating point inputs
    for n_cuts in (1, 5., 0.9999999, 2.000000004):
        cut1 = find_cut_slices(img, direction='x', n_cuts=n_cuts)
        cut2 = find_cut_slices(img, direction='x', n_cuts=round(n_cuts))
        np.testing.assert_array_equal(cut1, cut2)


def test_singleton_ax_dim():
    for axis, direction in enumerate("xyz"):
        shape = [5, 6, 7]
        shape[axis] = 1
        img = nibabel.Nifti1Image(np.ones(shape), np.eye(4))
        find_cut_slices(img, direction=direction)


def test_transform_cut_coords():
    affine = np.eye(4)
    # test that when n_cuts is 1 we do get an iterable
    for direction in 'xyz':
        assert_true(hasattr(_transform_cut_coords([4], direction, affine),
                            "__iter__"))

    # test that the number of cuts is the same before and after the call
    n_cuts = 5
    cut_coords = np.arange(n_cuts)
    for direction in 'xyz':
        assert_equal(len(_transform_cut_coords(cut_coords, direction,
                                               affine)),
                     n_cuts)


def test_find_cuts_empty_mask_no_crash():
    img = nibabel.Nifti1Image(np.ones((2, 2, 2)), np.eye(4))
    mask = np.zeros((2, 2, 2)).astype(np.bool)
    cut_coords = assert_warns(UserWarning, find_xyz_cut_coords,
                              img, mask=mask)
    np.testing.assert_array_equal(cut_coords, [.5, .5, .5])


def test_fast_abs_percentile_no_index_error_find_cuts():
    # check that find_cuts functions are safe
    data = np.array([[[1., 2.], [3., 4.]],
                     [[0., 0.], [0., 0.]]])
    img = nibabel.Nifti1Image(data, np.eye(4))
    assert_equal(len(find_xyz_cut_coords(img)), 3)

nilearn/plotting/tests/test_displays.py
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
import tempfile

import matplotlib.pyplot as plt

from nilearn.plotting.displays import OrthoSlicer, XSlicer, OrthoProjector
from nilearn.datasets import load_mni152_template

##############################################################################
# Some smoke testing for graphics-related code


def test_demo_ortho_slicer():
    # This is only a smoke test
    oslicer = OrthoSlicer(cut_coords=(0, 0, 0))
    img = load_mni152_template()
    oslicer.add_overlay(img, cmap=plt.cm.gray)
    oslicer.close()


def test_stacked_slicer():
    # Test stacked slicers, like the XSlicer
    img = load_mni152_template()
    slicer = XSlicer.init_with_figure(img=img, cut_coords=3)
    slicer.add_overlay(img, cmap=plt.cm.gray)
    # Forcing a layout here, to test the locator code
    with tempfile.TemporaryFile() as fp:
        slicer.savefig(fp)
    slicer.close()


def test_demo_ortho_projector():
    # This is only a smoke test
    img = load_mni152_template()
    oprojector = OrthoProjector.init_with_figure(img=img)
    oprojector.add_overlay(img, cmap=plt.cm.gray)
    with tempfile.TemporaryFile() as fp:
        oprojector.savefig(fp)
    oprojector.close()


def test_contour_fillings_levels_in_add_contours():
    oslicer = OrthoSlicer(cut_coords=(0, 0, 0))
    img = load_mni152_template()
    # levels should be at least 2
    # If a single level is passed then we force the upper level to be inf
    oslicer.add_contours(img, filled=True, colors='r',
                         alpha=0.2, levels=[0.])
    # If two levels are passed, they should be increasing from the zero
    # index. In this case, we simply omit appending inf
    oslicer.add_contours(img, filled=True, colors='b',
                         alpha=0.1, levels=[0., 0.2])
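The levels convention exercised by the last test above can be shown in isolation. A minimal sketch, relying only on the behaviour documented in the comments of test_contour_fillings_levels_in_add_contours (this snippet is illustrative and not part of the test suite):

from nilearn.plotting.displays import OrthoSlicer
from nilearn.datasets import load_mni152_template

oslicer = OrthoSlicer(cut_coords=(0, 0, 0))
img = load_mni152_template()
# A single level is accepted for filled contours: the display treats it
# as [0., inf), filling everything above the threshold.
oslicer.add_contours(img, filled=True, colors='r', levels=[0.])
# With two increasing levels, only the band between them is filled.
oslicer.add_contours(img, filled=True, colors='b', levels=[0., 0.2])
oslicer.close()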
nilearn/plotting/tests/test_edge_detect.py
import numpy as np

from nilearn.plotting.edge_detect import _edge_detect
from nose.tools import assert_true


def test_edge_detect():
    img = np.zeros((10, 10))
    img[:5] = 1
    _, edge_mask = _edge_detect(img)
    np.testing.assert_almost_equal(img[4], 1)


def test_edge_nan():
    img = np.zeros((10, 10))
    img[:5] = 1
    img[0] = np.NaN
    grad_mag, edge_mask = _edge_detect(img)
    np.testing.assert_almost_equal(img[4], 1)
    assert_true((grad_mag[0] > 2).all())
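For context, a usage sketch of the edge detector exercised above. It relies only on the return convention visible in these tests (a gradient-magnitude map and a boolean edge mask, both shaped like the input); the step image is made up:

import numpy as np
from nilearn.plotting.edge_detect import _edge_detect

image = np.zeros((10, 10))
image[:5] = 1.  # a horizontal step: the only edge runs along the middle
grad_mag, edge_mask = _edge_detect(image)
# The gradient magnitude, and hence the mask, should concentrate on the
# rows where the step occurs.
print(np.where(edge_mask.any(axis=1))[0])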
nilearn/plotting/tests/test_img_plotting.py
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
import os
import tempfile
from functools import partial

import matplotlib.pyplot as plt
import nibabel
import numpy as np
from nose.tools import assert_raises, assert_true, assert_equal
from scipy import sparse

from nilearn._utils.testing import assert_raises_regex
from nilearn.image.resampling import coord_transform
from nilearn.plotting.img_plotting import (MNI152TEMPLATE, plot_anat,
                                           plot_img, plot_roi, plot_stat_map,
                                           plot_epi, plot_glass_brain,
                                           plot_connectome, plot_prob_atlas,
                                           _get_colorbar_and_data_ranges)

mni_affine = np.array([[-2., 0., 0., 90.],
                       [0., 2., 0., -126.],
                       [0., 0., 2., -72.],
                       [0., 0., 0., 1.]])


def _generate_img():
    data_positive = np.zeros((7, 7, 3))
    rng = np.random.RandomState(42)
    data_rng = rng.rand(7, 7, 3)
    data_positive[1:-1, 2:-1, 1:] = data_rng[1:-1, 2:-1, 1:]
    return nibabel.Nifti1Image(data_positive, mni_affine)


def demo_plot_roi(**kwargs):
    """ Demo plotting an ROI """
    mni_affine = MNI152TEMPLATE.get_affine()
    data = np.zeros((91, 109, 91))
    # Color an asymmetric rectangle around Broca's area:
    x, y, z = -52, 10, 22
    x_map, y_map, z_map = coord_transform(x, y, z,
                                          np.linalg.inv(mni_affine))
    data[int(x_map) - 5:int(x_map) + 5,
         int(y_map) - 3:int(y_map) + 3,
         int(z_map) - 10:int(z_map) + 10] = 1
    img = nibabel.Nifti1Image(data, mni_affine)
    return plot_roi(img, title="Broca's area", **kwargs)


def test_demo_plot_roi():
    # This is only a smoke test
    demo_plot_roi()
    # Test the black background code path
    demo_plot_roi(black_bg=True)
    # Save execution time and memory
    plt.close()

    with tempfile.NamedTemporaryFile(suffix='.png') as fp:
        out = demo_plot_roi(output_file=fp)
        assert_true(out is None)


def test_plot_anat():
    img = _generate_img()

    # Test saving with empty plot
    z_slicer = plot_anat(anat_img=False, display_mode='z')
    filename = tempfile.mktemp(suffix='.png')
    try:
        z_slicer.savefig(filename)
    finally:
        os.remove(filename)

    z_slicer = plot_anat(display_mode='z')
    filename = tempfile.mktemp(suffix='.png')
    try:
        z_slicer.savefig(filename)
    finally:
        os.remove(filename)

    ortho_slicer = plot_anat(img, dim=True)
    filename = tempfile.mktemp(suffix='.png')
    try:
        ortho_slicer.savefig(filename)
    finally:
        os.remove(filename)
    # Save execution time and memory
    plt.close()


def test_plot_functions():
    img = _generate_img()
    # smoke-test for each plotting function with default arguments
    for plot_func in [plot_anat, plot_img, plot_stat_map,
                      plot_epi, plot_glass_brain]:
        filename = tempfile.mktemp(suffix='.png')
        try:
            plot_func(img, output_file=filename)
        finally:
            os.remove(filename)

    # test for bad input arguments (cf. #510)
    ax = plt.subplot(111, rasterized=True)
    filename = tempfile.mktemp(suffix='.png')
    try:
        plot_stat_map(img, symmetric_cbar=True,
                      output_file=filename,
                      axes=ax, vmax=np.nan)
    finally:
        os.remove(filename)
    plt.close()


def test_plot_glass_brain():
    img = _generate_img()

    # test plot_glass_brain with colorbar
    plot_glass_brain(img, colorbar=True)
    # test plot_glass_brain with negative values
    plot_glass_brain(img, colorbar=True, plot_abs=False)
    # Save execution time and memory
    plt.close()

    # smoke-test for hemispheric glass brain
    filename = tempfile.mktemp(suffix='.png')
    plot_glass_brain(img, output_file=filename, display_mode='lzry')
    plt.close()


def test_plot_stat_map():
    img = _generate_img()

    plot_stat_map(img, cut_coords=(80, -120, -60))

    # Smoke test coordinate finder, with and without mask
    masked_img = nibabel.Nifti1Image(
        np.ma.masked_equal(img.get_data(), 0),
        mni_affine)
    plot_stat_map(masked_img, display_mode='x')
    plot_stat_map(img, display_mode='y', cut_coords=2)

    # 'yx' display_mode
    plot_stat_map(img, display_mode='yx')

    # regression test #510
    data = np.zeros((91, 109, 91))
    aff = np.eye(4)
    new_img = nibabel.Nifti1Image(data, aff)
    plot_stat_map(new_img, threshold=1000, colorbar=True)

    rng = np.random.RandomState(42)
    data = rng.randn(91, 109, 91)
    new_img = nibabel.Nifti1Image(data, aff)
    plot_stat_map(new_img, threshold=1000, colorbar=True)
    # Save execution time and memory
    plt.close()


def test_plot_stat_map_threshold_for_affine_with_rotation():
    # threshold was not being applied when the affine has a rotation
    # see https://github.com/nilearn/nilearn/issues/599 for more details
    data = np.random.randn(10, 10, 10)
    # matrix with rotation
    affine = np.array([[-3., 1., 0., 1.],
                       [-1., -3., 0., -2.],
                       [0., 0., 3., 3.],
                       [0., 0., 0., 1.]])
    img = nibabel.Nifti1Image(data, affine)
    display = plot_stat_map(img, bg_img=None, threshold=1e6,
                            display_mode='z', cut_coords=1)
    # Next two lines retrieve the numpy array from the plot
    ax = list(display.axes.values())[0].ax
    plotted_array = ax.images[0].get_array()
    # Given the high threshold the array should be entirely masked
    assert_true(plotted_array.mask.all())
    # Save execution time and memory
    plt.close()
def test_plot_stat_map_threshold_for_uint8():
    # mask was applied in [-threshold, threshold] which is problematic
    # for uint8 data. See https://github.com/nilearn/nilearn/issues/611
    # for more details
    data = 10 * np.ones((10, 10, 10), dtype='uint8')
    # Having a zero minimum value is important to reproduce
    # https://github.com/nilearn/nilearn/issues/762
    data[0, 0, 0] = 0
    affine = np.eye(4)
    img = nibabel.Nifti1Image(data, affine)
    threshold = np.array(5, dtype='uint8')
    display = plot_stat_map(img, bg_img=None, threshold=threshold,
                            display_mode='z', cut_coords=[0])
    # Next two lines retrieve the numpy array from the plot
    ax = list(display.axes.values())[0].ax
    plotted_array = ax.images[0].get_array()
    # Make sure that there is one value masked
    assert_equal(plotted_array.mask.sum(), 1)
    # Make sure that the value masked is in the corner. Note that the
    # axis orientation seems to be flipped, hence (0, 0) -> (-1, 0)
    assert_true(plotted_array.mask[-1, 0])
    # Save execution time and memory
    plt.close()


def test_plot_glass_brain_threshold_for_uint8():
    # mask was applied in [-threshold, threshold] which is problematic
    # for uint8 data. See https://github.com/nilearn/nilearn/issues/611
    # for more details
    data = 10 * np.ones((10, 10, 10), dtype='uint8')
    # Having a zero minimum value is important to reproduce
    # https://github.com/nilearn/nilearn/issues/762
    data[0, 0] = 0
    affine = np.eye(4)
    img = nibabel.Nifti1Image(data, affine)
    threshold = np.array(5, dtype='uint8')
    display = plot_glass_brain(img, threshold=threshold,
                               display_mode='z', colorbar=True)
    # Next two lines retrieve the numpy array from the plot
    ax = list(display.axes.values())[0].ax
    plotted_array = ax.images[0].get_array()
    # Make sure that there is one value masked
    assert_equal(plotted_array.mask.sum(), 1)
    # Make sure that the value masked is in the corner. Note that the
    # axis orientation seems to be flipped, hence (0, 0) -> (-1, 0)
    assert_true(plotted_array.mask[-1, 0])
    # Save execution time and memory
    plt.close()


def test_save_plot():
    img = _generate_img()

    kwargs_list = [{}, {'display_mode': 'x', 'cut_coords': 3}]
    for kwargs in kwargs_list:
        filename = tempfile.mktemp(suffix='.png')
        try:
            display = plot_stat_map(img, output_file=filename, **kwargs)
        finally:
            os.remove(filename)
        assert_true(display is None)

        display = plot_stat_map(img, **kwargs)
        filename = tempfile.mktemp(suffix='.png')
        try:
            display.savefig(filename)
        finally:
            os.remove(filename)
    # Save execution time and memory
    plt.close()


def test_display_methods():
    img = _generate_img()
    display = plot_img(img)
    display.add_overlay(img, threshold=0)
    display.add_edges(img, color='c')
    display.add_contours(img, contours=2, linewidth=4,
                         colors=['limegreen', 'yellow'])


def test_plot_with_axes_or_figure():
    img = _generate_img()
    figure = plt.figure()
    plot_img(img, figure=figure)
    ax = plt.subplot(111)
    plot_img(img, axes=ax)
    # Save execution time and memory
    plt.close()


def test_plot_stat_map_colorbar_variations():
    # This is only a smoke test
    img_positive = _generate_img()
    data_positive = img_positive.get_data()
    rng = np.random.RandomState(42)
    data_negative = -data_positive
    data_heterogeneous = data_positive * rng.randn(*data_positive.shape)
    img_negative = nibabel.Nifti1Image(data_negative, mni_affine)
    img_heterogeneous = nibabel.Nifti1Image(data_heterogeneous, mni_affine)

    for img in [img_positive, img_negative, img_heterogeneous]:
        for func in [plot_stat_map,
                     partial(plot_stat_map, symmetric_cbar=True),
                     partial(plot_stat_map, symmetric_cbar=False),
                     partial(plot_stat_map, symmetric_cbar=False, vmax=10),
                     partial(plot_stat_map, symmetric_cbar=True, vmax=10),
                     partial(plot_stat_map, colorbar=False)]:
            func(img, cut_coords=(80, -120, -60))
            plt.close()


def test_plot_empty_slice():
    # Test that things don't crash when we give a map with nothing above
    # threshold
    # This is only a smoke test
    data = np.zeros((20, 20, 20))
    img = nibabel.Nifti1Image(data, mni_affine)
    plot_img(img, display_mode='y', threshold=1)
    # Save execution time and memory
    plt.close()


def test_plot_img_invalid():
    # Check that we get a meaningful error message when we give a wrong
    # display_mode argument
    assert_raises(Exception, plot_anat, display_mode='zzz')


def test_plot_img_with_auto_cut_coords():
    data = np.zeros((20, 20, 20))
    data[3:-3, 3:-3, 3:-3] = 1
    img = nibabel.Nifti1Image(data, np.eye(4))

    for display_mode in 'xyz':
        plot_img(img, cut_coords=None, display_mode=display_mode,
                 black_bg=True)
    # Save execution time and memory
    plt.close()


def test_plot_img_with_resampling():
    data = _generate_img().get_data()
    affine = np.array([[1., -1., 0., 0.],
                       [1., 1., 0., 0.],
                       [0., 0., 1., 0.],
                       [0., 0., 0., 1.]])
    img = nibabel.Nifti1Image(data, affine)
    display = plot_img(img)
    display.add_overlay(img)
    display.add_contours(img, contours=2, linewidth=4,
                         colors=['limegreen', 'yellow'])
    display.add_edges(img, color='c')
    # Save execution time and memory
    plt.close()


def test_plot_noncurrent_axes():
    """Regression test for Issue #450"""
    maps_img = nibabel.Nifti1Image(np.random.random((10, 10, 10)),
                                   np.eye(4))
    fh1 = plt.figure()
    fh2 = plt.figure()
    ax1 = fh1.add_subplot(1, 1, 1)

    assert_equal(plt.gcf(), fh2, "fh2 was the last plot created.")

    # Since we gave ax1, the figure should be plotted in fh1.
    # Before #451, it was plotted in fh2.
    slicer = plot_glass_brain(maps_img, axes=ax1, title='test')
    for ax_name, niax in slicer.axes.items():
        ax_fh = niax.ax.get_figure()
        assert_equal(ax_fh, fh1, 'New axis %s should be in fh1.' % ax_name)
    # Save execution time and memory
    plt.close()


def test_plot_connectome():
    node_color = ['green', 'blue', 'k', 'cyan']
    # symmetric up to 1e-3 relative tolerance
    adjacency_matrix = np.array([[1., -2., 0.3, 0.],
                                 [-2.002, 1, 0., 0.],
                                 [0.3, 0., 1., 0.],
                                 [0., 0., 0., 1.]])
    node_coords = np.arange(3 * 4).reshape(4, 3)

    args = adjacency_matrix, node_coords
    kwargs = dict(edge_threshold=0.38,
                  title='threshold=0.38',
                  node_size=10,
                  node_color=node_color)
    plot_connectome(*args, **kwargs)
    plt.close()

    # used to speed-up tests for the next plots
    kwargs['display_mode'] = 'x'

    # node_coords not an array but a list of tuples
    plot_connectome(adjacency_matrix,
                    [tuple(each) for each in node_coords],
                    **kwargs)

    # saving to file
    filename = tempfile.mktemp(suffix='.png')
    try:
        display = plot_connectome(*args, output_file=filename, **kwargs)
        assert_true(display is None)
        assert_true(os.path.isfile(filename) and
                    os.path.getsize(filename) > 0)
    finally:
        os.remove(filename)
    plt.close()

    # with node_kwargs, edge_kwargs and edge_cmap arguments
    plot_connectome(*args,
                    edge_threshold='70%',
                    node_size=[10, 20, 30, 40],
                    node_color=np.zeros((4, 3)),
                    edge_cmap='RdBu',
                    colorbar=True,
                    node_kwargs={'marker': 'v'},
                    edge_kwargs={'linewidth': 4})
    plt.close()

    # masked array support
    masked_adjacency_matrix = np.ma.masked_array(
        adjacency_matrix, np.abs(adjacency_matrix) < 0.5)
    plot_connectome(masked_adjacency_matrix, node_coords, **kwargs)
    plt.close()

    # sparse matrix support
    sparse_adjacency_matrix = sparse.coo_matrix(adjacency_matrix)
    plot_connectome(sparse_adjacency_matrix, node_coords, **kwargs)
    plt.close()

    # NaN matrix support
    nan_adjacency_matrix = np.array([[1., np.nan, 0.],
                                     [np.nan, 1., 2.],
                                     [np.nan, 2., 1.]])
    nan_node_coords = np.arange(3 * 3).reshape(3, 3)
    plot_connectome(nan_adjacency_matrix, nan_node_coords, **kwargs)
    plt.close()
    # smoke-test where there is no edge to draw, e.g. when
    # edge_threshold is too high
    plot_connectome(*args, edge_threshold=1e12)
    plt.close()

    # with colorbar=True
    plot_connectome(*args, colorbar=True)
    plt.close()

    # smoke-test with hemispheric sagittal cuts
    plot_connectome(*args, display_mode='lzry')
    plt.close()


def test_plot_connectome_exceptions():
    node_coords = np.arange(2 * 3).reshape((2, 3))

    # Used to speed-up tests because the glass brain is always plotted
    # before any error occurs
    kwargs = {'display_mode': 'x'}

    # adjacency_matrix is not symmetric
    non_symmetric_adjacency_matrix = np.array([[1., 2],
                                               [0.4, 1.]])
    assert_raises_regex(ValueError,
                        'should be symmetric',
                        plot_connectome,
                        non_symmetric_adjacency_matrix, node_coords,
                        **kwargs)

    adjacency_matrix = np.array([[1., 2.],
                                 [2., 1.]])

    # adjacency_matrix mask is not symmetric
    masked_adjacency_matrix = np.ma.masked_array(
        adjacency_matrix, [[False, True], [False, False]])
    assert_raises_regex(ValueError,
                        'non symmetric mask',
                        plot_connectome,
                        masked_adjacency_matrix, node_coords,
                        **kwargs)

    # edge threshold is neither a number nor a string
    assert_raises_regex(TypeError,
                        'should be either a number or a string',
                        plot_connectome,
                        adjacency_matrix, node_coords,
                        edge_threshold=object(),
                        **kwargs)

    # wrong shapes for node_coords or adjacency_matrix
    assert_raises_regex(ValueError,
                        r'supposed to have shape \(n, n\).+\(1L?, 2L?\)',
                        plot_connectome,
                        adjacency_matrix[:1, :], node_coords,
                        **kwargs)

    assert_raises_regex(ValueError,
                        r'shape \(2L?, 3L?\).+\(2L?,\)',
                        plot_connectome,
                        adjacency_matrix, node_coords[:, 2],
                        **kwargs)

    wrong_adjacency_matrix = np.zeros((3, 3))
    assert_raises_regex(ValueError,
                        r'Shape mismatch.+\(3L?, 3L?\).+\(2L?, 3L?\)',
                        plot_connectome,
                        wrong_adjacency_matrix, node_coords,
                        **kwargs)

    # a few not correctly formatted strings for 'edge_threshold'
    wrong_edge_thresholds = ['0.1', '10', '10.2.3%', 'asdf%']
    for wrong_edge_threshold in wrong_edge_thresholds:
        assert_raises_regex(ValueError,
                            'should be a number followed by the percent sign',
                            plot_connectome,
                            adjacency_matrix, node_coords,
                            edge_threshold=wrong_edge_threshold,
                            **kwargs)

    # specifying node sizes via node_kwargs
    assert_raises_regex(ValueError,
                        "Please use 'node_size' and not 'node_kwargs'",
                        plot_connectome,
                        adjacency_matrix, node_coords,
                        node_kwargs={'s': 50},
                        **kwargs)

    # specifying node colors via node_kwargs
    assert_raises_regex(ValueError,
                        "Please use 'node_color' and not 'node_kwargs'",
                        plot_connectome,
                        adjacency_matrix, node_coords,
                        node_kwargs={'c': 'blue'},
                        **kwargs)


def test_singleton_ax_dim():
    for axis, direction in enumerate("xyz"):
        shape = [5, 6, 7]
        shape[axis] = 1
        img = nibabel.Nifti1Image(np.ones(shape), np.eye(4))
        plot_stat_map(img, None, display_mode=direction)
        plt.close()


def test_plot_prob_atlas():
    affine = np.eye(4)
    shape = (6, 8, 10, 5)
    rng = np.random.RandomState(0)
    data_rng = rng.normal(size=shape)
    img = nibabel.Nifti1Image(data_rng, affine)
    # Testing the 4D plot prob atlas with contours
    plot_prob_atlas(img, view_type='contours')
    plt.close()
    # Testing the 4D plot prob atlas with filled contours
    plot_prob_atlas(img, view_type='filled_contours', threshold=0.2)
    plt.close()
    # Testing the 4D plot prob atlas in continuous mode
    plot_prob_atlas(img, view_type='continuous')
    plt.close()
    # Testing the 4D plot prob atlas with a colorbar
    plot_prob_atlas(img, view_type='filled_contours', colorbar=True)
    plt.close()
    # threshold=None
    plot_prob_atlas(img, threshold=None)
    plt.close()


def test_get_colorbar_and_data_ranges_with_vmin():
    data = np.array([[-.5, 1., np.nan],
                     [0., np.nan, -.2],
                     [1.5, 2.5, 3.]])
'does not accept a "vmin" argument', _get_colorbar_and_data_ranges, data, vmax=None, symmetric_cbar=True, kwargs={'vmin': 1.}) def test_get_colorbar_and_data_ranges_pos_neg(): # data with positive and negative range data = np.array([[-.5, 1., np.nan], [0., np.nan, -.2], [1.5, 2.5, 3.]]) # Reasonable additional arguments that would end up being passed # to imshow in a real plotting use case kwargs = {'aspect': 'auto', 'alpha': 0.9} # symmetric_cbar set to True cbar_vmin, cbar_vmax, vmin, vmax = _get_colorbar_and_data_ranges( data, vmax=None, symmetric_cbar=True, kwargs=kwargs) assert_equal(vmin, -np.nanmax(data)) assert_equal(vmax, np.nanmax(data)) assert_equal(cbar_vmin, None) assert_equal(cbar_vmax, None) # same case if vmax has been set cbar_vmin, cbar_vmax, vmin, vmax = _get_colorbar_and_data_ranges( data, vmax=2, symmetric_cbar=True, kwargs=kwargs) assert_equal(vmin, -2) assert_equal(vmax, 2) assert_equal(cbar_vmin, None) assert_equal(cbar_vmax, None) # symmetric_cbar is set to False cbar_vmin, cbar_vmax, vmin, vmax = _get_colorbar_and_data_ranges( data, vmax=None, symmetric_cbar=False, kwargs=kwargs) assert_equal(vmin, -np.nanmax(data)) assert_equal(vmax, np.nanmax(data)) assert_equal(cbar_vmin, np.nanmin(data)) assert_equal(cbar_vmax, np.nanmax(data)) # same case if vmax has been set cbar_vmin, cbar_vmax, vmin, vmax = _get_colorbar_and_data_ranges( data, vmax=2, symmetric_cbar=False, kwargs=kwargs) assert_equal(vmin, -2) assert_equal(vmax, 2) assert_equal(cbar_vmin, np.nanmin(data)) assert_equal(cbar_vmax, np.nanmax(data)) # symmetric_cbar is set to 'auto', same behaviours as True for this case cbar_vmin, cbar_vmax, vmin, vmax = _get_colorbar_and_data_ranges( data, vmax=None, symmetric_cbar='auto', kwargs=kwargs) assert_equal(vmin, -np.nanmax(data)) assert_equal(vmax, np.nanmax(data)) assert_equal(cbar_vmin, None) assert_equal(cbar_vmax, None) # same case if vmax has been set cbar_vmin, cbar_vmax, vmin, vmax = _get_colorbar_and_data_ranges( data, vmax=2, symmetric_cbar='auto', kwargs=kwargs) assert_equal(vmin, -2) assert_equal(vmax, 2) assert_equal(cbar_vmin, None) assert_equal(cbar_vmax, None) def test_get_colorbar_and_data_ranges_pos(): # data with positive range data_pos = np.array([[0, 1., np.nan], [0., np.nan, 0], [1.5, 2.5, 3.]]) # symmetric_cbar set to True cbar_vmin, cbar_vmax, vmin, vmax = _get_colorbar_and_data_ranges( data_pos, vmax=None, symmetric_cbar=True, kwargs={}) assert_equal(vmin, -np.nanmax(data_pos)) assert_equal(vmax, np.nanmax(data_pos)) assert_equal(cbar_vmin, None) assert_equal(cbar_vmax, None) # same case if vmax has been set cbar_vmin, cbar_vmax, vmin, vmax = _get_colorbar_and_data_ranges( data_pos, vmax=2, symmetric_cbar=True, kwargs={}) assert_equal(vmin, -2) assert_equal(vmax, 2) assert_equal(cbar_vmin, None) assert_equal(cbar_vmax, None) # symmetric_cbar is set to False cbar_vmin, cbar_vmax, vmin, vmax = _get_colorbar_and_data_ranges( data_pos, vmax=None, symmetric_cbar=False, kwargs={}) assert_equal(vmin, -np.nanmax(data_pos)) assert_equal(vmax, np.nanmax(data_pos)) assert_equal(cbar_vmin, 0) assert_equal(cbar_vmax, None) # same case if vmax has been set cbar_vmin, cbar_vmax, vmin, vmax = _get_colorbar_and_data_ranges( data_pos, vmax=2, symmetric_cbar=False, kwargs={}) assert_equal(vmin, -2) assert_equal(vmax, 2) assert_equal(cbar_vmin, 0) assert_equal(cbar_vmax, None) # symmetric_cbar is set to 'auto', same behaviour as false in this case cbar_vmin, cbar_vmax, vmin, vmax = _get_colorbar_and_data_ranges( data_pos, vmax=None, symmetric_cbar='auto', 
    # symmetric_cbar set to 'auto': same behaviour as False in this case
    cbar_vmin, cbar_vmax, vmin, vmax = _get_colorbar_and_data_ranges(
        data_pos, vmax=None, symmetric_cbar='auto', kwargs={})
    assert_equal(vmin, -np.nanmax(data_pos))
    assert_equal(vmax, np.nanmax(data_pos))
    assert_equal(cbar_vmin, 0)
    assert_equal(cbar_vmax, None)
    # same case if vmax has been set
    cbar_vmin, cbar_vmax, vmin, vmax = _get_colorbar_and_data_ranges(
        data_pos, vmax=2, symmetric_cbar='auto', kwargs={})
    assert_equal(vmin, -2)
    assert_equal(vmax, 2)
    assert_equal(cbar_vmin, 0)
    assert_equal(cbar_vmax, None)


def test_get_colorbar_and_data_ranges_neg():
    # data with negative range only
    data_neg = np.array([[-.5, 0, np.nan],
                         [0., np.nan, -.2],
                         [0, 0, 0]])

    # symmetric_cbar set to True
    cbar_vmin, cbar_vmax, vmin, vmax = _get_colorbar_and_data_ranges(
        data_neg, vmax=None, symmetric_cbar=True, kwargs={})
    assert_equal(vmin, np.nanmin(data_neg))
    assert_equal(vmax, -np.nanmin(data_neg))
    assert_equal(cbar_vmin, None)
    assert_equal(cbar_vmax, None)
    # same case if vmax has been set
    cbar_vmin, cbar_vmax, vmin, vmax = _get_colorbar_and_data_ranges(
        data_neg, vmax=2, symmetric_cbar=True, kwargs={})
    assert_equal(vmin, -2)
    assert_equal(vmax, 2)
    assert_equal(cbar_vmin, None)
    assert_equal(cbar_vmax, None)

    # symmetric_cbar set to False
    cbar_vmin, cbar_vmax, vmin, vmax = _get_colorbar_and_data_ranges(
        data_neg, vmax=None, symmetric_cbar=False, kwargs={})
    assert_equal(vmin, np.nanmin(data_neg))
    assert_equal(vmax, -np.nanmin(data_neg))
    assert_equal(cbar_vmin, None)
    assert_equal(cbar_vmax, 0)
    # same case if vmax has been set
    cbar_vmin, cbar_vmax, vmin, vmax = _get_colorbar_and_data_ranges(
        data_neg, vmax=2, symmetric_cbar=False, kwargs={})
    assert_equal(vmin, -2)
    assert_equal(vmax, 2)
    assert_equal(cbar_vmin, None)
    assert_equal(cbar_vmax, 0)

    # symmetric_cbar set to 'auto': same behaviour as False in this case
    cbar_vmin, cbar_vmax, vmin, vmax = _get_colorbar_and_data_ranges(
        data_neg, vmax=None, symmetric_cbar='auto', kwargs={})
    assert_equal(vmin, np.nanmin(data_neg))
    assert_equal(vmax, -np.nanmin(data_neg))
    assert_equal(cbar_vmin, None)
    assert_equal(cbar_vmax, 0)
    # same case if vmax has been set
    cbar_vmin, cbar_vmax, vmin, vmax = _get_colorbar_and_data_ranges(
        data_neg, vmax=2, symmetric_cbar='auto', kwargs={})
    assert_equal(vmin, -2)
    assert_equal(vmax, 2)
    assert_equal(cbar_vmin, None)
    assert_equal(cbar_vmax, 0)


def test_get_colorbar_and_data_ranges_masked_array():
    # data with positive and negative range
    data = np.array([[-.5, 1., np.nan],
                     [0., np.nan, -.2],
                     [1.5, 2.5, 3.]])
    masked_data = np.ma.masked_greater(data, 2.)
    # Easier to fill masked values with NaN to test against later on
    filled_data = masked_data.filled(np.nan)
    # Reasonable additional arguments that would end up being passed
    # to imshow in a real plotting use case
    kwargs = {'aspect': 'auto', 'alpha': 0.9}

    # symmetric_cbar set to True
    cbar_vmin, cbar_vmax, vmin, vmax = _get_colorbar_and_data_ranges(
        masked_data, vmax=None, symmetric_cbar=True, kwargs=kwargs)
    assert_equal(vmin, -np.nanmax(filled_data))
    assert_equal(vmax, np.nanmax(filled_data))
    assert_equal(cbar_vmin, None)
    assert_equal(cbar_vmax, None)
    # same case if vmax has been set
    cbar_vmin, cbar_vmax, vmin, vmax = _get_colorbar_and_data_ranges(
        masked_data, vmax=2, symmetric_cbar=True, kwargs=kwargs)
    assert_equal(vmin, -2)
    assert_equal(vmax, 2)
    assert_equal(cbar_vmin, None)
    assert_equal(cbar_vmax, None)

    # symmetric_cbar set to False
    cbar_vmin, cbar_vmax, vmin, vmax = _get_colorbar_and_data_ranges(
        masked_data, vmax=None, symmetric_cbar=False, kwargs=kwargs)
    assert_equal(vmin, -np.nanmax(filled_data))
    assert_equal(vmax, np.nanmax(filled_data))
    assert_equal(cbar_vmin, np.nanmin(filled_data))
    assert_equal(cbar_vmax, np.nanmax(filled_data))
    # same case if vmax has been set
    cbar_vmin, cbar_vmax, vmin, vmax = _get_colorbar_and_data_ranges(
        masked_data, vmax=2, symmetric_cbar=False, kwargs=kwargs)
    assert_equal(vmin, -2)
    assert_equal(vmax, 2)
    assert_equal(cbar_vmin, np.nanmin(filled_data))
    assert_equal(cbar_vmax, np.nanmax(filled_data))

    # symmetric_cbar set to 'auto': same behaviour as True in this case
    cbar_vmin, cbar_vmax, vmin, vmax = _get_colorbar_and_data_ranges(
        masked_data, vmax=None, symmetric_cbar='auto', kwargs=kwargs)
    assert_equal(vmin, -np.nanmax(filled_data))
    assert_equal(vmax, np.nanmax(filled_data))
    assert_equal(cbar_vmin, None)
    assert_equal(cbar_vmax, None)
    # same case if vmax has been set
    cbar_vmin, cbar_vmax, vmin, vmax = _get_colorbar_and_data_ranges(
        masked_data, vmax=2, symmetric_cbar='auto', kwargs=kwargs)
    assert_equal(vmin, -2)
    assert_equal(vmax, 2)
    assert_equal(cbar_vmin, None)
    assert_equal(cbar_vmax, None)


def test_invalid_in_display_mode_cut_coords_all_plots():
    img = _generate_img()

    for plot_func in [plot_img, plot_anat, plot_roi, plot_epi,
                      plot_stat_map, plot_prob_atlas, plot_glass_brain]:
        assert_raises_regex(ValueError,
                            "The input given for display_mode='ortho' "
                            "needs to be a list of 3d world coordinates.",
                            plot_func,
                            img, display_mode='ortho', cut_coords=2)

nilearn/_utils/compat.py
"""
Compatibility layer for Python 3/Python 2 single codebase
"""
import sys
import hashlib

if sys.version_info[0] == 3:
    import pickle
    import io
    import urllib

    _basestring = str
    cPickle = pickle
    StringIO = io.StringIO
    BytesIO = io.BytesIO
    _urllib = urllib
    izip = zip

    def md5_hash(string):
        m = hashlib.md5()
        m.update(string.encode('utf-8'))
        return m.hexdigest()
else:
    import cPickle
    import StringIO
    import urllib
    import urllib2
    import urlparse
    import types
    import itertools

    _basestring = basestring
    cPickle = cPickle
    StringIO = BytesIO = StringIO.StringIO
    izip = itertools.izip

    class _module_lookup(object):
        modules = [urlparse, urllib2, urllib]

        def __getattr__(self, name):
            for module in self.modules:
                if hasattr(module, name):
                    attr = getattr(module, name)
                    if not isinstance(attr, types.ModuleType):
                        return attr
            raise NotImplementedError(
                'This function has not been imported properly')

    module_lookup = _module_lookup()

    class _urllib():
        request = module_lookup
        error = module_lookup
        parse = module_lookup

    def md5_hash(string):
        m = hashlib.md5()
        m.update(string)
        return m.hexdigest()
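A short sketch of how the compatibility layer above is meant to be consumed; the aliases are the ones defined in this module, and the example values are made up:

from nilearn._utils.compat import _basestring, _urllib, md5_hash

def looks_like_a_name(value):
    # str on Python 3, basestring on Python 2
    return isinstance(value, _basestring)

# _urllib.request resolves attributes to urllib.request on Python 3 and,
# through the _module_lookup fallback, to urllib/urllib2/urlparse on
# Python 2, so callers can write _urllib.request.urlopen(...) once.
digest = md5_hash('some-dataset-identifier')  # hex digest on both versions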
nilearn/_utils/ndimage.py
"""
N-dimensional image manipulation
"""
# Author: Gael Varoquaux, Alexandre Abraham, Philippe Gervais
# License: simplified BSD

import numpy as np
from scipy import ndimage

###############################################################################
# Operating on connected components
###############################################################################


def largest_connected_component(volume):
    """Return the largest connected component of a 3D array.

    Parameters
    ----------
    volume: numpy.array
        3D boolean array indicating a volume.

    Returns
    -------
    volume: numpy.array
        3D boolean array with only one connected component.
    """
    # We use asarray to be able to work with masked arrays.
    volume = np.asarray(volume)
    labels, label_nb = ndimage.label(volume)
    if not label_nb:
        raise ValueError('No non-zero values: no connected components')
    if label_nb == 1:
        return volume.astype(np.bool)
    label_count = np.bincount(labels.ravel().astype(np.int))
    # discard the 0 label
    label_count[0] = 0
    return labels == label_count.argmax()


def get_border_data(data, border_size):
    return np.concatenate([
        data[:border_size, :, :].ravel(),
        data[-border_size:, :, :].ravel(),
        data[:, :border_size, :].ravel(),
        data[:, -border_size:, :].ravel(),
        data[:, :, :border_size].ravel(),
        data[:, :, -border_size:].ravel(),
    ])
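# A usage sketch for largest_connected_component, kept as a doctest-style
# comment so that the module's behavior is unchanged (the two blobs below
# are made-up example data):
#
#     >>> import numpy as np
#     >>> volume = np.zeros((10, 10, 10), dtype=bool)
#     >>> volume[:2, :2, :2] = True     # small component: 8 voxels
#     >>> volume[5:, 5:, 5:] = True     # large component: 125 voxels
#     >>> largest_connected_component(volume).sum()
#     125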
def _peak_local_max(image, min_distance=10, threshold_abs=0,
                    threshold_rel=0.1, num_peaks=np.inf):
    """Find peaks in an image, and return them as a boolean array.

    Peaks are the local maxima in a region of `2 * min_distance + 1`
    (i.e. peaks are separated by at least `min_distance`).

    NOTE: If peaks are flat (i.e. multiple adjacent pixels have identical
    intensities), the coordinates of all such pixels are returned.

    Parameters
    ----------
    image : ndarray of floats
        Input image.

    min_distance : int
        Minimum number of pixels separating peaks in a region of
        `2 * min_distance + 1` (i.e. peaks are separated by at least
        `min_distance`). To find the maximum number of peaks, use
        `min_distance=1`.

    threshold_abs : float
        Minimum intensity of peaks.

    threshold_rel : float
        Minimum intensity of peaks calculated as
        `max(image) * threshold_rel`.

    num_peaks : int
        Maximum number of peaks. When the number of peaks exceeds
        `num_peaks`, return `num_peaks` peaks based on highest peak
        intensity.

    Returns
    -------
    output : ndarray of bools
        Boolean array shaped like `image`, with peaks represented by
        True values.

    Notes
    -----
    The peak local maximum function returns the coordinates of local peaks
    (maxima) in an image. A maximum filter is used for finding local
    maxima. This operation dilates the original image. After comparison
    between the dilated and original images, this function returns the
    coordinates of the peaks where the dilated image equals the original.

    This code is mostly adapted from the scikit-image 0.11.3 release.
    Location of file in scikit-image: peak_local_max function in
    skimage.feature.peak
    """
    out = np.zeros_like(image, dtype=np.bool)

    if np.all(image == image.flat[0]):
        return out

    image = image.copy()

    size = 2 * min_distance + 1
    image_max = ndimage.maximum_filter(image, size=size, mode='constant')
    mask = (image == image_max)
    image *= mask

    # find top peak candidates above a threshold
    peak_threshold = max(np.max(image.ravel()) * threshold_rel,
                         threshold_abs)

    # get coordinates of peaks
    coordinates = np.argwhere(image > peak_threshold)

    if coordinates.shape[0] > num_peaks:
        intensities = image.flat[np.ravel_multi_index(
            coordinates.transpose(), image.shape)]
        idx_maxsort = np.argsort(intensities)[::-1]
        coordinates = coordinates[idx_maxsort][:num_peaks]

    nd_indices = tuple(coordinates.T)
    out[nd_indices] = True
    return out

nilearn/_utils/param_validation.py
"""
Utilities to check for valid parameters
"""
import numbers
import warnings

from .compat import _basestring


def check_threshold(threshold, data, percentile_func, name='threshold'):
    """Checks if the given threshold is in the correct format and within
    the limit.

    If necessary, this function also returns the score of the data
    calculated based upon the given specific percentile function.
    Note: this is only for thresholds given as strings.

    Parameters
    ----------
    threshold: float or str
        If threshold is a float value, it should be within the range of
        the maximum intensity value of the data. If threshold is a
        percentage expressed in a string it must finish with a percent
        sign like "99.7%".

    data: ndarray
        an array of the input masked data.

    percentile_func: function {scoreatpercentile, fast_abs_percentile}
        Percentile function, for example scipy.stats.scoreatpercentile,
        to calculate the score on the data.

    name: str, optional
        A string just used for representing the name of the threshold for
        a precise error message.

    Returns
    -------
    threshold: number
        returns the score of the percentile on the data, or returns the
        threshold as it is if the given threshold is not a string
        percentile.
    """
    if isinstance(threshold, _basestring):
        message = ('If "{0}" is given as string it '
                   'should be a number followed by the percent '
                   'sign, e.g. "25.3%"').format(name)
        if not threshold.endswith('%'):
            raise ValueError(message)

        try:
            percentile = float(threshold[:-1])
        except ValueError as exc:
            exc.args += (message, )
            raise

        threshold = percentile_func(data, percentile)
    elif isinstance(threshold, numbers.Real):
        # checks whether the given float value exceeds the maximum
        # value of the image data
        value_check = abs(data).max()
        if abs(threshold) > value_check:
            warnings.warn("The given float value must not exceed %d. "
                          "But, you have given threshold=%s " % (value_check,
                                                                 threshold))
    else:
        raise TypeError('%s should be either a number '
                        'or a string finishing with a percent sign' % (name, ))
    return threshold
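A hedged usage sketch for check_threshold; the data values are illustrative, and scipy.stats.scoreatpercentile is one of the percentile functions the docstring names:

import numpy as np
from scipy.stats import scoreatpercentile
from nilearn._utils.param_validation import check_threshold

data = np.array([-3., -1., 0., 2., 4., 8.])
# A plain number passes through (with a warning if it exceeds abs(data).max())
print(check_threshold(2.5, data, scoreatpercentile))    # 2.5
# A percentile string is converted into a score on the data
print(check_threshold("50%", data, scoreatpercentile))  # 1.0, the median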
Location of file in scikit-image: random_walker function and its supporting sub-functions in skimage.segmentation """ import warnings import numpy as np from scipy import sparse, ndimage as ndi from sklearn.utils import as_float_array from scipy.sparse.linalg import cg def _make_graph_edges_3d(n_x, n_y, n_z): """Returns a list of edges for a 3D image. Parameters ---------- n_x: integer The size of the grid in the x direction. n_y: integer The size of the grid in the y direction n_z: integer The size of the grid in the z direction Returns ------- edges : (2, N) ndarray with the total number of edges:: N = n_x * n_y * (n_z - 1) + n_x * (n_y - 1) * n_z + (n_x - 1) * n_y * n_z Graph edges with each column describing a node-id pair. """ vertices = np.arange(n_x * n_y * n_z).reshape((n_x, n_y, n_z)) edges_deep = np.vstack((vertices[:, :, :-1].ravel(), vertices[:, :, 1:].ravel())) edges_right = np.vstack((vertices[:, :-1].ravel(), vertices[:, 1:].ravel())) edges_down = np.vstack((vertices[:-1].ravel(), vertices[1:].ravel())) edges = np.hstack((edges_deep, edges_right, edges_down)) return edges def _compute_weights_3d(data, spacing, beta=130, eps=1.e-6): # Weight calculation is the main difference in the multispectral version # Original gradient**2 replaced with sum of gradients ** 2 gradients = 0 for channel in range(0, data.shape[-1]): gradients += _compute_gradients_3d(data[..., channel], spacing) ** 2 # All channels considered together in this standard deviation beta /= 10 * data.std() gradients *= beta weights = np.exp(- gradients) weights += eps return weights def _compute_gradients_3d(data, spacing): gr_deep = np.abs(data[:, :, :-1] - data[:, :, 1:]).ravel() / spacing[2] gr_right = np.abs(data[:, :-1] - data[:, 1:]).ravel() / spacing[1] gr_down = np.abs(data[:-1] - data[1:]).ravel() / spacing[0] return np.r_[gr_deep, gr_right, gr_down] def _make_laplacian_sparse(edges, weights): """ Sparse implementation """ pixel_nb = edges.max() + 1 diag = np.arange(pixel_nb) i_indices = np.hstack((edges[0], edges[1])) j_indices = np.hstack((edges[1], edges[0])) data = np.hstack((-weights, -weights)) lap = sparse.coo_matrix((data, (i_indices, j_indices)), shape=(pixel_nb, pixel_nb)) connect = - np.ravel(lap.sum(axis=1)) lap = sparse.coo_matrix( (np.hstack((data, connect)), (np.hstack((i_indices, diag)), np.hstack((j_indices, diag)))), shape=(pixel_nb, pixel_nb)) return lap.tocsr() def _clean_labels_ar(X, labels): X = X.astype(labels.dtype) labels = np.ravel(labels) labels[labels == 0] = X return labels def _buildAB(lap_sparse, labels): """ Build the matrix A and rhs B of the linear system to solve. A and B are two blocks of the Laplacian of the image graph. """ labels = labels[labels >= 0] indices = np.arange(labels.size) unlabeled_indices = indices[labels == 0] seeds_indices = indices[labels > 0] # The following two lines take most of the time in this function B = lap_sparse[unlabeled_indices][:, seeds_indices] lap_sparse = lap_sparse[unlabeled_indices][:, unlabeled_indices] nlabels = labels.max() rhs = [] for lab in range(1, nlabels + 1): mask = (labels[seeds_indices] == lab) fs = sparse.csr_matrix(mask) fs = fs.transpose() rhs.append(B * fs) return lap_sparse, rhs def _mask_edges_weights(edges, weights, mask): """ Remove edges of the graph connected to masked nodes, as well as the corresponding weights.
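Example (a minimal sketch with hand-built inputs; the 2x2x2 grid below is an arbitrary illustration): masking one node drops the three edges that touch it:

>>> import numpy as np
>>> edges = _make_graph_edges_3d(2, 2, 2)
>>> weights = np.ones(edges.shape[1])
>>> mask = np.ones((2, 2, 2), dtype=bool)
>>> mask[0, 0, 0] = False
>>> e, w = _mask_edges_weights(edges, weights, mask)
>>> (edges.shape[1], e.shape[1])
(12, 9)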
""" mask0 = np.hstack((mask[:, :, :-1].ravel(), mask[:, :-1].ravel(), mask[:-1].ravel())) mask1 = np.hstack((mask[:, :, 1:].ravel(), mask[:, 1:].ravel(), mask[1:].ravel())) ind_mask = np.logical_and(mask0, mask1) edges, weights = edges[:, ind_mask], weights[ind_mask] max_node_index = edges.max() # Reassign edges labels to 0, 1, ... edges_number - 1 order = np.searchsorted(np.unique(edges.ravel()), np.arange(max_node_index + 1)) edges = order[edges.astype(np.int64)] return edges, weights def _build_laplacian(data, spacing, mask=None, beta=50): l_x, l_y, l_z = tuple(data.shape[i] for i in range(3)) edges = _make_graph_edges_3d(l_x, l_y, l_z) weights = _compute_weights_3d(data, spacing, beta=beta, eps=1.e-10) if mask is not None: edges, weights = _mask_edges_weights(edges, weights, mask) lap = _make_laplacian_sparse(edges, weights) del edges, weights return lap def _random_walker(data, labels, beta=130, tol=1.e-3, copy=True, spacing=None): """Random walker algorithm for segmentation from markers. Parameters ---------- data : array_like Image to be segmented in phases. Data spacing is assumed isotropic unless the `spacing` keyword argument is used. labels : array of ints, of same shape as `data` without channels dimension Array of seed markers labeled with different positive integers for different phases. Zero-labeled pixels are unlabeled pixels. Negative labels correspond to inactive pixels that are not taken into account (they are removed from the graph). If labels are not consecutive integers, the labels array will be transformed so that labels are consecutive. beta : float Penalization coefficient for the random walker motion (the greater `beta`, the more difficult the diffusion). tol : float tolerance to achieve when solving the linear system, in cg' mode. copy : bool If copy is False, the `labels` array will be overwritten with the result of the segmentation. Use copy=False if you want to save on memory. spacing : iterable of floats Spacing between voxels in each spatial dimension. If `None`, then the spacing between pixels/voxels in each dimension is assumed 1. Returns ------- output : ndarray * An array of ints of same shape as `data`, in which each pixel has been labeled according to the marker that reached the pixel first by anisotropic diffusion. Notes ----- The `spacing` argument is specifically for anisotropic datasets, where data points are spaced differently in one or more spatial dimensions. Anisotropic data is commonly encountered in medical imaging. The algorithm was first proposed in *Random walks for image segmentation*, Leo Grady, IEEE Trans Pattern Anal Mach Intell. 2006 Nov;28(11):1768-83. The algorithm solves the diffusion equation at infinite times for sources placed on markers of each phase in turn. A pixel is labeled with the phase that has the greatest probability to diffuse first to the pixel. The diffusion equation is solved by minimizing x.T L x for each phase, where L is the Laplacian of the weighted graph of the image, and x is the probability that a marker of the given phase arrives first at a pixel by diffusion (x=1 on markers of the phase, x=0 on the other markers, and the other coefficients are looked for). Each pixel is attributed the label for which it has a maximal value of x. The Laplacian L of the image is defined as: - L_ii = d_i, the number of neighbors of pixel i (the degree of i) - L_ij = -w_ij if i and j are adjacent pixels The weight w_ij is a decreasing function of the norm of the local gradient. 
This ensures that diffusion is easier between pixels of similar values. When the Laplacian is decomposed into blocks of marked and unmarked pixels::

    L = [[M,   B.T],
         [B,   A  ]]

with first indices corresponding to marked pixels, and then to unmarked pixels, minimizing x.T L x for one phase amounts to solving::

    A x = - B x_m

where x_m = 1 on markers of the given phase, and 0 on other markers. In this implementation, the linear system is solved with the conjugate gradient method (see `_solve_cg`). """ if (labels != 0).all(): warnings.warn('Random walker only segments unlabeled areas, where ' 'labels == 0. No zero-valued areas in labels were ' 'found. Returning provided labels.') out_labels = labels return out_labels # multichannel is always False here, since we do not process # RGB/multichannel images. multichannel = False if not multichannel: if data.ndim < 2 or data.ndim > 3: raise ValueError('For non-multichannel input, data must be of ' 'dimension 2 or 3.') dims = data.shape # To reshape final labeled result data = np.atleast_3d(as_float_array(data))[..., np.newaxis] # Spacing kwarg checks if spacing is None: spacing = np.asarray((1.,) * 3) elif len(spacing) == len(dims): if len(spacing) == 2: # Need a dummy spacing for singleton 3rd dim spacing = np.r_[spacing, 1.] else: # Convert to array spacing = np.asarray(spacing) else: raise ValueError('Input argument `spacing` incorrect, should be an ' 'iterable with one number per spatial dimension.') if copy: labels = np.copy(labels) label_values = np.unique(labels) # Reorder label values to have consecutive integers (no gaps) if np.any(np.diff(label_values) != 1): mask = labels >= 0 labels[mask] = np.searchsorted(np.unique(labels[mask]), labels[mask]).astype(labels.dtype) labels = labels.astype(np.int32) # If the array has pruned zones, be sure that no isolated pixels # exist between pruned zones (they could not be determined) if np.any(labels < 0): filled = ndi.binary_propagation(labels > 0, mask=labels >= 0) labels[np.logical_and(np.logical_not(filled), labels == 0)] = -1 del filled labels = np.atleast_3d(labels) if np.any(labels < 0): lap_sparse = _build_laplacian(data, spacing, mask=labels >= 0, beta=beta) else: lap_sparse = _build_laplacian(data, spacing, beta=beta) lap_sparse, B = _buildAB(lap_sparse, labels) # We solve the linear system # lap_sparse X = B # where X[i, j] is the probability that a marker of label i arrives # first at pixel j by anisotropic diffusion. X = _solve_cg(lap_sparse, B, tol=tol) # Clean up results X = _clean_labels_ar(X + 1, labels).reshape(dims) return X def _solve_cg(lap_sparse, B, tol): """ Solve lap_sparse X_i = B_i for each phase i, using the conjugate gradient method. For each pixel, the label i corresponding to the maximal X_i is returned.
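For reference, the underlying scipy call has this form (a minimal sketch, not the exact data flow of this function; the identity system below is arbitrary):

>>> import numpy as np
>>> from scipy import sparse
>>> from scipy.sparse.linalg import cg
>>> lap = sparse.identity(4, format='csc')
>>> x, info = cg(lap, np.ones(4), tol=1e-3)
>>> info  # 0 means convergence
0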
""" lap_sparse = lap_sparse.tocsc() X = [] for i in range(len(B)): x0 = cg(lap_sparse, -B[i].todense(), tol=tol)[0] X.append(x0) X = np.array(X) X = np.argmax(X, axis=0) return X PKH{$" *'*'nilearn/_utils/cache_mixin.py""" Mixin for cache with joblib """ # Author: Gael Varoquaux, Alexandre Abraham, Philippe Gervais # License: simplified BSD import json import warnings import os import shutil from distutils.version import LooseVersion import nibabel from sklearn.externals.joblib import Memory MEMORY_CLASSES = (Memory, ) try: from joblib import Memory as JoblibMemory MEMORY_CLASSES = (Memory, JoblibMemory) except ImportError: pass import nilearn from .compat import _basestring __CACHE_CHECKED = dict() def _safe_cache(memory, func, **kwargs): """ A wrapper for mem.cache that flushes the cache if the version number of nibabel has changed. """ cachedir = memory.cachedir if cachedir is None or cachedir in __CACHE_CHECKED: return memory.cache(func, **kwargs) version_file = os.path.join(cachedir, 'module_versions.json') versions = dict() if os.path.exists(version_file): with open(version_file, 'r') as _version_file: versions = json.load(_version_file) modules = (nibabel, ) # Keep only the major + minor version numbers my_versions = dict((m.__name__, LooseVersion(m.__version__).version[:2]) for m in modules) commons = set(versions.keys()).intersection(set(my_versions.keys())) collisions = [m for m in commons if versions[m] != my_versions[m]] # Flush cache if version collision if len(collisions) > 0: if nilearn.CHECK_CACHE_VERSION: warnings.warn("Incompatible cache in %s: " "different version of nibabel. Deleting " "the cache. Put nilearn.CHECK_CACHE_VERSION " "to false to avoid this behavior." % cachedir) try: tmp_dir = (os.path.split(cachedir)[:-1] + ('old_%i' % os.getpid(), )) tmp_dir = os.path.join(*tmp_dir) # We use rename + unlink to be more robust to race # conditions os.rename(cachedir, tmp_dir) shutil.rmtree(tmp_dir) except OSError: # Another process could have removed this dir pass try: os.makedirs(cachedir) except OSError: # File exists? pass else: warnings.warn("Incompatible cache in %s: " "old version of nibabel." % cachedir) # Write json files if configuration is different if versions != my_versions: with open(version_file, 'w') as _version_file: json.dump(my_versions, _version_file) __CACHE_CHECKED[cachedir] = True return memory.cache(func, **kwargs) def cache(func, memory, func_memory_level=None, memory_level=None, **kwargs): """ Return a joblib.Memory object. The memory_level determines the level above which the wrapped function output is cached. By specifying a numeric value for this level, the user can to control the amount of cache memory used. This function will cache the function call or not depending on the cache level. Parameters ---------- func: function The function which output is to be cached. memory: instance of joblib.Memory or string Used to cache the function call. func_memory_level: int, optional The memory_level from which caching must be enabled for the wrapped function. memory_level: int, optional The memory_level used to determine if function call must be cached or not (if user_memory_level is equal of greater than func_memory_level the function is cached) kwargs: keyword arguments The keyword arguments passed to memory.cache Returns ------- mem: joblib.MemorizedFunc object that wraps the function func. This object may be a no-op, if the requested level is lower than the value given to _cache()). For consistency, a joblib.Memory object is always returned. 
""" verbose = kwargs.get('verbose', 0) # memory_level and func_memory_level must be both None or both integers. memory_levels = [memory_level, func_memory_level] both_params_integers = all(isinstance(lvl, int) for lvl in memory_levels) both_params_none = all(lvl is None for lvl in memory_levels) if not (both_params_integers or both_params_none): raise ValueError('Reference and user memory levels must be both None ' 'or both integers.') if memory is not None and (func_memory_level is None or memory_level >= func_memory_level): if isinstance(memory, _basestring): memory = Memory(cachedir=memory, verbose=verbose) if not isinstance(memory, MEMORY_CLASSES): raise TypeError("'memory' argument must be a string or a " "joblib.Memory object. " "%s %s was given." % (memory, type(memory))) if (memory.cachedir is None and memory_level is not None and memory_level > 1): warnings.warn("Caching has been enabled (memory_level = %d) " "but no Memory object or path has been provided" " (parameter memory). Caching deactivated for " "function %s." % (memory_level, func.__name__), stacklevel=2) else: memory = Memory(cachedir=None, verbose=verbose) return _safe_cache(memory, func, **kwargs) class CacheMixin(object): """Mixin to add caching to a class. This class is a thin layer on top of joblib.Memory, that mainly adds a "caching level", similar to a "log level". Usage: to cache the results of a method, wrap it in self._cache() defined by this class. Caching is performed only if the user-specified cache level (self._memory_level) is greater than the value given as a parameter to self._cache(). See _cache() documentation for details. """ def _cache(self, func, func_memory_level=1, **kwargs): """Return a joblib.Memory object. The memory_level determines the level above which the wrapped function output is cached. By specifying a numeric value for this level, the user can to control the amount of cache memory used. This function will cache the function call or not depending on the cache level. Parameters ---------- func: function The function the output of which is to be cached. memory_level: int The memory_level from which caching must be enabled for the wrapped function. Returns ------- mem: joblib.Memory object that wraps the function func. This object may be a no-op, if the requested level is lower than the value given to _cache()). For consistency, a joblib.Memory object is always returned. """ verbose = getattr(self, 'verbose', 0) # Creates attributes if they don't exist # This is to make creating them in __init__() optional. if not hasattr(self, "memory_level"): self.memory_level = 0 if not hasattr(self, "memory"): self.memory = Memory(cachedir=None, verbose=verbose) if isinstance(self.memory, _basestring): cache_dir = self.memory if nilearn.EXPAND_PATH_WILDCARDS: cache_dir = os.path.expanduser(cache_dir) # Perform some verifications on given path. split_cache_dir = os.path.split(cache_dir) if (len(split_cache_dir) > 1 and (not os.path.exists(split_cache_dir[0]) and split_cache_dir[0] != '')): if (not nilearn.EXPAND_PATH_WILDCARDS and cache_dir.startswith("~")): # Maybe the user want to enable expanded user path. error_msg = ("Given cache path parent directory doesn't " "exists, you gave '{0}'. Enabling " "nilearn.EXPAND_PATH_WILDCARDS could solve " "this issue.".format(split_cache_dir[0])) elif self.memory.startswith("~"): # Path built on top of expanded user path doesn't exist. 
error_msg = ("Given cache path parent directory doesn't " "exists, you gave '{0}' which was expanded " "as '{1}' but doesn't exist either. Use " "nilearn.EXPAND_PATH_WILDCARDS to deactivate " "auto expand user path (~) behavior." .format(split_cache_dir[0], os.path.dirname(self.memory))) else: # The given cache base path doesn't exist. error_msg = ("Given cache path parent directory doesn't " "exists, you gave '{0}'." .format(split_cache_dir[0])) raise ValueError(error_msg) self.memory = Memory(cachedir=cache_dir, verbose=verbose) # If cache level is 0 but a memory object has been provided, set # memory_level to 1 with a warning. if self.memory_level == 0 and self.memory.cachedir is not None: warnings.warn("memory_level is currently set to 0 but " "a Memory object has been provided. " "Setting memory_level to 1.") self.memory_level = 1 return cache(func, self.memory, func_memory_level=func_memory_level, memory_level=self.memory_level, **kwargs) PKlmHU~_ nilearn/_utils/class_inspect.py""" Small utilities to inspect classes """ from sklearn.base import BaseEstimator import inspect from .exceptions import AuthorizedException def get_params(cls, instance, ignore=None): """ Retrieve the initialization parameters corresponding to a class This helper function retrieves the parameters of function __init__ for class 'cls' and returns the value for these parameters in object 'instance'. When using a composition pattern (e.g. with a NiftiMasker class), it is useful to forward parameters from one instance to another. Parameters ---------- cls: class The class that gives us the list of parameters we are interested in instance: object, instance of BaseEstimator The object that gives us the values of the parameters ignore: None or list of strings Names of the parameters that are not returned. Returns ------- params: dict The dict of parameters """ _ignore = set(('memory', 'memory_level', 'verbose', 'copy', 'n_jobs')) if ignore is not None: _ignore.update(ignore) param_names = cls._get_param_names() params = dict() for param_name in param_names: if param_name in _ignore: continue if hasattr(instance, param_name): params[param_name] = getattr(instance, param_name) return params def enclosing_scope_name(ensure_estimator=True, stack_level=2): """ Find the name of the enclosing scope for debug output purpose Use inspection to climb up the stack until the calling object. This is typically used to get the estimator at the origin of a functional call for debug print purpose. Parameters ---------- ensure_estimator: boolean, default: True If true, find the enclosing object deriving from 'BaseEstimator' stack_level: integer, default 2 If ensure_estimator is not True, stack_level quantifies the number of frame we will go up. 
""" try: frame = inspect.currentframe() if not ensure_estimator: for _ in range(stack_level): frame = frame.f_back else: while True: frame = frame.f_back if not 'self' in frame.f_locals: continue if not isinstance(frame.f_locals['self'], BaseEstimator): continue break if 'self' in frame.f_locals: caller_name = frame.f_locals['self'].__class__.__name__ caller_name = '%s.%s' % (caller_name, frame.f_code.co_name) else: caller_name = frame.f_code.co_name return caller_name except AuthorizedException: return 'Unknown' PKlmH!nilearn/_utils/extmath.py""" Extended math utilities """ # Author: Gael Varoquaux # License: BSD import numpy as np try: # partition is available only in numpy >= 1.8.0 from numpy import partition except ImportError: partition = None def fast_abs_percentile(data, percentile=80): """ A fast version of the percentile of the absolute value. Parameters ---------- data: ndarray, possibly masked array The input data percentile: number between 0 and 100 The percentile that we are asking for Returns ------- value: number The score at percentile Notes ----- This is a faster, and less accurate version of scipy.stats.scoreatpercentile(np.abs(data), percentile) """ if hasattr(data, 'mask'): # Catter for masked arrays data = np.asarray(data[np.logical_not(data.mask)]) data = np.abs(data) data = data.ravel() index = int(data.size * .01 * percentile) if partition is not None: # Partial sort: faster than sort data = partition(data, index) else: data.sort() return data[index] def is_spd(M, decimal=15, verbose=1): """Assert that input matrix is symmetric positive definite. M must be symmetric down to specified decimal places. The check is performed by checking that all eigenvalues are positive. Parameters ---------- M: numpy.ndarray symmetric positive definite matrix. verbose: int, optional verbosity level (0 means no message) Returns ------- answer: boolean True if matrix is symmetric positive definite, False otherwise. """ if not np.allclose(M, M.T, atol=0, rtol=10 ** -decimal): if verbose > 0: print("matrix not symmetric to %d decimals" % decimal) return False eigvalsh = np.linalg.eigvalsh(M) ispd = eigvalsh.min() > 0 if not ispd and verbose > 0: print("matrix has a negative eigenvalue: %.3f" % eigvalsh.min()) return ispd PKlmHJ{  nilearn/_utils/logger.py"""Logging facility for nilearn""" # Author: Philippe Gervais # License: simplified BSD import inspect from sklearn.base import BaseEstimator from .compat import _basestring # The technique used in the log() function only applies to CPython, because # it uses the inspect module to walk the call stack. def log(msg, verbose=1, object_classes=(BaseEstimator, ), stack_level=1, msg_level=1): """Display a message to the user, depending on the verbosity level. This function allows to display some information that references an object that is significant to the user, instead of a internal function. The goal is to make user's code as simple to debug as possible. Parameters ---------- msg: str message to display verbose: int current verbosity level. Message is displayed if this value is greater or equal to msg_level. object_classes: tuple of type classes that should appear to emit the message stack_level: int if no object in the call stack matches object_classes, go back that amount in the call stack and display class/function name thereof. msg_level: int verbosity level at and above which message should be displayed to the user. Most of the time this parameter can be left unchanged. 
Notes ----- This function does tricky things to ensure that the proper object is referenced in the message. If it is called e.g. inside a function that is called by a method of an object inheriting from any class in object_classes, then the name of the object (and the method) will be displayed to the user. If several matching objects exist in the call stack, the highest one is used (first call chronologically), because this is the one which is most likely to have been written in the user's script. """ if verbose >= msg_level: stack = inspect.stack() object_frame = None object_self = None for f in reversed(stack): frame = f[0] current_self = frame.f_locals.get("self", None) if isinstance(current_self, object_classes): object_frame = frame func_name = f[3] object_self = current_self break if object_frame is None: # no object found: use stack_level if stack_level >= len(stack): stack_level = -1 func_name = '' else: object_frame, _, _, func_name = stack[stack_level][:4] object_self = object_frame.f_locals.get("self", None) if object_self is not None: func_name = "%s.%s" % (object_self.__class__.__name__, func_name) print("[{func_name}] {msg}".format(func_name=func_name, msg=msg)) def _compose_err_msg(msg, **kwargs): """Append key-value pairs to msg, for display. Parameters ---------- msg: string arbitrary message kwargs: dict arbitrary dictionary Returns ------- updated_msg: string msg, with "key: value" appended. Only string values are appended. Example ------- >>> _compose_err_msg('Error message with arguments...', arg_num=123, \ arg_str='filename.nii', arg_bool=True) 'Error message with arguments...\\narg_str: filename.nii' >>> """ updated_msg = msg for k, v in sorted(kwargs.items()): if isinstance(v, _basestring): # print only str-like arguments updated_msg += "\n" + k + ": " + v return updated_msg

nilearn/_utils/numpy_conversions.py""" Validation and conversion utilities for numpy. """ # Author: Gael Varoquaux, Alexandre Abraham, Philippe Gervais # License: simplified BSD import csv import numpy as np from .compat import _basestring def _asarray(arr, dtype=None, order=None): # np.asarray does not take "K" and "A" orders in version 1.3.0 if order in ("K", "A", None): if (arr.itemsize == 1 and dtype == np.bool) \ or (arr.dtype == np.bool and np.dtype(dtype).itemsize == 1): ret = arr.view(dtype=dtype) else: ret = np.asarray(arr, dtype=dtype) else: if (((arr.itemsize == 1 and dtype == np.bool) or (arr.dtype == np.bool and np.dtype(dtype).itemsize == 1)) and (order == "F" and arr.flags["F_CONTIGUOUS"] or order == "C" and arr.flags["C_CONTIGUOUS"])): ret = arr.view(dtype=dtype) else: ret = np.asarray(arr, dtype=dtype, order=order) return ret def as_ndarray(arr, copy=False, dtype=None, order='K'): """Starting with an arbitrary array, convert to numpy.ndarray. In the case of a memmap array, a copy is automatically made to break the link with the underlying file (whatever the value of the "copy" keyword). The purpose of this function is mainly to get rid of memmap objects, but it can be used for other purposes. In particular, combining copying and casting can lead to performance improvements in some cases, by avoiding unnecessary copies. If not specified, input array order is preserved, in all cases, even when a copy is requested. Caveat: this function does not copy during bool to/from 1-byte dtype conversions. This can lead to some surprising results in some rare cases.
Example: a = numpy.asarray([0, 1, 2], dtype=numpy.int8) b = as_ndarray(a, dtype=bool) # array([False, True, True], dtype=bool) c = as_ndarray(b, dtype=numpy.int8) # array([0, 1, 2], dtype=numpy.int8) The usually expected result for the last line would be array([0, 1, 1]) because True evaluates to 1. Since there is no copy made here, the original array is recovered. Parameters ---------- arr: array-like input array. Any value accepted by numpy.asarray is valid. copy: bool if True, force a copy of the array. Always True when arr is a memmap. dtype: any numpy dtype dtype of the returned array. Performing copy and type conversion at the same time can in some cases avoid an additional copy. order: string gives the order of the returned array. Valid values are: "C", "F", "A", "K", None. default is "K". See ndarray.copy() for more information. Returns ------- ret: numpy.ndarray Numpy array containing the same data as arr, always of class numpy.ndarray, and with no link to any underlying file. """ # This function should work on numpy 1.3 # in this version, astype() and copy() have no "order" keyword. # and asarray() does not accept the "K" and "A" values for order. # numpy.asarray never copies a subclass of numpy.ndarray (even for # memmaps) when dtype is unchanged. # .astype() always copies if order not in ("C", "F", "A", "K", None): raise ValueError("Invalid value for 'order': %s" % str(order)) if isinstance(arr, np.memmap): if dtype is None: if order in ("K", "A", None): ret = np.array(np.asarray(arr), copy=True) else: ret = np.array(np.asarray(arr), copy=True, order=order) else: if order in ("K", "A", None): # always copy (even when dtype does not change) ret = np.asarray(arr).astype(dtype) else: # First load data from disk without changing order # Changing order while reading through a memmap is incredibly # inefficient. ret = np.array(arr, copy=True) ret = _asarray(ret, dtype=dtype, order=order) elif isinstance(arr, np.ndarray): ret = _asarray(arr, dtype=dtype, order=order) # In the present case, the np.may_share_memory result is always reliable. if np.may_share_memory(ret, arr) and copy: # order-preserving copy if ret.flags["F_CONTIGUOUS"]: ret = ret.T.copy().T else: ret = ret.copy() elif isinstance(arr, (list, tuple)): if order in ("A", "K"): ret = np.asarray(arr, dtype=dtype) else: ret = np.asarray(arr, dtype=dtype, order=order) else: raise ValueError("Type not handled: %s" % arr.__class__) return ret def csv_to_array(csv_path, delimiters=' \t,;', **kwargs): """Read a CSV file by trying to guess its delimiter Parameters ---------- csv_path: string Path of the CSV file to load. delimiters: string Each character of the delimiters string is a potential delimiter for the CSV file. kwargs: keyword arguments The additional keyword arguments are passed to numpy.genfromtxt when loading the CSV. Returns ------- array: numpy.ndarray An array containing the data loaded from the CSV file. """ if not isinstance(csv_path, _basestring): raise TypeError('CSV must be a file path. Got a CSV of type: %s' % type(csv_path)) try: # First, we try genfromtxt which works in most cases. array = np.genfromtxt(csv_path, loose=False, **kwargs) except ValueError: # There was an error during the conversion to numpy array, probably # because the delimiter is wrong. # In that case, we try to guess the delimiter.
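# Note that csv.Sniffer below only sees the first line of the file,
# so a delimiter that changes from line to line will still make the
# second np.genfromtxt call fail.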
try: with open(csv_path, 'r') as csv_file: dialect = csv.Sniffer().sniff(csv_file.readline(), delimiters) except csv.Error as e: raise TypeError( 'Could not read CSV file [%s]: %s' % (csv_path, e.args[0])) array = np.genfromtxt(csv_path, delimiter=dialect.delimiter, **kwargs) return array

nilearn/_utils/testing.py"""Utilities for testing nilearn.""" # Author: Alexandre Abraham, Philippe Gervais # License: simplified BSD import contextlib import functools import inspect import os import re import sys import tempfile import warnings import gc import numpy as np import scipy.signal from sklearn.utils import check_random_state import scipy.linalg import nibabel from .. import masking from . import logger from .compat import _basestring, _urllib from ..datasets.utils import _fetch_files try: from nose.tools import assert_raises_regex except ImportError: # For Py 2.7 try: from nose.tools import assert_raises_regexp as assert_raises_regex except ImportError: # for Py 2.6 def assert_raises_regex(expected_exception, expected_regexp, callable_obj=None, *args, **kwargs): """Helper function to check for message patterns in exceptions""" not_raised = False try: callable_obj(*args, **kwargs) not_raised = True except Exception as e: error_message = str(e) if not re.compile(expected_regexp).search(error_message): raise AssertionError("Error message should match pattern " "%r. %r does not." % (expected_regexp, error_message)) if not_raised: raise AssertionError("Should have raised %r" % expected_exception(expected_regexp)) try: from sklearn.utils.testing import assert_warns except ImportError: # sklearn.utils.testing.assert_warns new in scikit-learn 0.14 def assert_warns(warning_class, func, *args, **kw): with warnings.catch_warnings(record=True): warnings.simplefilter("ignore", warning_class) output = func(*args, **kw) return output # We use the memory_profiler library for memory consumption checks try: from memory_profiler import memory_usage def with_memory_profiler(func): """A decorator to skip tests requiring memory_profiler.""" return func def memory_used(func, *args, **kwargs): """Compute memory usage when executing func.""" def func_3_times(*args, **kwargs): for _ in range(3): func(*args, **kwargs) gc.collect() mem_use = memory_usage((func_3_times, args, kwargs), interval=0.001) return max(mem_use) - min(mem_use) except ImportError: def with_memory_profiler(func): """A decorator to skip tests requiring memory_profiler.""" def dummy_func(): import nose raise nose.SkipTest('Test requires memory_profiler.') return dummy_func memory_usage = memory_used = None def assert_memory_less_than(memory_limit, tolerance, callable_obj, *args, **kwargs): """Check that memory consumption of a callable stays below a given limit. Parameters ---------- memory_limit : int The expected memory limit in MiB. tolerance: float As memory_profiler results have some variability, this adds some tolerance around memory_limit. Accepted values are in range [0.0, 1.0]. callable_obj: callable The function to be called to check memory consumption. """ mem_used = memory_used(callable_obj, *args, **kwargs) if mem_used > memory_limit * (1 + tolerance): raise ValueError("Memory consumption measured ({0:.2f} MiB) is " "greater than required memory limit ({1} MiB) within " "accepted tolerance ({2:.2f}%)." "".format(mem_used, memory_limit, tolerance * 100)) # We are confident in memory_profiler measures above 100MiB. # We raise an error if the measure is below the limit of 50MiB to avoid # false positives.
if mem_used < 50: raise ValueError("Memory profiler measured an untrustworthy memory " "consumption ({0:.2f} MiB). The expected memory " "limit was {1:.2f} MiB. Try to bench with larger " "objects (at least 100MiB in memory).". format(mem_used, memory_limit)) class MockRequest(object): def __init__(self, url): self.url = url def add_header(*args): pass class MockOpener(object): def __init__(self): pass def open(self, request): return request.url @contextlib.contextmanager def write_tmp_imgs(*imgs, **kwargs): """Context manager for writing Nifti images. Write nifti images in a temporary location, and remove them at the end of the block. Parameters ---------- imgs: Nifti1Image Several Nifti images. Every format understood by nibabel.save is accepted. create_files: bool if True, imgs are written on disk and filenames are returned. If False, nothing is written, and imgs is returned as output. This is useful to test the two cases (filename / Nifti1Image) in the same loop. use_wildcards: bool if True, and create_files is True, imgs are written on disk and a matching glob is returned. Returns ------- filenames: string or list of filename(s) where input images have been written. If a single image has been given as input, a single string is returned. Otherwise, a list of strings is returned. """ valid_keys = set(("create_files", "use_wildcards")) input_keys = set(kwargs.keys()) invalid_keys = input_keys - valid_keys if len(invalid_keys) > 0: raise TypeError("%s: unexpected keyword argument(s): %s" % (sys._getframe().f_code.co_name, " ".join(invalid_keys))) create_files = kwargs.get("create_files", True) use_wildcards = kwargs.get("use_wildcards", False) prefix = "nilearn_" suffix = ".nii" if create_files: filenames = [] try: with warnings.catch_warnings(): warnings.simplefilter("ignore", RuntimeWarning) for img in imgs: filename = tempfile.mktemp(prefix=prefix, suffix=suffix, dir=None) filenames.append(filename) img.to_filename(filename) if use_wildcards: yield prefix + "*" + suffix else: if len(imgs) == 1: yield filenames[0] else: yield filenames finally: # Ensure all created files are removed for filename in filenames: os.remove(filename) else: # No-op if len(imgs) == 1: yield imgs[0] else: yield imgs class mock_request(object): def __init__(self): """Object that mocks the urllib (future) module to store downloaded filenames. `urls` is the list of the files whose download has been requested. """ self.urls = set() def reset(self): self.urls = set() def Request(self, url): self.urls.add(url) return MockRequest(url) def build_opener(self, *args, **kwargs): return MockOpener() def wrap_chunk_read_(_chunk_read_): def mock_chunk_read_(response, local_file, initial_size=0, chunk_size=8192, report_hook=None, verbose=0): if not isinstance(response, _basestring): return _chunk_read_(response, local_file, initial_size=initial_size, chunk_size=chunk_size, report_hook=report_hook, verbose=verbose) return response return mock_chunk_read_ def mock_chunk_read_raise_error_(response, local_file, initial_size=0, chunk_size=8192, report_hook=None, verbose=0): raise _urllib.errors.HTTPError("url", 418, "I'm a teapot", None, None) class FetchFilesMock(object): _mock_fetch_files = functools.partial(_fetch_files, mock=True) def __init__(self): """Create a mock that can fill a CSV file if needed """ self.csv_files = {} def add_csv(self, filename, content): self.csv_files[filename] = content def __call__(self, *args, **kwargs): """Load requested dataset, downloading it if needed or requested.
For test purposes, instead of actually fetching the dataset, this function creates empty files and returns their paths. """ filenames = self._mock_fetch_files(*args, **kwargs) # Fill CSV files with given content if needed for fname in filenames: basename = os.path.basename(fname) if basename in self.csv_files: array = self.csv_files[basename] # np.savetxt does not have a header argument for numpy 1.6 # np.savetxt(fname, array, delimiter=',', fmt="%s", # header=','.join(array.dtype.names)) # We need to add the header ourselves with open(fname, 'wb') as f: header = '# {0}\n'.format(','.join(array.dtype.names)) f.write(header.encode()) np.savetxt(f, array, delimiter=',', fmt='%s') return filenames def generate_timeseries(n_instants, n_features, rand_gen=None): """Generate some random timeseries. """ if rand_gen is None: rand_gen = np.random.RandomState(0) # TODO: add an "order" keyword return rand_gen.randn(n_instants, n_features) def generate_regions_ts(n_features, n_regions, overlap=0, rand_gen=None, window="boxcar"): """Generate some regions as timeseries. Parameters ---------- overlap: int Number of overlapping voxels between two regions (more or less) window: str Name of a window in scipy.signal, e.g. "hamming". Returns ------- regions: numpy.ndarray regions, represented as signals; shape (n_regions, n_features) """ if rand_gen is None: rand_gen = np.random.RandomState(0) if window is None: window = "boxcar" assert(n_features > n_regions) # Compute region boundaries indices. # Start at 1 to avoid getting an empty region boundaries = np.zeros(n_regions + 1) boundaries[-1] = n_features boundaries[1:-1] = rand_gen.permutation(np.arange(1, n_features) )[:n_regions - 1] boundaries.sort() regions = np.zeros((n_regions, n_features), order="C") overlap_end = int((overlap + 1) / 2.) overlap_start = int(overlap / 2.) for n in range(len(boundaries) - 1): start = int(max(0, boundaries[n] - overlap_start)) end = int(min(n_features, boundaries[n + 1] + overlap_end)) win = scipy.signal.get_window(window, end - start) win /= win.mean() # unity mean regions[n, start:end] = win return regions def generate_maps(shape, n_regions, overlap=0, border=1, window="boxcar", rand_gen=None, affine=np.eye(4)): """Generate a 4D volume containing several maps. Parameters ---------- n_regions: int number of regions to generate overlap: int approximate number of voxels common to two neighboring regions window: str name of a window in scipy.signal. Used to get non-uniform regions. border: int number of background voxels on each side of the 3D volumes. Returns ------- maps: nibabel.Nifti1Image 4D array, containing maps. """ mask = np.zeros(shape, dtype=np.int8) mask[border:-border, border:-border, border:-border] = 1 ts = generate_regions_ts(mask.sum(), n_regions, overlap=overlap, rand_gen=rand_gen, window=window) mask_img = nibabel.Nifti1Image(mask, affine) return masking.unmask(ts, mask_img), mask_img def generate_labeled_regions(shape, n_regions, rand_gen=None, labels=None, affine=np.eye(4), dtype=np.int): """Generate a 3D volume with labeled regions. Parameters ---------- shape: tuple shape of returned array n_regions: int number of regions to generate. By default (if "labels" is None), add a background with value zero. labels: iterable labels to use for each zone. If provided, n_regions is unused. rand_gen: numpy.random.RandomState random generator to use for generation. affine: numpy.ndarray affine of returned image Returns ------- regions: nibabel.Nifti1Image data has shape "shape", containing region labels.
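Example (an illustrative sketch; the shape and region count below are arbitrary):

>>> img = generate_labeled_regions((7, 7, 7), n_regions=3)
>>> img.shape
(7, 7, 7)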
""" n_voxels = shape[0] * shape[1] * shape[2] if labels is None: labels = range(0, n_regions + 1) n_regions += 1 else: n_regions = len(labels) regions = generate_regions_ts(n_voxels, n_regions, rand_gen=rand_gen) # replace weights with labels for n, row in zip(labels, regions): row[row > 0] = n data = np.zeros(shape, dtype=dtype) data[np.ones(shape, dtype=np.bool)] = regions.sum(axis=0).T return nibabel.Nifti1Image(data, affine) def generate_labeled_regions_large(shape, n_regions, rand_gen=None, affine=np.eye(4)): """Similar to generate_labeled_regions, but suitable for a large number of regions. See generate_labeled_regions for details. """ if rand_gen is None: rand_gen = np.random.RandomState(0) data = rand_gen.randint(n_regions + 1, size=shape) if len(np.unique(data)) != n_regions + 1: raise ValueError("Some labels are missing. Maybe shape is too small.") return nibabel.Nifti1Image(data, affine) def generate_fake_fmri(shape=(10, 11, 12), length=17, kind="noise", affine=np.eye(4), n_blocks=None, block_size=None, block_type='classification', rand_gen=np.random.RandomState(0)): """Generate a signal which can be used for testing. The return value is a 4D array, representing 3D volumes along time. Only the voxels in the center are non-zero, to mimic the presence of brain voxels in real signals. Setting n_blocks to an integer generates condition blocks, the remaining of the timeseries corresponding to 'rest' or 'baseline' condition. Parameters ---------- shape: tuple, optional Shape of 3D volume length: int, optional Number of time instants kind: string, optional Kind of signal used as timeseries. "noise": uniformly sampled values in [0..255] "step": 0.5 for the first half then 1. affine: numpy.ndarray Affine of returned images n_blocks: int or None Number of condition blocks. block_size: int or None Number of timepoints in a block. Used only if n_blocks is not None. Defaults to 3 if n_blocks is not None. block_type: str Defines if the returned target should be used for 'classification' or 'regression'. Returns ------- fmri: nibabel.Nifti1Image fake fmri signal. shape: shape + (length,) mask: nibabel.Nifti1Image mask giving non-zero voxels target: numpy.ndarray Classification or regression target. Shape of number of time points (length). 
Returned only if n_blocks is not None """ full_shape = shape + (length, ) fmri = np.zeros(full_shape) # Fill central voxels' timeseries with random signals width = [s // 2 for s in shape] shift = [s // 4 for s in shape] if kind == "noise": signals = rand_gen.randint(256, size=(width + [length])) elif kind == "step": signals = np.ones(width + [length]) signals[..., :length // 2] = 0.5 else: raise ValueError("Unhandled value for parameter 'kind'") fmri[shift[0]:shift[0] + width[0], shift[1]:shift[1] + width[1], shift[2]:shift[2] + width[2], :] = signals mask = np.zeros(shape) mask[shift[0]:shift[0] + width[0], shift[1]:shift[1] + width[1], shift[2]:shift[2] + width[2]] = 1 if n_blocks is None: return (nibabel.Nifti1Image(fmri, affine), nibabel.Nifti1Image(mask, affine)) block_size = 3 if block_size is None else block_size flat_fmri = fmri[mask.astype(np.bool)] flat_fmri /= np.abs(flat_fmri).max() target = np.zeros(length, dtype=np.int) rest_max_size = (length - (n_blocks * block_size)) // n_blocks if rest_max_size < 0: raise ValueError( '%s is too small ' 'to put %s blocks of size %s' % ( length, n_blocks, block_size)) t_start = 0 if rest_max_size > 0: t_start = rand_gen.random_integers(0, rest_max_size, 1)[0] for block in range(n_blocks): if block_type == 'classification': # Select a random voxel and add some signal to the background voxel_idx = rand_gen.randint(0, flat_fmri.shape[0], 1)[0] trials_effect = (rand_gen.random_sample(block_size) + 1) * 3. else: # Select the voxel in the image center and add some signal # that increases with each block voxel_idx = flat_fmri.shape[0] // 2 trials_effect = ( rand_gen.random_sample(block_size) + 1) * block t_rest = 0 if rest_max_size > 0: t_rest = rand_gen.random_integers(0, rest_max_size, 1)[0] flat_fmri[voxel_idx, t_start:t_start + block_size] += trials_effect target[t_start:t_start + block_size] = block + 1 t_start += t_rest + block_size target = target if block_type == 'classification' \ else target.astype(np.float) fmri = np.zeros(fmri.shape) fmri[mask.astype(np.bool)] = flat_fmri return (nibabel.Nifti1Image(fmri, affine), nibabel.Nifti1Image(mask, affine), target) def generate_signals_from_precisions(precisions, min_n_samples=50, max_n_samples=100, random_state=0): """Generate timeseries according to some given precision matrices. Signals all have zero mean. Parameters ---------- precisions: list of numpy.ndarray list of precision matrices. Every matrix must be square (with the same size) and positive definite. The output of generate_group_sparse_gaussian_graphs() can be used here. min_n_samples, max_n_samples: int the number of samples drawn for each timeseries is taken at random between these two numbers. Returns ------- signals: list of numpy.ndarray output signals. signals[n] corresponds to precisions[n], and has shape (sample number, precisions[n].shape[0]). """ random_state = check_random_state(random_state) signals = [] n_samples = random_state.randint(min_n_samples, high=max_n_samples, size=len(precisions)) mean = np.zeros(precisions[0].shape[0]) for n, prec in zip(n_samples, precisions): signals.append(random_state.multivariate_normal(mean, np.linalg.inv(prec), (n,))) return signals def generate_group_sparse_gaussian_graphs( n_subjects=5, n_features=30, min_n_samples=30, max_n_samples=50, density=0.1, random_state=0, verbose=0): """Generate signals drawn from a sparse Gaussian graphical model.
Parameters ---------- n_subjects : int, optional number of subjects n_features : int, optional number of signals per subject to generate density : float, optional density of edges in graph topology min_n_samples, max_n_samples : int, optional Each subject has a different number of samples, drawn between these two numbers. All signals for a given subject have the same number of samples. random_state : int or numpy.random.RandomState instance, optional random number generator, or seed. verbose: int, optional verbosity level (0 means no message). Returns ------- subjects : list of numpy.ndarray, shape for each (n_samples, n_features) subjects[n] contains the signals for subject n, provided as numpy arrays; len(subjects) == n_subjects. n_samples varies according to the subject. precisions : list of numpy.ndarray precision matrices. topology : numpy.ndarray binary array giving the graph topology used for generating covariances and signals. """ random_state = check_random_state(random_state) # Generate topology (upper triangular binary matrix, with zeros on the # diagonal) topology = np.empty((n_features, n_features)) topology[:, :] = np.triu(( random_state.randint(0, high=int(1. / density), size=n_features * n_features) ).reshape(n_features, n_features) == 0, k=1) # Generate edge weights on the topology precisions = [] mask = topology > 0 for _ in range(n_subjects): # See also sklearn.datasets.samples_generator.make_sparse_spd_matrix prec = topology.copy() prec[mask] = random_state.uniform(low=.1, high=.8, size=(mask.sum())) prec += np.eye(prec.shape[0]) prec = np.dot(prec.T, prec) # Assert precision matrix is spd np.testing.assert_almost_equal(prec, prec.T) eigenvalues = np.linalg.eigvalsh(prec) if eigenvalues.min() < 0: raise ValueError("Failed generating a positive definite precision " "matrix. Decreasing n_features can help solve " "this problem.") precisions.append(prec) # Return the common topology of the precision matrices. topology += np.eye(*topology.shape) topology = np.dot(topology.T, topology) topology = topology > 0 assert(np.all(topology == topology.T)) logger.log("Sparsity: {0:f}".format( 1. * topology.sum() / (topology.shape[0] ** 2)), verbose=verbose) # Generate temporal signals signals = generate_signals_from_precisions(precisions, min_n_samples=min_n_samples, max_n_samples=max_n_samples, random_state=random_state) return signals, precisions, topology def is_nose_running(): """Returns whether we are running the nose test loader """ if 'nose' not in sys.modules: return False try: import nose except ImportError: return False # Now check that we have the loader in the call stack stack = inspect.stack() loader_file_name = nose.loader.__file__ if loader_file_name.endswith('.pyc'): loader_file_name = loader_file_name[:-1] for _, file_name, _, _, _, _ in stack: if file_name == loader_file_name: return True return False def skip_if_running_nose(msg=''): """ Raise a SkipTest if we appear to be running the nose test loader.
Parameters ---------- msg: string, optional The message issued when SkipTest is raised """ if is_nose_running(): import nose raise nose.SkipTest(msg) # Backport: On some nose versions, assert_less_equal is not present try: from nose.tools import assert_less_equal except ImportError: def assert_less_equal(a, b): if a > b: raise AssertionError("%f is not less than or equal to %f" % (a, b)) try: from nose.tools import assert_less except ImportError: def assert_less(a, b): if a >= b: raise AssertionError("%f is not less than %f" % (a, b))

nilearn/_utils/exceptions.py AuthorizedException = ( BufferError, ArithmeticError, AssertionError, AttributeError, EnvironmentError, EOFError, LookupError, MemoryError, ReferenceError, RuntimeError, SystemError, TypeError, ValueError ) class DimensionError(TypeError): """Custom error type for dimension checking. This error is used in recursive calls in check_niimg to keep track of the dimensionality of the data. Its final goal is to generate a user-friendly message. Parameters ---------- file_dimension: integer Indicates the dimensionality of the bottom-level nifti file required_dimension: integer The dimension the nifti file should have """ def __init__(self, file_dimension, required_dimension): self.file_dimension = file_dimension self.required_dimension = required_dimension self.stack_counter = 0 super(DimensionError, self).__init__() def increment_stack_counter(self): """Increments the counter of recursive calls. Called when the error is caught and re-raised, to count the number of recursive calls, i.e. the number of dimensions added by nesting in lists. """ self.stack_counter += 1 @property def message(self): return ("Input data has incompatible dimensionality: " "Expected dimension is {0}D and you provided a " "{1}{2}D image{3}{4}. " "See http://nilearn.github.io/manipulating_images/" "input_output.html." .format(self.required_dimension + self.stack_counter, "list of " * self.stack_counter, self.file_dimension, "s" * (self.stack_counter != 0), (" (%iD)" % (self.file_dimension + self.stack_counter)) * (self.stack_counter > 0) ) ) def __str__(self): return self.message

nilearn/_utils/niimg.py""" Neuroimaging file input and output. """ # Author: Gael Varoquaux, Alexandre Abraham, Philippe Gervais # License: simplified BSD import copy import gc import collections import numpy as np import nibabel from .compat import _basestring def _safe_get_data(img): """ Get the data in the image without having a side effect on the Nifti1Image object """ if hasattr(img, '_data_cache') and img._data_cache is None: # By loading dataobj directly, we prevent caching if the data is # memmapped. Preventing this side-effect can save memory in some cases. img = copy.deepcopy(img) # typically the line below can double memory usage # that's why we invoke a forced call to the garbage collector gc.collect() return img.get_data() def _get_data_dtype(img): """Returns the dtype of an image. If the image is non-standard (no get_data_dtype member), this function relies on the data itself. """ try: return img.get_data_dtype() except AttributeError: return img.get_data().dtype def _get_target_dtype(dtype, target_dtype): """Returns a new dtype if conversion is needed Parameters ---------- dtype: dtype Data type of the original data target_dtype: {None, dtype, "auto"} If None, no conversion is required. If a type is provided, the function will check if a conversion is needed. The "auto" mode will automatically convert to int32 if dtype is discrete and float32 if it is continuous.
Returns ------- dtype: dtype The data type toward which the original data should be converted. """ if target_dtype is None: return None if target_dtype == 'auto': if dtype.kind == 'i': target_dtype = np.int32 else: target_dtype = np.float32 if target_dtype == dtype: return None return target_dtype def load_niimg(niimg, dtype=None): """Load a niimg, check if it is a nibabel SpatialImage and cast if needed Parameters ---------- niimg: Niimg-like object See http://nilearn.github.io/manipulating_images/input_output.html. Image to load. dtype: {dtype, "auto"} Data type toward which the data should be converted. If "auto", the data will be converted to int32 if dtype is discrete and float32 if it is continuous. Returns ------- img: image A loaded image object. """ from ..image import new_img_like # avoid circular imports if isinstance(niimg, _basestring): # data is a filename, we load it niimg = nibabel.load(niimg) elif not isinstance(niimg, nibabel.spatialimages.SpatialImage): raise TypeError("Data given cannot be loaded because it is" " not compatible with nibabel format:\n" + short_repr(niimg)) dtype = _get_target_dtype(_get_data_dtype(niimg), dtype) if dtype is not None: niimg = new_img_like(niimg, niimg.get_data().astype(dtype), niimg.get_affine()) return niimg def copy_img(img): """Copy an image to a nibabel.Nifti1Image. Parameters ---------- img: image nibabel SpatialImage object to copy. Returns ------- img_copy: image copy of input (data, affine and header) """ from ..image import new_img_like # avoid circular imports if not isinstance(img, nibabel.spatialimages.SpatialImage): raise ValueError("Input value is not an image") return new_img_like(img, _safe_get_data(img).copy(), img.get_affine().copy(), copy_header=True) def _repr_niimgs(niimgs): """ Pretty printing of niimg or niimgs. """ if isinstance(niimgs, _basestring): return niimgs if isinstance(niimgs, collections.Iterable): return '[%s]' % ', '.join(_repr_niimgs(niimg) for niimg in niimgs) # Nibabel objects have a 'get_filename' method try: filename = niimgs.get_filename() if filename is not None: return "%s('%s')" % (niimgs.__class__.__name__, filename) else: return "%s(\nshape=%s,\naffine=%s\n)" % \ (niimgs.__class__.__name__, repr(niimgs.shape), repr(niimgs.get_affine())) except Exception: pass return repr(niimgs) def short_repr(niimg): """Give a shortened version of a niimg representation """ this_repr = _repr_niimgs(niimg) if len(this_repr) > 20: # Shorten the repr to have a useful error message this_repr = this_repr[:18] + '...' return this_repr

nilearn/_utils/niimg_conversions.py""" Conversion utilities. """ # Author: Gael Varoquaux, Alexandre Abraham, Philippe Gervais # License: simplified BSD import warnings import os.path import glob import nilearn as ni import numpy as np import itertools from sklearn.externals.joblib import Memory from .cache_mixin import cache from .niimg import _safe_get_data, load_niimg from .compat import _basestring, izip from .exceptions import DimensionError def _check_fov(img, affine, shape): """ Return True if img's field of view corresponds to the given shape and affine, False otherwise. """ img = check_niimg(img) return (img.shape[:3] == shape and np.allclose(img.get_affine(), affine)) def _check_same_fov(*args, **kwargs): """Returns True if the provided images have the same field of view (shape and affine). Returns False or raises an error otherwise, depending on the `raise_error` argument.
This function can take an unlimited number of images as arguments or keyword arguments and raise a user-friendly ValueError if asked. Parameters ---------- args: images Images to be checked. Images passed without keywords will be labelled as img_#1 in the error message (replace 1 with the appropriate index). kwargs: images Images to be checked. In case of error, images will be referenced by their keyword name in the error message. raise_error: boolean, optional If True, a ValueError is raised if the images do not have the same field of view. """ raise_error = kwargs.pop('raise_error', False) for i, arg in enumerate(args): kwargs['img_#%i' % i] = arg errors = [] for (a_name, a_img), (b_name, b_img) in itertools.combinations( kwargs.items(), 2): if not a_img.shape[:3] == b_img.shape[:3]: errors.append((a_name, b_name, 'shape')) if not np.allclose(a_img.get_affine(), b_img.get_affine()): errors.append((a_name, b_name, 'affine')) if len(errors) > 0 and raise_error: raise ValueError('Following field of view errors were detected:\n' + '\n'.join(['- %s and %s do not have the same %s' % e for e in errors])) return (len(errors) == 0) def _index_img(img, index): """Helper function for check_niimg_4d.""" from ..image import new_img_like # avoid circular imports return new_img_like( img, img.get_data()[:, :, :, index], img.get_affine(), copy_header=True) def _resolve_globbing(path): if isinstance(path, _basestring): path_list = sorted(glob.glob(os.path.expanduser(path))) # Raise an error in case the niimgs list is empty. if len(path_list) == 0: raise ValueError("No files matching path: %s" % path) path = path_list return path def _iter_check_niimg(niimgs, ensure_ndim=None, atleast_4d=False, target_fov=None, dtype=None, memory=Memory(cachedir=None), memory_level=0, verbose=0): """Iterate over a list of niimgs and do sanity checks and resampling Parameters ---------- niimgs: list of niimg or glob pattern Images to iterate over ensure_ndim: integer, optional If specified, an error is raised if the data does not have the required dimension. atleast_4d: boolean, optional If True, any 3D image is converted to a 4D single scan. target_fov: tuple of affine and shape If specified, images are resampled to this field of view dtype: {dtype, "auto"} Data type toward which the data should be converted. If "auto", the data will be converted to int32 if dtype is discrete and float32 if it is continuous. See also -------- check_niimg, check_niimg_3d, check_niimg_4d """ # If niimgs is a string, use glob to expand it to the matching filenames. niimgs = _resolve_globbing(niimgs) ref_fov = None resample_to_first_img = False ndim_minus_one = ensure_ndim - 1 if ensure_ndim is not None else None if target_fov is not None and target_fov != "first": ref_fov = target_fov i = -1 for i, niimg in enumerate(niimgs): try: niimg = check_niimg( niimg, ensure_ndim=ndim_minus_one, atleast_4d=atleast_4d, dtype=dtype) if i == 0: ndim_minus_one = len(niimg.shape) if ref_fov is None: ref_fov = (niimg.get_affine(), niimg.shape[:3]) resample_to_first_img = True if not _check_fov(niimg, ref_fov[0], ref_fov[1]): if target_fov is not None: from nilearn import image # we avoid a circular import if resample_to_first_img: warnings.warn('Affine is different across subjects.'
' Realignment on first subject ' 'affine forced') niimg = cache(image.resample_img, memory, func_memory_level=2, memory_level=memory_level)( niimg, target_affine=ref_fov[0], target_shape=ref_fov[1]) else: raise ValueError( "Field of view of image #%d is different from " "reference FOV.\n" "Reference affine:\n%r\nImage affine:\n%r\n" "Reference shape:\n%r\nImage shape:\n%r\n" % (i, ref_fov[0], niimg.get_affine(), ref_fov[1], niimg.shape)) yield niimg except DimensionError as exc: # Keep track of the additional dimension in the error exc.increment_stack_counter() raise except TypeError as exc: img_name = '' if isinstance(niimg, _basestring): img_name = " (%s) " % niimg exc.args = (('Error encountered while loading image #%d%s' % (i, img_name),) + exc.args) raise # Raise an error if the input generator is empty. if i == -1: raise ValueError("Input niimgs list is empty.") def check_niimg(niimg, ensure_ndim=None, atleast_4d=False, dtype=None, return_iterator=False, wildcards=True): """Check that niimg is a proper 3D/4D niimg. Turn filenames into objects. Parameters ---------- niimg: Niimg-like object See http://nilearn.github.io/manipulating_images/input_output.html. If niimg is a string, consider it as a path to a Nifti image and call nibabel.load on it. The '~' symbol is expanded to the user home folder. If it is an object, check if get_data() and get_affine() methods are present, raise TypeError otherwise. ensure_ndim: integer {3, 4}, optional Indicates the dimensionality of the expected niimg. An error is raised if the niimg is of another dimensionality. atleast_4d: boolean, optional Indicates if a 3d image should be turned into a single-scan 4d niimg. dtype: {dtype, "auto"} Data type toward which the data should be converted. If "auto", the data will be converted to int32 if dtype is discrete and float32 if it is continuous. return_iterator: boolean, optional If True, an iterator on the content of the niimg file input is returned. wildcards: boolean, optional Use niimg as a glob pattern to get a list of matching input filenames. If multiple files match, the returned list is sorted in ascending order. If no file matches the pattern, a ValueError exception is raised. Returns ------- result: 3D/4D Niimg-like object Result can be nibabel.Nifti1Image or the input, as-is. It is guaranteed that the returned object has get_data() and get_affine() methods. Notes ----- In nilearn, special care has been taken to make image manipulation easy. This method is a kind of prerequisite for any data processing method in nilearn because it checks if data have a correct format and loads them if necessary. Its application is idempotent.
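Examples
--------
A minimal usage sketch (the file names are hypothetical, hence the skipped doctests):

>>> from nilearn._utils import check_niimg                # doctest: +SKIP
>>> img4d = check_niimg('func.nii.gz', ensure_ndim=4)     # doctest: +SKIP
>>> imgs = check_niimg('subject_*.nii.gz')                # doctest: +SKIP
>>> # a glob pattern expands to the sorted matching files,
>>> # which are then concatenated into a single 4D image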
See also -------- _iter_check_niimg, check_niimg_3d, check_niimg_4d """ from ..image import new_img_like # avoid circular imports if isinstance(niimg, _basestring): if wildcards and ni.EXPAND_PATH_WILDCARDS: # Ascending sorting + expand user path filenames = sorted(glob.glob(os.path.expanduser(niimg))) # processing filenames matching globbing expression if len(filenames) >= 1 and glob.has_magic(niimg): niimg = filenames # iterable case # niimg is an existing filename elif [niimg] == filenames: niimg = filenames[0] # No files found by glob elif glob.has_magic(niimg): # No files matching the glob expression, warn the user message = ("No files matching the entered niimg expression: " "'%s'.\n You may have left wildcards usage " "activated: please set the global constant " "'nilearn.EXPAND_PATH_WILDCARDS' to False to " "deactivate this behavior.") % niimg raise ValueError(message) else: raise ValueError("File not found: '%s'" % niimg) elif not os.path.exists(niimg): raise ValueError("File not found: '%s'" % niimg) # in case of an iterable if hasattr(niimg, "__iter__") and not isinstance(niimg, _basestring): if return_iterator: return _iter_check_niimg(niimg, ensure_ndim=ensure_ndim, dtype=dtype) return concat_niimgs(niimg, ensure_ndim=ensure_ndim, dtype=dtype) # Otherwise, it should be a filename or a SpatialImage, we load it niimg = load_niimg(niimg, dtype=dtype) if ensure_ndim == 3 and len(niimg.shape) == 4 and niimg.shape[3] == 1: # "squeeze" the image. data = _safe_get_data(niimg) affine = niimg.get_affine() niimg = new_img_like(niimg, data[:, :, :, 0], affine) if atleast_4d and len(niimg.shape) == 3: data = niimg.get_data().view() data.shape = data.shape + (1, ) niimg = new_img_like(niimg, data, niimg.get_affine()) if ensure_ndim is not None and len(niimg.shape) != ensure_ndim: raise DimensionError(len(niimg.shape), ensure_ndim) if return_iterator: return (_index_img(niimg, i) for i in range(niimg.shape[3])) return niimg def check_niimg_3d(niimg, dtype=None): """Check that niimg is a proper 3D niimg-like object and load it. Parameters ---------- niimg: Niimg-like object See http://nilearn.github.io/manipulating_images/input_output.html. If niimg is a string, consider it as a path to Nifti image and call nibabel.load on it. If it is an object, check if get_data() and get_affine() methods are present, raise TypeError otherwise. dtype: {dtype, "auto"} Data type toward which the data should be converted. If "auto", the data will be converted to int32 if dtype is discrete and float32 if it is continuous. Returns ------- result: 3D Niimg-like object Result can be nibabel.Nifti1Image or the input, as-is. It is guaranteed that the returned object has get_data() and get_affine() methods. Notes ----- In nilearn, special care has been taken to make image manipulation easy. This method is a kind of pre-requisite for any data processing method in nilearn because it checks if data have a correct format and loads them if necessary. Its application is idempotent. """ return check_niimg(niimg, ensure_ndim=3, dtype=dtype) def check_niimg_4d(niimg, return_iterator=False, dtype=None): """Check that niimg is a proper 4D niimg-like object and load it. Parameters ---------- niimg: 4D Niimg-like object See http://nilearn.github.io/manipulating_images/input_output.html. If niimgs is an iterable, checks if data is really 4D. Then, considering that it is a list of niimg and load them one by one. If niimg is a string, consider it as a path to Nifti image and call nibabel.load on it. 
If it is an object, check if get_data and get_affine methods are present, raise an Exception otherwise. dtype: {dtype, "auto"} Data type toward which the data should be converted. If "auto", the data will be converted to int32 if dtype is discrete and float32 if it is continuous. return_iterator: boolean If True, an iterator of 3D images is returned. This reduces the memory usage when `niimgs` contains 3D images. If False, a single 4D image is returned. When `niimgs` contains 3D images they are concatenated together. Returns ------- niimg: 4D nibabel.Nifti1Image or iterator of 3D nibabel.Nifti1Image Notes ----- This function is the equivalent of check_niimg_3d() for Niimg-like objects with a session level. Its application is idempotent. """ return check_niimg(niimg, ensure_ndim=4, return_iterator=return_iterator, dtype=dtype) def concat_niimgs(niimgs, dtype=np.float32, ensure_ndim=None, memory=Memory(cachedir=None), memory_level=0, auto_resample=False, verbose=0): """Concatenate a list of 3D/4D niimgs of varying lengths. The niimgs list can contain niftis/paths to images of varying dimensions (i.e., 3D or 4D) as well as different 3D shapes and affines, as they will be matched to the first image in the list if auto_resample=True. Parameters ---------- niimgs: iterable of Niimg-like objects or glob pattern See http://nilearn.github.io/manipulating_images/input_output.html. Niimgs to concatenate. dtype: numpy dtype, optional The dtype of the returned image. ensure_ndim: integer, optional Indicates the dimensionality of the expected niimg. An error is raised if the niimg is of another dimensionality. auto_resample: boolean Converts all images to the space of the first one. verbose: int Controls the amount of verbosity (0 means no messages). memory : instance of joblib.Memory or string Used to cache the resampling process. By default, no caching is done. If a string is given, it is the path to the caching directory. memory_level : integer, optional Rough estimator of the amount of memory used by caching. Higher value means more memory for caching. Returns ------- concatenated: nibabel.Nifti1Image A single image. See Also -------- nilearn.image.index_img """ from ..image import new_img_like # avoid circular imports target_fov = 'first' if auto_resample else None # We subtract one from the expected dimensionality because the list itself accounts for one dimension. ndim = None if ensure_ndim is not None: ndim = ensure_ndim - 1 # If niimgs is a string, use glob to expand it to the matching filenames. niimgs = _resolve_globbing(niimgs) # First niimg is extracted to get information and for new_img_like first_niimg = None iterator, literator = itertools.tee(iter(niimgs)) try: first_niimg = check_niimg(next(literator), ensure_ndim=ndim) except StopIteration: raise TypeError('Cannot concatenate empty objects') except DimensionError as exc: # Keep track of the additional dimension in the error exc.increment_stack_counter() raise # If no particular dimensionality is asked, we force consistency wrt the # first image if ndim is None: ndim = len(first_niimg.shape) if ndim not in [3, 4]: raise TypeError('Concatenated images must be 3D or 4D.
You gave a ' 'list of %dD images' % ndim) lengths = [first_niimg.shape[-1] if ndim == 4 else 1] for niimg in literator: # We check the dimensionality of the niimg try: niimg = check_niimg(niimg, ensure_ndim=ndim) except DimensionError as exc: # Keep track of the additional dimension in the error exc.increment_stack_counter() raise lengths.append(niimg.shape[-1] if ndim == 4 else 1) target_shape = first_niimg.shape[:3] data = np.ndarray(target_shape + (sum(lengths), ), order="F", dtype=dtype) cur_4d_index = 0 for index, (size, niimg) in enumerate(izip(lengths, _iter_check_niimg( iterator, atleast_4d=True, target_fov=target_fov, memory=memory, memory_level=memory_level))): if verbose > 0: if isinstance(niimg, _basestring): nii_str = "image " + niimg else: nii_str = "image #" + str(index) print("Concatenating {0}: {1}".format(index + 1, nii_str)) data[..., cur_4d_index:cur_4d_index + size] = niimg.get_data() cur_4d_index += size return new_img_like(first_niimg, data, first_niimg.get_affine()) nilearn/_utils/fixes/sklearn_f_regression_nosparse.py import numpy as np from scipy import stats from sklearn.utils import check_arrays from sklearn.utils.extmath import norm # f_regression with correct degrees of freedom when center=False # available in sklearn version >= 0.15 # This version does not support sparse matrices and is used to have tests # passing for versions of sklearn < 0.12. def f_regression_nosparse(X, y, center=True): """Univariate linear regression tests. Quick linear model for testing the effect of a single regressor, sequentially for many regressors. This is done in 3 steps: 1. the regressor of interest and the data are orthogonalized with respect to constant regressors 2. the cross correlation between data and regressors is computed 3. it is converted to an F score then to a p-value Parameters ---------- X : {array-like, sparse matrix} shape = (n_samples, n_features) The set of regressors that will be tested sequentially. y : array of shape (n_samples,) The data matrix. center : bool, default True If True, X and y will be centered. Returns ------- F : array, shape=(n_features,) F values of features. pval : array, shape=(n_features,) p-values of F-scores. """ X, y = check_arrays(X, y, dtype=np.float) y = y.ravel() if center: y = y - np.mean(y) X = X.copy('F') # faster in fortran X -= X.mean(axis=0) # compute the correlation corr = np.dot(y, X) # XXX could use corr /= row_norms(X.T) here, but the test doesn't pass corr /= np.asarray(np.sqrt((X ** 2).sum(axis=0))).ravel() corr /= norm(y) # convert to p-value degrees_of_freedom = y.size - (2 if center else 1) F = corr ** 2 / (1 - corr ** 2) * degrees_of_freedom pv = stats.f.sf(F, 1, degrees_of_freedom) return F, pv nilearn/_utils/fixes/matplotlib_backports.py """Backports for matplotlib compatibility across versions""" def cbar_outline_get_xy(cbar_outline): """In matplotlib versions >= 1.4.0, ColorbarBase.outline is a Polygon(Patch) object instead of a Line2D(Line) object. This entails different getters and setters. Changed specifically after commit 48f594c2e2b05839ea394040b06196f39d9fbfba, entitled "changed colorbar outline from a Line2D object to a Polygon object" from August 28th, 2013. This function unifies getters and setters of ColorbarBase outline xy coordinates.""" if hasattr(cbar_outline, "get_xy"): # loose version >= 1.4.x return cbar_outline.get_xy() else: return cbar_outline.get_xydata() def cbar_outline_set_xy(cbar_outline, xy): """Setter for ColorbarBase.outline xy coordinates.
See cbar_outline_get_xy for more information. """ if hasattr(cbar_outline, "set_xy"): # loose version >= 1.4.x return cbar_outline.set_xy(xy) else: cbar_outline.set_xdata(xy[:, 0]) cbar_outline.set_ydata(xy[:, 1]) nilearn/_utils/fixes/__init__.py from distutils.version import LooseVersion import sklearn if (LooseVersion(sklearn.__version__) < LooseVersion('0.15') or sklearn.__version__ == '0.15-git'): from .sklearn_f_regression_nosparse import ( f_regression_nosparse as f_regression) else: from sklearn.feature_selection import f_regression # atleast2d_or_csr try: from sklearn.utils import atleast2d_or_csr except ImportError: # Changed in 0.15 from sklearn.utils import check_array as atleast2d_or_csr # roc_auc_score try: from sklearn.metrics import roc_auc_score except ImportError: from sklearn.metrics import auc as roc_auc_score __all__ = ['f_regression', 'atleast2d_or_csr', 'roc_auc_score'] nilearn/mass_univariate/permuted_least_squares.py """ Massively Univariate Linear Model estimated with OLS and permutation test. """ # Author: Benoit Da Mota, sept. 2011 # Virgile Fritsch, jan. 2014 import warnings import numpy as np from scipy import linalg from sklearn.utils import check_random_state import sklearn.externals.joblib as joblib def normalize_matrix_on_axis(m, axis=0): """ Normalize a 2D matrix on an axis. Parameters ---------- m : numpy 2D array, The matrix to normalize. axis : integer in {0, 1}, optional A valid axis to normalize across. Returns ------- ret : numpy array, shape = m.shape The normalized matrix. Examples -------- >>> import numpy as np >>> from nilearn.mass_univariate.permuted_least_squares import ( ... normalize_matrix_on_axis) >>> X = np.array([[0, 4], [1, 0]]) >>> normalize_matrix_on_axis(X) array([[ 0., 1.], [ 1., 0.]]) >>> normalize_matrix_on_axis(X, axis=1) array([[ 0., 1.], [ 1., 0.]]) """ if m.ndim > 2: raise ValueError('This function only accepts 2D arrays. ' 'An array of shape %r was passed.' % m.shape) if axis == 0: # array transposition preserves the contiguity flag of that array ret = (m.T / np.sqrt(np.sum(m ** 2, axis=0))[:, np.newaxis]).T elif axis == 1: ret = normalize_matrix_on_axis(m.T).T else: raise ValueError('axis(=%d) out of bounds' % axis) return ret def orthonormalize_matrix(m, tol=1.e-12): """ Orthonormalize a matrix. Uses a Singular Value Decomposition. If the input matrix is rank-deficient, then its shape is cropped. Parameters ---------- m : numpy array, The matrix to orthonormalize. Returns ------- ret : numpy array, shape = m.shape The orthonormalized matrix. Examples -------- >>> import numpy as np >>> from nilearn.mass_univariate.permuted_least_squares import ( ... orthonormalize_matrix) >>> X = np.array([[1, 2], [0, 1], [1, 1]]) >>> orthonormalize_matrix(X) array([[-0.81049889, -0.0987837 ], [-0.31970025, -0.75130448], [-0.49079864, 0.65252078]]) >>> X = np.array([[0, 1], [4, 0]]) >>> orthonormalize_matrix(X) array([[ 0., -1.], [-1., 0.]]) """ U, s, _ = linalg.svd(m, full_matrices=False) n_eig = np.count_nonzero(s > tol) return np.ascontiguousarray(U[:, :n_eig]) def _t_score_with_covars_and_normalized_design(tested_vars, target_vars, covars_orthonormalized=None): """t-score in the regression of tested variates against target variates. Covariates are taken into account (if not None).
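To make the closed form explicit: under the normalization assumptions detailed next, with beta = np.dot(target_vars.T, tested_vars) and dof = n_samples - n_covars, the score computed in the body reduces to t = beta * sqrt((dof - 1) / rss), where rss = 1 - beta ** 2 without covariates, and rss = 1 - ||beta_covars|| ** 2 - beta ** 2 otherwise (beta_covars being the loadings of the targets on the orthonormalized covariates).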
The normalized_design case corresponds to the following assumptions: - tested_vars and target_vars are normalized - covars_orthonormalized are orthonormalized - tested_vars and covars_orthonormalized are orthogonal (np.dot(tested_vars.T, covars) == 0) Parameters ---------- tested_vars : array-like, shape=(n_samples, n_tested_vars) Explanatory variates. target_vars : array-like, shape=(n_samples, n_target_vars) Target variates. F-ordered is better for efficient computation. covars_orthonormalized : array-like, shape=(n_samples, n_covars) or None Confounding variates. Returns ------- score : numpy.ndarray, shape=(n_target_vars, n_tested_vars) t-scores associated with the tests of each explanatory variate against each target variate (in the presence of covars). """ if covars_orthonormalized is None: lost_dof = 0 else: lost_dof = covars_orthonormalized.shape[1] # Tested variates are fitted independently, # so lost_dof is unrelated to n_tested_vars. dof = target_vars.shape[0] - lost_dof beta_targetvars_testedvars = np.dot(target_vars.T, tested_vars) if covars_orthonormalized is None: rss = (1 - beta_targetvars_testedvars ** 2) else: beta_targetvars_covars = np.dot(target_vars.T, covars_orthonormalized) a2 = np.sum(beta_targetvars_covars ** 2, 1) rss = (1 - a2[:, np.newaxis] - beta_targetvars_testedvars ** 2) return beta_targetvars_testedvars * np.sqrt((dof - 1.) / rss) def _permuted_ols_on_chunk(scores_original_data, tested_vars, target_vars, confounding_vars=None, n_perm_chunk=10000, intercept_test=True, two_sided_test=True, random_state=None): """Massively univariate group analysis with permuted OLS on a data chunk. To be used in a parallel computing context. Parameters ---------- scores_original_data : array-like, shape=(n_descriptors, n_regressors) t-scores obtained for the original (non-permuted) data. tested_vars : array-like, shape=(n_samples, n_regressors) Explanatory variates. target_vars : array-like, shape=(n_samples, n_targets) fMRI data. F-ordered for efficient computations. confounding_vars : array-like, shape=(n_samples, n_covars) Clinical data (covariates). n_perm_chunk : int, Number of permutations to be performed. intercept_test : boolean, Change the permutation scheme (swap signs for intercept, switch labels otherwise). See [1]. two_sided_test : boolean, If True, performs an unsigned t-test. Both positive and negative effects are considered; the null hypothesis is that the effect is zero. If False, only positive effects are considered as relevant. The null hypothesis is that the effect is zero or negative. random_state : int or None, Seed for random number generator, to have the same permutations in each computing unit. Returns ------- scores_as_ranks_part : array-like, shape=(n_regressors, n_descriptors) For each original score, the number of permutations in this chunk whose max t-statistic stayed below it (partial rank count). h0_fmax_part : array-like, shape=(n_regressors, n_perm_chunk) Distribution of the (max) t-statistic under the null hypothesis (limited to this permutation chunk). References ---------- [1] Fisher, R. A. (1935). The design of experiments. """ # initialize the seed of the random generator rng = check_random_state(random_state) n_samples, n_regressors = tested_vars.shape n_descriptors = target_vars.shape[1] # run the permutations h0_fmax_part = np.empty((n_perm_chunk, n_regressors)) scores_as_ranks_part = np.zeros((n_regressors, n_descriptors)) for i in range(n_perm_chunk): if intercept_test: # sign swap (random multiplication by 1 or -1) target_vars = (target_vars * (rng.randint(2, size=(n_samples, 1)) * 2 - 1)) else: # shuffle data # Regarding computation costs, we choose to shuffle testvars # and covars rather than fmri_signal.
# Also, it is important to shuffle tested_vars and covars # jointly to simplify t-scores computation (null dot product). shuffle_idx = rng.permutation(n_samples) tested_vars = tested_vars[shuffle_idx] if confounding_vars is not None: confounding_vars = confounding_vars[shuffle_idx] # OLS regression on randomized data perm_scores = np.asfortranarray( _t_score_with_covars_and_normalized_design(tested_vars, target_vars, confounding_vars)) if two_sided_test: perm_scores = np.fabs(perm_scores) h0_fmax_part[i] = np.amax(perm_scores, 0) # find the rank of the original scores in h0_part # (when n_descriptors or n_perm are large, it can take quite long to # find the rank of the original scores in the whole H0 distribution. # Here, it is performed in parallel by the workers involved in the # permutation computation) scores_as_ranks_part += (h0_fmax_part[i].reshape((-1, 1)) < scores_original_data.T) return scores_as_ranks_part, h0_fmax_part.T def permuted_ols(tested_vars, target_vars, confounding_vars=None, model_intercept=True, n_perm=10000, two_sided_test=True, random_state=None, n_jobs=1, verbose=0): """Massively univariate group analysis with permuted OLS. Tested variates are independently fitted to target variates descriptors (e.g. brain imaging signal) according to a linear model solved with an Ordinary Least Squares criterion. Confounding variates may be included in the model. Permutation testing is used to assess the significance of the relationship between the tested variates and the target variates [1, 2]. A max-type procedure is used to obtain family-wise corrected p-values. The specific permutation scheme implemented here is the one of Freedman & Lane [3]. It has been demonstrated in [1] that this scheme conveys more sensitivity than alternative schemes. This holds for neuroimaging applications, as discussed in detail in [2]. Permutations are performed on parallel computing units. Each of them performs a fraction of permutations on the whole dataset. Thus, the max t-score amongst data descriptors can be computed directly, which avoids storing all the computed t-scores. The variates should be given C-contiguous; target_vars is automatically converted to Fortran order to speed up computations. Parameters ---------- tested_vars : array-like, shape=(n_samples, n_regressors) Explanatory variates, fitted and tested independently from each other. target_vars : array-like, shape=(n_samples, n_descriptors) fMRI data, to be explained by the explanatory and confounding variates. confounding_vars : array-like, shape=(n_samples, n_covars) Confounding variates (covariates), fitted but not tested. If None, no confounding variate is added to the model (except maybe a constant column according to the value of `model_intercept`). model_intercept : bool, If True, a constant column is added to the confounding variates unless the tested variate is already the intercept. n_perm : int, Number of permutations to perform. Permutations are costly but the more are performed, the more precision one gets in the p-value estimates. two_sided_test : boolean, If True, performs an unsigned t-test. Both positive and negative effects are considered; the null hypothesis is that the effect is zero. If False, only positive effects are considered as relevant. The null hypothesis is that the effect is zero or negative. random_state : int or None, Seed for random number generator, to have the same permutations in each computing unit. n_jobs : int, Number of parallel workers. If -1 is provided, all CPUs are used.
A negative number indicates that all the CPUs except (abs(n_jobs) - 1) ones will be used. verbose: int, optional verbosity level (0 means no message). Returns ------- pvals : array-like, shape=(n_regressors, n_descriptors) Negative log10 p-values associated with the significance test of the n_regressors explanatory variates against the n_descriptors target variates. Family-wise corrected p-values. score_orig_data : numpy.ndarray, shape=(n_regressors, n_descriptors) t-statistic associated with the significance test of the n_regressors explanatory variates against the n_descriptors target variates. The ranks of the scores in the h0 distribution determine the p-values. h0_fmax : array-like, shape=(n_perm, ) Distribution of the (max) t-statistic under the null hypothesis (obtained from the permutations). Array is sorted. References ---------- [1] Anderson, M. J. & Robinson, J. (2001). Permutation tests for linear models. Australian & New Zealand Journal of Statistics, 43(1), 75-88. [2] Winkler, A. M. et al. (2014). Permutation inference for the general linear model. Neuroimage. [3] Freedman, D. & Lane, D. (1983). A nonstochastic interpretation of reported significance levels. J. Bus. Econ. Stats., 1(4), 292-298. """ # initialize the seed of the random generator rng = check_random_state(random_state) # check n_jobs (number of CPUs) if n_jobs == 0: # invalid according to joblib's conventions raise ValueError("'n_jobs == 0' is not a valid choice. " "Please provide a positive number of CPUs, or -1 " "for all CPUs, or a negative number (-i) for " "'all but (i-1)' CPUs (joblib conventions).") elif n_jobs < 0: # joblib convention: n_jobs=-i means "all but (i-1)" CPUs n_jobs = max(1, joblib.cpu_count() + int(n_jobs) + 1) else: n_jobs = min(n_jobs, joblib.cpu_count()) # make target_vars F-ordered to speed-up computation if target_vars.ndim != 2: raise ValueError("'target_vars' should be a 2D array.
" "An array with %d dimension%s was passed" % (target_vars.ndim, "s" if target_vars.ndim > 1 else "")) target_vars = np.asfortranarray(target_vars) # efficient for chunking n_descriptors = target_vars.shape[1] # check explanatory variates dimensions if tested_vars.ndim == 1: tested_vars = np.atleast_2d(tested_vars).T n_samples, n_regressors = tested_vars.shape # check if explanatory variates is intercept (constant) or not if (n_regressors == 1 and np.unique(tested_vars).size == 1): intercept_test = True else: intercept_test = False # optionally add intercept if model_intercept and not intercept_test: if confounding_vars is not None: confounding_vars = np.hstack( (confounding_vars, np.ones((n_samples, 1)))) else: confounding_vars = np.ones((n_samples, 1)) ### OLS regression on original data if confounding_vars is not None: # step 1: extract effect of covars from target vars covars_orthonormalized = orthonormalize_matrix(confounding_vars) if not covars_orthonormalized.flags['C_CONTIGUOUS']: # useful to developer warnings.warn('Confounding variates not C_CONTIGUOUS.') covars_orthonormalized = np.ascontiguousarray( covars_orthonormalized) targetvars_normalized = normalize_matrix_on_axis( target_vars).T # faster with F-ordered target_vars_chunk if not targetvars_normalized.flags['C_CONTIGUOUS']: # useful to developer warnings.warn('Target variates not C_CONTIGUOUS.') targetvars_normalized = np.ascontiguousarray(targetvars_normalized) beta_targetvars_covars = np.dot(targetvars_normalized, covars_orthonormalized) targetvars_resid_covars = targetvars_normalized - np.dot( beta_targetvars_covars, covars_orthonormalized.T) targetvars_resid_covars = normalize_matrix_on_axis( targetvars_resid_covars, axis=1) # step 2: extract effect of covars from tested vars testedvars_normalized = normalize_matrix_on_axis(tested_vars.T, axis=1) beta_testedvars_covars = np.dot(testedvars_normalized, covars_orthonormalized) testedvars_resid_covars = testedvars_normalized - np.dot( beta_testedvars_covars, covars_orthonormalized.T) testedvars_resid_covars = normalize_matrix_on_axis( testedvars_resid_covars, axis=1).T.copy() else: targetvars_resid_covars = normalize_matrix_on_axis(target_vars).T testedvars_resid_covars = normalize_matrix_on_axis(tested_vars).copy() covars_orthonormalized = None # check arrays contiguousity (for the sake of code efficiency) if not targetvars_resid_covars.flags['C_CONTIGUOUS']: # useful to developer warnings.warn('Target variates not C_CONTIGUOUS.') targetvars_resid_covars = np.ascontiguousarray(targetvars_resid_covars) if not testedvars_resid_covars.flags['C_CONTIGUOUS']: # useful to developer warnings.warn('Tested variates not C_CONTIGUOUS.') testedvars_resid_covars = np.ascontiguousarray(testedvars_resid_covars) # step 3: original regression (= regression on residuals + adjust t-score) # compute t score for original data scores_original_data = _t_score_with_covars_and_normalized_design( testedvars_resid_covars, targetvars_resid_covars.T, covars_orthonormalized) if two_sided_test: sign_scores_original_data = np.sign(scores_original_data) scores_original_data = np.fabs(scores_original_data) ### Permutations # parallel computing units perform a reduced number of permutations each if n_perm > n_jobs: n_perm_chunks = np.asarray([n_perm / n_jobs] * n_jobs, dtype=int) n_perm_chunks[-1] += n_perm % n_jobs elif n_perm > 0: warnings.warn('The specified number of permutations is %d and ' 'the number of jobs to be performed in parallel has ' 'set to %s. 
This is incompatible so only %d jobs will ' 'be running. You may want to perform more permutations ' 'in order to make the most of the available computing ' 'resources.' % (n_perm, n_jobs, n_perm)) n_perm_chunks = np.ones(n_perm, dtype=int) else: # 0 or negative number of permutations => original data scores only if two_sided_test: scores_original_data = (scores_original_data * sign_scores_original_data) return np.asarray([]), scores_original_data, np.asarray([]) # actual permutations, seeded from a random integer between 0 and maximum # value represented by np.int32 (to have a large entropy). ret = joblib.Parallel(n_jobs=n_jobs, verbose=verbose)( joblib.delayed(_permuted_ols_on_chunk)( scores_original_data, testedvars_resid_covars, targetvars_resid_covars.T, covars_orthonormalized, n_perm_chunk=n_perm_chunk, intercept_test=intercept_test, two_sided_test=two_sided_test, random_state=rng.random_integers(np.iinfo(np.int32).max - 1)) for n_perm_chunk in n_perm_chunks) # reduce results scores_as_ranks_parts, h0_fmax_parts = zip(*ret) h0_fmax = np.hstack((h0_fmax_parts)) scores_as_ranks = np.zeros((n_regressors, n_descriptors)) for scores_as_ranks_part in scores_as_ranks_parts: scores_as_ranks += scores_as_ranks_part # convert ranks into p-values pvals = (n_perm + 1 - scores_as_ranks) / float(1 + n_perm) # put back sign on scores if it was removed in the case of a two-sided test # (useful to distinguish between positive and negative effects) if two_sided_test: scores_original_data = scores_original_data * sign_scores_original_data return - np.log10(pvals), scores_original_data.T, h0_fmax[0] nilearn/mass_univariate/__init__.py """ Defines a Massively Univariate Linear Model estimated with OLS and permutation test. """ from .permuted_least_squares import permuted_ols __all__ = ['permuted_ols'] nilearn/mass_univariate/tests/__init__.py nilearn/mass_univariate/tests/test_permuted_least_squares.py """ Tests for the permuted_ols function. """ # Author: Virgile Fritsch, Feb. 2014 import numpy as np from scipy import stats from sklearn.utils import check_random_state from numpy.testing import (assert_almost_equal, assert_array_almost_equal, assert_array_less, assert_equal) from nilearn.mass_univariate import permuted_ols from nilearn.mass_univariate.permuted_least_squares import ( _t_score_with_covars_and_normalized_design, orthonormalize_matrix) def get_tvalue_with_alternative_library(tested_vars, target_vars, covars=None): """Utility function to compute t-values with linalg or statsmodels. Massively univariate linear model (= each target is considered independently). Parameters ---------- tested_vars: array-like, shape=(n_samples, n_regressors) Tested variates, the associated coefficients of which are to be tested independently with a t-test, resulting in as many t-values. target_vars: array-like, shape=(n_samples, n_targets) Target variates, to be approximated with a linear combination of the tested variates and the confounding variates. covars: array-like, shape=(n_samples, n_confounds) Confounding variates, to be fitted but not to be tested. Returns ------- t-values: np.ndarray, shape=(n_regressors, n_targets) """ ### set up design n_samples, n_regressors = tested_vars.shape n_targets = target_vars.shape[1] if covars is not None: n_covars = covars.shape[1] design_matrix = np.hstack((tested_vars, covars)) else: n_covars = 0 design_matrix = tested_vars mask_covars = np.ones(n_regressors + n_covars, dtype=bool) mask_covars[:n_regressors] = False test_matrix = np.array([[1.] + [0.]
* n_covars]) ### t-values computation try: # try with statsmodels if available (more concise) from statsmodels.regression.linear_model import OLS t_values = np.empty((n_targets, n_regressors)) for i in range(n_targets): current_target = target_vars[:, i].reshape((-1, 1)) for j in range(n_regressors): current_tested_mask = mask_covars.copy() current_tested_mask[j] = True current_design_matrix = design_matrix[:, current_tested_mask] ols_fit = OLS(current_target, current_design_matrix).fit() t_values[i, j] = np.ravel(ols_fit.t_test(test_matrix).tvalue) except: # use linalg if statsmodels is not available from numpy import linalg lost_dof = n_covars + 1 # fit all tested variates independently t_values = np.empty((n_targets, n_regressors)) for i in range(n_regressors): current_tested_mask = mask_covars.copy() current_tested_mask[i] = True current_design_matrix = design_matrix[:, current_tested_mask] invcov = linalg.pinv(current_design_matrix) normalized_cov = np.dot(invcov, invcov.T) t_val_denom_aux = np.diag( np.dot(test_matrix, np.dot(normalized_cov, test_matrix.T))) t_val_denom_aux = t_val_denom_aux.reshape((-1, 1)) for j in range(n_targets): current_target = target_vars[:, j].reshape((-1, 1)) res_lstsq = linalg.lstsq(current_design_matrix, current_target) residuals = (current_target - np.dot(current_design_matrix, res_lstsq[0])) t_val_num = np.dot(test_matrix, res_lstsq[0]) t_val_denom = np.sqrt( np.sum(residuals ** 2, 0) / float(n_samples - lost_dof) * t_val_denom_aux) t_values[j, i] = np.ravel(t_val_num / t_val_denom) return t_values ### Tests t-scores computation ################################################ def test_t_score_with_covars_and_normalized_design_nocovar(random_state=0): rng = check_random_state(random_state) ### Normalized data n_samples = 50 # generate data var1 = np.ones((n_samples, 1)) / np.sqrt(n_samples) var2 = rng.randn(n_samples, 1) var2 = var2 / np.sqrt(np.sum(var2 ** 2, 0)) # normalize # compute t-scores with nilearn routine t_val_own = _t_score_with_covars_and_normalized_design(var1, var2) # compute t-scores with linalg or statsmodels t_val_alt = get_tvalue_with_alternative_library(var1, var2) assert_array_almost_equal(t_val_own, t_val_alt) def test_t_score_with_covars_and_normalized_design_withcovar(random_state=0): """ """ rng = check_random_state(random_state) ### Normalized data n_samples = 50 # generate data var1 = np.ones((n_samples, 1)) / np.sqrt(n_samples) # normalized var2 = rng.randn(n_samples, 1) var2 = var2 / np.sqrt(np.sum(var2 ** 2, 0)) # normalize covars = np.eye(n_samples, 3) # covars is orthogonal covars[3] = -1 # covars is orthogonal to var1 covars = orthonormalize_matrix(covars) # nilearn t-score own_score = _t_score_with_covars_and_normalized_design(var1, var2, covars) # compute t-scores with linalg or statmodels ref_score = get_tvalue_with_alternative_library(var1, var2, covars) assert_array_almost_equal(own_score, ref_score) ### General tests for permuted_ols function ################################### def test_permuted_ols_check_h0_noeffect_labelswap(random_state=0): rng = check_random_state(random_state) # design parameters n_samples = 100 # create dummy design with no effect target_var = rng.randn(n_samples, 1) tested_var = np.arange(n_samples, dtype='f8').reshape((-1, 1)) tested_var_not_centered = tested_var.copy() tested_var -= tested_var.mean(0) # centered # permuted OLS # We check that h0 is close to the theoretical distribution, which is # known for this simple design (= t(n_samples - dof)). 
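# dof here is the number of fitted parameters: 1 when only the (centered)
# tested variate is fitted, 2 when an intercept column is added, which
# matches the t(n_samples - 1) and t(n_samples - 2) references used below.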
perm_ranges = [10, 100, 1000] # test various number of permutations # we use two models (with and without intercept modelling) all_kstest_pvals = [] all_kstest_pvals_intercept = [] all_kstest_pvals_intercept2 = [] # we compute the Mean Squared Error between cumulative Density Function # as a proof of consistency of the permutation algorithm all_mse = [] all_mse_intercept = [] all_mse_intercept2 = [] for i, n_perm in enumerate(np.repeat(perm_ranges, 10)): ### Case no. 1: no intercept in the model pval, orig_scores, h0 = permuted_ols( tested_var, target_var, model_intercept=False, n_perm=n_perm, two_sided_test=False, random_state=i) assert_equal(h0.size, n_perm) # Kolmogorov-Smirnov test kstest_pval = stats.kstest(h0, stats.t(n_samples - 1).cdf)[1] all_kstest_pvals.append(kstest_pval) mse = np.mean( (stats.t(n_samples - 1).cdf(np.sort(h0)) - np.linspace(0, 1, h0.size + 1)[1:]) ** 2) all_mse.append(mse) ### Case no. 2: intercept in the model pval, orig_scores, h0 = permuted_ols( tested_var, target_var, model_intercept=True, n_perm=n_perm, two_sided_test=False, random_state=i) assert_array_less(pval, 1.) # pval should not be significant # Kolmogorov-Smirnov test kstest_pval = stats.kstest(h0, stats.t(n_samples - 2).cdf)[1] all_kstest_pvals_intercept.append(kstest_pval) mse = np.mean( (stats.t(n_samples - 2).cdf(np.sort(h0)) - np.linspace(0, 1, h0.size + 1)[1:]) ** 2) all_mse_intercept.append(mse) ### Case no. 3: intercept in the model, no centering of tested vars pval, orig_scores, h0 = permuted_ols( tested_var_not_centered, target_var, model_intercept=True, n_perm=n_perm, two_sided_test=False, random_state=i) assert_array_less(pval, 1.) # pval should not be significant # Kolmogorov-Smirnov test kstest_pval = stats.kstest(h0, stats.t(n_samples - 2).cdf)[1] all_kstest_pvals_intercept2.append(kstest_pval) mse = np.mean( (stats.t(n_samples - 2).cdf(np.sort(h0)) - np.linspace(0, 1, h0.size + 1)[1:]) ** 2) all_mse_intercept2.append(mse) all_kstest_pvals = np.array(all_kstest_pvals).reshape( (len(perm_ranges), -1)) all_kstest_pvals_intercept = np.array(all_kstest_pvals_intercept).reshape( (len(perm_ranges), -1)) all_mse = np.array(all_mse).reshape((len(perm_ranges), -1)) all_mse_intercept = np.array(all_mse_intercept).reshape( (len(perm_ranges), -1)) all_mse_intercept2 = np.array(all_mse_intercept2).reshape( (len(perm_ranges), -1)) # check that a difference between distributions is not rejected by KS test assert_array_less(0.01, all_kstest_pvals) assert_array_less(0.01, all_kstest_pvals_intercept) assert_array_less(0.01, all_kstest_pvals_intercept2) # consistency of the algorithm: the more permutations, the less the MSE assert_array_less(np.diff(all_mse.mean(1)), 0) assert_array_less(np.diff(all_mse_intercept.mean(1)), 0) assert_array_less(np.diff(all_mse_intercept2.mean(1)), 0) def test_permuted_ols_check_h0_noeffect_signswap(random_state=0): rng = check_random_state(random_state) # design parameters n_samples = 100 # create dummy design with no effect target_var = rng.randn(n_samples, 1) tested_var = np.ones((n_samples, 1)) # permuted OLS # We check that h0 is close to the theoretical distribution, which is # known for this simple design (= t(n_samples - dof)). 
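# In the sign-swap scheme the tested variate is the intercept itself, so no
# degree of freedom is lost to nuisance terms: the reference distribution
# used below is t(n_samples).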
perm_ranges = [10, 100, 1000] # test various number of permutations all_kstest_pvals = [] # we compute the Mean Squared Error between cumulative Density Function # as a proof of consistency of the permutation algorithm all_mse = [] for i, n_perm in enumerate(np.repeat(perm_ranges, 10)): pval, orig_scores, h0 = permuted_ols( tested_var, target_var, model_intercept=False, n_perm=n_perm, two_sided_test=False, random_state=i) assert_equal(h0.size, n_perm) # Kolmogorov-Smirnov test kstest_pval = stats.kstest(h0, stats.t(n_samples).cdf)[1] all_kstest_pvals.append(kstest_pval) mse = np.mean( (stats.t(n_samples).cdf(np.sort(h0)) - np.linspace(0, 1, h0.size + 1)[1:]) ** 2) all_mse.append(mse) all_kstest_pvals = np.array(all_kstest_pvals).reshape( (len(perm_ranges), -1)) all_mse = np.array(all_mse).reshape((len(perm_ranges), -1)) # check that a difference between distributions is not rejected by KS test assert_array_less(0.01 / (len(perm_ranges) * 10.), all_kstest_pvals) # consistency of the algorithm: the more permutations, the less the MSE assert_array_less(np.diff(all_mse.mean(1)), 0) ### Tests for labels swapping permutation scheme ############################## def test_permuted_ols_nocovar(random_state=0): rng = check_random_state(random_state) # design parameters n_samples = 50 # create design target_var = rng.randn(n_samples, 1) tested_var = rng.randn(n_samples, 1) # compute t-scores with linalg or statsmodels ref_score = get_tvalue_with_alternative_library(tested_var, target_var) # permuted OLS _, own_score, _ = permuted_ols( tested_var, target_var, model_intercept=False, n_perm=0, random_state=random_state) assert_array_almost_equal(ref_score, own_score, decimal=6) # test with ravelized tested_var _, own_score, _ = permuted_ols( np.ravel(tested_var), target_var, model_intercept=False, n_perm=0, random_state=random_state) assert_array_almost_equal(ref_score, own_score, decimal=6) ### Adds intercept (should be equivalent to centering variates) # permuted OLS _, own_score_intercept, _ = permuted_ols( tested_var, target_var, model_intercept=True, n_perm=0, random_state=random_state) target_var -= target_var.mean(0) tested_var -= tested_var.mean(0) # compute t-scores with linalg or statsmodels ref_score_intercept = get_tvalue_with_alternative_library( tested_var, target_var, np.ones((n_samples, 1))) assert_array_almost_equal(ref_score_intercept, own_score_intercept, decimal=6) def test_permuted_ols_withcovar(random_state=0): """ """ rng = check_random_state(random_state) # design parameters n_samples = 50 # create design target_var = rng.randn(n_samples, 1) tested_var = rng.randn(n_samples, 1) confounding_vars = rng.randn(n_samples, 2) # compute t-scores with linalg or statsmodels ref_score = get_tvalue_with_alternative_library(tested_var, target_var, confounding_vars) # permuted OLS _, own_score, _ = permuted_ols( tested_var, target_var, confounding_vars, model_intercept=False, n_perm=0, random_state=random_state) assert_array_almost_equal(ref_score, own_score, decimal=6) ### Adds intercept # permuted OLS _, own_scores_intercept, _ = permuted_ols( tested_var, target_var, confounding_vars, model_intercept=True, n_perm=0, random_state=random_state) # compute t-scores with linalg or statsmodels confounding_vars = np.hstack((confounding_vars, np.ones((n_samples, 1)))) alt_score_intercept = get_tvalue_with_alternative_library( tested_var, target_var, confounding_vars) assert_array_almost_equal(alt_score_intercept, own_scores_intercept, decimal=6) def 
test_permuted_ols_nocovar_multivariate(random_state=0): """Test permuted_ols with multiple tested variates and no covariate. It is equivalent to fitting several models with only one tested variate. """ rng = check_random_state(random_state) # design parameters n_samples = 50 n_targets = 10 n_regressors = 2 # create design target_vars = rng.randn(n_samples, n_targets) tested_var = rng.randn(n_samples, n_regressors) # compute t-scores with linalg or statsmodels ref_scores = get_tvalue_with_alternative_library(tested_var, target_vars) # permuted OLS _, own_scores, _ = permuted_ols( tested_var, target_vars, model_intercept=False, n_perm=0, random_state=random_state) assert_array_almost_equal(ref_scores, own_scores, decimal=6) ### Adds intercept (should be equivalent to centering variates) # permuted OLS _, own_scores_intercept, _ = permuted_ols( tested_var, target_vars, model_intercept=True, n_perm=0, random_state=random_state) target_vars -= target_vars.mean(0) tested_var -= tested_var.mean(0) # compute t-scores with linalg or statsmodels ref_scores_intercept = get_tvalue_with_alternative_library( tested_var, target_vars, np.ones((n_samples, 1))) assert_array_almost_equal(ref_scores_intercept, own_scores_intercept, decimal=6) def test_permuted_ols_withcovar_multivariate(random_state=0): """Test permuted_ols with multiple tested variates and covariates. It is equivalent to fitting several models with only one tested variate. """ rng = check_random_state(random_state) # design parameters n_samples = 50 n_targets = 10 n_covars = 2 # create design target_vars = rng.randn(n_samples, n_targets) tested_var = rng.randn(n_samples, 1) confounding_vars = rng.randn(n_samples, n_covars) # compute t-scores with linalg or statmodels ref_scores = get_tvalue_with_alternative_library(tested_var, target_vars, confounding_vars) # permuted OLS _, own_scores, _ = permuted_ols( tested_var, target_vars, confounding_vars, model_intercept=False, n_perm=0, random_state=random_state) assert_almost_equal(ref_scores, own_scores, decimal=6) ### Adds intercept # permuted OLS _, own_scores_intercept, _ = permuted_ols( tested_var, target_vars, confounding_vars, model_intercept=True, n_perm=0, random_state=random_state) # compute t-scores with linalg or statmodels confounding_vars = np.hstack((confounding_vars, np.ones((n_samples, 1)))) ref_scores_intercept = get_tvalue_with_alternative_library( tested_var, target_vars, confounding_vars) assert_array_almost_equal(ref_scores_intercept, own_scores_intercept, decimal=6) ### Tests for sign swapping permutation scheme ############################## def test_permuted_ols_intercept_nocovar(random_state=0): rng = check_random_state(random_state) # design parameters n_samples = 50 # create design target_var = rng.randn(n_samples, 1) tested_var = np.ones((n_samples, 1)) # compute t-scores with linalg or statmodels t_val_ref = get_tvalue_with_alternative_library(tested_var, target_var) # permuted OLS neg_log_pvals, orig_scores, _ = permuted_ols( tested_var, target_var, confounding_vars=None, n_perm=10, random_state=random_state) assert_array_less(neg_log_pvals, 1.) 
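# neg_log_pvals < 1 means p > 0.1 for every descriptor: with a random
# target and an intercept-only design, nothing should reach significance.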
# ensure sign swap is correctly done # same thing but with model_intercept=True to check it has no effect _, orig_scores_addintercept, _ = permuted_ols( tested_var, target_var, confounding_vars=None, model_intercept=True, n_perm=0, random_state=random_state) assert_array_almost_equal(t_val_ref, orig_scores, decimal=6) assert_array_almost_equal(orig_scores, orig_scores_addintercept, decimal=6) def test_permuted_ols_intercept_statsmodels_withcovar(random_state=0): rng = check_random_state(random_state) # design parameters n_samples = 50 # create design target_var = rng.randn(n_samples, 1) tested_var = np.ones((n_samples, 1)) confounding_vars = rng.randn(n_samples, 2) # compute t-scores with linalg or statmodels ref_scores = get_tvalue_with_alternative_library(tested_var, target_var, confounding_vars) # permuted OLS _, own_scores, _ = permuted_ols( tested_var, target_var, confounding_vars, n_perm=0, random_state=random_state) # same thing but with model_intercept=True to check it has no effect _, own_scores_intercept, _ = permuted_ols( tested_var, target_var, confounding_vars, model_intercept=True, n_perm=0, random_state=random_state) assert_array_almost_equal(ref_scores, own_scores, decimal=6) assert_array_almost_equal(ref_scores, own_scores_intercept, decimal=6) def test_permuted_ols_intercept_nocovar_multivariate(random_state=0): rng = check_random_state(random_state) # design parameters n_samples = 50 n_targets = 10 # create design target_vars = rng.randn(n_samples, n_targets) tested_vars = np.ones((n_samples, 1)) # compute t-scores with nilearn routine ref_scores = get_tvalue_with_alternative_library(tested_vars, target_vars) # permuted OLS _, own_scores, _ = permuted_ols( tested_vars, target_vars, confounding_vars=None, n_perm=0, random_state=random_state) # same thing but with model_intercept=True to check it has no effect _, own_scores_intercept, _ = permuted_ols( tested_vars, target_vars, confounding_vars=None, model_intercept=True, n_perm=0, random_state=random_state) assert_array_almost_equal(ref_scores, own_scores, decimal=6) assert_array_almost_equal(own_scores, own_scores_intercept, decimal=6) def test_permuted_ols_intercept_withcovar_multivariate(random_state=0): rng = check_random_state(random_state) # design parameters n_samples = 50 n_targets = 10 n_covars = 2 # create design target_vars = rng.randn(n_samples, n_targets) tested_var = np.ones((n_samples, 1)) confounding_vars = rng.randn(n_samples, n_covars) # compute t-scores with linalg or statsmodels ref_scores = get_tvalue_with_alternative_library(tested_var, target_vars, confounding_vars) # permuted OLS _, own_scores, _ = permuted_ols( tested_var, target_vars, confounding_vars, n_perm=0, random_state=random_state) # same thing but with model_intercept=True to check it has no effect _, own_scores_intercept, _ = permuted_ols( tested_var, target_vars, confounding_vars, model_intercept=True, n_perm=0, random_state=random_state) assert_almost_equal(ref_scores, own_scores, decimal=6) assert_array_almost_equal(own_scores, own_scores_intercept, decimal=6) ### Test one-sided versus two-sided ########################################### def test_sided_test(random_state=0): """Check that a positive effect is always better recovered with one-sided. 
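In other words, wherever the one-sided test flags a positive effect, the two-sided -log10 p-values should never exceed the one-sided ones.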
""" rng = check_random_state(random_state) # design parameters n_samples = 50 # create design target_var = rng.randn(n_samples, 100) tested_var = rng.randn(n_samples, 1) # permuted OLS # one-sided neg_log_pvals_onesided, _, _ = permuted_ols( tested_var, target_var, model_intercept=False, two_sided_test=False, n_perm=100, random_state=random_state) # two-sided neg_log_pvals_twosided, _, _ = permuted_ols( tested_var, target_var, model_intercept=False, two_sided_test=True, n_perm=100, random_state=random_state) positive_effect_location = neg_log_pvals_onesided > 1 assert_equal( np.sum(neg_log_pvals_twosided[positive_effect_location] - neg_log_pvals_onesided[positive_effect_location] > 0), 0) def test_sided_test2(random_state=0): """Check that two-sided can actually recover positive and negative effects. """ # create design target_var1 = np.arange(0, 10).reshape((-1, 1)) # positive effect target_var = np.hstack((target_var1, - target_var1)) tested_var = np.arange(0, 20, 2) # permuted OLS # one-sided neg_log_pvals_onesided, _, _ = permuted_ols( tested_var, target_var, model_intercept=False, two_sided_test=False, n_perm=100, random_state=random_state) # one-sided (other side) neg_log_pvals_onesided2, _, _ = permuted_ols( tested_var, -target_var, model_intercept=False, two_sided_test=False, n_perm=100, random_state=random_state) # two-sdided neg_log_pvals_twosided, _, _ = permuted_ols( tested_var, target_var, model_intercept=False, two_sided_test=True, n_perm=100, random_state=random_state) assert_array_almost_equal(neg_log_pvals_onesided[0], neg_log_pvals_onesided2[0][::-1]) assert_array_almost_equal(neg_log_pvals_onesided + neg_log_pvals_onesided2, neg_log_pvals_twosided) PKHnilearn/tests/__init__.pyPKH鸚c  #nilearn/tests/test_class_inspect.py""" Test the class_inspect module This test file is in nilearn/tests because nosetests seems to ignore modules whose name starts with an underscore """ from nose.tools import assert_equal from sklearn.base import BaseEstimator from nilearn._utils import class_inspect ############################################################################## # Helpers for the tests class A(BaseEstimator): def __init__(self, a=1): self.a = a class B(A): def __init__(self, a=1, b=2): self.a = a self.b = b def get_scope_name(self, stack=0, *args, **kwargs): c = C() return c.get_scope_name(stack=stack, *args, **kwargs) class C: def get_scope_name(self, *args, **kwargs): return get_scope_name(*args, **kwargs) def get_scope_name(stack=0, *args, **kwargs): if stack == 0: return class_inspect.enclosing_scope_name(*args, **kwargs) return get_scope_name(stack - 1, *args, **kwargs) ############################################################################## # The tests themselves def test_get_params(): b = B() params_a_in_b = class_inspect.get_params(A, b) assert_equal(params_a_in_b, dict(a=1)) params_a_in_b = class_inspect.get_params(A, b, ignore=['a']) assert_equal(params_a_in_b, {}) def test_enclosing_scope_name(): b = B() name = b.get_scope_name() assert_equal(name, 'B.get_scope_name') name = b.get_scope_name(stack=3) assert_equal(name, 'B.get_scope_name') name = b.get_scope_name(ensure_estimator=False) assert_equal(name, 'C.get_scope_name') name = b.get_scope_name(stack=3, ensure_estimator=False) assert_equal(name, 'get_scope_name') name = b.get_scope_name(ensure_estimator=False, stack_level=120) assert_equal(name, 'Unknown') PKHu72 2 nilearn/tests/test_logger.py""" Test the logger module This test file is in nilearn/tests because nosetests ignores modules whose name starts 
with an underscore. """ import contextlib from nose.tools import assert_equal from sklearn.base import BaseEstimator from nilearn._utils.logger import log @contextlib.contextmanager def capture_output(): import sys from nilearn._utils.compat import StringIO oldout, olderr = sys.stdout, sys.stderr try: out = [StringIO(), StringIO()] sys.stdout, sys.stderr = out yield out finally: sys.stdout, sys.stderr = oldout, olderr out[0] = out[0].getvalue() out[1] = out[1].getvalue() # Helper functions and classes def run(): log("function run()") def other_run(): # Test too large values for stack_level # stack_level should exceed nosetests stack levels as well log("function other_run()", stack_level=100) class Run3(object): def run3(self): log("method Test3") run() class Run2(BaseEstimator): def run2(self): log("method Test2") t = Run() t.run() class Run(BaseEstimator): def run(self): log("method Test") run() def test_log(): # Stack containing one non-matching object with capture_output() as out: t = Run3() t.run3() assert_equal(out[0], "[Run3.run3] method Test3\n[run] function run()\n") # Stack containing two matching objects with capture_output() as out: t = Run2() t.run2() assert_equal(out[0], "[Run2.run2] method Test2\n" "[Run2.run2] method Test\n" "[Run2.run2] function run()\n") # Stack containing one matching object with capture_output() as out: t = Run() t.run() assert_equal(out[0], "[Run.run] method Test\n[Run.run] function run()\n") # Stack containing no object with capture_output() as out: run() assert_equal(out[0], "[run] function run()\n") # Test stack_level too large with capture_output() as out: other_run() assert_equal(out[0], "[] function other_run()\n") # Will be executed by nosetests upon importing with capture_output() as out: log("message from no function") assert_equal(out[0], "[] message from no function\n") nilearn/tests/test_numpy_conversions.py """ Test the numpy_conversions module This test file is in nilearn/tests because nosetests seems to ignore modules whose name starts with an underscore """ import numpy as np import os import tempfile from nose.tools import assert_true, assert_raises from nilearn._utils.numpy_conversions import as_ndarray, csv_to_array def are_arrays_identical(arr1, arr2): """Check if two 1-dimensional arrays point to the same buffer. The check is performed only on the first value of the arrays. For this test to be reliable, arr2 must not point to a subset of arr1. For example, if arr2 = arr1[1:] has been executed just before calling this function, the test will FAIL, even if the same buffer is used by both arrays. arr2 = arr1[:1] will succeed though. dtypes are not supposed to be identical. """ # Modify the first value in arr1 twice, and see if the corresponding # value in arr2 has changed. Changing the value twice is required, since # the original value could be the first value that we use.
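# (a single write could go unnoticed if arr1[0] already held that value,
# e.g. if it were already 0; hence the two writes with different values)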
orig1 = arr1[0] orig2 = arr2[0] arr1[0] = 0 if arr2[0] != orig2: arr1[0] = orig1 return True arr1[0] = 1 if arr2[0] != orig2: arr1[0] = orig1 return True arr1[0] = orig1 return False def test_are_array_identical(): arr1 = np.ones(4) orig1 = arr1.copy() arr2 = arr1 orig2 = arr2.copy() assert(are_arrays_identical(arr1, arr2)) np.testing.assert_array_almost_equal(orig1, arr1, decimal=10) np.testing.assert_array_almost_equal(orig2, arr2, decimal=10) arr2 = arr1[:1] orig2 = arr2.copy() assert(are_arrays_identical(arr1, arr2)) np.testing.assert_array_almost_equal(orig1, arr1, decimal=10) np.testing.assert_array_almost_equal(orig2, arr2, decimal=10) arr2 = arr1[1:] orig2 = arr2.copy() assert(not are_arrays_identical(arr1, arr2)) np.testing.assert_array_almost_equal(orig1, arr1, decimal=10) np.testing.assert_array_almost_equal(orig2, arr2, decimal=10) arr2 = arr1.copy() orig2 = arr2.copy() assert(not are_arrays_identical(arr1, arr2)) np.testing.assert_array_almost_equal(orig1, arr1, decimal=10) np.testing.assert_array_almost_equal(orig2, arr2, decimal=10) def test_as_ndarray(): # All test cases # input dtype, input order, should copy, output dtype, output order, copied test_cases = [ # no-op (np.float, "C", False, None, None, False), (np.float, "F", False, None, None, False), # simple copy (np.float, "C", True, None, None, True), (np.float, "F", True, None, None, True), # dtype provided, identical (np.float, "C", False, np.float, None, False), (np.float, "F", False, np.float, None, False), # dtype changed (np.float, "C", False, np.float32, None, True), (np.float, "F", False, np.float32, None, True), # dtype and order provided, but identical (np.float, "C", False, np.float, "C", False), (np.float, "F", False, np.float, "F", False), # order provided, unchanged (np.float, "C", False, None, "C", False), (np.float, "F", False, None, "F", False), (np.float, "C", True, None, "C", True), (np.float, "F", True, None, "F", True), # order provided, changed (np.float, "C", False, None, "F", True), (np.float, "F", False, None, "C", True), (np.float, "C", True, None, "F", True), (np.float, "F", True, None, "C", True), # Special case for int8 <-> bool conversion. 
(np.int8, "C", False, np.bool, None, False), (np.int8, "F", False, np.bool, None, False), (np.int8, "C", False, np.bool, "C", False), (np.int8, "F", False, np.bool, "F", False), (np.int8, "C", False, np.bool, "F", True), (np.int8, "F", False, np.bool, "C", True), (np.int8, "C", True, np.bool, None, True), (np.int8, "F", True, np.bool, None, True), (np.int8, "C", True, np.bool, "C", True), (np.int8, "F", True, np.bool, "F", True), (np.bool, "C", False, np.int8, None, False), (np.bool, "F", False, np.int8, None, False), (np.bool, "C", False, np.int8, "C", False), (np.bool, "F", False, np.int8, "F", False), (np.bool, "C", False, np.int8, "F", True), (np.bool, "F", False, np.int8, "C", True), (np.bool, "C", True, np.int8, None, True), (np.bool, "F", True, np.int8, None, True), (np.bool, "C", True, np.int8, "C", True), (np.bool, "F", True, np.int8, "F", True), ] shape = (10, 11) for case in test_cases: in_dtype, in_order, copy, out_dtype, out_order, copied = case arr1 = np.ones(shape, dtype=in_dtype, order=in_order) arr2 = as_ndarray(arr1, copy=copy, dtype=out_dtype, order=out_order) assert_true(not are_arrays_identical(arr1[0], arr2[0]) == copied, msg=str(case)) if out_dtype is None: assert_true(arr2.dtype == in_dtype, msg=str(case)) else: assert_true(arr2.dtype == out_dtype, msg=str(case)) result_order = out_order if out_order is not None else in_order if result_order == "F": assert_true(arr2.flags["F_CONTIGUOUS"], msg=str(case)) else: assert_true(arr2.flags["C_CONTIGUOUS"], msg=str(case)) # memmap filename = os.path.join(os.path.dirname(__file__), "data", "mmap.dat") # same dtype, no copy requested arr1 = np.memmap(filename, dtype='float32', mode='w+', shape=(5,)) arr2 = as_ndarray(arr1) assert(not are_arrays_identical(arr1, arr2)) # same dtype, copy requested arr1 = np.memmap(filename, dtype='float32', mode='readwrite', shape=(5,)) arr2 = as_ndarray(arr1, copy=True) assert(not are_arrays_identical(arr1, arr2)) # different dtype arr1 = np.memmap(filename, dtype='float32', mode='readwrite', shape=(5,)) arr2 = as_ndarray(arr1, dtype=np.int) assert(arr2.dtype == np.int) assert(not are_arrays_identical(arr1, arr2)) # same dtype, explicitly provided: must copy arr1 = np.memmap(filename, dtype='float32', mode='readwrite', shape=(5,)) arr2 = as_ndarray(arr1, dtype=np.float32) assert(arr2.dtype == np.float32) assert(not are_arrays_identical(arr1, arr2)) # same dtype, order provided arr1 = np.memmap(filename, dtype='float32', mode='readwrite', shape=(10, 10)) arr2 = as_ndarray(arr1, order="F") assert(arr2.flags["F_CONTIGUOUS"] and not arr2.flags["C_CONTIGUOUS"]) assert(arr2.dtype == arr1.dtype) assert(not are_arrays_identical(arr1[0], arr2[0])) # same dtype, order unchanged but provided arr1 = np.memmap(filename, dtype='float32', mode='readwrite', shape=(10, 10), order="F") arr2 = as_ndarray(arr1, order="F") assert(arr2.flags["F_CONTIGUOUS"] and not arr2.flags["C_CONTIGUOUS"]) assert(arr2.dtype == arr1.dtype) assert(not are_arrays_identical(arr1[0], arr2[0])) # dtype and order specified arr1 = np.memmap(filename, dtype='float32', mode='readwrite', shape=(10, 10), order="F") arr2 = as_ndarray(arr1, order="F", dtype=np.int32) assert(arr2.flags["F_CONTIGUOUS"] and not arr2.flags["C_CONTIGUOUS"]) assert(arr2.dtype == np.int32) assert(not are_arrays_identical(arr1[0], arr2[0])) # list # same dtype, no copy requested arr1 = [0, 1, 2, 3] arr2 = as_ndarray(arr1) assert(not are_arrays_identical(arr1, arr2)) # same dtype, copy requested arr1 = [0, 1, 2, 3] arr2 = as_ndarray(arr1, copy=True) assert(not 
are_arrays_identical(arr1, arr2)) # different dtype arr1 = [0, 1, 2, 3] arr2 = as_ndarray(arr1, dtype=np.float) assert(arr2.dtype == np.float) assert(not are_arrays_identical(arr1, arr2)) # order specified arr1 = [[0, 1, 2, 3], [0, 1, 2, 3]] arr2 = as_ndarray(arr1, dtype=np.float, order="F") assert(arr2.dtype == np.float) assert(arr2.flags["F_CONTIGUOUS"] and not arr2.flags["C_CONTIGUOUS"]) assert(not are_arrays_identical(arr1[0], arr2[0])) # Unhandled cases assert_raises(ValueError, as_ndarray, "test string") assert_raises(ValueError, as_ndarray, [], order="invalid") def test_csv_to_array(): # Create a phony CSV file filename = tempfile.mktemp(suffix='.csv') try: with open(filename, mode='wt') as fp: fp.write('1.,2.,3.,4.,5.\n') assert_true(np.allclose(csv_to_array(filename), np.asarray([1., 2., 3., 4., 5.]))) assert_raises(TypeError, csv_to_array, filename, delimiters='?!') finally: os.remove(filename) PKHnilearn/tests/test_extmath.py""" Test the _utils.extmath module """ import nose import numpy as np from sklearn.utils import check_random_state from nilearn._utils.extmath import fast_abs_percentile, is_spd def test_fast_abs_percentile(): rng = check_random_state(1) data = np.arange(100) rng.shuffle(data) for p in data: yield nose.tools.assert_equal, fast_abs_percentile(data, p), p def test_is_spd_with_non_symmetrical_matrix(): matrix = np.arange(4).reshape(4, 1) assert not is_spd(matrix, verbose=0) matrix = np.array([[1, 1e-3 + 9e-19], [1e-3, 1]]) assert is_spd(matrix, verbose=0) matrix = np.array([[1, 1e-3 + 1e-18], [1e-3, 1]]) assert not is_spd(matrix, verbose=0) matrix = np.array([[1, 1e-3 + 9e-8], [1e-3, 1]]) assert is_spd(matrix, decimal=4, verbose=0) matrix = np.array([[1, 1e-3 + 1e-7], [1e-3, 1]]) assert not is_spd(matrix, decimal=4, verbose=0) def test_is_spd_with_symmetrical_matrix(): # matrix with negative eigenvalue matrix = np.array([[0, 1], [1, 0]]) assert not is_spd(matrix, verbose=0) # matrix with 0 eigenvalue matrix = np.arange(4).reshape(2, 2) assert not is_spd(matrix, verbose=0) # spd matrix matrix = np.array([[2, 1], [1, 1]]) assert is_spd(matrix, verbose=0) def test_fast_abs_percentile_no_index_error(): # check the offending low-level function fast_abs_percentile(np.arange(4)) PKHo}Ynilearn/tests/test_ndimage.py""" Test the ndimage module This test file is in nilearn/tests because nosetests ignores modules whose name starts with an underscore """ from nose.tools import assert_raises import numpy as np from nilearn._utils.ndimage import largest_connected_component, _peak_local_max def test_largest_cc(): """ Check the extraction of the largest connected component. 
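    Illustrative sketch (an assumption about the semantics under test,
    not nilearn's implementation): the same result can be obtained with
    scipy.ndimage directly, by labelling the volume and keeping the
    biggest label:

        import numpy as np
        from scipy import ndimage

        volume = np.zeros((6, 6, 6))
        volume[1:3, 1:3, 1:3] = 1   # a 2x2x2 component, 8 voxels
        volume[5, 5, 5] = 1         # plus one isolated voxel
        labels, n_labels = ndimage.label(volume)
        sizes = ndimage.sum(volume, labels, index=range(1, n_labels + 1))
        largest = labels == (np.argmax(sizes) + 1)
        assert largest.sum() == 8   # the isolated voxel is dropped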
""" a = np.zeros((6, 6, 6)) assert_raises(ValueError, largest_connected_component, a) a[1:3, 1:3, 1:3] = 1 np.testing.assert_equal(a, largest_connected_component(a)) b = a.copy() b[5, 5, 5] = 1 np.testing.assert_equal(a, largest_connected_component(b)) def test_empty_peak_local_max(): image = np.zeros((10, 20)) result = _peak_local_max(image, min_distance=1, threshold_rel=0) assert np.all(~ result) def test_flat_peak_local_max(): image = np.zeros((5, 5)) image[1:3, 1:3] = 10 peaks = _peak_local_max(image, min_distance=1) np.testing.assert_equal(len(peaks[peaks == 1]), 4) def test_relative_and_absolute_thresholds_in_peak_local_max(): image = np.zeros((5, 5)) image[1, 1] = 10 image[3, 3] = 20 peaks_rel = _peak_local_max(image, min_distance=1, threshold_rel=0.5) np.testing.assert_equal(len(peaks_rel[peaks_rel == 1]), 1) peaks_abs = _peak_local_max(image, min_distance=1, threshold_abs=10) np.testing.assert_equal(len(peaks_abs[peaks_abs == 1]), 1) def test_constant_image_in_peak_local_max(): image = 128 * np.ones((20, 20)) peaks = _peak_local_max(image, min_distance=1) np.testing.assert_equal(len(peaks[peaks == 1]), 0) def test_trivial_cases_in_peak_local_max(): trivial = np.zeros((25, 25)) peaks = _peak_local_max(trivial, min_distance=1) assert (peaks.astype(np.bool) == trivial).all() PKHLnilearn/tests/test_niimg.pyimport os import numpy as np from nose.tools import assert_equal from nibabel import Nifti1Image from sklearn.externals import joblib from nilearn.image import new_img_like from nilearn._utils import niimg from nilearn._utils.testing import assert_raises_regex currdir = os.path.dirname(os.path.abspath(__file__)) def test_copy_img(): assert_raises_regex(ValueError, "Input value is not an image", niimg.copy_img, 3) def test_copy_img_side_effect(): img1 = Nifti1Image(np.ones((2, 2, 2, 2)), affine=np.eye(4)) hash1 = joblib.hash(img1) niimg.copy_img(img1) hash2 = joblib.hash(img1) assert_equal(hash1, hash2) def test_new_img_like_side_effect(): img1 = Nifti1Image(np.ones((2, 2, 2, 2)), affine=np.eye(4)) hash1 = joblib.hash(img1) new_img_like(img1, np.ones((2, 2, 2, 2)), img1.get_affine().copy(), copy_header=True) hash2 = joblib.hash(img1) assert_equal(hash1, hash2) PKH#eT^^&nilearn/tests/test_param_validation.py""" Test the _utils.param_validation module """ import numpy as np from nose.tools import assert_true, assert_equal from nilearn._utils.testing import assert_raises_regex, assert_warns from nilearn._utils.extmath import fast_abs_percentile from nilearn._utils.param_validation import check_threshold def test_check_threshold(): matrix = np.array([[1., 2.], [2., 1.]]) name = 'threshold' # few not correctly formatted strings for 'threshold' wrong_thresholds = ['0.1', '10', '10.2.3%', 'asdf%'] for wrong_threshold in wrong_thresholds: assert_raises_regex(ValueError, '{0}.+should be a number followed by ' 'the percent sign'.format(name), check_threshold, wrong_threshold, matrix, 'fast_abs_percentile', name) threshold = object() assert_raises_regex(TypeError, '{0}.+should be either a number ' 'or a string'.format(name), check_threshold, threshold, matrix, 'fast_abs_percentile', name) # Test threshold as int, threshold=2 should return as it is # since it is not string assert_equal(check_threshold(2, matrix, percentile_func=fast_abs_percentile), 2) # check whether raises a warning if given threshold is higher than expected assert_warns(UserWarning, check_threshold, 3., matrix, percentile_func=fast_abs_percentile) # test with numpy scalar as argument threshold = 2. 
threshold_numpy_scalar = np.float64(threshold) assert_equal( check_threshold(threshold, matrix, percentile_func=fast_abs_percentile), check_threshold(threshold_numpy_scalar, matrix, percentile_func=fast_abs_percentile)) # Test for threshold provided as a percentile of the data (str ending with a # %) assert_true(1. < check_threshold("50%", matrix, percentile_func=fast_abs_percentile, name=name) <= 2.) PKHBO"nilearn/tests/test_segmentation.py""" Testing functions for random walker segmentation from scikit-image 0.11.3. Thanks to scikit image. """ import numpy as np from nilearn._utils.segmentation import _random_walker def test_modes_in_random_walker(): img = np.zeros((30, 30, 30)) + 0.1 * np.random.randn(30, 30, 30) img[9:21, 9:21, 9:21] = 1 img[10:20, 10:20, 10:20] = 0 labels = np.zeros_like(img) labels[6, 6, 6] = 1 labels[14, 15, 16] = 2 # default mode = cg random_walker_cg = _random_walker(img, labels, beta=90) assert (random_walker_cg.reshape(img.shape)[6, 6, 6] == 1).all() assert img.shape == random_walker_cg.shape # test `mask` strategy of sub function _mask_edges_weights in laplacian labels[5:25, 26:29, 26:29] = -1 random_walker_inactive = _random_walker(img, labels, beta=30) def test_trivial_cases(): # When all voxels are labeled img = np.ones((10, 10, 10)) labels = np.ones((10, 10, 10)) # It returns same labels which are provided pass_through = _random_walker(img, labels) np.testing.assert_array_equal(pass_through, labels) def test_bad_inputs(): # Too few dimensions img = np.ones(10) labels = np.arange(10) np.testing.assert_raises(ValueError, _random_walker, img, labels) # Too many dimensions np.random.seed(42) img = np.random.normal(size=(3, 3, 3, 3, 3)) labels = np.arange(3 ** 5).reshape(img.shape) np.testing.assert_raises(ValueError, _random_walker, img, labels) # Spacing incorrect length img = np.random.normal(size=(10, 10)) labels = np.zeros((10, 10)) labels[2, 4] = 2 labels[6, 8] = 5 np.testing.assert_raises(ValueError, _random_walker, img, labels, spacing=(1,)) def test_reorder_labels(): # When labels have non-consecutive integers, we make them consecutive # by reordering them to make no gaps/differences between integers. We expect # labels to be of same shape even if they are reordered. # Issue #938, comment #14. data = np.zeros((5, 5)) + 0.1 * np.random.randn(5, 5) data[1:5, 1:5] = 1 labels = np.zeros_like(data) labels[3, 3] = 1 labels[1, 4] = 4 # giving integer which is non-consecutive labels = _random_walker(data, labels) assert data.shape == labels.shape PKHJx x nilearn/tests/test_testing.pyimport itertools import numpy as np from nose.tools import assert_equal, assert_raises from nilearn._utils.testing import generate_fake_fmri, with_memory_profiler from nilearn._utils.testing import assert_memory_less_than, assert_raises_regex def create_object(size): """Just create and return an object containing `size` bytes.""" mem_use = b'a' * size return mem_use @with_memory_profiler def test_memory_usage(): # Valid measures for mem in (500, 200, 100): assert_memory_less_than(mem, 0.1, create_object, mem * 1024 ** 2) # Ensure an exception is raised with too small objects as # memory_profiler can return non trustable memory measure in this case. assert_raises_regex(ValueError, "Memory profiler measured an untrustable memory", assert_memory_less_than, 50, 0.1, create_object, 25 * 1024 ** 2) # Ensure ValueError is raised if memory used is above expected memory # limit. 
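    # Illustrative sketch, assuming the memory_profiler package is
    # available: checks like assert_memory_less_than can be built on
    # memory_profiler.memory_usage, which samples the process memory
    # (in MiB) while a callable runs:
    from memory_profiler import memory_usage

    def _allocate(n_mib):
        return b'a' * (n_mib * 1024 ** 2)

    samples = memory_usage((_allocate, (100,)), interval=0.1)
    print("peak increase: %.1f MiB" % (max(samples) - min(samples)))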
assert_raises_regex(ValueError, "Memory consumption measured", assert_memory_less_than, 50, 0.1, create_object, 100 * 1024 ** 2) def test_generate_fake_fmri(): shapes = [(6, 6, 7), (10, 11, 12)] lengths = [16, 20] kinds = ['noise', 'step'] n_blocks = [None, 1, 4] block_size = [None, 4] block_type = ['classification', 'regression'] rand_gen = np.random.RandomState(3) for shape, length, kind, n_block, bsize, btype in itertools.product( shapes, lengths, kinds, n_blocks, block_size, block_type): if n_block is None: fmri, mask = generate_fake_fmri( shape=shape, length=length, kind=kind, n_blocks=n_block, block_size=bsize, block_type=btype, rand_gen=rand_gen) else: fmri, mask, target = generate_fake_fmri( shape=shape, length=length, kind=kind, n_blocks=n_block, block_size=bsize, block_type=btype, rand_gen=rand_gen) assert_equal(fmri.shape[:-1], shape) assert_equal(fmri.shape[-1], length) if n_block is not None: assert_equal(target.size, length) assert_raises(ValueError, generate_fake_fmri, length=10, n_blocks=10, block_size=None, rand_gen=rand_gen) PKH#U!nilearn/tests/test_cache_mixin.py""" Test the _utils.cache_mixin module """ import os import shutil import tempfile import json import glob from nose.tools import assert_false, assert_true, assert_equal from sklearn.externals.joblib import Memory import nilearn from nilearn._utils import cache_mixin, CacheMixin from nilearn._utils.testing import assert_raises_regex def f(x): # A simple test function return x def test__safe_cache_dir_creation(): # Test the _safe_cache function that is supposed to flush the # cache if the nibabel version changes try: temp_dir = tempfile.mkdtemp() mem = Memory(cachedir=temp_dir) version_file = os.path.join(temp_dir, 'joblib', 'module_versions.json') assert_false(os.path.exists(version_file)) # First test that a version file get created cache_mixin._safe_cache(mem, f) assert_true(os.path.exists(version_file)) # Test that it does not get recreated during the same session os.unlink(version_file) cache_mixin._safe_cache(mem, f) assert_false(os.path.exists(version_file)) finally: if os.path.exists(temp_dir): shutil.rmtree(temp_dir) def test__safe_cache_flush(): # Test the _safe_cache function that is supposed to flush the # cache if the nibabel version changes try: temp_dir = tempfile.mkdtemp() mem = Memory(cachedir=temp_dir) version_file = os.path.join(temp_dir, 'joblib', 'module_versions.json') # Create an mock version_file with old module versions with open(version_file, 'w') as f: json.dump({"nibabel": [0, 0]}, f) # Create some store structure nibabel_dir = os.path.join(temp_dir, 'joblib', 'nibabel_') os.makedirs(nibabel_dir) # First turn off version checking nilearn.CHECK_CACHE_VERSION = False cache_mixin._safe_cache(mem, f) assert_true(os.path.exists(nibabel_dir)) # Second turn on version checking nilearn.CHECK_CACHE_VERSION = True # Make sure that the check will run again cache_mixin.__CACHE_CHECKED = {} with open(version_file, 'w') as f: json.dump({"nibabel": [0, 0]}, f) cache_mixin._safe_cache(mem, f) assert_true(os.path.exists(version_file)) assert_false(os.path.exists(nibabel_dir)) finally: pass # if os.path.exists(temp_dir): # shutil.rmtree(temp_dir) def test_cache_memory_level(): temp_dir = tempfile.mkdtemp() job_glob = os.path.join(temp_dir, 'joblib', 'nilearn', 'tests', 'test_cache_mixin', 'f', '*') mem = Memory(cachedir=temp_dir, verbose=0) cache_mixin.cache(f, mem, func_memory_level=2, memory_level=1)(2) assert_equal(len(glob.glob(job_glob)), 0) cache_mixin.cache(f, Memory(cachedir=None))(2) 
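    # Illustrative sketch (ours, hypothetical) of the memory-level gating
    # this test exercises: caching only activates once the object's
    # memory_level reaches the function's func_memory_level, which is why
    # the first call above leaves the cache directory empty.
    from sklearn.externals.joblib import Memory as _Memory

    def _cache_sketch(func, memory, func_memory_level, memory_level):
        if memory_level >= func_memory_level:
            return memory.cache(func)   # cached version
        return func                     # below threshold: plain call

    assert _cache_sketch(abs, _Memory(cachedir=None),
                         func_memory_level=2, memory_level=1) is abs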
assert_equal(len(glob.glob(job_glob)), 0) cache_mixin.cache(f, mem, func_memory_level=2, memory_level=3)(2) assert_equal(len(glob.glob(job_glob)), 2) cache_mixin.cache(f, mem)(3) assert_equal(len(glob.glob(job_glob)), 3) class CacheMixinTest(CacheMixin): """Dummy mock object that wraps a CacheMixin.""" def __init__(self, memory=None, memory_level=1): self.memory = memory self.memory_level = memory_level def run(self): self._cache(f) def test_cache_mixin_with_expand_user(): # Test the memory cache is correctly created when using ~. cache_dir = "~/nilearn_data/test_cache" expand_cache_dir = os.path.expanduser(cache_dir) mixin_mock = CacheMixinTest(cache_dir) try: assert_false(os.path.exists(expand_cache_dir)) mixin_mock.run() assert_true(os.path.exists(expand_cache_dir)) finally: if os.path.exists(expand_cache_dir): shutil.rmtree(expand_cache_dir) def test_cache_mixin_without_expand_user(): # Test the memory cache is correctly created when using ~. cache_dir = "~/nilearn_data/test_cache" expand_cache_dir = os.path.expanduser(cache_dir) mixin_mock = CacheMixinTest(cache_dir) try: assert_false(os.path.exists(expand_cache_dir)) nilearn.EXPAND_PATH_WILDCARDS = False assert_raises_regex(ValueError, "Given cache path parent directory doesn't", mixin_mock.run) assert_false(os.path.exists(expand_cache_dir)) nilearn.EXPAND_PATH_WILDCARDS = True finally: if os.path.exists(expand_cache_dir): shutil.rmtree(expand_cache_dir) def test_cache_mixin_wrong_dirs(): # Test the memory cache raises a ValueError when input base path doesn't # exist. for cache_dir in ("/bad_dir/cache", "~/nilearn_data/tmp/test_cache"): expand_cache_dir = os.path.expanduser(cache_dir) mixin_mock = CacheMixinTest(cache_dir) try: assert_raises_regex(ValueError, "Given cache path parent directory doesn't", mixin_mock.run) assert_false(os.path.exists(expand_cache_dir)) finally: if os.path.exists(expand_cache_dir): shutil.rmtree(expand_cache_dir) PKH99nilearn/tests/test_masking.py""" Test the mask-extracting utilities. 
""" import distutils.version import warnings import numpy as np from numpy.testing import assert_array_equal from nose.tools import assert_true, assert_false, assert_equal, \ assert_raises from nibabel import Nifti1Image from nilearn import masking from nilearn.masking import (compute_epi_mask, compute_multi_epi_mask, compute_background_mask, unmask, _unmask_3d, _unmask_4d, intersect_masks, MaskWarning) from nilearn._utils.testing import (write_tmp_imgs, assert_raises_regex) from nilearn._utils.exceptions import DimensionError from nilearn.input_data import NiftiMasker np_version = (np.version.full_version if hasattr(np.version, 'full_version') else np.version.short_version) np_version = distutils.version.LooseVersion(np_version).version _TEST_DIM_ERROR_MSG = ("Input data has incompatible dimensionality: " "Expected dimension is 3D and you provided " "a %s image") def test_compute_epi_mask(): mean_image = np.ones((9, 9, 3)) mean_image[3:-2, 3:-2, :] = 10 mean_image[5, 5, :] = 11 mean_image = Nifti1Image(mean_image, np.eye(4)) mask1 = compute_epi_mask(mean_image, opening=False) mask2 = compute_epi_mask(mean_image, exclude_zeros=True, opening=False) # With an array with no zeros, exclude_zeros should not make # any difference np.testing.assert_array_equal(mask1.get_data(), mask2.get_data()) # Check that padding with zeros does not change the extracted mask mean_image2 = np.zeros((30, 30, 3)) mean_image2[3:12, 3:12, :] = mean_image.get_data() mean_image2 = Nifti1Image(mean_image2, np.eye(4)) mask3 = compute_epi_mask(mean_image2, exclude_zeros=True, opening=False) np.testing.assert_array_equal(mask1.get_data(), mask3.get_data()[3:12, 3:12]) # However, without exclude_zeros, it does mask3 = compute_epi_mask(mean_image2, opening=False) assert_false(np.allclose(mask1.get_data(), mask3.get_data()[3:12, 3:12])) # Check that we get a ValueError for incorrect shape mean_image = np.ones((9, 9)) mean_image[3:-3, 3:-3] = 10 mean_image[5, 5] = 100 mean_image = Nifti1Image(mean_image, np.eye(4)) assert_raises(ValueError, compute_epi_mask, mean_image) # Check that we get a useful warning for empty masks mean_image = np.zeros((9, 9, 9)) mean_image[0, 0, 1] = -1 mean_image[0, 0, 0] = 1.2 mean_image[0, 0, 2] = 1.1 mean_image = Nifti1Image(mean_image, np.eye(4)) with warnings.catch_warnings(record=True) as w: compute_epi_mask(mean_image, exclude_zeros=True) assert_equal(len(w), 1) assert_true(isinstance(w[0].message, masking.MaskWarning)) def test_compute_background_mask(): for value in (0, np.nan): mean_image = value * np.ones((9, 9, 9)) mean_image[3:-3, 3:-3, 3:-3] = 1 mask = mean_image == 1 mean_image = Nifti1Image(mean_image, np.eye(4)) mask1 = compute_background_mask(mean_image, opening=False) np.testing.assert_array_equal(mask1.get_data(), mask.astype(np.int8)) # Check that we get a ValueError for incorrect shape mean_image = np.ones((9, 9)) mean_image[3:-3, 3:-3] = 10 mean_image[5, 5] = 100 mean_image = Nifti1Image(mean_image, np.eye(4)) assert_raises(ValueError, compute_background_mask, mean_image) # Check that we get a useful warning for empty masks mean_image = np.zeros((9, 9, 9)) mean_image = Nifti1Image(mean_image, np.eye(4)) with warnings.catch_warnings(record=True) as w: compute_background_mask(mean_image) assert_equal(len(w), 1) assert_true(isinstance(w[0].message, masking.MaskWarning)) def test_apply_mask(): """ Test smoothing of timeseries extraction """ # A delta in 3D # Standard masking data = np.zeros((40, 40, 40, 2)) data[20, 20, 20] = 1 mask = np.ones((40, 40, 40)) full_mask = 
np.zeros((40, 40, 40)) for create_files in (False, True): for affine in (np.eye(4), np.diag((1, 1, -1, 1)), np.diag((.5, 1, .5, 1))): data_img = Nifti1Image(data, affine) mask_img = Nifti1Image(mask, affine) with write_tmp_imgs(data_img, mask_img, create_files=create_files)\ as filenames: series = masking.apply_mask(filenames[0], filenames[1], smoothing_fwhm=9) series = np.reshape(series[0, :], (40, 40, 40)) vmax = series.max() # We are expecting a full-width at half maximum of # 9mm/voxel_size: above_half_max = series > .5 * vmax for axis in (0, 1, 2): proj = np.any(np.any(np.rollaxis(above_half_max, axis=axis), axis=-1), axis=-1) np.testing.assert_equal(proj.sum(), 9 / np.abs(affine[axis, axis])) # Check that NaNs in the data do not propagate data[10, 10, 10] = np.NaN data_img = Nifti1Image(data, affine) mask_img = Nifti1Image(mask, affine) full_mask_img = Nifti1Image(full_mask, affine) series = masking.apply_mask(data_img, mask_img, smoothing_fwhm=9) assert_true(np.all(np.isfinite(series))) # verify that 4D masks are rejected mask_img_4d = Nifti1Image(np.ones((40, 40, 40, 2)), np.eye(4)) assert_raises_regex(DimensionError, _TEST_DIM_ERROR_MSG % "4D", masking.apply_mask, data_img, mask_img_4d) # Check that 3D data is accepted data_3d = Nifti1Image(np.arange(27).reshape((3, 3, 3)), np.eye(4)) mask_data_3d = np.zeros((3, 3, 3)) mask_data_3d[1, 1, 0] = True mask_data_3d[0, 1, 0] = True mask_data_3d[0, 1, 1] = True data_3d = masking.apply_mask(data_3d, Nifti1Image(mask_data_3d, np.eye(4))) assert_equal(sorted(data_3d.tolist()), [3., 4., 12.]) # Check data shape and affine assert_raises_regex(DimensionError, _TEST_DIM_ERROR_MSG % "2D", masking.apply_mask, data_img, Nifti1Image(mask[20, ...], affine)) assert_raises(ValueError, masking.apply_mask, data_img, Nifti1Image(mask, affine / 2.)) # Check that full masking raises error assert_raises(ValueError, masking.apply_mask, data_img, full_mask_img) # Check weird values in data mask[10, 10, 10] = 2 assert_raises(ValueError, masking.apply_mask, data_img, Nifti1Image(mask, affine)) mask[15, 15, 15] = 3 assert_raises(ValueError, masking.apply_mask, Nifti1Image(data, affine), mask_img) def test_unmask(): # A delta in 3D shape = (10, 20, 30, 40) generator = np.random.RandomState(42) data4D = generator.rand(*shape) data3D = data4D[..., 0] mask = generator.randint(2, size=shape[:3]) mask_img = Nifti1Image(mask, np.eye(4)) mask = mask.astype(bool) masked4D = data4D[mask, :].T unmasked4D = data4D.copy() unmasked4D[~mask, :] = 0 masked3D = data3D[mask] unmasked3D = data3D.copy() unmasked3D[~mask] = 0 # 4D Test, test value ordering at the same time.
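    # Illustrative sketch of the unmask operation itself (an assumption
    # about its semantics: masked values return to their voxels, every
    # other voxel stays 0), including the C/F order knob tested below:
    import numpy as np

    _rng = np.random.RandomState(42)
    _mask = _rng.randint(2, size=(10, 20, 30)).astype(bool)
    _values = _rng.rand(_mask.sum())
    _volume = np.zeros(_mask.shape, order="F")
    _volume[_mask] = _values
    assert _volume.flags["F_CONTIGUOUS"]
    assert np.array_equal(_volume[_mask], _values)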
t = unmask(masked4D, mask_img, order="C").get_data() assert_equal(t.ndim, 4) assert_true(t.flags["C_CONTIGUOUS"]) assert_false(t.flags["F_CONTIGUOUS"]) assert_array_equal(t, unmasked4D) t = unmask([masked4D], mask_img, order="F") t = [t_.get_data() for t_ in t] assert_true(isinstance(t, list)) assert_equal(t[0].ndim, 4) assert_false(t[0].flags["C_CONTIGUOUS"]) assert_true(t[0].flags["F_CONTIGUOUS"]) assert_array_equal(t[0], unmasked4D) # 3D Test - check both with Nifti1Image and file for create_files in (False, True): with write_tmp_imgs(mask_img, create_files=create_files) as filename: t = unmask(masked3D, filename, order="C").get_data() assert_equal(t.ndim, 3) assert_true(t.flags["C_CONTIGUOUS"]) assert_false(t.flags["F_CONTIGUOUS"]) assert_array_equal(t, unmasked3D) t = unmask([masked3D], filename, order="F") t = [t_.get_data() for t_ in t] assert_true(isinstance(t, list)) assert_equal(t[0].ndim, 3) assert_false(t[0].flags["C_CONTIGUOUS"]) assert_true(t[0].flags["F_CONTIGUOUS"]) assert_array_equal(t[0], unmasked3D) # Error test: shape vec_1D = np.empty((500,), dtype=np.int) assert_raises(TypeError, unmask, vec_1D, mask_img) assert_raises(TypeError, unmask, [vec_1D], mask_img) vec_2D = np.empty((500, 500), dtype=np.float64) assert_raises(TypeError, unmask, vec_2D, mask_img) assert_raises(TypeError, unmask, [vec_2D], mask_img) # Error test: mask type assert_raises_regex(TypeError, 'mask must be a boolean array', _unmask_3d, vec_1D, mask.astype(np.int)) assert_raises_regex(TypeError, 'mask must be a boolean array', _unmask_4d, vec_2D, mask.astype(np.float64)) # Transposed vector transposed_vector = np.ones((np.sum(mask), 1), dtype=np.bool) assert_raises_regex(TypeError, 'X must be of shape', unmask, transposed_vector, mask_img) def test_intersect_masks_filename(): # Create dummy masks mask_a = np.zeros((4, 4, 1), dtype=np.bool) mask_a[2:4, 2:4] = 1 mask_a_img = Nifti1Image(mask_a.astype(int), np.eye(4)) # +---+---+---+---+ # | | | | | # +---+---+---+---+ # | | | | | # +---+---+---+---+ # | | | X | X | # +---+---+---+---+ # | | | X | X | # +---+---+---+---+ mask_b = np.zeros((4, 4, 1), dtype=np.bool) mask_b[1:3, 1:3] = 1 mask_b_img = Nifti1Image(mask_b.astype(int), np.eye(4)) # +---+---+---+---+ # | | | | | # +---+---+---+---+ # | | X | X | | # +---+---+---+---+ # | | X | X | | # +---+---+---+---+ # | | | | | # +---+---+---+---+ with write_tmp_imgs(mask_a_img, mask_b_img, create_files=True)\ as filenames: mask_ab = np.zeros((4, 4, 1), dtype=np.bool) mask_ab[2, 2] = 1 mask_ab_ = intersect_masks(filenames, threshold=1.) 
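        # Illustrative sketch of the thresholding rule behind
        # intersect_masks (an assumption consistent with these tests): a
        # voxel is kept when the fraction of input masks containing it
        # exceeds the threshold, so threshold=1. is the strict
        # intersection and threshold=0. the union:
        import numpy as np

        _mask_a = np.zeros((4, 4, 1), dtype=bool); _mask_a[2:4, 2:4] = 1
        _mask_b = np.zeros((4, 4, 1), dtype=bool); _mask_b[1:3, 1:3] = 1
        _fraction = (_mask_a.astype(float) + _mask_b) / 2.
        assert (_fraction >= 1.).sum() == 1   # intersection: voxel (2, 2)
        assert (_fraction > 0.).sum() == 7    # union: 4 + 4 - 1 voxels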
assert_array_equal(mask_ab, mask_ab_.get_data()) def test_intersect_masks(): """ Test the intersect_masks function """ # Create dummy masks mask_a = np.zeros((4, 4, 1), dtype=np.bool) mask_a[2:4, 2:4] = 1 mask_a_img = Nifti1Image(mask_a.astype(int), np.eye(4)) # +---+---+---+---+ # | | | | | # +---+---+---+---+ # | | | | | # +---+---+---+---+ # | | | X | X | # +---+---+---+---+ # | | | X | X | # +---+---+---+---+ mask_b = np.zeros((4, 4, 1), dtype=np.bool) mask_b[1:3, 1:3] = 1 mask_b_img = Nifti1Image(mask_b.astype(int), np.eye(4)) # +---+---+---+---+ # | | | | | # +---+---+---+---+ # | | X | X | | # +---+---+---+---+ # | | X | X | | # +---+---+---+---+ # | | | | | # +---+---+---+---+ mask_c = np.zeros((4, 4, 1), dtype=np.bool) mask_c[:, 2] = 1 mask_c[0, 0] = 1 mask_c_img = Nifti1Image(mask_c.astype(int), np.eye(4)) # +---+---+---+---+ # | X | | X | | # +---+---+---+---+ # | | | X | | # +---+---+---+---+ # | | | X | | # +---+---+---+---+ # | | | X | | # +---+---+---+---+ mask_ab = np.zeros((4, 4, 1), dtype=np.bool) mask_ab[2, 2] = 1 mask_ab_ = intersect_masks([mask_a_img, mask_b_img], threshold=1.) assert_array_equal(mask_ab, mask_ab_.get_data()) mask_abc = mask_a + mask_b + mask_c mask_abc_ = intersect_masks([mask_a_img, mask_b_img, mask_c_img], threshold=0., connected=False) assert_array_equal(mask_abc, mask_abc_.get_data()) mask_abc[0, 0] = 0 mask_abc_ = intersect_masks([mask_a_img, mask_b_img, mask_c_img], threshold=0.) assert_array_equal(mask_abc, mask_abc_.get_data()) mask_abc = mask_ab mask_abc_ = intersect_masks([mask_a_img, mask_b_img, mask_c_img], threshold=1.) assert_array_equal(mask_abc, mask_abc_.get_data()) mask_abc[1, 2] = 1 mask_abc[3, 2] = 1 mask_abc_ = intersect_masks([mask_a_img, mask_b_img, mask_c_img]) assert_array_equal(mask_abc, mask_abc_.get_data()) def test_compute_multi_epi_mask(): # Check that an empty list of images creates a meaningful error assert_raises(TypeError, compute_multi_epi_mask, []) # As it calls intersect_masks, we only test resampling here. # Same masks as test_intersect_masks mask_a = np.zeros((4, 4, 1), dtype=np.bool) mask_a[2:4, 2:4] = 1 mask_a_img = Nifti1Image(mask_a.astype(int), np.eye(4)) mask_b = np.zeros((8, 8, 1), dtype=np.bool) mask_b[2:6, 2:6] = 1 mask_b_img = Nifti1Image(mask_b.astype(int), np.eye(4) / 2.) with warnings.catch_warnings(): warnings.simplefilter("ignore", MaskWarning) assert_raises(ValueError, compute_multi_epi_mask, [mask_a_img, mask_b_img]) mask_ab = np.zeros((4, 4, 1), dtype=np.bool) mask_ab[2, 2] = 1 mask_ab_ = compute_multi_epi_mask([mask_a_img, mask_b_img], threshold=1., opening=0, target_affine=np.eye(4), target_shape=(4, 4, 1)) assert_array_equal(mask_ab, mask_ab_.get_data()) def test_error_shape(random_state=42, shape=(3, 5, 7, 11)): # open-ended `if .. 
elif` in masking.unmask rng = np.random.RandomState(random_state) # setup X = rng.randn() mask_img = np.zeros(shape, dtype=np.uint8) mask_img[rng.randn(*shape) > .4] = 1 n_features = (mask_img > 0).sum() mask_img = Nifti1Image(mask_img, np.eye(4)) n_samples = shape[0] X = rng.randn(n_samples, n_features, 2) # 3D X (unmask should raise a TypeError) assert_raises(TypeError, unmask, X, mask_img) X = rng.randn(n_samples, n_features) # Raises an error because the mask is 4D assert_raises(TypeError, unmask, X, mask_img) def test_nifti_masker_empty_mask_warning(): X = Nifti1Image(np.ones((2, 2, 2, 5)), np.eye(4)) assert_raises_regex( ValueError, "The mask is invalid as it is empty: it masks all data", NiftiMasker(mask_strategy="epi").fit_transform, X) PKH5/BHH'nilearn/tests/test_niimg_conversions.py""" Test the niimg_conversions This test file is in nilearn/tests because nosetests seems to ignore modules whose name starts with an underscore """ # Author: Gael Varoquaux, Alexandre Abraham # License: simplified BSD import os import re import tempfile from nose.tools import assert_equal, assert_true import numpy as np from numpy.testing import assert_array_equal, assert_array_almost_equal import nibabel from nibabel import Nifti1Image import nilearn as ni from nilearn import _utils, image from nilearn._utils.exceptions import DimensionError from nilearn._utils import testing, niimg_conversions from nilearn._utils.testing import assert_raises_regex from nilearn._utils.testing import with_memory_profiler from nilearn._utils.testing import assert_memory_less_than from nilearn._utils.niimg_conversions import _iter_check_niimg class PhonyNiimage(nibabel.spatialimages.SpatialImage): def __init__(self): self.data = np.ones((9, 9, 9, 9)) self.my_affine = np.ones((4, 4)) def get_data(self): return self.data def get_affine(self): return self.my_affine @property def shape(self): return self.data.shape def test_check_same_fov(): affine_a = np.eye(4) affine_b = np.eye(4) * 2 shape_a = (2, 2, 2) shape_b = (3, 3, 3) shape_a_affine_a = nibabel.Nifti1Image(np.empty(shape_a), affine_a) shape_a_affine_a_2 = nibabel.Nifti1Image(np.empty(shape_a), affine_a) shape_a_affine_b = nibabel.Nifti1Image(np.empty(shape_a), affine_b) shape_b_affine_a = nibabel.Nifti1Image(np.empty(shape_b), affine_a) shape_b_affine_b = nibabel.Nifti1Image(np.empty(shape_b), affine_b) niimg_conversions._check_same_fov(a=shape_a_affine_a, b=shape_a_affine_a_2, raise_error=True) assert_raises_regex(ValueError, '[ac] and [ac] do not have the same affine', niimg_conversions._check_same_fov, a=shape_a_affine_a, b=shape_a_affine_a_2, c=shape_a_affine_b, raise_error=True) assert_raises_regex(ValueError, '[ab] and [ab] do not have the same shape', niimg_conversions._check_same_fov, a=shape_a_affine_a, b=shape_b_affine_a, raise_error=True) assert_raises_regex(ValueError, '[ab] and [ab] do not have the same affine', niimg_conversions._check_same_fov, a=shape_b_affine_b, b=shape_a_affine_a, raise_error=True) assert_raises_regex(ValueError, '[ab] and [ab] do not have the same shape', niimg_conversions._check_same_fov, a=shape_b_affine_b, b=shape_a_affine_a, raise_error=True) def test_check_niimg_3d(): # check error for non-forced but necessary resampling assert_raises_regex(TypeError, 'nibabel format', _utils.check_niimg, 0) # check error for non-forced but necessary resampling assert_raises_regex(TypeError, 'empty object', _utils.check_niimg, []) # Test dimensionality error img = Nifti1Image(np.zeros((10, 10, 10)), np.eye(4)) assert_raises_regex(TypeError, 
"Input data has incompatible dimensionality: " "Expected dimension is 3D and you provided a list " "of 3D images \(4D\).", _utils.check_niimg_3d, [img, img]) # Check that a filename does not raise an error data = np.zeros((40, 40, 40, 1)) data[20, 20, 20] = 1 data_img = Nifti1Image(data, np.eye(4)) with testing.write_tmp_imgs(data_img, create_files=True) as filename: _utils.check_niimg_3d(filename) def test_check_niimg_4d(): assert_raises_regex(TypeError, 'nibabel format', _utils.check_niimg_4d, 0) assert_raises_regex(TypeError, 'empty object', _utils.check_niimg_4d, []) affine = np.eye(4) img_3d = Nifti1Image(np.ones((10, 10, 10)), affine) # Tests with return_iterator=False img_4d_1 = _utils.check_niimg_4d([img_3d, img_3d]) assert_true(img_4d_1.get_data().shape == (10, 10, 10, 2)) assert_array_equal(img_4d_1.get_affine(), affine) img_4d_2 = _utils.check_niimg_4d(img_4d_1) assert_array_equal(img_4d_2.get_data(), img_4d_2.get_data()) assert_array_equal(img_4d_2.get_affine(), img_4d_2.get_affine()) # Tests with return_iterator=True img_3d_iterator = _utils.check_niimg_4d([img_3d, img_3d], return_iterator=True) img_3d_iterator_length = sum(1 for _ in img_3d_iterator) assert_true(img_3d_iterator_length == 2) img_3d_iterator_1 = _utils.check_niimg_4d([img_3d, img_3d], return_iterator=True) img_3d_iterator_2 = _utils.check_niimg_4d(img_3d_iterator_1, return_iterator=True) for img_1, img_2 in zip(img_3d_iterator_1, img_3d_iterator_2): assert_true(img_1.get_data().shape == (10, 10, 10)) assert_array_equal(img_1.get_data(), img_2.get_data()) assert_array_equal(img_1.get_affine(), img_2.get_affine()) img_3d_iterator_1 = _utils.check_niimg_4d([img_3d, img_3d], return_iterator=True) img_3d_iterator_2 = _utils.check_niimg_4d(img_4d_1, return_iterator=True) for img_1, img_2 in zip(img_3d_iterator_1, img_3d_iterator_2): assert_true(img_1.get_data().shape == (10, 10, 10)) assert_array_equal(img_1.get_data(), img_2.get_data()) assert_array_equal(img_1.get_affine(), img_2.get_affine()) # This should raise an error: a 3D img is given and we want a 4D assert_raises_regex(DimensionError, "Input data has incompatible dimensionality: " "Expected dimension is 4D and you provided a " "3D image.", _utils.check_niimg_4d, img_3d) # Test a Niimg-like object that does not hold a shape attribute phony_img = PhonyNiimage() _utils.check_niimg_4d(phony_img) a = nibabel.Nifti1Image(np.zeros((10, 10, 10)), np.eye(4)) b = np.zeros((10, 10, 10)) c = _utils.check_niimg_4d([a, b], return_iterator=True) assert_raises_regex(TypeError, 'Error encountered while loading image #1', list, c) b = nibabel.Nifti1Image(np.zeros((10, 20, 10)), np.eye(4)) c = _utils.check_niimg_4d([a, b], return_iterator=True) assert_raises_regex( ValueError, 'Field of view of image #1 is different from reference FOV', list, c) def test_check_niimg(): affine = np.eye(4) img_3d = Nifti1Image(np.ones((10, 10, 10)), affine) img_4d = Nifti1Image(np.ones((10, 10, 10, 4)), affine) img_3_3d = [[[img_3d, img_3d]]] img_2_4d = [[img_4d, img_4d]] assert_raises_regex( DimensionError, "Input data has incompatible dimensionality: " "Expected dimension is 2D and you provided " "a list of list of list of 3D images \(6D\)", _utils.check_niimg, img_3_3d, ensure_ndim=2) assert_raises_regex( DimensionError, "Input data has incompatible dimensionality: " "Expected dimension is 4D and you provided " "a list of list of 4D images \(6D\)", _utils.check_niimg, img_2_4d, ensure_ndim=4) def test_check_niimg_wildcards(): tmp_dir = tempfile.tempdir + os.sep nofile_path = "/tmp/nofile" 
nofile_path_wildcards = "/tmp/no*file" wildcards_msg = ("No files matching the entered niimg expression: " "'%s'.\n You may have left wildcards usage " "activated: please set the global constant " "'nilearn.EXPAND_PATH_WILDCARDS' to False to " "deactivate this behavior.") file_not_found_msg = "File not found: '%s'" assert_equal(ni.EXPAND_PATH_WILDCARDS, True) # Check bad filename # Non existing file (with no magic) raise a ValueError exception assert_raises_regex(ValueError, file_not_found_msg % nofile_path, _utils.check_niimg, nofile_path) # Non matching wildcard raises a ValueError exception assert_raises_regex(ValueError, wildcards_msg % re.escape(nofile_path_wildcards), _utils.check_niimg, nofile_path_wildcards) # First create some testing data data_3d = np.zeros((40, 40, 40)) data_3d[20, 20, 20] = 1 img_3d = Nifti1Image(data_3d, np.eye(4)) data_4d = np.zeros((40, 40, 40, 3)) data_4d[20, 20, 20] = 1 img_4d = Nifti1Image(data_4d, np.eye(4)) ####### # Testing with an existing filename with testing.write_tmp_imgs(img_3d, create_files=True) as filename: assert_array_equal(_utils.check_niimg(filename).get_data(), img_3d.get_data()) # No globbing behavior with testing.write_tmp_imgs(img_3d, create_files=True) as filename: assert_array_equal(_utils.check_niimg(filename, wildcards=False).get_data(), img_3d.get_data()) ####### # Testing with an existing filename with testing.write_tmp_imgs(img_4d, create_files=True) as filename: assert_array_equal(_utils.check_niimg(filename).get_data(), img_4d.get_data()) # No globbing behavior with testing.write_tmp_imgs(img_4d, create_files=True) as filename: assert_array_equal(_utils.check_niimg(filename, wildcards=False).get_data(), img_4d.get_data()) ####### # Testing with a glob matching exactly one filename # Using a glob matching one file containing a 3d image returns a 4d image # with 1 as last dimension. 
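    # Illustrative sketch of the path expansion these tests drive: the
    # globbing itself is plain standard library; the nilearn-specific
    # part is only the EXPAND_PATH_WILDCARDS switch that turns it off.
    import glob
    import os

    _pattern = os.path.expanduser("/tmp/no*file")
    if not glob.glob(_pattern):
        # in check_niimg this situation raises a ValueError
        print("No files matching the entered niimg expression: %r"
              % _pattern)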
with testing.write_tmp_imgs(img_3d, create_files=True, use_wildcards=True) as globs: glob_input = tmp_dir + globs assert_array_equal(_utils.check_niimg(glob_input).get_data()[..., 0], img_3d.get_data()) # Disabled globbing behavior should raise an ValueError exception with testing.write_tmp_imgs(img_3d, create_files=True, use_wildcards=True) as globs: glob_input = tmp_dir + globs assert_raises_regex(ValueError, file_not_found_msg % re.escape(glob_input), _utils.check_niimg, glob_input, wildcards=False) ####### # Testing with a glob matching multiple filenames img_4d = _utils.check_niimg_4d((img_3d, img_3d)) with testing.write_tmp_imgs(img_3d, img_3d, create_files=True, use_wildcards=True) as globs: assert_array_equal(_utils.check_niimg(glob_input).get_data(), img_4d.get_data()) ####### # Test when global variable is set to False => no globbing allowed ni.EXPAND_PATH_WILDCARDS = False # Non existing filename (/tmp/nofile) could match an existing one through # globbing but global wildcards variable overrides this feature => raises # a ValueError assert_raises_regex(ValueError, file_not_found_msg % nofile_path, _utils.check_niimg, nofile_path) # Verify wildcards function parameter has no effect assert_raises_regex(ValueError, file_not_found_msg % nofile_path, _utils.check_niimg, nofile_path, wildcards=False) # Testing with an exact filename matching (3d case) with testing.write_tmp_imgs(img_3d, create_files=True) as filename: assert_array_equal(_utils.check_niimg(filename).get_data(), img_3d.get_data()) # Testing with an exact filename matching (4d case) with testing.write_tmp_imgs(img_4d, create_files=True) as filename: assert_array_equal(_utils.check_niimg(filename).get_data(), img_4d.get_data()) # Reverting to default behavior ni.EXPAND_PATH_WILDCARDS = True def test_iter_check_niimgs(): no_file_matching = "No files matching path: %s" affine = np.eye(4) img_4d = Nifti1Image(np.ones((10, 10, 10, 4)), affine) img_2_4d = [[img_4d, img_4d]] for empty in ((), [], (i for i in ()), [i for i in ()]): assert_raises_regex(ValueError, "Input niimgs list is empty.", list, _iter_check_niimg(empty)) nofile_path = "/tmp/nofile" assert_raises_regex(ValueError, no_file_matching % nofile_path, list, _iter_check_niimg(nofile_path)) # Create a test file filename = tempfile.mktemp(prefix="nilearn_test", suffix=".nii", dir=None) img_4d.to_filename(filename) niimgs = list(_iter_check_niimg([filename])) assert_array_equal(niimgs[0].get_data(), _utils.check_niimg(img_4d).get_data()) del img_4d del niimgs os.remove(filename) # Regular case niimgs = list(_iter_check_niimg(img_2_4d)) assert_array_equal(niimgs[0].get_data(), _utils.check_niimg(img_2_4d).get_data()) def _check_memory(list_img_3d): # We intentionally add an offset of memory usage to avoid non trustable # measures with memory_profiler. mem_offset = b'a' * 100 * 1024 ** 2 list(_iter_check_niimg(list_img_3d)) return mem_offset @with_memory_profiler def test_iter_check_niimgs_memory(): # Verify that iterating over a list of images doesn't consume extra # memory. 
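# Illustrative sketch of lazy validation a la _iter_check_niimg: a
# hypothetical generator (ours) that only checks each item when the
# consumer asks for it, so errors surface at iteration time -- which is
# why the assertions in test_iter_check_niimgs wrap the generator in
# list().
def _iter_check_sketch(items):
    for i, item in enumerate(items):
        if not hasattr(item, 'shape'):
            raise TypeError(
                'Error encountered while loading image #%d' % i)
        yield item

_gen = _iter_check_sketch([object()])   # nothing consumed: no error yet
try:
    list(_gen)                          # the error is raised here
except TypeError as e:
    assert 'image #0' in str(e)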
assert_memory_less_than(100, 0.1, _check_memory, [Nifti1Image(np.ones((100, 100, 200)), np.eye(4)) for i in range(10)]) def test_repr_niimgs(): # Test with file path assert_equal(_utils._repr_niimgs("test"), "test") assert_equal(_utils._repr_niimgs(["test", "retest"]), "[test, retest]") # Create phony Niimg with filename affine = np.eye(4) shape = (10, 10, 10) img1 = Nifti1Image(np.ones(shape), affine) assert_equal( _utils._repr_niimgs(img1).replace("10L","10"), ("%s(\nshape=%s,\naffine=%s\n)" % (img1.__class__.__name__, repr(shape), repr(affine)))) _, tmpimg1 = tempfile.mkstemp(suffix='.nii') nibabel.save(img1, tmpimg1) assert_equal( _utils._repr_niimgs(img1), ("%s('%s')" % (img1.__class__.__name__, img1.get_filename()))) def _remove_if_exists(file): if os.path.exists(file): os.remove(file) def test_concat_niimgs(): # create images different in affine and 3D/4D shape shape = (10, 11, 12) affine = np.eye(4) img1 = Nifti1Image(np.ones(shape), affine) img2 = Nifti1Image(np.ones(shape), 2 * affine) img3 = Nifti1Image(np.zeros(shape), affine) img4d = Nifti1Image(np.ones(shape + (2, )), affine) shape2 = (12, 11, 10) img1b = Nifti1Image(np.ones(shape2), affine) shape3 = (11, 22, 33) img1c = Nifti1Image(np.ones(shape3), affine) # Regression test for #601. Dimensionality of first image was not checked # properly _dimension_error_msg = ("Input data has incompatible dimensionality: " "Expected dimension is 4D and you provided " "a list of 4D images \(5D\)") assert_raises_regex(DimensionError, _dimension_error_msg, _utils.concat_niimgs, [img4d], ensure_ndim=4) # check basic concatenation with equal shape/affine concatenated = _utils.concat_niimgs((img1, img3, img1)) assert_raises_regex(DimensionError, _dimension_error_msg, _utils.concat_niimgs, [img1, img4d]) # smoke-test auto_resample concatenated = _utils.concat_niimgs((img1, img1b, img1c), auto_resample=True) assert_true(concatenated.shape == img1.shape + (3, )) # check error for non-forced but necessary resampling assert_raises_regex(ValueError, 'Field of view of image', _utils.concat_niimgs, [img1, img2], auto_resample=False) # test list of 4D niimgs as input tempdir = tempfile.mkdtemp() tmpimg1 = os.path.join(tempdir, '1.nii') tmpimg2 = os.path.join(tempdir, '2.nii') try: nibabel.save(img1, tmpimg1) nibabel.save(img3, tmpimg2) concatenated = _utils.concat_niimgs(os.path.join(tempdir, '*')) assert_array_equal( concatenated.get_data()[..., 0], img1.get_data()) assert_array_equal( concatenated.get_data()[..., 1], img3.get_data()) finally: _remove_if_exists(tmpimg1) _remove_if_exists(tmpimg2) if os.path.exists(tempdir): os.removedirs(tempdir) img5d = Nifti1Image(np.ones((2, 2, 2, 2, 2)), affine) assert_raises_regex(TypeError, 'Concatenated images must be 3D or 4D. 
' 'You gave a list of 5D images', _utils.concat_niimgs, [img5d, img5d]) def nifti_generator(buffer): for i in range(10): buffer.append(Nifti1Image(np.random.random((10, 10, 10)), np.eye(4))) yield buffer[-1] def test_iterator_generator(): # Create a list of random images l = [Nifti1Image(np.random.random((10, 10, 10)), np.eye(4)) for i in range(10)] cc = _utils.concat_niimgs(l) assert_equal(cc.shape[-1], 10) assert_array_almost_equal(cc.get_data()[..., 0], l[0].get_data()) # Same with iteration i = image.iter_img(l) cc = _utils.concat_niimgs(i) assert_equal(cc.shape[-1], 10) assert_array_almost_equal(cc.get_data()[..., 0], l[0].get_data()) # Now, a generator b = [] g = nifti_generator(b) cc = _utils.concat_niimgs(g) assert_equal(cc.shape[-1], 10) assert_equal(len(b), 10) PKlmH^CCnilearn/tests/test_signal.py""" Test the signals module """ # Author: Gael Varoquaux, Alexandre Abraham # License: simplified BSD import os.path import numpy as np from nose.tools import assert_true, assert_false, assert_raises from sklearn.utils.testing import assert_less import nibabel # Use nisignal here to avoid name collisions (using nilearn.signal is # not possible) from nilearn import signal as nisignal from nilearn.signal import clean import scipy.signal def generate_signals(n_features=17, n_confounds=5, length=41, same_variance=True, order="C"): """Generate test signals. All returned signals have no trends at all (to machine precision). Parameters ---------- n_features, n_confounds : int, optional respectively number of features to generate, and number of confounds to use for generating noise signals. length : int, optional number of samples for every signal. same_variance : bool, optional if True, every column of "signals" have a unit variance. Otherwise, a random amplitude is applied. order : "C" or "F" gives the contiguousness of the output arrays. Returns ------- signals : numpy.ndarray, shape (length, n_features) unperturbed signals. noises : numpy.ndarray, shape (length, n_features) confound-based noises. Each column is a signal obtained by linear combination of all confounds signals (below). The coefficients in the linear combination are also random. confounds : numpy.ndarray, shape (length, n_confounds) random signals used as confounds. """ rand_gen = np.random.RandomState(0) # Generate random confounds confounds_shape = (length, n_confounds) confounds = np.ndarray(confounds_shape, order=order) confounds[...] = rand_gen.randn(*confounds_shape) confounds[...] = scipy.signal.detrend(confounds, axis=0) # Compute noise based on confounds, with random factors factors = rand_gen.randn(n_confounds, n_features) noises_shape = (length, n_features) noises = np.ndarray(noises_shape, order=order) noises[...] = np.dot(confounds, factors) noises[...] = scipy.signal.detrend(noises, axis=0) # Generate random signals with random amplitudes signals_shape = noises_shape signals = np.ndarray(signals_shape, order=order) if same_variance: signals[...] = rand_gen.randn(*signals_shape) else: signals[...] = (4. * abs(rand_gen.randn(signals_shape[1])) + 0.5 ) * rand_gen.randn(*signals_shape) signals[...] = scipy.signal.detrend(signals, axis=0) return signals, noises, confounds def generate_trends(n_features=17, length=41): """Generate linearly-varying signals, with zero mean. Parameters ---------- n_features, length : int respectively number of signals and number of samples to generate. Returns ------- trends : numpy.ndarray, shape (length, n_features) output signals, one per column. 
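    Illustrative sketch (not nilearn code) of the consumer-side pattern
    that test_iterator_generator checks above: a generator of 3D volumes
    can be drained into a single 4D array:

        import numpy as np

        def volume_generator(n):
            rng = np.random.RandomState(0)
            for _ in range(n):
                yield rng.random_sample((10, 10, 10))

        stacked = np.concatenate(
            [v[..., np.newaxis] for v in volume_generator(10)], axis=-1)
        assert stacked.shape == (10, 10, 10, 10)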
""" rand_gen = np.random.RandomState(0) trends = scipy.signal.detrend(np.linspace(0, 1.0, length), type="constant") trends = np.repeat(np.atleast_2d(trends).T, n_features, axis=1) factors = rand_gen.randn(n_features) return trends * factors def test_butterworth(): rand_gen = np.random.RandomState(0) n_features = 20000 n_samples = 100 sampling = 100 low_pass = 30 high_pass = 10 # Compare output for different options. # single timeseries data = rand_gen.randn(n_samples) data_original = data.copy() out_single = nisignal.butterworth(data, sampling, low_pass=low_pass, high_pass=high_pass, copy=True) np.testing.assert_almost_equal(data, data_original) nisignal.butterworth(data, sampling, low_pass=low_pass, high_pass=high_pass, copy=False, save_memory=True) np.testing.assert_almost_equal(out_single, data) # multiple timeseries data = rand_gen.randn(n_samples, n_features) data[:, 0] = data_original # set first timeseries to previous data data_original = data.copy() out1 = nisignal.butterworth(data, sampling, low_pass=low_pass, high_pass=high_pass, copy=True) np.testing.assert_almost_equal(data, data_original) # check that multiple- and single-timeseries filtering do the same thing. np.testing.assert_almost_equal(out1[:, 0], out_single) nisignal.butterworth(data, sampling, low_pass=low_pass, high_pass=high_pass, copy=False) np.testing.assert_almost_equal(out1, data) # Test nyquist frequency clipping, issue #482 out1 = nisignal.butterworth(data, sampling, low_pass=50., copy=True) out2 = nisignal.butterworth(data, sampling, low_pass=80., # Greater than nyq frequency copy=True) np.testing.assert_almost_equal(out1, out2) def test_standardize(): rand_gen = np.random.RandomState(0) n_features = 10 n_samples = 17 # Create random signals with offsets a = rand_gen.random_sample((n_samples, n_features)) a += np.linspace(0, 2., n_features) # transpose array to fit _standardize input. # Without trend removal b = nisignal._standardize(a, normalize=True) energies = (b ** 2).sum(axis=0) np.testing.assert_almost_equal(energies, np.ones(n_features)) np.testing.assert_almost_equal(b.sum(axis=0), np.zeros(n_features)) # With trend removal a = np.atleast_2d(np.linspace(0, 2., n_features)).T b = nisignal._standardize(a, detrend=True, normalize=False) np.testing.assert_almost_equal(b, np.zeros(b.shape)) length_1_signal = np.atleast_2d(np.linspace(0, 2., n_features)) np.testing.assert_array_equal(length_1_signal, nisignal._standardize(length_1_signal, normalize=True)) def test_detrend(): """Test custom detrend implementation.""" point_number = 703 features = 17 signals, _, _ = generate_signals(n_features=features, length=point_number, same_variance=True) trends = generate_trends(n_features=features, length=point_number) x = signals + trends + 1 original = x.copy() # Mean removal only (out-of-place) detrended = nisignal._detrend(x, inplace=False, type="constant") assert_true(abs(detrended.mean(axis=0)).max() < 15. * np.finfo(np.float).eps) # out-of-place detrending. Use scipy as a reference implementation detrended = nisignal._detrend(x, inplace=False) detrended_scipy = scipy.signal.detrend(x, axis=0) # "x" must be left untouched np.testing.assert_almost_equal(original, x, decimal=14) assert_true(abs(detrended.mean(axis=0)).max() < 15. 
* np.finfo(np.float).eps) np.testing.assert_almost_equal(detrended_scipy, detrended, decimal=14) # for this to work, there must be no trends at all in "signals" np.testing.assert_almost_equal(detrended, signals, decimal=14) # inplace detrending nisignal._detrend(x, inplace=True) assert_true(abs(x.mean(axis=0)).max() < 15. * np.finfo(np.float).eps) # for this to work, there must be no trends at all in "signals" np.testing.assert_almost_equal(detrended_scipy, detrended, decimal=14) np.testing.assert_almost_equal(x, signals, decimal=14) length_1_signal = x[0] length_1_signal = length_1_signal[np.newaxis, :] np.testing.assert_array_equal(length_1_signal, nisignal._detrend(length_1_signal)) # Mean removal on integers detrended = nisignal._detrend(x.astype(np.int64), inplace=True, type="constant") assert_less(abs(detrended.mean(axis=0)).max(), 20. * np.finfo(np.float).eps) def test_mean_of_squares(): """Test _mean_of_squares.""" n_samples = 11 n_features = 501 # Higher than 500 required signals, _, _ = generate_signals(n_features=n_features, length=n_samples, same_variance=True) # Reference computation var1 = np.copy(signals) var1 **= 2 var1 = var1.mean(axis=0) var2 = nisignal._mean_of_squares(signals) np.testing.assert_almost_equal(var1, var2) # This test is inspired from Scipy docstring of detrend function def test_clean_detrending(): n_samples = 21 n_features = 501 # Must be higher than 500 signals, _, _ = generate_signals(n_features=n_features, length=n_samples) trends = generate_trends(n_features=n_features, length=n_samples) x = signals + trends # This should remove trends x_detrended = nisignal.clean(x, standardize=False, detrend=True, low_pass=None, high_pass=None) np.testing.assert_almost_equal(x_detrended, signals, decimal=13) # This should do nothing x_undetrended = nisignal.clean(x, standardize=False, detrend=False, low_pass=None, high_pass=None) assert_false(abs(x_undetrended - signals).max() < 0.06) def test_clean_frequencies(): sx1 = np.sin(np.linspace(0, 100, 2000)) sx2 = np.sin(np.linspace(0, 100, 2000)) sx = np.vstack((sx1, sx2)).T assert_true(clean(sx, standardize=False, high_pass=0.002, low_pass=None) .max() > 0.1) assert_true(clean(sx, standardize=False, high_pass=0.2, low_pass=None) .max() < 0.01) assert_true(clean(sx, standardize=False, low_pass=0.01).max() > 0.9) assert_raises(ValueError, clean, sx, low_pass=0.4, high_pass=0.5) def test_clean_confounds(): signals, noises, confounds = generate_signals(n_features=41, n_confounds=5, length=45) # No signal: output must be zero. eps = np.finfo(np.float).eps noises1 = noises.copy() cleaned_signals = nisignal.clean(noises, confounds=confounds, detrend=True, standardize=False) assert_true(abs(cleaned_signals).max() < 100. * eps) np.testing.assert_almost_equal(noises, noises1, decimal=12) # With signal: output must be orthogonal to confounds cleaned_signals = nisignal.clean(signals + noises, confounds=confounds, detrend=False, standardize=True) assert_true(abs(np.dot(confounds.T, cleaned_signals)).max() < 1000. * eps) # Same output when a constant confound is added confounds1 = np.hstack((np.ones((45, 1)), confounds)) cleaned_signals1 = nisignal.clean(signals + noises, confounds=confounds1, detrend=False, standardize=True) np.testing.assert_almost_equal(cleaned_signals1, cleaned_signals) # Test detrending. No trend should exist in the output. # Use confounds with a trend. 
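    # Illustrative sketch of the orthogonality property asserted above
    # (cleaned signals are orthogonal to the confounds), using a plain
    # least-squares projection; this is an assumption about what clean()
    # guarantees, not its implementation:
    import numpy as np

    _rng = np.random.RandomState(0)
    _signals = _rng.randn(45, 41)
    _confounds = _rng.randn(45, 5)
    _beta = np.linalg.lstsq(_confounds, _signals)[0]
    _cleaned = _signals - _confounds.dot(_beta)
    assert abs(_confounds.T.dot(_cleaned)).max() < 1e-8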
temp = confounds.T temp += np.arange(confounds.shape[0]) cleaned_signals = nisignal.clean(signals + noises, confounds=confounds, detrend=False, standardize=False) coeffs = np.polyfit(np.arange(cleaned_signals.shape[0]), cleaned_signals, 1) assert_true((abs(coeffs) > 1e-3).any()) # trends remain cleaned_signals = nisignal.clean(signals + noises, confounds=confounds, detrend=True, standardize=False) coeffs = np.polyfit(np.arange(cleaned_signals.shape[0]), cleaned_signals, 1) assert_true((abs(coeffs) < 150. * eps).all()) # trend removed # Test no-op input_signals = 10 * signals cleaned_signals = nisignal.clean(input_signals, detrend=False, standardize=False) np.testing.assert_almost_equal(cleaned_signals, input_signals) cleaned_signals = nisignal.clean(input_signals, detrend=False, standardize=True) np.testing.assert_almost_equal(cleaned_signals.var(axis=0), np.ones(cleaned_signals.shape[1])) # Test with confounds read from a file. Smoke test only (result has # no meaning). current_dir = os.path.split(__file__)[0] signals, _, confounds = generate_signals(n_features=41, n_confounds=3, length=20) filename1 = os.path.join(current_dir, "data", "spm_confounds.txt") filename2 = os.path.join(current_dir, "data", "confounds_with_header.csv") nisignal.clean(signals, detrend=False, standardize=False, confounds=filename1) nisignal.clean(signals, detrend=False, standardize=False, confounds=filename2) nisignal.clean(signals, detrend=False, standardize=False, confounds=confounds[:, 1]) # Use a list containing two filenames, a 2D array and a 1D array nisignal.clean(signals, detrend=False, standardize=False, confounds=[filename1, confounds[:, 0:2], filename2, confounds[:, 2]]) # Test error handling assert_raises(TypeError, nisignal.clean, signals, confounds=1) assert_raises(ValueError, nisignal.clean, signals, confounds=np.zeros(2)) assert_raises(ValueError, nisignal.clean, signals, confounds=np.zeros((2, 2))) assert_raises(ValueError, nisignal.clean, signals, confounds=np.zeros((2, 3, 4))) assert_raises(ValueError, nisignal.clean, signals[:-1, :], confounds=filename1) assert_raises(TypeError, nisignal.clean, signals, confounds=[None]) # Test without standardizing that constant parts of confounds are # accounted for np.testing.assert_almost_equal(nisignal.clean(np.ones((20, 2)), standardize=False, confounds=np.ones(20), detrend=False, ).mean(), np.zeros((20, 2))) def test_high_variance_confounds(): # C and F order might take different paths in the function. Check that the # result is identical. 
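    # Illustrative sketch of the high-variance-confounds idea (an
    # assumption consistent with the invariances tested below: scaling
    # invariance, and sign-indeterminate orthonormal output): keep the
    # voxels whose variance is above a percentile and take the top left
    # singular vectors of that submatrix.
    import numpy as np

    _rng = np.random.RandomState(0)
    _series = _rng.randn(20, 1001)
    _variances = _series.var(axis=0)
    _high = _series[:, _variances > np.percentile(_variances, 98.)]
    _u, _s, _ = np.linalg.svd(_high, full_matrices=False)
    _confounds_out = _u[:, :5]
    np.testing.assert_almost_equal(
        _confounds_out.T.dot(_confounds_out),
        np.identity(5))   # orthonormal columns, up to sign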
n_features = 1001 length = 20 n_confounds = 5 seriesC, _, _ = generate_signals(n_features=n_features, length=length, order="C") seriesF, _, _ = generate_signals(n_features=n_features, length=length, order="F") np.testing.assert_almost_equal(seriesC, seriesF, decimal=13) outC = nisignal.high_variance_confounds(seriesC, n_confounds=n_confounds, detrend=False) outF = nisignal.high_variance_confounds(seriesF, n_confounds=n_confounds, detrend=False) np.testing.assert_almost_equal(outC, outF, decimal=13) # Result must not be influenced by global scaling seriesG = 2 * seriesC outG = nisignal.high_variance_confounds(seriesG, n_confounds=n_confounds, detrend=False) np.testing.assert_almost_equal(outC, outG, decimal=13) assert(outG.shape == (length, n_confounds)) # Changing percentile changes the result seriesG = seriesC outG = nisignal.high_variance_confounds(seriesG, percentile=1., n_confounds=n_confounds, detrend=False) assert_raises(AssertionError, np.testing.assert_almost_equal, outC, outG, decimal=13) assert(outG.shape == (length, n_confounds)) # Check shape of output out = nisignal.high_variance_confounds(seriesG, n_confounds=7, detrend=False) assert(out.shape == (length, 7)) # Adding a trend and detrending should give same results as with no trend. seriesG = seriesC trends = generate_trends(n_features=n_features, length=length) seriesGt = seriesG + trends outG = nisignal.high_variance_confounds(seriesG, detrend=False, n_confounds=n_confounds) outGt = nisignal.high_variance_confounds(seriesGt, detrend=True, n_confounds=n_confounds) # Since sign flips could occur, we look at the absolute values of the # covariance, rather than the absolute difference, and compare this to # the identity matrix np.testing.assert_almost_equal(np.abs(outG.T.dot(outG)), np.identity(outG.shape[1]), decimal=13) # Control for sign flips by taking the min of both possibilities np.testing.assert_almost_equal( np.min(np.abs(np.dstack([outG - outGt, outG + outGt])), axis=2), np.zeros(outG.shape)) PKHnilearn/tests/data/__init__.pyPKHMFF-nilearn/tests/data/pymvpa-exampledata.tar.bz2 [binary data omitted] PKHnCC%nilearn/decoding/space_net_solvers.py""" Regression with spatial priors like TV-L1 and Graph-Net. """ # Author: DOHMATOB Elvis Dopgima, # Gael Varoquaux, # Alexandre Gramfort, # Gaspar Pizarro, # Virgile Fritsch, # Bertrand Thirion, # and others. # License: simplified BSD from math import sqrt import numpy as np from .objective_functions import (spectral_norm_squared, _gradient_id, _logistic_loss_lipschitz_constant, _squared_loss, _squared_loss_grad, _unmask, _logistic_loss_grad, _logistic as _logistic_loss) from .objective_functions import _gradient, _div from .proximal_operators import (_prox_l1, _prox_l1_with_intercept, _prox_tvl1, _prox_tvl1_with_intercept) from .fista import mfista def _squared_loss_and_spatial_grad(X, y, w, mask, grad_weight): """ Computes the squared loss (data fidelity term) + squared l2 norm of gradient (penalty term). Parameters ---------- X : ndarray, shape (n_samples, n_features) Design matrix. y : ndarray, shape (n_samples,) Target / response vector. w : ndarray shape (n_features,) Unmasked, ravelized weights map. grad_weight: float l1_ratio * alpha. Returns ------- float Value of Graph-Net objective.
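Notes ----- Restating the code below: the returned value is .5 * (||Xw - y||^2 + grad_weight * ||grad(w)||^2), i.e. the smooth part of the Graph-Net objective; the L1 term is handled separately, via its proximal operator.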
""" data_section = np.dot(X, w) - y grad_buffer = np.zeros(mask.shape) grad_buffer[mask] = w grad_mask = np.tile(mask, [mask.ndim] + [1] * mask.ndim) grad_section = _gradient(grad_buffer)[grad_mask] return 0.5 * (np.dot(data_section, data_section) + grad_weight * np.dot(grad_section, grad_section)) def _squared_loss_and_spatial_grad_derivative(X, y, w, mask, grad_weight): """ Computes the derivative of _squared_loss_and_spatial_grad. Parameters ---------- X : ndarray, shape (n_samples, n_features) Design matrix. y : ndarray, shape (n_samples,) Target / response vector. w : ndarray shape (n_features,) Unmasked, ravelized weights map. grad_weight: float l1_ratio * alpha Returns ------- ndarray, shape (n_features,) Derivative of _squared_loss_and_spatial_grad function. """ data_section = np.dot(X, w) - y image_buffer = np.zeros(mask.shape) image_buffer[mask] = w return (np.dot(X.T, data_section) - grad_weight * _div(_gradient(image_buffer))[mask]) def _graph_net_data_function(X, w, mask, grad_weight): """ Computes dot([X; grad_weight * grad], w). This function is made for the Lasso-like interpretation of the Graph-Net. Parameters ---------- X : ndarray, shape (n_samples, n_features) Design matrix. y : ndarray, shape (n_samples,) Target / response vector. w : ndarray shape (n_features,) Unmasked, ravelized weights map. grad_weight: float l1_ratio * alpha. Returns ------- ndarray, shape (n_features + mask.ndim * n_samples,) Data-fit term augmented with design matrix augmented with nabla operator (for spatial gradient). """ data_buffer = np.zeros(mask.shape) data_buffer[mask] = w w_g = grad_weight * _gradient(data_buffer) out = np.ndarray(X.shape[0] + mask.ndim * X.shape[1]) out[:X.shape[0]] = X.dot(w) out[X.shape[0]:] = np.concatenate( tuple([w_g[i][mask] for i in range(mask.ndim)])) return out def _graph_net_adjoint_data_function(X, w, adjoint_mask, grad_weight): """ Computes the adjoint of the _graph_net_data_function, that is np.dot([X.T; grad_weight * div], w). This function is made for the Lasso-like interpretation of the Graph-Net. Parameters ---------- X : ndarray, shape (n_samples, n_features) Design matrix. y : ndarray, shape (n_samples,) Target / response vector. w : ndarray shape (n_features,) Unmasked, ravelized weights map. grad_weight: float l1_ratio * alpha. Returns ------- ndarray, shape (n_samples,) Value of adjoint. 
""" n_samples, _ = X.shape out = X.T.dot(w[:n_samples]) div_buffer = np.zeros(adjoint_mask.shape) div_buffer[adjoint_mask] = w[n_samples:] out -= grad_weight * _div(div_buffer)[adjoint_mask[0]] return out def _squared_loss_derivative_lipschitz_constant(X, mask, grad_weight, n_iterations=100): """ Computes the lipschitz constant of the gradient of the smooth part of the Graph-Net regression problem (squared_loss + grad_weight*grad) via power method """ rng = np.random.RandomState(42) a = rng.randn(X.shape[1]) a /= sqrt(np.dot(a, a)) adjoint_mask = np.tile(mask, [mask.ndim] + [1] * mask.ndim) # Since we are putting the coefficient into the matrix, which # is squared in the data loss function, it must be the # square root of the desired weight actual_grad_weight = sqrt(grad_weight) for _ in range(n_iterations): a = _graph_net_adjoint_data_function( X, _graph_net_data_function(X, a, mask, actual_grad_weight), adjoint_mask, actual_grad_weight) a /= sqrt(np.dot(a, a)) lipschitz_constant = np.dot(_graph_net_adjoint_data_function( X, _graph_net_data_function(X, a, mask, actual_grad_weight), adjoint_mask, actual_grad_weight), a) / np.dot(a, a) return lipschitz_constant def _logistic_derivative_lipschitz_constant(X, mask, grad_weight, n_iterations=100): """ Computes the lipschitz constant of the gradient of the smooth part of the Graph-Net classification problem (logistic_loss + grad_weight*grad) via analytical formula on the logistic loss + power method on the smooth part """ # L. constant for the data term (logistic) # data_constant = sp.linalg.norm(X, 2) ** 2 data_constant = _logistic_loss_lipschitz_constant(X) rng = np.random.RandomState(42) a = rng.randn(X.shape[1]) a /= sqrt(np.dot(a, a)) grad_buffer = np.zeros(mask.shape) for _ in range(n_iterations): grad_buffer[mask] = a a = - _div(_gradient(grad_buffer))[mask] / sqrt(np.dot(a, a)) grad_buffer[mask] = a grad_constant = (- np.dot(_div(_gradient(grad_buffer))[mask], a) / np.dot(a, a)) return data_constant + grad_weight * grad_constant def _logistic_data_loss_and_spatial_grad(X, y, w, mask, grad_weight): """Compute the smooth part of the Graph-Net objective, with logistic loss""" grad_buffer = np.zeros(mask.shape) grad_buffer[mask] = w[:-1] grad_mask = np.array([mask for _ in range(mask.ndim)]) grad_section = _gradient(grad_buffer)[grad_mask] return (_logistic_loss(X, y, w) + 0.5 * grad_weight * np.dot(grad_section, grad_section)) def _logistic_data_loss_and_spatial_grad_derivative(X, y, w, mask, grad_weight): """Compute the derivative of _logistic_loss_and_spatial_grad""" image_buffer = np.zeros(mask.shape) image_buffer[mask] = w[:-1] data_section = _logistic_loss_grad(X, y, w) data_section[:-1] = data_section[:-1]\ - grad_weight * _div(_gradient(image_buffer))[mask] return data_section def _graph_net_squared_loss(X, y, alpha, l1_ratio, mask, init=None, max_iter=1000, tol=1e-4, callback=None, lipschitz_constant=None, verbose=0): """Computes a solution for the Graph-Net regression problem. This function invokes the mfista backend (from fista.py) to solve the underlying optimization problem. Returns ------- w : ndarray, shape (n_features,) Solution vector. solver_info : float Solver information, for warm start. objective : array of floats Objective function (fval) computed on every iteration. """ _, n_features = X.shape # misc model_size = n_features l1_weight = alpha * l1_ratio grad_weight = alpha * (1. 
- l1_ratio) if lipschitz_constant is None: lipschitz_constant = _squared_loss_derivative_lipschitz_constant( X, mask, grad_weight) # it's always a good idea to use something a bit bigger lipschitz_constant *= 1.05 # smooth part of energy, and gradient thereof def f1(w): return _squared_loss_and_spatial_grad(X, y, w, mask, grad_weight) def f1_grad(w): return _squared_loss_and_spatial_grad_derivative(X, y, w, mask, grad_weight) # prox of nonsmooth part of energy def f2(w): return np.sum(np.abs(w)) * l1_weight def f2_prox(w, l, *args, **kwargs): return _prox_l1(w, l * l1_weight), dict(converged=True) # total energy (smooth + nonsmooth) def total_energy(w): return f1(w) + f2(w) return mfista( f1_grad, f2_prox, total_energy, lipschitz_constant, model_size, dgap_factor=(.1 + l1_ratio) ** 2, callback=callback, tol=tol, max_iter=max_iter, verbose=verbose, init=init) def _graph_net_logistic(X, y, alpha, l1_ratio, mask, init=None, max_iter=1000, tol=1e-4, callback=None, verbose=0, lipschitz_constant=None): """Computes a solution for the Graph-Net classification problem, with response vector in {-1, 1}^n_samples. This function invokes the mfista backend (from fista.py) to solve the underlying optimization problem. Returns ------- w : ndarray of shape (n_features,) The solution vector (where `n_features` is the size of the support of the mask). solver_info : dict Solver information for warm starting. See fista.py.mfista(...) function for detailed documentation. objective : array of floats Cost function (fval) computed on every iteration. """ _, n_features = X.shape # misc model_size = n_features + 1 l1_weight = alpha * l1_ratio grad_weight = alpha * (1 - l1_ratio) if lipschitz_constant is None: lipschitz_constant = _logistic_derivative_lipschitz_constant( X, mask, grad_weight) # it's always a good idea to use something a bit bigger lipschitz_constant *= 1.1 # smooth part of energy, and gradient thereof def f1(w): return _logistic_data_loss_and_spatial_grad(X, y, w, mask, grad_weight) def f1_grad(w): return _logistic_data_loss_and_spatial_grad_derivative(X, y, w, mask, grad_weight) # prox of nonsmooth part of energy (accounts for the intercept) def f2(w): return np.sum(np.abs(w[:-1])) * l1_weight def f2_prox(w, l, *args, **kwargs): return _prox_l1_with_intercept( w, l * l1_weight), dict(converged=True) # total energy (smooth + nonsmooth) def total_energy(w): return f1(w) + f2(w) # finally, run the solver proper return mfista( f1_grad, f2_prox, total_energy, lipschitz_constant, model_size, dgap_factor=(.1 + l1_ratio) ** 2, callback=callback, tol=tol, max_iter=max_iter, verbose=verbose, init=init) def _tvl1_objective_from_gradient(gradient): """Computes the TV-l1 objective function from the gradient. Parameters ---------- gradient : ndarray, shape (4, nx, ny, nz) Precomputed "gradient + id" array. Returns ------- float Value of TV-L1 penalty. """ tv_term = np.sum(np.sqrt(np.sum(gradient[:-1] * gradient[:-1], axis=0))) l1_term = np.abs(gradient[-1]).sum() return l1_term + tv_term def _tvl1_objective(X, y, w, alpha, l1_ratio, mask, loss="mse"): """The TV-L1 regression / classification objective function. Returns ------- float Value of the TV-L1 objective, i.e. data loss plus TV-L1 penalty.
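Notes ----- Restating the code below: the returned value is loss(X, y, w) + alpha * (l1_ratio * ||w||_1 + (1 - l1_ratio) * TV(w)), where the loss is .5 * ||y - Xw||^2 for loss="mse" and the logistic loss otherwise (in which case w[-1] is the intercept and is excluded from the penalty); the l1_ratio and (1 - l1_ratio) weights are folded into the "gradient + id" array by _gradient_id.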
""" loss = loss.lower() if loss not in ['mse', 'logistic']: raise ValueError( "loss must be one of 'mse' or 'logistic'; got '%s'" % loss) if loss == "mse": out = _squared_loss(X, y, w) else: out = _logistic_loss(X, y, w) w = w[:-1] grad_id = _gradient_id(_unmask(w, mask), l1_ratio=l1_ratio) out += alpha * _tvl1_objective_from_gradient(grad_id) return out def tvl1_solver(X, y, alpha, l1_ratio, mask, loss=None, max_iter=100, lipschitz_constant=None, init=None, prox_max_iter=5000, tol=1e-4, callback=None, verbose=1): """Minimizes empirical risk for TV-L1 penalized models. Can handle least squares (mean squared error --a.k.a mse) or logistic regression. The same solver works for both of these losses. This function invokes the mfista backend (from fista.py) to solver the underlying optimization problem. Parameters ---------- X : ndarray, shape (n_samples, n_features) Design matrix. y : ndarray, shape (n_samples,) Target / response vector. alpha : float Constant that scales the overall regularization term. Defaults to 1.0. l1_ratio : float in the interval [0, 1]; optinal (default .5) Constant that mixes L1 and TV penalization. l1_ratio == 0 : just smooth. l1_ratio == 1 : just lasso. Defaults to 0.5. mask : ndarray, shape (nx, ny, nz) The support of this mask defines the ROIs being considered in the problem. max_iter : int Defines the iterations for the solver. Defaults to 100 prox_max_iter : int, optional (default 5000) Maximum number of iterations for inner FISTA loop in which the prox of TV is approximated. tol : float Defines the tolerance for convergence. Defaults to 1e-4. loss : string Loss model for regression. Can be "mse" (for squared loss) or "logistic" (for logistic loss). lipschitz_constant : float, optional (default None) Lipschitz constant (i.e an upper bound of) of gradient of smooth part of the energy being minimized. If no value is specified (None), then it will be calculated. callback : callable(dict) -> bool, optional (default None) Function called at the end of every energy descendent iteration of the solver. If it returns True, the loop breaks. Returns ------- w : ndarray, shape (n_features,) The solution vector (Where `w_size` is the size of the support of the mask.) objective : array of floats Objective function (fval) computed on every iteration. solver_info: float Solver information, for warm start. """ # sanitize loss if loss not in ["mse", "logistic"]: raise ValueError("'%s' loss not implemented. 
Should be 'mse' or " "'logistic'." % loss) # shape of image box flat_mask = mask.ravel() volume_shape = mask.shape # in logistic regression, we fit the intercept explicitly w_size = X.shape[1] + int(loss == "logistic") def unmaskvec(w): if loss == "mse": return _unmask(w, mask) else: return np.append(_unmask(w[:-1], mask), w[-1]) def maskvec(w): if loss == "mse": return w[flat_mask] else: return np.append(w[:-1][flat_mask], w[-1]) # function to compute derivative of f1 def f1_grad(w): if loss == "logistic": return _logistic_loss_grad(X, y, w) else: return _squared_loss_grad(X, y, w) # function to compute total energy (i.e. smooth (f1) + nonsmooth (f2) parts) def total_energy(w): return _tvl1_objective(X, y, w, alpha, l1_ratio, mask, loss=loss) # Lipschitz constant of f1_grad if lipschitz_constant is None: if loss == "mse": lipschitz_constant = 1.05 * spectral_norm_squared(X) else: lipschitz_constant = 1.1 * _logistic_loss_lipschitz_constant(X) # proximal operator of the nonsmooth proximable part of the energy (f2) if loss == "mse": def f2_prox(w, stepsize, dgap_tol, init=None): out, info = _prox_tvl1( unmaskvec(w), weight=alpha * stepsize, l1_ratio=l1_ratio, dgap_tol=dgap_tol, init=unmaskvec(init), max_iter=prox_max_iter, verbose=verbose) return maskvec(out.ravel()), info else: def f2_prox(w, stepsize, dgap_tol, init=None): out, info = _prox_tvl1_with_intercept( unmaskvec(w), volume_shape, l1_ratio, alpha * stepsize, dgap_tol, prox_max_iter, init=_unmask( init[:-1], mask) if init is not None else None, verbose=verbose) return maskvec(out.ravel()), info # invoke m-FISTA solver w, obj, init = mfista( f1_grad, f2_prox, total_energy, lipschitz_constant, w_size, dgap_factor=(.1 + l1_ratio) ** 2, tol=tol, init=init, verbose=verbose, max_iter=max_iter, callback=callback) return w, obj, init PKHZnilearn/decoding/__init__.py""" Decoding tools and algorithms. """ from .searchlight import SearchLight from .space_net import SpaceNetClassifier, SpaceNetRegressor __all__ = ['SearchLight', 'SpaceNetClassifier', 'SpaceNetRegressor'] PKH" nilearn/decoding/fista.py""" Generic FISTA for solving TV-L1, Graph-Net, etc., problems. For problems on which the prox of the nonsmooth term cannot be computed in closed form (e.g. TV-L1), we approximate the prox using an inner FISTA loop. """ # Author: DOHMATOB Elvis Dopgima, # PIZARRO Gaspar, # VAROQUAUX Gael, # GRAMFORT Alexandre, # THIRION Bertrand # License: simplified BSD from math import sqrt import numpy as np from scipy import linalg from sklearn.utils import check_random_state def _check_lipschitz_continuous(f, ndim, lipschitz_constant, n_trials=10, random_state=42): """Empirically check the Lipschitz continuity of a function. If this test is passed, then we are empirically confident in the Lipschitz continuity of the function with respect to the given constant `lipschitz_constant`. This confidence increases with the `n_trials` parameter. Parameters ---------- f : callable, The function to be checked for Lipschitz continuity. `f` takes a vector of floats as its unique argument. The size of the input vector is determined by `ndim`. ndim : int, Dimension of the input of the function to be checked for Lipschitz continuity (i.e. it corresponds to the size of the vector that `f` takes as an argument). lipschitz_constant : float, Constant associated with the Lipschitz continuity. n_trials : int, Number of tests performed when assessing the Lipschitz continuity of function `f`. The more tests, the more confident we are in the Lipschitz continuity of `f` if the test passes.
random_state : int, optional (default 42) Random state for initializing the local rng. Raises ------ RuntimeError """ rng = check_random_state(random_state) for x in rng.randn(n_trials, ndim): for y in rng.randn(n_trials, ndim): a = linalg.norm(f(x).ravel() - f(y).ravel(), 2) b = lipschitz_constant * linalg.norm(x - y, 2) if a > b: raise RuntimeError("Counterexample: (%s, %s)" % (x, y)) def mfista(f1_grad, f2_prox, total_energy, lipschitz_constant, w_size, dgap_tol=None, init=None, max_iter=1000, tol=1e-4, check_lipschitz=False, dgap_factor=None, callback=None, verbose=2): """Generic FISTA solver. Minimizes a sum `f + g` of two convex functions f (smooth) and g (proximable nonsmooth). Parameters ---------- f1_grad : callable(w) -> np.array Gradient of the smooth part of the energy. f2_prox : callable(w, stepsize, dgap_tol, init?) -> ndarray, dict Proximal operator of the non-smooth part of the energy (f2). The returned dict should have a key "converged", whose value indicates whether the prox computation converged. total_energy : callable(w) -> float Total energy (i.e. smooth (f1) + nonsmooth (f2) parts). lipschitz_constant : float Lipschitz constant of `f1_grad`, i.e. of the gradient of the smooth part. w_size : int Size of the solution. f1, f2, f1_grad, f2_prox (fixed l, tol) must accept a w such that w.shape = (w_size,). tol : float Tolerance on the (primal) cost function. dgap_tol : float, optional (default None) Tolerance on the computation of the proximal operator, passed (scaled by `dgap_factor`) as the third argument of `f2_prox`. If None, it is initialized from `init` (np.inf by default) and then tightened adaptively as the iterations proceed. init : dict-like, optional (default None) Dictionary of initialization parameters. Possible keys are 'w', 'stepsize', 'z', 't', 'dgap_factor', etc. callback : callable(dict) -> bool Function called on every iteration. If it returns True, then the loop breaks. max_iter : int Maximum number of iterations for the solver. Returns ------- w : ndarray, shape (w_size,) A minimizer for `f + g`. cost : array of floats Cost function (fval) computed on every iteration. solver_info : dict Solver information, for warm starting. Notes ----- A motivation for the choice of FISTA as a solver for TV-L1 penalized problems emerged in the paper: Elvis Dohmatob, Alexandre Gramfort, Bertrand Thirion, Gael Varoquaux, "Benchmarking solvers for TV-L1 least-squares and logistic regression in brain imaging". Pattern Recognition in Neuroimaging (PRNI), Jun 2014, Tubingen, Germany. IEEE """ # initialization if init is None: init = dict() w = init.get('w', np.zeros(w_size)) z = init.get("z", w.copy()) t = init.get("t", 1.) stepsize = init.get("stepsize", 1. / lipschitz_constant) if dgap_tol is None: dgap_tol = init.get('dgap_tol', np.inf) if dgap_factor is None: dgap_factor = init.get("dgap_factor", 1.) # check Lipschitz continuity of the gradient of the smooth part if check_lipschitz: _check_lipschitz_continuous(f1_grad, w_size, lipschitz_constant) # aux variables old_energy = total_energy(w) energy_delta = np.inf best_w = w.copy() best_energy = old_energy best_dgap_tol = dgap_tol ista_step = False best_z = z.copy() best_t = t prox_info = dict(converged=True) stepsize = 1.
/ lipschitz_constant history = [] w_old = w.copy() # FISTA loop for i in range(max_iter): history.append(old_energy) w_old[:] = w # invoke callback if verbose: print('mFISTA: Iteration % 2i/%2i: E = %7.4e, dE % 4.4e' % ( i + 1, max_iter, old_energy, energy_delta)) if callback and callback(locals()): break if np.abs(energy_delta) < tol: if verbose: print("\tConverged (|dE| < %g)" % tol) break # forward (gradient) step gradient_buffer = f1_grad(z) # backward (prox) step for _ in range(10): w, prox_info = f2_prox(z - stepsize * gradient_buffer, stepsize, dgap_factor * dgap_tol, init=w) energy = total_energy(w) if ista_step and prox_info['converged'] and old_energy <= energy: # Even when doing ISTA steps, the energy is not decreasing. # Thus we need a tighter dual gap on the prox_tv. # This corresponds to a line search on the dual-gap # tolerance. dgap_factor *= .2 if verbose: print("decreased dgap_tol") else: break # energy house-keeping energy_delta = old_energy - energy old_energy = energy # z update if energy_delta < 0.: # M-FISTA strategy: rewind and switch temporarily to an ISTA step z[:] = w_old w[:] = w_old ista_step = True if verbose: print('Monotone FISTA: switching to ISTA') else: if ista_step: z = w else: t0 = t t = 0.5 * (1. + sqrt(1. + 4. * t * t)) z = w + ((t0 - 1.) / t) * (w - w_old) ista_step = False # misc if energy_delta != 0.: # In theory, one should decrease the tolerance on the dual gap as # 1/i**4 (see Mark Schmidt, Nicolas Le Roux and Francis Bach, # NIPS 2011). In practice, empirically I (Gael) have found that # such a sharp decrease was counter-productive in terms of # computation time, as it leads to too much time spent in the # prox_tvl1 calls. # # For this reason, we rely more on the linesearch-like # strategy to set the dgap_tol. dgap_tol = abs(energy_delta) / (i + 1.) # dgap_tol house-keeping if energy < best_energy: best_energy = energy best_w[:] = w best_z[:] = z best_t = t best_dgap_tol = dgap_tol init = dict(w=best_w.copy(), z=best_z, t=best_t, dgap_tol=best_dgap_tol, stepsize=stepsize) return best_w, history, init PKHF=XX'nilearn/decoding/objective_functions.py """ Common functions and base classes. """ # Author: DOHMATOB Elvis Dopgima, # PIZARRO Gaspar, # VAROQUAUX Gael, # GRAMFORT Alexandre, # PEDREGOSA Fabian # License: simplified BSD from functools import partial import numpy as np from scipy import linalg def spectral_norm_squared(X): """Computes the square of the operator 2-norm (spectral norm) of X. This corresponds to the Lipschitz constant of the gradient of the squared-loss function: w -> .5 * ||y - Xw||^2 Parameters ---------- X : ndarray, shape (n_samples, n_features) Design matrix. Returns ------- lipschitz_constant : float The square of the spectral norm of X. """ # On big matrices like those that we have in neuroimaging, svdvals # is faster than a power iteration (even when using arpack) return linalg.svdvals(X)[0] ** 2 def _logistic_loss_lipschitz_constant(X): """Compute the Lipschitz constant (upper bound) for the gradient of the logistic sum: w -> \sum_i log(1+exp(-y_i*(x_i*w + v))) """ # N.B.: we handle the intercept! X = np.hstack((X, np.ones((X.shape[0], 1)))) return spectral_norm_squared(X) def _squared_loss(X, y, w, compute_energy=True, compute_grad=False): """Compute the squared loss (MSE), and optionally its gradient too. The cost / energy function is MSE = .5 * ||y - Xw||^2 (note that, despite the name, no (1 / n_samples) factor is applied).
Parameters ---------- X : ndarray, shape (n_samples, n_features) Design matrix. y : ndarray, shape (n_samples,) Target / response vector. w : ndarray shape (n_features,) Unmasked, ravelized weights map. compute_energy : bool, optional (default True) If set then energy is computed, otherwise only gradient is computed. compute_grad : bool, optional (default False) If set then gradient is computed, otherwise only energy is computed. Returns ------- energy : float Energy (returned if `compute_energy` is set). gradient : ndarray, shape (n_features,) Gradient of energy (returned if `compute_grad` is set). """ if not (compute_energy or compute_grad): raise RuntimeError( "At least one of compute_energy or compute_grad must be True.") residual = np.dot(X, w) - y # compute energy if compute_energy: energy = .5 * np.dot(residual, residual) if not compute_grad: return energy grad = np.dot(X.T, residual) if not compute_energy: return grad return energy, grad def _tv_l1_from_gradient(spatial_grad): """Energy contribution due to penalized gradient, in TV-L1 model. Parameters ---------- spatial_grad : ndarray, shape (4, nx, ny, nx) precomputed "gradient + id" array Returns ------- out : float Energy contribution due to penalized gradient. """ tv_term = np.sum(np.sqrt(np.sum(spatial_grad[:-1] * spatial_grad[:-1], axis=0))) l1_term = np.abs(spatial_grad[-1]).sum() return l1_term + tv_term def _div_id(grad, l1_ratio=.5): """Compute divergence + id of image gradient + id Parameters ---------- grad : ndarray, shape (4, nx, ny, nz, ...) where `img_shape` is the shape of the brain bounding box, and n_axes = len(img_shape). l1_ratio : float in the interval [0, 1]; optional (default .5) Constant that mixes L1 and spatial prior terms in the penalization. Returns ------- res : ndarray, shape (nx, ny, nz, ...) The computed divergence + id operator. Raises ------ RuntimeError """ if not (0. <= l1_ratio <= 1.): raise RuntimeError( "l1_ratio must be in the interval [0, 1]; got %s" % l1_ratio) res = np.zeros(grad.shape[1:]) # the divergence part for d in range((grad.shape[0] - 1)): this_grad = np.rollaxis(grad[d], d) this_res = np.rollaxis(res, d) this_res[:-1] += this_grad[:-1] this_res[1:-1] -= this_grad[:-2] if len(this_grad) > 1: this_res[-1] -= this_grad[-2] res *= (1. - l1_ratio) # the identity part res -= l1_ratio * grad[-1] return res def _gradient_id(img, l1_ratio=.5): """Compute gradient + id of an image Parameters ---------- img : ndarray, shape (nx, ny, nz, ...) N-dimensional image l1_ratio : float in the interval [0, 1]; optional (default .5) Constant that mixes L1 and spatial prior terms in the penalization. Returns ------- gradient : ndarray, shape (4, nx, ny, nz, ...). Spatial gradient of the image: the i-th component along the first axis is the gradient along the i-th axis of the original array img. Raises ------ RuntimeError """ if not (0. <= l1_ratio <= 1.): raise RuntimeError( "l1_ratio must be in the interval [0, 1]; got %s" % l1_ratio) shape = [img.ndim + 1] + list(img.shape) gradient = np.zeros(shape, dtype=np.float) # the gradient part: 'Clever' code to have a view of the gradient # with dimension i stop at -1 slice_all = [0, slice(None, -1)] for d in range(img.ndim): gradient[slice_all] = np.diff(img, axis=d) slice_all[0] = d + 1 slice_all.insert(1, slice(None)) gradient[:-1] *= (1. - l1_ratio) # the identity part gradient[-1] = l1_ratio * img return gradient def _unmask(w, mask): """Unmask an image into whole brain, with off-mask voxels set to 0. 
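For instance, with a small 2 x 2 mask (an illustrative doctest): >>> import numpy as np >>> mask = np.array([[True, False], [False, True]]) >>> _unmask(np.array([1., 2.]), mask).tolist() [[1.0, 0.0], [0.0, 2.0]]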
Parameters ---------- w : ndarray, shape (n_features,) The image to be unmasked. mask : ndarray, shape (nx, ny, nz) The mask used in the unmasking operation. It is required that mask.sum() == n_features. Returns ------- out : 3D ndarray of same shape as `mask` The unmasked version of `w`. """ if mask.sum() != len(w): raise ValueError("Expecting mask.sum() == len(w).") out = np.zeros(mask.shape, dtype=w.dtype) out[mask] = w return out def _sigmoid(t, copy=True): """Helper function: return 1. / (1 + np.exp(-t))""" if copy: t = np.copy(t) t *= -1. t = np.exp(t, t) t += 1. t = np.reciprocal(t, t) return t def _logistic(X, y, w): """Compute the logistic loss of the data: sum(log(1 + exp(-y * (Xw + b)))), where b = w[-1] is the intercept. Parameters ---------- X : ndarray, shape (n_samples, n_features) Design matrix. y : ndarray, shape (n_samples,) Target / response vector. Each entry must be +1 or -1. w : ndarray, shape (n_features + 1,) Unmasked, ravelized input map, with the intercept as last entry. Returns ------- energy : float Energy contribution due to the logistic data-fit term. """ z = np.dot(X, w[:-1]) + w[-1] yz = y * z idx = yz > 0 out = np.empty_like(yz) out[idx] = np.log1p(np.exp(-yz[idx])) out[~idx] = -yz[~idx] + np.log1p(np.exp(yz[~idx])) out = out.sum() return out def _logistic_loss_grad(X, y, w): """Computes the derivative of the logistic loss.""" z = np.dot(X, w[:-1]) + w[-1] yz = y * z z = _sigmoid(yz, copy=False) z0 = (z - 1.) * y grad = np.empty(w.shape) grad[:-1] = np.dot(X.T, z0) grad[-1] = np.sum(z0) return grad # gradient of the squared loss function _squared_loss_grad = partial(_squared_loss, compute_energy=False, compute_grad=True) def _gradient(w): """Pure spatial gradient""" return _gradient_id(w, l1_ratio=0.)[:-1] # pure nabla def _div(v): """Pure spatial divergence""" return _div_id(np.vstack((v, [np.zeros_like(v[0])])), l1_ratio=0.) PKHH*H*&nilearn/decoding/proximal_operators.py"""Implementation of multiple proximal operators for TV-L1, Graph-Net, etc. """ # Author: DOHMATOB Elvis Dopgima, # VAROQUAUX Gael, # GRAMFORT Alexandre, # License: simplified BSD from math import sqrt import numpy as np from .objective_functions import _tv_l1_from_gradient, _div_id, _gradient_id def _prox_l1(y, alpha, copy=True): """Proximity operator for the L1 norm.""" shrink = np.zeros(y.shape) if copy: y = y.copy() y_nz = y.nonzero() shrink[y_nz] = np.maximum(1 - alpha / np.abs(y[y_nz]), 0) y *= shrink return y def _prox_l1_with_intercept(x, tau): """The same as _prox_l1, but leaving the last (intercept) component untouched.""" x[:-1] = _prox_l1(x[:-1], tau) return x def _projector_on_tvl1_dual(grad, l1_ratio): """Projection onto the dual ball of the TV-l1 penalty. Modifies IN PLACE the gradient + id to project it onto the l21 unit ball in the gradient direction and the L1 ball in the identity direction. """ # The l21 ball for the gradient direction if l1_ratio < 1.: # infer number of axes and include an additional axis if l1_ratio > 0 end = len(grad) - int(l1_ratio > 0.) norm = np.sqrt(np.sum(grad[:end] * grad[:end], 0)) norm.clip(1., out=norm) # set everything < 1 to 1 for grad_comp in grad[:end]: grad_comp /= norm # The L1 ball for the identity direction if l1_ratio > 0.: norm = np.abs(grad[-1]) norm.clip(1., out=norm) grad[-1] /= norm return grad def _dual_gap_prox_tvl1(input_img_norm, new, gap, weight, l1_ratio=1.): """ Dual gap of total variation denoising see "Total variation regularization for fMRI-based prediction of behavior", by Michel et al.
(2011) for a derivation of the dual gap """ tv_new = _tv_l1_from_gradient(_gradient_id(new, l1_ratio=l1_ratio)) gap = gap.ravel() d_gap = np.dot(gap, gap) + 2 * weight * tv_new - input_img_norm + ( new * new).sum() return 0.5 * d_gap def _objective_function_prox_tvl1( input_img, output_img, gradient, weight): diff = (input_img - output_img).ravel() return (.5 * (diff * diff).sum() + weight * _tv_l1_from_gradient(gradient)) def _prox_tvl1(input_img, l1_ratio=.05, weight=50, dgap_tol=5.e-5, x_tol=None, max_iter=200, check_gap_frequency=4, val_min=None, val_max=None, verbose=False, fista=True, init=None): """ Compute the TV-L1 proximal (ie total-variation +l1 denoising) on 3d images. Find the argmin `res` of 1/2 * ||im - res||^2 + weight * TVl1(res), Parameters ---------- input_img : ndarray of floats (2-d or 3-d) Input data to be denoised. `im` can be of any numeric type, but it is cast into an ndarray of floats for the computation of the denoised image. weight : float, optional Denoising weight. The greater ``weight``, the more denoising (at the expense of fidelity to ``input``) dgap_tol : float, optional Precision required. The distance to the exact solution is computed by the dual gap of the optimization problem and rescaled by the squared l2 norm of the image (for contrast invariance). x_tol : float or None, optional The maximal relative difference between input and output. If specified, this specifies a stopping criterion on x, rather than the dual gap. max_iter : int, optional Maximal number of iterations used for the optimization. val_min : None or float, optional An optional lower bound constraint on the reconstructed image. val_max : None or float, optional An optional upper bound constraint on the reconstructed image. verbose : bool, optional If True, print the dual gap of the optimization fista : bool, optional If True, uses a FISTA loop to perform the optimization. if False, uses an ISTA loop. callback : callable Callable that takes the local variables at each steps. Useful for tracking. init : array of shape shape as im Starting point for the optimization. check_gap_frequency : int, optional (default 4) Frequency at which duality gap is checked for convergence. Returns ------- out : ndarray TV-l1-denoised image. Notes ----- The principle of total variation denoising is explained in http://en.wikipedia.org/wiki/Total_variation_denoising The principle of total variation denoising is to minimize the total variation of the image, which can be roughly described as the integral of the norm of the image gradient. Total variation denoising tends to produce "cartoon-like" images, that is, piecewise-constant images. This function implements the FISTA (Fast Iterative Shrinkage Thresholding Algorithm) algorithm of Beck et Teboulle, adapted to total variation denoising in "Fast gradient-based algorithms for constrained total variation image denoising and deblurring problems" (2009). For details on implementing the bound constraints, read the aforementioned Beck and Teboulle paper. """ weight = float(weight) input_img_flat = input_img.view() input_img_flat.shape = input_img.size input_img_norm = np.dot(input_img_flat, input_img_flat) if not input_img.dtype.kind == 'f': input_img = input_img.astype(np.float) shape = [len(input_img.shape) + 1] + list(input_img.shape) grad_im = np.zeros(shape) grad_aux = np.zeros(shape) t = 1. 
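# The constant computed on the next line upper-bounds the squared spectral # norm of the "gradient + id" operator built by _gradient_id: each # one-dimensional finite-difference operator has squared norm at most 4, # there are `ndim` of them weighted by (1 - l1_ratio), plus the identity # part weighted by l1_ratio, giving 4 * ndim * (1 - l1_ratio)**2 + # l1_ratio**2; the 1.1 factor adds a safety margin.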
i = 0 lipschitz_constant = 1.1 * (4 * input_img.ndim * (1 - l1_ratio) ** 2 + l1_ratio ** 2) # negated_output is the negated primal variable in the optimization # loop if init is None: negated_output = -input_img else: negated_output = -init # Clipping values for the inner loop negated_val_min = np.inf negated_val_max = -np.inf if val_min is not None: negated_val_min = -val_min if val_max is not None: negated_val_max = -val_max if True or (val_min is not None or val_max is not None): # With bound constraints, the stopping criterion is on the # evolution of the output negated_output_old = negated_output.copy() grad_tmp = None old_dgap = np.inf dgap = np.inf # A boolean to control if we are going to do a fista step fista_step = fista while i < max_iter: grad_tmp = _gradient_id(negated_output, l1_ratio=l1_ratio) grad_tmp *= 1. / (lipschitz_constant * weight) grad_aux += grad_tmp grad_tmp = _projector_on_tvl1_dual( grad_aux, l1_ratio ) # Careful, in the next few lines, grad_tmp and grad_aux are a # view on the same array, as _projector_on_tvl1_dual returns a view # on the input array t_new = 0.5 * (1. + sqrt(1. + 4. * t * t)) t_factor = (t - 1.) / t_new if fista_step: grad_aux = (1 + t_factor) * grad_tmp - t_factor * grad_im else: grad_aux = grad_tmp grad_im = grad_tmp t = t_new gap = weight * _div_id(grad_aux, l1_ratio=l1_ratio) # Compute the primal variable negated_output = gap - input_img if (val_min is not None or val_max is not None): negated_output = negated_output.clip(negated_val_max, negated_val_min, out=negated_output) if (i % check_gap_frequency) == 0: if x_tol is None: # Stopping criterion based on the dual gap if val_min is not None or val_max is not None: # We need to recompute the dual variable gap = negated_output + input_img old_dgap = dgap dgap = _dual_gap_prox_tvl1(input_img_norm, -negated_output, gap, weight, l1_ratio=l1_ratio) if verbose: print('\tProxTVl1: Iteration % 2i, dual gap: % 6.3e' % ( i, dgap)) if dgap < dgap_tol: break if old_dgap < dgap: # M-FISTA strategy: switch to an ISTA to have # monotone convergence fista_step = False elif fista: fista_step = True else: # Stopping criterion based on x_tol diff = np.max(np.abs(negated_output_old - negated_output)) diff /= np.max(np.abs(negated_output)) if verbose: gid = _gradient_id(negated_output, l1_ratio=l1_ratio) energy = _objective_function_prox_tvl1(input_img, -negated_output, gid, weight) print('\tProxTVl1 iteration % 2i, relative difference:' ' % 6.3e, energy: % 6.3e' % (i, diff, energy)) if diff < x_tol: break negated_output_old = negated_output i += 1 # Compute the primal variable, however, here we must use the ista # value, not the fista one output = input_img - weight * _div_id(grad_im, l1_ratio=l1_ratio) if (val_min is not None or val_max is not None): output = output.clip(val_min, val_max, out=output) return output, dict(converged=(i < max_iter)) def _prox_tvl1_with_intercept(w, shape, l1_ratio, weight, dgap_tol, max_iter=5000, init=None, verbose=False): """ Computation of TV-L1 prox, taking into account the intercept. Parameters ---------- weight : float Weight in prox. This would be something like `alpha_ * stepsize`, where `alpha_` is the effective (i.e. re-scaled) alpha. w : ndarray, shape (w_size,) The point at which the prox is being computed init : ndarray, shape (w_size - 1,), optional (default None) Initialization vector for the prox. max_iter : int Maximum number of iterations for the solver. verbose : int, optional (default 0) Verbosity level. 
dgap_tol : float Dual-gap tolerance for the TV-L1 prox operator approximation loop. """ init = init.reshape(shape) if init is not None else init out, prox_info = _prox_tvl1( w[:-1].reshape(shape), weight=weight, l1_ratio=l1_ratio, dgap_tol=dgap_tol, init=init, max_iter=max_iter, verbose=verbose) return np.append(out, w[-1]), prox_info PKDoH`[V**nilearn/decoding/searchlight.py""" The searchlight is a widely used approach for the study of the fine-grained patterns of information in fMRI analysis, in which multivariate statistical relationships are iteratively tested in the neighborhood of each location of a domain. """ # Authors : Vincent Michel (vm.michel@gmail.com) # Alexandre Gramfort (alexandre.gramfort@inria.fr) # Philippe Gervais (philippe.gervais@inria.fr) # # License: simplified BSD import time import sys import warnings from distutils.version import LooseVersion import numpy as np import sklearn from sklearn.externals.joblib import Parallel, delayed, cpu_count from sklearn import svm from sklearn.cross_validation import cross_val_score from sklearn.base import BaseEstimator from .. import masking from ..image.resampling import coord_transform from ..input_data.nifti_spheres_masker import _apply_mask_and_get_affinity from .._utils.compat import _basestring ESTIMATOR_CATALOG = dict(svc=svm.LinearSVC, svr=svm.SVR) def search_light(X, y, estimator, A, scoring=None, cv=None, n_jobs=-1, verbose=0): """Function for computing a search_light. Parameters ---------- X : array-like of shape at least 2D Data to fit. y : array-like Target variable to predict. estimator : estimator object implementing 'fit' Object to use to fit the data. A : scipy sparse matrix Adjacency matrix. Defines for each feature the neighboring features following a given structure of the data. scoring : string or callable, optional The scoring strategy to use. See the scikit-learn documentation for possible values. If callable, it takes as arguments the fitted estimator, the test data (X_test) and the test target (y_test) if y is not None. cv : cross-validation generator, optional A cross-validation generator. If None, a 3-fold cross validation is used or 3-fold stratified cross-validation when y is supplied. n_jobs : int, optional The number of CPUs to use to do the computation. -1 means 'all CPUs'. verbose : int, optional The verbosity level. Default is 0. Returns ------- scores : array-like of shape (number of rows in A) search_light scores """ group_iter = GroupIterator(A.shape[0], n_jobs) scores = Parallel(n_jobs=n_jobs, verbose=verbose)( delayed(_group_iter_search_light)( A.rows[list_i], estimator, X, y, scoring, cv, thread_id + 1, A.shape[0], verbose) for thread_id, list_i in enumerate(group_iter)) return np.concatenate(scores) class GroupIterator(object): """Group iterator. Provides groups of features for the search_light loop that may be used with Parallel. Parameters ---------- n_features : int Total number of features. n_jobs : int, optional The number of CPUs to use to do the computation. -1 means 'all CPUs'. Default is 1. """ def __init__(self, n_features, n_jobs=1): self.n_features = n_features if n_jobs == -1: n_jobs = cpu_count() self.n_jobs = n_jobs def __iter__(self): split = np.array_split(np.arange(self.n_features), self.n_jobs) for list_i in split: yield list_i def _group_iter_search_light(list_rows, estimator, X, y, scoring, cv, thread_id, total, verbose=0): """Function for grouped iterations of search_light. Parameters ---------- list_rows : array of arrays of int Adjacency rows.
For a voxel with index i in X, list_rows[i] is the list of neighboring voxels indices (in X). estimator : estimator object implementing 'fit' object to use to fit the data X : array-like of shape at least 2D data to fit. y : array-like target variable to predict. scoring : string or callable, optional Scoring strategy to use. See the scikit-learn documentation. If callable, takes as arguments the fitted estimator, the test data (X_test) and the test target (y_test) if y is not None. cv : cross-validation generator, optional A cross-validation generator. If None, a 3-fold cross validation is used or 3-fold stratified cross-validation when y is supplied. thread_id : int process id, used for display. total : int Total number of voxels, used for display verbose : int, optional The verbosity level. Defaut is 0 Returns ------- par_scores : numpy.ndarray score for each voxel. dtype: float64. """ par_scores = np.zeros(len(list_rows)) t0 = time.time() for i, row in enumerate(list_rows): kwargs = dict() if not LooseVersion(sklearn.__version__) < LooseVersion('0.15'): kwargs['scoring'] = scoring elif scoring is not None: warnings.warn('Scikit-learn version is too old. ' 'scoring argument ignored', stacklevel=2) par_scores[i] = np.mean(cross_val_score(estimator, X[:, row], y, cv=cv, n_jobs=1, **kwargs)) if verbose > 0: # One can't print less than each 10 iterations step = 11 - min(verbose, 10) if (i % step == 0): # If there is only one job, progress information is fixed if total == len(list_rows): crlf = "\r" else: crlf = "\n" percent = float(i) / len(list_rows) percent = round(percent * 100, 2) dt = time.time() - t0 # We use a max to avoid a division by zero remaining = (100. - percent) / max(0.01, percent) * dt sys.stderr.write( "Job #%d, processed %d/%d voxels " "(%0.2f%%, %i seconds remaining)%s" % (thread_id, i, len(list_rows), percent, remaining, crlf)) return par_scores ############################################################################## # Class for search_light ##################################################### ############################################################################## class SearchLight(BaseEstimator): """Implement search_light analysis using an arbitrary type of classifier. Parameters ----------- mask_img : Niimg-like object See http://nilearn.github.io/manipulating_images/input_output.html. boolean image giving location of voxels containing usable signals. process_mask_img : Niimg-like object, optional See http://nilearn.github.io/manipulating_images/input_output.html. boolean image giving voxels on which searchlight should be computed. radius : float, optional radius of the searchlight ball, in millimeters. Defaults to 2. estimator : 'svr', 'svc', or an estimator object implementing 'fit' The object to use to fit the data n_jobs : int, optional. Default is -1. The number of CPUs to use to do the computation. -1 means 'all CPUs'. scoring : string or callable, optional The scoring strategy to use. See the scikit-learn documentation If callable, takes as arguments the fitted estimator, the test data (X_test) and the test target (y_test) if y is not None. cv : cross-validation generator, optional A cross-validation generator. If None, a 3-fold cross validation is used or 3-fold stratified cross-validation when y is supplied. verbose : int, optional Verbosity level. Defaut is False Notes ------ The searchlight [Kriegeskorte 06] is a widely used approach for the study of the fine-grained patterns of information in fMRI analysis. 
Its principle is relatively simple: a small group of neighboring features is extracted from the data, and the prediction function is instantiated on these features only. The resulting prediction accuracy is thus associated with all the features within the group, or only with the feature on the center. This yields a map of local fine-grained information, that can be used for assessing hypothesis on the local spatial layout of the neural code under investigation. Nikolaus Kriegeskorte, Rainer Goebel & Peter Bandettini. Information-based functional brain mapping. Proceedings of the National Academy of Sciences of the United States of America, vol. 103, no. 10, pages 3863-3868, March 2006 """ def __init__(self, mask_img, process_mask_img=None, radius=2., estimator='svc', n_jobs=1, scoring=None, cv=None, verbose=0): self.mask_img = mask_img self.process_mask_img = process_mask_img self.radius = radius self.estimator = estimator self.n_jobs = n_jobs self.scoring = scoring self.cv = cv self.verbose = verbose def fit(self, imgs, y): """Fit the searchlight Parameters ---------- imgs : Niimg-like object See http://nilearn.github.io/manipulating_images/input_output.html. 4D image. y : 1D array-like Target variable to predict. Must have exactly as many elements as 3D images in img. """ # Get the seeds process_mask_img = self.process_mask_img if self.process_mask_img is None: process_mask_img = self.mask_img # Compute world coordinates of the seeds process_mask, process_mask_affine = masking._load_mask_img( process_mask_img) process_mask_coords = np.where(process_mask != 0) process_mask_coords = coord_transform( process_mask_coords[0], process_mask_coords[1], process_mask_coords[2], process_mask_affine) process_mask_coords = np.asarray(process_mask_coords).T X, A = _apply_mask_and_get_affinity( process_mask_coords, imgs, self.radius, True, mask_img=self.process_mask_img) estimator = self.estimator if isinstance(estimator, _basestring): estimator = ESTIMATOR_CATALOG[estimator]() scores = search_light(X, y, estimator, A, self.scoring, self.cv, self.n_jobs, self.verbose) scores_3D = np.zeros(process_mask.shape) scores_3D[process_mask] = scores self.scores_ = scores_3D return self PKQoHߩ#nilearn/decoding/space_net.py""" sklearn-compatible implementation of spatially structured learners ( TV-L1, Graph-Net, etc.) """ # Author: DOHMATOB Elvis Dopgima, # PIZARRO Gaspar, # VAROQUAUX Gael, # GRAMFORT Alexandre, # EICKENBERG Michael, # THIRION Bertrand # License: simplified BSD import warnings import numbers import time import sys from functools import partial import numpy as np from scipy import stats, ndimage from sklearn.base import RegressorMixin, clone from sklearn.utils.extmath import safe_sparse_dot from sklearn.linear_model.base import LinearModel, center_data from sklearn.feature_selection import (SelectPercentile, f_regression, f_classif) from sklearn.externals.joblib import Memory, Parallel, delayed from sklearn.cross_validation import check_cv from sklearn.preprocessing import LabelBinarizer from sklearn.metrics import accuracy_score from .._utils.compat import _basestring from .._utils.fixes import atleast2d_or_csr from .._utils.cache_mixin import CacheMixin from ..input_data import NiftiMasker from .objective_functions import _unmask from .space_net_solvers import (tvl1_solver, _graph_net_logistic, _graph_net_squared_loss) # Volume of a standard (MNI152) brain mask in mm^3 MNI152_BRAIN_VOLUME = 1827243. 
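# The user-supplied screening percentile is expressed w.r.t. this standard # volume and is rescaled against the actual mask volume in # _adjust_screening_percentile below. A worked example, with hypothetical # numbers: for a mask of volume MNI152_BRAIN_VOLUME / 2 and # screening_percentile = 20., the corrected value is # min(20. * (MNI152_BRAIN_VOLUME / (MNI152_BRAIN_VOLUME / 2)), 100) == 40., # i.e. keeping 20% of a standard brain amounts to keeping 40% of a mask # half that size.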
def _get_mask_volume(mask_img): """Computes the volume of a brain mask in mm^3. Parameters ---------- mask_img : nibabel image object Input image whose voxel dimensions are to be computed. Returns ------- vol : float The computed volume. """ vox_dims = mask_img.get_header().get_zooms()[:3] return 1. * np.prod(vox_dims) * mask_img.get_data().astype(np.bool).sum() def _adjust_screening_percentile(screening_percentile, mask_img, verbose=0): original_screening_percentile = screening_percentile # correct screening_percentile according to the volume of the data mask mask_volume = _get_mask_volume(mask_img) if mask_volume > MNI152_BRAIN_VOLUME: warnings.warn( "Brain mask is bigger than the volume of a standard " "human brain. SpaceNet is probably not tuned to " "be used on such data.", stacklevel=2) elif mask_volume < .005 * MNI152_BRAIN_VOLUME: warnings.warn( "Brain mask is smaller than .5% of the volume of a standard " "human brain. SpaceNet is probably not tuned to " "be used on such data.", stacklevel=2) if screening_percentile < 100: screening_percentile = screening_percentile * ( MNI152_BRAIN_VOLUME / mask_volume) screening_percentile = min(screening_percentile, 100) # if screening_percentile is 100, we don't do anything if verbose > 1: print("Mask volume = %gmm^3 = %gcm^3" % ( mask_volume, mask_volume / 1.e3)) print("Standard brain volume = %gmm^3 = %gcm^3" % ( MNI152_BRAIN_VOLUME, MNI152_BRAIN_VOLUME / 1.e3)) print("Original screening-percentile: %g" % ( original_screening_percentile)) print("Volume-corrected screening-percentile: %g" % ( screening_percentile)) return screening_percentile def _crop_mask(mask): """Crops the input mask to produce a tighter (i.e. smaller) bounding box with the same support (active voxels).""" idx = np.where(mask) if idx[0].size == 0: raise ValueError("Empty mask: if you have given a mask, it is " "empty, and if you have not given a mask, the " "mask-extraction routines have failed. Please " "provide an appropriate mask.") i_min = max(idx[0].min() - 1, 0) i_max = idx[0].max() j_min = max(idx[1].min() - 1, 0) j_max = idx[1].max() k_min = max(idx[2].min() - 1, 0) k_max = idx[2].max() return mask[i_min:i_max + 1, j_min:j_max + 1, k_min:k_max + 1] def _univariate_feature_screening( X, y, mask, is_classif, screening_percentile, smoothing_fwhm=2.): """Selects the most important features, via a univariate test. Parameters ---------- X : ndarray, shape (n_samples, n_features) Design matrix. y : ndarray, shape (n_samples,) Response vector. mask : ndarray of booleans, shape (nx, ny, nz) Mask defining brain ROIs. is_classif : bool Flag telling whether the learning task is classification or regression. screening_percentile : float in the closed interval [0., 100.] Only the `screening_percentile` percent most important voxels will be retained. smoothing_fwhm : float, optional (default 2.) FWHM for isotropically smoothing the data X before F-testing. A value of zero means "don't smooth". Returns ------- X_ : ndarray, shape (n_samples, n_features_) Reduced design matrix with only columns corresponding to the voxels retained after screening. mask_ : ndarray of booleans, shape (nx, ny, nz) Mask with support reduced to only contain voxels retained after screening. support : ndarray of booleans, shape (n_features,) Support of the screened mask, relative to the support of the original mask.
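Notes ----- The screening proceeds in three steps (see the body below): the unmasked images are optionally smoothed with an isotropic Gaussian kernel of width `smoothing_fwhm`; an F-test (f_classif or f_regression, depending on `is_classif`) ranks the voxels and the top `screening_percentile` percent are kept; finally, the resulting mask is eroded then dilated, so that the support on which the spatial prior is applied stays spatially coherent.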
""" # smooth the data (with isotropic Gaussian kernel) before screening if smoothing_fwhm > 0.: sX = np.empty(X.shape) for sample in range(sX.shape[0]): sX[sample] = ndimage.gaussian_filter( _unmask(X[sample].copy(), # avoid modifying X mask), (smoothing_fwhm, smoothing_fwhm, smoothing_fwhm))[mask] else: sX = X # do feature screening proper selector = SelectPercentile(f_classif if is_classif else f_regression, percentile=screening_percentile).fit(sX, y) support = selector.get_support() # erode and then dilate mask, thus obtaining a "cleaner" version of # the mask on which a spatial prior actually makes sense mask_ = mask.copy() mask_[mask] = (support > 0) mask_ = ndimage.binary_dilation(ndimage.binary_erosion( mask_)).astype(np.bool) mask_[np.logical_not(mask)] = 0 support = mask_[mask] X = X[:, support] return X, mask_, support def _space_net_alpha_grid(X, y, eps=1e-3, n_alphas=10, l1_ratio=1., logistic=False): """Compute the grid of alpha values for TV-L1 and Graph-Net. Parameters ---------- X : ndarray, shape (n_samples, n_features) Training data (design matrix). y : ndarray, shape (n_samples,) Target / response vector. l1_ratio : float The ElasticNet mixing parameter, with ``0 <= l1_ratio <= 1``. For ``l1_ratio = 0`` the penalty is purely a spatial prior (Graph-Net, TV, etc.). ``For l1_ratio = 1`` it is an L1 penalty. For ``0 < l1_ratio < 1``, the penalty is a combination of L1 and a spatial prior. eps : float, optional Length of the path. ``eps=1e-3`` means that ``alpha_min / alpha_max = 1e-3``. n_alphas : int, optional Number of alphas along the regularization path. logistic : bool, optional (default False) Indicates where the underlying loss function is logistic. """ if logistic: # Computes the theoretical upper bound for the overall # regularization, as derived in "An Interior-Point Method for # Large-Scale l1-Regularized Logistic Regression", by Koh, Kim, # Boyd, in Journal of Machine Learning Research, 8:1519-1555, # July 2007. # url: http://www.stanford.edu/~boyd/papers/pdf/l1_logistic_reg.pdf m = float(y.size) m_plus = float(y[y == 1].size) m_minus = float(y[y == -1].size) b = np.zeros_like(y) b[y == 1] = m_minus / m b[y == -1] = - m_plus / m alpha_max = np.max(np.abs(X.T.dot(b))) # tt may happen that b is in the kernel of X.T! if alpha_max == 0.: alpha_max = np.abs(np.dot(X.T, y)).max() else: alpha_max = np.abs(np.dot(X.T, y)).max() # prevent alpha_max from exploding when l1_ratio = 0 if l1_ratio == 0.: l1_ratio = 1e-3 alpha_max /= l1_ratio if n_alphas == 1: return np.array([alpha_max]) alpha_min = alpha_max * eps return np.logspace(np.log10(alpha_min), np.log10(alpha_max), num=n_alphas)[::-1] class _EarlyStoppingCallback(object): """Out-of-bag early stopping A callable that returns True when the test error starts rising. We use a Spearman correlation (between X_test.w and y_test) for scoring. """ def __init__(self, X_test, y_test, is_classif, debias=False, verbose=0): self.X_test = X_test self.y_test = y_test self.is_classif = is_classif self.debias = debias self.verbose = verbose self.tol = -1e-4 if self.is_classif else -1e-2 self.test_scores = [] self.counter = 0. 
def __call__(self, variables): """The callback proper.""" # misc if not isinstance(variables, dict): variables = dict(w=variables) self.counter += 1 w = variables['w'] # use the Spearman score as stopping criterion score = self.test_score(w)[0] self.test_scores.append(score) if not (self.counter > 20 and (self.counter % 10) == 2): return # check whether the score stopped increasing, on average, over the # last 5 iterations if len(self.test_scores) > 4: if np.mean(np.diff(self.test_scores[-5:][::-1])) >= self.tol: if self.verbose: if self.verbose > 1: print('Early stopping. Test score: %.8f %s' % ( score, 40 * '-')) else: sys.stderr.write('.') return True if self.verbose > 1: print('Test score: %.8f' % score) return False def _debias(self, w): """Debias w by rescaling the coefficients by a fixed factor. Precisely, the scaling factor is: <y_pred, y_test> / ||y_pred||^2. """ y_pred = np.dot(self.X_test, w) scaling = np.dot(y_pred, y_pred) if scaling > 0.: scaling = np.dot(y_pred, self.y_test) / scaling w *= scaling return w def test_score(self, w): """Compute the test score of the model, given the weights map `w`. We use correlations between the linear prediction and the ground truth (y_test). We return 2 scores for model selection: one is the Spearman correlation, which captures ordering between input and output, but tends to have 'flat' regions. The other is the Pearson correlation, which we can use to disambiguate between regions with equivalent Spearman correlation. """ if self.is_classif: w = w[:-1] if w.ptp() == 0: # constant map, there is nothing to score return (-np.inf, -np.inf) y_pred = np.dot(self.X_test, w) spearman_score = stats.spearmanr(y_pred, self.y_test)[0] pearson_score = np.corrcoef(y_pred, self.y_test)[1, 0] if self.is_classif: return spearman_score, pearson_score else: return pearson_score, spearman_score def path_scores(solver, X, y, mask, alphas, l1_ratios, train, test, solver_params, is_classif=False, n_alphas=10, eps=1E-3, key=None, debias=False, Xmean=None, screening_percentile=20., verbose=1): """Function to compute scores of different alphas in regression and classification, used by the CV objects. Parameters ---------- X : 2D array of shape (n_samples, n_features) Design matrix, one row per sample point. y : 1D array of length n_samples Response vector; one value per sample. mask : 3D ndarray of booleans Mask defining the brain regions that we work on. alphas : list of floats List of regularization parameters being considered. train : array or list of integers List of indices for the train samples. test : array or list of integers List of indices for the test samples. l1_ratios : float or list of floats in the interval [0, 1]; optional (default .5) Constant(s) that mix L1 and TV (resp. Graph-Net) penalization. l1_ratio == 0: just smooth. l1_ratio == 1: just lasso. eps : float, optional (default 1e-3) Length of the path. For example, ``eps=1e-3`` means that ``alpha_min / alpha_max = 1e-3``. n_alphas : int, optional (default 10). Generate this number of alphas per regularization path. This parameter is mutually exclusive with the `alphas` parameter. solver : callable Solver backend, e.g. `tvl1_solver`, `_graph_net_squared_loss` or `_graph_net_logistic`. solver_params : dict Dictionary of param-value pairs to be passed to solver.
Note that if we have only as few as 100 # features in the mask's support, then we should use all of them to # learn the model, i.e. disable this screening. do_screening = (n_features > 100) and screening_percentile < 100. if do_screening: X, mask, support = _univariate_feature_screening( X, y, mask, is_classif, screening_percentile) # crop the mask to have a tighter bounding box mask = _crop_mask(mask) # get train and test data X_train, y_train = X[train].copy(), y[train].copy() X_test, y_test = X[test].copy(), y[test].copy() # it is essential to center the data in regression X_train, y_train, _, y_train_mean, _ = center_data( X_train, y_train, fit_intercept=True, normalize=False, copy=False) # misc if isinstance(l1_ratios, numbers.Number): l1_ratios = [l1_ratios] l1_ratios = sorted(l1_ratios)[::-1] # from large to small l1_ratios best_score = -np.inf best_secondary_score = -np.inf best_l1_ratio = l1_ratios[0] best_alpha = None best_init = None all_test_scores = [] if len(test) > 0: # do l1_ratio path for l1_ratio in l1_ratios: this_test_scores = [] # make alpha grid if alphas is None: alphas_ = _space_net_alpha_grid( X_train, y_train, l1_ratio=l1_ratio, eps=eps, n_alphas=n_alphas, logistic=is_classif) else: alphas_ = alphas alphas_ = sorted(alphas_)[::-1] # from large to small alphas # do alpha path if best_alpha is None: best_alpha = alphas_[0] init = None for alpha in alphas_: # setup callback mechanism for early stopping early_stopper = _EarlyStoppingCallback( X_test, y_test, is_classif=is_classif, debias=debias, verbose=verbose) w, _, init = solver( X_train, y_train, alpha, l1_ratio, mask=mask, init=init, callback=early_stopper, verbose=max(verbose - 1, 0.), **solver_params) # We use 2 scores for model selection: the second one is to # disambiguate between regions of equivalent Spearman # correlations score, secondary_score = early_stopper.test_score(w) this_test_scores.append(score) if (np.isfinite(score) and (score > best_score or (score == best_score and secondary_score > best_secondary_score))): best_secondary_score = secondary_score best_score = score best_l1_ratio = l1_ratio best_alpha = alpha best_init = init.copy() all_test_scores.append(this_test_scores) else: if alphas is None: alphas_ = _space_net_alpha_grid( X_train, y_train, l1_ratio=best_l1_ratio, eps=eps, n_alphas=n_alphas, logistic=is_classif) else: alphas_ = alphas best_alpha = alphas_[0] # re-fit best model to high precision (i.e. without early stopping, etc.) best_w, _, init = solver(X_train, y_train, best_alpha, best_l1_ratio, mask=mask, init=best_init, verbose=max(verbose - 1, 0), **solver_params) if debias: best_w = _EarlyStoppingCallback( X_test, y_test, is_classif=is_classif, debias=debias, verbose=verbose)._debias(best_w) if len(test) == 0: all_test_scores.append(np.nan) # unmask univariate screening if do_screening: w_ = np.zeros(len(support)) if is_classif: w_ = np.append(w_, best_w[-1]) w_[:-1][support] = best_w[:-1] else: w_[support] = best_w best_w = w_ if len(best_w) == n_features: if Xmean is None: Xmean = np.zeros(n_features) best_w = np.append(best_w, 0.) all_test_scores = np.array(all_test_scores) return (all_test_scores, best_w, best_alpha, best_l1_ratio, alphas_, y_train_mean, key) class BaseSpaceNet(LinearModel, RegressorMixin, CacheMixin): """ Regression and classification learners with sparsity and spatial priors. `SpaceNet` implements Graph-Net and TV-L1 priors / penalties. Thus, the penalty is a sum of an L1 term and a spatial term.
The aim of such a hybrid prior is to obtain weights maps which are structured (due to the spatial prior) and sparse (enforced by the L1 norm). Parameters ---------- penalty : string, optional (default 'graph-net') Penalty to be used in the model. Can be 'graph-net' or 'tv-l1'. loss : string, optional (default "mse") Loss to be used in the model. Must be one of "mse" or "logistic". is_classif : bool, optional (default False) Flag telling whether the learning task is classification or regression. l1_ratios : float or list of floats in the interval [0, 1]; optional (default .5) Constant that mixes L1 and spatial prior terms in penalization. l1_ratio == 1 corresponds to pure LASSO. The larger the value of this parameter, the sparser the estimated weights map. If a list is provided, then the best value will be selected by cross-validation. alphas : float or list of floats, optional (default None) Choices for the constant that scales the overall regularization term. This parameter is mutually exclusive with the `n_alphas` parameter. If None or a list of floats is provided, then the best value will be selected by cross-validation. n_alphas : int, optional (default 10) Generate this number of alphas per regularization path. This parameter is mutually exclusive with the `alphas` parameter. eps : float, optional (default 1e-3) Length of the path. For example, ``eps=1e-3`` means that ``alpha_min / alpha_max = 1e-3``. mask : filename, niimg, NiftiMasker instance, optional (default None) Mask to be used on data. If an instance of masker is passed, then its mask will be used. If no mask is given, it will be computed automatically by a NiftiMasker. target_affine : 3x3 or 4x4 matrix, optional (default None) This parameter is passed to image.resample_img. An important use-case of this parameter is for downsampling the input data to a coarser resolution (to speed up the model fit). Please see the related documentation for details. target_shape : 3-tuple of integers, optional (default None) This parameter is passed to image.resample_img. Please see the related documentation for details. low_pass : False or float, optional (default None) This parameter is passed to signal.clean. Please see the related documentation for details. high_pass : False or float, optional (default None) This parameter is passed to signal.clean. Please see the related documentation for details. t_r : float, optional (default None) This parameter is passed to signal.clean. Please see the related documentation for details. screening_percentile : float in the interval [0, 100]; optional (default 20) Percentile value for ANOVA univariate feature selection. A value of 100 means 'keep all features'. This percentile is expressed w.r.t. the volume of a standard (MNI152) brain, and so is corrected at runtime to correspond to the volume of the user-supplied mask (which is typically smaller). If 100 is given, all the features are used, regardless of the number of voxels. standardize : bool, optional (default True) If set, then the data (X, y) are centered to have mean zero along axis 0. This is here because nearly all linear models will want their data to be centered. fit_intercept : bool, optional (default True) Whether or not to fit an intercept. max_iter : int (default 1000) Maximum number of iterations for the solver. tol : float, optional (default 5e-4) Defines the tolerance for convergence of the backend FISTA solver. verbose : int, optional (default 1) Verbosity level. n_jobs : int, optional (default 1) Number of jobs used in solving the sub-problems.
memory : instance of joblib.Memory or string Used to cache the masking process. By default, no caching is done. If a string is given, it is the path to the caching directory. memory_level : integer, optional (default 1) Rough estimator of the amount of memory used by caching. Higher value means more memory for caching. cv : int, a cv generator instance, or None (default 8) The input specifying which cross-validation generator to use. It can be an integer, in which case it is the number of folds in a KFold, None, in which case 3-fold CV is used, or another object, that will then be used as a cv generator. debias : bool, optional (default False) If set, then the estimated weights maps will be debiased. Attributes ---------- `alpha_` : float Best alpha found by cross-validation. `coef_` : ndarray, shape (n_classes-1, n_features) Coefficient of the features in the decision function. `masker_` : instance of NiftiMasker The nifti masker used to mask the data. `mask_img_` : Nifti like image The mask of the data. If no mask was supplied by the user, this attribute is the mask image computed automatically from the data `X`. `intercept_` : ndarray, shape (n_classes - 1,) Intercept (a.k.a. bias) added to the decision function. It is available only when parameter intercept is set to True. `cv_` : list of pairs of lists List of the (n_folds,) folds. For the corresponding fold, each pair is composed of two lists of indices, one for the train samples and one for the test samples. `cv_scores_` : ndarray, shape (n_alphas, n_folds) or (n_l1_ratios, n_alphas, n_folds) Scores (misclassification) for each alpha, and on each fold. `screening_percentile_` : float Screening percentile corrected according to volume of mask, relative to the volume of a standard brain. """ SUPPORTED_PENALTIES = ["graph-net", "tv-l1"] SUPPORTED_LOSSES = ["mse", "logistic"] def __init__(self, penalty="graph-net", is_classif=False, loss=None, l1_ratios=.5, alphas=None, n_alphas=10, mask=None, target_affine=None, target_shape=None, low_pass=None, high_pass=None, t_r=None, max_iter=1000, tol=5e-4, memory=Memory(None), memory_level=1, standardize=True, verbose=1, n_jobs=1, eps=1e-3, cv=8, fit_intercept=True, screening_percentile=20., debias=False): self.penalty = penalty self.is_classif = is_classif self.loss = loss self.n_alphas = n_alphas self.eps = eps self.l1_ratios = l1_ratios self.alphas = alphas self.mask = mask self.fit_intercept = fit_intercept self.memory = memory self.memory_level = memory_level self.max_iter = max_iter self.tol = tol self.verbose = verbose self.standardize = standardize self.n_jobs = n_jobs self.cv = cv self.screening_percentile = screening_percentile self.debias = debias self.low_pass = low_pass self.high_pass = high_pass self.t_r = t_r self.target_affine = target_affine self.target_shape = target_shape # sanity check on params self.check_params() def check_params(self): """Makes sure parameters are sane.""" if self.l1_ratios is not None: l1_ratios = self.l1_ratios if isinstance(l1_ratios, numbers.Number): l1_ratios = [l1_ratios] for l1_ratio in l1_ratios: if not 0 <= l1_ratio <= 1.: raise ValueError( "l1_ratio must be in the interval [0, 1]; got %g" % ( l1_ratio)) elif l1_ratio == 0. or l1_ratio == 1.: warnings.warn( ("Specified l1_ratio = %g. It's advised to only " "specify values of l1_ratio strictly between 0 " "and 1." % l1_ratio)) if not (0.
<= self.screening_percentile <= 100.): raise ValueError( ("screening_percentile should be in the interval" " [0, 100], got %g" % self.screening_percentile)) if self.penalty not in self.SUPPORTED_PENALTIES: raise ValueError( "'penalty' parameter must be one of %s%s or %s; got %s" % ( ",".join(self.SUPPORTED_PENALTIES[:-1]), "," if len( self.SUPPORTED_PENALTIES) > 2 else "", self.SUPPORTED_PENALTIES[-1], self.penalty)) if not (self.loss is None or self.loss in self.SUPPORTED_LOSSES): raise ValueError( "'loss' parameter must be one of %s%s or %s; got %s" % ( ",".join(self.SUPPORTED_LOSSES[:-1]), "," if len( self.SUPPORTED_LOSSES) > 2 else "", self.SUPPORTED_LOSSES[-1], self.loss)) if self.loss is not None and not self.is_classif and ( self.loss == "logistic"): raise ValueError( ("'logistic' loss is only available for classification " "problems.")) def _set_coef_and_intercept(self, w): """Sets the loadings vector (coef) and the intercept of the fitted model.""" self.w_ = np.array(w) if self.w_.ndim == 1: self.w_ = self.w_[np.newaxis, :] self.coef_ = self.w_[:, :-1] if self.is_classif: self.intercept_ = self.w_[:, -1] else: self._set_intercept(self.Xmean_, self.ymean_, self.Xstd_) def fit(self, X, y): """Fit the learner. Parameters ---------- X : list of Niimg-like objects See http://nilearn.github.io/manipulating_images/input_output.html Data on which the model is to be fitted. If this is a list, the affine is considered the same for all. y : array or list of length n_samples The dependent variable (age, sex, IQ, etc.). Returns ------- self : `SpaceNet` object The fitted estimator. Model selection is via cross-validation with bagging. """ # misc self.check_params() if self.memory is None or isinstance(self.memory, _basestring): self.memory_ = Memory(self.memory, verbose=max(0, self.verbose - 1)) else: self.memory_ = self.memory if self.verbose: tic = time.time() # nifti masking if isinstance(self.mask, NiftiMasker): self.masker_ = clone(self.mask) else: self.masker_ = NiftiMasker(mask_img=self.mask, target_affine=self.target_affine, target_shape=self.target_shape, standardize=self.standardize, low_pass=self.low_pass, high_pass=self.high_pass, mask_strategy='epi', t_r=self.t_r, memory=self.memory_) X = self.masker_.fit_transform(X) # misc self.Xmean_ = X.mean(axis=0) self.Xstd_ = X.std(axis=0) self.Xstd_[self.Xstd_ < 1e-8] = 1 self.mask_img_ = self.masker_.mask_img_ self.mask_ = self.mask_img_.get_data().astype(np.bool) n_samples, _ = X.shape y = np.array(y).copy() l1_ratios = self.l1_ratios if isinstance(l1_ratios, numbers.Number): l1_ratios = [l1_ratios] alphas = self.alphas if isinstance(alphas, numbers.Number): alphas = [alphas] if self.loss is not None: loss = self.loss elif self.is_classif: loss = "logistic" else: loss = "mse" # set backend solver if self.penalty.lower() == "graph-net": if not self.is_classif or loss == "mse": solver = _graph_net_squared_loss else: solver = _graph_net_logistic else: if not self.is_classif or loss == "mse": solver = partial(tvl1_solver, loss="mse") else: solver = partial(tvl1_solver, loss="logistic") # generate fold indices case1 = (None in [alphas, l1_ratios]) and self.n_alphas > 1 case2 = (alphas is not None) and min(len(l1_ratios), len(alphas)) > 1 if case1 or case2: self.cv_ = list(check_cv(self.cv, X=X, y=y, classifier=self.is_classif)) else: # no cross-validation needed, user supplied all params self.cv_ = [(np.arange(n_samples), [])] n_folds = len(self.cv_) # number of problems to solve if self.is_classif: y = self._binarize_y(y) else: y = y[:, np.newaxis] if self.is_classif and
self.n_classes_ > 2: n_problems = self.n_classes_ else: n_problems = 1 # standardize y self.ymean_ = np.zeros(y.shape[0]) if n_problems == 1: y = y[:, 0] # scores & mean weights map over all folds self.cv_scores_ = [[] for i in range(n_problems)] w = np.zeros((n_problems, X.shape[1] + 1)) self.all_coef_ = np.ndarray((n_problems, n_folds, X.shape[1])) self.screening_percentile_ = _adjust_screening_percentile( self.screening_percentile, self.mask_img_, verbose=self.verbose) # main loop: loop on classes and folds solver_params = dict(tol=self.tol, max_iter=self.max_iter) self.best_model_params_ = [] self.alpha_grids_ = [] for (test_scores, best_w, best_alpha, best_l1_ratio, alphas, y_train_mean, (cls, fold)) in Parallel( n_jobs=self.n_jobs, verbose=2 * self.verbose)( delayed(self._cache(path_scores, func_memory_level=2))( solver, X, y[:, cls] if n_problems > 1 else y, self.mask_, alphas, l1_ratios, self.cv_[fold][0], self.cv_[fold][1], solver_params, n_alphas=self.n_alphas, eps=self.eps, is_classif=self.loss == "logistic", key=(cls, fold), debias=self.debias, verbose=self.verbose, screening_percentile=self.screening_percentile_, ) for cls in range(n_problems) for fold in range(n_folds)): self.best_model_params_.append((best_alpha, best_l1_ratio)) self.alpha_grids_.append(alphas) self.ymean_[cls] += y_train_mean self.all_coef_[cls, fold] = best_w[:-1] if len(np.atleast_1d(l1_ratios)) == 1: test_scores = test_scores[0] self.cv_scores_[cls].append(test_scores) w[cls] += best_w # misc self.cv_scores_ = np.array(self.cv_scores_) self.alpha_grids_ = np.array(self.alpha_grids_) self.ymean_ /= n_folds if not self.is_classif: self.all_coef_ = np.array(self.all_coef_) w = w[0] self.ymean_ = self.ymean_[0] # bagging: average best weights maps over folds w /= n_folds # set coefs and intercepts self._set_coef_and_intercept(w) # unmask weights map as a niimg self.coef_img_ = self.masker_.inverse_transform(self.coef_) # report time elapsed if self.verbose: duration = time.time() - tic print("Time Elapsed: %g seconds, %i minutes." % ( duration, duration / 60.)) return self def decision_function(self, X): """Predict confidence scores for samples The confidence score for a sample is the signed distance of that sample to the hyperplane. Parameters ---------- X : {array-like, sparse matrix}, shape = (n_samples, n_features) Samples. Returns ------- array, shape=(n_samples,) if n_classes == 2 else (n_samples, n_classes) Confidence scores per (sample, class) combination. In the binary case, confidence score for self.classes_[1] where >0 means this class would be predicted. """ # handle regression (least-squared loss) if not self.is_classif: return LinearModel.decision_function(self, X) X = atleast2d_or_csr(X) n_features = self.coef_.shape[1] if X.shape[1] != n_features: raise ValueError("X has %d features per sample; expecting %d" % (X.shape[1], n_features)) scores = safe_sparse_dot(X, self.coef_.T, dense_output=True) + self.intercept_ return scores.ravel() if scores.shape[1] == 1 else scores def predict(self, X): """Predict class labels for samples in X. Parameters ---------- X : list of Niimg-like objects See http://nilearn.github.io/manipulating_images/input_output.html Data on prediction is to be made. If this is a list, the affine is considered the same for all. Returns ------- y_pred : ndarray, shape (n_samples,) Predicted class label per sample. """ # cast X into usual 2D array if not hasattr(self, "masker_"): raise RuntimeError("This %s instance is not fitted yet!" 
% ( self.__class__.__name__)) X = self.masker_.transform(X) # handle regression (least-squares loss) if not self.is_classif: return LinearModel.predict(self, X) # prediction proper scores = self.decision_function(X) if len(scores.shape) == 1: indices = (scores > 0).astype(np.int) else: indices = scores.argmax(axis=1) return self.classes_[indices] class SpaceNetClassifier(BaseSpaceNet): """Classification learners with sparsity and spatial priors. `SpaceNetClassifier` implements Graph-Net and TV-L1 priors / penalties for classification problems. Thus, the penalty is a sum of an L1 term and a spatial term. The aim of such a hybrid prior is to obtain weights maps which are structured (due to the spatial prior) and sparse (enforced by the L1 norm). Parameters ---------- penalty : string, optional (default 'graph-net') Penalty to be used in the model. Can be 'graph-net' or 'tv-l1'. loss : string, optional (default "logistic") Loss to be used in the classifier. Must be one of "mse" or "logistic". l1_ratios : float or list of floats in the interval [0, 1]; optional (default .5) Constant that mixes L1 and spatial prior terms in penalization. l1_ratio == 1 corresponds to pure LASSO. The larger the value of this parameter, the sparser the estimated weights map. If a list is provided, then the best value will be selected by cross-validation. alphas : float or list of floats, optional (default None) Choices for the constant that scales the overall regularization term. This parameter is mutually exclusive with the `n_alphas` parameter. If None or a list of floats is provided, then the best value will be selected by cross-validation. n_alphas : int, optional (default 10) Generate this number of alphas per regularization path. This parameter is mutually exclusive with the `alphas` parameter. eps : float, optional (default 1e-3) Length of the path. For example, ``eps=1e-3`` means that ``alpha_min / alpha_max = 1e-3``. mask : filename, niimg, NiftiMasker instance, optional (default None) Mask to be used on data. If an instance of masker is passed, then its mask will be used. If no mask is given, it will be computed automatically by a MultiNiftiMasker with default parameters. target_affine : 3x3 or 4x4 matrix, optional (default None) This parameter is passed to image.resample_img. Please see the related documentation for details. target_shape : 3-tuple of integers, optional (default None) This parameter is passed to image.resample_img. Please see the related documentation for details. low_pass : False or float, optional (default None) This parameter is passed to signal.clean. Please see the related documentation for details. high_pass : False or float, optional (default None) This parameter is passed to signal.clean. Please see the related documentation for details. t_r : float, optional (default None) This parameter is passed to signal.clean. Please see the related documentation for details. screening_percentile : float in the interval [0, 100]; optional (default 20) Percentile value for ANOVA univariate feature selection. A value of 100 means 'keep all features'. This percentile is expressed w.r.t. the volume of a standard (MNI152) brain, and so is corrected at runtime by premultiplying it with the ratio of the volume of the mask of the data and the volume of a standard brain. If 100 is given, all the features are used, regardless of the number of voxels. standardize : bool, optional (default True) If set, then we'll center the data (X, y) to have mean zero along axis 0.
This is here because nearly all linear models will want their data to be centered. fit_intercept : bool, optional (default True) Fit or not an intercept. max_iter : int (default 1000) Defines the iterations for the solver. tol : float Defines the tolerance for convergence. Defaults to 1e-4. verbose : int, optional (default 1) Verbosity level. n_jobs : int, optional (default 1) Number of jobs in solving the sub-problems. memory: instance of joblib.Memory or string Used to cache the masking process. By default, no caching is done. If a string is given, it is the path to the caching directory. memory_level: integer, optional (default 1) Rough estimator of the amount of memory used by caching. Higher value means more memory for caching. cv : int, a cv generator instance, or None (default 8) The input specifying which cross-validation generator to use. It can be an integer, in which case it is the number of folds in a KFold, None, in which case 3 fold is used, or another object, that will then be used as a cv generator. debias : bool, optional (default False) If set, then the estimated weights maps will be debiased. Attributes ---------- `alpha_` : float Best alpha found by cross-validation. `coef_` : array, shape = [n_classes-1, n_features] Coefficient of the features in the decision function. `masker_` : instance of NiftiMasker The nifti masker used to mask the data. `mask_img_` : Nifti like image The mask of the data. If no mask was given at masker creation, contains the automatically computed mask. `intercept_` : array, shape = [n_classes-1] Intercept (a.k.a. bias) added to the decision function. It is available only when parameter intercept is set to True. `cv_` : list of pairs of lists Each pair are the list of indices for the train and test samples for the corresponding fold. `cv_scores_` : 2d array of shape (n_alphas, n_folds) Scores (misclassification) for each alpha, and on each fold. `screening_percentile_` : float Screening percentile corrected according to volume of mask, relative to the volume of standard brain. """ def __init__(self, penalty="graph-net", loss="logistic", l1_ratios=.5, alphas=None, n_alphas=10, mask=None, target_affine=None, target_shape=None, low_pass=None, high_pass=None, t_r=None, max_iter=1000, tol=1e-4, memory=Memory(None), memory_level=1, standardize=True, verbose=1, n_jobs=1, eps=1e-3, cv=8, fit_intercept=True, screening_percentile=20., debias=False): super(SpaceNetClassifier, self).__init__( penalty=penalty, is_classif=True, l1_ratios=l1_ratios, alphas=alphas, n_alphas=n_alphas, target_shape=target_shape, low_pass=low_pass, high_pass=high_pass, mask=mask, t_r=t_r, max_iter=max_iter, tol=tol, memory=memory, memory_level=memory_level, n_jobs=n_jobs, eps=eps, cv=cv, debias=debias, fit_intercept=fit_intercept, standardize=standardize, screening_percentile=screening_percentile, loss=loss, target_affine=target_affine, verbose=verbose) def _binarize_y(self, y): """Helper function invoked just before fitting a classifier.""" y = np.array(y) # encode target classes as -1 and 1 self._enc = LabelBinarizer(pos_label=1, neg_label=-1) y = self._enc.fit_transform(y) self.classes_ = self._enc.classes_ self.n_classes_ = len(self.classes_) return y def score(self, X, y): """Returns the mean accuracy on the given test data and labels. Parameters ---------- X : list of Niimg-like objects See http://nilearn.github.io/manipulating_images/input_output.html Data on which model is to be fitted. If this is a list, the affine is considered the same for all. 
y : array or list of length n_samples. Labels. Returns ------- score : float Mean accuracy of self.predict(X) w.r.t. y. """ return accuracy_score(y, self.predict(X)) class SpaceNetRegressor(BaseSpaceNet): """Regression learners with sparsity and spatial priors. `SpaceNetRegressor` implements Graph-Net and TV-L1 priors / penalties for regression problems. Thus, the penalty is a sum of an L1 term and a spatial term. The aim of such a hybrid prior is to obtain weights maps which are structured (due to the spatial prior) and sparse (enforced by the L1 norm). Parameters ---------- penalty : string, optional (default 'graph-net') Penalty to be used in the model. Can be 'graph-net' or 'tv-l1'. l1_ratios : float or list of floats in the interval [0, 1]; optional (default .5) Constant that mixes L1 and spatial prior terms in penalization. l1_ratio == 1 corresponds to pure LASSO. The larger the value of this parameter, the sparser the estimated weights map. If a list is provided, then the best value will be selected by cross-validation. alphas : float or list of floats, optional (default None) Choices for the constant that scales the overall regularization term. This parameter is mutually exclusive with the `n_alphas` parameter. If None or a list of floats is provided, then the best value will be selected by cross-validation. n_alphas : int, optional (default 10) Generate this number of alphas per regularization path. This parameter is mutually exclusive with the `alphas` parameter. eps : float, optional (default 1e-3) Length of the path. For example, ``eps=1e-3`` means that ``alpha_min / alpha_max = 1e-3``. mask : filename, niimg, NiftiMasker instance, optional (default None) Mask to be used on data. If an instance of masker is passed, then its mask will be used. If no mask is given, it will be computed automatically by a MultiNiftiMasker with default parameters. target_affine : 3x3 or 4x4 matrix, optional (default None) This parameter is passed to image.resample_img. Please see the related documentation for details. target_shape : 3-tuple of integers, optional (default None) This parameter is passed to image.resample_img. Please see the related documentation for details. low_pass : False or float, optional (default None) This parameter is passed to signal.clean. Please see the related documentation for details. high_pass : False or float, optional (default None) This parameter is passed to signal.clean. Please see the related documentation for details. t_r : float, optional (default None) This parameter is passed to signal.clean. Please see the related documentation for details. screening_percentile : float in the interval [0, 100]; optional (default 20) Percentile value for ANOVA univariate feature selection. A value of 100 means 'keep all features'. This percentile is expressed w.r.t. the volume of a standard (MNI152) brain, and so is corrected at runtime to correspond to the volume of the user-supplied mask (which is typically smaller). standardize : bool, optional (default True) If set, then we'll center the data (X, y) to have mean zero along axis 0. This is here because nearly all linear models will want their data to be centered. fit_intercept : bool, optional (default True) Whether or not to fit an intercept. max_iter : int (default 1000) Maximum number of iterations for the solver. tol : float Defines the tolerance for convergence. Defaults to 1e-4. verbose : int, optional (default 1) Verbosity level. n_jobs : int, optional (default 1) Number of jobs used in solving the sub-problems.
memory: instance of joblib.Memory or string Used to cache the masking process. By default, no caching is done. If a string is given, it is the path to the caching directory. memory_level: integer, optional (default 1) Rough estimator of the amount of memory used by caching. Higher value means more memory for caching. cv : int, a cv generator instance, or None (default 8) The input specifying which cross-validation generator to use. It can be an integer, in which case it is the number of folds in a KFold, None, in which case 3 fold is used, or another object, that will then be used as a cv generator. debias: bool, optional (default False) If set, then the estimated weights maps will be debiased. Attributes ---------- `alpha_` : float Best alpha found by cross-validation `coef_` : array, shape = [n_classes-1, n_features] Coefficient of the features in the decision function. `masker_` : instance of NiftiMasker The nifti masker used to mask the data. `mask_img_` : Nifti like image The mask of the data. If no mask was given at masker creation, contains the automatically computed mask. `intercept_` : array, shape = [n_classes-1] Intercept (a.k.a. bias) added to the decision function. It is available only when parameter intercept is set to True. `cv_scores_` : 2d array of shape (n_alphas, n_folds) Scores (misclassification) for each alpha, and on each fold `screening_percentile_` : float Screening percentile corrected according to volume of mask, relative to the volume of standard brain. """ def __init__(self, penalty="graph-net", l1_ratios=.5, alphas=None, n_alphas=10, mask=None, target_affine=None, target_shape=None, low_pass=None, high_pass=None, t_r=None, max_iter=1000, tol=1e-4, memory=Memory(None), memory_level=1, standardize=True, verbose=1, n_jobs=1, eps=1e-3, cv=8, fit_intercept=True, screening_percentile=20., debias=False): super(SpaceNetRegressor, self).__init__( penalty=penalty, is_classif=False, l1_ratios=l1_ratios, alphas=alphas, n_alphas=n_alphas, target_shape=target_shape, low_pass=low_pass, high_pass=high_pass, mask=mask, t_r=t_r, max_iter=max_iter, tol=tol, memory=memory, memory_level=memory_level, n_jobs=n_jobs, eps=eps, cv=cv, debias=debias, fit_intercept=fit_intercept, standardize=standardize, screening_percentile=screening_percentile, target_affine=target_affine, verbose=verbose) PKH"nilearn/decoding/tests/__init__.pyPKHapKK1nilearn/decoding/tests/simulate_graph_net_data.py""" Simple code to simulate data """ import numpy as np from scipy import linalg, ndimage from sklearn.utils import check_random_state def create_graph_net_simulation_data( snr=1., n_samples=200, size=8, n_points=10, random_state=42, task="regression", smooth_X=1): """ Function to generate data """ generator = check_random_state(random_state) # Coefs w = np.zeros((size, size, size)) for _ in range(n_points): point = (generator.randint(0, size), generator.randint(0, size), generator.randint(0, size)) w[point] = 1.0 mask = np.ones((size, size, size), dtype=np.bool) w = ndimage.gaussian_filter(w, sigma=1) w = w[mask] # Generate smooth background noise XX = generator.randn(n_samples, size, size, size) noise = [] for i in range(n_samples): Xi = ndimage.filters.gaussian_filter(XX[i, :, :, :], smooth_X) Xi = Xi[mask] noise.append(Xi) noise = np.array(noise) # Generate the signal y if task == "regression": y = generator.randn(n_samples) elif task == "classification": y = np.ones(n_samples) y[0::2] = -1 X = np.dot(y[:, np.newaxis], w[np.newaxis]) norm_noise = linalg.norm(X, 2) / np.exp(snr / 20.) 
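# Editorial note on the scaling step above: norm_noise = ||X||_2 / exp(snr / 20.)
# uses the natural-log decibel convention that the snr recomputation a few
# lines below also uses (snr = 20 * np.log(||X||_2 / ||noise||_2), with
# np.log being the natural logarithm). Once `noise` is rescaled so that
# linalg.norm(noise, 2) equals norm_noise, the realized ratio
# ||X||_2 / ||noise||_2 is exactly exp(snr / 20.), so the recomputed snr
# reproduces the requested one (e.g. snr=1. gives a ratio of
# exp(0.05) ~= 1.051).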
noise_coef = norm_noise / linalg.norm(noise, 2) noise *= noise_coef snr = 20 * np.log(linalg.norm(X, 2) / linalg.norm(noise, 2)) # Mixing of signal + noise and splitting into train/test X += noise X -= X.mean(axis=-1)[:, np.newaxis] X /= X.std(axis=-1)[:, np.newaxis] return X, y, w, mask PKHh $nilearn/decoding/tests/test_fista.pyfrom nose.tools import assert_equal, assert_true import numpy as np from nilearn.decoding.fista import mfista from nilearn.decoding.proximal_operators import _prox_l1 from nilearn.decoding.objective_functions import ( _squared_loss, _logistic, _squared_loss_grad, _logistic_loss_lipschitz_constant, spectral_norm_squared) from nilearn.decoding.fista import _check_lipschitz_continuous def test_logistic_lipschitz(n_samples=4, n_features=2, random_state=42): rng = np.random.RandomState(random_state) for scaling in np.logspace(-3, 3, num=7): X = rng.randn(n_samples, n_features) * scaling y = rng.randn(n_samples) n_features = X.shape[1] L = _logistic_loss_lipschitz_constant(X) _check_lipschitz_continuous(lambda w: _logistic( X, y, w), n_features + 1, L) def test_squared_loss_lipschitz(n_samples=4, n_features=2, random_state=42): rng = np.random.RandomState(random_state) for scaling in np.logspace(-3, 3, num=7): X = rng.randn(n_samples, n_features) * scaling y = rng.randn(n_samples) n_features = X.shape[1] L = spectral_norm_squared(X) _check_lipschitz_continuous(lambda w: _squared_loss_grad( X, y, w), n_features, L) def test_input_args_and_kwargs(): rng = np.random.RandomState(42) p = 125 noise_std = 1e-1 sig = np.zeros(p) sig[[0, 2, 13, 4, 25, 32, 80, 89, 91, 93, -1]] = 1 sig[:6] = 2 sig[-7:] = 2 sig[60:75] = 1 y = sig + noise_std * rng.randn(*sig.shape) X = np.eye(p) mask = np.ones((p,)).astype(np.bool) alpha = .01 alpha_ = alpha * X.shape[0] l1_ratio = .2 l1_weight = alpha_ * l1_ratio f1 = lambda w: _squared_loss(X, y, w, compute_grad=False) f1_grad = lambda w: _squared_loss(X, y, w, compute_grad=True, compute_energy=False) f2_prox = lambda w, l, *args, **kwargs: (_prox_l1(w, l * l1_weight), dict(converged=True)) total_energy = lambda w: f1(w) + l1_weight * np.sum(np.abs(w)) for cb_retval in [0, 1]: for verbose in [0, 1]: for dgap_factor in [1., None]: best_w, objective, init = mfista( f1_grad, f2_prox, total_energy, 1., p, dgap_factor=dgap_factor, callback=lambda _: cb_retval, verbose=verbose, max_iter=100) assert_equal(best_w.shape, mask.shape) assert_true(isinstance(objective, list)) assert_true(isinstance(init, dict)) for key in ["w", "t", "dgap_tol", "stepsize"]: assert_true(key in init) PKH>&&(nilearn/decoding/tests/test_graph_net.pyfrom nose.tools import assert_true import numpy as np import scipy as sp from numpy.testing import assert_almost_equal from sklearn.utils import extmath from sklearn.utils import check_random_state from nilearn.decoding.objective_functions import _gradient, _div from nilearn.decoding.space_net_solvers import ( _graph_net_data_function, _graph_net_adjoint_data_function, _squared_loss_and_spatial_grad, _logistic_data_loss_and_spatial_grad, _squared_loss_and_spatial_grad_derivative, _logistic_data_loss_and_spatial_grad_derivative, _squared_loss_derivative_lipschitz_constant, _logistic_derivative_lipschitz_constant, mfista) from nilearn.decoding.space_net import BaseSpaceNet # Data used in almost all tests import nibabel from .test_same_api import to_niimgs from .simulate_graph_net_data import create_graph_net_simulation_data def _make_data(task="regression", size=4): X, y, w, mask = create_graph_net_simulation_data( snr=1., n_samples=10, 
size=size, n_points=5, random_state=42, task=task) X_, _ = to_niimgs(X, [size] * 3) mask_ = nibabel.Nifti1Image(mask.astype(np.float), X_.get_affine()) return X, y, w, mask, mask_, X_ X, y, w, mask, mask_, X_ = _make_data() def get_gradient_matrix(w_size, mask): """ Given a number of features and a mask (which has the property mask[mask==True].size == w_size) computes a matrix G such that for a w vector we have np.dot(G, w) == gradient(w_masked)[mask] """ grad_matrix = np.zeros((mask.ndim * w_size, w_size)) grad_mask = np.array([mask for _ in range(mask.ndim)]) image_buffer = np.zeros(mask.shape) for i in range(w_size): base_vector = np.zeros(w_size) base_vector[i] = 1 image_buffer[mask] = base_vector gradient_column = _gradient(image_buffer)[grad_mask] grad_matrix[:, i] = gradient_column return grad_matrix def test_grad_matrix(): """Test for matricial form of gradient""" rng = check_random_state(42) G = get_gradient_matrix(w.size, mask) image_buffer = np.zeros(mask.shape) grad_mask = np.array([mask for _ in range(mask.ndim)]) for _ in range(10): v = rng.rand(w.size) * rng.randint(1000) image_buffer[mask] = v assert_almost_equal(_gradient(image_buffer)[grad_mask], np.dot(G, v)) def test_adjointness(size=4): """Tests for adjointness between gradient and div operators""" rng = check_random_state(42) for _ in range(3): image_1 = rng.rand(size, size, size) image_2 = rng.rand(3, size, size, size) Axdoty = np.dot((_gradient(image_1).ravel()), image_2.ravel()) xdotAty = np.dot((_div(image_2).ravel()), image_1.ravel()) assert_almost_equal(Axdoty, - xdotAty) def test_identity_adjointness(size=4): """Tests adjointess between _graph_net_data_function and _graph_net_adjoint_data_function, with identity design matrix""" rng = check_random_state(42) # A mask full of ones mask = np.ones((size, size, size), dtype=np.bool) # But with some zeros mask[0:3, 0:3, 0:3] = 0 adjoint_mask = np.array([mask for _ in range(mask.ndim)]) n_samples = np.sum(mask) X = np.eye(n_samples) l1_ratio = 0.5 for _ in range(10): x = rng.rand(np.sum(mask)) y = rng.rand(n_samples + np.sum(mask) * mask.ndim) Axdoty = np.dot(_graph_net_data_function(X, x, mask, l1_ratio), y) xdotAty = np.dot(_graph_net_adjoint_data_function( X, y, adjoint_mask, l1_ratio), x) assert_almost_equal(Axdoty, xdotAty) def test_operators_adjointness(size=4): """The same as test_identity_adjointness, but with generic design matrix""" rng = check_random_state(42) # A mask full of ones mask = np.ones((size, size, size), dtype=np.bool) # But with some zeros mask[0:3, 0:3, 0:3] = 0 adjoint_mask = np.array([mask for _ in range(mask.ndim)]) n_samples = 200 X = rng.rand(n_samples, np.sum(mask)) l1_ratio = 0.5 for _ in range(10): x = rng.rand(np.sum(mask)) y = rng.rand(n_samples + np.sum(mask) * mask.ndim) Axdoty = np.dot(_graph_net_data_function(X, x, mask, l1_ratio), y) xdotAty = np.dot(_graph_net_adjoint_data_function( X, y, adjoint_mask, l1_ratio), x) np.testing.assert_almost_equal(Axdoty, xdotAty) def test__squared_loss_gradient_at_simple_points(): """Tests gradient of data loss function in points near to zero. 
This is a not so hard test, just for detecting big errors""" X, y, w, mask = create_graph_net_simulation_data(n_samples=10, size=4) grad_weight = 1 func = lambda w: _squared_loss_and_spatial_grad(X, y, w, mask, grad_weight) func_grad = lambda w: _squared_loss_and_spatial_grad_derivative( X, y, w, mask, grad_weight) for i in range(0, w.size, 2): point = np.zeros(*w.shape) point[i] = 1 assert_almost_equal(sp.optimize.check_grad(func, func_grad, point), 0, decimal=3) def test_logistic_gradient_at_simple_points(): # Tests gradient of logistic data loss function in points near to zero. # This is a not so hard test, just for detecting big errors X, y, w, mask = create_graph_net_simulation_data(n_samples=10, size=4) grad_weight = 1 # Add the intercept w = np.append(w, 0) func = lambda w: _logistic_data_loss_and_spatial_grad( X, y, w, mask, grad_weight) func_grad = lambda w: _logistic_data_loss_and_spatial_grad_derivative( X, y, w, mask, grad_weight) for i in range(0, w.size, 7): point = np.zeros(*w.shape) point[i] = 1 assert_almost_equal(sp.optimize.check_grad(func, func_grad, point), 0, decimal=3) def test__squared_loss_derivative_lipschitz_constant(): # Tests Lipschitz-continuity of the derivative of _squared_loss loss # function rng = check_random_state(42) grad_weight = 2.08e-1 lipschitz_constant = _squared_loss_derivative_lipschitz_constant( X, mask, grad_weight) for _ in range(20): x_1 = rng.rand(*w.shape) * rng.randint(1000) x_2 = rng.rand(*w.shape) * rng.randint(1000) gradient_difference = extmath.norm( _squared_loss_and_spatial_grad_derivative(X, y, x_1, mask, grad_weight) - _squared_loss_and_spatial_grad_derivative(X, y, x_2, mask, grad_weight)) point_difference = extmath.norm(x_1 - x_2) assert_true( gradient_difference <= lipschitz_constant * point_difference) def test_logistic_derivative_lipschitz_constant(): # Tests Lipschitz-continuity of of the derivative of logistic loss rng = check_random_state(42) grad_weight = 2.08e-1 lipschitz_constant = _logistic_derivative_lipschitz_constant( X, mask, grad_weight) for _ in range(20): x_1 = rng.rand((w.shape[0] + 1)) * rng.randint(1000) x_2 = rng.rand((w.shape[0] + 1)) * rng.randint(1000) gradient_difference = extmath.norm( _logistic_data_loss_and_spatial_grad_derivative( X, y, x_1, mask, grad_weight) - _logistic_data_loss_and_spatial_grad_derivative( X, y, x_2, mask, grad_weight)) point_difference = extmath.norm(x_1 - x_2) assert_true( gradient_difference <= lipschitz_constant * point_difference) def test_max_alpha__squared_loss(): """Tests that models with L1 regularization over the theoretical bound are full of zeros, for logistic regression""" l1_ratios = np.linspace(0.1, 1, 3) reg = BaseSpaceNet(mask=mask_, max_iter=10, penalty="graph-net", is_classif=False) for l1_ratio in l1_ratios: reg.l1_ratios = l1_ratio reg.alphas = np.max(np.dot(X.T, y)) / l1_ratio reg.fit(X_, y) assert_almost_equal(reg.coef_, 0.) def test_tikhonov_regularization_vs_graph_net(): # Test for one of the extreme cases of Graph-Net: That is, with # l1_ratio = 0 (pure Smooth), we compare Graph-Net's performance # with the analytical solution for Tikhonov Regularization # XXX A small dataset here (this test is very lengthy) G = get_gradient_matrix(w.size, mask) optimal_model = np.dot(sp.linalg.pinv( np.dot(X.T, X) + y.size * np.dot(G.T, G)), np.dot(X.T, y)) graph_net = BaseSpaceNet( mask=mask_, alphas=1. 
* X.shape[0], l1_ratios=0., max_iter=400, fit_intercept=False, screening_percentile=100., standardize=False) graph_net.fit(X_, y.copy()) coef_ = graph_net.coef_[0] graph_net_perf = 0.5 / y.size * extmath.norm( np.dot(X, coef_) - y) ** 2\ + 0.5 * extmath.norm(np.dot(G, coef_)) ** 2 optimal_model_perf = 0.5 / y.size * extmath.norm( np.dot(X, optimal_model) - y) ** 2\ + 0.5 * extmath.norm(np.dot(G, optimal_model)) ** 2 assert_almost_equal(graph_net_perf, optimal_model_perf, decimal=1) def test_mfista_solver_graph_net_no_l1_term(): w = np.zeros(2) X = np.array([[1, 0], [0, 4]]) y = np.array([-10, 20]) f1 = lambda w: 0.5 * np.dot(np.dot(X, w) - y, np.dot(X, w) - y) f1_grad = lambda w: np.dot(X.T, np.dot(X, w) - y) f2_prox = lambda w, l, *args, **kwargs: (w, dict(converged=True)) lipschitz_constant = _squared_loss_derivative_lipschitz_constant( X, (np.eye(2) == 1).astype(np.bool), 1) estimate_solution, _, _ = mfista( f1_grad, f2_prox, f1, lipschitz_constant, w.size, tol=1e-8) solution = np.array([-10, 5]) assert_almost_equal(estimate_solution, solution, decimal=4) PKHɌ  2nilearn/decoding/tests/test_objective_functions.py""" Test module for functions related to cost functions (including penalties). """ import numpy as np from scipy.optimize import check_grad from sklearn.utils import check_random_state from nilearn.decoding.objective_functions import ( _gradient_id, _logistic, _div_id, _logistic_loss_grad, _unmask) from nilearn.decoding.space_net import BaseSpaceNet from nose.tools import raises def test_grad_div_adjoint_arbitrary_ndim(size=5, max_ndim=5): # We need to check the adjointness relation <D x, y> = <x, -div y> # for x and y random vectors (D being the gradient-with-identity operator) rng = check_random_state(42) for ndim in range(1, max_ndim): shape = tuple([size] * ndim) x = rng.normal(size=shape) y = rng.normal(size=[ndim + 1] + list(shape)) for l1_ratio in [0., .1, .3, .5, .7, .9, 1.]: np.testing.assert_almost_equal( np.sum(_gradient_id(x, l1_ratio=l1_ratio) * y), -np.sum(x * _div_id(y, l1_ratio=l1_ratio))) def test_1D__gradient_id(): for size in [1, 2, 10]: img = np.arange(size) for l1_ratio in [0., .1, .3, .5, .7, .9, 1.]: gid = _gradient_id(img, l1_ratio=l1_ratio) np.testing.assert_array_equal( gid.shape, [img.ndim + 1] + list(img.shape)) np.testing.assert_array_equal(l1_ratio * img, gid[-1]) def test_2D__gradient_id(): img = np.array([[1, 3], [4, 2]]) for l1_ratio in [0., .1, .3, .5, .7, .9, 1.]: gid = _gradient_id(img, l1_ratio) np.testing.assert_array_equal( gid.shape, [img.ndim + 1] + list(img.shape)) np.testing.assert_array_equal(l1_ratio * img, gid[-1]) def test_3D__gradient_id(): img = np.array([[1, 3], [4, 2], [1, 0]]) for l1_ratio in [0., .1, .3, .5, .7, .9, 1.]: gid = _gradient_id(img, l1_ratio) np.testing.assert_array_equal( gid.shape, [img.ndim + 1] + list(img.shape)) def test_logistic_loss_derivative(n_samples=4, n_features=10, decimal=5): rng = np.random.RandomState(42) X = rng.randn(n_samples, n_features) y = rng.randn(n_samples) n_features = X.shape[1] w = rng.randn(n_features + 1) np.testing.assert_almost_equal(check_grad( lambda w: _logistic(X, y, w), lambda w: _logistic_loss_grad(X, y, w), w), 0., decimal=decimal) np.testing.assert_almost_equal(check_grad( lambda w: _logistic(X, y, w), lambda w: _logistic_loss_grad(X, y, w), w), 0., decimal=decimal) def test_grad_div_adjoint_arbitrary_ndim_(): for size in [3, 4, 5]: test_grad_div_adjoint_arbitrary_ndim(size=size) @raises(ValueError) def test_baseestimator_invalid_l1_ratio(): BaseSpaceNet(l1_ratios=2.)
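# Editorial sketch: the adjointness identity exercised by
# test_grad_div_adjoint_arbitrary_ndim above, spelled out in the simplest
# 1-D case (uses only names already imported in this module; shapes follow
# the convention that _gradient_id returns ndim + 1 slots):
#
#     rng = check_random_state(0)
#     x = rng.normal(size=(7,))           # a 1-D "image"
#     y = rng.normal(size=(2, 7))         # ndim + 1 = 2 gradient slots
#     lhs = np.sum(_gradient_id(x, l1_ratio=.5) * y)   # <D x, y>
#     rhs = -np.sum(x * _div_id(y, l1_ratio=.5))       # <x, -div y>
#     np.testing.assert_almost_equal(lhs, rhs)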
def test_unmask(size=5): rng = check_random_state(42) for ndim in range(1, 4): shape = [size] * ndim mask = np.zeros(shape).astype(np.bool) mask[rng.rand(*shape) > .8] = 1 support = rng.randn(mask.sum()) full = _unmask(support, mask) np.testing.assert_array_equal(full.shape, shape) np.testing.assert_array_equal(full[mask], support) PKHs%('nilearn/decoding/tests/test_same_api.py""" Make sure all models are using the same low-level API (for computing image gradients, loss functions, etc.). """ from nose.tools import nottest, assert_equal, assert_true import numpy as np import nibabel from sklearn.datasets import load_iris from sklearn.utils import check_random_state from nilearn.decoding.objective_functions import ( _squared_loss, _squared_loss_grad, _logistic_loss_lipschitz_constant, spectral_norm_squared, _unmask) from nilearn.decoding.space_net_solvers import ( _squared_loss_and_spatial_grad, _logistic_derivative_lipschitz_constant, _squared_loss_derivative_lipschitz_constant, _graph_net_squared_loss, _graph_net_logistic, _squared_loss_and_spatial_grad_derivative, tvl1_solver) from nilearn.decoding.space_net import (BaseSpaceNet, SpaceNetClassifier, SpaceNetRegressor) def _make_data(rng=None, masked=False, dim=(2, 2, 2)): if rng is None: rng = check_random_state(42) mask = np.ones(dim).astype(np.bool) mask[rng.rand() < .7] = 0 w = np.zeros(dim) w[dim[0] // 2:, dim[1] // 2:, :dim[2] // 2] = 1 n = 5 X = np.ones([n] + list(dim)) X += rng.randn(*X.shape) y = np.dot([x[mask] for x in X], w[mask]) if masked: X = np.array([x[mask] for x in X]) w = w[mask] else: X = np.rollaxis(X, 0, start=4) assert_equal(X.shape[-1], n) return X, y, w, mask def to_niimgs(X, dim): p = np.prod(dim) assert_equal(len(dim), 3) assert_true(X.shape[-1] <= p) mask = np.zeros(p).astype(np.bool) mask[:X.shape[-1]] = 1 assert_equal(mask.sum(), X.shape[1]) mask = mask.reshape(dim) X = np.rollaxis(np.array([_unmask(x, mask) for x in X]), 0, start=4) affine = np.eye(4) return nibabel.Nifti1Image(X, affine), nibabel.Nifti1Image( mask.astype(np.float), affine) def test_same_energy_calculus_pure_lasso(): rng = check_random_state(42) X, y, w, mask = _make_data(rng=rng, masked=True) # check funcvals f1 = _squared_loss(X, y, w) f2 = _squared_loss_and_spatial_grad(X, y, w.ravel(), mask, 0.) assert_equal(f1, f2) # check derivatives g1 = _squared_loss_grad(X, y, w) g2 = _squared_loss_and_spatial_grad_derivative(X, y, w.ravel(), mask, 0.) np.testing.assert_array_equal(g1, g2) def test_lipschitz_constant_loss_mse(): rng = check_random_state(42) X, _, w, mask = _make_data(rng=rng, masked=True) l1_ratio = 1. alpha = .1 mask = np.ones(X.shape[1]).astype(np.bool) grad_weight = alpha * X.shape[0] * (1. - l1_ratio) a = _squared_loss_derivative_lipschitz_constant(X, mask, grad_weight) b = spectral_norm_squared(X) np.testing.assert_almost_equal(a, b) def test_lipschitz_constant_loss_logreg(): rng = check_random_state(42) X, _, w, mask = _make_data(rng=rng, masked=True) l1_ratio = 1. alpha = .1 grad_weight = alpha * X.shape[0] * (1. - l1_ratio) a = _logistic_derivative_lipschitz_constant(X, mask, grad_weight) b = _logistic_loss_lipschitz_constant(X) assert_equal(a, b) def test_graph_net_and_tvl1_same_for_pure_l1(max_iter=100, decimal=2): ############################################################### # graph_net_solver and tvl1_solver should give same results # when l1_ratio = 1.
############################################################### X, y, _, mask = _make_data() alpha = .1 unmasked_X = np.rollaxis(X, -1, start=0) unmasked_X = np.array([x[mask] for x in unmasked_X]) # results should be exactly the same for pure lasso a = tvl1_solver(unmasked_X, y, alpha, 1., mask, loss="mse", max_iter=max_iter)[0] b = _graph_net_squared_loss(unmasked_X, y, alpha, 1., max_iter=max_iter, mask=mask)[0] mask = nibabel.Nifti1Image(mask.astype(np.float), np.eye(4)) X = nibabel.Nifti1Image(X.astype(np.float), np.eye(4)) for standardize in [True, False]: sl = BaseSpaceNet( alphas=alpha, l1_ratios=1., mask=mask, penalty="graph-net", max_iter=max_iter, standardize=standardize).fit(X, y) tvl1 = BaseSpaceNet( alphas=alpha, l1_ratios=1., mask=mask, penalty="tv-l1", max_iter=max_iter, standardize=standardize).fit(X, y) # Should be exactly the same (except for numerical errors). # However because of the TV-L1 prox approx, results might be 'slightly' # different. np.testing.assert_array_almost_equal(a, b, decimal=decimal) np.testing.assert_array_almost_equal(sl.coef_, tvl1.coef_, decimal=decimal) def test_graph_net_and_tvl1_same_for_pure_l1_logistic(max_iter=20, decimal=2): ############################################################### # graph_net_solver and tvl1_solver should give same results # when l1_ratio = 1. ############################################################### iris = load_iris() X, y = iris.data, iris.target y = y > 0. alpha = 1. / X.shape[0] X_, mask_ = to_niimgs(X, (2, 2, 2)) mask = mask_.get_data().astype(np.bool).ravel() # results should be exactly the same for pure lasso a = _graph_net_logistic(X, y, alpha, 1., mask=mask, max_iter=max_iter)[0] b = tvl1_solver(X, y, alpha, 1., loss="logistic", mask=mask, max_iter=max_iter)[0] for standardize in [True, False]: sl = SpaceNetClassifier( alphas=alpha, l1_ratios=1., max_iter=max_iter, mask=mask_, penalty="graph-net", standardize=standardize).fit( X_, y) tvl1 = SpaceNetClassifier( alphas=alpha, l1_ratios=1., max_iter=max_iter, mask=mask_, penalty="tv-l1", standardize=standardize).fit( X_, y) # should be exactly the same (except for numerical errors) np.testing.assert_array_almost_equal(a, b, decimal=decimal) np.testing.assert_array_almost_equal(sl.coef_[0], tvl1.coef_[0], decimal=decimal) def test_graph_net_and_tv_same_for_pure_l1_another_test(decimal=1): ############################################################### # graph_net_solver and tvl1_solver should give same results # when l1_ratio = 1. ############################################################### dim = (3, 3, 3) X, y, _, mask = _make_data(masked=True, dim=dim) X, mask = to_niimgs(X, dim) alpha = .1 l1_ratio = 1. 
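# Editorial note: the pure-L1 equivalence this family of tests relies on
# follows from the penalty mixing -- the spatial term (Graph-Net's smooth
# quadratic or TV-L1's total variation) enters with weight (1 - l1_ratio),
# as in grad_weight = alpha * X.shape[0] * (1. - l1_ratio) elsewhere in
# this file -- so at l1_ratio = 1. both estimators minimize the same
# lasso-type objective and should agree up to solver tolerance (hence the
# loose `decimal`).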
max_iter = 20 for standardize in [True, False]: sl = BaseSpaceNet(alphas=alpha, l1_ratios=l1_ratio, penalty="graph-net", max_iter=max_iter, mask=mask, is_classif=False, standardize=standardize, verbose=0).fit(X, y) tvl1 = BaseSpaceNet(alphas=alpha, l1_ratios=l1_ratio, penalty="tv-l1", max_iter=max_iter, mask=mask, is_classif=False, standardize=standardize, verbose=0).fit(X, y) # should be exactly the same (except for numerical errors) np.testing.assert_array_almost_equal(sl.coef_, tvl1.coef_, decimal=decimal) def test_coef_shape(): iris = load_iris() X, y = iris.data, iris.target X, mask = to_niimgs(X, (2, 2, 2)) for penalty in ["graph-net", "tv-l1"]: for cls in [SpaceNetRegressor, SpaceNetClassifier]: model = cls( mask=mask, max_iter=3, penalty=penalty, alphas=1.).fit(X, y) assert_equal(model.coef_.ndim, 2) @nottest def test_w_shapes(): """Test that solvers handle w of same shape (during callbacks, etc.).""" pass PKHȶ*nilearn/decoding/tests/test_searchlight.py""" Test the searchlight module """ # Author: Alexandre Abraham # License: simplified BSD from nose.tools import assert_equal import numpy as np import nibabel from nilearn.decoding import searchlight def test_searchlight(): # Create a toy dataset to run searchlight on # Initialize with 4x4x4 scans of random values on 30 frames rand = np.random.RandomState(0) frames = 30 data = rand.rand(5, 5, 5, frames) mask = np.ones((5, 5, 5), np.bool) mask_img = nibabel.Nifti1Image(mask.astype(np.int), np.eye(4)) # Create a condition array cond = np.arange(frames, dtype=int) > frames // 2 # Create an activation pixel. data[2, 2, 2, :] = 0 data[2, 2, 2][cond.astype(np.bool)] = 2 data_img = nibabel.Nifti1Image(data, np.eye(4)) # Define cross validation from sklearn.cross_validation import check_cv # avoid using KFold for compatibility with sklearn 0.10-0.13 cv = check_cv(4, cond) n_jobs = 1 # Run Searchlight with different radii # Small radius : only one pixel is selected sl = searchlight.SearchLight(mask_img, process_mask_img=mask_img, radius=0.5, n_jobs=n_jobs, scoring='accuracy', cv=cv) sl.fit(data_img, cond) assert_equal(np.where(sl.scores_ == 1)[0].size, 1) assert_equal(sl.scores_[2, 2, 2], 1.) # Medium radius : little ball selected sl = searchlight.SearchLight(mask_img, process_mask_img=mask_img, radius=1, n_jobs=n_jobs, scoring='accuracy', cv=cv) sl.fit(data_img, cond) assert_equal(np.where(sl.scores_ == 1)[0].size, 7) assert_equal(sl.scores_[2, 2, 2], 1.) assert_equal(sl.scores_[1, 2, 2], 1.) assert_equal(sl.scores_[2, 1, 2], 1.) assert_equal(sl.scores_[2, 2, 1], 1.) assert_equal(sl.scores_[3, 2, 2], 1.) assert_equal(sl.scores_[2, 3, 2], 1.) assert_equal(sl.scores_[2, 2, 3], 1.) # Big radius : big ball selected sl = searchlight.SearchLight(mask_img, process_mask_img=mask_img, radius=2, n_jobs=n_jobs, scoring='accuracy', cv=cv) sl.fit(data_img, cond) assert_equal(np.where(sl.scores_ == 1)[0].size, 33) assert_equal(sl.scores_[2, 2, 2], 1.) PKHZMM4nilearn/decoding/tests/test_sklearn_compatibility.pyfrom nilearn.decoding.space_net import BaseSpaceNet from nose.tools import assert_true import traceback def test_get_params(): # Issue #12 (on github) reported that our objects # get_params() methods returned empty dicts. 
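# Editorial sketch of the contract exercised below: a scikit-learn
# compatible estimator must expose every constructor argument through
# get_params(). A minimal standalone check (values are illustrative only):
#
#     m = BaseSpaceNet(mask='dummy', penalty='graph-net', is_classif=False)
#     params = m.get_params()
#     assert 'max_iter' in params and params['penalty'] == 'graph-net'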
for penalty in ["graph-net", "tv-l1"]: for is_classif in [True, False]: kwargs = {} for param in ["max_iter", "alphas", "l1_ratios", "verbose", "tol", "mask", "memory", "fit_intercept", "alphas"]: m = BaseSpaceNet( mask='dummy', penalty=penalty, is_classif=is_classif, **kwargs) try: params = m.get_params() except AttributeError: if "get_params" in traceback.format_exc(): params = m._get_params() else: raise assert_true(param in params, msg="%s doesn't have parameter '%s'." % ( m, param)) PKH3!nilearn/decoding/tests/test_tv.pyfrom nose.tools import assert_equal, assert_raises import numpy as np from nilearn.decoding.objective_functions import _gradient_id, _squared_loss from nilearn.decoding.space_net_solvers import ( _tvl1_objective, _tvl1_objective_from_gradient, tvl1_solver) def test_tvl1_from_gradient(size=5, n_samples=10, random_state=42): rng = np.random.RandomState(random_state) shape = [size] * 3 n_voxels = np.prod(shape) X = rng.randn(n_samples, n_voxels) y = rng.randn(n_samples) w = rng.randn(*shape) mask = np.ones_like(w).astype(np.bool) for alpha in [0., 1e-1, 1e-3]: for l1_ratio in [0., .5, 1.]: gradid = _gradient_id(w, l1_ratio=l1_ratio) assert_equal(_tvl1_objective( X, y, w.copy().ravel(), alpha, l1_ratio, mask), _squared_loss(X, y, w.copy().ravel(), compute_grad=False ) + alpha * _tvl1_objective_from_gradient( gradid)) def test_tvl1_objective_raises_value_error_if_invalid_loss(): assert_raises(ValueError, lambda loss: _tvl1_objective( None, None, None, None, None, None, loss=loss), "invalidloss") def test_tvl1_solver_raises_value_error_if_invalid_loss(): assert_raises(ValueError, lambda loss: tvl1_solver( np.array([[1]]), None, None, None, None, loss=loss), "invalidloss") PKHn˝))(nilearn/decoding/tests/test_operators.pyimport itertools from nose.tools import assert_true import numpy as np from nilearn.decoding.proximal_operators import _prox_l1, _prox_tvl1 def test_prox_l1_nonexpansiveness(n_features=10): rng = np.random.RandomState(42) x = rng.randn(n_features, 1) tau = .3 s = _prox_l1(x.copy(), tau) p = x - s # projection + shrinkage = id # We should have ||s(a) - s(b)||^2 <= ||a - b||^2 - ||p(a) - p(b)||^2 # for all a and b (this is strong non-expansiveness for (a, b), (pa, pb), (sa, sb) in zip(*[itertools.product(z[0], z[0]) for z in [x, p, s]]): assert_true((sa - sb) ** 2 <= (a - b) ** 2 - (pa - pb) ** 2) def test_prox_tvl1_approximates_prox_l1_for_lasso(size=15, random_state=42, decimal=4, dgap_tol=1e-7): rng = np.random.RandomState(random_state) l1_ratio = 1. 
# pure LASSO for ndim in range(3, 4): shape = [size] * ndim z = rng.randn(*shape) for weight in np.logspace(-10, 10, num=10): # use prox_tvl1 approximation to prox_l1 a = _prox_tvl1(z.copy(), weight=weight, l1_ratio=l1_ratio, dgap_tol=dgap_tol, max_iter=10)[0][-1].ravel() # use exact closed-form soft shrinkage formula for prox_l1 b = _prox_l1(z.copy(), weight)[-1].ravel() # results should be close in l-infinity norm np.testing.assert_almost_equal(np.abs(a - b).max(), 0., decimal=decimal) PKHo44(nilearn/decoding/tests/test_space_net.pyimport os import warnings import itertools from functools import partial from nose import SkipTest from nose.tools import (assert_equal, assert_true, assert_false, assert_raises) import numpy as np import nibabel from sklearn.datasets import load_iris from sklearn.utils import extmath from sklearn.linear_model import Lasso from sklearn.utils import check_random_state from sklearn.linear_model import LogisticRegression from sklearn.metrics import accuracy_score from nilearn._utils.testing import assert_raises_regex, assert_warns from nilearn.decoding.space_net import ( _EarlyStoppingCallback, _space_net_alpha_grid, MNI152_BRAIN_VOLUME, path_scores, BaseSpaceNet, _crop_mask, _univariate_feature_screening, _get_mask_volume, SpaceNetClassifier, SpaceNetRegressor, _adjust_screening_percentile) from nilearn.decoding.space_net_solvers import (_graph_net_logistic, _graph_net_squared_loss) mni152_brain_mask = ( "/usr/share/fsl/data/standard/MNI152_T1_1mm_brain_mask.nii.gz") logistic_path_scores = partial(path_scores, is_classif=True) squared_loss_path_scores = partial(path_scores, is_classif=False) # Data used in almost all tests from .test_same_api import to_niimgs size = 4 from .simulate_graph_net_data import create_graph_net_simulation_data X_, y, w, mask = create_graph_net_simulation_data( snr=1., n_samples=10, size=size, n_points=5, random_state=42) X, mask = to_niimgs(X_, [size] * 3) def test_space_net_alpha_grid(n_samples=4, n_features=3): rng = check_random_state(42) X = rng.randn(n_samples, n_features) y = np.arange(n_samples) for l1_ratio, is_classif in itertools.product([.5, 1.], [True, False]): alpha_max = np.max(np.abs(np.dot(X.T, y))) / l1_ratio np.testing.assert_almost_equal(_space_net_alpha_grid( X, y, n_alphas=1, l1_ratio=l1_ratio, logistic=is_classif), alpha_max) for l1_ratio, is_classif in itertools.product([.5, 1.], [True, False]): alpha_max = np.max(np.abs(np.dot(X.T, y))) / l1_ratio for n_alphas in range(1, 10): alphas = _space_net_alpha_grid( X, y, n_alphas=n_alphas, l1_ratio=l1_ratio, logistic=is_classif) np.testing.assert_almost_equal(alphas.max(), alpha_max) np.testing.assert_almost_equal(n_alphas, len(alphas)) def test_space_net_alpha_grid_same_as_sk(): try: from sklearn.linear_model.coordinate_descent import _alpha_grid iris = load_iris() X = iris.data y = iris.target np.testing.assert_almost_equal(_space_net_alpha_grid( X, y, n_alphas=5), X.shape[0] * _alpha_grid(X, y, n_alphas=5, fit_intercept=False)) except ImportError: raise SkipTest def test_early_stopping_callback_object(n_samples=10, n_features=30): # This test evolves w so that every line of the _EarlyStoppingCallback # code is executed at some point. This is a kind of code fuzzing.
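# Editorial note: "fuzzing" here means driving the callback through enough
# distinct weight vectors that every branch is taken at least once -- the
# warm-up skip (counter <= 20), the periodic check (counter % 10 == 2), and
# both the improving and stagnating score paths; the jitter and the
# periodic reset of w below are what force the rarely-taken branches.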
rng = check_random_state(42) X_test = rng.randn(n_samples, n_features) y_test = np.dot(X_test, np.ones(n_features)) w = np.zeros(n_features) escb = _EarlyStoppingCallback(X_test, y_test, False) for counter in range(50): k = min(counter, n_features - 1) w[k] = 1 # jitter if k > 0 and rng.rand() > .9: w[k - 1] = 1 - w[k - 1] escb(dict(w=w, counter=counter)) assert_equal(len(escb.test_scores), counter + 1) # restart if counter > 20: w *= 0. def test_params_correctly_propagated_in_constructors(): for (penalty, is_classif, n_alphas, l1_ratio, n_jobs, cv, perc) in itertools.product(["graph-net", "tv-l1"], [True, False], [.1, .01], [.5, 1.], [1, -1], [2, 3], [5, 10]): cvobj = BaseSpaceNet( mask="dummy", n_alphas=n_alphas, n_jobs=n_jobs, l1_ratios=l1_ratio, cv=cv, screening_percentile=perc, penalty=penalty, is_classif=is_classif) assert_equal(cvobj.n_alphas, n_alphas) assert_equal(cvobj.l1_ratios, l1_ratio) assert_equal(cvobj.n_jobs, n_jobs) assert_equal(cvobj.cv, cv) assert_equal(cvobj.screening_percentile, perc) def test_screening_space_net(): screening_percentile = assert_warns(UserWarning, _adjust_screening_percentile, 10, mask) # Here we gave a very small mask, judging by brain-size standards, so # the screening_percentile_ corrected for brain size should be 100% assert_equal(screening_percentile, 100) def test_logistic_path_scores(): iris = load_iris() X, y = iris.data, iris.target _, mask = to_niimgs(X, [2, 2, 2]) mask = mask.get_data().astype(np.bool) alphas = [1., .1, .01] test_scores, best_w = logistic_path_scores( _graph_net_logistic, X, y, mask, alphas, .5, np.arange(len(X)), np.arange(len(X)), {})[:2] test_scores = test_scores[0] assert_equal(len(test_scores), len(alphas)) assert_equal(X.shape[1] + 1, len(best_w)) def test_squared_loss_path_scores(): iris = load_iris() X, y = iris.data, iris.target _, mask = to_niimgs(X, [2, 2, 2]) mask = mask.get_data().astype(np.bool) alphas = [1., .1, .01] test_scores, best_w = squared_loss_path_scores( _graph_net_squared_loss, X, y, mask, alphas, .5, np.arange(len(X)), np.arange(len(X)), {})[:2] test_scores = test_scores[0] assert_equal(len(test_scores), len(alphas)) assert_equal(X.shape[1] + 1, len(best_w)) def test_tv_regression_simple(): rng = check_random_state(42) dim = (4, 4, 4) W_init = np.zeros(dim) W_init[2:3, 1:2, -2:] = 1 n = 10 p = np.prod(dim) X = np.ones((n, 1)) + W_init.ravel().T X += rng.randn(n, p) y = np.dot(X, W_init.ravel()) X, mask = to_niimgs(X, dim) print("%s %s" % (X.shape, mask.get_data().sum())) alphas = [.1, 1.] for l1_ratio in [1.]: for debias in [True]: BaseSpaceNet(mask=mask, alphas=alphas, l1_ratios=l1_ratio, penalty="tv-l1", is_classif=False, max_iter=10, debias=debias).fit(X, y) def test_tv_regression_3D_image_doesnt_crash(): rng = check_random_state(42) dim = (3, 4, 5) W_init = np.zeros(dim) W_init[2:3, 3:, 1:3] = 1 n = 10 p = dim[0] * dim[1] * dim[2] X = np.ones((n, 1)) + W_init.ravel().T X += rng.randn(n, p) y = np.dot(X, W_init.ravel()) alpha = 1. X, mask = to_niimgs(X, dim) for l1_ratio in [0., .5, 1.]: BaseSpaceNet(mask=mask, alphas=alpha, l1_ratios=l1_ratio, penalty="tv-l1", is_classif=False, max_iter=10).fit(X, y) def test_graph_net_classifier_score(): iris = load_iris() X, y = iris.data, iris.target y = 2 * (y > 0) - 1 X_, mask = to_niimgs(X, (2, 2, 2)) gnc = SpaceNetClassifier(mask=mask, alphas=1.
/ .01 / X.shape[0], l1_ratios=1., tol=1e-10, standardize=False, verbose=0, screening_percentile=100.).fit(X_, y) accuracy = gnc.score(X_, y) assert_equal(accuracy, accuracy_score(y, gnc.predict(X_))) def test_log_reg_vs_graph_net_two_classes_iris(C=.01, tol=1e-10, zero_thr=1e-4): # Test for one of the extreme cases of Graph-Net: That is, with # l1_ratio = 1 (pure Lasso), we compare Graph-Net's coefficients' # performance with the coefficients obtained from Scikit-Learn's # LogisticRegression, with L1 penalty, in a two-class classification task iris = load_iris() X, y = iris.data, iris.target y = 2 * (y > 0) - 1 X_, mask = to_niimgs(X, (2, 2, 2)) tvl1 = SpaceNetClassifier( mask=mask, alphas=1. / C / X.shape[0], l1_ratios=1., tol=tol, verbose=0, max_iter=1000, penalty="tv-l1", standardize=False, screening_percentile=100.).fit(X_, y) sklogreg = LogisticRegression(penalty="l1", fit_intercept=True, tol=tol, C=C).fit(X, y) # compare supports np.testing.assert_array_equal((np.abs(tvl1.coef_) < zero_thr), (np.abs(sklogreg.coef_) < zero_thr)) # compare predictions np.testing.assert_array_equal(tvl1.predict(X_), sklogreg.predict(X)) def test_lasso_vs_graph_net(): # Test for one of the extreme cases of Graph-Net: That is, with # l1_ratio = 1 (pure Lasso), we compare Graph-Net's performance with # Scikit-Learn lasso lasso = Lasso(max_iter=100, tol=1e-8, normalize=False) graph_net = BaseSpaceNet(mask=mask, alphas=1. * X_.shape[0], l1_ratios=1, is_classif=False, penalty="graph-net", max_iter=100) lasso.fit(X_, y) graph_net.fit(X, y) lasso_perf = 0.5 / y.size * extmath.norm(np.dot( X_, lasso.coef_) - y) ** 2 + np.sum(np.abs(lasso.coef_)) graph_net_perf = 0.5 * ((graph_net.predict(X) - y) ** 2).mean() np.testing.assert_almost_equal(graph_net_perf, lasso_perf, decimal=3) def test_params_correctly_propagated_in_constructors_biz(): for penalty, is_classif, alpha, l1_ratio in itertools.product( ["graph-net", "tv-l1"], [True, False], [.4, .01], [.5, 1.]): cvobj = BaseSpaceNet( mask="dummy", penalty=penalty, is_classif=is_classif, alphas=alpha, l1_ratios=l1_ratio) assert_equal(cvobj.alphas, alpha) assert_equal(cvobj.l1_ratios, l1_ratio) def test_crop_mask(): rng = np.random.RandomState(42) mask = np.zeros((3, 4, 5), dtype=np.bool) box = mask[:2, :3, :4] box[rng.rand(*box.shape) < .3] = 1 # mask covers 30% of brain idx = np.where(mask) assert_true(idx[1].max() < 3) tight_mask = _crop_mask(mask) assert_equal(mask.sum(), tight_mask.sum()) assert_true(np.prod(tight_mask.shape) <= np.prod(box.shape)) def test_univariate_feature_screening(dim=(11, 12, 13), n_samples=10): rng = np.random.RandomState(42) mask = rng.rand(*dim) > 100. / np.prod(dim) assert_true(mask.sum() >= 100.) mask[dim[0] // 2, dim[1] // 3:, -dim[2] // 2:] = 1 # put spatial structure n_features = mask.sum() X = rng.randn(n_samples, n_features) w = rng.randn(n_features) w[rng.rand(n_features) > .8] = 0. y = X.dot(w) for is_classif in [True, False]: X_, mask_, support_ = _univariate_feature_screening( X, y, mask, is_classif, 20.)
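# _univariate_feature_screening returns the screened design matrix, the
# correspondingly reduced mask and a boolean support vector over the
# original in-mask features (as the assertions below rely on).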
n_features_ = support_.sum() assert_equal(X_.shape[1], n_features_) assert_equal(mask_.sum(), n_features_) assert_true(n_features_ <= n_features) def test_get_mask_volume(): # Test that hard-coded standard mask volume can be correctly computed if os.path.isfile(mni152_brain_mask): assert_equal(MNI152_BRAIN_VOLUME, _get_mask_volume(nibabel.load( mni152_brain_mask))) else: warnings.warn("Couldn't find %s (for testing)" % ( mni152_brain_mask)) def test_space_net_classifier_subclass(): for penalty, alpha, l1_ratio, verbose in itertools.product( ["graph-net", "tv-l1"], [.4, .01], [.5, 1.], [True, False]): cvobj = SpaceNetClassifier( mask="dummy", penalty=penalty, alphas=alpha, l1_ratios=l1_ratio, verbose=verbose) assert_equal(cvobj.alphas, alpha) assert_equal(cvobj.l1_ratios, l1_ratio) def test_space_net_regressor_subclass(): for penalty, alpha, l1_ratio, verbose in itertools.product( ["graph-net", "tv-l1"], [.4, .01], [.5, 1.], [True, False]): cvobj = SpaceNetRegressor( mask="dummy", penalty=penalty, alphas=alpha, l1_ratios=l1_ratio, verbose=verbose) assert_equal(cvobj.alphas, alpha) assert_equal(cvobj.l1_ratios, l1_ratio) def test_space_net_alpha_grid_pure_spatial(): rng = check_random_state(42) X = rng.randn(10, 100) y = np.arange(X.shape[0]) for is_classif in [True, False]: assert_false(np.any(np.isnan(_space_net_alpha_grid( X, y, l1_ratio=0., logistic=is_classif)))) def test_string_params_case(): # penalty assert_raises(ValueError, BaseSpaceNet, penalty='TV-L1') assert_raises(ValueError, BaseSpaceNet, penalty='Graph-Net') def test_crop_mask_empty_mask(): assert_raises_regex(ValueError, "Empty mask:.", _crop_mask, np.array([])) assert_raises_regex(ValueError, "Empty mask:", _crop_mask, np.zeros((2, 2, 2))) def test_space_net_no_crash_not_fitted(): """Regression test.""" iris = load_iris() X, y = iris.data, iris.target X, mask = to_niimgs(X, [2, 2, 2]) for model in [SpaceNetRegressor, SpaceNetClassifier]: assert_raises_regex(RuntimeError, "This %s instance is not fitted yet" % ( model.__name__), model().predict, X) model(mask=mask, alphas=1.).fit(X, y).predict(X) def test_space_net_one_alpha_no_crash(): """Regression test.""" iris = load_iris() X, y = iris.data, iris.target X, mask = to_niimgs(X, [2, 2, 2]) for model in [SpaceNetRegressor, SpaceNetClassifier]: model(n_alphas=1, mask=mask).fit(X, y) model(alphas=None, n_alphas=2, mask=mask).fit(X, y) nilearn/input_data/__init__.py """ The :mod:`nilearn.input_data` module includes scikit-learn transformers and tools to preprocess neuro-imaging data.
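For illustration, a minimal sketch ('func.nii.gz' is a hypothetical file name):

>>> from nilearn.input_data import NiftiMasker  # doctest: +SKIP
>>> masker = NiftiMasker(standardize=True)  # doctest: +SKIP
>>> data = masker.fit_transform('func.nii.gz')  # doctest: +SKIP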
""" from .nifti_masker import NiftiMasker from .multi_nifti_masker import MultiNiftiMasker from .nifti_labels_masker import NiftiLabelsMasker from .nifti_maps_masker import NiftiMapsMasker from .nifti_spheres_masker import NiftiSpheresMasker __all__ = ['NiftiMasker', 'MultiNiftiMasker', 'NiftiLabelsMasker', 'NiftiMapsMasker', 'NiftiSpheresMasker'] PKH0^( 'nilearn/input_data/masker_validation.pyimport warnings import numpy as np from .._utils.class_inspect import get_params from .multi_nifti_masker import MultiNiftiMasker from .nifti_masker import NiftiMasker def check_embedded_nifti_masker(estimator, multi_subject=True): """Base function for using a masker within a BaseEstimator class This creates a masker from instance parameters : - If instance contains a mask image in mask parameter, we use this image as new masker mask_img, forwarding instance parameters to new masker : smoothing_fwhm, standardize, detrend, low_pass= high_pass, t_r, target_affine, target_shape, mask_strategy, mask_args, - If instance contains a masker in mask parameter, we use a copy of this masker, overriding all instance masker related parameters. In all case, we forward system parameters of instance to new masker : memory, memory_level, verbose, n_jobs Parameters ---------- instance: object, instance of BaseEstimator The object that gives us the values of the parameters multi_subject: boolean Indicates whether to return a MultiNiftiMasker or a NiftiMasker (the default is True) Returns ------- masker: MultiNiftiMasker or NiftiMasker New masker """ masker_type = MultiNiftiMasker if multi_subject else NiftiMasker estimator_params = get_params(masker_type, estimator) mask = getattr(estimator, 'mask', None) if isinstance(mask, (NiftiMasker, MultiNiftiMasker)): # Creating (Multi)NiftiMasker from provided masker masker_params = get_params(masker_type, mask) new_masker_params = masker_params else: # Creating (Multi)NiftiMasker # with parameters extracted from estimator new_masker_params = estimator_params new_masker_params['mask_img'] = mask # Forwarding system parameters of instance to new masker in all case if multi_subject and hasattr(estimator, 'n_jobs'): # For MultiNiftiMasker only new_masker_params['n_jobs'] = estimator.n_jobs new_masker_params['memory'] = estimator.memory new_masker_params['memory_level'] = max(0, estimator.memory_level - 1) new_masker_params['verbose'] = estimator.verbose # Raising warning if masker override parameters conflict_string = "" for param_key in sorted(estimator_params): if np.any(new_masker_params[param_key] != estimator_params[param_key]): conflict_string += ("Parameter {0} :\n" " Masker parameter {1}" " - overriding estimator parameter {2}\n" ).format(param_key, new_masker_params[param_key], estimator_params[param_key]) if conflict_string != "": warn_str = ("Overriding provided-default estimator parameters with" " provided masker parameters :\n" "{0:s}").format(conflict_string) warnings.warn(warn_str) masker = masker_type(**new_masker_params) # Forwarding potential attribute of provided masker if hasattr(mask, 'mask_img_'): # Allow free fit of returned mask masker.mask_img = mask.mask_img_ return masker PKIpHCm4z!z!!nilearn/input_data/base_masker.py""" Transformer used to apply basic transformations on MRI data. """ # Author: Gael Varoquaux, Alexandre Abraham # License: simplified BSD import warnings import abc import numpy as np from sklearn.base import BaseEstimator, TransformerMixin from sklearn.externals.joblib import Memory from .. import masking from .. import image from .. 
import signal from .. import _utils from .._utils.cache_mixin import CacheMixin, cache from .._utils.class_inspect import enclosing_scope_name from .._utils.compat import _basestring def filter_and_extract(imgs, extraction_function, parameters, memory_level=0, memory=Memory(cachedir=None), verbose=0, confounds=None, copy=True): """Extract representative time series using given function. Parameters ---------- imgs: 3D/4D Niimg-like object Images to be masked. Can be 3-dimensional or 4-dimensional. extraction_function: function Function used to extract the time series from 4D data. This function should take images as its argument and return a tuple containing a 2D array of masked signals along with an auxiliary value used if returning a second value is needed. If any other parameter is needed, a functor or a partial function must be provided. For all other parameters refer to NiftiMasker documentation. Returns ------- signals: 2D numpy array Signals extracted using the extraction function. It is a scikit-learn friendly 2D array with shape n_samples x n_features. """ # Since the calling class can be any *Nifti*Masker, we look for exact type if verbose > 0: class_name = enclosing_scope_name(stack_level=10) # If we have a string (filename), we won't need to copy, as # there will be no side effect if isinstance(imgs, _basestring): copy = False if verbose > 0: print("[%s] Loading data from %s" % ( class_name, _utils._repr_niimgs(imgs)[:200])) imgs = _utils.check_niimg(imgs, atleast_4d=True, ensure_ndim=4) sample_mask = parameters.get('sample_mask') if sample_mask is not None: imgs = image.index_img(imgs, sample_mask) target_shape = parameters.get('target_shape') target_affine = parameters.get('target_affine') if target_shape is not None or target_affine is not None: if verbose > 0: print("[%s] Resampling images" % class_name) imgs = cache( image.resample_img, memory, func_memory_level=2, memory_level=memory_level, ignore=['copy'])( imgs, interpolation="continuous", target_shape=target_shape, target_affine=target_affine, copy=copy) smoothing_fwhm = parameters.get('smoothing_fwhm') if smoothing_fwhm is not None: if verbose > 0: print("[%s] Smoothing images" % class_name) imgs = cache( image.smooth_img, memory, func_memory_level=2, memory_level=memory_level)( imgs, parameters['smoothing_fwhm']) if verbose > 0: print("[%s] Extracting region signals" % class_name) region_signals, aux = cache(extraction_function, memory, func_memory_level=2, memory_level=memory_level)(imgs) # Temporal # -------- # Detrending (optional) # Filtering # Confound removal (from CSV file or numpy array) # Normalizing if verbose > 0: print("[%s] Cleaning extracted signals" % class_name) sessions = parameters.get('sessions') region_signals = cache( signal.clean, memory=memory, func_memory_level=2, memory_level=memory_level)( region_signals, detrend=parameters['detrend'], standardize=parameters['standardize'], t_r=parameters['t_r'], low_pass=parameters['low_pass'], high_pass=parameters['high_pass'], confounds=confounds, sessions=sessions) return region_signals, aux class BaseMasker(BaseEstimator, TransformerMixin, CacheMixin): """Base class for NiftiMaskers """ @abc.abstractmethod def transform_single_imgs(self, imgs, confounds=None, copy=True): """Extract signals from a single 4D niimg. Parameters ---------- imgs: 3D/4D Niimg-like object See http://nilearn.github.io/manipulating_images/input_output.html. Images to process. It must boil down to a 4D image with scans number as last dimension.
confounds: CSV file or array-like, optional This parameter is passed to signal.clean. Please see the related documentation for details. shape: (number of scans, number of confounds) Returns ------- region_signals: 2D numpy.ndarray Signal for each element. shape: (number of scans, number of elements) """ raise NotImplementedError() def transform(self, imgs, confounds=None): """Apply mask, spatial and temporal preprocessing Parameters ---------- imgs: 3D/4D Niimg-like object See http://nilearn.github.io/manipulating_images/input_output.html. Images to process. It must boil down to a 4D image with scans number as last dimension. confounds: CSV file or array-like, optional This parameter is passed to signal.clean. Please see the related documentation for details. shape: (number of scans, number of confounds) Returns ------- region_signals: 2D numpy.ndarray Signal for each element. shape: (number of scans, number of elements) """ self._check_fitted() return self.transform_single_imgs(imgs, confounds) def fit_transform(self, X, y=None, confounds=None, **fit_params): """Fit to data, then transform it Parameters ---------- X : Niimg-like object See http://nilearn.github.io/manipulating_images/input_output.html. y : numpy array of shape [n_samples] Target values. confounds: list of confounds, optional List of confounds (2D arrays or filenames pointing to CSV files). Must be of the same length as imgs_list. Returns ------- X_new : numpy array of shape [n_samples, n_features_new] Transformed array. """ # non-optimized default implementation; override when a better # method is possible for a given clustering algorithm if y is None: # fit method of arity 1 (unsupervised transformation) if self.mask_img is None: return self.fit(X, **fit_params ).transform(X, confounds=confounds) else: return self.fit(**fit_params).transform(X, confounds=confounds) else: # fit method of arity 2 (supervised transformation) if self.mask_img is None: return self.fit(X, y, **fit_params ).transform(X, confounds=confounds) else: warnings.warn('[%s.fit] Generation of a mask has been' ' requested (y != None) while a mask has' ' been provided at masker creation. Given mask' ' will be used.' % self.__class__.__name__) return self.fit(**fit_params).transform(X, confounds=confounds) def inverse_transform(self, X): """ Transform the 2D data matrix back to an image in brain space. """ self._check_fitted() img = self._cache(masking.unmask)(X, self.mask_img_) # Be robust against memmapping that will create read-only arrays in # internal structures of the header: remove the memmapped array try: img._header._structarr = np.array(img._header._structarr).copy() except Exception: pass return img def _check_fitted(self): if not hasattr(self, "mask_img_"): raise ValueError('It seems that %s has not been fitted. ' 'You must call fit() before calling transform().' % self.__class__.__name__) nilearn/input_data/multi_nifti_masker.py """ Transformer used to apply basic transformations on multi-subject MRI data. """ # Author: Gael Varoquaux, Alexandre Abraham # License: simplified BSD import warnings import collections import itertools from sklearn.externals.joblib import Memory, Parallel, delayed from .. import masking from .. import image from ..
import _utils from .._utils import CacheMixin from .nifti_masker import NiftiMasker, filter_and_mask from .._utils.compat import _basestring, izip from .._utils.niimg_conversions import _iter_check_niimg from .._utils.class_inspect import get_params class MultiNiftiMasker(NiftiMasker, CacheMixin): """Class for masking of Niimg-like objects. MultiNiftiMasker is useful when dealing with image sets from multiple subjects. Use case: integrates well with decomposition by MultiPCA and CanICA (multi-subject models) Parameters ---------- mask_img: Niimg-like object See http://nilearn.github.io/manipulating_images/input_output.html. Mask of the data. If not given, a mask is computed in the fit step. Optional parameters can be set using mask_args and mask_strategy to fine tune the mask extraction. smoothing_fwhm: float, optional If smoothing_fwhm is not None, it gives the size in millimeters of the spatial smoothing to apply to the signal. standardize: boolean, optional If standardize is True, the time-series are centered and normed: their mean is put to 0 and their variance to 1 in the time dimension. detrend: boolean, optional This parameter is passed to signal.clean. Please see the related documentation for details low_pass: False or float, optional This parameter is passed to signal.clean. Please see the related documentation for details high_pass: False or float, optional This parameter is passed to signal.clean. Please see the related documentation for details t_r: float, optional This parameter is passed to signal.clean. Please see the related documentation for details target_affine: 3x3 or 4x4 matrix, optional This parameter is passed to image.resample_img. Please see the related documentation for details. target_shape: 3-tuple of integers, optional This parameter is passed to image.resample_img. Please see the related documentation for details. mask_strategy: {'background' or 'epi'}, optional The strategy used to compute the mask: use 'background' if your images present a clear homogeneous background, and 'epi' if they are raw EPI images. Depending on this value, the mask will be computed from masking.compute_background_mask or masking.compute_epi_mask. Default is 'background'. mask_args : dict, optional If mask is None, these are additional parameters passed to masking.compute_background_mask or masking.compute_epi_mask to fine-tune mask computation. Please see the related documentation for details. memory: instance of joblib.Memory or string Used to cache the masking process. By default, no caching is done. If a string is given, it is the path to the caching directory. memory_level: integer, optional Rough estimator of the amount of memory used by caching. Higher value means more memory for caching. n_jobs: integer, optional The number of CPUs to use to do the computation. -1 means 'all CPUs', -2 'all CPUs but one', and so on. verbose: integer, optional Indicate the level of verbosity. By default, nothing is printed Attributes ---------- `mask_img_` : nibabel.Nifti1Image object The mask of the data. `affine_` : 4x4 numpy.ndarray Affine of the transformed image. 
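Examples
--------
A minimal usage sketch (illustrative only; the file names are hypothetical):

>>> from nilearn.input_data import MultiNiftiMasker  # doctest: +SKIP
>>> masker = MultiNiftiMasker(standardize=True, n_jobs=2)  # doctest: +SKIP
>>> signals = masker.fit_transform(['subj_01.nii.gz', 'subj_02.nii.gz'])  # doctest: +SKIP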
See Also -------- nilearn.image.resample_img: image resampling nilearn.masking.compute_epi_mask: mask computation nilearn.masking.apply_mask: mask application on image nilearn.signal.clean: confounds removal and general filtering of signals """ def __init__(self, mask_img=None, smoothing_fwhm=None, standardize=False, detrend=False, low_pass=None, high_pass=None, t_r=None, target_affine=None, target_shape=None, mask_strategy='background', mask_args=None, memory=Memory(cachedir=None), memory_level=0, n_jobs=1, verbose=0 ): # Mask is provided or computed self.mask_img = mask_img self.smoothing_fwhm = smoothing_fwhm self.standardize = standardize self.detrend = detrend self.low_pass = low_pass self.high_pass = high_pass self.t_r = t_r self.target_affine = target_affine self.target_shape = target_shape self.mask_strategy = mask_strategy self.mask_args = mask_args self.memory = memory self.memory_level = memory_level self.n_jobs = n_jobs self.verbose = verbose def fit(self, imgs=None, y=None): """Compute the mask corresponding to the data Parameters ---------- imgs: list of Niimg-like objects See http://nilearn.github.io/manipulating_images/input_output.html. Data on which the mask must be calculated. If this is a list, the affine is considered the same for all. """ # Load data (if filenames are given, load them) if self.verbose > 0: print("[%s.fit] Loading data from %s" % ( self.__class__.__name__, _utils._repr_niimgs(imgs)[:200])) # Compute the mask if not given by the user if self.mask_img is None: if self.verbose > 0: print("[%s.fit] Computing mask" % self.__class__.__name__) if not isinstance(imgs, collections.Iterable) \ or isinstance(imgs, _basestring): raise ValueError("[%s.fit] For multiple processing, you should" " provide a list of data " "(e.g. Nifti1Image objects or filenames)." " %r is an invalid input" % (self.__class__.__name__, imgs)) mask_args = (self.mask_args if self.mask_args is not None else {}) if self.mask_strategy == 'background': compute_mask = masking.compute_multi_background_mask elif self.mask_strategy == 'epi': compute_mask = masking.compute_multi_epi_mask else: raise ValueError("Unknown value of mask_strategy '%s'. " "Acceptable values are 'background' and 'epi'." % self.mask_strategy) self.mask_img_ = self._cache(compute_mask, ignore=['n_jobs', 'verbose', 'memory'])( imgs, target_affine=self.target_affine, target_shape=self.target_shape, n_jobs=self.n_jobs, memory=self.memory, verbose=max(0, self.verbose - 1), **mask_args) else: if imgs is not None: warnings.warn('[%s.fit] Generation of a mask has been' ' requested (imgs != None) while a mask has' ' been provided at masker creation. Given mask' ' will be used.' % self.__class__.__name__) self.mask_img_ = _utils.check_niimg_3d(self.mask_img) # If resampling is requested, resample the mask as well. # Resampling: allows the user to change the affine, the shape or both. if self.verbose > 0: print("[%s.fit] Resampling mask" % self.__class__.__name__) self.mask_img_ = self._cache(image.resample_img)( self.mask_img_, target_affine=self.target_affine, target_shape=self.target_shape, interpolation='nearest', copy=False) if self.target_affine is not None: self.affine_ = self.target_affine else: self.affine_ = self.mask_img_.get_affine() # Load data in memory self.mask_img_.get_data() return self def transform_imgs(self, imgs_list, confounds=None, copy=True, n_jobs=1): """Prepare multi-subject data in parallel Parameters ---------- imgs_list: list of Niimg-like objects See http://nilearn.github.io/manipulating_images/input_output.html.
List of image files to prepare. One item per subject. confounds: list of confounds, optional List of confounds (2D arrays or filenames pointing to CSV files). Must be of the same length as imgs_list. copy: boolean, optional If True, guarantees that output array has no memory in common with input array. n_jobs: integer, optional The number of CPUs to use to do the computation. -1 means 'all CPUs'. Returns ------- region_signals: list of 2D numpy.ndarray List of signal for each element per subject. shape: list of (number of scans, number of elements) """ if not hasattr(self, 'mask_img_'): raise ValueError('It seems that %s has not been fitted. ' 'You must call fit() before calling transform().' % self.__class__.__name__) target_fov = None if self.target_affine is None: # Force resampling on first image target_fov = 'first' niimg_iter = _iter_check_niimg(imgs_list, ensure_ndim=None, atleast_4d=False, target_fov=target_fov, memory=self.memory, memory_level=self.memory_level, verbose=self.verbose) if confounds is None: confounds = itertools.repeat(None, len(imgs_list)) # Ignore the mask-computing params: they are not useful and will # just invalidate the cache for no good reason # target_shape and target_affine are conveyed implicitly in mask_img params = get_params(self.__class__, self, ignore=['mask_img', 'mask_args', 'mask_strategy', 'copy']) func = self._cache(filter_and_mask, ignore=['verbose', 'memory', 'memory_level', 'copy']) data = Parallel(n_jobs=n_jobs)( delayed(func)(imgs, self.mask_img_, params, memory_level=self.memory_level, memory=self.memory, verbose=self.verbose, confounds=cfs, copy=copy) for imgs, cfs in izip(niimg_iter, confounds)) return [d[0] for d in data] def transform(self, imgs, confounds=None): """ Apply mask, spatial and temporal preprocessing Parameters ---------- imgs: list of Niimg-like objects See http://nilearn.github.io/manipulating_images/input_output.html. Data to be preprocessed confounds: CSV file path or 2D matrix This parameter is passed to signal.clean. Please see the corresponding documentation for details. Returns ------- data: {list of numpy arrays} preprocessed images """ self._check_fitted() if not hasattr(imgs, '__iter__')\ or isinstance(imgs, _basestring): return self.transform_single_imgs(imgs, confounds) return self.transform_imgs(imgs, confounds, n_jobs=self.n_jobs) nilearn/input_data/nifti_labels_masker.py """ Transformer for computing ROI signals. """ import numpy as np from sklearn.externals.joblib import Memory from .. import _utils from .._utils import logger, CacheMixin, _compose_err_msg from .._utils.class_inspect import get_params from .._utils.niimg_conversions import _check_same_fov from .. import masking from .. import image from .base_masker import filter_and_extract, BaseMasker class _ExtractionFunctor(object): func_name = 'nifti_labels_masker_extractor' def __init__(self, _resampled_labels_img_, background_label): self._resampled_labels_img_ = _resampled_labels_img_ self.background_label = background_label def __call__(self, imgs): from ..regions import signal_extraction return signal_extraction.img_to_signals_labels( imgs, self._resampled_labels_img_, background_label=self.background_label) class NiftiLabelsMasker(BaseMasker, CacheMixin): """Class for masking of Niimg-like objects. NiftiLabelsMasker is useful when data from non-overlapping volumes should be extracted (contrary to NiftiMapsMasker). Use case: Summarize brain signals from clusters that were obtained by prior K-means or Ward clustering.
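For example, a minimal sketch (illustrative only; 'atlas.nii.gz' and 'fmri.nii.gz' are hypothetical file names):

>>> from nilearn.input_data import NiftiLabelsMasker  # doctest: +SKIP
>>> masker = NiftiLabelsMasker(labels_img='atlas.nii.gz', standardize=True)  # doctest: +SKIP
>>> time_series = masker.fit_transform('fmri.nii.gz')  # doctest: +SKIP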
Parameters ---------- labels_img: Niimg-like object See http://nilearn.github.io/manipulating_images/input_output.html. Region definitions, as one image of labels. background_label: number, optional Label used in labels_img to represent background. mask_img: Niimg-like object, optional See http://nilearn.github.io/manipulating_images/input_output.html. Mask to apply to regions before extracting signals. smoothing_fwhm: float, optional If smoothing_fwhm is not None, it gives the full-width half maximum in millimeters of the spatial smoothing to apply to the signal. standardize: boolean, optional If standardize is True, the time-series are centered and normed: their mean is put to 0 and their variance to 1 in the time dimension. detrend: boolean, optional This parameter is passed to signal.clean. Please see the related documentation for details low_pass: False or float, optional This parameter is passed to signal.clean. Please see the related documentation for details high_pass: False or float, optional This parameter is passed to signal.clean. Please see the related documentation for details t_r: float, optional This parameter is passed to signal.clean. Please see the related documentation for details resampling_target: {"data", "labels", None}, optional. Defines which image gives the final shape/size. For example, if `resampling_target` is "data", the atlas is resampled to the shape of the data if needed. If it is "labels" then mask_img and images provided to fit() are resampled to the shape and affine of labels_img. "None" means no resampling: if shapes and affines do not match, a ValueError is raised. Defaults to "data". memory: joblib.Memory or str, optional Used to cache the region extraction process. By default, no caching is done. If a string is given, it is the path to the caching directory. memory_level: int, optional Aggressiveness of memory caching. The higher the number, the higher the number of functions that will be cached. Zero means no caching. verbose: integer, optional Indicate the level of verbosity. By default, nothing is printed See also -------- nilearn.input_data.NiftiMasker """ # memory and memory_level are used by CacheMixin. def __init__(self, labels_img, background_label=0, mask_img=None, smoothing_fwhm=None, standardize=False, detrend=False, low_pass=None, high_pass=None, t_r=None, resampling_target="data", memory=Memory(cachedir=None, verbose=0), memory_level=1, verbose=0): self.labels_img = labels_img self.background_label = background_label self.mask_img = mask_img # Parameters for _smooth_array self.smoothing_fwhm = smoothing_fwhm # Parameters for clean() self.standardize = standardize self.detrend = detrend self.low_pass = low_pass self.high_pass = high_pass self.t_r = t_r # Parameters for resampling self.resampling_target = resampling_target # Parameters for joblib self.memory = memory self.memory_level = memory_level self.verbose = verbose if resampling_target not in ("labels", "data", None): raise ValueError("invalid value for 'resampling_target' " "parameter: " + str(resampling_target)) def fit(self, X=None, y=None): """Prepare signal extraction from regions. All parameters are unused, they are for scikit-learn compatibility.
""" logger.log("loading data from %s" % _utils._repr_niimgs(self.labels_img)[:200], verbose=self.verbose) self.labels_img_ = _utils.check_niimg_3d(self.labels_img) if self.mask_img is not None: logger.log("loading data from %s" % _utils._repr_niimgs(self.mask_img)[:200], verbose=self.verbose) self.mask_img_ = _utils.check_niimg_3d(self.mask_img) else: self.mask_img_ = None # Check shapes and affines or resample. if self.mask_img_ is not None: if self.resampling_target == "data": # resampling will be done at transform time pass elif self.resampling_target is None: if self.mask_img_.shape != self.labels_img_.shape[:3]: raise ValueError( _compose_err_msg( "Regions and mask do not have the same shape", mask_img=self.mask_img, labels_img=self.labels_img)) if not np.allclose(self.mask_img_.get_affine(), self.labels_img_.get_affine()): raise ValueError(_compose_err_msg( "Regions and mask do not have the same affine.", mask_img=self.mask_img, labels_img=self.labels_img)) elif self.resampling_target == "labels": logger.log("resampling the mask", verbose=self.verbose) self.mask_img_ = image.resample_img( self.mask_img_, target_affine=self.labels_img_.get_affine(), target_shape=self.labels_img_.shape[:3], interpolation="nearest", copy=True) else: raise ValueError("Invalid value for resampling_target: " + str(self.resampling_target)) mask_data, mask_affine = masking._load_mask_img(self.mask_img_) return self def fit_transform(self, imgs, confounds=None): """ Prepare and perform signal extraction from regions. """ return self.fit().transform(imgs, confounds=confounds) def _check_fitted(self): if not hasattr(self, "labels_img_"): raise ValueError('It seems that %s has not been fitted. ' 'You must call fit() before calling transform().' % self.__class__.__name__) def transform_single_imgs(self, imgs, confounds=None): """Extract signals from a single 4D niimg. Parameters ---------- imgs: 3D/4D Niimg-like object See http://nilearn.github.io/manipulating_images/input_output.html. Images to process. It must boil down to a 4D image with scans number as last dimension. confounds: CSV file or array-like, optional This parameter is passed to signal.clean. Please see the related documentation for details. shape: (number of scans, number of confounds) Returns ------- region_signals: 2D numpy.ndarray Signal for each label. shape: (number of scans, number of labels) """ # We handle the resampling of labels separately because the affine of # the labels image should not impact the extraction of the signal. 
if not hasattr(self, '_resampled_labels_img_'): self._resampled_labels_img_ = self.labels_img_ if self.resampling_target == "data": imgs_ = _utils.check_niimg_4d(imgs) if not _check_same_fov(imgs_, self._resampled_labels_img_): if self.verbose > 0: print("Resampling labels") self._resampled_labels_img_ = self._cache( image.resample_img, func_memory_level=2)( self.labels_img_, interpolation="nearest", target_shape=imgs_.shape[:3], target_affine=imgs_.get_affine()) target_shape = None target_affine = None if self.resampling_target == 'labels': target_shape = self._resampled_labels_img_.shape[:3] target_affine = self._resampled_labels_img_.get_affine() params = get_params(NiftiLabelsMasker, self, ignore=['resampling_target']) params['target_shape'] = target_shape params['target_affine'] = target_affine region_signals, labels_ = self._cache( filter_and_extract, ignore=['verbose', 'memory', 'memory_level'])( # Images imgs, _ExtractionFunctor(self._resampled_labels_img_, self.background_label), # Pre-processing params, confounds=confounds, # Caching memory=self.memory, memory_level=self.memory_level, verbose=self.verbose) self.labels_ = labels_ return region_signals def inverse_transform(self, signals): """Compute voxel signals from region signals Any mask given at initialization is taken into account. Parameters ---------- signals: 2D numpy.ndarray Signal for each region. shape: (number of scans, number of regions) Returns ------- voxel_signals: nibabel.Nifti1Image Signal for each voxel. shape: (number of scans, number of voxels) """ from ..regions import signal_extraction self._check_fitted() logger.log("computing image from signals", verbose=self.verbose) return signal_extraction.signals_to_img_labels( signals, self.labels_img_, self.mask_img_, background_label=self.background_label) nilearn/input_data/nifti_maps_masker.py """ Transformer for computing ROI signals. """ import numpy as np from sklearn.externals.joblib import Memory from .. import _utils from .._utils import logger, CacheMixin from .._utils.niimg import _get_data_dtype from .._utils.class_inspect import get_params from .._utils.niimg_conversions import _check_same_fov from .. import image from .base_masker import filter_and_extract, BaseMasker class _ExtractionFunctor(object): func_name = 'nifti_maps_masker_extractor' def __init__(self, _resampled_maps_img_, _resampled_mask_img_): self._resampled_maps_img_ = _resampled_maps_img_ self._resampled_mask_img_ = _resampled_mask_img_ def __call__(self, imgs): from ..regions import signal_extraction return signal_extraction.img_to_signals_maps( imgs, self._resampled_maps_img_, mask_img=self._resampled_mask_img_) class NiftiMapsMasker(BaseMasker, CacheMixin): """Class for masking of Niimg-like objects. NiftiMapsMasker is useful when data from overlapping volumes should be extracted (contrary to NiftiLabelsMasker). Use case: Summarize brain signals from large-scale networks obtained by prior PCA or ICA. Parameters ---------- maps_img: 4D niimg-like object See http://nilearn.github.io/manipulating_images/input_output.html. Set of continuous maps. One representative time course per map is extracted using least squares regression. mask_img: 3D niimg-like object, optional See http://nilearn.github.io/manipulating_images/input_output.html. Mask to apply to regions before extracting signals. allow_overlap: boolean, optional If False, an error is raised if the maps overlap (i.e. at least two maps have a non-zero value for the same voxel). Default is True.
smoothing_fwhm: float, optional If smoothing_fwhm is not None, it gives the full-width half maximum in millimeters of the spatial smoothing to apply to the signal. standardize: boolean, optional If standardize is True, the time-series are centered and normed: their mean is put to 0 and their variance to 1 in the time dimension. detrend: boolean, optional This parameter is passed to signal.clean. Please see the related documentation for details low_pass: False or float, optional This parameter is passed to signal.clean. Please see the related documentation for details high_pass: False or float, optional This parameter is passed to signal.clean. Please see the related documentation for details t_r: float, optional This parameter is passed to signal.clean. Please see the related documentation for details resampling_target: {"mask", "maps", "data", None}, optional. Defines which image gives the final shape/size. For example, if `resampling_target` is "mask" then maps_img and images provided to fit() are resampled to the shape and affine of mask_img. "None" means no resampling: if shapes and affines do not match, a ValueError is raised. Default value: "data". memory: joblib.Memory or str, optional Used to cache the region extraction process. By default, no caching is done. If a string is given, it is the path to the caching directory. memory_level: int, optional Aggressiveness of memory caching. The higher the number, the higher the number of functions that will be cached. Zero means no caching. verbose: integer, optional Indicate the level of verbosity. By default, nothing is printed Notes ----- When resampling_target is "maps", every 3D image processed by transform() will be resampled to the shape of maps_img. This may lead to a very large memory consumption if the voxel number in maps_img is large. See also -------- nilearn.input_data.NiftiMasker nilearn.input_data.NiftiLabelsMasker """ # memory and memory_level are used by CacheMixin. def __init__(self, maps_img, mask_img=None, allow_overlap=True, smoothing_fwhm=None, standardize=False, detrend=False, low_pass=None, high_pass=None, t_r=None, resampling_target="data", memory=Memory(cachedir=None, verbose=0), memory_level=0, verbose=0): self.maps_img = maps_img self.mask_img = mask_img # Maps Masker parameter self.allow_overlap = allow_overlap # Parameters for image.smooth self.smoothing_fwhm = smoothing_fwhm # Parameters for clean() self.standardize = standardize self.detrend = detrend self.low_pass = low_pass self.high_pass = high_pass self.t_r = t_r # Parameters for resampling self.resampling_target = resampling_target # Parameters for joblib self.memory = memory self.memory_level = memory_level self.verbose = verbose if resampling_target not in ("mask", "maps", "data", None): raise ValueError("invalid value for 'resampling_target'" " parameter: " + str(resampling_target)) if self.mask_img is None and resampling_target == "mask": raise ValueError( "resampling_target has been set to 'mask' but no mask " "has been provided.\nSet resampling_target to something else" " or provide a mask.") def fit(self, X=None, y=None): """Prepare signal extraction from regions. All parameters are unused, they are for scikit-learn compatibility.
""" # Load images logger.log("loading regions from %s" % _utils._repr_niimgs(self.maps_img)[:200], verbose=self.verbose) self.maps_img_ = _utils.check_niimg_4d(self.maps_img) if self.mask_img is not None: logger.log("loading mask from %s" % _utils._repr_niimgs(self.mask_img)[:200], verbose=self.verbose) self.mask_img_ = _utils.check_niimg_3d(self.mask_img) else: self.mask_img_ = None # Check shapes and affines or resample. if self.resampling_target is None and self.mask_img_ is not None: _check_same_fov(mask=self.mask_img_, maps=self.maps_img_, raise_error=True) elif self.resampling_target == "mask" and self.mask_img_ is not None: if self.verbose > 0: print("Resampling maps") self.maps_img_ = image.resample_img( self.maps_img_, target_affine=self.mask_img_.get_affine(), target_shape=self.mask_img_.shape, interpolation="continuous", copy=True) elif self.resampling_target == "maps" and self.mask_img_ is not None: if self.verbose > 0: print("Resampling mask") self.mask_img_ = image.resample_img( self.mask_img_, target_affine=self.maps_img_.get_affine(), target_shape=self.maps_img_.shape[:3], interpolation="nearest", copy=True) return self def _check_fitted(self): if not hasattr(self, "maps_img_"): raise ValueError('It seems that %s has not been fitted. ' 'You must call fit() before calling transform().' % self.__class__.__name__) def fit_transform(self, imgs, confounds=None): """Prepare and perform signal extraction. """ return self.fit().transform(imgs, confounds=confounds) def transform_single_imgs(self, imgs, confounds=None): """Extract signals from a single 4D niimg. Parameters ---------- imgs: 3D/4D Niimg-like object See http://nilearn.github.io/manipulating_images/input_output.html. Images to process. It must boil down to a 4D image with scans number as last dimension. confounds: CSV file or array-like, optional This parameter is passed to signal.clean. Please see the related documentation for details. shape: (number of scans, number of confounds) Returns ------- region_signals: 2D numpy.ndarray Signal for each map. shape: (number of scans, number of maps) """ # We handle the resampling of maps and mask separately because the # affine of the maps and mask images should not impact the extraction # of the signal. 
if not hasattr(self, '_resampled_maps_img_'): self._resampled_maps_img_ = self.maps_img_ if not hasattr(self, '_resampled_mask_img_'): self._resampled_mask_img_ = self.mask_img_ if self.resampling_target is None: imgs_ = _utils.check_niimg_4d(imgs) images = dict(maps=self.maps_img_, data=imgs_) if self.mask_img_ is not None: images['mask'] = self.mask_img_ _check_same_fov(raise_error=True, **images) else: if self.resampling_target == "data": imgs_ = _utils.check_niimg_4d(imgs) ref_img = imgs_ elif self.resampling_target == "mask": self._resampled_mask_img_ = self.mask_img_ ref_img = self.mask_img_ elif self.resampling_target == "maps": self._resampled_maps_img_ = self.maps_img_ ref_img = self.maps_img_ if not _check_same_fov(ref_img, self._resampled_maps_img_): if self.verbose > 0: print("Resampling maps") self._resampled_maps_img_ = self._cache(image.resample_img)( self.maps_img_, interpolation="continuous", target_shape=ref_img.shape[:3], target_affine=ref_img.get_affine()) if (self.mask_img_ is not None and not _check_same_fov(ref_img, self.mask_img_)): if self.verbose > 0: print("Resampling mask") self._resampled_mask_img_ = self._cache(image.resample_img)( self.mask_img_, interpolation="nearest", target_shape=ref_img.shape[:3], target_affine=ref_img.get_affine()) if not self.allow_overlap: # Check if there is an overlap. # If float, we set low values to 0 dtype = _get_data_dtype(self._resampled_maps_img_) data = self._resampled_maps_img_.get_data() if dtype.kind == 'f': data[data < np.finfo(dtype).eps] = 0. # Check the overlaps if np.any(np.sum(data > 0., axis=3) > 1): raise ValueError( 'Overlap detected in the maps. The overlap may be ' 'due to the atlas itself or possibly introduced by ' 'resampling' ) target_shape = None target_affine = None if self.resampling_target != 'data': target_shape = self._resampled_maps_img_.shape[:3] target_affine = self._resampled_maps_img_.get_affine() params = get_params(NiftiMapsMasker, self, ignore=['resampling_target']) params['target_shape'] = target_shape params['target_affine'] = target_affine region_signals, labels_ = self._cache( filter_and_extract, ignore=['verbose', 'memory', 'memory_level'])( # Images imgs, _ExtractionFunctor(self._resampled_maps_img_, self._resampled_mask_img_), # Pre-processing params, confounds=confounds, # Caching memory=self.memory, memory_level=self.memory_level, # kwargs verbose=self.verbose) self.labels_ = labels_ return region_signals def inverse_transform(self, region_signals): """Compute voxel signals from region signals Any mask given at initialization is taken into account. Parameters ---------- region_signals: 2D numpy.ndarray Signal for each region. shape: (number of scans, number of regions) Returns ------- voxel_signals: nibabel.Nifti1Image Signal for each voxel. shape: that of maps. """ from ..regions import signal_extraction self._check_fitted() logger.log("computing image from signals", verbose=self.verbose) return signal_extraction.signals_to_img_maps( region_signals, self.maps_img_, mask_img=self.mask_img_) nilearn/input_data/nifti_masker.py """ Transformer used to apply basic transformations on MRI data. """ # Author: Gael Varoquaux, Alexandre Abraham # License: simplified BSD from copy import copy as copy_object from sklearn.externals.joblib import Memory from .. import masking from .. import image from ..
import _utils from .._utils import CacheMixin from .._utils.class_inspect import get_params from .base_masker import BaseMasker, filter_and_extract from nilearn._utils.niimg_conversions import _check_same_fov class _ExtractionFunctor(object): func_name = 'nifti_masker_extractor' def __init__(self, mask_img_): self.mask_img_ = mask_img_ def __call__(self, imgs): return masking.apply_mask(imgs, self.mask_img_), imgs.get_affine() def filter_and_mask(imgs, mask_img_, parameters, memory_level=0, memory=Memory(cachedir=None), verbose=0, confounds=None, copy=True): imgs = _utils.check_niimg(imgs, atleast_4d=True, ensure_ndim=4) # Check whether resampling is truly necessary. If so, crop mask # as small as possible in order to speed up the process if not _check_same_fov(imgs, mask_img_): parameters = copy_object(parameters) # now we can crop mask_img_ = image.crop_img(mask_img_, copy=False) parameters['target_shape'] = mask_img_.shape parameters['target_affine'] = mask_img_.get_affine() data, affine = filter_and_extract(imgs, _ExtractionFunctor(mask_img_), parameters, memory_level=memory_level, memory=memory, verbose=verbose, confounds=confounds, copy=copy) # For _later_: missing value removal or imputing of missing data # (i.e. we want to get rid of NaNs, if smoothing must be done # earlier) # Optionally: 'doctor_nan', remove voxels with NaNs, other option # for later: some form of imputation return data, affine class NiftiMasker(BaseMasker, CacheMixin): """Class for masking of Niimg-like objects. NiftiMasker is useful when preprocessing (detrending, standardization, resampling, etc.) of in-mask voxels is necessary. Use case: working with time series of resting-state or task maps. Parameters ---------- mask_img : Niimg-like object, optional See http://nilearn.github.io/manipulating_images/input_output.html. Mask for the data. If not given, a mask is computed in the fit step. Optional parameters (mask_args and mask_strategy) can be set to fine tune the mask extraction. sessions : numpy array, optional Add a session level to the preprocessing. Each session will be detrended independently. Must be a 1D array of n_samples elements. smoothing_fwhm : float, optional If smoothing_fwhm is not None, it gives the full-width half maximum in millimeters of the spatial smoothing to apply to the signal. standardize : boolean, optional If standardize is True, the time-series are centered and normed: their mean is put to 0 and their variance to 1 in the time dimension. detrend : boolean, optional This parameter is passed to signal.clean. Please see the related documentation for details low_pass : False or float, optional This parameter is passed to signal.clean. Please see the related documentation for details high_pass : False or float, optional This parameter is passed to signal.clean. Please see the related documentation for details t_r : float, optional This parameter is passed to signal.clean. Please see the related documentation for details target_affine : 3x3 or 4x4 matrix, optional This parameter is passed to image.resample_img. Please see the related documentation for details. target_shape : 3-tuple of integers, optional This parameter is passed to image.resample_img. Please see the related documentation for details. mask_strategy: {'background' or 'epi'}, optional The strategy used to compute the mask: use 'background' if your images present a clear homogeneous background, and 'epi' if they are raw EPI images. 
Depending on this value, the mask will be computed from masking.compute_background_mask or masking.compute_epi_mask. Default is 'background'. mask_args : dict, optional If mask is None, these are additional parameters passed to masking.compute_background_mask or masking.compute_epi_mask to fine-tune mask computation. Please see the related documentation for details. sample_mask : Any type compatible with numpy-array indexing Masks the niimgs along time/fourth dimension. This complements 3D masking by the mask_img argument. This masking step is applied before data preprocessing at the beginning of NiftiMasker.transform. This is useful to perform data subselection as part of a scikit-learn pipeline. memory : instance of joblib.Memory or string Used to cache the masking process. By default, no caching is done. If a string is given, it is the path to the caching directory. memory_level : integer, optional Rough estimator of the amount of memory used by caching. Higher value means more memory for caching. verbose : integer, optional Indicate the level of verbosity. By default, nothing is printed Attributes ---------- `mask_img_` : nibabel.Nifti1Image The mask of the data, or the computed one. `affine_` : 4x4 numpy array Affine of the transformed image. See also -------- nilearn.masking.compute_background_mask nilearn.masking.compute_epi_mask nilearn.image.resample_img nilearn.masking.apply_mask nilearn.signal.clean """ def __init__(self, mask_img=None, sessions=None, smoothing_fwhm=None, standardize=False, detrend=False, low_pass=None, high_pass=None, t_r=None, target_affine=None, target_shape=None, mask_strategy='background', mask_args=None, sample_mask=None, memory_level=1, memory=Memory(cachedir=None), verbose=0 ): # Mask is provided or computed self.mask_img = mask_img self.sessions = sessions self.smoothing_fwhm = smoothing_fwhm self.standardize = standardize self.detrend = detrend self.low_pass = low_pass self.high_pass = high_pass self.t_r = t_r self.target_affine = target_affine self.target_shape = target_shape self.mask_strategy = mask_strategy self.mask_args = mask_args self.sample_mask = sample_mask self.memory = memory self.memory_level = memory_level self.verbose = verbose def _check_fitted(self): if not hasattr(self, 'mask_img_'): raise ValueError('It seems that %s has not been fitted. ' 'You must call fit() before calling transform().' % self.__class__.__name__) def fit(self, imgs=None, y=None): """Compute the mask corresponding to the data Parameters ---------- imgs: list of Niimg-like objects See http://nilearn.github.io/manipulating_images/input_output.html. Data on which the mask must be calculated. If this is a list, the affine is considered the same for all. """ # y=None is for scikit-learn compatibility (unused here). # Load data (if filenames are given, load them) if self.verbose > 0: print("[%s.fit] Loading data from %s" % ( self.__class__.__name__, _utils._repr_niimgs(imgs)[:200])) # Compute the mask if not given by the user if self.mask_img is None: mask_args = (self.mask_args if self.mask_args is not None else {}) if self.mask_strategy == 'background': compute_mask = masking.compute_background_mask elif self.mask_strategy == 'epi': compute_mask = masking.compute_epi_mask else: raise ValueError("Unknown value of mask_strategy '%s'. " "Acceptable values are 'background' and " "'epi'." 
% self.mask_strategy) if self.verbose > 0: print("[%s.fit] Computing the mask" % self.__class__.__name__) self.mask_img_ = self._cache(compute_mask, ignore=['verbose'])( imgs, verbose=max(0, self.verbose - 1), **mask_args) else: self.mask_img_ = _utils.check_niimg_3d(self.mask_img) # If resampling is requested, resample also the mask # Resampling: allows the user to change the affine, the shape or both if self.verbose > 0: print("[%s.fit] Resampling mask" % self.__class__.__name__) self.mask_img_ = self._cache(image.resample_img)( self.mask_img_, target_affine=self.target_affine, target_shape=self.target_shape, copy=False) if self.target_affine is not None: self.affine_ = self.target_affine else: self.affine_ = self.mask_img_.get_affine() # Load data in memory self.mask_img_.get_data() if self.verbose > 10: print("[%s.fit] Finished fit" % self.__class__.__name__) return self def transform_single_imgs(self, imgs, confounds=None, copy=True): """Apply mask, spatial and temporal preprocessing Parameters ---------- imgs: 3D/4D Niimg-like object See http://nilearn.github.io/manipulating_images/input_output.html. Images to process. It must boil down to a 4D image with scans number as last dimension. confounds: CSV file or array-like, optional This parameter is passed to signal.clean. Please see the related documentation for details. shape: (number of scans, number of confounds) Returns ------- region_signals: 2D numpy.ndarray Signal for each voxel inside the mask. shape: (number of scans, number of voxels) """ # Ignore the mask-computing params: they are not useful and will # just invalidate the cache for no good reason # target_shape and target_affine are conveyed implicitly in mask_img params = get_params(self.__class__, self, ignore=['mask_img', 'mask_args', 'mask_strategy']) data, _ = self._cache(filter_and_mask, ignore=['verbose', 'memory', 'memory_level', 'copy'])( imgs, self.mask_img_, params, memory_level=self.memory_level, memory=self.memory, verbose=self.verbose, confounds=confounds, copy=copy ) return data nilearn/input_data/nifti_spheres_masker.py """ Transformer for computing seed signals ---------------------------------------- Mask nifti images by spherical volumes for seed-region analyses """ import numpy as np import sklearn from sklearn import neighbors from sklearn.externals.joblib import Memory from distutils.version import LooseVersion from ..image.resampling import coord_transform from .._utils import CacheMixin from .._utils.niimg_conversions import check_niimg_4d, check_niimg_3d from .._utils.class_inspect import get_params from .. import image from .. import masking from .base_masker import filter_and_extract, BaseMasker def _apply_mask_and_get_affinity(seeds, niimg, radius, allow_overlap, mask_img=None): seeds = list(seeds) affine = niimg.get_affine() # Compute world coordinates of all in-mask voxels.
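# (Reminder: a voxel at integer index (i, j, k) maps to world, e.g. MNI,
# coordinates through the 4x4 affine, [x, y, z, 1].T = affine @ [i, j, k, 1].T;
# coord_transform applies this mapping, and its inverse is used below to go
# from world coordinates back to voxel indices.)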
if mask_img is not None: mask_img = check_niimg_3d(mask_img) mask_img = image.resample_img(mask_img, target_affine=affine, target_shape=niimg.shape[:3], interpolation='nearest') mask, _ = masking._load_mask_img(mask_img) mask_coords = list(zip(*np.where(mask != 0))) X = masking._apply_mask_fmri(niimg, mask_img) else: mask_coords = list(np.ndindex(niimg.shape[:3])) X = niimg.get_data().reshape([-1, niimg.shape[3]]).T # For each seed, get coordinates of nearest voxel nearests = [] for sx, sy, sz in seeds: nearest = np.round(coord_transform(sx, sy, sz, np.linalg.inv(affine))) nearest = nearest.astype(int) nearest = (nearest[0], nearest[1], nearest[2]) try: nearests.append(mask_coords.index(nearest)) except ValueError: nearests.append(None) mask_coords = np.asarray(list(zip(*mask_coords))) mask_coords = coord_transform(mask_coords[0], mask_coords[1], mask_coords[2], affine) mask_coords = np.asarray(mask_coords).T if (radius is not None and LooseVersion(sklearn.__version__) < LooseVersion('0.16')): # Fix for scikit learn versions below 0.16. See # https://github.com/scikit-learn/scikit-learn/issues/4072 radius += 1e-6 clf = neighbors.NearestNeighbors(radius=radius) A = clf.fit(mask_coords).radius_neighbors_graph(seeds) A = A.tolil() for i, nearest in enumerate(nearests): if nearest is None: continue A[i, nearest] = True # Include the voxel containing the seed itself if not masked mask_coords = mask_coords.astype(int).tolist() for i, seed in enumerate(seeds): try: A[i, mask_coords.index(seed)] = True except ValueError: # seed is not in the mask pass if not allow_overlap: if np.any(A.sum(axis=0) >= 2): raise ValueError('Overlap detected between spheres') return X, A def _iter_signals_from_spheres(seeds, niimg, radius, allow_overlap, mask_img=None): """Utility function to iterate over spheres. Parameters ---------- seeds: List of triplets of coordinates in native space Seed definitions. List of coordinates of the seeds in the same space as the images (typically MNI or TAL). imgs: 3D/4D Niimg-like object See http://nilearn.github.io/manipulating_images/input_output.html. Images to process. It must boil down to a 4D image with scans number as last dimension. radius: float, optional Indicates, in millimeters, the radius for the sphere around the seed. Default is None (signal is extracted on a single voxel). allow_overlap: boolean If False, an error is raised if the maps overlaps (ie at least two maps have a non-zero value for the same voxel). Default is False. mask_img: Niimg-like object, optional See http://nilearn.github.io/manipulating_images/input_output.html. Mask to apply to regions before extracting signals. """ X, A = _apply_mask_and_get_affinity(seeds, niimg, radius, allow_overlap, mask_img=mask_img) for i, row in enumerate(A.rows): if len(row) == 0: raise ValueError('Sphere around seed #%i is empty' % i) yield X[:, row] class _ExtractionFunctor(object): func_name = 'nifti_spheres_masker_extractor' def __init__(self, seeds_, radius, mask_img, allow_overlap): self.seeds_ = seeds_ self.radius = radius self.mask_img = mask_img self.allow_overlap = allow_overlap def __call__(self, imgs): n_seeds = len(self.seeds_) imgs = check_niimg_4d(imgs) signals = np.empty((imgs.shape[3], n_seeds)) for i, sphere in enumerate(_iter_signals_from_spheres( self.seeds_, imgs, self.radius, self.allow_overlap, mask_img=self.mask_img)): signals[:, i] = np.mean(sphere, axis=1) return signals, None class NiftiSpheresMasker(BaseMasker, CacheMixin): """Class for masking of Niimg-like objects using seeds. 
    NiftiSpheresMasker is useful when data from given seeds should be
    extracted. Use case: summarize brain signals from seeds that were
    obtained from prior knowledge.

    Parameters
    ----------
    seeds: list of triplets of coordinates in native space
        Seed definitions. List of coordinates of the seeds in the same space
        as the images (typically MNI or TAL).

    radius: float, optional
        Indicates, in millimeters, the radius for the sphere around the seed.
        Default is None (signal is extracted on a single voxel).

    mask_img: Niimg-like object, optional
        See http://nilearn.github.io/manipulating_images/input_output.html.
        Mask to apply to regions before extracting signals.

    allow_overlap: boolean, optional
        If False, an error is raised if the spheres overlap (i.e. at least
        two spheres share a voxel). Default is False.

    smoothing_fwhm: float, optional
        If smoothing_fwhm is not None, it gives the full-width half maximum
        in millimeters of the spatial smoothing to apply to the signal.

    standardize: boolean, optional
        If standardize is True, the time-series are centered and normed:
        their mean is set to 0 and their variance to 1 in the time dimension.

    detrend: boolean, optional
        This parameter is passed to signal.clean. Please see the related
        documentation for details.

    low_pass: False or float, optional
        This parameter is passed to signal.clean. Please see the related
        documentation for details.

    high_pass: False or float, optional
        This parameter is passed to signal.clean. Please see the related
        documentation for details.

    t_r: float, optional
        This parameter is passed to signal.clean. Please see the related
        documentation for details.

    memory: joblib.Memory or str, optional
        Used to cache the region extraction process.
        By default, no caching is done. If a string is given, it is the
        path to the caching directory.

    memory_level: int, optional
        Aggressiveness of memory caching. The higher the number, the higher
        the number of functions that will be cached. Zero means no caching.

    verbose: integer, optional
        Indicate the level of verbosity. By default, nothing is printed.

    See also
    --------
    nilearn.input_data.NiftiMasker
    """
    # memory and memory_level are used by CacheMixin.

    def __init__(self, seeds, radius=None, mask_img=None, allow_overlap=False,
                 smoothing_fwhm=None, standardize=False, detrend=False,
                 low_pass=None, high_pass=None, t_r=None,
                 memory=Memory(cachedir=None, verbose=0), memory_level=1,
                 verbose=0):
        self.seeds = seeds
        self.mask_img = mask_img
        self.radius = radius
        self.allow_overlap = allow_overlap

        # Parameters for _smooth_array
        self.smoothing_fwhm = smoothing_fwhm

        # Parameters for clean()
        self.standardize = standardize
        self.detrend = detrend
        self.low_pass = low_pass
        self.high_pass = high_pass
        self.t_r = t_r

        # Parameters for joblib
        self.memory = memory
        self.memory_level = memory_level
        self.verbose = verbose

    def fit(self, X=None, y=None):
        """Prepare signal extraction from regions.

        All parameters are unused, they are for scikit-learn compatibility.
        """
        if hasattr(self, 'seeds_'):
            return self

        error = ("Seeds must be a list of triplets of coordinates in "
                 "native space.\n")

        if not hasattr(self.seeds, '__iter__'):
            raise ValueError(error + "Given seed list is of type: "
                             + str(type(self.seeds)))
        self.seeds_ = []
        # Check seeds and convert them to lists if needed
        for i, seed in enumerate(self.seeds):
            # Check the type first
            if not hasattr(seed, '__len__'):
                raise ValueError(error + "Seed #%i is not a valid triplet "
                                 "of coordinates. It is of type %s."
% (i, type(seed))) # Convert to list because it is easier to process if isinstance(seed, np.ndarray): seed = seed.tolist() else: # in case of tuple seed = list(seed) # Check the length if len(seed) != 3: raise ValueError(error + "Seed #%i is of length %i " "instead of 3." % (i, len(seed))) self.seeds_.append(seed) return self def fit_transform(self, imgs, confounds=None): """Prepare and perform signal extraction""" return self.fit().transform(imgs, confounds=confounds) def _check_fitted(self): if not hasattr(self, "seeds_"): raise ValueError('It seems that %s has not been fitted. ' 'You must call fit() before calling transform().' % self.__class__.__name__) def transform_single_imgs(self, imgs, confounds=None): """Extract signals from a single 4D niimg. Parameters ---------- imgs: 3D/4D Niimg-like object See http://nilearn.github.io/manipulating_images/input_output.html. Images to process. It must boil down to a 4D image with scans number as last dimension. confounds: CSV file or array-like, optional This parameter is passed to signal.clean. Please see the related documentation for details. shape: (number of scans, number of confounds) Returns ------- region_signals: 2D numpy.ndarray Signal for each sphere. shape: (number of scans, number of spheres) """ self._check_fitted() params = get_params(NiftiSpheresMasker, self) signals, _ = self._cache( filter_and_extract, ignore=['verbose', 'memory', 'memory_level'])( # Images imgs, _ExtractionFunctor(self.seeds_, self.radius, self.mask_img, self.allow_overlap), # Pre-processing params, confounds=confounds, # Caching memory=self.memory, memory_level=self.memory_level, # kwargs verbose=self.verbose) return signals PKH$nilearn/input_data/tests/__init__.pyPKHB ԍ,nilearn/input_data/tests/test_base_masker.py""" Test the base_masker module """ import numpy as np from numpy.testing import assert_array_almost_equal import nibabel from nilearn.input_data.nifti_masker import filter_and_mask from nilearn import image def test_cropping_code_paths(): # Will mask data with an identically sampled mask and # with a smaller mask. The results must be identical rng = np.random.RandomState(42) data = np.zeros([20, 30, 40, 5]) data[10:15, 5:20, 10:30, :] = 1. + rng.rand(5, 15, 20, 5) affine = np.eye(4) img = nibabel.Nifti1Image(data, affine=affine) mask = (data[..., 0] > 0).astype(int) mask_img = nibabel.Nifti1Image(mask, affine=affine) # the mask in mask_img has the same shape and affine as the # data and should thus avoid resampling # we now crop the mask to its non-zero part. 
Masking with this # mask must yield the same result cropped_mask_img = image.crop_img(mask_img) parameters = {"smoothing_fwhm": None, "high_pass": None, "low_pass": None, "t_r": None, "detrend": None, "standardize": None } # Now do the two maskings out_data_uncropped, affine_uncropped = filter_and_mask(img, mask_img, parameters) out_data_cropped, affine_cropped = filter_and_mask(img, cropped_mask_img, parameters) assert_array_almost_equal(out_data_cropped, out_data_uncropped) PKHZ6$ 2nilearn/input_data/tests/test_masker_validation.pyfrom nose.tools import assert_true, assert_equal import nibabel import numpy as np from sklearn.base import BaseEstimator from sklearn.externals.joblib import Memory from nilearn._utils.testing import assert_warns from nilearn.input_data.masker_validation import check_embedded_nifti_masker from nilearn.input_data import MultiNiftiMasker, NiftiMasker class OwningClass(BaseEstimator): def __init__(self, mask=None, smoothing_fwhm=None, standardize=False, detrend=False, low_pass=None, high_pass=None, t_r=None, target_affine=None, target_shape=None, mask_strategy='background', mask_args=None, memory=Memory(cachedir=None), memory_level=0, n_jobs=1, verbose=0, dummy=None): self.mask = mask self.smoothing_fwhm = smoothing_fwhm self.standardize = standardize self.detrend = detrend self.low_pass = low_pass self.high_pass = high_pass self.t_r = t_r self.target_affine = target_affine self.target_shape = target_shape self.mask_strategy = mask_strategy self.mask_args = mask_args self.memory = memory self.memory_level = memory_level self.n_jobs = n_jobs self.verbose = verbose self.dummy = dummy def test_check_embedded_nifti_masker(): owner = OwningClass() masker = check_embedded_nifti_masker(owner) assert_true(type(masker) is MultiNiftiMasker) for mask, multi_subject in ( (MultiNiftiMasker(), True), (NiftiMasker(), False)): owner = OwningClass(mask=mask) masker = check_embedded_nifti_masker(owner, multi_subject=multi_subject) assert_equal(type(masker), type(mask)) for param_key in masker.get_params(): if param_key not in ['memory', 'memory_level', 'n_jobs', 'verbose']: assert_equal(getattr(masker, param_key), getattr(mask, param_key)) else: assert_equal(getattr(masker, param_key), getattr(owner, param_key)) # Check use of mask as mask_img shape = (6, 8, 10, 5) affine = np.eye(4) mask = nibabel.Nifti1Image(np.ones(shape[:3], dtype=np.int8), affine) owner = OwningClass(mask=mask) masker = check_embedded_nifti_masker(owner) assert_true(masker.mask_img is mask) # Check attribute forwarding data = np.zeros((9, 9, 9)) data[2:-2, 2:-2, 2:-2] = 10 imgs = nibabel.Nifti1Image(data, np.eye(4)) mask = MultiNiftiMasker() mask.fit([[imgs]]) owner = OwningClass(mask=mask) masker = check_embedded_nifti_masker(owner) assert_true(masker.mask_img is mask.mask_img_) # Check conflict warning mask = NiftiMasker(mask_strategy='epi') owner = OwningClass(mask=mask) assert_warns(UserWarning, check_embedded_nifti_masker, owner) PKH=I 5nilearn/input_data/tests/test_nifti_spheres_masker.pyimport nibabel import numpy as np from numpy.testing import assert_array_equal from nilearn.input_data import NiftiSpheresMasker from nilearn._utils.testing import assert_raises_regex def test_seed_extraction(): data = np.random.random((3, 3, 3, 5)) img = nibabel.Nifti1Image(data, np.eye(4)) masker = NiftiSpheresMasker([(1, 1, 1)]) # Test the fit masker.fit() # Test the transform s = masker.transform(img) assert_array_equal(s[:, 0], data[1, 1, 1]) def test_sphere_extraction(): data = np.random.random((3, 3, 3, 5)) img = 
nibabel.Nifti1Image(data, np.eye(4)) masker = NiftiSpheresMasker([(1, 1, 1)], radius=1) # Test the fit masker.fit() # Test the transform s = masker.transform(img) mask = np.zeros((3, 3, 3), dtype=np.bool) mask[:, 1, 1] = True mask[1, :, 1] = True mask[1, 1, :] = True assert_array_equal(s[:, 0], np.mean(data[mask], axis=0)) # Now with a mask mask_img = np.zeros((3, 3, 3)) mask_img[1, :, :] = 1 mask_img = nibabel.Nifti1Image(mask_img, np.eye(4)) masker = NiftiSpheresMasker([(1, 1, 1)], radius=1, mask_img=mask_img) masker.fit() s = masker.transform(img) assert_array_equal(s[:, 0], np.mean(data[np.logical_and(mask, mask_img.get_data())], axis=0)) def test_anisotropic_sphere_extraction(): data = np.random.random((3, 3, 3, 5)) affine = np.eye(4) affine[0, 0] = 2 affine[2, 2] = 2 img = nibabel.Nifti1Image(data, affine) masker = NiftiSpheresMasker([(2, 1, 2)], radius=1) # Test the fit masker.fit() # Test the transform s = masker.transform(img) mask = np.zeros((3, 3, 3), dtype=np.bool) mask[1, :, 1] = True assert_array_equal(s[:, 0], np.mean(data[mask], axis=0)) # Now with a mask mask_img = np.zeros((3, 2, 3)) mask_img[1, 0, 1] = 1 affine_2 = affine.copy() affine_2[0, 0] = 4 mask_img = nibabel.Nifti1Image(mask_img, affine=affine_2) masker = NiftiSpheresMasker([(2, 1, 2)], radius=1, mask_img=mask_img) masker.fit() s = masker.transform(img) assert_array_equal(s[:, 0], data[1, 0, 1]) def test_errors(): masker = NiftiSpheresMasker(([1, 2]), radius=.2) assert_raises_regex(ValueError, 'Seeds must be a list .+', masker.fit) def test_nifti_spheres_masker_overlap(): # Test resampling in NiftiMapsMasker affine = np.eye(4) shape = (5, 5, 5) data = np.random.random(shape + (5,)) fmri_img = nibabel.Nifti1Image(data, affine) seeds = [(0, 0, 0), (2, 2, 2)] overlapping_masker = NiftiSpheresMasker(seeds, radius=1, allow_overlap=True) overlapping_masker.fit_transform(fmri_img) overlapping_masker = NiftiSpheresMasker(seeds, radius=2, allow_overlap=True) overlapping_masker.fit_transform(fmri_img) noverlapping_masker = NiftiSpheresMasker(seeds, radius=1, allow_overlap=False) noverlapping_masker.fit_transform(fmri_img) noverlapping_masker = NiftiSpheresMasker(seeds, radius=2, allow_overlap=False) assert_raises_regex(ValueError, 'Overlap detected', noverlapping_masker.fit_transform, fmri_img) def test_small_radius(): affine = np.eye(4) shape = (3, 3, 3) data = np.random.random(shape) mask = np.zeros(shape) mask[1, 1, 1] = 1 mask[2, 2, 2] = 1 affine = np.eye(4) * 1.2 seed = (1.4, 1.4, 1.4) masker = NiftiSpheresMasker([seed], radius=0.1, mask_img=nibabel.Nifti1Image(mask, affine)) masker.fit_transform(nibabel.Nifti1Image(data, affine)) # Test if masking is taken into account mask[1, 1, 1] = 0 mask[1, 1, 0] = 1 masker = NiftiSpheresMasker([seed], radius=0.1, mask_img=nibabel.Nifti1Image(mask, affine)) assert_raises_regex(ValueError, 'Sphere around seed #0 is empty', masker.fit_transform, nibabel.Nifti1Image(data, affine)) masker = NiftiSpheresMasker([seed], radius=1.6, mask_img=nibabel.Nifti1Image(mask, affine)) masker.fit_transform(nibabel.Nifti1Image(data, affine)) PKH;  3nilearn/input_data/tests/test_multi_nifti_masker.py""" Test the multi_nifti_masker module """ # Author: Gael Varoquaux # License: simplified BSD from nose.tools import assert_true, assert_false, assert_raises, assert_equal from nose import SkipTest import numpy as np from numpy.testing import assert_array_equal from nibabel import Nifti1Image import nibabel from distutils.version import LooseVersion from nilearn.input_data.multi_nifti_masker import 
MultiNiftiMasker from nilearn._utils.testing import assert_raises_regex, write_tmp_imgs from nilearn._utils.exceptions import DimensionError def test_auto_mask(): # This mostly a smoke test data = np.zeros((9, 9, 9)) data[2:-2, 2:-2, 2:-2] = 10 img = Nifti1Image(data, np.eye(4)) masker = MultiNiftiMasker(mask_args=dict(opening=0)) # Check that if we have not fit the masker we get a intelligible # error assert_raises(ValueError, masker.transform, [[img, ]]) # Check error return due to bad data format assert_raises(ValueError, masker.fit, img) # Smoke test the fit masker.fit([[img]]) # Test mask intersection data2 = np.zeros((9, 9, 9)) data2[1:-3, 1:-3, 1:-3] = 10 img2 = Nifti1Image(data2, np.eye(4)) masker.fit([[img, img2]]) assert_array_equal(masker.mask_img_.get_data(), np.logical_or(data, data2)) # Smoke test the transform masker.transform([[img, ]]) # It should also work with a 3D image masker.transform(img) # check exception when transform() called without prior fit() masker2 = MultiNiftiMasker(mask_img=img) assert_raises_regex( ValueError, 'has not been fitted. ', masker2.transform, img2) def test_nan(): data = np.ones((9, 9, 9)) data[0] = np.nan data[:, 0] = np.nan data[:, :, 0] = np.nan data[-1] = np.nan data[:, -1] = np.nan data[:, :, -1] = np.nan data[3:-3, 3:-3, 3:-3] = 10 img = Nifti1Image(data, np.eye(4)) masker = MultiNiftiMasker(mask_args=dict(opening=0)) masker.fit([img]) mask = masker.mask_img_.get_data() assert_true(mask[1:-1, 1:-1, 1:-1].all()) assert_false(mask[0].any()) assert_false(mask[:, 0].any()) assert_false(mask[:, :, 0].any()) assert_false(mask[-1].any()) assert_false(mask[:, -1].any()) assert_false(mask[:, :, -1].any()) def test_different_affines(): # Mask and EIP files with different affines mask_img = Nifti1Image(np.ones((2, 2, 2), dtype=np.int8), affine=np.diag((4, 4, 4, 1))) epi_img1 = Nifti1Image(np.ones((4, 4, 4, 3)), affine=np.diag((2, 2, 2, 1))) epi_img2 = Nifti1Image(np.ones((3, 3, 3, 3)), affine=np.diag((3, 3, 3, 1))) masker = MultiNiftiMasker(mask_img=mask_img) epis = masker.fit_transform([epi_img1, epi_img2]) for this_epi in epis: masker.inverse_transform(this_epi) def test_3d_images(): # Test that the MultiNiftiMasker works with 3D images mask_img = Nifti1Image(np.ones((2, 2, 2), dtype=np.int8), affine=np.diag((4, 4, 4, 1))) epi_img1 = Nifti1Image(np.ones((2, 2, 2)), affine=np.diag((4, 4, 4, 1))) epi_img2 = Nifti1Image(np.ones((2, 2, 2)), affine=np.diag((2, 2, 2, 1))) masker = MultiNiftiMasker(mask_img=mask_img) epis = masker.fit_transform([epi_img1, epi_img2]) # This is mostly a smoke test assert_equal(len(epis), 2) # verify that 4D mask arguments are refused mask_img_4d = Nifti1Image(np.ones((2, 2, 2, 2), dtype=np.int8), affine=np.diag((4, 4, 4, 1))) masker2 = MultiNiftiMasker(mask_img=mask_img_4d) assert_raises_regex(DimensionError, "Input data has incompatible dimensionality: " "Expected dimension is 3D and you provided " "a 4D image.", masker2.fit) def test_joblib_cache(): if not LooseVersion(nibabel.__version__) > LooseVersion('1.1.0'): # Old nibabel do not pickle raise SkipTest from sklearn.externals.joblib import hash # Dummy mask mask = np.zeros((40, 40, 40)) mask[20, 20, 20] = 1 mask_img = Nifti1Image(mask, np.eye(4)) with write_tmp_imgs(mask_img, create_files=True) as filename: masker = MultiNiftiMasker(mask_img=filename) masker.fit() mask_hash = hash(masker.mask_img_) masker.mask_img_.get_data() assert_true(mask_hash == hash(masker.mask_img_)) # enables to delete "filename" on windows del masker 
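# Not part of the original suite: a minimal usage sketch of the
# multi-subject workflow the tests above exercise. It relies only on names
# already imported in this module (np, Nifti1Image, MultiNiftiMasker); the
# helper name `_example_multi_subject_masking` is ours.
def _example_multi_subject_masking():
    data = np.zeros((9, 9, 9, 5))
    data[2:-2, 2:-2, 2:-2, :] = 10
    # Two "subjects" sharing the same support
    imgs = [Nifti1Image(data, np.eye(4)) for _ in range(2)]
    masker = MultiNiftiMasker(mask_args=dict(opening=0))
    # fit_transform returns one (n_scans, n_voxels) array per subject
    series = masker.fit_transform(imgs)
    assert len(series) == 2
    n_voxels = int(masker.mask_img_.get_data().sum())
    assert series[0].shape == (5, n_voxels)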
PKH+Vy"y"4nilearn/input_data/tests/test_nifti_labels_masker.py"""Test the nifti_region module Functions in this file only test features added by the NiftiLabelsMasker class, not the underlying functions (clean(), img_to_signals_labels(), etc.). See test_masking.py and test_signal.py for details. """ from nose.tools import assert_raises, assert_equal import numpy as np import nibabel from nilearn.input_data.nifti_labels_masker import NiftiLabelsMasker from nilearn._utils import testing, as_ndarray from nilearn._utils.exceptions import DimensionError from nilearn._utils.testing import assert_less def generate_random_img(shape, length=1, affine=np.eye(4), rand_gen=np.random.RandomState(0)): data = rand_gen.randn(*(shape + (length,))) return nibabel.Nifti1Image(data, affine), nibabel.Nifti1Image( as_ndarray(data[..., 0] > 0.2, dtype=np.int8), affine) def test_nifti_labels_masker(): # Check working of shape/affine checks shape1 = (13, 11, 12) affine1 = np.eye(4) shape2 = (12, 10, 14) affine2 = np.diag((1, 2, 3, 1)) n_regions = 9 length = 3 fmri11_img, mask11_img = generate_random_img(shape1, affine=affine1, length=length) fmri12_img, mask12_img = generate_random_img(shape1, affine=affine2, length=length) fmri21_img, mask21_img = generate_random_img(shape2, affine=affine1, length=length) labels11_img = testing.generate_labeled_regions(shape1, affine=affine1, n_regions=n_regions) mask_img_4d = nibabel.Nifti1Image(np.ones((2, 2, 2, 2), dtype=np.int8), affine=np.diag((4, 4, 4, 1))) # verify that 4D mask arguments are refused masker = NiftiLabelsMasker(labels11_img, mask_img=mask_img_4d) testing.assert_raises_regex(DimensionError, "Input data has incompatible dimensionality: " "Expected dimension is 3D and you provided " "a 4D image.", masker.fit) # check exception when transform() called without prior fit() masker11 = NiftiLabelsMasker(labels11_img, resampling_target=None) testing.assert_raises_regex( ValueError, 'has not been fitted. ', masker11.transform, fmri11_img) # No exception raised here signals11 = masker11.fit().transform(fmri11_img) assert_equal(signals11.shape, (length, n_regions)) masker11 = NiftiLabelsMasker(labels11_img, mask_img=mask11_img, resampling_target=None) signals11 = masker11.fit().transform(fmri11_img) assert_equal(signals11.shape, (length, n_regions)) # Test all kinds of mismatch between shapes and between affines masker11 = NiftiLabelsMasker(labels11_img, resampling_target=None) masker11.fit() assert_raises(ValueError, masker11.transform, fmri12_img) assert_raises(ValueError, masker11.transform, fmri21_img) masker11 = NiftiLabelsMasker(labels11_img, mask_img=mask12_img, resampling_target=None) assert_raises(ValueError, masker11.fit) masker11 = NiftiLabelsMasker(labels11_img, mask_img=mask21_img, resampling_target=None) assert_raises(ValueError, masker11.fit) # Transform, with smoothing (smoke test) masker11 = NiftiLabelsMasker(labels11_img, smoothing_fwhm=3, resampling_target=None) signals11 = masker11.fit().transform(fmri11_img) assert_equal(signals11.shape, (length, n_regions)) masker11 = NiftiLabelsMasker(labels11_img, smoothing_fwhm=3, resampling_target=None) signals11 = masker11.fit_transform(fmri11_img) assert_equal(signals11.shape, (length, n_regions)) testing.assert_raises_regex( ValueError, 'has not been fitted. 
', NiftiLabelsMasker(labels11_img).inverse_transform, signals11) # Call inverse transform (smoke test) fmri11_img_r = masker11.inverse_transform(signals11) assert_equal(fmri11_img_r.shape, fmri11_img.shape) np.testing.assert_almost_equal(fmri11_img_r.get_affine(), fmri11_img.get_affine()) def test_nifti_labels_masker_resampling(): # Test resampling in NiftiLabelsMasker shape1 = (10, 11, 12) affine = np.eye(4) # mask shape2 = (16, 17, 18) # labels shape3 = (13, 14, 15) n_regions = 9 length = 3 # With data of the same affine fmri11_img, _ = generate_random_img(shape1, affine=affine, length=length) _, mask22_img = generate_random_img(shape2, affine=affine, length=length) labels33_img = testing.generate_labeled_regions(shape3, n_regions, affine=affine) # Test error checking assert_raises(ValueError, NiftiLabelsMasker, labels33_img, resampling_target="mask") assert_raises(ValueError, NiftiLabelsMasker, labels33_img, resampling_target="invalid") # Target: labels masker = NiftiLabelsMasker(labels33_img, mask_img=mask22_img, resampling_target="labels") masker.fit() np.testing.assert_almost_equal(masker.labels_img_.get_affine(), labels33_img.get_affine()) assert_equal(masker.labels_img_.shape, labels33_img.shape) np.testing.assert_almost_equal(masker.mask_img_.get_affine(), masker.labels_img_.get_affine()) assert_equal(masker.mask_img_.shape, masker.labels_img_.shape[:3]) transformed = masker.transform(fmri11_img) assert_equal(transformed.shape, (length, n_regions)) fmri11_img_r = masker.inverse_transform(transformed) np.testing.assert_almost_equal(fmri11_img_r.get_affine(), masker.labels_img_.get_affine()) assert_equal(fmri11_img_r.shape, (masker.labels_img_.shape[:3] + (length,))) # Test with clipped labels: mask does not contain all labels. # Shapes do matter in that case, because there is some resampling # taking place. shape1 = (10, 11, 12) # fmri shape2 = (8, 9, 10) # mask shape3 = (16, 18, 20) # maps n_regions = 9 length = 21 fmri11_img, _ = generate_random_img(shape1, affine=affine, length=length) _, mask22_img = generate_random_img(shape2, affine=affine, length=length) # Target: labels labels33_img = testing.generate_labeled_regions(shape3, n_regions, affine=affine) masker = NiftiLabelsMasker(labels33_img, mask_img=mask22_img, resampling_target="labels") masker.fit() np.testing.assert_almost_equal(masker.labels_img_.get_affine(), labels33_img.get_affine()) assert_equal(masker.labels_img_.shape, labels33_img.shape) np.testing.assert_almost_equal(masker.mask_img_.get_affine(), masker.labels_img_.get_affine()) assert_equal(masker.mask_img_.shape, masker.labels_img_.shape[:3]) uniq_labels = np.unique(masker.labels_img_.get_data()) assert_equal(uniq_labels[0], 0) assert_equal(len(uniq_labels) - 1, n_regions) transformed = masker.transform(fmri11_img) assert_equal(transformed.shape, (length, n_regions)) # Some regions have been clipped. 
    # Resulting signal must be zero
    assert_less((transformed.var(axis=0) == 0).sum(), n_regions)

    fmri11_img_r = masker.inverse_transform(transformed)
    np.testing.assert_almost_equal(fmri11_img_r.get_affine(),
                                   masker.labels_img_.get_affine())
    assert_equal(fmri11_img_r.shape,
                 (masker.labels_img_.shape[:3] + (length,)))

    # Test with data and atlas of different shape: the atlas should be
    # resampled to the data
    shape22 = (5, 5, 6)
    affine2 = 2 * np.eye(4)
    affine2[-1, -1] = 1

    fmri22_img, _ = generate_random_img(shape22, affine=affine2,
                                        length=length)
    masker = NiftiLabelsMasker(labels33_img, mask_img=mask22_img)

    masker.fit_transform(fmri22_img)
    np.testing.assert_array_equal(
        masker._resampled_labels_img_.get_affine(), affine2)

    # Test with filenames
    with testing.write_tmp_imgs(fmri22_img) as filename:
        masker = NiftiLabelsMasker(labels33_img, resampling_target='data')
        masker.fit_transform(filename)
PKHp+p+2nilearn/input_data/tests/test_nifti_maps_masker.py"""Test the nifti_maps_masker module

Functions in this file only test features added by the NiftiMapsMasker
class, not the underlying functions (clean(), img_to_signals_labels(),
etc.). See test_masking.py and test_signal.py for details.
"""
from nose.tools import assert_raises, assert_equal

import numpy as np

import nibabel

from nilearn.input_data.nifti_maps_masker import NiftiMapsMasker
from nilearn._utils import testing, as_ndarray
from nilearn._utils.exceptions import DimensionError
from nilearn._utils.testing import assert_less, assert_raises_regex


def generate_random_img(shape, length=1, affine=np.eye(4),
                        rand_gen=np.random.RandomState(0)):
    data = rand_gen.randn(*(shape + (length,)))
    return nibabel.Nifti1Image(data, affine), nibabel.Nifti1Image(
        as_ndarray(data[..., 0] > 0.2, dtype=np.int8), affine)


def test_nifti_maps_masker():
    # Check working of shape/affine checks
    shape1 = (13, 11, 12)
    affine1 = np.eye(4)

    shape2 = (12, 10, 14)
    affine2 = np.diag((1, 2, 3, 1))

    n_regions = 9
    length = 3

    fmri11_img, mask11_img = generate_random_img(shape1, affine=affine1,
                                                 length=length)
    fmri12_img, mask12_img = generate_random_img(shape1, affine=affine2,
                                                 length=length)
    fmri21_img, mask21_img = generate_random_img(shape2, affine=affine1,
                                                 length=length)

    labels11_img, labels_mask_img = \
        testing.generate_maps(shape1, n_regions, affine=affine1)

    # No exception raised here
    for create_files in (True, False):
        with testing.write_tmp_imgs(labels11_img,
                                    create_files=create_files) as labels11:
            masker11 = NiftiMapsMasker(labels11, resampling_target=None)
            signals11 = masker11.fit().transform(fmri11_img)
            assert_equal(signals11.shape, (length, n_regions))
            # enables to delete "labels11" on windows
            del masker11

    masker11 = NiftiMapsMasker(labels11_img, mask_img=mask11_img,
                               resampling_target=None)

    testing.assert_raises_regex(
        ValueError, 'has not been fitted. 
', masker11.transform, fmri11_img) signals11 = masker11.fit().transform(fmri11_img) assert_equal(signals11.shape, (length, n_regions)) NiftiMapsMasker(labels11_img).fit_transform(fmri11_img) # Test all kinds of mismatches between shapes and between affines for create_files in (True, False): with testing.write_tmp_imgs(labels11_img, mask12_img, create_files=create_files) as images: labels11, mask12 = images masker11 = NiftiMapsMasker(labels11, resampling_target=None) masker11.fit() assert_raises(ValueError, masker11.transform, fmri12_img) assert_raises(ValueError, masker11.transform, fmri21_img) masker11 = NiftiMapsMasker(labels11, mask_img=mask12, resampling_target=None) assert_raises(ValueError, masker11.fit) masker11 = NiftiMapsMasker(labels11_img, mask_img=mask21_img, resampling_target=None) assert_raises(ValueError, masker11.fit) # Transform, with smoothing (smoke test) masker11 = NiftiMapsMasker(labels11_img, smoothing_fwhm=3, resampling_target=None) signals11 = masker11.fit().transform(fmri11_img) assert_equal(signals11.shape, (length, n_regions)) masker11 = NiftiMapsMasker(labels11_img, smoothing_fwhm=3, resampling_target=None) signals11 = masker11.fit_transform(fmri11_img) assert_equal(signals11.shape, (length, n_regions)) testing.assert_raises_regex( ValueError, 'has not been fitted. ', NiftiMapsMasker(labels11_img).inverse_transform, signals11) # Call inverse transform (smoke test) fmri11_img_r = masker11.inverse_transform(signals11) assert_equal(fmri11_img_r.shape, fmri11_img.shape) np.testing.assert_almost_equal(fmri11_img_r.get_affine(), fmri11_img.get_affine()) # Test with data and atlas of different shape: the atlas should be # resampled to the data shape22 = (5, 5, 6) affine2 = 2 * np.eye(4) affine2[-1, -1] = 1 fmri22_img, _ = generate_random_img(shape22, affine=affine2, length=length) masker = NiftiMapsMasker(labels11_img, mask_img=mask21_img) masker.fit_transform(fmri22_img) np.testing.assert_array_equal( masker._resampled_maps_img_.get_affine(), affine2) def test_nifti_maps_masker_2(): # Test resampling in NiftiMapsMasker affine = np.eye(4) shape1 = (10, 11, 12) # fmri shape2 = (13, 14, 15) # mask shape3 = (16, 17, 18) # maps n_regions = 9 length = 3 fmri11_img, _ = generate_random_img(shape1, affine=affine, length=length) _, mask22_img = generate_random_img(shape2, affine=affine, length=length) maps33_img, _ = \ testing.generate_maps(shape3, n_regions, affine=affine) mask_img_4d = nibabel.Nifti1Image(np.ones((2, 2, 2, 2), dtype=np.int8), affine=np.diag((4, 4, 4, 1))) # verify that 4D mask arguments are refused masker = NiftiMapsMasker(maps33_img, mask_img=mask_img_4d) testing.assert_raises_regex(DimensionError, "Input data has incompatible dimensionality: " "Expected dimension is 3D and you provided " "a 4D image.", masker.fit) # Test error checking assert_raises(ValueError, NiftiMapsMasker, maps33_img, resampling_target="mask") assert_raises(ValueError, NiftiMapsMasker, maps33_img, resampling_target="invalid") # Target: mask masker = NiftiMapsMasker(maps33_img, mask_img=mask22_img, resampling_target="mask") masker.fit() np.testing.assert_almost_equal(masker.mask_img_.get_affine(), mask22_img.get_affine()) assert_equal(masker.mask_img_.shape, mask22_img.shape) np.testing.assert_almost_equal(masker.mask_img_.get_affine(), masker.maps_img_.get_affine()) assert_equal(masker.mask_img_.shape, masker.maps_img_.shape[:3]) transformed = masker.transform(fmri11_img) assert_equal(transformed.shape, (length, n_regions)) fmri11_img_r = masker.inverse_transform(transformed) 
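    # Round trip: the reconstructed image must come back on the maps_img_
    # grid, with time as the fourth dimension.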
np.testing.assert_almost_equal(fmri11_img_r.get_affine(), masker.maps_img_.get_affine()) assert_equal(fmri11_img_r.shape, (masker.maps_img_.shape[:3] + (length,))) # Target: maps masker = NiftiMapsMasker(maps33_img, mask_img=mask22_img, resampling_target="maps") masker.fit() np.testing.assert_almost_equal(masker.maps_img_.get_affine(), maps33_img.get_affine()) assert_equal(masker.maps_img_.shape, maps33_img.shape) np.testing.assert_almost_equal(masker.mask_img_.get_affine(), masker.maps_img_.get_affine()) assert_equal(masker.mask_img_.shape, masker.maps_img_.shape[:3]) transformed = masker.transform(fmri11_img) assert_equal(transformed.shape, (length, n_regions)) fmri11_img_r = masker.inverse_transform(transformed) np.testing.assert_almost_equal(fmri11_img_r.get_affine(), masker.maps_img_.get_affine()) assert_equal(fmri11_img_r.shape, (masker.maps_img_.shape[:3] + (length,))) # Test with clipped maps: mask does not contain all maps. # Shapes do matter in that case affine1 = np.eye(4) shape1 = (10, 11, 12) shape2 = (8, 9, 10) # mask affine2 = np.diag((2, 2, 2, 1)) # just for mask shape3 = (16, 18, 20) # maps n_regions = 9 length = 21 fmri11_img, _ = generate_random_img(shape1, affine=affine1, length=length) _, mask22_img = testing.generate_fake_fmri(shape2, length=1, affine=affine2) # Target: maps maps33_img, _ = \ testing.generate_maps(shape3, n_regions, affine=affine1) masker = NiftiMapsMasker(maps33_img, mask_img=mask22_img, resampling_target="maps") masker.fit() np.testing.assert_almost_equal(masker.maps_img_.get_affine(), maps33_img.get_affine()) assert_equal(masker.maps_img_.shape, maps33_img.shape) np.testing.assert_almost_equal(masker.mask_img_.get_affine(), masker.maps_img_.get_affine()) assert_equal(masker.mask_img_.shape, masker.maps_img_.shape[:3]) transformed = masker.transform(fmri11_img) assert_equal(transformed.shape, (length, n_regions)) # Some regions have been clipped. Resulting signal must be zero assert_less((transformed.var(axis=0) == 0).sum(), n_regions) fmri11_img_r = masker.inverse_transform(transformed) np.testing.assert_almost_equal(fmri11_img_r.get_affine(), masker.maps_img_.get_affine()) assert_equal(fmri11_img_r.shape, (masker.maps_img_.shape[:3] + (length,))) def test_nifti_maps_masker_overlap(): # Test resampling in NiftiMapsMasker affine = np.eye(4) shape = (5, 5, 5) length = 10 fmri_img, _ = generate_random_img(shape, affine=affine, length=length) non_overlapping_maps = np.zeros(shape + (2,)) non_overlapping_maps[:2, :, :, 0] = 1. non_overlapping_maps[2:, :, :, 1] = 1. non_overlapping_maps_img = nibabel.Nifti1Image(non_overlapping_maps, affine) overlapping_maps = np.zeros(shape + (2,)) overlapping_maps[:3, :, :, 0] = 1. overlapping_maps[2:, :, :, 1] = 1. 
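    # The two maps defined above share the x == 2 slab: extraction below
    # must raise with allow_overlap=False and succeed with allow_overlap=True.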
overlapping_maps_img = nibabel.Nifti1Image(overlapping_maps, affine) overlapping_masker = NiftiMapsMasker(non_overlapping_maps_img, allow_overlap=True) overlapping_masker.fit_transform(fmri_img) overlapping_masker = NiftiMapsMasker(overlapping_maps_img, allow_overlap=True) overlapping_masker.fit_transform(fmri_img) non_overlapping_masker = NiftiMapsMasker(non_overlapping_maps_img, allow_overlap=False) non_overlapping_masker.fit_transform(fmri_img) non_overlapping_masker = NiftiMapsMasker(overlapping_maps_img, allow_overlap=False) assert_raises_regex(ValueError, 'Overlap detected', non_overlapping_masker.fit_transform, fmri_img) PKH5 **-nilearn/input_data/tests/test_nifti_masker.py""" Test the nifti_masker module Functions in this file only test features added by the NiftiMasker class, not the underlying functions used (e.g. clean()). See test_masking.py and test_signal.py for this. """ # Author: Gael Varoquaux, Philippe Gervais # License: simplified BSD from tempfile import mkdtemp import shutil import os from distutils.version import LooseVersion from nose.tools import assert_true, assert_false, assert_raises from nose import SkipTest import numpy as np from numpy.testing import assert_array_equal from nibabel import Nifti1Image import nibabel from nilearn.input_data.nifti_masker import NiftiMasker, filter_and_mask from nilearn._utils import testing from nilearn._utils.exceptions import DimensionError from nilearn.image import index_img from nilearn._utils.testing import assert_raises_regex from nilearn._utils.class_inspect import get_params def test_auto_mask(): # This mostly a smoke test data = np.zeros((9, 9, 9)) data[3:-3, 3:-3, 3:-3] = 10 img = Nifti1Image(data, np.eye(4)) masker = NiftiMasker() # Smoke test the fit masker.fit(img) # Smoke test the transform # With a 4D img masker.transform([img, ]) # With a 3D img masker.transform(img) # check exception when transform() called without prior fit() masker2 = NiftiMasker(mask_img=img) testing.assert_raises_regex( ValueError, 'has not been fitted. ', masker2.transform, img) def test_detrend(): # Check that detrending doesn't do something stupid with 3D images data = np.zeros((9, 9, 9)) data[3:-3, 3:-3, 3:-3] = 10 img = Nifti1Image(data, np.eye(4)) mask = data.astype(np.int) mask_img = Nifti1Image(mask, np.eye(4)) masker = NiftiMasker(mask_img=mask_img, detrend=True) # Smoke test the fit X = masker.fit_transform(img) assert_true(np.any(X != 0)) def test_with_files(): # Standard masking data = np.zeros((40, 40, 40, 2)) data[20, 20, 20] = 1 data_img = Nifti1Image(data, np.eye(4)) with testing.write_tmp_imgs(data_img) as filename: masker = NiftiMasker() masker.fit(filename) masker.transform(filename) def test_nan(): data = np.ones((9, 9, 9)) data[0] = np.nan data[:, 0] = np.nan data[:, :, 0] = np.nan data[-1] = np.nan data[:, -1] = np.nan data[:, :, -1] = np.nan data[3:-3, 3:-3, 3:-3] = 10 img = Nifti1Image(data, np.eye(4)) masker = NiftiMasker(mask_args=dict(opening=0)) masker.fit(img) mask = masker.mask_img_.get_data() assert_true(mask[1:-1, 1:-1, 1:-1].all()) assert_false(mask[0].any()) assert_false(mask[:, 0].any()) assert_false(mask[:, :, 0].any()) assert_false(mask[-1].any()) assert_false(mask[:, -1].any()) assert_false(mask[:, :, -1].any()) def test_matrix_orientation(): """Test if processing is performed along the correct axis.""" # the "step" kind generate heavyside-like signals for each voxel. # all signals being identical, standardizing along the wrong axis # would leave a null signal. Along the correct axis, the step remains. 
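    # Concretely: standardizing a step like [0, 0, 1, 1] along time keeps
    # its step shape, while standardizing the identical signals across
    # voxels at a fixed time point would leave only zeros.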
fmri, mask = testing.generate_fake_fmri(shape=(40, 41, 42), kind="step") masker = NiftiMasker(mask_img=mask, standardize=True, detrend=True) timeseries = masker.fit_transform(fmri) assert(timeseries.shape[0] == fmri.shape[3]) assert(timeseries.shape[1] == mask.get_data().sum()) std = timeseries.std(axis=0) assert(std.shape[0] == timeseries.shape[1]) # paranoid assert(not np.any(std < 0.1)) # Test inverse transform masker = NiftiMasker(mask_img=mask, standardize=False, detrend=False) masker.fit() timeseries = masker.transform(fmri) recovered = masker.inverse_transform(timeseries) np.testing.assert_array_almost_equal(recovered.get_data(), fmri.get_data()) def test_mask_3d(): # Dummy mask data = np.zeros((40, 40, 40, 2)) data[20, 20, 20] = 1 data_img = Nifti1Image(data, np.eye(4)) with testing.write_tmp_imgs(data_img, create_files=True)\ as filename: masker = NiftiMasker(mask_img=filename) assert_raises(TypeError, masker.fit) def test_mask_4d(): # Dummy mask mask = np.zeros((10, 10, 10), dtype=int) mask[3:7, 3:7, 3:7] = 1 mask_bool = mask.astype(bool) mask_img = Nifti1Image(mask, np.eye(4)) # Dummy data data = np.zeros((10, 10, 10, 3), dtype=int) data[..., 0] = 1 data[..., 1] = 2 data[..., 2] = 3 data_img_4d = Nifti1Image(data, np.eye(4)) data_imgs = [index_img(data_img_4d, 0), index_img(data_img_4d, 1), index_img(data_img_4d, 2)] # check whether transform is indeed selecting niimgs subset sample_mask = np.array([0, 2]) masker = NiftiMasker(mask_img=mask_img, sample_mask=sample_mask) masker.fit() data_trans = masker.transform(data_imgs) data_trans_img = index_img(data_img_4d, sample_mask) data_trans_direct = data_trans_img.get_data()[mask_bool, :] data_trans_direct = np.swapaxes(data_trans_direct, 0, 1) assert_array_equal(data_trans, data_trans_direct) masker = NiftiMasker(mask_img=mask_img, sample_mask=sample_mask) masker.fit() data_trans2 = masker.transform(data_img_4d) assert_array_equal(data_trans2, data_trans_direct) def test_4d_single_scan(): mask = np.zeros((10, 10, 10)) mask[3:7, 3:7, 3:7] = 1 mask_img = Nifti1Image(mask, np.eye(4)) # Test that, in list of 4d images with last dimension=1, they are # considered as 3d data_5d = [np.random.random((10, 10, 10, 1)) for i in range(5)] data_4d = [d[..., 0] for d in data_5d] data_5d = [nibabel.Nifti1Image(d, np.eye(4)) for d in data_5d] data_4d = [nibabel.Nifti1Image(d, np.eye(4)) for d in data_4d] masker = NiftiMasker(mask_img=mask_img) masker.fit() data_trans_5d = masker.transform(data_5d) data_trans_4d = masker.transform(data_4d) assert_array_equal(data_trans_4d, data_trans_5d) def test_5d(): mask = np.zeros((10, 10, 10)) mask[3:7, 3:7, 3:7] = 1 mask_img = Nifti1Image(mask, np.eye(4)) # Test that, in list of 4d images with last dimension=1, they are # considered as 3d data_5d = [np.random.random((10, 10, 10, 3)) for i in range(5)] data_5d = [nibabel.Nifti1Image(d, np.eye(4)) for d in data_5d] masker = NiftiMasker(mask_img=mask_img) masker.fit() testing.assert_raises_regex( DimensionError, "Input data has incompatible dimensionality: " "Expected dimension is 4D and you provided " "a list of 4D images \(5D\).", masker.transform, data_5d) def test_sessions(): # Test the sessions vector data = np.ones((40, 40, 40, 4)) # Create a border, so that the masking work well data[0] = 0 data[-1] = 0 data[:, -1] = 0 data[:, 0] = 0 data[..., -1] = 0 data[..., 0] = 0 data[20, 20, 20] = 1 data_img = Nifti1Image(data, np.eye(4)) masker = NiftiMasker(sessions=np.ones(3, dtype=np.int)) assert_raises(ValueError, masker.fit_transform, data_img) def 
test_joblib_cache(): if not LooseVersion(nibabel.__version__) > LooseVersion('1.1.0'): # Old nibabel do not pickle raise SkipTest from sklearn.externals.joblib import hash, Memory mask = np.zeros((40, 40, 40)) mask[20, 20, 20] = 1 mask_img = Nifti1Image(mask, np.eye(4)) with testing.write_tmp_imgs(mask_img, create_files=True) as filename: masker = NiftiMasker(mask_img=filename) masker.fit() mask_hash = hash(masker.mask_img_) masker.mask_img_.get_data() assert_true(mask_hash == hash(masker.mask_img_)) # Test a tricky issue with memmapped joblib.memory that makes # imgs return by inverse_transform impossible to save cachedir = mkdtemp() try: masker.memory = Memory(cachedir=cachedir, mmap_mode='r', verbose=0) X = masker.transform(mask_img) # inverse_transform a first time, so that the result is cached out_img = masker.inverse_transform(X) out_img = masker.inverse_transform(X) out_img.to_filename(os.path.join(cachedir, 'test.nii')) finally: # enables to delete "filename" on windows del masker shutil.rmtree(cachedir, ignore_errors=True) def test_mask_init_errors(): # Errors that are caught in init mask = NiftiMasker(mask_strategy='oops') testing.assert_raises_regex( ValueError, "Unknown value of mask_strategy 'oops'", mask.fit) def test_compute_epi_mask(): # Taken from test_masking.py, but used to test that the masker class # is passing parameters appropriately. mean_image = np.ones((9, 9, 3)) mean_image[3:-2, 3:-2, :] = 10 mean_image[5, 5, :] = 11 mean_image = Nifti1Image(mean_image.astype(float), np.eye(4)) masker = NiftiMasker(mask_strategy='epi', mask_args=dict(opening=False)) masker.fit(mean_image) mask1 = masker.mask_img_ masker2 = NiftiMasker(mask_strategy='epi', mask_args=dict(opening=False, exclude_zeros=True)) masker2.fit(mean_image) mask2 = masker2.mask_img_ # With an array with no zeros, exclude_zeros should not make # any difference np.testing.assert_array_equal(mask1.get_data(), mask2.get_data()) # Check that padding with zeros does not change the extracted mask mean_image2 = np.zeros((30, 30, 3)) mean_image2[3:12, 3:12, :] = mean_image.get_data() mean_image2 = Nifti1Image(mean_image2, np.eye(4)) masker3 = NiftiMasker(mask_strategy='epi', mask_args=dict(opening=False, exclude_zeros=True)) masker3.fit(mean_image2) mask3 = masker3.mask_img_ np.testing.assert_array_equal(mask1.get_data(), mask3.get_data()[3:12, 3:12]) # However, without exclude_zeros, it does masker4 = NiftiMasker(mask_strategy='epi', mask_args=dict(opening=False)) masker4.fit(mean_image2) mask4 = masker4.mask_img_ assert_false(np.allclose(mask1.get_data(), mask4.get_data()[3:12, 3:12])) def test_filter_and_mask(): data = np.zeros([20, 30, 40, 5]) mask = np.zeros([20, 30, 40, 2]) mask[10, 15, 20, :] = 1 data_img = nibabel.Nifti1Image(data, np.eye(4)) mask_img = nibabel.Nifti1Image(mask, np.eye(4)) masker = NiftiMasker() params = get_params(NiftiMasker, masker) assert_raises_regex(DimensionError, "Input data has incompatible dimensionality: " "Expected dimension is 3D and you provided " "a 4D image.", filter_and_mask, data_img, mask_img, params) PKH!nilearn/decomposition/__init__.py""" The :mod:`nilearn.decomposition` module includes a subject level variant of the ICA called Canonical ICA. 
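It also provides DictLearning, a dictionary-learning variant of the same
multi-subject decomposition.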
""" from .canica import CanICA from .dict_learning import DictLearning __all__ = ['CanICA', 'DictLearning'] PKHnilearn/decomposition/canica.py""" CanICA """ # Author: Alexandre Abraham, Gael Varoquaux, # License: BSD 3 clause from operator import itemgetter import numpy as np from scipy.stats import scoreatpercentile from sklearn.decomposition import fastica from sklearn.externals.joblib import Memory, delayed, Parallel from sklearn.utils import check_random_state from .multi_pca import MultiPCA class CanICA(MultiPCA): """Perform Canonical Independent Component Analysis. Parameters ---------- mask: Niimg-like object or MultiNiftiMasker instance, optional Mask to be used on data. If an instance of masker is passed, then its mask will be used. If no mask is given, it will be computed automatically by a MultiNiftiMasker with default parameters. n_components: int Number of components to extract smoothing_fwhm: float, optional If smoothing_fwhm is not None, it gives the size in millimeters of the spatial smoothing to apply to the signal. do_cca: boolean, optional Indicate if a Canonical Correlation Analysis must be run after the PCA. standardize: boolean, optional If standardize is True, the time-series are centered and normed: their variance is put to 1 in the time dimension. threshold: None, 'auto' or float If None, no thresholding is applied. If 'auto', then we apply a thresholding that will keep the n_voxels, more intense voxels across all the maps, n_voxels being the number of voxels in a brain volume. A float value indicates the ratio of voxels to keep (2. means that the maps will together have 2 x n_voxels non-zero voxels ). n_init: int, optional The number of times the fastICA algorithm is restarted random_state: int or RandomState Pseudo number generator state used for random sampling. target_affine: 3x3 or 4x4 matrix, optional This parameter is passed to image.resample_img. Please see the related documentation for details. target_shape: 3-tuple of integers, optional This parameter is passed to image.resample_img. Please see the related documentation for details. low_pass: None or float, optional This parameter is passed to signal.clean. Please see the related documentation for details high_pass: None or float, optional This parameter is passed to signal.clean. Please see the related documentation for details t_r: float, optional This parameter is passed to signal.clean. Please see the related documentation for details memory: instance of joblib.Memory or string Used to cache the masking process. By default, no caching is done. If a string is given, it is the path to the caching directory. memory_level: integer, optional Rough estimator of the amount of memory used by caching. Higher value means more memory for caching. n_jobs: integer, optional The number of CPUs to use to do the computation. -1 means 'all CPUs', -2 'all CPUs but one', and so on. verbose: integer, optional Indicate the level of verbosity. By default, nothing is printed References ---------- * G. Varoquaux et al. "A group model for stable multi-subject ICA on fMRI datasets", NeuroImage Vol 51 (2010), p. 288-299 * G. Varoquaux et al. "ICA-based sparse features recovery from fMRI datasets", IEEE ISBI 2010, p. 
  1177
"""

    def __init__(self, mask=None, n_components=20, smoothing_fwhm=6,
                 do_cca=True,
                 threshold='auto', n_init=10,
                 random_state=None,
                 standardize=True, detrend=True,
                 low_pass=None, high_pass=None, t_r=None,
                 target_affine=None, target_shape=None,
                 mask_strategy='epi', mask_args=None,
                 memory=Memory(cachedir=None), memory_level=0,
                 n_jobs=1, verbose=0
                 ):
        super(CanICA, self).__init__(
            n_components=n_components,
            do_cca=do_cca,
            random_state=random_state,
            # feature_compression=feature_compression,
            mask=mask, smoothing_fwhm=smoothing_fwhm,
            standardize=standardize, detrend=detrend,
            low_pass=low_pass, high_pass=high_pass, t_r=t_r,
            target_affine=target_affine, target_shape=target_shape,
            mask_strategy=mask_strategy, mask_args=mask_args,
            memory=memory, memory_level=memory_level,
            n_jobs=n_jobs, verbose=verbose)

        self.threshold = threshold
        self.n_init = n_init

    def _unmix_components(self):
        """Core function of CanICA that rotates components_ to maximize
        independence"""
        random_state = check_random_state(self.random_state)

        seeds = random_state.randint(np.iinfo(np.int32).max, size=self.n_init)
        results = Parallel(n_jobs=self.n_jobs, verbose=self.verbose)(
            delayed(self._cache(fastica, func_memory_level=2))
            (self.components_.T, whiten=True, fun='cube', random_state=seed)
            for seed in seeds)

        ica_maps_gen_ = (result[2].T for result in results)
        ica_maps_and_sparsities = ((ica_map,
                                    np.sum(np.abs(ica_map), axis=1).max())
                                   for ica_map in ica_maps_gen_)
        ica_maps, _ = min(ica_maps_and_sparsities, key=itemgetter(-1))

        # Thresholding
        ratio = None
        if isinstance(self.threshold, float):
            ratio = self.threshold
        elif self.threshold == 'auto':
            ratio = 1.
        elif self.threshold is not None:
            raise ValueError("Threshold must be None, "
                             "'auto' or float. You provided %s." %
                             str(self.threshold))
        if ratio is not None:
            abs_ica_maps = np.abs(ica_maps)
            threshold = scoreatpercentile(
                abs_ica_maps,
                100. - (100. / len(ica_maps)) * ratio)
            ica_maps[abs_ica_maps < threshold] = 0.
        self.components_ = ica_maps

        # flip signs in each component so that peak is +ve
        for component in self.components_:
            if component.max() < -component.min():
                component *= -1

    # Overriding MultiPCA._raw_fit overrides MultiPCA.fit behavior
    def _raw_fit(self, data):
        """Helper function that directly processes unmasked data.

        Useful when called by another estimator that has already
        unmasked data.

        Parameters
        ----------
        data: ndarray or memmap
            Unmasked data to process
        """
        MultiPCA._raw_fit(self, data)
        self._unmix_components()
        return self
PKgoHZ?tFtFnilearn/decomposition/base.py"""
Base class for decomposition estimators, utilities for masking and
dimension reduction of group data
"""
from __future__ import division
from math import ceil
import itertools

import numpy as np

from scipy import linalg
from sklearn.base import BaseEstimator
from sklearn.externals.joblib import Memory, Parallel, delayed
from sklearn.linear_model import LinearRegression
from sklearn.utils import check_random_state
from sklearn.utils.extmath import randomized_svd

from .._utils.cache_mixin import CacheMixin, cache
from .._utils.niimg import _safe_get_data
from .._utils.compat import _basestring
from ..input_data import NiftiMapsMasker
from ..input_data.masker_validation import check_embedded_nifti_masker


def mask_and_reduce(masker, imgs, confounds=None,
                    reduction_ratio='auto',
                    n_components=None, random_state=None,
                    memory_level=0,
                    memory=Memory(cachedir=None),
                    n_jobs=1):
    """Mask and reduce provided 4D images with the given masker.
    Uses a PCA (randomized for small reduction ratio) or a range finding
    matrix on time series to reduce data size in time direction. For
    multiple images, the concatenation of data is returned, either as an
    ndarray or a memory map (useful for big datasets that do not fit in
    memory).

    Parameters
    ----------
    masker: NiftiMasker or MultiNiftiMasker
        Instance used to mask provided data.

    imgs: list of 4D Niimg-like objects
        See http://nilearn.github.io/manipulating_images/input_output.html.
        List of subject data to mask, reduce and stack.

    confounds: CSV file path or 2D matrix, optional
        This parameter is passed to signal.clean. Please see the
        corresponding documentation for details.

    reduction_ratio: 'auto' or float between 0. and 1.
        - Between 0. and 1.: controls data reduction in the temporal
          domain. 1. means no reduction, < 1. calls for an SVD based
          reduction.
        - If set to 'auto', the estimator will set the number of components
          per reduced session to be n_components.

    n_components: integer, optional
        Number of components per subject to be extracted by dimension
        reduction

    random_state: int or RandomState
        Pseudo number generator state used for random sampling.

    memory_level: integer, optional
        Integer indicating the level of memorization. The higher, the more
        function calls are cached.

    memory: joblib.Memory
        Used to cache the function calls.

    Returns
    -------
    data: ndarray or memory map
        Concatenation of reduced data.
    """
    if not hasattr(imgs, '__iter__'):
        imgs = [imgs]

    if reduction_ratio == 'auto':
        if n_components is None:
            # Reduction ratio is 1 if
            # neither n_components nor ratio is provided
            reduction_ratio = 1
    else:
        if reduction_ratio is None:
            reduction_ratio = 1
        else:
            reduction_ratio = float(reduction_ratio)
        if not 0 <= reduction_ratio <= 1:
            raise ValueError('Reduction ratio should be between 0. 
and 1.,' 'got %.2f' % reduction_ratio) if confounds is None: confounds = itertools.repeat(confounds) if reduction_ratio == 'auto': n_samples = n_components reduction_ratio = None else: # We'll let _mask_and_reduce_single decide on the number of # samples based on the reduction_ratio n_samples = None data_list = Parallel(n_jobs=n_jobs)( delayed(_mask_and_reduce_single)( masker, img, confound, reduction_ratio=reduction_ratio, n_samples=n_samples, memory=memory, memory_level=memory_level, random_state=random_state ) for img, confound in zip(imgs, confounds)) subject_n_samples = [subject_data.shape[0] for subject_data in data_list] n_samples = np.sum(subject_n_samples) n_voxels = np.sum(_safe_get_data(masker.mask_img_)) data = np.empty((n_samples, n_voxels), order='F', dtype='float64') current_position = 0 for i, next_position in enumerate(np.cumsum(subject_n_samples)): data[current_position:next_position] = data_list[i] current_position = next_position # Clear memory as fast as possible: remove the reference on # the corresponding block of data data_list[i] = None return data def _mask_and_reduce_single(masker, img, confound, reduction_ratio=None, n_samples=None, memory=None, memory_level=0, random_state=None): """Utility function for multiprocessing from MaskReducer""" this_data = masker.transform(img, confound) # Now get rid of the img as fast as possible, to free a # reference count on it, and possibly free the corresponding # data del img random_state = check_random_state(random_state) data_n_samples = this_data.shape[0] if reduction_ratio is None: assert n_samples is not None n_samples = min(n_samples, data_n_samples) else: n_samples = int(ceil(data_n_samples * reduction_ratio)) if n_samples <= data_n_samples // 4: U, S, _ = cache(randomized_svd, memory, memory_level=memory_level, func_memory_level=3)(this_data.T, n_samples, transpose=True, random_state=random_state) U = U.T else: U, S, _ = cache(linalg.svd, memory, memory_level=memory_level, func_memory_level=3)(this_data.T, full_matrices=False) U = U.T[:n_samples].copy() S = S[:n_samples] U = U * S[:, np.newaxis] return U class BaseDecomposition(BaseEstimator, CacheMixin): """Base class for matrix factorization based decomposition estimators. Handles mask logic, provides transform and inverse_transform methods .. versionadded:: 0.2 Parameters ---------- n_components: int Number of components to extract, for each 4D-Niimage random_state: int or RandomState Pseudo number generator state used for random sampling. mask: Niimg-like object or MultiNiftiMasker instance, optional Mask to be used on data. If an instance of masker is passed, then its mask will be used. If no mask is given, it will be computed automatically by a MultiNiftiMasker with default parameters. smoothing_fwhm: float, optional If smoothing_fwhm is not None, it gives the size in millimeters of the spatial smoothing to apply to the signal. standardize: boolean, optional If standardize is True, the time-series are centered and normed: their mean is put to 0 and their variance to 1 in the time dimension. detrend: boolean, optional This parameter is passed to signal.clean. Please see the related documentation for details low_pass: False or float, optional This parameter is passed to signal.clean. Please see the related documentation for details high_pass: False or float, optional This parameter is passed to signal.clean. Please see the related documentation for details t_r: float, optional This parameter is passed to signal.clean. 
class BaseDecomposition(BaseEstimator, CacheMixin):
    """Base class for matrix factorization based decomposition estimators.

    Handles mask logic, provides transform and inverse_transform methods.

    .. versionadded:: 0.2

    Parameters
    ----------
    n_components: int
        Number of components to extract, for each 4D-Niimage
    random_state: int or RandomState
        Pseudo number generator state used for random sampling.
    mask: Niimg-like object or MultiNiftiMasker instance, optional
        Mask to be used on data. If an instance of masker is passed,
        then its mask will be used. If no mask is given, it will be
        computed automatically by a MultiNiftiMasker with default
        parameters.
    smoothing_fwhm: float, optional
        If smoothing_fwhm is not None, it gives the size in millimeters
        of the spatial smoothing to apply to the signal.
    standardize: boolean, optional
        If standardize is True, the time-series are centered and normed:
        their mean is put to 0 and their variance to 1 in the time
        dimension.
    detrend: boolean, optional
        This parameter is passed to signal.clean. Please see the related
        documentation for details.
    low_pass: None or float, optional
        This parameter is passed to signal.clean. Please see the related
        documentation for details.
    high_pass: None or float, optional
        This parameter is passed to signal.clean. Please see the related
        documentation for details.
    t_r: float, optional
        This parameter is passed to signal.clean. Please see the related
        documentation for details.
    target_affine: 3x3 or 4x4 matrix, optional
        This parameter is passed to image.resample_img. Please see the
        related documentation for details.
    target_shape: 3-tuple of integers, optional
        This parameter is passed to image.resample_img. Please see the
        related documentation for details.
    mask_strategy: {'background', 'epi'}, optional
        The strategy used to compute the mask: use 'background' if your
        images present a clear homogeneous background, and 'epi' if they
        are raw EPI images. Depending on this value, the mask will be
        computed from masking.compute_background_mask or
        masking.compute_epi_mask. Default is 'epi'.
    mask_args: dict, optional
        If mask is None, these are additional parameters passed to
        masking.compute_background_mask or masking.compute_epi_mask to
        fine-tune mask computation. Please see the related documentation
        for details.
    memory: instance of joblib.Memory or str
        Used to cache the masking process. By default, no caching is
        done. If a string is given, it is the path to the caching
        directory.
    memory_level: integer, optional
        Rough estimator of the amount of memory used by caching. Higher
        value means more memory for caching.
    n_jobs: integer, optional
        The number of CPUs to use to do the computation. -1 means
        'all CPUs', -2 'all CPUs but one', and so on.
    verbose: integer, optional
        Indicate the level of verbosity. By default, nothing is printed.

    Attributes
    ----------
    `mask_img_` : Niimg-like object
        See http://nilearn.github.io/manipulating_images/input_output.html.
        The mask of the data. If no mask was given at masker creation,
        contains the automatically computed mask.
    """

    def __init__(self, n_components=20,
                 random_state=None,
                 mask=None, smoothing_fwhm=None,
                 standardize=True, detrend=True,
                 low_pass=None, high_pass=None, t_r=None,
                 target_affine=None, target_shape=None,
                 mask_strategy='epi', mask_args=None,
                 memory=Memory(cachedir=None), memory_level=0,
                 n_jobs=1, verbose=0):
        self.n_components = n_components
        self.random_state = random_state
        self.mask = mask
        self.smoothing_fwhm = smoothing_fwhm
        self.standardize = standardize
        self.detrend = detrend
        self.low_pass = low_pass
        self.high_pass = high_pass
        self.t_r = t_r
        self.target_affine = target_affine
        self.target_shape = target_shape
        self.mask_strategy = mask_strategy
        self.mask_args = mask_args
        self.memory = memory
        self.memory_level = memory_level
        self.n_jobs = n_jobs
        self.verbose = verbose
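    # Concrete subclasses (CanICA, DictLearning, MultiPCA below) are
    # used like any scikit-learn estimator; a sketch, where `func_imgs`
    # is a hypothetical list of 4D images:
    #
    #     >>> from nilearn.decomposition import CanICA  # doctest: +SKIP
    #     >>> canica = CanICA(n_components=20)          # doctest: +SKIP
    #     >>> canica.fit(func_imgs)                     # doctest: +SKIP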
    def fit(self, imgs, y=None, confounds=None):
        """Base fit for decomposition estimators : compute the embedded
        masker

        Parameters
        ----------
        imgs: list of Niimg-like objects
            See
            http://nilearn.github.io/manipulating_images/input_output.html.
            Data on which the mask is calculated. If this is a list, the
            affine is considered the same for all.
        """
        if isinstance(imgs, _basestring) or not hasattr(imgs, '__iter__'):
            # these classes are meant for list of 4D images
            # (multi-subject), we want it to work also on a single
            # subject, so we hack it.
            imgs = [imgs, ]

        if len(imgs) == 0:
            # Common error that arises from a null glob. Capture it
            # early and raise a helpful message
            raise ValueError('Need one or more Niimg-like objects as input, '
                             'an empty list was given.')
        self.masker_ = check_embedded_nifti_masker(self)

        # Avoid warning with imgs != None
        # if masker_ has been provided a mask_img
        if self.masker_.mask_img is None:
            self.masker_.fit(imgs)
        else:
            self.masker_.fit()
        self.mask_img_ = self.masker_.mask_img_
        return self

    def _check_components_(self):
        if not hasattr(self, 'components_'):
            if self.__class__.__name__ == 'BaseDecomposition':
                raise ValueError("Object has no components_ attribute. "
                                 "This may be because "
                                 "BaseDecomposition is directly "
                                 "being used.")
            else:
                raise ValueError("Object has no components_ attribute. "
                                 "This is probably because fit has not "
                                 "been called.")

    def transform(self, imgs, confounds=None):
        """Project the data into a reduced representation

        Parameters
        ----------
        imgs: iterable of Niimg-like objects
            See
            http://nilearn.github.io/manipulating_images/input_output.html.
            Data to be projected
        confounds: CSV file path or 2D matrix
            This parameter is passed to nilearn.signal.clean. Please see
            the related documentation for details

        Returns
        -------
        loadings: list of 2D ndarray,
            For each subject, each sample, loadings for each decomposition
            components shape: number of subjects * (number of scans,
            number of regions)
        """
        self._check_components_()
        components_img_ = self.masker_.inverse_transform(self.components_)
        nifti_maps_masker = NiftiMapsMasker(
            components_img_, self.masker_.mask_img_,
            resampling_target='maps')
        nifti_maps_masker.fit()
        # XXX: dealing properly with 4D/ list of 4D data?
        if confounds is None:
            confounds = [None] * len(imgs)
        return [nifti_maps_masker.transform(img, confounds=confound)
                for img, confound in zip(imgs, confounds)]

    def inverse_transform(self, loadings):
        """Use provided loadings to compute corresponding linear component
        combination in whole-brain voxel space

        Parameters
        ----------
        loadings: list of numpy array (n_samples x n_components)
            Component signals to transform back into voxel signals

        Returns
        -------
        reconstructed_imgs: list of nibabel.Nifti1Image
            For each loading, reconstructed Nifti1Image
        """
        if not hasattr(self, 'components_'):
            raise ValueError('Object has no components_ attribute. This is '
                             'either because fit has not been called or '
                             'because _DecompositionEstimator has directly '
                             'been used')
        self._check_components_()
        components_img_ = self.masker_.inverse_transform(self.components_)
        nifti_maps_masker = NiftiMapsMasker(
            components_img_, self.masker_.mask_img_,
            resampling_target='maps')
        nifti_maps_masker.fit()
        # XXX: dealing properly with 2D/ list of 2D data?
        return [nifti_maps_masker.inverse_transform(loading)
                for loading in loadings]

    def _sort_by_score(self, data):
        """Sort components on the explained variance over data of
        estimator components_"""
        components_score = self._raw_score(data, per_component=True)
        order = np.argsort(components_score)[::-1]
        self.components_ = self.components_[order]

    def _raw_score(self, data, per_component=True):
        """Return explained variance over data of estimator components_"""
        return self._cache(explained_variance)(data, self.components_,
                                               per_component=per_component)
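    # After fit, transform projects new images onto the learned maps and
    # inverse_transform maps loadings back to voxel space; a sketch,
    # where `estimator` is any fitted subclass and `imgs` a list of 4D
    # images:
    #
    #     >>> loadings = estimator.transform(imgs)  # doctest: +SKIP
    #     >>> imgs_back = estimator.inverse_transform(loadings)  # doctest: +SKIP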
    def score(self, imgs, confounds=None):
        """Score function based on explained variance on imgs.

        Should only be used by DecompositionEstimator derived classes

        Parameters
        ----------
        imgs: iterable of Niimg-like objects
            See
            http://nilearn.github.io/manipulating_images/input_output.html.
            Data to be scored
        confounds: CSV file path or 2D matrix
            This parameter is passed to nilearn.signal.clean. Please see
            the related documentation for details

        Returns
        -------
        score: float,
            Holds the score for each subject. Score is two dimensional
            if per_component is True. First dimension is squeezed if the
            number of subjects is one
        """
        data = mask_and_reduce(self.masker_, imgs, confounds,
                               reduction_ratio=1.)
        return self._raw_score(data, per_component=False)


def explained_variance(X, components, per_component=True):
    """Score function based on explained variance

    Parameters
    ----------
    X: ndarray,
        Holds single subject data to be tested against components.
    components: ndarray,
        Component maps, one per row.
    per_component: boolean,
        Specify whether the explained variance ratio is desired for each
        map or for the global set of components_

    Returns
    -------
    score: ndarray,
        Holds the score for each subject. score is two dimensional if
        per_component = True
    """
    full_var = np.var(X)
    n_components = components.shape[0]
    S = np.sqrt(np.sum(components ** 2, axis=1))
    S[S == 0] = 1
    components = components / S[:, np.newaxis]
    projected_data = components.dot(X.T)
    if per_component:
        res_var = np.zeros(n_components)
        for i in range(n_components):
            res = X - np.outer(projected_data[i], components[i])
            res_var[i] = np.var(res)
        return np.maximum(0., 1. - res_var / full_var)
    else:
        lr = LinearRegression(fit_intercept=True)
        lr.fit(components.T, X.T)
        res_var = X - lr.coef_.dot(components)
        res_var **= 2
        res_var = np.sum(res_var)
        return np.maximum(0., 1. - res_var / full_var)
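# A quick self-contained check of explained_variance on synthetic data
# that is exactly spanned by the components (illustrative sketch):
#
#     import numpy as np
#     rng = np.random.RandomState(0)
#     components = rng.randn(4, 300)          # (n_components, n_voxels)
#     X = rng.randn(10, 4).dot(components)    # data lies in their span
#     scores = explained_variance(X, components, per_component=True)
#     # scores has shape (n_components,) with values in [0, 1]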
nilearn/decomposition/dict_learning.py
"""
Dictionary learning estimator: Perform a map learning algorithm by
learning a temporal dense dictionary along with sparse spatial loadings,
that constitute the output maps
"""
# Author: Arthur Mensch
# License: BSD 3 clause
from __future__ import division

import warnings
from distutils.version import LooseVersion

import numpy as np
import sklearn
from sklearn.base import TransformerMixin
from sklearn.decomposition import dict_learning_online
from sklearn.externals.joblib import Memory
from sklearn.linear_model import Ridge

from .base import BaseDecomposition, mask_and_reduce
from .canica import CanICA

if LooseVersion(sklearn.__version__) >= LooseVersion('0.17'):
    # check_input=False is an optimization available only in sklearn >=0.17
    sparse_encode_args = {'check_input': False}
else:
    sparse_encode_args = {}


def _compute_loadings(components, data):
    ridge = Ridge(fit_intercept=None, alpha=1e-8)
    ridge.fit(components.T, np.asarray(data.T))
    loadings = ridge.coef_.T

    S = np.sqrt(np.sum(loadings ** 2, axis=0))
    S[S == 0] = 1
    loadings /= S[np.newaxis, :]
    return loadings


class DictLearning(BaseDecomposition, TransformerMixin):
    """Perform a map learning algorithm based on spatial component
    sparsity, over a CanICA initialization. This yields more stable maps
    than CanICA.

    .. versionadded:: 0.2

    Parameters
    ----------
    mask: Niimg-like object or MultiNiftiMasker instance, optional
        Mask to be used on data. If an instance of masker is passed, then
        its mask will be used. If no mask is given, it will be computed
        automatically by a MultiNiftiMasker with default parameters.
    n_components: int
        Number of components to extract.
    batch_size : int, optional, default=20
        The number of samples to take in each batch.
    n_epochs: float
        Number of epochs the algorithm should run on the data.
    alpha: float, optional, default=1
        Sparsity controlling parameter.
    dict_init: Niimg-like object, optional
        Initial estimation of dictionary maps. Would be computed from
        CanICA if not provided.
    reduction_ratio: 'auto' or float between 0. and 1.
        - Between 0. and 1.: controls data reduction in the temporal
          domain. 1. means no reduction, < 1. calls for an SVD based
          reduction.
        - if set to 'auto', estimator will set the number of components
          per reduced session to be n_components.
    method : {'lars', 'cd'}
        Coding method used by sklearn backend. Below are the possible
        values. lars: uses the least angle regression method to solve the
        lasso problem (linear_model.lars_path). cd: uses the coordinate
        descent method to compute the Lasso solution (linear_model.Lasso).
        Lars will be faster if the estimated components are sparse.
    random_state: int or RandomState
        Pseudo number generator state used for random sampling.
    smoothing_fwhm: float, optional
        If smoothing_fwhm is not None, it gives the size in millimeters of
        the spatial smoothing to apply to the signal.
    standardize : boolean, optional
        If standardize is True, the time-series are centered and normed:
        their variance is put to 1 in the time dimension.
    target_affine: 3x3 or 4x4 matrix, optional
        This parameter is passed to image.resample_img. Please see the
        related documentation for details.
    target_shape: 3-tuple of integers, optional
        This parameter is passed to image.resample_img. Please see the
        related documentation for details.
    low_pass: None or float, optional
        This parameter is passed to signal.clean. Please see the related
        documentation for details.
    high_pass: None or float, optional
        This parameter is passed to signal.clean. Please see the related
        documentation for details.
    t_r: float, optional
        This parameter is passed to signal.clean. Please see the related
        documentation for details.
    memory: instance of joblib.Memory or string
        Used to cache the masking process. By default, no caching is done.
        If a string is given, it is the path to the caching directory.
    memory_level: integer, optional
        Rough estimator of the amount of memory used by caching. Higher
        value means more memory for caching.
    n_jobs: integer, optional, default=1
        The number of CPUs to use to do the computation. -1 means
        'all CPUs', -2 'all CPUs but one', and so on.
    verbose: integer, optional
        Indicate the level of verbosity. By default, nothing is printed.

    References
    ----------
    * Gael Varoquaux et al. Multi-subject dictionary learning to segment
      an atlas of brain spontaneous activity Information Processing in
      Medical Imaging, 2011, pp.
562-573, Lecture Notes in Computer Science """ def __init__(self, n_components=20, n_epochs=1, alpha=10, reduction_ratio='auto', dict_init=None, random_state=None, batch_size=20, method="cd", mask=None, smoothing_fwhm=4, standardize=True, detrend=True, low_pass=None, high_pass=None, t_r=None, target_affine=None, target_shape=None, mask_strategy='epi', mask_args=None, n_jobs=1, verbose=0, memory=Memory(cachedir=None), memory_level=0): BaseDecomposition.__init__(self, n_components=n_components, random_state=random_state, mask=mask, smoothing_fwhm=smoothing_fwhm, standardize=standardize, detrend=detrend, low_pass=low_pass, high_pass=high_pass, t_r=t_r, target_affine=target_affine, target_shape=target_shape, mask_strategy=mask_strategy, mask_args=mask_args, memory=memory, memory_level=memory_level, n_jobs=n_jobs, verbose=verbose) self.n_epochs = n_epochs self.batch_size = batch_size self.method = method self.alpha = alpha self.reduction_ratio = reduction_ratio self.dict_init = dict_init def _init_dict(self, data): if self.dict_init is not None: components = self.masker_.transform(self.dict_init) else: canica = CanICA(n_components=self.n_components, # CanICA specific parameters do_cca=True, threshold=float(self.n_components), n_init=1, # mask parameter is not useful as we bypass masking mask=self.masker_, random_state=self.random_state, memory=self.memory, memory_level=self.memory_level, n_jobs=self.n_jobs, verbose=self.verbose) with warnings.catch_warnings(): warnings.simplefilter("ignore", UserWarning) # We use protected function _raw_fit as data # has already been unmasked canica._raw_fit(data) components = canica.components_ S = (components ** 2).sum(axis=1) S[S == 0] = 1 components /= S[:, np.newaxis] self.components_init_ = components def _init_loadings(self, data): self.loadings_init_ = self._cache(_compute_loadings)( self.components_init_, data) def fit(self, imgs, y=None, confounds=None): """Compute the mask and component maps across subjects Parameters ---------- imgs: list of Niimg-like objects See http://nilearn.github.io/manipulating_images/input_output.html. Data on which PCA must be calculated. If this is a list, the affine is considered the same for all. confounds: CSV file path or 2D matrix This parameter is passed to nilearn.signal.clean. 
        Please see the related documentation for details
        """
        # Base logic for decomposition estimators
        BaseDecomposition.fit(self, imgs)

        if self.verbose:
            print('[DictLearning] Loading data')
        data = mask_and_reduce(self.masker_, imgs, confounds,
                               reduction_ratio=self.reduction_ratio,
                               n_components=self.n_components,
                               random_state=self.random_state,
                               memory_level=max(0, self.memory_level - 1),
                               n_jobs=self.n_jobs, memory=self.memory)

        if self.verbose:
            print('[DictLearning] Learning initial components')
        self._init_dict(data)

        self._raw_fit(data)
        return self

    def _raw_fit(self, data):
        """Helper function that directly processes unmasked data

        Parameters
        ----------
        data: ndarray,
            Shape (n_samples, n_features)
        """
        _, n_features = data.shape

        if self.verbose:
            print('[DictLearning] Computing initial loadings')
        self._init_loadings(data)

        dict_init = self.loadings_init_

        n_iter = ((n_features - 1) // self.batch_size + 1) * self.n_epochs

        if self.verbose:
            print('[DictLearning] Learning dictionary')
        self.components_, _ = self._cache(dict_learning_online)(
            data.T, self.n_components, alpha=self.alpha, n_iter=n_iter,
            batch_size=self.batch_size, method=self.method,
            dict_init=dict_init, verbose=max(0, self.verbose - 1),
            random_state=self.random_state, return_code=True, shuffle=True,
            n_jobs=1)
        self.components_ = self.components_.T

        # Unit-variance scaling
        S = np.sqrt(np.sum(self.components_ ** 2, axis=1))
        S[S == 0] = 1
        self.components_ /= S[:, np.newaxis]

        # Flip signs in each component so that the positive part is l1
        # larger than the negative part. Empirically this yields more
        # positive-looking maps than setting the max to be positive.
        for component in self.components_:
            if np.sum(component > 0) < np.sum(component < 0):
                component *= -1
        return self

nilearn/decomposition/multi_pca.py
"""
PCA dimension reduction on multiple subjects.
This is a good initialization method for ICA.
"""
import numpy as np
from sklearn.externals.joblib import Memory
from sklearn.utils.extmath import randomized_svd
from sklearn.base import TransformerMixin

from .base import BaseDecomposition, mask_and_reduce


class MultiPCA(BaseDecomposition, TransformerMixin):
    """Perform Multi Subject Principal Component Analysis.

    Perform a PCA on each subject, stack the results, and reduce them at
    group level. An optional Canonical Correlation Analysis can be
    performed at group level. This is a good initialization method for
    ICA.

    Parameters
    ----------
    n_components: int
        Number of components to extract
    do_cca: boolean, optional
        Indicate if a Canonical Correlation Analysis must be run after
        the PCA.
    random_state: int or RandomState
        Pseudo number generator state used for random sampling.
    smoothing_fwhm: float, optional
        If smoothing_fwhm is not None, it gives the size in millimeters
        of the spatial smoothing to apply to the signal.
    mask: Niimg-like object, instance of NiftiMasker or MultiNiftiMasker,
        optional
        Mask to be used on data. If an instance of masker is passed, then
        its mask will be used. If no mask is given, it will be computed
        automatically by a MultiNiftiMasker with default parameters.
    standardize : boolean, optional
        If standardize is True, the time-series are centered and normed:
        their variance is put to 1 in the time dimension.
    target_affine: 3x3 or 4x4 matrix, optional
        This parameter is passed to image.resample_img. Please see the
        related documentation for details.
    target_shape: 3-tuple of integers, optional
        This parameter is passed to image.resample_img. Please see the
        related documentation for details.
low_pass: False or float, optional This parameter is passed to signal.clean. Please see the related documentation for details high_pass: False or float, optional This parameter is passed to signal.clean. Please see the related documentation for details t_r: float, optional This parameter is passed to signal.clean. Please see the related documentation for details memory: instance of joblib.Memory or string Used to cache the masking process. By default, no caching is done. If a string is given, it is the path to the caching directory. memory_level: integer, optional Rough estimator of the amount of memory used by caching. Higher value means more memory for caching. n_jobs: integer, optional The number of CPUs to use to do the computation. -1 means 'all CPUs', -2 'all CPUs but one', and so on. verbose: integer, optional Indicate the level of verbosity. By default, nothing is printed. Attributes ---------- `masker_` : instance of MultiNiftiMasker Masker used to filter and mask data as first step. If an instance of MultiNiftiMasker is given in `mask` parameter, this is a copy of it. Otherwise, a masker is created using the value of `mask` and other NiftiMasker related parameters as initialization. `mask_img_` : Niimg-like object See http://nilearn.github.io/manipulating_images/input_output.html. The mask of the data. If no mask was given at masker creation, contains the automatically computed mask. `components_` : 2D numpy array (n_components x n-voxels) Array of masked extracted components. They can be unmasked thanks to the `masker_` attribute. """ def __init__(self, n_components=20, mask=None, smoothing_fwhm=None, do_cca=True, random_state=None, standardize=False, detrend=False, low_pass=None, high_pass=None, t_r=None, target_affine=None, target_shape=None, mask_strategy='epi', mask_args=None, memory=Memory(cachedir=None), memory_level=0, n_jobs=1, verbose=0 ): self.n_components = n_components self.do_cca = do_cca BaseDecomposition.__init__(self, n_components=n_components, random_state=random_state, mask=mask, smoothing_fwhm=smoothing_fwhm, standardize=standardize, detrend=detrend, low_pass=low_pass, high_pass=high_pass, t_r=t_r, target_affine=target_affine, target_shape=target_shape, mask_strategy=mask_strategy, mask_args=mask_args, memory=memory, memory_level=memory_level, n_jobs=n_jobs, verbose=verbose) def fit(self, imgs, y=None, confounds=None): """Compute the mask and the components Parameters ---------- imgs: list of Niimg-like objects See http://nilearn.github.io/manipulating_images/input_output.html. Data on which the PCA must be calculated. If this is a list, the affine is considered the same for all. confounds: CSV file path or 2D matrix This parameter is passed to nilearn.signal.clean. 
Please see the related documentation for details """ BaseDecomposition.fit(self, imgs) data = mask_and_reduce(self.masker_, imgs, confounds=confounds, n_components=self.n_components, random_state=self.random_state, memory=self.memory, memory_level=max(0, self.memory_level - 1), n_jobs=self.n_jobs) self._raw_fit(data) return self def _raw_fit(self, data): """Helper function that directly process unmasked data""" if self.do_cca: S = np.sqrt(np.sum(data ** 2, axis=1)) S[S == 0] = 1 data /= S[:, np.newaxis] self.components_, self.variance_, _ = self._cache( randomized_svd, func_memory_level=2)( data.T, n_components=self.n_components, transpose=True, random_state=self.random_state, n_iter=3) if self.do_cca: data *= S[:, np.newaxis] self.components_ = self.components_.T PKH'nilearn/decomposition/tests/__init__.pyPKH  -nilearn/decomposition/tests/test_multi_pca.py""" Test the multi-PCA module """ import numpy as np from nose.tools import assert_raises, assert_true import nibabel from numpy.testing import assert_almost_equal, assert_equal from nilearn.decomposition.multi_pca import MultiPCA from nilearn.input_data import MultiNiftiMasker, NiftiMasker from nilearn._utils.testing import assert_raises_regex def test_multi_pca(): # Smoke test the MultiPCA # XXX: this is mostly a smoke test shape = (6, 8, 10, 5) affine = np.eye(4) rng = np.random.RandomState(0) # Create a "multi-subject" dataset data = [] for i in range(8): this_data = rng.normal(size=shape) # Create fake activation to get non empty mask this_data[2:4, 2:4, 2:4, :] += 10 data.append(nibabel.Nifti1Image(this_data, affine)) mask_img = nibabel.Nifti1Image(np.ones(shape[:3], dtype=np.int8), affine) multi_pca = MultiPCA(mask=mask_img, n_components=3, random_state=0) # Test that the components are the same if we put twice the same data, and # that fit output is deterministic components1 = multi_pca.fit(data).components_ components2 = multi_pca.fit(data).components_ components3 = multi_pca.fit(2 * data).components_ np.testing.assert_array_equal(components1, components2) np.testing.assert_array_almost_equal(components1, components3) # Smoke test fit with 'confounds' argument confounds = [np.arange(10).reshape(5, 2)] * 8 multi_pca.fit(data, confounds=confounds) # Smoke test that multi_pca also works with single subject data multi_pca.fit(data[0]) # Check that asking for too little components raises a ValueError multi_pca = MultiPCA() assert_raises(ValueError, multi_pca.fit, data[:2]) # Smoke test the use of a masker and without CCA multi_pca = MultiPCA(mask=MultiNiftiMasker(mask_args=dict(opening=0)), do_cca=False, n_components=3) multi_pca.fit(data[:2]) # Smoke test the transform and inverse_transform multi_pca.inverse_transform(multi_pca.transform(data[-2:])) # Smoke test to fit with no img assert_raises(TypeError, multi_pca.fit) multi_pca = MultiPCA(mask=mask_img, n_components=3) assert_raises_regex(ValueError, "Object has no components_ attribute. 
" "This is probably because fit has not been called", multi_pca.transform, data) def test_multi_pca_score(): shape = (6, 8, 10, 5) affine = np.eye(4) rng = np.random.RandomState(0) # Create a "multi-subject" dataset imgs = [] for i in range(8): this_img = rng.normal(size=shape) imgs.append(nibabel.Nifti1Image(this_img, affine)) mask_img = nibabel.Nifti1Image(np.ones(shape[:3], dtype=np.int8), affine) # Assert that score is between zero and one multi_pca = MultiPCA(mask=mask_img, random_state=0, memory_level=0, n_components=3) multi_pca.fit(imgs) s = multi_pca.score(imgs) assert_true(np.all(s <= 1)) assert_true(np.all(0 <= s)) # Assert that score does not fail with single subject data multi_pca = MultiPCA(mask=mask_img, random_state=0, memory_level=0, n_components=3) multi_pca.fit(imgs[0]) s = multi_pca.score(imgs[0]) assert_true(isinstance(s, float)) assert(0. <= s <= 1.) # Assert that score is one for n_components == n_sample # in single subject configuration multi_pca = MultiPCA(mask=mask_img, random_state=0, memory_level=0, n_components=5) multi_pca.fit(imgs[0]) s = multi_pca.score(imgs[0]) assert_almost_equal(s, 1., 1) # Per component score multi_pca = MultiPCA(mask=mask_img, random_state=0, memory_level=0, n_components=5) multi_pca.fit(imgs[0]) masker = NiftiMasker(mask_img).fit() s = multi_pca._raw_score(masker.transform(imgs[0]), per_component=True) assert_equal(s.shape, (5,)) assert_true(np.all(s <= 1)) assert_true(np.all(0 <= s)) PKH@(nilearn/decomposition/tests/test_base.pyimport numpy as np from nose.tools import assert_true import nibabel from numpy.testing import assert_equal, assert_array_almost_equal from nilearn._utils.testing import assert_raises_regex from nilearn.input_data import MultiNiftiMasker from nilearn.decomposition.base import BaseDecomposition, mask_and_reduce def test_mask_reducer(): shape = (6, 8, 10, 5) affine = np.eye(4) rng = np.random.RandomState(0) # Create a "multi-subject" dataset imgs = [] for i in range(8): this_img = rng.normal(size=shape) # Create fake activation to get non empty mask this_img[2:4, 2:4, 2:4, :] += 10 imgs.append(nibabel.Nifti1Image(this_img, affine)) mask_img = nibabel.Nifti1Image(np.ones(shape[:3], dtype=np.int8), affine) masker = MultiNiftiMasker(mask_img=mask_img).fit() # Test fit on multiple image data = mask_and_reduce(masker, imgs) assert_equal(data.shape, (8 * 5, 6 * 8 * 10)) data = mask_and_reduce(masker, imgs, n_components=3) assert_equal(data.shape, (8 * 3, 6 * 8 * 10)) data = mask_and_reduce(masker, imgs, reduction_ratio=0.4) assert_equal(data.shape, (8 * 2, 6 * 8 * 10)) # Test on single image data_single = mask_and_reduce(masker, imgs[0], n_components=3) assert_true(data_single.shape == (3, 6 * 8 * 10)) # Test n_jobs > 1 data = mask_and_reduce(masker, imgs[0], n_components=3, n_jobs=2, random_state=0) assert_equal(data.shape, (3, 6 * 8 * 10)) assert_array_almost_equal(data_single, data) # Test that reduced data is orthogonal data = mask_and_reduce(masker, imgs[0], n_components=3, random_state=0) assert_true(data.shape == (3, 6 * 8 * 10)) cov = data.dot(data.T) cov_diag = np.zeros((3, 3)) for i in range(3): cov_diag[i, i] = cov[i, i] assert_array_almost_equal(cov, cov_diag) # Test reproducibility data1 = mask_and_reduce(masker, imgs[0], n_components=3, random_state=0) data2 = mask_and_reduce(masker, [imgs[0]] * 2, n_components=3, random_state=0) assert_array_almost_equal(np.tile(data1, (2, 1)), data2) def test_base_decomposition(): shape = (6, 8, 10, 5) affine = np.eye(4) rng = np.random.RandomState(0) data = [] for i in 
range(8): this_data = rng.normal(size=shape) # Create fake activation to get non empty mask this_data[2:4, 2:4, 2:4, :] += 10 data.append(nibabel.Nifti1Image(this_data, affine)) mask = nibabel.Nifti1Image(np.ones(shape[:3], dtype=np.int8), affine) masker = MultiNiftiMasker(mask_img=mask) base_decomposition = BaseDecomposition(mask=masker, n_components=3) base_decomposition.fit(data) assert_true(base_decomposition.mask_img_ == mask) assert_true(base_decomposition.mask_img_ == base_decomposition.masker_.mask_img_) # Testing fit on data masker = MultiNiftiMasker() base_decomposition = BaseDecomposition(mask=masker, n_components=3) base_decomposition.fit(data) assert_true(base_decomposition.mask_img_ == base_decomposition.masker_.mask_img_) assert_raises_regex(ValueError, "Object has no components_ attribute. " "This may be because " "BaseDecomposition is directly " "being used.", base_decomposition.transform, data) assert_raises_regex(ValueError, 'Need one or more Niimg-like objects as input, ' 'an empty list was given.', base_decomposition.fit, []) # Test passing masker arguments to estimator base_decomposition = BaseDecomposition(target_affine=affine, target_shape=shape[:3], n_components=3, mask_strategy='background') base_decomposition.fit(data) # Score is tested in multi_pca PKH~jV  *nilearn/decomposition/tests/test_canica.py"""Test CanICA""" import numpy as np from numpy.testing import assert_array_almost_equal from nose.tools import assert_true, assert_raises import nibabel from nilearn._utils.testing import assert_less_equal from nilearn.decomposition.canica import CanICA from nilearn.image import iter_img def _make_data_from_components(components, affine, shape, rng=None, n_subjects=8): data = [] if rng is None: rng = np.random.RandomState(0) background = -.01 * rng.normal(size=shape) - 2 background = background[..., np.newaxis] for _ in range(n_subjects): this_data = np.dot(rng.normal(size=(40, 4)), components) this_data += .01 * rng.normal(size=this_data.shape) # Get back into 3D for CanICA this_data = np.reshape(this_data, (40,) + shape) this_data = np.rollaxis(this_data, 0, 4) # Put the border of the image to zero, to mimic a brain image this_data[:5] = background[:5] this_data[-5:] = background[-5:] this_data[:, :5] = background[:, :5] this_data[:, -5:] = background[:, -5:] data.append(nibabel.Nifti1Image(this_data, affine)) return data def _make_canica_components(shape): # Create two images with "activated regions" component1 = np.zeros(shape) component1[:5, :10] = 1 component1[5:10, :10] = -1 component2 = np.zeros(shape) component2[:5, -10:] = 1 component2[5:10, -10:] = -1 component3 = np.zeros(shape) component3[-5:, -10:] = 1 component3[-10:-5, -10:] = -1 component4 = np.zeros(shape) component4[-5:, :10] = 1 component4[-10:-5, :10] = -1 return np.vstack((component1.ravel(), component2.ravel(), component3.ravel(), component4.ravel())) def _make_canica_test_data(rng=None, n_subjects=8, noisy=False): if rng is None: rng = np.random.RandomState(0) shape = (30, 30, 5) affine = np.eye(4) components = _make_canica_components(shape) if noisy: # Creating noisy non positive data components[rng.randn(*components.shape) > .8] *= -5. for mp in components: assert_less_equal(mp.max(), -mp.min()) # Goal met ? 
# Create a "multi-subject" dataset data = _make_data_from_components(components, affine, shape, rng=rng, n_subjects=n_subjects) mask = np.ones(shape) mask[:5] = 0 mask[-5:] = 0 mask[:, :5] = 0 mask[:, -5:] = 0 mask[..., -2:] = 0 mask[..., :2] = 0 mask_img = nibabel.Nifti1Image(mask, affine) return data, mask_img, components, rng def test_canica_square_img(): data, mask_img, components, rng = _make_canica_test_data() # We do a large number of inits to be sure to find the good match canica = CanICA(n_components=4, random_state=rng, mask=mask_img, smoothing_fwhm=0., n_init=50) canica.fit(data) maps = canica.masker_.inverse_transform(canica.components_).get_data() maps = np.rollaxis(maps, 3, 0) # FIXME: This could be done more efficiently, e.g. thanks to hungarian # Find pairs of matching components # compute the cross-correlation matrix between components mask = mask_img.get_data() != 0 K = np.corrcoef(components[:, mask.ravel()], maps[:, mask])[4:, :4] # K should be a permutation matrix, hence its coefficients # should all be close to 0 1 or -1 K_abs = np.abs(K) assert_true(np.sum(K_abs > .9) == 4) K_abs[K_abs > .9] -= 1 assert_array_almost_equal(K_abs, 0, 1) # Smoke test to make sure an error is raised when no data is passed. assert_raises(TypeError, canica.fit) def test_canica_single_subject(): # Check that canica runs on a single-subject dataset data, mask_img, components, rng = _make_canica_test_data(n_subjects=1) # We do a large number of inits to be sure to find the good match canica = CanICA(n_components=4, random_state=rng, smoothing_fwhm=0., n_init=1) # This is a smoke test: we just check that things run canica.fit(data[0]) def test_component_sign(): # We should have a heuristic that flips the sign of components in # CanICA to have more positive values than negative values, for # instance by making sure that the largest value is positive. data, mask_img, components, rng = _make_canica_test_data(n_subjects=2, noisy=True) # run CanICA many times (this is known to produce different results) canica = CanICA(n_components=4, random_state=rng, mask=mask_img) for _ in range(3): canica.fit(data) for mp in iter_img(canica.masker_.inverse_transform( canica.components_)): mp = mp.get_data() assert_less_equal(-mp.min(), mp.max()) PKH%53V V 1nilearn/decomposition/tests/test_dict_learning.pyimport numpy as np from nilearn._utils.testing import assert_less_equal from nilearn.decomposition.dict_learning import DictLearning from nilearn.decomposition.tests.test_canica import _make_canica_test_data from nilearn.image import iter_img from nilearn.input_data import NiftiMasker def test_dict_learning(): data, mask_img, components, rng = _make_canica_test_data(n_subjects=8) masker = NiftiMasker(mask_img=mask_img).fit() mask = mask_img.get_data() != 0 flat_mask = mask.ravel() dict_init = masker.inverse_transform(components[:, flat_mask]) dict_learning = DictLearning(n_components=4, random_state=0, dict_init=dict_init, mask=mask_img, smoothing_fwhm=0., alpha=1) dict_learning_auto_init = DictLearning(n_components=4, random_state=0, mask=mask_img, smoothing_fwhm=0., n_epochs=10, alpha=1) maps = {} for estimator in [dict_learning, dict_learning_auto_init]: estimator.fit(data) maps[estimator] = estimator.masker_. 
\ inverse_transform(estimator.components_).get_data() maps[estimator] = np.reshape( np.rollaxis(maps[estimator], 3, 0)[:, mask], (4, flat_mask.sum())) masked_components = components[:, flat_mask] for this_dict_learning in [dict_learning]: these_maps = maps[this_dict_learning] S = np.sqrt(np.sum(masked_components ** 2, axis=1)) S[S == 0] = 1 masked_components /= S[:, np.newaxis] S = np.sqrt(np.sum(these_maps ** 2, axis=1)) S[S == 0] = 1 these_maps /= S[:, np.newaxis] K = np.abs(masked_components.dot(these_maps.T)) recovered_maps = np.sum(K > 0.9) assert(recovered_maps >= 2) # Smoke test n_epochs > 1 dict_learning = DictLearning(n_components=4, random_state=0, dict_init=dict_init, mask=mask_img, smoothing_fwhm=0., n_epochs=2, alpha=1) dict_learning.fit(data) def test_component_sign(): # Regression test # We should have a heuristic that flips the sign of components in # DictLearning to have more positive values than negative values, for # instance by making sure that the largest value is positive. data, mask_img, components, rng = _make_canica_test_data(n_subjects=2, noisy=True) for mp in components: assert_less_equal(-mp.min(), mp.max()) dict_learning = DictLearning(n_components=4, random_state=rng, mask=mask_img, smoothing_fwhm=0., alpha=1) dict_learning.fit(data) for mp in iter_img(dict_learning.masker_.inverse_transform( dict_learning.components_)): mp = mp.get_data() assert_less_equal(np.sum(mp[mp <= 0]), np.sum(mp[mp > 0])) PKHlSSnilearn/image/__init__.py""" Mathematical operations working on Niimg-like objects like -a (3+n)-D block of data, and an affine. """ from .resampling import resample_img, resample_to_img, reorder_img from .image import high_variance_confounds, smooth_img, crop_img, \ mean_img, swap_img_hemispheres, index_img, iter_img, threshold_img, \ math_img, load_img, clean_img from .image import new_img_like # imported this way to avoid circular imports from .._utils.niimg_conversions import concat_niimgs as concat_imgs from .._utils.niimg import copy_img __all__ = ['resample_img', 'resample_to_img', 'high_variance_confounds', 'smooth_img', 'crop_img', 'mean_img', 'reorder_img', 'swap_img_hemispheres', 'concat_imgs', 'copy_img', 'index_img', 'iter_img', 'new_img_like', 'threshold_img', 'math_img', 'load_img', 'clean_img'] PK!pH[E ssnilearn/image/image.py""" Preprocessing functions for images. See also nilearn.signal. """ # Authors: Philippe Gervais, Alexandre Abraham # License: simplified BSD import collections from distutils.version import LooseVersion import numpy as np from scipy import ndimage from scipy.stats import scoreatpercentile import copy import nibabel from sklearn.externals.joblib import Parallel, delayed from .. import signal from .._utils import (check_niimg_4d, check_niimg_3d, check_niimg, as_ndarray, _repr_niimgs) from .._utils.niimg_conversions import _index_img, _check_same_fov from .._utils.niimg import _safe_get_data from .._utils.compat import _basestring from .._utils.param_validation import check_threshold def high_variance_confounds(imgs, n_confounds=5, percentile=2., detrend=True, mask_img=None): """ Return confounds signals extracted from input signals with highest variance. Parameters ---------- imgs: Niimg-like object See http://nilearn.github.io/manipulating_images/input_output.html. 4D image. mask_img: Niimg-like object See http://nilearn.github.io/manipulating_images/input_output.html. If provided, confounds are extracted from voxels inside the mask. If not provided, all voxels are used. 
n_confounds: int Number of confounds to return percentile: float Highest-variance signals percentile to keep before computing the singular value decomposition, 0. <= `percentile` <= 100. mask_img.sum() * percentile / 100. must be greater than n_confounds. detrend: bool If True, detrend signals before processing. Returns ------- v: numpy.ndarray highest variance confounds. Shape: (number of scans, n_confounds) Notes ------ This method is related to what has been published in the literature as 'CompCor' (Behzadi NeuroImage 2007). The implemented algorithm does the following: - compute sum of squares for each signals (no mean removal) - keep a given percentile of signals with highest variance (percentile) - compute an svd of the extracted signals - return a given number (n_confounds) of signals from the svd with highest singular values. See also -------- nilearn.signal.high_variance_confounds """ from .. import masking if mask_img is not None: sigs = masking.apply_mask(imgs, mask_img) else: # Load the data only if it doesn't need to be masked imgs = check_niimg_4d(imgs) sigs = as_ndarray(imgs.get_data()) # Not using apply_mask here saves memory in most cases. del imgs # help reduce memory consumption sigs = np.reshape(sigs, (-1, sigs.shape[-1])).T return signal.high_variance_confounds(sigs, n_confounds=n_confounds, percentile=percentile, detrend=detrend) def _fast_smooth_array(arr): """Simple smoothing which is less computationally expensive than applying a gaussian filter. Only the first three dimensions of the array will be smoothed. The filter uses [0.2, 1, 0.2] weights in each direction and use a normalisation to preserve the local average value. Parameters ---------- arr: numpy.ndarray 4D array, with image number as last dimension. 3D arrays are also accepted. Returns ------- smoothed_arr: numpy.ndarray Smoothed array. Note ---- Rather than calling this function directly, users are encouraged to call the high-level function :func:`smooth_img` with fwhm='fast'. """ neighbor_weight = 0.2 # 6 neighbors in 3D if not on an edge nb_neighbors = 6 # This scale ensures that a uniform array stays uniform # except on the array edges scale = 1 + nb_neighbors * neighbor_weight # Need to copy because the smoothing is done in multiple statements # and there does not seem to be an easy way to do it in place smoothed_arr = arr.copy() weighted_arr = neighbor_weight * arr smoothed_arr[:-1] += weighted_arr[1:] smoothed_arr[1:] += weighted_arr[:-1] smoothed_arr[:, :-1] += weighted_arr[:, 1:] smoothed_arr[:, 1:] += weighted_arr[:, :-1] smoothed_arr[:, :, :-1] += weighted_arr[:, :, 1:] smoothed_arr[:, :, 1:] += weighted_arr[:, :, :-1] smoothed_arr /= scale return smoothed_arr def _smooth_array(arr, affine, fwhm=None, ensure_finite=True, copy=True): """Smooth images by applying a Gaussian filter. Apply a Gaussian filter along the three first dimensions of arr. Parameters ---------- arr: numpy.ndarray 4D array, with image number as last dimension. 3D arrays are also accepted. affine: numpy.ndarray (4, 4) matrix, giving affine transformation for image. (3, 3) matrices are also accepted (only these coefficients are used). If fwhm='fast', the affine is not used and can be None fwhm: scalar, numpy.ndarray, 'fast' or None Smoothing strength, as a full-width at half maximum, in millimeters. If a scalar is given, width is identical on all three directions. A numpy.ndarray must have 3 elements, giving the FWHM along each axis. 
        If fwhm == 'fast', a fast smoothing will be performed with a
        filter [0.2, 1, 0.2] in each direction and a normalisation to
        preserve the local average value.
        If fwhm is None, no filtering is performed (useful when just
        removal of non-finite values is needed).

    ensure_finite: bool
        if True, replace every non-finite value (like NaNs) by zero
        before filtering.

    copy: bool
        if True (the default), the input array is not modified; if
        False, the filtering is performed in-place.

    Returns
    -------
    filtered_arr: numpy.ndarray
        arr, filtered.

    Notes
    -----
    This function is most efficient with arr in C order.
    """
    if arr.dtype.kind == 'i':
        if arr.dtype == np.int64:
            arr = arr.astype(np.float64)
        else:
            # We don't need crazy precision
            arr = arr.astype(np.float32)
    if copy:
        arr = arr.copy()

    if ensure_finite:
        # SPM tends to put NaNs in the data outside the brain
        arr[np.logical_not(np.isfinite(arr))] = 0

    if fwhm == 'fast':
        arr = _fast_smooth_array(arr)
    elif fwhm is not None:
        # Keep only the scale part.
        affine = affine[:3, :3]

        # Convert from a FWHM to a sigma:
        fwhm_over_sigma_ratio = np.sqrt(8 * np.log(2))
        vox_size = np.sqrt(np.sum(affine ** 2, axis=0))
        sigma = fwhm / (fwhm_over_sigma_ratio * vox_size)
        for n, s in enumerate(sigma):
            ndimage.gaussian_filter1d(arr, s, output=arr, axis=n)

    return arr


def smooth_img(imgs, fwhm):
    """Smooth images by applying a Gaussian filter.

    Apply a Gaussian filter along the three first dimensions of arr.
    In all cases, non-finite values in input image are replaced by zeros.

    Parameters
    ----------
    imgs: Niimg-like object or iterable of Niimg-like objects
        See http://nilearn.github.io/manipulating_images/input_output.html.
        Image(s) to smooth.

    fwhm: scalar, numpy.ndarray, 'fast' or None
        Smoothing strength, as a Full-Width at Half Maximum, in
        millimeters. If a scalar is given, width is identical on all
        three directions. A numpy.ndarray must have 3 elements, giving
        the FWHM along each axis.
        If fwhm == 'fast', a fast smoothing will be performed with a
        filter [0.2, 1, 0.2] in each direction and a normalisation to
        preserve the scale.
        If fwhm is None, no filtering is performed (useful when just
        removal of non-finite values is needed)

    Returns
    -------
    filtered_img: nibabel.Nifti1Image or list of.
        Input image, filtered. If imgs is an iterable, then filtered_img
        is a list.
    """
    # Use hasattr() instead of isinstance to workaround a Python 2.6/2.7
    # bug. See http://bugs.python.org/issue7624
    if hasattr(imgs, "__iter__") \
            and not isinstance(imgs, _basestring):
        single_img = False
    else:
        single_img = True
        imgs = [imgs]

    ret = []
    for img in imgs:
        img = check_niimg(img)
        affine = img.get_affine()
        filtered = _smooth_array(img.get_data(), affine, fwhm=fwhm,
                                 ensure_finite=True, copy=True)
        ret.append(new_img_like(img, filtered, affine, copy_header=True))

    if single_img:
        return ret[0]
    else:
        return ret
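# Sketch of the FWHM-to-sigma conversion performed in _smooth_array, for
# a toy 6 mm kernel on 1 mm isotropic voxels (illustrative only):
#
#     import numpy as np
#     fwhm = 6.                            # mm
#     vox_size = np.array([1., 1., 1.])    # mm, from the affine columns
#     sigma = fwhm / (np.sqrt(8 * np.log(2)) * vox_size)
#     # sigma ~= 2.548 voxels per axis, fed to gaussian_filter1d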
def _crop_img_to(img, slices, copy=True):
    """Crops image to a smaller size

    Crop img to size indicated by slices and adjust affine accordingly

    Parameters
    ----------
    img: Niimg-like object
        See http://nilearn.github.io/manipulating_images/input_output.html.
        Img to be cropped. If slices has less entries than img has
        dimensions, the slices will be applied to the first len(slices)
        dimensions

    slices: list of slices
        Defines the range of the crop.
        E.g. [slice(20, 200), slice(40, 150), slice(0, 100)] defines a
        3D cube

    copy: boolean
        Specifies whether cropped data is to be copied or not.
        Default: True

    Returns
    -------
    cropped_img: Niimg-like object
        See http://nilearn.github.io/manipulating_images/input_output.html.
        Cropped version of the input image
    """
    img = check_niimg(img)

    data = img.get_data()
    affine = img.get_affine()

    cropped_data = data[slices]
    if copy:
        cropped_data = cropped_data.copy()

    linear_part = affine[:3, :3]
    old_origin = affine[:3, 3]
    new_origin_voxel = np.array([s.start for s in slices])
    new_origin = old_origin + linear_part.dot(new_origin_voxel)

    new_affine = np.eye(4)
    new_affine[:3, :3] = linear_part
    new_affine[:3, 3] = new_origin

    return new_img_like(img, cropped_data, new_affine)


def crop_img(img, rtol=1e-8, copy=True):
    """Crops img as much as possible

    Will crop img, removing as many zero entries as possible without
    touching non-zero entries. Will leave one voxel of zero padding
    around the obtained non-zero area in order to avoid sampling issues
    later on.

    Parameters
    ----------
    img: Niimg-like object
        See http://nilearn.github.io/manipulating_images/input_output.html.
        img to be cropped.

    rtol: float
        relative tolerance (with respect to maximal absolute value of the
        image), under which values are considered negligible and thus
        croppable.

    copy: boolean
        Specifies whether cropped data is copied or not.

    Returns
    -------
    cropped_img: image
        Cropped version of the input image
    """
    img = check_niimg(img)
    data = img.get_data()
    infinity_norm = max(-data.min(), data.max())
    passes_threshold = np.logical_or(data < -rtol * infinity_norm,
                                     data > rtol * infinity_norm)

    if data.ndim == 4:
        passes_threshold = np.any(passes_threshold, axis=-1)
    coords = np.array(np.where(passes_threshold))
    start = coords.min(axis=1)
    end = coords.max(axis=1) + 1

    # pad with one voxel to avoid resampling problems
    start = np.maximum(start - 1, 0)
    end = np.minimum(end + 1, data.shape[:3])

    slices = [slice(s, e) for s, e in zip(start, end)]

    return _crop_img_to(img, slices, copy=copy)


def _compute_mean(imgs, target_affine=None,
                  target_shape=None, smooth=False):
    from . import resampling
    input_repr = _repr_niimgs(imgs)

    imgs = check_niimg(imgs)
    mean_data = _safe_get_data(imgs)
    affine = imgs.get_affine()
    # Free memory ASAP
    imgs = None
    if mean_data.ndim not in (3, 4):
        raise ValueError('Computation expects 3D or 4D '
                         'images, but %i dimensions were given (%s)'
                         % (mean_data.ndim, input_repr))
    if mean_data.ndim == 4:
        mean_data = mean_data.mean(axis=-1)
    else:
        mean_data = mean_data.copy()
    mean_data = resampling.resample_img(
        nibabel.Nifti1Image(mean_data, affine),
        target_affine=target_affine, target_shape=target_shape,
        copy=False)
    affine = mean_data.get_affine()
    mean_data = mean_data.get_data()

    if smooth:
        nan_mask = np.isnan(mean_data)
        mean_data = _smooth_array(mean_data, affine=np.eye(4),
                                  fwhm=smooth, ensure_finite=True,
                                  copy=False)
        mean_data[nan_mask] = np.nan

    return mean_data, affine


def mean_img(imgs, target_affine=None, target_shape=None,
             verbose=0, n_jobs=1):
    """ Compute the mean of the images (in the time dimension, i.e. the
    4th dimension).

    Note that if list of 4D images are given, the mean of each 4D image
    is computed separately, and the resulting mean is computed after.

    Parameters
    ----------
    imgs: Niimg-like object or iterable of Niimg-like objects
        See http://nilearn.github.io/manipulating_images/input_output.html.
        Images to mean.

    target_affine: numpy.ndarray, optional
        If specified, the image is resampled corresponding to this new
        affine. target_affine can be a 3x3 or a 4x4 matrix

    target_shape: tuple or list, optional
        If specified, the image will be resized to match this new shape.
        len(target_shape) must be equal to 3. A target_affine has to be
        specified jointly with target_shape.
verbose: int, optional Controls the amount of verbosity: higher numbers give more messages (0 means no messages). n_jobs: integer, optional The number of CPUs to use to do the computation. -1 means 'all CPUs'. Returns ------- mean: nibabel.Nifti1Image mean image See Also -------- nilearn.image.math_img : For more general operations on images """ if (isinstance(imgs, _basestring) or not isinstance(imgs, collections.Iterable)): imgs = [imgs, ] imgs_iter = iter(imgs) first_img = check_niimg(next(imgs_iter)) # Compute the first mean to retrieve the reference # target_affine and target_shape if_needed n_imgs = 1 running_mean, first_affine = _compute_mean(first_img, target_affine=target_affine, target_shape=target_shape) if target_affine is None or target_shape is None: target_affine = first_affine target_shape = running_mean.shape[:3] for this_mean in Parallel(n_jobs=n_jobs, verbose=verbose)( delayed(_compute_mean)(n, target_affine=target_affine, target_shape=target_shape) for n in imgs_iter): n_imgs += 1 # _compute_mean returns (mean_img, affine) this_mean = this_mean[0] running_mean += this_mean running_mean = running_mean / float(n_imgs) return new_img_like(first_img, running_mean, target_affine) def swap_img_hemispheres(img): """Performs swapping of hemispheres in the indicated nifti. Use case: synchronizing ROIs across hemispheres Parameters ---------- img: Niimg-like object See http://nilearn.github.io/manipulating_images/input_output.html. Images to swap. Returns ------- output: nibabel.Nifti1Image hemispherically swapped image Notes ----- Supposes a nifti of a brain that is sagitally aligned Should be used with caution (confusion might be caused with radio/neuro conventions) Note that this does not require a change of the affine matrix. """ from .resampling import reorder_img # Check input is really a path to a nifti file or a nifti object img = check_niimg_3d(img) # get nifti in x-y-z order img = reorder_img(img) # create swapped nifti object out_img = new_img_like(img, img.get_data()[::-1], img.get_affine(), copy_header=True) return out_img def index_img(imgs, index): """Indexes into a 4D Niimg-like object in the fourth dimension. Common use cases include extracting a 3D image out of `img` or creating a 4D image whose data is a subset of `img` data. Parameters ---------- imgs: 4D Niimg-like object See http://nilearn.github.io/manipulating_images/input_output.html. index: Any type compatible with numpy array indexing Used for indexing the 4D data array in the fourth dimension. Returns ------- output: nibabel.Nifti1Image See Also -------- nilearn.image.concat_imgs nilearn.image.iter_img Examples -------- First we concatenate two mni152 images to create a 4D-image:: >>> from nilearn import datasets >>> from nilearn.image import concat_imgs, index_img >>> joint_mni_image = concat_imgs([datasets.load_mni152_template(), ... datasets.load_mni152_template()]) >>> print(joint_mni_image.shape) (91, 109, 91, 2) We can now select one slice from the last dimension of this 4D-image:: >>> single_mni_image = index_img(joint_mni_image, 1) >>> print(single_mni_image.shape) (91, 109, 91) """ imgs = check_niimg_4d(imgs) return _index_img(imgs, index) def iter_img(imgs): """Iterates over a 4D Niimg-like object in the fourth dimension. Parameters ---------- imgs: 4D Niimg-like object See http://nilearn.github.io/manipulating_images/input_output.html. 
    Returns
    -------
    output: iterator of 3D nibabel.Nifti1Image

    See Also
    --------
    nilearn.image.index_img
    """
    return check_niimg_4d(imgs, return_iterator=True)


def new_img_like(ref_niimg, data, affine=None, copy_header=False):
    """Create a new image of the same class as the reference image

    Parameters
    ----------
    ref_niimg: image
        Reference image. The new image will be of the same type.

    data: numpy array
        Data to be stored in the image

    affine: 4x4 numpy array, optional
        Transformation matrix

    copy_header: boolean, optional
        Indicates whether the header of the reference image should be
        used to create the new image

    Returns
    -------
    new_img: image
        A loaded image with the same type (and header) as the reference
        image.
    """
    # Hand-written loading code to avoid too much memory consumption
    orig_ref_niimg = ref_niimg
    if (not isinstance(ref_niimg, _basestring)
            and not hasattr(ref_niimg, 'get_data')
            and hasattr(ref_niimg, '__iter__')):
        ref_niimg = ref_niimg[0]
    if not (hasattr(ref_niimg, 'get_data')
            and hasattr(ref_niimg, 'get_affine')):
        if isinstance(ref_niimg, _basestring):
            ref_niimg = nibabel.load(ref_niimg)
        else:
            raise TypeError(('The reference image should be a niimg, %r '
                             'was passed') % orig_ref_niimg)

    if affine is None:
        affine = ref_niimg.get_affine()
    if data.dtype == bool:
        default_dtype = np.int8
        if (LooseVersion(nibabel.__version__) >= LooseVersion('1.2.0')
                and isinstance(ref_niimg,
                               nibabel.freesurfer.mghformat.MGHImage)):
            default_dtype = np.uint8
        data = as_ndarray(data, dtype=default_dtype)
    header = None
    if copy_header:
        header = copy.deepcopy(ref_niimg.get_header())
        header['scl_slope'] = 0.
        header['scl_inter'] = 0.
        header['glmax'] = 0.
        header['cal_max'] = np.max(data) if data.size > 0 else 0.
        header['cal_min'] = np.min(data) if data.size > 0 else 0.
    return ref_niimg.__class__(data, affine, header=header)


def threshold_img(img, threshold, mask_img=None):
    """ Threshold the given input image, mostly statistical or atlas
    images.

    Thresholding can be done based on direct image intensities or
    selection threshold with given percentile.

    .. versionadded:: 0.2

    Parameters
    ----------
    img: a 3D/4D Niimg-like object
        Image containing statistical or atlas maps which should be
        thresholded.

    threshold: float or str
        If float, we threshold the image based on image intensities
        meaning voxels which have intensities greater than this value
        will be kept. The given value should be within the range of
        minimum and maximum intensity of the input image.
        If string, it should finish with percent sign e.g. "80%" and we
        threshold based on the score obtained using this percentile on
        the image data. The voxels which have intensities greater than
        this score will be kept. The given string should be within the
        range of "0%" to "100%".

    mask_img: Niimg-like object, default None, optional
        Mask image applied to mask the input data. If None, no masking
        will be applied.

    Returns
    -------
    threshold_img: Nifti1Image
        thresholded image of the given input image.
    """
    from . import resampling
    from .. import masking

    img = check_niimg(img)
    img_data = img.get_data()
    affine = img.get_affine()

    img_data = np.nan_to_num(img_data)
    if mask_img is not None:
        if not _check_same_fov(img, mask_img):
            mask_img = resampling.resample_img(mask_img,
                                               target_affine=affine,
                                               target_shape=img.shape[:3],
                                               interpolation="nearest")
        mask_data, _ = masking._load_mask_img(mask_img)
        # Set as 0 for the values which are outside of the mask
        img_data[mask_data == 0.] = 0.

    if threshold is None:
        raise ValueError("The input parameter 'threshold' is empty.
" "Please give either a float value or a string as e.g. '90%'.") else: cutoff_threshold = check_threshold(threshold, img_data, percentile_func=scoreatpercentile, name='threshold') img_data[np.abs(img_data) < cutoff_threshold] = 0. threshold_img = new_img_like(img, img_data, affine) return threshold_img def math_img(formula, **imgs): """Interpret a numpy based string formula using niimg in named parameters. .. versionadded:: 0.2.3 Parameters ---------- formula: str The mathematical formula to apply to image internal data. It can use numpy imported as 'np'. imgs: images (Nifti1Image or file names) Keyword arguments corresponding to the variables in the formula as Nifti images. All input images should have the same geometry (shape, affine). Returns ------- return_img: Nifti1Image Result of the formula as a Nifti image. Note that the dimension of the result image can be smaller than the input image. The affine is the same as the input image. See Also -------- nilearn.image.mean_img : To simply compute the mean of multiple images Examples -------- Let's load an image using nilearn datasets module:: >>> from nilearn import datasets >>> anatomical_image = datasets.load_mni152_template() Now we can use any numpy function on this image:: >>> from nilearn.image import math_img >>> log_img = math_img("np.log(img)", img=anatomical_image) We can also apply mathematical operations on several images:: >>> result_img = math_img("img1 + img2", ... img1=anatomical_image, img2=log_img) Notes ----- This function is the Python equivalent of ImCal in SPM or fslmaths in FSL. """ try: # Check that input images are valid niimg and have a compatible shape # and affine. niimgs = [] for image in imgs.values(): niimgs.append(check_niimg(image)) _check_same_fov(*niimgs, raise_error=True) except Exception as exc: exc.args = (("Input images cannot be compared, you provided '{0}'," .format(imgs.values()),) + exc.args) raise # Computing input data as a dictionary of numpy arrays. Keep a reference # niimg for building the result as a new niimg. niimg = None data_dict = {} for key, img in imgs.items(): niimg = check_niimg(img) data_dict[key] = _safe_get_data(niimg) # Add a reference to numpy in the kwargs of eval so that numpy functions # can be called from there. data_dict['np'] = np try: result = eval(formula, data_dict) except Exception as exc: exc.args = (("Input formula couldn't be processed, you provided '{0}'," .format(formula),) + exc.args) raise return new_img_like(niimg, result, niimg.get_affine()) def clean_img(imgs, sessions=None, detrend=True, standardize=True, confounds=None, low_pass=None, high_pass=None, t_r=2.5): """Improve SNR on masked fMRI signals. This function can do several things on the input signals, in the following order: - detrend - standardize - remove confounds - low- and high-pass filter Low-pass filtering improves specificity. High-pass filtering should be kept small, to keep some sensitivity. Filtering is only meaningful on evenly-sampled signals. .. versionadded:: 0.2.5 Parameters ---------- imgs: Niimg-like object See http://nilearn.github.io/manipulating_images/input_output.html. 4D image. The signals in the last dimension are filtered. sessions : numpy array, optional Add a session level to the cleaning process. Each session will be cleaned independently. Must be a 1D array of n_samples elements. detrend: bool If detrending should be applied on timeseries (before confound removal) standardize: bool If True, returned signals are set to unit variance. 
confounds: numpy.ndarray, str or list of Confounds timeseries. Shape must be (number of time points, number of confounds), or just (number of time points,). The number of time points in signals and confounds must be identical (i.e. signals.shape[0] == confounds.shape[0]). If a string is provided, it is assumed to be the name of a csv file containing signals as columns, with an optional one-line header. If a list is provided, all confounds are removed from the input signal, as if all were in the same array. low_pass, high_pass: float Respectively low and high cutoff frequencies, in Hertz. t_r: float, optional Repetition time, in seconds (sampling period). Returns ------- cleaned_img: Niimg-like object Input images, cleaned. Same shape as `imgs`. Notes ----- Confounds removal is based on a projection on the orthogonal of the signal space. See Friston, K. J., A. P. Holmes, K. J. Worsley, J.-P. Poline, C. D. Frith, and R. S. J. Frackowiak. "Statistical Parametric Maps in Functional Imaging: A General Linear Approach". Human Brain Mapping 2, no 4 (1994): 189-210. See Also -------- nilearn.signal.clean """ # Avoid circular import from .image import new_img_like imgs_ = check_niimg_4d(imgs) data = signal.clean( imgs_.get_data().reshape(-1, imgs_.shape[-1]).T, sessions=sessions, detrend=detrend, standardize=standardize, confounds=confounds, low_pass=low_pass, high_pass=high_pass, t_r=t_r).T.reshape(imgs_.shape) return new_img_like(imgs_, data) def load_img(img, wildcards=True, dtype=None): """Load a Niimg-like object from filenames or list of filenames. .. versionadded:: 0.2.5 Parameters ---------- img: Niimg-like object See http://nilearn.github.io/manipulating_images/input_output.html. If niimg is a string, consider it as a path to Nifti image and call nibabel.load on it. The '~' symbol is expanded to the user home folder. If it is an object, check if get_data() and get_affine() methods are present, raise TypeError otherwise. wildcards: bool, optional Use niimg as a regular expression to get a list of matching input filenames. If multiple files match, the returned list is sorted using an ascending order. If no file matches the regular expression, a ValueError exception is raised. dtype: {dtype, "auto"} Data type toward which the data should be converted. If "auto", the data will be converted to int32 if dtype is discrete and float32 if it is continuous. Returns ------- result: 3D/4D Niimg-like object Result can be nibabel.Nifti1Image or the input, as-is. It is guaranteed that the returned object has get_data() and get_affine() methods. """ return check_niimg(img, wildcards=wildcards, dtype=dtype) nilearn/image/resampling.py""" Utilities to resample a Niimg-like object See http://nilearn.github.io/manipulating_images/input_output.html. """ # Author: Gael Varoquaux, Alexandre Abraham, Michael Eickenberg # License: simplified BSD import warnings from distutils.version import LooseVersion import numpy as np import scipy from scipy import ndimage, linalg from .. import _utils from .._utils.compat import _basestring ############################################################################### # Affine utils def to_matrix_vector(transform): """Split a homogeneous transform into its matrix and vector components. The transformation must be represented in homogeneous coordinates. It is split into its linear transformation matrix and translation vector components. This function does not normalize the matrix. 
This means that for it to be the inverse of from_matrix_vector, transform[-1, -1] must equal 1, and transform[-1, :-1] must equal 0. Parameters ---------- transform: numpy.ndarray Homogeneous transform matrix. Example: a (4, 4) transform representing linear transformation and translation in 3 dimensions. Returns ------- matrix, vector: numpy.ndarray The matrix and vector components of the transform matrix. For an (N, N) transform, matrix will be (N-1, N-1) and vector will be a 1D array of shape (N-1,). See Also -------- from_matrix_vector """ ndimin = transform.shape[0] - 1 ndimout = transform.shape[1] - 1 matrix = transform[0:ndimin, 0:ndimout] vector = transform[0:ndimin, ndimout] return matrix, vector def from_matrix_vector(matrix, vector): """Combine a matrix and vector into a homogeneous transform. Combine a rotation matrix and translation vector into a transform in homogeneous coordinates. Parameters ---------- matrix: numpy.ndarray An (N, N) array representing the rotation matrix. vector: numpy.ndarray A (1, N) array representing the translation. Returns ------- xform: numpy.ndarray An (N+1, N+1) transform matrix. See Also -------- nilearn.resampling.to_matrix_vector """ nin, nout = matrix.shape t = np.zeros((nin + 1, nout + 1), matrix.dtype) t[0:nin, 0:nout] = matrix t[nin, nout] = 1. t[0:nin, nout] = vector return t def coord_transform(x, y, z, affine): """ Convert the x, y, z coordinates from one image space to another space. Parameters ---------- x : number or ndarray The x coordinates in the input space y : number or ndarray The y coordinates in the input space z : number or ndarray The z coordinates in the input space affine : 2D 4x4 ndarray affine that maps from input to output space. Returns ------- x : number or ndarray The x coordinates in the output space y : number or ndarray The y coordinates in the output space z : number or ndarray The z coordinates in the output space Warning: The x, y and z have their Talairach ordering, not 3D numpy image ordering. """ coords = np.c_[np.atleast_1d(x).flat, np.atleast_1d(y).flat, np.atleast_1d(z).flat, np.ones_like(np.atleast_1d(z).flat)].T x, y, z, _ = np.dot(affine, coords) return x.squeeze(), y.squeeze(), z.squeeze() def get_bounds(shape, affine): """Return the world-space bounds occupied by an array given an affine. The coordinates returned correspond to the **center** of the corner voxels. Parameters ---------- shape: tuple shape of the array. Must have 3 integer values. affine: numpy.ndarray affine giving the linear transformation between voxel coordinates and world-space coordinates. Returns ------- coord: list of tuples coord[i] is a 2-tuple giving minimal and maximal coordinates along i-th axis. """ adim, bdim, cdim = shape adim -= 1 bdim -= 1 cdim -= 1 # form a collection of vectors for each of the 8 corners of the box box = np.array([[0., 0, 0, 1], [adim, 0, 0, 1], [0, bdim, 0, 1], [0, 0, cdim, 1], [adim, bdim, 0, 1], [adim, 0, cdim, 1], [0, bdim, cdim, 1], [adim, bdim, cdim, 1]]).T box = np.dot(affine, box)[:3] return list(zip(box.min(axis=-1), box.max(axis=-1))) def get_mask_bounds(img): """ Return the world-space bounds occupied by a mask. Parameters ---------- img: Niimg-like object See http://nilearn.github.io/manipulating_images/input_output.html. The image to inspect. Zero values are considered as background. Returns ------- xmin, xmax, ymin, ymax, zmin, zmax: floats The world-space bounds (field of view) occupied by the non-zero values in the image Notes ----- The image should have only one connected component. 
The affine should be diagonal or diagonal-permuted, use reorder_img to ensure that it is the case. """ img = _utils.check_niimg_3d(img) mask = _utils.numpy_conversions._asarray(img.get_data(), dtype=np.bool) affine = img.get_affine() (xmin, xmax), (ymin, ymax), (zmin, zmax) = get_bounds(mask.shape, affine) slices = ndimage.find_objects(mask) if len(slices) == 0: warnings.warn("empty mask", stacklevel=2) else: x_slice, y_slice, z_slice = slices[0] x_width, y_width, z_width = mask.shape xmin, xmax = (xmin + x_slice.start*(xmax - xmin)/x_width, xmin + x_slice.stop *(xmax - xmin)/x_width) ymin, ymax = (ymin + y_slice.start*(ymax - ymin)/y_width, ymin + y_slice.stop *(ymax - ymin)/y_width) zmin, zmax = (zmin + z_slice.start*(zmax - zmin)/z_width, zmin + z_slice.stop *(zmax - zmin)/z_width) return xmin, xmax, ymin, ymax, zmin, zmax class BoundingBoxError(ValueError): """This error is raised when a resampling transformation is incompatible with the given data. This can happen, for example, if the field of view of a target affine matrix does not contain any of the original data.""" pass ############################################################################### # Resampling def _resample_one_img(data, A, A_inv, b, target_shape, interpolation_order, out, copy=True): "Internal function for resample_img, do not use" if data.dtype.kind in ('i', 'u'): # Integers are always finite has_not_finite = False else: not_finite = np.logical_not(np.isfinite(data)) has_not_finite = np.any(not_finite) if has_not_finite: warnings.warn("NaNs or infinite values are present in the data " "passed to resample. This is a bad thing as they " "make resampling ill-defined and much slower.", RuntimeWarning, stacklevel=2) if copy: # We need to do a copy to avoid modifying the input # array data = data.copy() #data[not_finite] = 0 from ..masking import _extrapolate_out_mask data = _extrapolate_out_mask(data, np.logical_not(not_finite), iterations=2)[0] # See https://github.com/nilearn/nilearn/issues/346 Copying the # array makes it C continuous and as such the int32 index in the C # code is a lot less likely to overflow if (LooseVersion(scipy.__version__) < LooseVersion('0.14.1')): data = data.copy() # The resampling itself ndimage.affine_transform(data, A, offset=np.dot(A_inv, b), output_shape=target_shape, output=out, order=interpolation_order) # Bug in ndimage.affine_transform when out does not have native endianness # see https://github.com/nilearn/nilearn/issues/275 # Bug was fixed in scipy 0.15 if (LooseVersion(scipy.__version__) < LooseVersion('0.15') and not out.dtype.isnative): out.byteswap(True) if has_not_finite: # We need to resample the mask of not_finite values not_finite = ndimage.affine_transform(not_finite, A, offset=np.dot(A_inv, b), output_shape=target_shape, order=0) out[not_finite] = np.nan return out def resample_img(img, target_affine=None, target_shape=None, interpolation='continuous', copy=True, order="F"): """Resample a Niimg-like object Parameters ---------- img: Niimg-like object See http://nilearn.github.io/manipulating_images/input_output.html. Image(s) to resample. target_affine: numpy.ndarray, optional If specified, the image is resampled corresponding to this new affine. target_affine can be a 3x3 or a 4x4 matrix. (See notes) target_shape: tuple or list, optional If specified, the image will be resized to match this new shape. len(target_shape) must be equal to 3. If target_shape is specified, a target_affine of shape (4, 4) must also be given. 
(See notes) interpolation: str, optional Can be 'continuous' (default) or 'nearest'. Indicates the resample method. copy: bool, optional If True, guarantees that output array has no memory in common with input array. In all cases, input images are never modified by this function. order: "F" or "C" Data ordering in output array. This function is slightly faster with Fortran ordering. Returns ------- resampled: nibabel.Nifti1Image input image, resampled to have respectively target_shape and target_affine as shape and affine. See Also -------- nilearn.image.resample_to_img Notes ----- **BoundingBoxError** If a 4x4 transformation matrix (target_affine) is given and all of the transformed data points have a negative voxel index along one of the axis, then none of the data will be visible in the transformed image and a BoundingBoxError will be raised. If a 4x4 transformation matrix (target_affine) is given and no target shape is provided, the resulting image will have voxel coordinate (0, 0, 0) in the affine offset (4th column of target affine) and will extend far enough to contain all the visible data and a margin of one voxel. **3x3 transformation matrices** If a 3x3 transformation matrix is given as target_affine, it will be assumed to represent the three coordinate axes of the target space. In this case the affine offset (4th column of a 4x4 transformation matrix) as well as the target_shape will be inferred by resample_img, such that the resulting field of view is the closest possible (with a margin of 1 voxel) bounding box around the transformed data. In certain cases one may want to obtain a transformed image with the closest bounding box around the data, which at the same time respects a voxel grid defined by a 4x4 affine transformation matrix. In this case, one resamples the image using this function given the target affine and no target shape. One then uses crop_img on the result. **NaNs and infinite values** This function handles gracefully NaNs and infinite values in the input data, however they make the execution of the function much slower. """ from .image import new_img_like # avoid circular imports # Do as many checks as possible before loading data, to avoid potentially # costly calls before raising an exception. if target_shape is not None and target_affine is None: raise ValueError("If target_shape is specified, target_affine should" " be specified too.") if target_shape is not None and not len(target_shape) == 3: raise ValueError('The shape specified should be the shape of ' 'the 3D grid, and thus of length 3. 
%s was specified' % str(target_shape)) if target_shape is not None and target_affine.shape == (3, 3): raise ValueError("Given target shape without anchor vector: " "Affine shape should be (4, 4) and not (3, 3)") if interpolation == 'continuous': interpolation_order = 3 elif interpolation == 'nearest': interpolation_order = 0 else: message = ("interpolation must be either 'continuous' " "or 'nearest' but it was set to '{0}'").format(interpolation) raise ValueError(message) if isinstance(img, _basestring): # Avoid a useless copy input_img_is_string = True else: input_img_is_string = False img = _utils.check_niimg(img) # noop cases if target_affine is None and target_shape is None: if copy and not input_img_is_string: img = _utils.copy_img(img) return img if target_affine is not None: target_affine = np.asarray(target_affine) shape = img.shape affine = img.get_affine() if (np.all(np.array(target_shape) == shape[:3]) and np.allclose(target_affine, affine)): if copy and not input_img_is_string: img = _utils.copy_img(img) return img # We now know that some resampling must be done. # The value of "copy" is of no importance: output is always a separate # array. data = img.get_data() # Get a bounding box for the transformed data # Embed target_affine in 4x4 shape if necessary if target_affine.shape == (3, 3): missing_offset = True target_affine_tmp = np.eye(4) target_affine_tmp[:3, :3] = target_affine target_affine = target_affine_tmp else: missing_offset = False target_affine = target_affine.copy() transform_affine = np.linalg.inv(target_affine).dot(affine) (xmin, xmax), (ymin, ymax), (zmin, zmax) = get_bounds( data.shape[:3], transform_affine) # if target_affine is (3, 3), then calculate # offset from bounding box and update bounding box # to be in the voxel coordinates of the calculated 4x4 affine if missing_offset: offset = target_affine[:3, :3].dot([xmin, ymin, zmin]) target_affine[:3, 3] = offset (xmin, xmax), (ymin, ymax), (zmin, zmax) = ( (0, xmax - xmin), (0, ymax - ymin), (0, zmax - zmin)) # if target_shape is not given (always the case with 3x3 # transformation matrix and sometimes the case with 4x4 # transformation matrix), then set it to contain the bounding # box by a margin of 1 voxel if target_shape is None: target_shape = (int(np.ceil(xmax)) + 1, int(np.ceil(ymax)) + 1, int(np.ceil(zmax)) + 1) # Check whether transformed data is actually within the FOV # of the target affine if xmax < 0 or ymax < 0 or zmax < 0: raise BoundingBoxError("The field of view given " "by the target affine does " "not contain any of the data") if np.all(target_affine == affine): # Small trick to be more numerically stable transform_affine = np.eye(4) else: transform_affine = np.dot(linalg.inv(affine), target_affine) A, b = to_matrix_vector(transform_affine) A_inv = linalg.inv(A) # If A is diagonal, ndimage.affine_transform is clever enough to use a # better algorithm. 
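# (Illustrative aside, not part of the original source.) The test on the next
# line treats A as diagonal when rebuilding it from its diagonal alone leaves
# it unchanged, since np.diag(np.diag(A)) zeroes every off-diagonal entry:
#
#     >>> import numpy as np
#     >>> A = np.diag([2., 3., 1.])
#     >>> bool(np.all(np.diag(np.diag(A)) == A))
#     True
#     >>> A[0, 1] = .5
#     >>> bool(np.all(np.diag(np.diag(A)) == A))
#     False
#
# Passing the 1D diagonal to scipy.ndimage.affine_transform then triggers its
# faster separable (axis-by-axis) resampling path.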
if np.all(np.diag(np.diag(A)) == A): A = np.diag(A) else: b = np.dot(A, b) data_shape = list(data.shape) # Make sure that we have a list here if isinstance(target_shape, np.ndarray): target_shape = target_shape.tolist() target_shape = tuple(target_shape) if interpolation == 'continuous' and data.dtype.kind == 'i': # cast unsupported data types to closest supported dtype aux = data.dtype.name.replace('int', 'float') aux = aux.replace("ufloat", "float").replace("floatc", "float") if aux in ["float8", "float16"]: aux = "float32" warnings.warn("Casting data from %s to %s" % (data.dtype.name, aux)) resampled_data_dtype = np.dtype(aux) else: resampled_data_dtype = data.dtype # Code is generic enough to work for both 3D and 4D images other_shape = data_shape[3:] resampled_data = np.empty(list(target_shape) + other_shape, order=order, dtype=resampled_data_dtype) all_img = (slice(None), ) * 3 # Iterate over a set of 3D volumes, as the interpolation problem is # separable in the extra dimensions. This reduces the # computational cost for ind in np.ndindex(*other_shape): _resample_one_img(data[all_img + ind], A, A_inv, b, target_shape, interpolation_order, out=resampled_data[all_img + ind], copy=not input_img_is_string) return new_img_like(img, resampled_data, target_affine) def resample_to_img(source_img, target_img, interpolation='continuous', copy=True, order='F'): """Resample a Niimg-like source image on a target Niimg-like image (no registration is performed: the image should already be aligned). .. versionadded:: 0.2.4 Parameters ---------- source_img: Niimg-like object See http://nilearn.github.io/manipulating_images/input_output.html. Image(s) to resample. target_img: Niimg-like object See http://nilearn.github.io/manipulating_images/input_output.html. Reference image taken for resampling. interpolation: str, optional Can be 'continuous' (default) or 'nearest'. Indicates the resample method. copy: bool, optional If True, guarantees that output array has no memory in common with input array. In all cases, input images are never modified by this function. order: "F" or "C" Data ordering in output array. This function is slightly faster with Fortran ordering. Returns ------- resampled: nibabel.Nifti1Image input image, resampled to the target image's shape and affine. See Also -------- nilearn.image.resample_img """ target = _utils.check_niimg(target_img) target_shape = target.shape # When the target shape has more than 3 dimensions, we reduce it to 3, to be # compatible with the underlying call to resample_img if len(target_shape) > 3: target_shape = target.shape[:3] return resample_img(source_img, target_affine=target.get_affine(), target_shape=target_shape, interpolation=interpolation, copy=copy, order=order) def reorder_img(img, resample=None): """Returns an image with the affine diagonal (by permuting axes). The orientation of the new image will be RAS (Right, Anterior, Superior). If it is impossible to get xyz ordering by permuting the axes, a 'ValueError' is raised. Parameters ---------- img: Niimg-like object See http://nilearn.github.io/manipulating_images/input_output.html. Image to reorder. resample: None or string in {'continuous', 'nearest'}, optional If resample is None (default), no resampling is performed, the axes are only permuted. Otherwise resampling is performed and 'resample' will be passed as the 'interpolation' argument into resample_img. 
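Examples
--------
A minimal sketch, not from the original docstring: a synthetic image with a
flipped first axis comes back with a non-negative diagonal affine (bool() is
used only to keep the doctest output clean)::

    >>> import numpy as np
    >>> from nibabel import Nifti1Image
    >>> from nilearn.image.resampling import reorder_img
    >>> img = Nifti1Image(np.zeros((4, 5, 6)), np.diag([-2., 2., 2., 1.]))
    >>> fixed = reorder_img(img)
    >>> bool(np.all(np.diag(fixed.get_affine()) >= 0))
    True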
""" from .image import new_img_like img = _utils.check_niimg(img) # The copy is needed in order not to modify the input img affine # see https://github.com/nilearn/nilearn/issues/325 for a concrete bug affine = img.get_affine().copy() A, b = to_matrix_vector(affine) if not np.all((np.abs(A) > 0.001).sum(axis=0) == 1): # The affine is not nearly diagonal if resample is None: raise ValueError('Cannot reorder the axes: ' 'the image affine contains rotations') else: # Identify the voxel size using a QR decomposition of the # affine Q, R = np.linalg.qr(affine[:3, :3]) target_affine = np.diag(np.abs(np.diag(R))[ np.abs(Q).argmax(axis=1)]) return resample_img(img, target_affine=target_affine, interpolation=resample) axis_numbers = np.argmax(np.abs(A), axis=0) data = img.get_data() while not np.all(np.sort(axis_numbers) == axis_numbers): first_inversion = np.argmax(np.diff(axis_numbers)<0) axis1 = first_inversion + 1 axis2 = first_inversion data = np.swapaxes(data, axis1, axis2) order = np.array((0, 1, 2, 3)) order[axis1] = axis2 order[axis2] = axis1 affine = affine.T[order].T A, b = to_matrix_vector(affine) axis_numbers = np.argmax(np.abs(A), axis=0) # Now make sure the affine is positive pixdim = np.diag(A).copy() if pixdim[0] < 0: b[0] = b[0] + pixdim[0]*(data.shape[0] - 1) pixdim[0] = -pixdim[0] slice1 = slice(None, None, -1) else: slice1 = slice(None, None, None) if pixdim[1] < 0: b[1] = b[1] + 1 + pixdim[1]*(data.shape[1] - 1) pixdim[1] = -pixdim[1] slice2 = slice(None, None, -1) else: slice2 = slice(None, None, None) if pixdim[2] < 0: b[2] = b[2] + 1 + pixdim[2]*(data.shape[2] - 1) pixdim[2] = -pixdim[2] slice3 = slice(None, None, -1) else: slice3 = slice(None, None, None) data = data[slice1, slice2, slice3] affine = from_matrix_vector(np.diag(pixdim), b) return new_img_like(img, data, affine) PKHnilearn/image/tests/__init__.pyPKH'ndWdW&nilearn/image/tests/test_resampling.py""" Test the resampling code. """ import os import copy import math from nose import SkipTest from nose.tools import assert_equal, assert_raises, \ assert_false, assert_true, assert_almost_equal from numpy.testing import assert_array_equal, assert_array_almost_equal import numpy as np from nibabel import Nifti1Image from nilearn.image.resampling import resample_img, resample_to_img, reorder_img from nilearn.image.resampling import from_matrix_vector, coord_transform from nilearn.image.resampling import BoundingBoxError from nilearn._utils import testing ############################################################################### # Helper function def rotation(theta, phi): """ Returns a rotation 3x3 matrix. """ cos = np.cos sin = np.sin a1 = np.array([[cos(theta), -sin(theta), 0], [sin(theta), cos(theta), 0], [0, 0, 1]]) a2 = np.array([[1, 0, 0], [0, cos(phi), -sin(phi)], [0, sin(phi), cos(phi)]]) return np.dot(a1, a2) def pad(array, *args): """Pad an ndarray with zeros of quantity specified in args as follows args = (x1minpad, x1maxpad, x2minpad, x2maxpad, x3minpad, ...) """ if len(args) % 2 != 0: raise ValueError("Please specify as many max paddings as min" " paddings. 
You have specified %d arguments" % len(args)) all_paddings = np.zeros([array.ndim, 2], dtype=np.int64) all_paddings[:len(args) // 2] = np.array(args).reshape(-1, 2) lower_paddings, upper_paddings = all_paddings.T new_shape = np.array(array.shape) + upper_paddings + lower_paddings padded = np.zeros(new_shape, dtype=array.dtype) source_slices = [slice(max(-lp, 0), min(s + up, s)) for lp, up, s in zip(lower_paddings, upper_paddings, array.shape)] target_slices = [slice(max(lp, 0), min(s - up, s)) for lp, up, s in zip(lower_paddings, upper_paddings, new_shape)] padded[target_slices] = array[source_slices].copy() return padded ############################################################################### # Tests def test_identity_resample(): """ Test resampling with an identity affine. """ shape = (3, 2, 5, 2) data = np.random.randint(0, 10, shape) affine = np.eye(4) affine[:3, -1] = 0.5 * np.array(shape[:3]) rot_img = resample_img(Nifti1Image(data, affine), target_affine=affine, interpolation='nearest') np.testing.assert_almost_equal(data, rot_img.get_data()) # Smoke-test with a list affine rot_img = resample_img(Nifti1Image(data, affine), target_affine=affine.tolist(), interpolation='nearest') # Test with a 3x3 affine rot_img = resample_img(Nifti1Image(data, affine), target_affine=affine[:3, :3], interpolation='nearest') np.testing.assert_almost_equal(data, rot_img.get_data()) def test_downsample(): """ Test resampling with a 1/2 down-sampling affine. """ rand_gen = np.random.RandomState(0) shape = (6, 3, 6, 2) data = rand_gen.random_sample(shape) affine = np.eye(4) rot_img = resample_img(Nifti1Image(data, affine), target_affine=2 * affine, interpolation='nearest') downsampled = data[::2, ::2, ::2, ...] x, y, z = downsampled.shape[:3] np.testing.assert_almost_equal(downsampled, rot_img.get_data()[:x, :y, :z, ...]) def test_resampling_with_affine(): """ Test resampling with a given rotation part of the affine. 
""" prng = np.random.RandomState(10) data_3d = prng.randint(4, size=(1, 4, 4)) data_4d = prng.randint(4, size=(1, 4, 4, 3)) for data in [data_3d, data_4d]: for angle in (0, np.pi, np.pi / 2., np.pi / 4., np.pi / 3.): rot = rotation(0, angle) rot_img = resample_img(Nifti1Image(data, np.eye(4)), target_affine=rot, interpolation='nearest') assert_equal(np.max(data), np.max(rot_img.get_data())) assert_equal(rot_img.get_data().dtype, data.dtype) def test_resampling_continuous_with_affine(): prng = np.random.RandomState(10) data_3d = prng.randint(1, 4, size=(1, 10, 10)) data_4d = prng.randint(1, 4, size=(1, 10, 10, 3)) for data in [data_3d, data_4d]: for angle in (0, np.pi / 2., np.pi, 3 * np.pi / 2.): rot = rotation(0, angle) img = Nifti1Image(data, np.eye(4)) rot_img = resample_img( img, target_affine=rot, interpolation='continuous') rot_img_back = resample_img( rot_img, target_affine=np.eye(4), interpolation='continuous') center = slice(1, 9) # values on the edges are wrong for some reason mask = (0, center, center) np.testing.assert_allclose( img.get_data()[mask], rot_img_back.get_data()[mask]) assert_equal(rot_img.get_data().dtype, np.dtype(data.dtype.name.replace('int', 'float'))) def test_resampling_error_checks(): shape = (3, 2, 5, 2) target_shape = (5, 3, 2) affine = np.eye(4) data = np.random.randint(0, 10, shape) img = Nifti1Image(data, affine) # Correct parameters: no exception resample_img(img, target_shape=target_shape, target_affine=affine) resample_img(img, target_affine=affine) with testing.write_tmp_imgs(img) as filename: resample_img(filename, target_shape=target_shape, target_affine=affine) # Missing parameter assert_raises(ValueError, resample_img, img, target_shape=target_shape) # Invalid shape assert_raises(ValueError, resample_img, img, target_shape=(2, 3), target_affine=affine) # Invalid interpolation interpolation = 'an_invalid_interpolation' pattern = "interpolation must be either.+{0}".format(interpolation) testing.assert_raises_regex(ValueError, pattern, resample_img, img, target_shape=target_shape, target_affine=affine, interpolation="an_invalid_interpolation") # Noop target_shape = shape[:3] img_r = resample_img(img, copy=False) assert_equal(img_r, img) img_r = resample_img(img, copy=True) assert_false(np.may_share_memory(img_r.get_data(), img.get_data())) np.testing.assert_almost_equal(img_r.get_data(), img.get_data()) np.testing.assert_almost_equal(img_r.get_affine(), img.get_affine()) img_r = resample_img(img, target_affine=affine, target_shape=target_shape, copy=False) assert_equal(img_r, img) img_r = resample_img(img, target_affine=affine, target_shape=target_shape, copy=True) assert_false(np.may_share_memory(img_r.get_data(), img.get_data())) np.testing.assert_almost_equal(img_r.get_data(), img.get_data()) np.testing.assert_almost_equal(img_r.get_affine(), img.get_affine()) def test_4d_affine_bounding_box_error(): small_data = np.ones([4, 4, 4]) small_data_4D_affine = np.eye(4) small_data_4D_affine[:3, -1] = np.array([5, 4, 5]) small_img = Nifti1Image(small_data, small_data_4D_affine) bigger_data_4D_affine = np.eye(4) bigger_data = np.zeros([10, 10, 10]) bigger_img = Nifti1Image(bigger_data, bigger_data_4D_affine) # We would like to check whether all/most of the data # will be contained in the resampled image # The measure will be the l2 norm, since some resampling # schemes approximately conserve it def l2_norm(arr): return (arr ** 2).sum() # resample using 4D affine and specified target shape small_to_big_with_shape = resample_img( small_img, 
target_affine=bigger_img.get_affine(), target_shape=bigger_img.shape) # resample using 3D affine and no target shape small_to_big_without_shape_3D_affine = resample_img( small_img, target_affine=bigger_img.get_affine()[:3, :3]) # resample using 4D affine and no target shape small_to_big_without_shape = resample_img( small_img, target_affine=bigger_img.get_affine()) # The first 2 should pass assert_almost_equal(l2_norm(small_data), l2_norm(small_to_big_with_shape.get_data())) assert_almost_equal(l2_norm(small_data), l2_norm(small_to_big_without_shape_3D_affine.get_data())) # After correcting decision tree for 4x4 affine given + no target shape # from "use initial shape" to "calculate minimal bounding box respecting # the affine anchor and the data" assert_almost_equal(l2_norm(small_data), l2_norm(small_to_big_without_shape.get_data())) assert_array_equal(small_to_big_without_shape.shape, small_data_4D_affine[:3, -1] + np.array(small_img.shape)) def test_raises_upon_3x3_affine_and_no_shape(): img = Nifti1Image(np.zeros([8, 9, 10]), affine=np.eye(4)) exception = ValueError message = ("Given target shape without anchor " "vector: Affine shape should be \(4, 4\) and " "not \(3, 3\)") testing.assert_raises_regex( exception, message, resample_img, img, target_affine=np.eye(3) * 2, target_shape=(10, 10, 10)) def test_raises_bbox_error_if_data_outside_box(): # Make some cases which should raise exceptions # original image data = np.zeros([8, 9, 10]) affine = np.eye(4) affine_offset = np.array([1, 1, 1]) affine[:3, 3] = affine_offset img = Nifti1Image(data, affine) # some axis flipping affines axis_flips = np.array(list(map(np.diag, [[-1, 1, 1, 1], [1, -1, 1, 1], [1, 1, -1, 1], [-1, -1, 1, 1], [-1, 1, -1, 1], [1, -1, -1, 1]]))) # some in plane 90 degree rotations base on these # (by permuting two lines) af = axis_flips rotations = np.array([af[0][[1, 0, 2, 3]], af[0][[2, 1, 0, 3]], af[1][[1, 0, 2, 3]], af[1][[0, 2, 1, 3]], af[2][[2, 1, 0, 3]], af[2][[0, 2, 1, 3]]]) new_affines = np.concatenate([axis_flips, rotations]) new_offset = np.array([0., 0., 0.]) new_affines[:, :3, 3] = new_offset[np.newaxis, :] for new_affine in new_affines: exception = BoundingBoxError message = ("The field of view given " "by the target affine does " "not contain any of the data") testing.assert_raises_regex( exception, message, resample_img, img, target_affine=new_affine) def test_resampling_result_axis_permutation(): # Transform real data using easily checkable transformations # For now: axis permutations # create a cuboid full of deterministic data, padded with one # voxel thickness of zeros core_shape = (3, 5, 4) core_data = np.arange(np.prod(core_shape)).reshape(core_shape) full_data_shape = np.array(core_shape) + 2 full_data = np.zeros(full_data_shape) full_data[[slice(1, 1 + s) for s in core_shape]] = core_data source_img = Nifti1Image(full_data, np.eye(4)) axis_permutations = [[0, 1, 2], [1, 0, 2], [2, 1, 0], [0, 2, 1]] # check 3x3 transformation matrix for ap in axis_permutations: target_affine = np.eye(3)[ap] resampled_img = resample_img(source_img, target_affine=target_affine) resampled_data = resampled_img.get_data() what_resampled_data_should_be = full_data.transpose(ap) assert_array_almost_equal(resampled_data, what_resampled_data_should_be) # check 4x4 transformation matrix offset = np.array([-2, 1, -3]) for ap in axis_permutations: target_affine = np.eye(4) target_affine[:3, :3] = np.eye(3)[ap] target_affine[:3, 3] = offset resampled_img = resample_img(source_img, target_affine=target_affine) resampled_data 
= resampled_img.get_data() offset_cropping = np.vstack([-offset[ap][np.newaxis, :], np.zeros([1, 3])] ).T.ravel().astype(int) what_resampled_data_should_be = pad(full_data.transpose(ap), *list(offset_cropping)) assert_array_almost_equal(resampled_data, what_resampled_data_should_be) def test_resampling_nan(): # Test that when the data has NaNs they do not propagate to the # whole image for core_shape in [(3, 5, 4), (3, 5, 4, 2)]: # create deterministic data, padded with one # voxel thickness of zeros core_data = np.arange(np.prod(core_shape) ).reshape(core_shape).astype(np.float) # Introduce a nan core_data[2, 2:4, 1] = np.nan full_data_shape = np.array(core_shape) + 2 full_data = np.zeros(full_data_shape) full_data[[slice(1, 1 + s) for s in core_shape]] = core_data source_img = Nifti1Image(full_data, np.eye(4)) # Transform real data using easily checkable transformations # For now: axis permutations axis_permutation = [0, 1, 2] # check 3x3 transformation matrix target_affine = np.eye(3)[axis_permutation] resampled_img = testing.assert_warns( RuntimeWarning, resample_img, source_img, target_affine=target_affine) resampled_data = resampled_img.get_data() if full_data.ndim == 4: axis_permutation.append(3) what_resampled_data_should_be = full_data.transpose(axis_permutation) non_nan = np.isfinite(what_resampled_data_should_be) # Check that the input data hasn't been modified: assert_false(np.all(non_nan)) # Check that for finite value resampling works without problems assert_array_almost_equal(resampled_data[non_nan], what_resampled_data_should_be[non_nan]) # Check that what was not finite is still not finite assert_false(np.any(np.isfinite( resampled_data[np.logical_not(non_nan)]))) # Test with an actual resampling, in the case of a bigish hole # This checks the extrapolation mechanism: if we don't do any # extrapolation before resampling, the hole creates big # artefacts data = 10 * np.ones((10, 10, 10)) data[4:6, 4:6, 4:6] = np.nan source_img = Nifti1Image(data, 2 * np.eye(4)) resampled_img = testing.assert_warns( RuntimeWarning, resample_img, source_img, target_affine=np.eye(4)) resampled_data = resampled_img.get_data() np.testing.assert_allclose(10, resampled_data[np.isfinite(resampled_data)]) def test_resample_to_img(): # Testing resample to img function rand_gen = np.random.RandomState(0) shape = (6, 3, 6, 3) data = rand_gen.random_sample(shape) source_affine = np.eye(4) source_img = Nifti1Image(data, source_affine) target_affine = 2 * source_affine target_img = Nifti1Image(data, target_affine) result_img = resample_to_img(source_img, target_img, interpolation='nearest') downsampled = data[::2, ::2, ::2, ...] x, y, z = downsampled.shape[:3] np.testing.assert_almost_equal(downsampled, result_img.get_data()[:x, :y, :z, ...]) def test_reorder_img(): # We need to test on a square array, as rotation does not change # shape, whereas reordering does. 
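# (Added note, not in the original tests.) On a square spatial grid, a
# rotation by a multiple of 90 degrees maps the grid onto itself, so
# resample_img preserves the shape; reorder_img instead permutes (and possibly
# flips) axes, which on a non-square grid would also permute the shape tuple.
# A square array therefore lets both outputs be compared voxel-for-voxel
# against the same reference data.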
shape = (5, 5, 5, 2, 2) rng = np.random.RandomState(42) data = rng.rand(*shape) affine = np.eye(4) affine[:3, -1] = 0.5 * np.array(shape[:3]) ref_img = Nifti1Image(data, affine) # Test with purely positive matrices and compare to a rotation for theta, phi in np.random.randint(4, size=(5, 2)): rot = rotation(theta * np.pi / 2, phi * np.pi / 2) rot[np.abs(rot) < 0.001] = 0 rot[rot > 0.9] = 1 rot[rot < -0.9] = 1 b = 0.5 * np.array(shape[:3]) new_affine = from_matrix_vector(rot, b) rot_img = resample_img(ref_img, target_affine=new_affine) np.testing.assert_array_equal(rot_img.get_affine(), new_affine) np.testing.assert_array_equal(rot_img.get_data().shape, shape) reordered_img = reorder_img(rot_img) np.testing.assert_array_equal(reordered_img.get_affine()[:3, :3], np.eye(3)) np.testing.assert_almost_equal(reordered_img.get_data(), data) # Create a non-diagonal affine, and check that we raise a sensible # exception affine[1, 0] = 0.1 ref_img = Nifti1Image(data, affine) testing.assert_raises_regex(ValueError, 'Cannot reorder the axes', reorder_img, ref_img) # Test that no exception is raised when resample='continuous' reorder_img(ref_img, resample='continuous') # Test that resample args gets passed to resample_img interpolation = 'nearest' reordered_img = reorder_img(ref_img, resample=interpolation) resampled_img = resample_img(ref_img, target_affine=reordered_img.get_affine(), interpolation=interpolation) np.testing.assert_array_equal(reordered_img.get_data(), resampled_img.get_data()) # Make sure invalid resample argument is included in the error message interpolation = 'an_invalid_interpolation' pattern = "interpolation must be either.+{0}".format(interpolation) testing.assert_raises_regex(ValueError, pattern, reorder_img, ref_img, resample=interpolation) # Test flipping an axis data = rng.rand(*shape) for i in (0, 1, 2): # Make a diagonal affine with a negative axis, and check that # can be reordered, also vary the shape shape = (i + 1, i + 2, 3 - i) affine = np.eye(4) affine[i, i] *= -1 img = Nifti1Image(data, affine) orig_img = copy.copy(img) #x, y, z = img.get_world_coords() #sample = img.values_in_world(x, y, z) img2 = reorder_img(img) # Check that img has not been changed np.testing.assert_array_equal(img.get_affine(), orig_img.get_affine()) np.testing.assert_array_equal(img.get_data(), orig_img.get_data()) # Test that the affine is indeed diagonal: np.testing.assert_array_equal(img2.get_affine()[:3, :3], np.diag(np.diag( img2.get_affine()[:3, :3]))) assert_true(np.all(np.diag(img2.get_affine()) >= 0)) def test_reorder_img_non_native_endianness(): def _get_resampled_img(dtype): data = np.ones((10, 10, 10), dtype=dtype) data[3:7, 3:7, 3:7] = 2 affine = np.eye(4) theta = math.pi / 6. 
c = math.cos(theta) s = math.sin(theta) affine = np.array([[1, 0, 0, 0], [0, c, -s, 0], [0, s, c, 0], [0, 0, 0, 1]]) img = Nifti1Image(data, affine) return resample_img(img, target_affine=np.eye(4)) img_1 = _get_resampled_img('<f8') img_2 = _get_resampled_img('>f8') np.testing.assert_equal(img_1.get_data(), img_2.get_data()) def test_coord_transform_trivial(): sform = np.eye(4) x = np.random.random((10,)) y = np.random.random((10,)) z = np.random.random((10,)) x_, y_, z_ = coord_transform(x, y, z, sform) np.testing.assert_array_equal(x, x_) np.testing.assert_array_equal(y, y_) np.testing.assert_array_equal(z, z_) sform[:, -1] = 1 x_, y_, z_ = coord_transform(x, y, z, sform) np.testing.assert_array_equal(x + 1, x_) np.testing.assert_array_equal(y + 1, y_) np.testing.assert_array_equal(z + 1, z_) def test_resample_img_segmentation_fault(): if os.environ.get('APPVEYOR') == 'True': raise SkipTest('This test is too slow (7-8 minutes) on AppVeyor') # see https://github.com/nilearn/nilearn/issues/346 shape_in = (64, 64, 64) aff_in = np.diag([2., 2., 2., 1.]) aff_out = np.diag([3., 3., 3., 1.]) # fourth_dim = 1024 works fine but for 1025 creates a segmentation # fault with scipy < 0.14.1 fourth_dim = 1025 try: data = np.ones(shape_in + (fourth_dim, ), dtype=np.float64) except MemoryError: # This can happen on AppVeyor and for 32-bit Python on Windows raise SkipTest('Not enough RAM to run this test') img_in = Nifti1Image(data, aff_in) resample_img(img_in, target_affine=aff_out, interpolation='nearest') def test_resampling_with_int_types_no_crash(): affine = np.eye(4) data = np.zeros((2, 2, 2)) for dtype in [np.int, np.int8, np.int16, np.int32, np.int64, np.uint, np.uint8, np.uint16, np.uint32, np.uint64, np.float32, np.float64, np.float]: img = Nifti1Image(data.astype(dtype), affine) resample_img(img, target_affine=2. * affine) nilearn/image/tests/test_image.py""" Test image pre-processing functions """ from nose.tools import assert_true, assert_false, assert_equal from distutils.version import LooseVersion from nose import SkipTest import platform import os import nibabel from nibabel import Nifti1Image import numpy as np from numpy.testing import assert_array_equal, assert_allclose from nilearn._utils.testing import assert_raises_regex from nilearn import signal from nilearn.image import image from nilearn.image import resampling from nilearn.image import concat_imgs from nilearn._utils import testing, niimg_conversions from nilearn.image import new_img_like from nilearn.image import threshold_img from nilearn.image import iter_img from nilearn.image import math_img X64 = (platform.architecture()[0] == '64bit') currdir = os.path.dirname(os.path.abspath(__file__)) datadir = os.path.join(currdir, 'data') def test_high_variance_confounds(): # See also test_signals.test_high_variance_confounds() # There are only tests on what is added by image.high_variance_confounds() # compared to signal.high_variance_confounds() shape = (40, 41, 42) length = 17 n_confounds = 10 img, mask_img = testing.generate_fake_fmri(shape=shape, length=length) confounds1 = image.high_variance_confounds(img, mask_img=mask_img, percentile=10., n_confounds=n_confounds) assert_true(confounds1.shape == (length, n_confounds)) # No mask. 
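# (Added note, not in the original tests.) Without a mask_img,
# image.high_variance_confounds falls back to using every voxel in the image:
# as in signal.high_variance_confounds, the highest-variance voxel time
# series (top `percentile`) are kept and `n_confounds` components are
# extracted from them with an SVD, so the expected shape is still
# (length, n_confounds).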
confounds2 = image.high_variance_confounds(img, percentile=10., n_confounds=n_confounds) assert_true(confounds2.shape == (length, n_confounds)) def test__fast_smooth_array(): N = 4 shape = (N, N, N) # hardcoded in _fast_smooth_array neighbor_weight = 0.2 # 6 neighbors in 3D if you are not on an edge nb_neighbors_max = 6 data = np.ones(shape) smooth_data = image._fast_smooth_array(data) # this contains the number of neighbors for each cell in the array nb_neighbors_arr = np.empty(shape) for (i, j, k), __ in np.ndenumerate(nb_neighbors_arr): nb_neighbors_arr[i, j, k] = (3 + (0 < i < N - 1) + (0 < j < N - 1) + (0 < k < N - 1)) expected = ((1 + neighbor_weight * nb_neighbors_arr) / (1 + neighbor_weight * nb_neighbors_max)) np.testing.assert_allclose(smooth_data, expected) def test__smooth_array(): """Test smoothing of images: _smooth_array()""" # Impulse in 3D data = np.zeros((40, 41, 42)) data[20, 20, 20] = 1 # fwhm divided by any test affine must be odd. Otherwise assertion below # will fail. ( 9 / 0.6 = 15 is fine) fwhm = 9 test_affines = (np.eye(4), np.diag((1, 1, -1, 1)), np.diag((.6, 1, .6, 1))) for affine in test_affines: filtered = image._smooth_array(data, affine, fwhm=fwhm, copy=True) assert_false(np.may_share_memory(filtered, data)) # We are expecting a full-width at half maximum of # fwhm / voxel_size: vmax = filtered.max() above_half_max = filtered > .5 * vmax for axis in (0, 1, 2): proj = np.any(np.any(np.rollaxis(above_half_max, axis=axis), axis=-1), axis=-1) np.testing.assert_equal(proj.sum(), fwhm / np.abs(affine[axis, axis])) # Check that NaNs in the data do not propagate data[10, 10, 10] = np.NaN filtered = image._smooth_array(data, affine, fwhm=fwhm, ensure_finite=True, copy=True) assert_true(np.all(np.isfinite(filtered))) # Check copy=False. for affine in test_affines: data = np.zeros((40, 41, 42)) data[20, 20, 20] = 1 image._smooth_array(data, affine, fwhm=fwhm, copy=False) # We are expecting a full-width at half maximum of # fwhm / voxel_size: vmax = data.max() above_half_max = data > .5 * vmax for axis in (0, 1, 2): proj = np.any(np.any(np.rollaxis(above_half_max, axis=axis), axis=-1), axis=-1) np.testing.assert_equal(proj.sum(), fwhm / np.abs(affine[axis, axis])) # Check fwhm='fast' for affine in test_affines: np.testing.assert_equal(image._smooth_array(data, affine, fwhm='fast'), image._fast_smooth_array(data)) def test_smooth_img(): # This function only checks added functionalities compared # to _smooth_array() shapes = ((10, 11, 12), (13, 14, 15)) lengths = (17, 18) fwhm = (1., 2., 3.) 
img1, mask1 = testing.generate_fake_fmri(shape=shapes[0], length=lengths[0]) img2, mask2 = testing.generate_fake_fmri(shape=shapes[1], length=lengths[1]) for create_files in (False, True): with testing.write_tmp_imgs(img1, img2, create_files=create_files) as imgs: # List of images as input out = image.smooth_img(imgs, fwhm) assert_true(isinstance(out, list)) assert_true(len(out) == 2) for o, s, l in zip(out, shapes, lengths): assert_true(o.shape == (s + (l,))) # Single image as input out = image.smooth_img(imgs[0], fwhm) assert_true(isinstance(out, nibabel.Nifti1Image)) assert_true(out.shape == (shapes[0] + (lengths[0],))) def test__crop_img_to(): data = np.zeros((5, 6, 7)) data[2:4, 1:5, 3:6] = 1 affine = np.diag((4, 3, 2, 1)) img = nibabel.Nifti1Image(data, affine=affine) slices = [slice(2, 4), slice(1, 5), slice(3, 6)] cropped_img = image._crop_img_to(img, slices, copy=False) new_origin = np.array((4, 3, 2)) * np.array((2, 1, 3)) # check that correct part was extracted: assert_true((cropped_img.get_data() == 1).all()) assert_true(cropped_img.shape == (2, 4, 3)) # check that affine was adjusted correctly assert_true((cropped_img.get_affine()[:3, 3] == new_origin).all()) # check that data was really not copied data[2:4, 1:5, 3:6] = 2 assert_true((cropped_img.get_data() == 2).all()) # check that copying works copied_cropped_img = image._crop_img_to(img, slices) data[2:4, 1:5, 3:6] = 1 assert_true((copied_cropped_img.get_data() == 2).all()) def test_crop_img(): data = np.zeros((5, 6, 7)) data[2:4, 1:5, 3:6] = 1 affine = np.diag((4, 3, 2, 1)) img = nibabel.Nifti1Image(data, affine=affine) cropped_img = image.crop_img(img) # correction for padding with "-1" new_origin = np.array((4, 3, 2)) * np.array((2 - 1, 1 - 1, 3 - 1)) # check that correct part was extracted: # This also corrects for padding assert_true((cropped_img.get_data()[1:-1, 1:-1, 1:-1] == 1).all()) assert_true(cropped_img.shape == (2 + 2, 4 + 2, 3 + 2)) def test_crop_threshold_tolerance(): """Check to see whether crop can skip values that are extremely close to zero in a relative sense and will crop them away""" data = np.zeros([10, 14, 12]) data[3:7, 3:7, 5:9] = 1. active_shape = (4 + 2, 4 + 2, 4 + 2) # add padding # add an infinitesimal outside this block data[3, 3, 3] = 1e-12 affine = np.eye(4) img = nibabel.Nifti1Image(data, affine=affine) cropped_img = image.crop_img(img) assert_true(cropped_img.shape == active_shape) def test_mean_img(): rng = np.random.RandomState(42) data1 = np.zeros((5, 6, 7)) data2 = rng.rand(5, 6, 7) data3 = rng.rand(5, 6, 7, 3) affine = np.diag((4, 3, 2, 1)) img1 = nibabel.Nifti1Image(data1, affine=affine) img2 = nibabel.Nifti1Image(data2, affine=affine) img3 = nibabel.Nifti1Image(data3, affine=affine) for imgs in ([img1, ], [img1, img2], [img2, img1, img2], [img3, img1, img2], # Mixture of 4D and 3D images ): arrays = list() # Ground-truth: for img in imgs: img = img.get_data() if img.ndim == 4: img = np.mean(img, axis=-1) arrays.append(img) truth = np.mean(arrays, axis=0) mean_img = image.mean_img(imgs) assert_array_equal(mean_img.get_affine(), affine) assert_array_equal(mean_img.get_data(), truth) # Test with files with testing.write_tmp_imgs(*imgs) as imgs: mean_img = image.mean_img(imgs) assert_array_equal(mean_img.get_affine(), affine) if X64: assert_array_equal(mean_img.get_data(), truth) else: # We don't really understand but arrays are not # exactly equal on 32bit. 
Given that you can not do # much real world data analysis with nilearn on a # 32bit machine it is not worth investigating more assert_allclose(mean_img.get_data(), truth, rtol=np.finfo(truth.dtype).resolution, atol=0) def test_mean_img_resample(): # Test resampling in mean_img with a permutation of the axes rng = np.random.RandomState(42) data = rng.rand(5, 6, 7, 40) affine = np.diag((4, 3, 2, 1)) img = nibabel.Nifti1Image(data, affine=affine) mean_img = nibabel.Nifti1Image(data.mean(axis=-1), affine=affine) target_affine = affine[:, [1, 0, 2, 3]] # permutation of axes mean_img_with_resampling = image.mean_img(img, target_affine=target_affine) resampled_mean_image = resampling.resample_img(mean_img, target_affine=target_affine) assert_array_equal(resampled_mean_image.get_data(), mean_img_with_resampling.get_data()) assert_array_equal(resampled_mean_image.get_affine(), mean_img_with_resampling.get_affine()) assert_array_equal(mean_img_with_resampling.get_affine(), target_affine) def test_swap_img_hemispheres(): # make sure input image data is not overwritten inside function data = np.random.randn(4, 5, 7) data_img = nibabel.Nifti1Image(data, np.eye(4)) image.swap_img_hemispheres(data_img) np.testing.assert_array_equal(data_img.get_data(), data) # swapping operations work np.testing.assert_array_equal( # one turn image.swap_img_hemispheres(data_img).get_data(), data[::-1]) np.testing.assert_array_equal( # two turns -> back to original data image.swap_img_hemispheres( image.swap_img_hemispheres(data_img)).get_data(), data) def test_concat_imgs(): assert_true(concat_imgs is niimg_conversions.concat_niimgs) def test_index_img(): img_3d = nibabel.Nifti1Image(np.ones((3, 4, 5)), np.eye(4)) testing.assert_raises_regex(TypeError, "Input data has incompatible dimensionality: " "Expected dimension is 4D and you provided " "a 3D image.", image.index_img, img_3d, 0) affine = np.array([[1., 2., 3., 4.], [5., 6., 7., 8.], [9., 10., 11., 12.], [0., 0., 0., 1.]]) img_4d, _ = testing.generate_fake_fmri(affine=affine) fourth_dim_size = img_4d.shape[3] tested_indices = (list(range(fourth_dim_size)) + [slice(2, 8, 2), [1, 2, 3, 2], [], (np.arange(fourth_dim_size) % 3) == 1]) for i in tested_indices: this_img = image.index_img(img_4d, i) expected_data_3d = img_4d.get_data()[..., i] assert_array_equal(this_img.get_data(), expected_data_3d) assert_array_equal(this_img.get_affine(), img_4d.get_affine()) for i in [fourth_dim_size, - fourth_dim_size - 1, [0, fourth_dim_size], np.repeat(True, fourth_dim_size + 1)]: testing.assert_raises_regex( IndexError, 'out of bounds|invalid index|out of range', image.index_img, img_4d, i) def test_iter_img(): img_3d = nibabel.Nifti1Image(np.ones((3, 4, 5)), np.eye(4)) testing.assert_raises_regex(TypeError, "Input data has incompatible dimensionality: " "Expected dimension is 4D and you provided " "a 3D image.", image.iter_img, img_3d) affine = np.array([[1., 2., 3., 4.], [5., 6., 7., 8.], [9., 10., 11., 12.], [0., 0., 0., 1.]]) img_4d, _ = testing.generate_fake_fmri(affine=affine) for i, img in enumerate(image.iter_img(img_4d)): expected_data_3d = img_4d.get_data()[..., i] assert_array_equal(img.get_data(), expected_data_3d) assert_array_equal(img.get_affine(), img_4d.get_affine()) with testing.write_tmp_imgs(img_4d) as img_4d_filename: for i, img in enumerate(image.iter_img(img_4d_filename)): expected_data_3d = img_4d.get_data()[..., i] assert_array_equal(img.get_data(), expected_data_3d) assert_array_equal(img.get_affine(), img_4d.get_affine()) # enables to delete 
"img_4d_filename" on windows del img img_3d_list = list(image.iter_img(img_4d)) for i, img in enumerate(image.iter_img(img_3d_list)): expected_data_3d = img_4d.get_data()[..., i] assert_array_equal(img.get_data(), expected_data_3d) assert_array_equal(img.get_affine(), img_4d.get_affine()) with testing.write_tmp_imgs(*img_3d_list) as img_3d_filenames: for i, img in enumerate(image.iter_img(img_3d_filenames)): expected_data_3d = img_4d.get_data()[..., i] assert_array_equal(img.get_data(), expected_data_3d) assert_array_equal(img.get_affine(), img_4d.get_affine()) # enables to delete "img_3d_filename" on windows del img def test_new_img_like_mgz(): """Check that new images can be generated with bool MGZ type This is usually when computing masks using MGZ inputs, e.g. when using plot_stap_map """ if not LooseVersion(nibabel.__version__) >= LooseVersion('1.2.0'): # Old nibabel do not support MGZ files raise SkipTest ref_img = nibabel.load(os.path.join(datadir, 'test.mgz')) data = np.ones(ref_img.get_data().shape, dtype=np.bool) affine = ref_img.get_affine() new_img_like(ref_img, data, affine, copy_header=False) def test_new_img_like(): # Give a list to new_img_like data = np.zeros((5, 6, 7)) data[2:4, 1:5, 3:6] = 1 affine = np.diag((4, 3, 2, 1)) img = nibabel.Nifti1Image(data, affine=affine) img2 = new_img_like([img, ], data) np.testing.assert_array_equal(img.get_data(), img2.get_data()) def test_validity_threshold_value_in_threshold_img(): shape = (6, 8, 10) maps, _ = testing.generate_maps(shape, n_regions=2) # testing to raise same error when threshold=None case testing.assert_raises_regex(ValueError, "The input parameter 'threshold' is empty. ", threshold_img, maps, threshold=None) invalid_threshold_values = ['90t%', 's%', 't', '0.1'] name = 'threshold' for thr in invalid_threshold_values: testing.assert_raises_regex(ValueError, '{0}.+should be a number followed by ' 'the percent sign'.format(name), threshold_img, maps, threshold=thr) def test_threshold_img(): # to check whether passes with valid threshold inputs shape = (10, 20, 30) maps, _ = testing.generate_maps(shape, n_regions=4) affine = np.eye(4) mask_img = nibabel.Nifti1Image(np.ones((shape), dtype=np.int8), affine) for img in iter_img(maps): # when threshold is a float value thr_maps_img = threshold_img(img, threshold=0.8) # when we provide mask image thr_maps_percent = threshold_img(img, threshold=1, mask_img=mask_img) # when threshold is a percentile thr_maps_percent2 = threshold_img(img, threshold='2%') def test_isnan_threshold_img_data(): shape = (10, 10, 10) maps, _ = testing.generate_maps(shape, n_regions=2) data = maps.get_data() data[:, :, 0] = np.nan maps_img = nibabel.Nifti1Image(data, np.eye(4)) # test threshold_img to converge properly when input image has nans. threshold_img(maps_img, threshold=0.8) def test_math_img_exceptions(): img1 = Nifti1Image(np.ones((10, 10, 10, 10)), np.eye(4)) img2 = Nifti1Image(np.zeros((10, 20, 10, 10)), np.eye(4)) img3 = Nifti1Image(np.ones((10, 10, 10, 10)), np.eye(4)) img4 = Nifti1Image(np.ones((10, 10, 10, 10)), np.eye(4) * 2) formula = "np.mean(img1, axis=-1) - np.mean(img2, axis=-1)" # Images with different shapes should raise a ValueError exception. assert_raises_regex(ValueError, "Input images cannot be compared", math_img, formula, img1=img1, img2=img2) # Images with different affines should raise a ValueError exception. 
assert_raises_regex(ValueError, "Input images cannot be compared", math_img, formula, img1=img1, img2=img4) bad_formula = "np.toto(img1, axis=-1) - np.mean(img3, axis=-1)" assert_raises_regex(AttributeError, "Input formula couldn't be processed", math_img, bad_formula, img1=img1, img3=img3) def test_math_img(): img1 = Nifti1Image(np.ones((10, 10, 10, 10)), np.eye(4)) img2 = Nifti1Image(np.zeros((10, 10, 10, 10)), np.eye(4)) expected_result = Nifti1Image(np.ones((10, 10, 10)), np.eye(4)) formula = "np.mean(img1, axis=-1) - np.mean(img2, axis=-1)" for create_files in (True, False): with testing.write_tmp_imgs(img1, img2, create_files=create_files) as imgs: result = math_img(formula, img1=imgs[0], img2=imgs[1]) assert_array_equal(result.get_data(), expected_result.get_data()) assert_array_equal(result.get_affine(), expected_result.get_affine()) assert_equal(result.shape, expected_result.shape) def test_clean_img(): rng = np.random.RandomState(0) data = rng.randn(10, 10, 10, 100) + .5 data_flat = data.T.reshape(100, -1) data_img = nibabel.Nifti1Image(data, np.eye(4)) data_img_ = image.clean_img( data_img, detrend=True, standardize=False, low_pass=0.1) data_flat_ = signal.clean( data_flat, detrend=True, standardize=False, low_pass=0.1) np.testing.assert_almost_equal(data_img_.get_data().T.reshape(100, -1), data_flat_) nilearn/image/tests/data/__init__.py nilearn/image/tests/data/test.mgz nilearn/datasets/func.py""" Downloading NeuroImaging datasets: functional datasets (task + resting-state) """ import warnings import os import re import json import numpy as np import numbers import nibabel from sklearn.datasets.base import Bunch from .utils import (_get_dataset_dir, _fetch_files, _get_dataset_descr, _read_md5_sum_file, _tree, _filter_columns) from .._utils import check_niimg from .._utils.compat import BytesIO, _basestring, _urllib from .._utils.numpy_conversions import csv_to_array def fetch_haxby_simple(data_dir=None, url=None, resume=True, verbose=1): """Download and load a simple example haxby dataset. Parameters ---------- data_dir: string, optional Path of the data directory. Used to force data storage in a specified location. Default: None Returns ------- data: sklearn.datasets.base.Bunch Dictionary-like object, interest attributes are: 'func': list of string. Path to nifti file with bold data. 'session_target': list of string. Path to text file containing session and target data. 'mask': string. Path to nifti mask file. 'session': list of string. Path to text file containing labels (can be used for LeaveOneLabelOut cross validation for example). References ---------- `Haxby, J., Gobbini, M., Furey, M., Ishai, A., Schouten, J., and Pietrini, P. (2001). Distributed and overlapping representations of faces and objects in ventral temporal cortex. Science 293, 2425-2430.` Notes ----- PyMVPA provides a tutorial using this dataset: http://www.pymvpa.org/tutorial.html More information about its structure: http://dev.pymvpa.org/datadb/haxby2001.html See `additional information `_ """ # URL of the dataset. 
It is optional because a test uses it to test dataset # downloading if url is None: url = 'http://www.pymvpa.org/files/pymvpa_exampledata.tar.bz2' opts = {'uncompress': True} files = [ (os.path.join('pymvpa-exampledata', 'attributes.txt'), url, opts), (os.path.join('pymvpa-exampledata', 'bold.nii.gz'), url, opts), (os.path.join('pymvpa-exampledata', 'mask.nii.gz'), url, opts), (os.path.join('pymvpa-exampledata', 'attributes_literal.txt'), url, opts), ] dataset_name = 'haxby2001_simple' data_dir = _get_dataset_dir(dataset_name, data_dir=data_dir, verbose=verbose) files = _fetch_files(data_dir, files, resume=resume, verbose=verbose) # There is a common file for the two versions of Haxby fdescr = _get_dataset_descr('haxby2001') # Lists of length 1 are used because haxby_simple is single-subject return Bunch(func=[files[1]], session_target=[files[0]], mask=files[2], conditions_target=[files[3]], description=fdescr) def fetch_haxby(data_dir=None, n_subjects=1, fetch_stimuli=False, url=None, resume=True, verbose=1): """Download and load the complete haxby dataset Parameters ---------- data_dir: string, optional Path of the data directory. Used to force data storage in a specified location. Default: None n_subjects: int, optional Number of subjects, from 1 to 6. fetch_stimuli: boolean, optional Indicates whether stimuli images must be downloaded. They will be presented as a dictionary of categories. Returns ------- data: sklearn.datasets.base.Bunch Dictionary-like object, the interest attributes are: 'anat': string list. Paths to anatomic images. 'func': string list. Paths to nifti file with bold data. 'session_target': string list. Paths to text file containing session and target data. 'mask': string. Path to fullbrain mask file. 'mask_vt': string list. Paths to nifti ventral temporal mask file. 'mask_face': string list. Paths to nifti ventral temporal mask file. 'mask_house': string list. Paths to nifti ventral temporal mask file. 'mask_face_little': string list. Paths to nifti ventral temporal mask file. 'mask_house_little': string list. Paths to nifti ventral temporal mask file. References ---------- `Haxby, J., Gobbini, M., Furey, M., Ishai, A., Schouten, J., and Pietrini, P. (2001). Distributed and overlapping representations of faces and objects in ventral temporal cortex. Science 293, 2425-2430.` Notes ----- PyMVPA provides a tutorial making use of this dataset: http://www.pymvpa.org/tutorial.html More information about its structure: http://dev.pymvpa.org/datadb/haxby2001.html See `additional information ` Run 8 in subject 5 does not contain any task labels. The anatomical image for subject 6 is unavailable. 
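Examples
--------
A sketch of typical use, added for illustration (not part of the original
docstring); the first call downloads the data, and the returned paths depend
on data_dir::

    from nilearn.datasets import fetch_haxby
    haxby = fetch_haxby(n_subjects=1)
    bold_path = haxby.func[0]               # 4D BOLD image, subject 1
    labels_path = haxby.session_target[0]   # session / target labels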
""" if n_subjects > 6: warnings.warn('Warning: there are only 6 subjects') n_subjects = 6 dataset_name = 'haxby2001' data_dir = _get_dataset_dir(dataset_name, data_dir=data_dir, verbose=verbose) # Get the mask url_mask = 'https://www.nitrc.org/frs/download.php/7868/mask.nii.gz' mask = _fetch_files(data_dir, [('mask.nii.gz', url_mask, {})], verbose=verbose)[0] # Dataset files if url is None: url = 'http://data.pymvpa.org/datasets/haxby2001/' md5sums = _fetch_files(data_dir, [('MD5SUMS', url + 'MD5SUMS', {})], verbose=verbose)[0] md5sums = _read_md5_sum_file(md5sums) # definition of dataset files sub_files = ['bold.nii.gz', 'labels.txt', 'mask4_vt.nii.gz', 'mask8b_face_vt.nii.gz', 'mask8b_house_vt.nii.gz', 'mask8_face_vt.nii.gz', 'mask8_house_vt.nii.gz', 'anat.nii.gz'] n_files = len(sub_files) files = [ (os.path.join('subj%d' % i, sub_file), url + 'subj%d-2010.01.14.tar.gz' % i, {'uncompress': True, 'md5sum': md5sums.get('subj%d-2010.01.14.tar.gz' % i, None)}) for i in range(1, n_subjects + 1) for sub_file in sub_files if not (sub_file == 'anat.nii.gz' and i == 6) # no anat for sub. 6 ] files = _fetch_files(data_dir, files, resume=resume, verbose=verbose) if n_subjects == 6: files.append(None) # None value because subject 6 has no anat kwargs = {} if fetch_stimuli: stimuli_files = [(os.path.join('stimuli', 'README'), url + 'stimuli-2010.01.14.tar.gz', {'uncompress': True})] readme = _fetch_files(data_dir, stimuli_files, resume=resume, verbose=verbose)[0] kwargs['stimuli'] = _tree(os.path.dirname(readme), pattern='*.jpg', dictionary=True) fdescr = _get_dataset_descr(dataset_name) # return the data return Bunch( anat=files[7::n_files], func=files[0::n_files], session_target=files[1::n_files], mask_vt=files[2::n_files], mask_face=files[3::n_files], mask_house=files[4::n_files], mask_face_little=files[5::n_files], mask_house_little=files[6::n_files], mask=mask, description=fdescr, **kwargs) def fetch_nyu_rest(n_subjects=None, sessions=[1], data_dir=None, resume=True, verbose=1): """Download and loads the NYU resting-state test-retest dataset. Parameters ---------- n_subjects: int, optional The number of subjects to load. If None is given, all the subjects are used. sessions: iterable of int, optional The sessions to load. Load only the first session by default. data_dir: string, optional Path of the data directory. Used to force data storage in a specified location. Default: None Returns ------- data: sklearn.datasets.base.Bunch Dictionary-like object, the interest attributes are : 'func': string list. Paths to functional images. 'anat_anon': string list. Paths to anatomic images. 'anat_skull': string. Paths to skull-stripped images. 'session': numpy array. List of ids corresponding to images sessions. Notes ------ This dataset is composed of 3 sessions of 26 participants (11 males). For each session, three sets of data are available: - anatomical: * anonymized data (defaced thanks to BIRN defacer) * skullstripped data (using 3DSkullStrip from AFNI) - functional For each participant, 3 resting-state scans of 197 continuous EPI functional volumes were collected : - 39 slices - matrix = 64 x 64 - acquisition voxel size = 3 x 3 x 3 mm Sessions 2 and 3 were conducted in a single scan session, 45 min apart, and were 5-16 months after Scan 1. 
All details about this dataset can be found here: http://cercor.oxfordjournals.org/content/19/10/2209.full References ---------- :Documentation: http://www.nitrc.org/docman/?group_id=274 :Download: http://www.nitrc.org/frs/?group_id=274 :Paper to cite: `The Resting Brain: Unconstrained yet Reliable`, Z. Shehzad, A.M.C. Kelly, P.T. Reiss, D.G. Gee, K. Gotimer, L.Q. Uddin, S.H. Lee, D.S. Margulies, A.K. Roy, B.B. Biswal, E. Petkova, F.X. Castellanos and M.P. Milham. :Other references: * `The oscillating brain: Complex and Reliable`, X-N. Zuo, A. Di Martino, C. Kelly, Z. Shehzad, D.G. Gee, D.F. Klein, F.X. Castellanos, B.B. Biswal, M.P. Milham * `Reliable intrinsic connectivity networks: Test-retest evaluation using ICA and dual regression approach`, X-N. Zuo, C. Kelly, J.S. Adelstein, D.F. Klein, F.X. Castellanos, M.P. Milham """ fa1 = 'http://www.nitrc.org/frs/download.php/1071/NYU_TRT_session1a.tar.gz' fb1 = 'http://www.nitrc.org/frs/download.php/1072/NYU_TRT_session1b.tar.gz' fa2 = 'http://www.nitrc.org/frs/download.php/1073/NYU_TRT_session2a.tar.gz' fb2 = 'http://www.nitrc.org/frs/download.php/1074/NYU_TRT_session2b.tar.gz' fa3 = 'http://www.nitrc.org/frs/download.php/1075/NYU_TRT_session3a.tar.gz' fb3 = 'http://www.nitrc.org/frs/download.php/1076/NYU_TRT_session3b.tar.gz' fa1_opts = {'uncompress': True, 'move': os.path.join('session1', 'NYU_TRT_session1a.tar.gz')} fb1_opts = {'uncompress': True, 'move': os.path.join('session1', 'NYU_TRT_session1b.tar.gz')} fa2_opts = {'uncompress': True, 'move': os.path.join('session2', 'NYU_TRT_session2a.tar.gz')} fb2_opts = {'uncompress': True, 'move': os.path.join('session2', 'NYU_TRT_session2b.tar.gz')} fa3_opts = {'uncompress': True, 'move': os.path.join('session3', 'NYU_TRT_session3a.tar.gz')} fb3_opts = {'uncompress': True, 'move': os.path.join('session3', 'NYU_TRT_session3b.tar.gz')} p_anon = os.path.join('anat', 'mprage_anonymized.nii.gz') p_skull = os.path.join('anat', 'mprage_skullstripped.nii.gz') p_func = os.path.join('func', 'lfo.nii.gz') subs_a = ['sub05676', 'sub08224', 'sub08889', 'sub09607', 'sub14864', 'sub18604', 'sub22894', 'sub27641', 'sub33259', 'sub34482', 'sub36678', 'sub38579', 'sub39529'] subs_b = ['sub45463', 'sub47000', 'sub49401', 'sub52738', 'sub55441', 'sub58949', 'sub60624', 'sub76987', 'sub84403', 'sub86146', 'sub90179', 'sub94293'] # Generate the list of files by session anat_anon_files = [ [(os.path.join('session1', sub, p_anon), fa1, fa1_opts) for sub in subs_a] + [(os.path.join('session1', sub, p_anon), fb1, fb1_opts) for sub in subs_b], [(os.path.join('session2', sub, p_anon), fa2, fa2_opts) for sub in subs_a] + [(os.path.join('session2', sub, p_anon), fb2, fb2_opts) for sub in subs_b], [(os.path.join('session3', sub, p_anon), fa3, fa3_opts) for sub in subs_a] + [(os.path.join('session3', sub, p_anon), fb3, fb3_opts) for sub in subs_b]] anat_skull_files = [ [(os.path.join('session1', sub, p_skull), fa1, fa1_opts) for sub in subs_a] + [(os.path.join('session1', sub, p_skull), fb1, fb1_opts) for sub in subs_b], [(os.path.join('session2', sub, p_skull), fa2, fa2_opts) for sub in subs_a] + [(os.path.join('session2', sub, p_skull), fb2, fb2_opts) for sub in subs_b], [(os.path.join('session3', sub, p_skull), fa3, fa3_opts) for sub in subs_a] + [(os.path.join('session3', sub, p_skull), fb3, fb3_opts) for sub in subs_b]] func_files = [ [(os.path.join('session1', sub, p_func), fa1, fa1_opts) for sub in subs_a] + [(os.path.join('session1', sub, p_func), fb1, fb1_opts) for sub in subs_b], [(os.path.join('session2', sub,
p_func), fa2, fa2_opts) for sub in subs_a] + [(os.path.join('session2', sub, p_func), fb2, fb2_opts) for sub in subs_b], [(os.path.join('session3', sub, p_func), fa3, fa3_opts) for sub in subs_a] + [(os.path.join('session3', sub, p_func), fb3, fb3_opts) for sub in subs_b]] max_subjects = len(subs_a) + len(subs_b) # Check arguments if n_subjects is None: n_subjects = max_subjects if n_subjects > max_subjects: warnings.warn('Warning: there are only %d subjects' % max_subjects) n_subjects = max_subjects anat_anon = [] anat_skull = [] func = [] session = [] for i in sessions: if not (i in [1, 2, 3]): raise ValueError('NYU dataset session id must be in [1, 2, 3]') anat_anon += anat_anon_files[i - 1][:n_subjects] anat_skull += anat_skull_files[i - 1][:n_subjects] func += func_files[i - 1][:n_subjects] session += [i] * n_subjects dataset_name = 'nyu_rest' data_dir = _get_dataset_dir(dataset_name, data_dir=data_dir, verbose=verbose) anat_anon = _fetch_files(data_dir, anat_anon, resume=resume, verbose=verbose) anat_skull = _fetch_files(data_dir, anat_skull, resume=resume, verbose=verbose) func = _fetch_files(data_dir, func, resume=resume, verbose=verbose) fdescr = _get_dataset_descr(dataset_name) return Bunch(anat_anon=anat_anon, anat_skull=anat_skull, func=func, session=session, description=fdescr) def fetch_adhd(n_subjects=30, data_dir=None, url=None, resume=True, verbose=1): """Download and load the ADHD resting-state dataset. Parameters ---------- n_subjects: int, optional The number of subjects to load, from a maximum of 40 subjects. By default, 30 subjects will be loaded. If None is given, all 40 subjects will be loaded. data_dir: string, optional Path of the data directory. Used to force data storage in a specified location. Default: None url: string, optional Override download URL. Used for test only (or if you set up a mirror of the data). Default: None
Returns ------- data: sklearn.datasets.base.Bunch Dictionary-like object, the interest attributes are : - 'func': Paths to functional resting-state images - 'phenotypic': Phenotypic information for each selected subject - 'confounds': CSV files containing the nuisance variables References ---------- :Download: ftp://www.nitrc.org/fcon_1000/htdocs/indi/adhd200/sites/ADHD200_40sub_preprocessed.tgz """ if url is None: url = 'https://www.nitrc.org/frs/download.php/' # Preliminary checks and declarations dataset_name = 'adhd' data_dir = _get_dataset_dir(dataset_name, data_dir=data_dir, verbose=verbose) ids = ['0010042', '0010064', '0010128', '0021019', '0023008', '0023012', '0027011', '0027018', '0027034', '0027037', '1019436', '1206380', '1418396', '1517058', '1552181', '1562298', '1679142', '2014113', '2497695', '2950754', '3007585', '3154996', '3205761', '3520880', '3624598', '3699991', '3884955', '3902469', '3994098', '4016887', '4046678', '4134561', '4164316', '4275075', '6115230', '7774305', '8409791', '8697774', '9744150', '9750701'] nitrc_ids = range(7782, 7822) max_subjects = len(ids) if n_subjects is None: n_subjects = max_subjects if n_subjects > max_subjects: warnings.warn('Warning: there are only %d subjects' % max_subjects) n_subjects = max_subjects ids = ids[:n_subjects] nitrc_ids = nitrc_ids[:n_subjects] opts = dict(uncompress=True) # Dataset description fdescr = _get_dataset_descr(dataset_name) # First, get the metadata phenotypic = ('ADHD200_40subs_motion_parameters_and_phenotypics.csv', url + '7781/adhd40_metadata.tgz', opts) phenotypic = _fetch_files(data_dir, [phenotypic], resume=resume, verbose=verbose)[0] # Load the csv file phenotypic = np.genfromtxt(phenotypic, names=True, delimiter=',', dtype=None) # Keep phenotypic information for selected subjects int_ids = np.asarray(ids, dtype=int) phenotypic = phenotypic[[np.where(phenotypic['Subject'] == i)[0][0] for i in int_ids]] # Download dataset files archives = [url + '%i/adhd40_%s.tgz' % (ni, ii) for ni, ii in zip(nitrc_ids, ids)] functionals = ['data/%s/%s_rest_tshift_RPI_voreg_mni.nii.gz' % (i, i) for i in ids] confounds = ['data/%s/%s_regressors.csv' % (i, i) for i in ids] functionals = _fetch_files( data_dir, zip(functionals, archives, (opts,) * n_subjects), resume=resume, verbose=verbose) confounds = _fetch_files( data_dir, zip(confounds, archives, (opts,) * n_subjects), resume=resume, verbose=verbose) return Bunch(func=functionals, confounds=confounds, phenotypic=phenotypic, description=fdescr) def fetch_miyawaki2008(data_dir=None, url=None, resume=True, verbose=1): """Download and load the Miyawaki et al. 2008 dataset (153MB). Returns ------- data: Bunch Dictionary-like object, the interest attributes are : - 'func': string list Paths to nifti file with bold data - 'label': string list Paths to text file containing session and target data - 'mask': string Path to nifti mask file to define target volume in visual cortex - 'background': string Path to nifti file containing a background image usable for plotting the miyawaki images. References ---------- `Visual image reconstruction from human brain activity using a combination of multiscale local image decoders`, Miyawaki, Y., Uchida, H., Yamashita, O., Sato, M. A., Morito, Y., Tanabe, H. C., ... & Kamitani, Y. (2008). Neuron, 60(5), 915-929.
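    Examples
    --------
    A minimal usage sketch (not run here, since it downloads ~153MB)::

        from nilearn.datasets import fetch_miyawaki2008
        miyawaki = fetch_miyawaki2008()
        len(miyawaki.func)  # 32 functional runs (12 figure + 20 random)
        miyawaki.mask       # path to the visual-cortex mask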
Notes ----- This dataset is available on the brainliner website, where additional information can be found. """ if url is None: url = 'https://www.nitrc.org/frs/download.php' \ '/8486/miyawaki2008.tgz?i_agree=1&download_now=1' opts = {'uncompress': True} # Dataset files # Functional MRI: # * 20 random scans (usually used for training) # * 12 figure scans (usually used for testing) func_figure = [(os.path.join('func', 'data_figure_run%02d.nii.gz' % i), url, opts) for i in range(1, 13)] func_random = [(os.path.join('func', 'data_random_run%02d.nii.gz' % i), url, opts) for i in range(1, 21)] # Labels, 10x10 patches, stimuli shown to the subject: # * 20 random labels # * 12 figure labels (letters and shapes) label_filename = 'data_%s_run%02d_label.csv' label_figure = [(os.path.join('label', label_filename % ('figure', i)), url, opts) for i in range(1, 13)] label_random = [(os.path.join('label', label_filename % ('random', i)), url, opts) for i in range(1, 21)] # Masks file_mask = [ 'mask.nii.gz', 'LHlag0to1.nii.gz', 'LHlag10to11.nii.gz', 'LHlag1to2.nii.gz', 'LHlag2to3.nii.gz', 'LHlag3to4.nii.gz', 'LHlag4to5.nii.gz', 'LHlag5to6.nii.gz', 'LHlag6to7.nii.gz', 'LHlag7to8.nii.gz', 'LHlag8to9.nii.gz', 'LHlag9to10.nii.gz', 'LHV1d.nii.gz', 'LHV1v.nii.gz', 'LHV2d.nii.gz', 'LHV2v.nii.gz', 'LHV3A.nii.gz', 'LHV3.nii.gz', 'LHV4v.nii.gz', 'LHVP.nii.gz', 'RHlag0to1.nii.gz', 'RHlag10to11.nii.gz', 'RHlag1to2.nii.gz', 'RHlag2to3.nii.gz', 'RHlag3to4.nii.gz', 'RHlag4to5.nii.gz', 'RHlag5to6.nii.gz', 'RHlag6to7.nii.gz', 'RHlag7to8.nii.gz', 'RHlag8to9.nii.gz', 'RHlag9to10.nii.gz', 'RHV1d.nii.gz', 'RHV1v.nii.gz', 'RHV2d.nii.gz', 'RHV2v.nii.gz', 'RHV3A.nii.gz', 'RHV3.nii.gz', 'RHV4v.nii.gz', 'RHVP.nii.gz' ] file_mask = [(os.path.join('mask', m), url, opts) for m in file_mask] file_names = func_figure + func_random + \ label_figure + label_random + \ file_mask dataset_name = 'miyawaki2008' data_dir = _get_dataset_dir(dataset_name, data_dir=data_dir, verbose=verbose) files = _fetch_files(data_dir, file_names, resume=resume, verbose=verbose) # Fetch the background image bg_img = _fetch_files(data_dir, [('bg.nii.gz', url, opts)], resume=resume, verbose=verbose)[0] fdescr = _get_dataset_descr(dataset_name) # Return the data return Bunch( func=files[:32], label=files[32:64], mask=files[64], mask_roi=files[65:], background=bg_img, description=fdescr) def fetch_localizer_contrasts(contrasts, n_subjects=None, get_tmaps=False, get_masks=False, get_anats=False, data_dir=None, url=None, resume=True, verbose=1): """Download and load Brainomics Localizer dataset (94 subjects). "The Functional Localizer is a simple and fast acquisition procedure based on a 5-minute functional magnetic resonance imaging (fMRI) sequence that can be run as easily and as systematically as an anatomical scan. This protocol captures the cerebral bases of auditory and visual perception, motor actions, reading, language comprehension and mental calculation at an individual level. Individual functional maps are reliable and quite precise. The procedure is described in more detail on the Functional Localizer page." (see http://brainomics.cea.fr/localizer/) "Scientific results obtained using this dataset are described in Pinel et al., 2007" [1] Parameters ---------- contrasts: list of str The contrasts to be fetched (for all 94 subjects available).
Allowed values are:: {"checkerboard", "horizontal checkerboard", "vertical checkerboard", "horizontal vs vertical checkerboard", "vertical vs horizontal checkerboard", "sentence listening", "sentence reading", "sentence listening and reading", "sentence reading vs checkerboard", "calculation (auditory cue)", "calculation (visual cue)", "calculation (auditory and visual cue)", "calculation (auditory cue) vs sentence listening", "calculation (visual cue) vs sentence reading", "calculation vs sentences", "calculation (auditory cue) and sentence listening", "calculation (visual cue) and sentence reading", "calculation and sentence listening/reading", "calculation (auditory cue) and sentence listening vs " "calculation (visual cue) and sentence reading", "calculation (visual cue) and sentence reading vs checkerboard", "calculation and sentence listening/reading vs button press", "left button press (auditory cue)", "left button press (visual cue)", "left button press", "left vs right button press", "right button press (auditory cue)", "right button press (visual cue)", "right button press", "right vs left button press", "button press (auditory cue) vs sentence listening", "button press (visual cue) vs sentence reading", "button press vs calculation and sentence listening/reading"} or equivalently one can use the original names:: {"checkerboard", "horizontal checkerboard", "vertical checkerboard", "horizontal vs vertical checkerboard", "vertical vs horizontal checkerboard", "auditory sentences", "visual sentences", "auditory&visual sentences", "visual sentences vs checkerboard", "auditory calculation", "visual calculation", "auditory&visual calculation", "auditory calculation vs auditory sentences", "visual calculation vs sentences", "auditory&visual calculation vs sentences", "auditory processing", "visual processing", "visual processing vs auditory processing", "auditory processing vs visual processing", "visual processing vs checkerboard", "cognitive processing vs motor", "left auditory click", "left visual click", "left auditory&visual click", "left auditory & visual click vs right auditory&visual click", "right auditory click", "right visual click", "right auditory&visual click", "right auditory & visual click vs left auditory&visual click", "auditory click vs auditory sentences", "visual click vs visual sentences", "auditory&visual motor vs cognitive processing"} n_subjects: int or list, optional The number or list of subjects to load. If None is given, all 94 subjects are used. get_tmaps: boolean Whether t maps should be fetched or not. get_masks: boolean Whether individual masks should be fetched or not. get_anats: boolean Whether individual structural images should be fetched or not. data_dir: string, optional Path of the data directory. Used to force data storage in a specified location. url: string, optional Override download URL. Used for test only (or if you set up a mirror of the data). resume: bool Whether to resume download of a partly-downloaded file. verbose: int Verbosity level (0 means no message). Returns ------- data: Bunch Dictionary-like object, the interest attributes are : - 'cmaps': string list Paths to nifti contrast maps - 'tmaps': string list (if 'get_tmaps' set to True) Paths to nifti t maps - 'masks': string list Paths to nifti files corresponding to the subjects individual masks - 'anats': string Path to nifti files corresponding to the subjects structural images References ---------- Pinel, Philippe, et al. "Fast reproducible identification and large-scale databasing of individual functional cognitive networks." BMC neuroscience 8.1 (2007): 91.
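    Examples
    --------
    A minimal usage sketch (not run here, since it downloads data from the
    Brainomics server)::

        from nilearn.datasets import fetch_localizer_contrasts
        localizer = fetch_localizer_contrasts(
            ["left vs right button press"], n_subjects=2, get_tmaps=True)
        localizer.cmaps[0]  # path to the first subject's contrast map
        localizer.tmaps[0]  # path to the first subject's t map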
"Fast reproducible identification and large-scale databasing of individual functional cognitive networks." BMC neuroscience 8.1 (2007): 91. See Also --------- nilearn.datasets.fetch_localizer_calculation_task nilearn.datasets.fetch_localizer_button_task """ if isinstance(contrasts, _basestring): raise ValueError('Contrasts should be a list of strings, but ' 'a single string was given: "%s"' % contrasts) if n_subjects is None: n_subjects = 94 # 94 subjects available if (isinstance(n_subjects, numbers.Number) and ((n_subjects > 94) or (n_subjects < 1))): warnings.warn("Wrong value for \'n_subjects\' (%d). The maximum " "value will be used instead (\'n_subjects=94\')") n_subjects = 94 # 94 subjects available # we allow the user to use alternatives to Brainomics contrast names contrast_name_wrapper = { # Checkerboard "checkerboard": "checkerboard", "horizontal checkerboard": "horizontal checkerboard", "vertical checkerboard": "vertical checkerboard", "horizontal vs vertical checkerboard": "horizontal vs vertical checkerboard", "vertical vs horizontal checkerboard": "vertical vs horizontal checkerboard", # Sentences "sentence listening": "auditory sentences", "sentence reading": "visual sentences", "sentence listening and reading": "auditory&visual sentences", "sentence reading vs checkerboard": "visual sentences vs checkerboard", # Calculation "calculation (auditory cue)": "auditory calculation", "calculation (visual cue)": "visual calculation", "calculation (auditory and visual cue)": "auditory&visual calculation", "calculation (auditory cue) vs sentence listening": "auditory calculation vs auditory sentences", "calculation (visual cue) vs sentence reading": "visual calculation vs sentences", "calculation vs sentences": "auditory&visual calculation vs sentences", # Calculation + Sentences "calculation (auditory cue) and sentence listening": "auditory processing", "calculation (visual cue) and sentence reading": "visual processing", "calculation (visual cue) and sentence reading vs " "calculation (auditory cue) and sentence listening": "visual processing vs auditory processing", "calculation (auditory cue) and sentence listening vs " "calculation (visual cue) and sentence reading": "auditory processing vs visual processing", "calculation (visual cue) and sentence reading vs checkerboard": "visual processing vs checkerboard", "calculation and sentence listening/reading vs button press": "cognitive processing vs motor", # Button press "left button press (auditory cue)": "left auditory click", "left button press (visual cue)": "left visual click", "left button press": "left auditory&visual click", "left vs right button press": "left auditory & visual click vs " + "right auditory&visual click", "right button press (auditory cue)": "right auditory click", "right button press (visual cue)": "right visual click", "right button press": "right auditory & visual click", "right vs left button press": "right auditory & visual click " + "vs left auditory&visual click", "button press (auditory cue) vs sentence listening": "auditory click vs auditory sentences", "button press (visual cue) vs sentence reading": "visual click vs visual sentences", "button press vs calculation and sentence listening/reading": "auditory&visual motor vs cognitive processing"} allowed_contrasts = list(contrast_name_wrapper.values()) # convert contrast names contrasts_wrapped = [] # get a unique ID for each contrast. It is used to give a unique name to # each download file and avoid name collisions. 
contrasts_indices = [] for contrast in contrasts: if contrast in allowed_contrasts: contrasts_wrapped.append(contrast) contrasts_indices.append(allowed_contrasts.index(contrast)) elif contrast in contrast_name_wrapper: name = contrast_name_wrapper[contrast] contrasts_wrapped.append(name) contrasts_indices.append(allowed_contrasts.index(name)) else: raise ValueError("Contrast \'%s\' is not available" % contrast) # It is better to perform several small requests than a big one because: # - Brainomics server has no cache (can lead to timeout while the archive # is generated on the remote server) # - Local (cached) version of the files can be checked for each contrast opts = {'uncompress': True} if isinstance(n_subjects, numbers.Number): subject_mask = np.arange(1, n_subjects + 1) subject_id_max = "S%02d" % n_subjects else: subject_mask = np.array(n_subjects) subject_id_max = "S%02d" % np.max(n_subjects) n_subjects = len(n_subjects) subject_ids = ["S%02d" % s for s in subject_mask] data_types = ["c map"] if get_tmaps: data_types.append("t map") rql_types = str.join(", ", ["\"%s\"" % x for x in data_types]) root_url = "http://brainomics.cea.fr/localizer/" base_query = ("Any X,XT,XL,XI,XF,XD WHERE X is Scan, X type XT, " "X concerns S, " "X label XL, X identifier XI, " "X format XF, X description XD, " 'S identifier <= "%s", ' % (subject_id_max, ) + 'X type IN(%(types)s), X label "%(label)s"') urls = ["%sbrainomics_data_%d.zip?rql=%s&vid=data-zip" % (root_url, i, _urllib.parse.quote(base_query % {"types": rql_types, "label": c}, safe=',()')) for c, i in zip(contrasts_wrapped, contrasts_indices)] filenames = [] for subject_id in subject_ids: for data_type in data_types: for contrast_id, contrast in enumerate(contrasts_wrapped): name_aux = str.replace( str.join('_', [data_type, contrast]), ' ', '_') file_path = os.path.join( "brainomics_data", subject_id, "%s.nii.gz" % name_aux) file_tarball_url = urls[contrast_id] filenames.append((file_path, file_tarball_url, opts)) # Fetch masks if asked by user if get_masks: urls.append("%sbrainomics_data_masks.zip?rql=%s&vid=data-zip" % (root_url, _urllib.parse.quote(base_query % {"types": '"boolean mask"', "label": "mask"}, safe=',()'))) for subject_id in subject_ids: file_path = os.path.join( "brainomics_data", subject_id, "boolean_mask_mask.nii.gz") file_tarball_url = urls[-1] filenames.append((file_path, file_tarball_url, opts)) # Fetch anats if asked by user if get_anats: urls.append("%sbrainomics_data_anats.zip?rql=%s&vid=data-zip" % (root_url, _urllib.parse.quote(base_query % {"types": '"normalized T1"', "label": "anatomy"}, safe=',()'))) for subject_id in subject_ids: file_path = os.path.join( "brainomics_data", subject_id, "normalized_T1_anat_defaced.nii.gz") file_tarball_url = urls[-1] filenames.append((file_path, file_tarball_url, opts)) # Fetch subject characteristics (separated in two files) if url is None: url_csv = ("%sdataset/cubicwebexport.csv?rql=%s&vid=csvexport" % (root_url, _urllib.parse.quote("Any X WHERE X is Subject"))) url_csv2 = ("%sdataset/cubicwebexport2.csv?rql=%s&vid=csvexport" % (root_url, _urllib.parse.quote("Any X,XI,XD WHERE X is QuestionnaireRun, " "X identifier XI, X datetime " "XD", safe=',') )) else: url_csv = "%s/cubicwebexport.csv" % url url_csv2 = "%s/cubicwebexport2.csv" % url filenames += [("cubicwebexport.csv", url_csv, {}), ("cubicwebexport2.csv", url_csv2, {})] # Actual data fetching dataset_name = 'brainomics_localizer' data_dir = _get_dataset_dir(dataset_name, data_dir=data_dir, verbose=verbose) fdescr = 
_get_dataset_descr(dataset_name) files = _fetch_files(data_dir, filenames, verbose=verbose) anats = None masks = None tmaps = None # combine data from both covariates files into one single recarray from numpy.lib.recfunctions import join_by ext_vars_file2 = files[-1] csv_data2 = np.recfromcsv(ext_vars_file2, delimiter=';') files = files[:-1] ext_vars_file = files[-1] csv_data = np.recfromcsv(ext_vars_file, delimiter=';') files = files[:-1] # join_by sorts the output along the key csv_data = join_by('subject_id', csv_data, csv_data2, usemask=False, asrecarray=True)[subject_mask - 1] if get_anats: anats = files[-n_subjects:] files = files[:-n_subjects] if get_masks: masks = files[-n_subjects:] files = files[:-n_subjects] if get_tmaps: tmaps = files[1::2] files = files[::2] return Bunch(cmaps=files, tmaps=tmaps, masks=masks, anats=anats, ext_vars=csv_data, description=fdescr) def fetch_localizer_calculation_task(n_subjects=1, data_dir=None, url=None, verbose=1): """Fetch calculation task contrast maps from the localizer. Parameters ---------- n_subjects: int, optional The number of subjects to load. If None is given, all 94 subjects are used. data_dir: string, optional Path of the data directory. Used to force data storage in a specified location. url: string, optional Override download URL. Used for test only (or if you set up a mirror of the data). verbose: int, optional verbosity level (0 means no message). Returns ------- data: Bunch Dictionary-like object, the interest attributes are : 'cmaps': string list, giving paths to nifti contrast maps Notes ------ This function is only a thin wrapper around fetch_localizer_contrasts, meant to keep the examples easy to read and understand. The 'calculation (auditory and visual cue)' contrast is used. See Also --------- nilearn.datasets.fetch_localizer_button_task nilearn.datasets.fetch_localizer_contrasts """ data = fetch_localizer_contrasts(["calculation (auditory and visual cue)"], n_subjects=n_subjects, get_tmaps=False, get_masks=False, get_anats=False, data_dir=data_dir, url=url, resume=True, verbose=verbose) data.pop('tmaps') data.pop('masks') data.pop('anats') return data def fetch_localizer_button_task(n_subjects=[2, ], data_dir=None, url=None, get_anats=False, verbose=1): """Fetch left vs right button press contrast maps from the localizer. Parameters ---------- n_subjects: int or list, optional The number or list of subjects to load. If None is given, all 94 subjects are used. data_dir: string, optional Path of the data directory. Used to force data storage in a specified location. url: string, optional Override download URL. Used for test only (or if you set up a mirror of the data). get_anats: boolean Whether individual structural images should be fetched or not. verbose: int, optional verbosity level (0 means no message). Returns ------- data: Bunch Dictionary-like object, the interest attributes are : 'cmaps': string list, giving paths to nifti contrast maps Notes ------ This function is only a thin wrapper around fetch_localizer_contrasts, meant to keep the examples easy to read and understand. The 'left vs right button press' contrast is used.
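    Examples
    --------
    A minimal usage sketch (not run here, since it downloads data)::

        from nilearn.datasets import fetch_localizer_button_task
        button = fetch_localizer_button_task()
        button.cmaps[0]  # path to the 'left vs right button press' map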
See Also --------- nilearn.datasets.fetch_localizer_calculation_task nilearn.datasets.fetch_localizer_contrasts """ data = fetch_localizer_contrasts(["left vs right button press"], n_subjects=n_subjects, get_tmaps=True, get_masks=False, get_anats=get_anats, data_dir=data_dir, url=url, resume=True, verbose=verbose) return data def fetch_abide_pcp(data_dir=None, n_subjects=None, pipeline='cpac', band_pass_filtering=False, global_signal_regression=False, derivatives=['func_preproc'], quality_checked=True, url=None, verbose=1, **kwargs): """ Fetch the ABIDE dataset. Fetch the Autism Brain Imaging Data Exchange (ABIDE) dataset according to criteria that can be passed as parameters. Note that this is the preprocessed version of ABIDE provided by the Preprocessed Connectomes Project (PCP). Parameters ---------- data_dir: string, optional Path of the data directory. Used to force data storage in a specified location. Default: None n_subjects: int, optional The number of subjects to load. If None is given, all available subjects are used. pipeline: string, optional Possible pipelines are "ccs", "cpac", "dparsf" and "niak" band_pass_filtering: boolean, optional Due to controversies in the literature, band pass filtering is optional. If true, signal is band filtered between 0.01Hz and 0.1Hz. global_signal_regression: boolean, optional Indicates if global signal regression should be applied on the signals. derivatives: string list, optional Types of downloaded files. Possible values are: alff, degree_binarize, degree_weighted, dual_regression, eigenvector_binarize, eigenvector_weighted, falff, func_mask, func_mean, func_preproc, lfcd, reho, rois_aal, rois_cc200, rois_cc400, rois_dosenbach160, rois_ez, rois_ho, rois_tt, and vmhc. Please refer to the PCP site for more details. quality_checked: boolean, optional If true (default), restrict the list of subjects to the ones that passed quality assessment for all raters. kwargs: parameter list, optional Any extra keyword argument will be used to filter downloaded subjects according to the CSV phenotypic file. Some examples of filters are indicated below. SUB_ID: list of integers in [50001, 50607], optional Ids of the subjects to be loaded. DX_GROUP: integer in {1, 2}, optional 1 is autism, 2 is control DSM_IV_TR: integer in [0, 4], optional 0 is control, 1 is autism, 2 is Asperger, 3 is PDD-NOS, 4 is Asperger or PDD-NOS AGE_AT_SCAN: float in [6.47, 64], optional Age of the subject SEX: integer in {1, 2}, optional 1 is male, 2 is female HANDEDNESS_CATEGORY: string in {'R', 'L', 'Mixed', 'Ambi'}, optional R = Right, L = Left, Ambi = Ambidextrous HANDEDNESS_SCORE: integer in [-100, 100], optional Positive = Right, Negative = Left, 0 = Ambidextrous Notes ----- Code and description of the preprocessing pipelines are provided on the PCP website. References ---------- Nielsen, Jared A., et al. "Multisite functional connectivity MRI classification of autism: ABIDE results." Frontiers in human neuroscience 7 (2013). """ # People keep getting it wrong and submitting a string instead of a # list of strings. We'll make their life easy.
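# For instance (illustrative): fetch_abide_pcp(derivatives='func_preproc')
# is silently promoted to derivatives=['func_preproc'] by the check below.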
if isinstance(derivatives, _basestring): derivatives = [derivatives, ] # Parameter check for derivative in derivatives: if derivative not in [ 'alff', 'degree_binarize', 'degree_weighted', 'dual_regression', 'eigenvector_binarize', 'eigenvector_weighted', 'falff', 'func_mask', 'func_mean', 'func_preproc', 'lfcd', 'reho', 'rois_aal', 'rois_cc200', 'rois_cc400', 'rois_dosenbach160', 'rois_ez', 'rois_ho', 'rois_tt', 'vmhc']: raise KeyError('%s is not a valid derivative' % derivative) strategy = '' if not band_pass_filtering: strategy += 'no' strategy += 'filt_' if not global_signal_regression: strategy += 'no' strategy += 'global' # General file: phenotypic information dataset_name = 'ABIDE_pcp' data_dir = _get_dataset_dir(dataset_name, data_dir=data_dir, verbose=verbose) if url is None: url = ('https://s3.amazonaws.com/fcp-indi/data/Projects/' 'ABIDE_Initiative') if quality_checked: kwargs['qc_rater_1'] = b'OK' kwargs['qc_anat_rater_2'] = [b'OK', b'maybe'] kwargs['qc_func_rater_2'] = [b'OK', b'maybe'] kwargs['qc_anat_rater_3'] = b'OK' kwargs['qc_func_rater_3'] = b'OK' # Fetch the phenotypic file and load it csv = 'Phenotypic_V1_0b_preprocessed1.csv' path_csv = _fetch_files(data_dir, [(csv, url + '/' + csv, {})], verbose=verbose)[0] # Note: the phenotypic file contains strings with embedded commas, which # messes up numpy's csv loading. This is why we do a pass that replaces # the commas appearing inside double-quoted fields. This could be done # simply with pandas, but we don't want such a dependency ATM. # pheno = pandas.read_csv(path_csv).to_records() with open(path_csv, 'r') as pheno_f: pheno = ['i' + pheno_f.readline()] # This regexp replaces commas between double quotes for line in pheno_f: pheno.append(re.sub(r',(?=[^"]*"(?:[^"]*"[^"]*")*[^"]*$)', ";", line)) # bytes (encode()) needed for python 2/3 compat with numpy pheno = '\n'.join(pheno).encode() pheno = BytesIO(pheno) pheno = np.recfromcsv(pheno, comments='$', case_sensitive=True) # First, filter subjects with no filename pheno = pheno[pheno['FILE_ID'] != b'no_filename'] # Apply user defined filters user_filter = _filter_columns(pheno, kwargs) pheno = pheno[user_filter] # Go into specific data folder and url data_dir = os.path.join(data_dir, pipeline, strategy) url = '/'.join([url, 'Outputs', pipeline, strategy]) # Get the files results = {} file_ids = [file_id.decode() for file_id in pheno['FILE_ID']] if n_subjects is not None: file_ids = file_ids[:n_subjects] pheno = pheno[:n_subjects] results['description'] = _get_dataset_descr(dataset_name) results['phenotypic'] = pheno for derivative in derivatives: ext = '.1D' if derivative.startswith('rois') else '.nii.gz' files = [(file_id + '_' + derivative + ext, '/'.join([url, derivative, file_id + '_' + derivative + ext]), {}) for file_id in file_ids] files = _fetch_files(data_dir, files, verbose=verbose) # Load derivatives if needed if ext == '.1D': files = [np.loadtxt(f) for f in files] results[derivative] = files return Bunch(**results) def _load_mixed_gambles(zmap_imgs): """Ravel zmaps (one per subject) along the time axis, resulting in n_subjects * n_trials 3D niimgs, and then make a gain vector y of the same length.
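    For instance (illustrative numbers): with 16 subjects, each zmap image
    holding 48 trials, X is a list of 16 * 48 = 768 three-dimensional
    niimgs and y is the matching array of 768 gain levels.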
""" X = [] y = [] mask = [] for zmap_img in zmap_imgs: # load subject data this_X = zmap_img.get_data() affine = zmap_img.get_affine() finite_mask = np.all(np.isfinite(this_X), axis=-1) this_mask = np.logical_and(np.all(this_X != 0, axis=-1), finite_mask) this_y = np.array([np.arange(1, 9)] * 6).ravel() # gain levels if len(this_y) != this_X.shape[-1]: raise RuntimeError("%s: Expecting %i volumes, got %i!" % ( zmap_img, len(this_y), this_X.shape[-1])) # standardize subject data this_X -= this_X.mean(axis=-1)[..., np.newaxis] std = this_X.std(axis=-1) std[std == 0] = 1 this_X /= std[..., np.newaxis] # commit subject data X.append(this_X) y.extend(this_y) mask.append(this_mask) y = np.array(y) X = np.concatenate(X, axis=-1) mask = np.sum(mask, axis=0) > .5 * len(mask) mask = np.logical_and(mask, np.all(np.isfinite(X), axis=-1)) X = X[mask, :].T tmp = np.zeros(list(mask.shape) + [len(X)]) tmp[mask, :] = X.T mask_img = nibabel.Nifti1Image(mask.astype(np.int), affine) X = nibabel.four_to_three(nibabel.Nifti1Image(tmp, affine)) return X, y, mask_img def fetch_mixed_gambles(n_subjects=1, data_dir=None, url=None, resume=True, return_raw_data=False, verbose=0): """Fetch Jimura "mixed gambles" dataset. Parameters ---------- n_subjects: int, optional (default 1) The number of subjects to load. If None is given, all the subjects are used. data_dir: string, optional (default None) Path of the data directory. Used to force data storage in a specified location. Default: None. url: string, optional (default None) Override download URL. Used for test only (or if you setup a mirror of the data). resume: bool, optional (default True) If true, try resuming download if possible. verbose: int, optional (default 0) Defines the level of verbosity of the output. return_raw_data: bool, optional (default True) If false, then the data will transformed into and (X, y) pair, suitable for machine learning routines. X is a list of n_subjects * 48 Nifti1Image objects (where 48 is the number of trials), and y is an array of shape (n_subjects * 48,). smooth: float, or list of 3 floats, optional (default 0.) Size of smoothing kernel to apply to the loaded zmaps. Returns ------- data: Bunch Dictionary-like object, the interest attributes are : 'zmaps': string list Paths to realigned gain betamaps (one nifti per subject). 'gain': .. If make_Xy is true, this is a list of n_subjects * 48 Nifti1Image objects, else it is None. 'y': array of shape (n_subjects * 48,) or None If make_Xy is true, then this is an array of shape (n_subjects * 48,), else it is None. References ---------- [1] K. Jimura and R. Poldrack, "Analyses of regional-average activation and multivoxel pattern information tell complementary stories", Neuropsychologia, vol. 
""" if n_subjects > 16: warnings.warn('Warning: there are only 16 subjects!') n_subjects = 16 if url is None: url = ("https://www.nitrc.org/frs/download.php/7229/" "jimura_poldrack_2012_zmaps.zip") opts = dict(uncompress=True) files = [("zmaps%ssub%03i_zmaps.nii.gz" % (os.sep, (j + 1)), url, opts) for j in range(n_subjects)] data_dir = _get_dataset_dir('jimura_poldrack_2012_zmaps', data_dir=data_dir) zmap_fnames = _fetch_files(data_dir, files, resume=resume, verbose=verbose) subject_id = np.repeat(np.arange(n_subjects), 6 * 8) data = Bunch(zmaps=zmap_fnames, subject_id=subject_id) if not return_raw_data: X, y, mask_img = _load_mixed_gambles(check_niimg(data.zmaps, return_iterator=True)) data.zmaps, data.gain, data.mask_img = X, y, mask_img return data def fetch_megatrawls_netmats(dimensionality=100, timeseries='eigen_regression', matrices='partial_correlation', data_dir=None, resume=True, verbose=1): """Downloads and returns Network Matrices data from the MegaTrawls release in HCP. This data can be used to predict relationships between imaging data and non-imaging behavioural measures such as age, sex, education, etc. The network matrices are estimated from functional connectivity datasets of 461 subjects. Full technical details in [1] [2]. .. versionadded:: 0.2.2 Parameters ---------- dimensionality: int, optional Valid inputs are 25, 50, 100, 200, 300. By default, network matrices estimated using Group ICA brain parcellations of 100 components/dimensions will be returned. timeseries: str, optional Valid inputs are 'multiple_spatial_regression' or 'eigen_regression'. With the default, 'eigen_regression', matrices estimated from the first principal eigen component timeseries extracted from each subject's parcellations will be returned. With 'multiple_spatial_regression', matrices estimated from spatial-regressor-based timeseries will be returned instead. matrices: str, optional Valid inputs are 'full_correlation' or 'partial_correlation'. By default, partial correlation matrices will be returned; otherwise, full correlation matrices will be returned. data_dir: str, default is None, optional Path of the data directory. Used to force data storage in a specified location. resume: bool, default is True If True, resume the download of a partially downloaded file. verbose: int, default is 1 Verbosity level (0 means no message). Returns ------- data: Bunch dictionary-like object, the attributes are : - 'dimensions': int, consists of given input in dimensions. - 'timeseries': str, consists of given input in timeseries method. - 'matrices': str, consists of given type of specific matrices. - 'correlation_matrices': ndarray, consists of correlation matrices based on given type of matrices. Array size will depend on given dimensions (n, n). - 'description': data description References ---------- [1] Stephen Smith et al, HCP beta-release of the Functional Connectivity MegaTrawl. April 2015 "HCP500-MegaTrawl" release. https://db.humanconnectome.org/megatrawl/ [2] Smith, S.M. et al. Nat. Neurosci. 18, 1565-1567 (2015). [3] N.Filippini, et al. Distinct patterns of brain activity in young carriers of the APOE-e4 allele. Proc Natl Acad Sci USA (PNAS), 106:7209-7214, 2009. [4] S.Smith, et al. Methods for network modelling from high quality rfMRI data. Meeting of the Organization for Human Brain Mapping, 2014.
[5] J.X. O'Reilly et al. Distinct and overlapping functional zones in the cerebellum defined by resting state functional connectivity. Cerebral Cortex, 2009. Note: See description for terms & conditions on data usage. """ url = "http://www.nitrc.org/frs/download.php/8037/Megatrawls.tgz" opts = {'uncompress': True} error_message = "Invalid {0} input is provided: {1}, choose one of {2}" # standard dataset terms dimensionalities = [25, 50, 100, 200, 300] if dimensionality not in dimensionalities: raise ValueError(error_message.format('dimensionality', dimensionality, dimensionalities)) timeseries_methods = ['multiple_spatial_regression', 'eigen_regression'] if timeseries not in timeseries_methods: raise ValueError(error_message.format('timeseries', timeseries, timeseries_methods)) output_matrices_names = ['full_correlation', 'partial_correlation'] if matrices not in output_matrices_names: raise ValueError(error_message.format('matrices', matrices, output_matrices_names)) dataset_name = 'Megatrawls' data_dir = _get_dataset_dir(dataset_name, data_dir=data_dir, verbose=verbose) description = _get_dataset_descr(dataset_name) timeseries_map = dict(multiple_spatial_regression='ts2', eigen_regression='ts3') matrices_map = dict(full_correlation='Znet1.txt', partial_correlation='Znet2.txt') filepath = [(os.path.join( '3T_Q1-Q6related468_MSMsulc_d%d_%s' % (dimensionality, timeseries_map[timeseries]), matrices_map[matrices]), url, opts)] # Fetch all the files files = _fetch_files(data_dir, filepath, resume=resume, verbose=verbose) # Load the files into arrays correlation_matrices = csv_to_array(files[0]) return Bunch( dimensions=dimensionality, timeseries=timeseries, matrices=matrices, correlation_matrices=correlation_matrices, description=description) def fetch_cobre(n_subjects=10, data_dir=None, url=None, verbose=1): """Fetch COBRE datasets preprocessed using the NIAK 0.12.4 pipeline. Downloads and returns preprocessed resting state fMRI datasets and phenotypic information such as demographic and clinical variables, and a measure of frame displacement FD (an average FD for all the time frames left after censoring). For each subject, this function also returns .mat files which contain all the covariates that have been regressed out of the functional data, such as motion parameters, mean CSF signal, etc. They also contain a list of time frames that have been removed from the time series by censoring for high motion. NOTE: The number of time samples varies, as some samples have been removed if tagged with excessive motion. This means that the data is already time filtered. See output variable 'description' for more details. .. versionadded:: 0.2.3 Parameters ---------- n_subjects: int, optional The number of subjects to load from a maximum of 146 subjects. By default, 10 subjects will be loaded. If n_subjects=None, all subjects will be loaded. data_dir: str, optional Path to the data directory. Used to force data storage in a specified location. Default: None url: str, optional Override download url. Used for test only (or if you set up a mirror of the data). Default: None verbose: int, optional Verbosity level (0 means no message). Returns ------- data: Bunch Dictionary-like object, the attributes are: - 'func': string list Paths to Nifti images. - 'mat_files': string list Paths to .mat files of each subject. - 'phenotypic': ndarray Contains data of clinical variables, sex, age, FD. - 'description': data description of the release and references.
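    Examples
    --------
    A minimal usage sketch (not run here, since it downloads data from
    figshare)::

        from nilearn.datasets import fetch_cobre
        cobre = fetch_cobre(n_subjects=4)
        cobre.func[0]     # path to the first preprocessed fMRI run
        cobre.phenotypic  # recarray with id, sz, age, sex, fd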
Notes ----- More information about the dataset's structure: https://figshare.com/articles/COBRE_preprocessed_with_NIAK_0_12_4/1160600 """ if url is None: # Here we use the file that provides URL for all others url = "https://figshare.com/api/articles/1160600/15/files" dataset_name = 'cobre' data_dir = _get_dataset_dir(dataset_name, data_dir=data_dir, verbose=verbose) fdescr = _get_dataset_descr(dataset_name) # First, fetch the file that references all individual URLs files = _fetch_files(data_dir, [("files", url + "?offset=0&limit=300", {})], verbose=verbose)[0] files = json.load(open(files, 'r')) # Index files by name files_ = {} for f in files: files_[f['name']] = f files = files_ # Fetch the phenotypic file and load it csv_name = 'cobre_model_group.csv' csv_file = _fetch_files( data_dir, [(csv_name, files[csv_name]['downloadUrl'], {'md5': files[csv_name].get('md5', None), 'move': csv_name})], verbose=verbose)[0] # Load file in filename to numpy arrays names = ['id', 'sz', 'age', 'sex', 'fd'] csv_array = np.recfromcsv(csv_file, names=names, skip_header=True) # Change dtype of id and condition columns; the remaining columns are # read as floats csv_array = csv_array.astype( [('id', '|U17'), ('sz', '<f8'), ('age', '<f8'), ('sex', '<f8'), ('fd', '<f8')]) max_subjects = len(csv_array) if n_subjects is None: n_subjects = max_subjects if n_subjects > max_subjects: warnings.warn('Warning: there are only %d subjects' % max_subjects) n_subjects = max_subjects n_sz = np.ceil(float(n_subjects) / max_subjects * csv_array['sz'].sum()) n_ct = np.floor(float(n_subjects) / max_subjects * np.logical_not(csv_array['sz']).sum()) # First, restrict the csv files to the adequate number of subjects sz_ids = csv_array[csv_array['sz'] == 1.]['id'][:n_sz] ct_ids = csv_array[csv_array['sz'] == 0.]['id'][:n_ct] ids = np.hstack([sz_ids, ct_ids]) csv_array = csv_array[np.in1d(csv_array['id'], ids)] # Call fetch_files once per subject. func = [] mat = [] for i in ids: f = 'fmri_' + i + '_session1_run1.nii.gz' m = 'fmri_' + i + '_session1_run1_extra.mat' f, m = _fetch_files( data_dir, [(f, files[f]['downloadUrl'], {'md5': files[f].get('md5', None), 'move': f}), (m, files[m]['downloadUrl'], {'md5': files[m].get('md5', None), 'move': m}) ], verbose=verbose) func.append(f) mat.append(m) return Bunch(func=func, mat_files=mat, phenotypic=csv_array, description=fdescr)
nilearn/datasets/struct.py""" Downloading NeuroImaging datasets: structural datasets """ import warnings import os import numpy as np from scipy import ndimage from sklearn.datasets.base import Bunch from .utils import _get_dataset_dir, _fetch_files, _get_dataset_descr from .._utils import check_niimg, niimg from ..image import new_img_like _package_directory = os.path.dirname(os.path.abspath(__file__)) # Useful for the very simple examples MNI152_FILE_PATH = os.path.join(_package_directory, "data", "avg152T1_brain.nii.gz") def fetch_icbm152_2009(data_dir=None, url=None, resume=True, verbose=1): """Download and load the ICBM152 template (dated 2009) Parameters ---------- data_dir: string, optional Path of the data directory. Used to force data storage in a non- standard location. Default: None (meaning: default) url: string, optional Download URL of the dataset. Overwrite the default URL. Returns ------- data: sklearn.datasets.base.Bunch dictionary-like object, interest keys are: "t1", "t2", "t2_relax", "pd": anatomical images obtained with the given modality (resp. T1, T2, T2 relaxometry and proton density weighted). Values are file paths.
"eye_mask", "face_mask", "mask": use these images to mask out parts of mri images. Values are file paths. References ---------- VS Fonov, AC Evans, K Botteron, CR Almli, RC McKinstry, DL Collins and BDCG, "Unbiased average age-appropriate atlases for pediatric studies", NeuroImage,Volume 54, Issue 1, January 2011 VS Fonov, AC Evans, RC McKinstry, CR Almli and DL Collins, "Unbiased nonlinear average age-appropriate brain templates from birth to adulthood", NeuroImage, Volume 47, Supplement 1, July 2009, Page S102 Organization for Human Brain Mapping 2009 Annual Meeting. DL Collins, AP Zijdenbos, WFC Baare and AC Evans, "ANIMAL+INSECT: Improved Cortical Structure Segmentation", IPMI Lecture Notes in Computer Science, 1999, Volume 1613/1999, 210-223 Notes ----- For more information about this dataset's structure: http://www.bic.mni.mcgill.ca/ServicesAtlases/ICBM152NLin2009 """ if url is None: url = "http://www.bic.mni.mcgill.ca/~vfonov/icbm/2009/" \ "mni_icbm152_nlin_sym_09a_nifti.zip" opts = {'uncompress': True} keys = ("csf", "gm", "wm", "pd", "t1", "t2", "t2_relax", "eye_mask", "face_mask", "mask") filenames = [(os.path.join("mni_icbm152_nlin_sym_09a", name), url, opts) for name in ("mni_icbm152_csf_tal_nlin_sym_09a.nii", "mni_icbm152_gm_tal_nlin_sym_09a.nii", "mni_icbm152_wm_tal_nlin_sym_09a.nii", "mni_icbm152_pd_tal_nlin_sym_09a.nii", "mni_icbm152_t1_tal_nlin_sym_09a.nii", "mni_icbm152_t2_tal_nlin_sym_09a.nii", "mni_icbm152_t2_relx_tal_nlin_sym_09a.nii", "mni_icbm152_t1_tal_nlin_sym_09a_eye_mask.nii", "mni_icbm152_t1_tal_nlin_sym_09a_face_mask.nii", "mni_icbm152_t1_tal_nlin_sym_09a_mask.nii")] dataset_name = 'icbm152_2009' data_dir = _get_dataset_dir(dataset_name, data_dir=data_dir, verbose=verbose) sub_files = _fetch_files(data_dir, filenames, resume=resume, verbose=verbose) fdescr = _get_dataset_descr(dataset_name) params = dict([('description', fdescr)] + list(zip(keys, sub_files))) return Bunch(**params) def load_mni152_template(): """Load skullstripped 2mm version of the MNI152 originally distributed with FSL Returns ------- mni152_template: nibabel object corresponding to the template References ---------- VS Fonov, AC Evans, K Botteron, CR Almli, RC McKinstry, DL Collins and BDCG, Unbiased average age-appropriate atlases for pediatric studies, NeuroImage, Volume 54, Issue 1, January 2011, ISSN 1053-8119, DOI: 10.1016/j.neuroimage.2010.07.033 VS Fonov, AC Evans, RC McKinstry, CR Almli and DL Collins, Unbiased nonlinear average age-appropriate brain templates from birth to adulthood, NeuroImage, Volume 47, Supplement 1, July 2009, Page S102 Organization for Human Brain Mapping 2009 Annual Meeting, DOI: 10.1016/S1053-8119(09)70884-5 """ return check_niimg(MNI152_FILE_PATH) def load_mni152_brain_mask(): """Load brain mask from MNI152 T1 template .. versionadded:: 0.2.5 Returns ------- mask_img: Nifti-like mask image corresponding to grey and white matter. References ---------- Refer to load_mni152_template function for more information about the MNI152 T1 template See Also -------- nilearn.datasets.load_mni152_template for details about version of the MNI152 T1 template and related. """ # Load MNI template target_img = load_mni152_template() mask_voxels = (target_img.get_data() > 0).astype(int) mask_img = new_img_like(target_img, mask_voxels) return mask_img def fetch_icbm152_brain_gm_mask(data_dir=None, threshold=0.2, resume=True, verbose=1): """Downloads ICBM152 template first, then loads 'gm' mask image. .. 
versionadded:: 0.2.5 Parameters ---------- data_dir: str, optional Path of the data directory. Used to force storage in a specified location. Defaults to None. threshold: float, optional Values above this threshold are included in the mask image. Defaults to 0.2 (one fifth of the values). resume: bool, optional If True, try resuming partially downloaded data. Defaults to True. verbose: int, optional verbosity level (0 means no message). Returns ------- gm_mask_img: Nifti image Corresponding to brain grey matter from ICBM152 template. Notes ----- This function relies on the ICBM152 templates, from which we pick the grey matter template and threshold it at .2 to keep one fifth of the values. Then, a bit of post-processing is done, such as a binary closing operation, to obtain a more compact mask image. Note: It is advised to check the mask image with your own data processing. See Also -------- nilearn.datasets.fetch_icbm152_2009: for details regarding the ICBM152 template. nilearn.datasets.load_mni152_template: for details about version of MNI152 template and related. """ # Fetching ICBM152 grey matter mask image icbm = fetch_icbm152_2009(data_dir=data_dir, resume=resume, verbose=verbose) gm = icbm['gm'] gm_img = check_niimg(gm) gm_data = niimg._safe_get_data(gm_img) # getting one fifth of the values gm_mask = (gm_data > threshold) gm_mask = ndimage.binary_closing(gm_mask, iterations=2) gm_mask_img = new_img_like(gm_img, gm_mask) return gm_mask_img def fetch_oasis_vbm(n_subjects=None, dartel_version=True, data_dir=None, url=None, resume=True, verbose=1): """Download and load Oasis "cross-sectional MRI" dataset (416 subjects). Parameters ---------- n_subjects: int, optional The number of subjects to load. If None is given, all the subjects are used. dartel_version: boolean, optional Whether or not to use data normalized with DARTEL instead of standard SPM8 normalization. data_dir: string, optional Path of the data directory. Used to force data storage in a specified location. Default: None url: string, optional Override download URL. Used for test only (or if you set up a mirror of the data). resume: bool, optional If true, try resuming download if possible verbose: int, optional verbosity level (0 means no message). Returns ------- data: Bunch Dictionary-like object, the interest attributes are : - 'gray_matter_maps': string list Paths to nifti gray matter density probability maps - 'white_matter_maps': string list Paths to nifti white matter density probability maps - 'ext_vars': np.recarray Data from the .csv file with information about selected subjects - 'data_usage_agreement': string Path to the .txt file containing the data usage agreement. References ---------- [1] http://www.oasis-brains.org/ [2] Open Access Series of Imaging Studies (OASIS): Cross-sectional MRI Data in Young, Middle Aged, Nondemented, and Demented Older Adults. Marcus, D. S. et al., 2007, Journal of Cognitive Neuroscience. Notes ----- In the DARTEL version, original Oasis data [1] have been preprocessed with the following steps: 1. Dimension swapping (technically required for subsequent steps) 2. Brain Extraction 3. Segmentation with SPM8 4. Normalization using DARTEL algorithm 5. Modulation 6. Replacement of NaN values with 0 in gray/white matter density maps. 7. Resampling to reduce shape and make it correspond to the shape of the non-DARTEL data (fetched with dartel_version=False). 8. Replacement of values < 1e-4 with zeros to reduce the file size.
In the non-DARTEL version, the following steps have been performed instead: 1. Dimension swapping (technically required for subsequent steps) 2. Brain Extraction 3. Segmentation and normalization to a template with SPM8 4. Modulation 5. Replacement of NaN values with 0 in gray/white matter density maps. An archive containing the gray and white matter density probability maps for the 416 available subjects is provided. Gross outliers are removed and filtered by this data fetcher (DARTEL: 13 outliers; non-DARTEL: 1 outlier). External variables (age, gender, estimated intracranial volume, years of education, socioeconomic status, dementia score) are provided in a CSV file that is a copy of the original Oasis CSV file. The current downloader loads the CSV file and keeps only the lines corresponding to the subjects that are actually requested. The Open Access Series of Imaging Studies (OASIS) is a project dedicated to making brain imaging data openly available to the public. Using data available through the OASIS project requires agreeing with the Data Usage Agreement that can be found at http://www.oasis-brains.org/app/template/UsageAgreement.vm """ # check number of subjects if n_subjects is None: n_subjects = 403 if dartel_version else 415 if dartel_version: # DARTEL version has 13 identified outliers if n_subjects > 403: warnings.warn('Only 403 subjects are available in the ' 'DARTEL-normalized version of the dataset. ' 'All of them will be used instead of the wanted %d' % n_subjects) n_subjects = 403 else: # all subjects except one are available with non-DARTEL version if n_subjects > 415: warnings.warn('Only 415 subjects are available in the ' 'non-DARTEL-normalized version of the dataset. ' 'All of them will be used instead of the wanted %d' % n_subjects) n_subjects = 415 if n_subjects < 1: raise ValueError("Incorrect number of subjects (%d)" % n_subjects) # pick the archive corresponding to the preprocessing type if url is None: if dartel_version: url_images = ('https://www.nitrc.org/frs/download.php/' '6364/archive_dartel.tgz?i_agree=1&download_now=1') else: url_images = ('https://www.nitrc.org/frs/download.php/' '6359/archive.tgz?i_agree=1&download_now=1') # covariates and license are in separate files on NITRC url_csv = ('https://www.nitrc.org/frs/download.php/' '6348/oasis_cross-sectional.csv?i_agree=1&download_now=1') url_dua = ('https://www.nitrc.org/frs/download.php/' '6349/data_usage_agreement.txt?i_agree=1&download_now=1') else: # local URL used in tests url_csv = url + "/oasis_cross-sectional.csv" url_dua = url + "/data_usage_agreement.txt" if dartel_version: url_images = url + "/archive_dartel.tgz" else: url_images = url + "/archive.tgz" opts = {'uncompress': True} # missing subjects create shifts in subjects ids missing_subjects = [8, 24, 36, 48, 89, 93, 100, 118, 128, 149, 154, 171, 172, 175, 187, 194, 196, 215, 219, 225, 242, 245, 248, 251, 252, 257, 276, 297, 306, 320, 324, 334, 347, 360, 364, 391, 393, 412, 414, 427, 436] if dartel_version: # DARTEL produces outliers that are hidden by nilearn API removed_outliers = [27, 57, 66, 83, 122, 157, 222, 269, 282, 287, 309, 428] missing_subjects = sorted(missing_subjects + removed_outliers) file_names_gm = [ (os.path.join( "OAS1_%04d_MR1", "mwrc1OAS1_%04d_MR1_mpr_anon_fslswapdim_bet.nii.gz") % (s, s), url_images, opts) for s in range(1, 457) if s not in missing_subjects][:n_subjects] file_names_wm = [ (os.path.join( "OAS1_%04d_MR1", "mwrc2OAS1_%04d_MR1_mpr_anon_fslswapdim_bet.nii.gz") % (s, s), url_images, opts) for s in range(1, 457) if s not in missing_subjects]
457) if s not in missing_subjects] else: # only one gross outlier produced, hidden by the nilearn API removed_outliers = [390] missing_subjects = sorted(missing_subjects + removed_outliers) file_names_gm = [ (os.path.join( "OAS1_%04d_MR1", "mwc1OAS1_%04d_MR1_mpr_anon_fslswapdim_bet.nii.gz") % (s, s), url_images, opts) for s in range(1, 457) if s not in missing_subjects][:n_subjects] file_names_wm = [ (os.path.join( "OAS1_%04d_MR1", "mwc2OAS1_%04d_MR1_mpr_anon_fslswapdim_bet.nii.gz") % (s, s), url_images, opts) for s in range(1, 457) if s not in missing_subjects] file_names_extvars = [("oasis_cross-sectional.csv", url_csv, {})] file_names_dua = [("data_usage_agreement.txt", url_dua, {})] # restrict to user-specified number of subjects file_names_gm = file_names_gm[:n_subjects] file_names_wm = file_names_wm[:n_subjects] file_names = (file_names_gm + file_names_wm + file_names_extvars + file_names_dua) dataset_name = 'oasis1' data_dir = _get_dataset_dir(dataset_name, data_dir=data_dir, verbose=verbose) files = _fetch_files(data_dir, file_names, resume=resume, verbose=verbose) # Build Bunch gm_maps = files[:n_subjects] wm_maps = files[n_subjects:(2 * n_subjects)] ext_vars_file = files[-2] data_usage_agreement = files[-1] # Keep CSV information only for selected subjects csv_data = np.recfromcsv(ext_vars_file) # Comparisons to recfromcsv data must be bytes. actual_subjects_ids = [("OAS1" + str.split(os.path.basename(x), "OAS1")[1][:9]).encode() for x in gm_maps] subject_mask = np.asarray([subject_id in actual_subjects_ids for subject_id in csv_data['id']]) csv_data = csv_data[subject_mask] fdescr = _get_dataset_descr(dataset_name) return Bunch( gray_matter_maps=gm_maps, white_matter_maps=wm_maps, ext_vars=csv_data, data_usage_agreement=data_usage_agreement, description=fdescr) PKlmHsUllnilearn/datasets/atlas.py""" Downloading NeuroImaging datasets: atlas datasets """ import os import xml.etree.ElementTree import numpy as np from sklearn.datasets.base import Bunch from sklearn.utils import deprecated from .utils import _get_dataset_dir, _fetch_files, _get_dataset_descr from .._utils import check_niimg from ..image import new_img_like from .._utils.compat import _basestring def fetch_atlas_craddock_2012(data_dir=None, url=None, resume=True, verbose=1): """Download and return file names for the Craddock 2012 parcellation. The provided images are in MNI152 space. Parameters ---------- data_dir: string directory where data should be downloaded and unpacked. url: string url of file to download. resume: bool whether to resume download of a partly-downloaded file. verbose: int verbosity level (0 means no message). Returns ------- data: sklearn.datasets.base.Bunch dictionary-like object, keys are: scorr_mean, tcorr_mean, scorr_2level, tcorr_2level, random References ---------- Licence: Creative Commons Attribution Non-commercial Share Alike http://creativecommons.org/licenses/by-nc-sa/2.5/ Craddock, R. Cameron, G. Andrew James, Paul E. Holtzheimer, Xiaoping P. Hu, and Helen S. Mayberg. "A Whole Brain fMRI Atlas Generated via Spatially Constrained Spectral Clustering". Human Brain Mapping 33, no 8 (2012): 1914-1928. doi:10.1002/hbm.21333. See http://www.nitrc.org/projects/cluster_roi/ for more information on this parcellation.
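Examples
--------
A minimal sketch of typical use (illustrative; the files are downloaded on
the first call and the returned values are paths):

>>> craddock = fetch_atlas_craddock_2012()  # doctest: +SKIP
>>> craddock.scorr_mean  # path to the mean spatial-correlation parcellation  # doctest: +SKIP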
""" if url is None: url = "ftp://www.nitrc.org/home/groups/cluster_roi/htdocs" \ "/Parcellations/craddock_2011_parcellations.tar.gz" opts = {'uncompress': True} dataset_name = "craddock_2012" keys = ("scorr_mean", "tcorr_mean", "scorr_2level", "tcorr_2level", "random") filenames = [ ("scorr05_mean_all.nii.gz", url, opts), ("tcorr05_mean_all.nii.gz", url, opts), ("scorr05_2level_all.nii.gz", url, opts), ("tcorr05_2level_all.nii.gz", url, opts), ("random_all.nii.gz", url, opts) ] data_dir = _get_dataset_dir(dataset_name, data_dir=data_dir, verbose=verbose) sub_files = _fetch_files(data_dir, filenames, resume=resume, verbose=verbose) fdescr = _get_dataset_descr(dataset_name) params = dict([('description', fdescr)] + list(zip(keys, sub_files))) return Bunch(**params) def fetch_atlas_destrieux_2009(lateralized=True, data_dir=None, url=None, resume=True, verbose=1): """Download and load the Destrieux cortical atlas (dated 2009) Parameters ---------- lateralized: boolean, optional If True, returns an atlas with distinct regions for right and left hemispheres. data_dir: string, optional Path of the data directory. Use to forec data storage in a non- standard location. Default: None (meaning: default) url: string, optional Download URL of the dataset. Overwrite the default URL. Returns ------- data: sklearn.datasets.base.Bunch dictionary-like object, contains: - Cortical ROIs, lateralized or not (maps) - Labels of the ROIs (labels) References ---------- Fischl, Bruce, et al. "Automatically parcellating the human cerebral cortex." Cerebral cortex 14.1 (2004): 11-22. Destrieux, C., et al. "A sulcal depth-based anatomical parcellation of the cerebral cortex." NeuroImage 47 (2009): S151. """ if url is None: url = "https://www.nitrc.org/frs/download.php/7739/" url += "destrieux2009.tgz" opts = {'uncompress': True} lat = '_lateralized' if lateralized else '' files = [ ('destrieux2009_rois_labels' + lat + '.csv', url, opts), ('destrieux2009_rois' + lat + '.nii.gz', url, opts), ('destrieux2009.rst', url, opts) ] dataset_name = 'destrieux_2009' data_dir = _get_dataset_dir(dataset_name, data_dir=data_dir, verbose=verbose) files_ = _fetch_files(data_dir, files, resume=resume, verbose=verbose) params = dict(maps=files_[1], labels=np.recfromcsv(files_[0])) with open(files_[2], 'r') as rst_file: params['description'] = rst_file.read() return Bunch(**params) def fetch_atlas_harvard_oxford(atlas_name, data_dir=None, symmetric_split=False, resume=True, verbose=1): """Load Harvard-Oxford parcellation from FSL if installed or download it. This function looks up for Harvard Oxford atlas in the system and load it if present. If not, it downloads it and stores it in NILEARN_DATA directory. Parameters ---------- atlas_name: string Name of atlas to load. Can be: cort-maxprob-thr0-1mm, cort-maxprob-thr0-2mm, cort-maxprob-thr25-1mm, cort-maxprob-thr25-2mm, cort-maxprob-thr50-1mm, cort-maxprob-thr50-2mm, sub-maxprob-thr0-1mm, sub-maxprob-thr0-2mm, sub-maxprob-thr25-1mm, sub-maxprob-thr25-2mm, sub-maxprob-thr50-1mm, sub-maxprob-thr50-2mm, cort-prob-1mm, cort-prob-2mm, sub-prob-1mm, sub-prob-2mm data_dir: string, optional Path of data directory. It can be FSL installation directory (which is dependent on your installation). symmetric_split: bool, optional If True, split every symmetric region in left and right parts. Effectively doubles the number of regions. Default: False. 
Not implemented for probabilistic atlases (*-prob-* atlases). Returns ------- data: sklearn.datasets.base.Bunch dictionary-like object, keys are: - "maps": nibabel.Nifti1Image, a 4D image if a probabilistic atlas is requested, a 3D label image if a maximum-probability atlas is requested. - "labels": string list, labels of the regions in the atlas. """ atlas_items = ("cort-maxprob-thr0-1mm", "cort-maxprob-thr0-2mm", "cort-maxprob-thr25-1mm", "cort-maxprob-thr25-2mm", "cort-maxprob-thr50-1mm", "cort-maxprob-thr50-2mm", "sub-maxprob-thr0-1mm", "sub-maxprob-thr0-2mm", "sub-maxprob-thr25-1mm", "sub-maxprob-thr25-2mm", "sub-maxprob-thr50-1mm", "sub-maxprob-thr50-2mm", "cort-prob-1mm", "cort-prob-2mm", "sub-prob-1mm", "sub-prob-2mm") if atlas_name not in atlas_items: raise ValueError("Invalid atlas name: {0}. Please choose an atlas " "among:\n{1}".format( atlas_name, '\n'.join(atlas_items))) url = 'http://www.nitrc.org/frs/download.php/7700/HarvardOxford.tgz' # For practical reasons, we mimic the FSL data directory here. dataset_name = 'fsl' # Environment variables default_paths = [] for env_var in ['FSL_DIR', 'FSLDIR']: path = os.getenv(env_var) if path is not None: default_paths.extend(path.split(':')) data_dir = _get_dataset_dir(dataset_name, data_dir=data_dir, default_paths=default_paths, verbose=verbose) opts = {'uncompress': True} root = os.path.join('data', 'atlases') atlas_file = os.path.join(root, 'HarvardOxford', 'HarvardOxford-' + atlas_name + '.nii.gz') if atlas_name[0] == 'c': label_file = 'HarvardOxford-Cortical.xml' else: label_file = 'HarvardOxford-Subcortical.xml' label_file = os.path.join(root, label_file) atlas_img, label_file = _fetch_files( data_dir, [(atlas_file, url, opts), (label_file, url, opts)], resume=resume, verbose=verbose) names = {} from xml.etree import ElementTree names[0] = 'Background' for label in ElementTree.parse(label_file).findall('.//label'): names[int(label.get('index')) + 1] = label.text names = list(names.values()) if not symmetric_split: return Bunch(maps=atlas_img, labels=names) if atlas_name in ("cort-prob-1mm", "cort-prob-2mm", "sub-prob-1mm", "sub-prob-2mm"): raise ValueError("Region splitting not supported for probabilistic " "atlases") atlas_img = check_niimg(atlas_img) atlas = atlas_img.get_data() labels = np.unique(atlas) # Build a mask of both halves of the brain middle_ind = (atlas.shape[0] - 1) // 2 # Put zeros on the median plane atlas[middle_ind, ...] = 0 # Split every zone crossing the median plane into two parts. left_atlas = atlas.copy() left_atlas[middle_ind:, ...] = 0 right_atlas = atlas.copy() right_atlas[:middle_ind, ...] = 0 new_label = 0 new_atlas = atlas.copy() # Assumes that the background label is zero. new_names = [names[0]] for label, name in zip(labels[1:], names[1:]): new_label += 1 left_elements = (left_atlas == label).sum() right_elements = (right_atlas == label).sum() n_elements = float(left_elements + right_elements) if (left_elements / n_elements < 0.05 or right_elements / n_elements < 0.05): new_atlas[atlas == label] = new_label new_names.append(name) continue new_atlas[right_atlas == label] = new_label new_names.append(name + ', left part') new_label += 1 new_atlas[left_atlas == label] = new_label new_names.append(name + ', right part') atlas_img = new_img_like(atlas_img, new_atlas, atlas_img.get_affine()) return Bunch(maps=atlas_img, labels=new_names) def fetch_atlas_msdl(data_dir=None, url=None, resume=True, verbose=1): """Download and load the MSDL brain atlas.
Parameters ---------- data_dir: string, optional Path of the data directory. Used to force data storage in a specified location. Default: None url: string, optional Override download URL. Used for test only (or if you set up a mirror of the data). Returns ------- data: sklearn.datasets.base.Bunch Dictionary-like object, the interest attributes are : - 'maps': str, path to nifti file containing the regions definition. - 'labels': string list containing the labels of the regions. - 'region_coords': tuple list (x, y, z) containing coordinates of each region in MNI space. - 'networks': string list containing names of the networks. - 'description': description about the atlas. References ---------- :Download: https://team.inria.fr/parietal/files/2015/01/MSDL_rois.zip :Paper to cite: "Multi-subject dictionary learning to segment an atlas of brain spontaneous activity". Gael Varoquaux, Alexandre Gramfort, Fabian Pedregosa, Vincent Michel, Bertrand Thirion. Information Processing in Medical Imaging, 2011, pp. 562-573, Lecture Notes in Computer Science. :Other references: "Learning and comparing functional connectomes across subjects". Gael Varoquaux, R.C. Craddock, NeuroImage, 2013. """ if url is None: url = 'https://team.inria.fr/parietal/files/2015/01/MSDL_rois.zip' opts = {'uncompress': True} dataset_name = "msdl_atlas" files = [(os.path.join('MSDL_rois', 'msdl_rois_labels.csv'), url, opts), (os.path.join('MSDL_rois', 'msdl_rois.nii'), url, opts)] data_dir = _get_dataset_dir(dataset_name, data_dir=data_dir, verbose=verbose) files = _fetch_files(data_dir, files, resume=resume, verbose=verbose) csv_data = np.recfromcsv(files[0]) labels = csv_data['name'].tolist() region_coords = csv_data[['x', 'y', 'z']].tolist() net_names = csv_data['net_name'].tolist() fdescr = _get_dataset_descr(dataset_name) return Bunch(maps=files[1], labels=labels, region_coords=region_coords, networks=net_names, description=fdescr) @deprecated('This function has been replaced by fetch_coords_power_2011 and ' 'will be removed in nilearn 0.2.5') def fetch_atlas_power_2011(): return fetch_coords_power_2011() def fetch_coords_power_2011(): """Download and load the Power et al. brain atlas composed of 264 ROIs. Returns ------- data: sklearn.datasets.base.Bunch dictionary-like object, contains: - "rois": coordinates of 264 ROIs in MNI space References ---------- Power, Jonathan D., et al. "Functional network organization of the human brain." Neuron 72.4 (2011): 665-678. """ dataset_name = 'power_2011' fdescr = _get_dataset_descr(dataset_name) package_directory = os.path.dirname(os.path.abspath(__file__)) csv = os.path.join(package_directory, "data", "power_2011.csv") params = dict(rois=np.recfromcsv(csv), description=fdescr) return Bunch(**params) def fetch_atlas_smith_2009(data_dir=None, mirror='origin', url=None, resume=True, verbose=1): """Download and load the Smith ICA and BrainMap atlas (dated 2009) Parameters ---------- data_dir: string, optional Path of the data directory. Used to force data storage in a non-standard location. Default: None (meaning: default) mirror: string, optional By default, the dataset is downloaded from the original website of the atlas. Specifying "nitrc" will force download from a mirror, with potentially higher bandwidth. url: string, optional Download URL of the dataset. Overrides the default URL.
Returns ------- data: sklearn.datasets.base.Bunch dictionary-like object, contains: - 20-dimensional ICA, resting-fMRI components: - all 20 components (rsn20) - 10 well-matched maps from these, as shown in the PNAS paper (rsn10) - 20-dimensional ICA, BrainMap components: - all 20 components (bm20) - 10 well-matched maps from these, as shown in the PNAS paper (bm10) - 70-dimensional ICA, resting-fMRI components (rsn70) - 70-dimensional ICA, BrainMap components (bm70) References ---------- S.M. Smith, P.T. Fox, K.L. Miller, D.C. Glahn, P.M. Fox, C.E. Mackay, N. Filippini, K.E. Watkins, R. Toro, A.R. Laird, and C.F. Beckmann. Correspondence of the brain's functional architecture during activation and rest. Proc Natl Acad Sci USA (PNAS), 106(31):13040-13045, 2009. A.R. Laird, P.M. Fox, S.B. Eickhoff, J.A. Turner, K.L. Ray, D.R. McKay, D.C. Glahn, C.F. Beckmann, S.M. Smith, and P.T. Fox. Behavioral interpretations of intrinsic connectivity networks. Journal of Cognitive Neuroscience, 2011. Notes ----- For more information about this dataset's structure: http://www.fmrib.ox.ac.uk/analysis/brainmap+rsns/ """ if url is None: if mirror == 'origin': url = "http://www.fmrib.ox.ac.uk/analysis/brainmap+rsns/" elif mirror == 'nitrc': url = [ 'https://www.nitrc.org/frs/download.php/7730/', 'https://www.nitrc.org/frs/download.php/7729/', 'https://www.nitrc.org/frs/download.php/7731/', 'https://www.nitrc.org/frs/download.php/7726/', 'https://www.nitrc.org/frs/download.php/7728/', 'https://www.nitrc.org/frs/download.php/7727/', ] else: raise ValueError('Unknown mirror "%s". Mirror must be "origin" ' 'or "nitrc"' % str(mirror)) files = [ 'rsn20.nii.gz', 'PNAS_Smith09_rsn10.nii.gz', 'rsn70.nii.gz', 'bm20.nii.gz', 'PNAS_Smith09_bm10.nii.gz', 'bm70.nii.gz' ] if isinstance(url, _basestring): url = [url] * len(files) files = [(f, u + f, {}) for f, u in zip(files, url)] dataset_name = 'smith_2009' data_dir = _get_dataset_dir(dataset_name, data_dir=data_dir, verbose=verbose) files_ = _fetch_files(data_dir, files, resume=resume, verbose=verbose) fdescr = _get_dataset_descr(dataset_name) keys = ['rsn20', 'rsn10', 'rsn70', 'bm20', 'bm10', 'bm70'] params = dict(zip(keys, files_)) params['description'] = fdescr return Bunch(**params) def fetch_atlas_yeo_2011(data_dir=None, url=None, resume=True, verbose=1): """Download and return file names for the Yeo 2011 parcellation. The provided images are in MNI152 space. Parameters ---------- data_dir: string directory where data should be downloaded and unpacked. url: string url of file to download. resume: bool whether to resume download of a partly-downloaded file. verbose: int verbosity level (0 means no message). Returns ------- data: sklearn.datasets.base.Bunch dictionary-like object, keys are: - "thin_7", "thick_7": 7-region parcellations, fitted to the thin and thick template cortex segmentations, respectively. - "thin_17", "thick_17": 17-region parcellations. - "colors_7", "colors_17": colormaps (text files) for the 7- and 17-region parcellations, respectively. - "anat": anatomy image. Notes ----- For more information on this dataset's structure, see http://surfer.nmr.mgh.harvard.edu/fswiki/CorticalParcellation_Yeo2011 Yeo BT, Krienen FM, Sepulcre J, Sabuncu MR, Lashkari D, Hollinshead M, Roffman JL, Smoller JW, Zollei L., Polimeni JR, Fischl B, Liu H, Buckner RL. The organization of the human cerebral cortex estimated by intrinsic functional connectivity. J Neurophysiol 106(3):1125-65, 2011. Licence: unknown.
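Examples
--------
A minimal sketch of typical use (illustrative; data are downloaded on the
first call and returned as file paths):

>>> yeo = fetch_atlas_yeo_2011()  # doctest: +SKIP
>>> yeo.thick_17  # path to the 17-network parcellation, thick cortex fit  # doctest: +SKIP
>>> yeo.anat  # path to the accompanying anatomical image  # doctest: +SKIP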
""" if url is None: url = "ftp://surfer.nmr.mgh.harvard.edu/" \ "pub/data/Yeo_JNeurophysiol11_MNI152.zip" opts = {'uncompress': True} dataset_name = "yeo_2011" keys = ("thin_7", "thick_7", "thin_17", "thick_17", "colors_7", "colors_17", "anat") basenames = ( "Yeo2011_7Networks_MNI152_FreeSurferConformed1mm.nii.gz", "Yeo2011_7Networks_MNI152_FreeSurferConformed1mm_LiberalMask.nii.gz", "Yeo2011_17Networks_MNI152_FreeSurferConformed1mm.nii.gz", "Yeo2011_17Networks_MNI152_FreeSurferConformed1mm_LiberalMask.nii.gz", "Yeo2011_7Networks_ColorLUT.txt", "Yeo2011_17Networks_ColorLUT.txt", "FSL_MNI152_FreeSurferConformed_1mm.nii.gz") filenames = [(os.path.join("Yeo_JNeurophysiol11_MNI152", f), url, opts) for f in basenames] data_dir = _get_dataset_dir(dataset_name, data_dir=data_dir, verbose=verbose) sub_files = _fetch_files(data_dir, filenames, resume=resume, verbose=verbose) fdescr = _get_dataset_descr(dataset_name) params = dict([('description', fdescr)] + list(zip(keys, sub_files))) return Bunch(**params) def fetch_atlas_aal(version='SPM12', data_dir=None, url=None, resume=True, verbose=1): """Downloads and returns the AAL template for SPM 12. This atlas is the result of an automated anatomical parcellation of the spatially normalized single-subject high-resolution T1 volume provided by the Montreal Neurological Institute (MNI) (D. L. Collins et al., 1998, Trans. Med. Imag. 17, 463-468, PubMed). Parameters ---------- version: string, optional The version of the AAL atlas. Must be SPM5, SPM8 or SPM12. Default is SPM12. data_dir: string directory where data should be downloaded and unpacked. url: string url of file to download. resume: bool whether to resumed download of a partly-downloaded file. verbose: int verbosity level (0 means no message). Returns ------- data: sklearn.datasets.base.Bunch dictionary-like object, keys are: - "maps": str. path to nifti file containing regions. - "labels": dict. labels dictionary with their region id as key and name as value Notes ----- For more information on this dataset's structure, see http://www.gin.cnrs.fr/AAL-217?lang=en Automated Anatomical Labeling of Activations in SPM Using a Macroscopic Anatomical Parcellation of the MNI MRI Single-Subject Brain. N. Tzourio-Mazoyer, B. Landeau, D. Papathanassiou, F. Crivello, O. Etard, N. Delcroix, B. Mazoyer, and M. Joliot. NeuroImage 2002. 15 :273-28 Licence: unknown. """ versions = ['SPM5', 'SPM8', 'SPM12'] if version not in versions: raise ValueError('The version of AAL requested "%s" does not exist.' 'Please choose one among %s.' % (version, str(versions))) if url is None: baseurl = "http://www.gin.cnrs.fr/AAL_files/aal_for_%s.tar.gz" url = baseurl % version opts = {'uncompress': True} dataset_name = "aal_" + version # keys and basenames would need to be handled for each spm_version # for now spm_version 12 is hardcoded. 
basenames = ("AAL.nii", "AAL.xml") filenames = [(os.path.join('aal', 'atlas', f), url, opts) for f in basenames] data_dir = _get_dataset_dir(dataset_name, data_dir=data_dir, verbose=verbose) atlas_img, labels_file = _fetch_files(data_dir, filenames, resume=resume, verbose=verbose) fdescr = _get_dataset_descr(dataset_name) # We return the labels contained in the xml file as a dictionary xml_tree = xml.etree.ElementTree.parse(labels_file) root = xml_tree.getroot() labels = [] indices = [] for label in root.getiterator('label'): indices.append(label.find('index').text) labels.append(label.find('name').text) params = {'description': fdescr, 'maps': atlas_img, 'labels': labels, 'indices': indices} return Bunch(**params) def fetch_atlas_basc_multiscale_2015(version='sym', data_dir=None, resume=True, verbose=1): """Downloads and loads multiscale functional brain parcellations This atlas includes group brain parcellations generated from resting-state functional magnetic resonance images from about 200 young healthy subjects. Multiple scales (number of networks) are available, among 7, 12, 20, 36, 64, 122, 197, 325, 444. The brain parcellations have been generated using a method called bootstrap analysis of stable clusters called as BASC, (Bellec et al., 2010) and the scales have been selected using a data-driven method called MSTEPS (Bellec, 2013). Note that two versions of the template are available, 'sym' or 'asym'. The 'asym' type contains brain images that have been registered in the asymmetric version of the MNI brain template (reflecting that the brain is asymmetric), while the 'sym' type contains images registered in the symmetric version of the MNI template. The symmetric template has been forced to be symmetric anatomically, and is therefore ideally suited to study homotopic functional connections in fMRI: finding homotopic regions simply consists of flipping the x-axis of the template. .. versionadded:: 0.2.3 Parameters ---------- version: str, optional Available versions are 'sym' or 'asym'. By default all scales of brain parcellations of version 'sym' will be returned. data_dir: str, optional directory where data should be downloaded and unpacked. url: str, optional url of file to download. resume: bool whether to resumed download of a partly-downloaded file. verbose: int verbosity level (0 means no message). Returns ------- data: sklearn.datasets.base.Bunch dictionary-like object, Keys are: - "scale007", "scale012", "scale020", "scale036", "scale064", "scale122", "scale197", "scale325", "scale444": str, path to Nifti file of various scales of brain parcellations. - "description": details about the data release. References ---------- Bellec P, Rosa-Neto P, Lyttelton OC, Benali H, Evans AC, Jul. 2010. Multi-level bootstrap analysis of stable clusters in resting-state fMRI. NeuroImage 51 (3), 1126-1139. URL http://dx.doi.org/10.1016/j.neuroimage.2010.02.082 Bellec P, Jun. 2013. Mining the Hierarchy of Resting-State Brain Networks: Selection of Representative Clusters in a Multiscale Structure. Pattern Recognition in Neuroimaging (PRNI), 2013 pp. 54-57. Notes ----- For more information on this dataset's structure, see https://figshare.com/articles/basc/1285615 """ versions = ['sym', 'asym'] if version not in versions: raise ValueError('The version of Brain parcellations requested "%s" ' 'does not exist. Please choose one among them %s.' 
% (version, str(versions))) keys = ['scale007', 'scale012', 'scale020', 'scale036', 'scale064', 'scale122', 'scale197', 'scale325', 'scale444'] if version == 'sym': url = "https://ndownloader.figshare.com/files/1861819" elif version == 'asym': url = "https://ndownloader.figshare.com/files/1861820" opts = {'uncompress': True} dataset_name = "basc_multiscale_2015" data_dir = _get_dataset_dir(dataset_name, data_dir=data_dir, verbose=verbose) folder_name = 'template_cambridge_basc_multiscale_nii_' + version basenames = ['template_cambridge_basc_multiscale_' + version + '_' + key + '.nii.gz' for key in keys] filenames = [(os.path.join(folder_name, basename), url, opts) for basename in basenames] data = _fetch_files(data_dir, filenames, resume=resume, verbose=verbose) descr = _get_dataset_descr(dataset_name) params = dict(zip(keys, data)) params['description'] = descr return Bunch(**params) def fetch_coords_dosenbach_2010(): """Load the Dosenbach et al. 160 ROIs. These ROIs cover much of the cerebral cortex and cerebellum and are assigned to 6 networks. Returns ------- data: sklearn.datasets.base.Bunch dictionary-like object, contains: - "rois": coordinates of 160 ROIs in MNI space - "labels": ROIs labels - "networks": networks names References ---------- Dosenbach N.U., Nardos B., et al. "Prediction of individual brain maturity using fMRI.", 2010, Science 329, 1358-1361. """ dataset_name = 'dosenbach_2010' fdescr = _get_dataset_descr(dataset_name) package_directory = os.path.dirname(os.path.abspath(__file__)) csv = os.path.join(package_directory, "data", "dosenbach_2010.csv") out_csv = np.recfromcsv(csv) # We add the ROI number to its name, since names are not unique names = out_csv['name'] numbers = out_csv['number'] labels = np.array(['{0} {1}'.format(name, number) for (name, number) in zip(names, numbers)]) params = dict(rois=out_csv[['x', 'y', 'z']], labels=labels, networks=out_csv['network'], description=fdescr) return Bunch(**params) PKmHG!nilearn/datasets/__init__.py""" Helper functions to download NeuroImaging datasets """ from .struct import (fetch_icbm152_2009, load_mni152_template, load_mni152_brain_mask, fetch_oasis_vbm, fetch_icbm152_brain_gm_mask, MNI152_FILE_PATH) from .func import (fetch_haxby_simple, fetch_haxby, fetch_nyu_rest, fetch_adhd, fetch_miyawaki2008, fetch_localizer_contrasts, fetch_abide_pcp, fetch_localizer_button_task, fetch_localizer_calculation_task, fetch_mixed_gambles, fetch_megatrawls_netmats, fetch_cobre) from .atlas import (fetch_atlas_craddock_2012, fetch_atlas_destrieux_2009, fetch_atlas_harvard_oxford, fetch_atlas_msdl, fetch_atlas_power_2011, fetch_coords_power_2011, fetch_atlas_smith_2009, fetch_atlas_yeo_2011, fetch_atlas_aal, fetch_atlas_basc_multiscale_2015, fetch_coords_dosenbach_2010) from .utils import get_data_dirs __all__ = ['MNI152_FILE_PATH', 'fetch_icbm152_2009', 'load_mni152_template', 'fetch_oasis_vbm', 'fetch_haxby_simple', 'fetch_haxby', 'fetch_nyu_rest', 'fetch_adhd', 'fetch_miyawaki2008', 'fetch_localizer_contrasts', 'fetch_localizer_button_task', 'fetch_abide_pcp', 'fetch_localizer_calculation_task', 'fetch_atlas_craddock_2012', 'fetch_atlas_destrieux_2009', 'fetch_atlas_harvard_oxford', 'fetch_atlas_msdl', 'fetch_atlas_power_2011', 'fetch_coords_power_2011', 'fetch_atlas_smith_2009', 'fetch_atlas_yeo_2011', 'fetch_mixed_gambles', 'fetch_atlas_aal', 'fetch_megatrawls_netmats', 'fetch_cobre', 'fetch_atlas_basc_multiscale_2015', 'fetch_coords_dosenbach_2010', 'load_mni152_brain_mask', 'fetch_icbm152_brain_gm_mask', 'get_data_dirs'] 
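# ---------------------------------------------------------------------------
# Illustrative usage of the fetchers exported above (not part of the package
# source). A minimal sketch, assuming a working network connection; the
# data_dir value below is a hypothetical example path.
from nilearn import datasets

# Maximum-probability Harvard-Oxford atlas: `maps` points to a 3D label
# image and `labels` lists the region names, with 'Background' first.
ho = datasets.fetch_atlas_harvard_oxford('cort-maxprob-thr25-2mm')
print(ho.labels[:3])

# OASIS VBM maps: the returned Bunch exposes gray/white matter map paths and
# the covariates kept from the OASIS CSV file for the requested subjects.
oasis = datasets.fetch_oasis_vbm(n_subjects=5, data_dir='/tmp/nilearn_data')
print(len(oasis.gray_matter_maps), len(oasis.ext_vars))
# ---------------------------------------------------------------------------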
PKmH&\elelnilearn/datasets/utils.py""" Downloading NeuroImaging datasets: utility functions """ import os import numpy as np import base64 import collections import contextlib import fnmatch import hashlib import shutil import time import sys import tarfile import warnings import zipfile from .._utils.compat import _basestring, cPickle, _urllib, md5_hash def _format_time(t): if t > 60: return "%4.1fmin" % (t / 60.) else: return " %5.1fs" % (t) def _md5_sum_file(path): """ Calculates the MD5 sum of a file. """ with open(path, 'rb') as f: m = hashlib.md5() while True: data = f.read(8192) if not data: break m.update(data) return m.hexdigest() def _read_md5_sum_file(path): """ Reads an MD5 checksum file and returns hashes as a dictionary. """ with open(path, "r") as f: hashes = {} while True: line = f.readline() if not line: break h, name = line.rstrip().split(' ', 1) hashes[name] = h return hashes def readlinkabs(link): """ Return an absolute path for the destination of a symlink """ path = os.readlink(link) if os.path.isabs(path): return path return os.path.join(os.path.dirname(link), path) def _chunk_report_(bytes_so_far, total_size, initial_size, t0): """Show downloading percentage. Parameters ---------- bytes_so_far: int Number of downloaded bytes total_size: int Total size of the file (may be 0/None, depending on download method). initial_size: int If resuming, indicate the initial size of the file. If not resuming, set to zero. t0: int The time in seconds (as returned by time.time()) at which the download was resumed / started. """ if not total_size: sys.stderr.write("\rDownloaded %d of ? bytes." % (bytes_so_far)) else: # Estimate remaining download time total_percent = float(bytes_so_far) / total_size current_download_size = bytes_so_far - initial_size bytes_remaining = total_size - bytes_so_far dt = time.time() - t0 download_rate = current_download_size / max(1e-8, float(dt)) # Minimum rate of 0.01 bytes/s, to avoid dividing by zero. time_remaining = bytes_remaining / max(0.01, download_rate) # Trailing whitespace is to erase extra char when message length # varies sys.stderr.write( "\rDownloaded %d of %d bytes (%.1f%%, %s remaining)" % (bytes_so_far, total_size, total_percent * 100, _format_time(time_remaining))) def _chunk_read_(response, local_file, chunk_size=8192, report_hook=None, initial_size=0, total_size=None, verbose=1): """Download a file chunk by chunk and show advancement. Parameters ---------- response: _urllib.response.addinfourl Response to the download request, used to get the file size local_file: file Hard disk file where data should be written chunk_size: int, optional Size of downloaded chunks. Default: 8192 report_hook: bool, optional Whether or not to show downloading advancement. Default: None initial_size: int, optional If resuming, indicate the initial size of the file total_size: int, optional Expected final size of the download (None means it is unknown). verbose: int, optional verbosity level (0 means no message). Returns ------- None The downloaded data is written to `local_file`; nothing is returned.
""" try: if total_size is None: total_size = response.info().get('Content-Length').strip() total_size = int(total_size) + initial_size except Exception as e: if verbose > 2: print("Warning: total size could not be determined.") if verbose > 3: print("Full stack trace: %s" % e) total_size = None bytes_so_far = initial_size t0 = time_last_display = time.time() while True: chunk = response.read(chunk_size) bytes_so_far += len(chunk) time_last_read = time.time() if (report_hook and # Refresh report every half second or when download is # finished. (time_last_read > time_last_display + 0.5 or not chunk)): _chunk_report_(bytes_so_far, total_size, initial_size, t0) time_last_display = time_last_read if chunk: local_file.write(chunk) else: break return def get_data_dirs(data_dir=None): """ Returns the directories in which nilearn looks for data. This is typically useful for the end-user to check where the data is downloaded and stored. Parameters ---------- data_dir: string, optional Path of the data directory. Used to force data storage in a specified location. Default: None Returns ------- paths: list of strings Paths of the dataset directories. Notes ----- This function retrieves the datasets directories using the following priority : 1. defaults system paths 2. the keyword argument data_dir 3. the global environment variable NILEARN_SHARED_DATA 4. the user environment variable NILEARN_DATA 5. nilearn_data in the user home folder """ # We build an array of successive paths by priority # The boolean indicates if it is a pre_dir: in that case, we won't add the # dataset name to the path. paths = [] # Check data_dir which force storage in a specific location if data_dir is not None: paths.extend(data_dir.split(os.pathsep)) # If data_dir has not been specified, then we crawl default locations if data_dir is None: global_data = os.getenv('NILEARN_SHARED_DATA') if global_data is not None: paths.extend(global_data.split(os.pathsep)) local_data = os.getenv('NILEARN_DATA') if local_data is not None: paths.extend(local_data.split(os.pathsep)) paths.append(os.path.expanduser('~/nilearn_data')) return paths def _get_dataset_dir(dataset_name, data_dir=None, default_paths=None, verbose=1): """ Create if necessary and returns data directory of given dataset. Parameters ---------- dataset_name: string The unique name of the dataset. data_dir: string, optional Path of the data directory. Used to force data storage in a specified location. Default: None default_paths: list of string, optional Default system paths in which the dataset may already have been installed by a third party software. They will be checked first. verbose: int, optional verbosity level (0 means no message). Returns ------- data_dir: string Path of the given dataset directory. Notes ----- This function retrieves the datasets directory (or data directory) using the following priority : 1. defaults system paths 2. the keyword argument data_dir 3. the global environment variable NILEARN_SHARED_DATA 4. the user environment variable NILEARN_DATA 5. 
nilearn_data in the user home folder """ paths = [] # Search possible data-specific system paths if default_paths is not None: for default_path in default_paths: paths.extend([(d, True) for d in default_path.split(os.pathsep)]) paths.extend([(d, False) for d in get_data_dirs(data_dir=data_dir)]) if verbose > 2: print('Dataset search paths: %s' % paths) # Check if the dataset exists somewhere for path, is_pre_dir in paths: if not is_pre_dir: path = os.path.join(path, dataset_name) if os.path.islink(path): # Resolve path path = readlinkabs(path) if os.path.exists(path) and os.path.isdir(path): if verbose > 1: print('\nDataset found in %s\n' % path) return path # If not, create a folder in the first writeable directory errors = [] for (path, is_pre_dir) in paths: if not is_pre_dir: path = os.path.join(path, dataset_name) if not os.path.exists(path): try: os.makedirs(path) if verbose > 0: print('\nDataset created in %s\n' % path) return path except Exception as exc: short_error_message = getattr(exc, 'strerror', str(exc)) errors.append('\n -{0} ({1})'.format( path, short_error_message)) raise OSError('Nilearn tried to store the dataset in the following ' 'directories, but:' + ''.join(errors)) def _uncompress_file(file_, delete_archive=True, verbose=1): """Uncompress files contained in a dataset. Parameters ---------- file_: string Path of the file to be uncompressed. delete_archive: bool, optional Whether or not to delete the archive once it is uncompressed. Default: True verbose: int, optional verbosity level (0 means no message). Notes ----- This handles zip, tar, gzip and bzip files only. """ if verbose > 0: sys.stderr.write('Extracting data from %s...' % file_) data_dir = os.path.dirname(file_) # We first try to see if it is a zip file try: filename, ext = os.path.splitext(file_) with open(file_, "rb") as fd: header = fd.read(4) processed = False if zipfile.is_zipfile(file_): z = zipfile.ZipFile(file_) z.extractall(data_dir) z.close() processed = True elif ext == '.gz' or header.startswith(b'\x1f\x8b'): import gzip gz = gzip.open(file_) if ext == '.tgz': filename = filename + '.tar' out = open(filename, 'wb') shutil.copyfileobj(gz, out, 8192) gz.close() out.close() # If the file is .tar.gz, this will be handled by the next case if delete_archive: os.remove(file_) file_ = filename filename, ext = os.path.splitext(file_) processed = True if tarfile.is_tarfile(file_): with contextlib.closing(tarfile.open(file_, "r")) as tar: tar.extractall(path=data_dir) processed = True if not processed: raise IOError( "[Uncompress] unknown archive file format: %s" % file_) if delete_archive: os.remove(file_) if verbose > 0: sys.stderr.write('.. done.\n') except Exception as e: if verbose > 0: print('Error uncompressing file: %s' % e) raise def _filter_column(array, col, criteria): """ Return an index array matching criteria. Parameters ---------- array: numpy array with columns Array in which data will be filtered col: string Name of the column criteria: integer (or float), pair of integers, string or list of these if integer, select elements in column matching integer if a tuple, select elements between the limits given by the tuple if a string, select elements that match the string """ # Raise an error if the column does not exist. This is the only way to # test it across all possible types (pandas, recarray...)
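    # Examples of the accepted criteria, matching the logic below
    # (illustrative):
    #   _filter_column(arr, 'age', 25)          -> arr['age'] == 25
    #   _filter_column(arr, 'age', (20, None))  -> arr['age'] >= 20
    #   _filter_column(arr, 'age', (20, 30))    -> 20 <= arr['age'] <= 30
    #   _filter_column(arr, 'site', [1, 2])     -> matches either value (OR)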
try: array[col] except Exception: raise KeyError('Filtering criterion %s does not exist' % col) if (not isinstance(criteria, _basestring) and not isinstance(criteria, bytes) and not isinstance(criteria, tuple) and isinstance(criteria, collections.Iterable)): filter = np.zeros(array.shape[0], dtype=np.bool) for criterion in criteria: filter = np.logical_or(filter, _filter_column(array, col, criterion)) return filter if isinstance(criteria, tuple): if len(criteria) != 2: raise ValueError("An interval must have 2 values") if criteria[0] is None: return array[col] <= criteria[1] if criteria[1] is None: return array[col] >= criteria[0] filter = array[col] <= criteria[1] return np.logical_and(filter, array[col] >= criteria[0]) return array[col] == criteria def _filter_columns(array, filters, combination='and'): """ Return indices of recarray entries that match criteria. Parameters ---------- array: numpy array with columns Array in which data will be filtered filters: list of criteria See _filter_column combination: string, optional String describing the combination operator. Possible values are "and" and "or". """ if combination == 'and': fcomb = np.logical_and mask = np.ones(array.shape[0], dtype=np.bool) elif combination == 'or': fcomb = np.logical_or mask = np.zeros(array.shape[0], dtype=np.bool) else: raise ValueError('Combination mode not known: %s' % combination) for column in filters: mask = fcomb(mask, _filter_column(array, column, filters[column])) return mask def _fetch_file(url, data_dir, resume=True, overwrite=False, md5sum=None, username=None, password=None, handlers=[], verbose=1): """Load requested file, downloading it if needed or requested. Parameters ---------- url: string Contains the url of the file to be downloaded. data_dir: string Path of the data directory. Used for data storage in the specified location. resume: bool, optional If true, try to resume partially downloaded files overwrite: bool, optional If true and the file already exists, delete it. md5sum: string, optional MD5 sum of the file. Checked if download of the file is required username: string, optional Username used for basic HTTP authentication password: string, optional Password used for basic HTTP authentication handlers: list of BaseHandler, optional urllib handlers passed to urllib.request.build_opener. Used by advanced users to customize request handling. verbose: int, optional verbosity level (0 means no message). Returns ------- full_name: string Absolute path of the downloaded file. Notes ----- If, for any reason, the download procedure fails, all downloaded files are removed. """ # Determine data path if not os.path.exists(data_dir): os.makedirs(data_dir) # Determine filename using URL parse = _urllib.parse.urlparse(url) file_name = os.path.basename(parse.path) if file_name == '': file_name = md5_hash(parse.path) temp_file_name = file_name + ".part" full_name = os.path.join(data_dir, file_name) temp_full_name = os.path.join(data_dir, temp_file_name) if os.path.exists(full_name): if overwrite: os.remove(full_name) else: return full_name if os.path.exists(temp_full_name): if overwrite: os.remove(temp_full_name) t0 = time.time() local_file = None initial_size = 0 try: # Download data url_opener = _urllib.request.build_opener(*handlers) request = _urllib.request.Request(url) request.add_header('Connection', 'Keep-Alive') if username is not None and password is not None: if not url.startswith('https'): raise ValueError( 'Authentication was requested on a non secured URL (%s). ' 'Request has been blocked for security reasons.'
% url) # Note: HTTPBasicAuthHandler is not used here because it relies # on the fact that the server will return a 401 error with a proper # www-authentication header, which is not the case for most # servers. encoded_auth = base64.b64encode( (username + ':' + password).encode()) request.add_header(b'Authorization', b'Basic ' + encoded_auth) if verbose > 0: displayed_url = url.split('?')[0] if verbose == 1 else url print('Downloading data from %s ...' % displayed_url) if resume and os.path.exists(temp_full_name): # Download has been interrupted, we try to resume it. local_file_size = os.path.getsize(temp_full_name) # If the file exists, then only download the remainder request.add_header("Range", "bytes=%s-" % (local_file_size)) try: data = url_opener.open(request) content_range = data.info().get('Content-Range') if (content_range is None or not content_range.startswith( 'bytes %s-' % local_file_size)): raise IOError('Server does not support resuming') except Exception: # A wide number of errors can be raised here. HTTPError, # URLError... I prefer to catch them all and rerun without # resuming. if verbose > 0: print('Resuming failed, trying to download the whole file.') return _fetch_file( url, data_dir, resume=False, overwrite=overwrite, md5sum=md5sum, username=username, password=password, handlers=handlers, verbose=verbose) local_file = open(temp_full_name, "ab") initial_size = local_file_size else: data = url_opener.open(request) local_file = open(temp_full_name, "wb") _chunk_read_(data, local_file, report_hook=(verbose > 0), initial_size=initial_size, verbose=verbose) # temp file must be closed prior to the move if not local_file.closed: local_file.close() shutil.move(temp_full_name, full_name) dt = time.time() - t0 if verbose > 0: # Complete the reporting hook sys.stderr.write(' ...done. ({0:.0f} seconds, {1:.0f} min)\n' .format(dt, dt // 60)) except (_urllib.error.HTTPError, _urllib.error.URLError) as e: if 'Error while fetching' not in str(e): # For some odd reason, the error message gets doubled up # (possibly from the re-raise), so only add extra info # if it's not already there. e.reason = ("%s| Error while fetching file %s; " "dataset fetching aborted." % ( str(e.reason), file_name)) raise finally: if local_file is not None: if not local_file.closed: local_file.close() if md5sum is not None: if (_md5_sum_file(full_name) != md5sum): raise ValueError("File %s checksum verification has failed." " Dataset fetching aborted." % full_name) return full_name def _get_dataset_descr(ds_name): module_path = os.path.dirname(os.path.abspath(__file__)) fname = ds_name try: with open(os.path.join(module_path, 'description', fname + '.rst'))\ as rst_file: descr = rst_file.read() except IOError: descr = '' if descr == '': print("Warning: Could not find dataset description.") return descr def movetree(src, dst): """Move an entire tree to another directory.
Any existing file is overwritten""" names = os.listdir(src) # Create destination dir if it does not exist if not os.path.exists(dst): os.makedirs(dst) errors = [] for name in names: srcname = os.path.join(src, name) dstname = os.path.join(dst, name) try: if os.path.isdir(srcname) and os.path.isdir(dstname): movetree(srcname, dstname) os.rmdir(srcname) else: shutil.move(srcname, dstname) except (IOError, os.error) as why: errors.append((srcname, dstname, str(why))) # catch the Error from the recursive movetree so that we can # continue with other files except Exception as err: errors.extend(err.args[0]) if errors: raise Exception(errors) def _fetch_files(data_dir, files, resume=True, mock=False, verbose=1): """Load requested dataset, downloading it if needed or requested. This function retrieves files from the hard drive or downloads them from the given urls. Note to developers: all the files will first be downloaded in a sandbox and, if everything goes well, they will be moved into the folder of the dataset. This prevents corrupting previously downloaded data. In the case of a big dataset, do not hesitate to make several calls if needed. Parameters ---------- data_dir: string Path of the data directory. Used for data storage in a specified location. files: list of (string, string, dict) List of files and their corresponding urls, with a dictionary that contains options regarding the files. E.g. (file_path, url, opt). If a file_path is not found in data_dir, as in data_dir/file_path, the download will be immediately cancelled and any downloaded files will be deleted. Options supported are: * 'move' if renaming the file or moving it to a subfolder is needed * 'uncompress' to indicate that the file is an archive * 'md5sum' to check the md5 sum of the file * 'overwrite' if the file should be re-downloaded even if it exists resume: bool, optional If true, try resuming download if possible mock: boolean, optional If true, create empty files if the file cannot be downloaded. Test use only. verbose: int, optional verbosity level (0 means no message). Returns ------- files: list of string Absolute paths of downloaded files on disk """ # There are two working directories here: # - data_dir is the destination directory of the dataset # - temp_dir is a temporary directory dedicated to this fetching call. All # files that must be downloaded will be in this directory. If a corrupted # file is found, or a file is missing, this working directory will be # deleted. files = list(files) files_pickle = cPickle.dumps([(file_, url) for file_, url, _ in files]) files_md5 = hashlib.md5(files_pickle).hexdigest() temp_dir = os.path.join(data_dir, files_md5) # Create destination dir if not os.path.exists(data_dir): os.makedirs(data_dir) # Abort flag, set in case of error abort = None files_ = [] for file_, url, opts in files: # 3 possibilities: # - the file exists in data_dir, nothing to do. # - the file does not exist: we download it into temp_dir # - the file exists in temp_dir: this can happen if an archive has been # downloaded. There is nothing to do # Target file in the data_dir target_file = os.path.join(data_dir, file_) # Target file in temp dir temp_target_file = os.path.join(temp_dir, file_) # Whether to keep existing files overwrite = opts.get('overwrite', False) if (abort is None and (overwrite or (not os.path.exists(target_file) and not os.path.exists(temp_target_file)))): # We may be in a global read-only repository. If so, we cannot # download files.
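            # A typical `files` entry handled in this loop looks like,
            # illustratively,
            # (os.path.join('archive_dir', 'img.nii.gz'), url,
            #  {'uncompress': True}): the archive is fetched into temp_dir,
            # uncompressed there, and the expected target path is checked
            # before everything is moved into data_dir.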
if not os.access(data_dir, os.W_OK): raise ValueError('Dataset files are missing but dataset' ' repository is read-only. Contact your data' ' administrator to solve the problem') if not os.path.exists(temp_dir): os.mkdir(temp_dir) md5sum = opts.get('md5sum', None) dl_file = _fetch_file(url, temp_dir, resume=resume, verbose=verbose, md5sum=md5sum, username=opts.get('username', None), password=opts.get('password', None), handlers=opts.get('handlers', []), overwrite=overwrite) if 'move' in opts: # XXX: here, move is supposed to be a dir, it can be a name move = os.path.join(temp_dir, opts['move']) move_dir = os.path.dirname(move) if not os.path.exists(move_dir): os.makedirs(move_dir) shutil.move(dl_file, move) dl_file = move if 'uncompress' in opts: try: if not mock or os.path.getsize(dl_file) != 0: _uncompress_file(dl_file, verbose=verbose) else: os.remove(dl_file) except Exception as e: abort = str(e) if (abort is None and not os.path.exists(target_file) and not os.path.exists(temp_target_file)): if not mock: warnings.warn('An error occurred while fetching %s' % file_) abort = ("Dataset has been downloaded but requested file was " "not provided:\nURL: %s\n" "Target file: %s\nDownloaded: %s" % (url, target_file, dl_file)) else: if not os.path.exists(os.path.dirname(temp_target_file)): os.makedirs(os.path.dirname(temp_target_file)) open(temp_target_file, 'w').close() if abort is not None: if os.path.exists(temp_dir): shutil.rmtree(temp_dir) raise IOError('Fetching aborted: ' + abort) files_.append(target_file) # If needed, move files from the temp directory to the final directory. if os.path.exists(temp_dir): # XXX We could move only the requested files # XXX Movetree can go wrong movetree(temp_dir, data_dir) shutil.rmtree(temp_dir) return files_ def _tree(path, pattern=None, dictionary=False): """ Return a directory tree as nested dictionaries and lists. Parameters ---------- path: string Path browsed pattern: string, optional Pattern used to filter files (see fnmatch) dictionary: boolean, optional If True, the function will return a dict instead of a list """ files = [] dirs = [] if not dictionary else {} for file_ in os.listdir(path): file_path = os.path.join(path, file_) if os.path.isdir(file_path): if not dictionary: dirs.append((file_, _tree(file_path, pattern))) else: dirs[file_] = _tree(file_path, pattern) else: if pattern is None or fnmatch.fnmatch(file_, pattern): files.append(file_path) files = sorted(files) if not dictionary: return sorted(dirs) + files if len(dirs) == 0: return files if len(files) > 0: dirs['.'] = files return dirs PK nilearn/datasets/data/__init__.py PK nilearn/datasets/data/avg152T1_brain.nii.gz [binary gzip-compressed NIfTI volume; contents omitted]
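# ---------------------------------------------------------------------------
# Illustrative sketch of how the utilities above compose inside a fetcher
# (not part of the package source; the dataset name, file name and URL are
# hypothetical).
import os
from nilearn.datasets.utils import _get_dataset_dir, _fetch_files

data_dir = _get_dataset_dir('example_dataset', data_dir=None, verbose=1)
files = [(os.path.join('archive', 'example.nii.gz'),
          'https://example.org/archive.tgz', {'uncompress': True})]
paths = _fetch_files(data_dir, files, resume=True, verbose=1)
# `paths` holds the absolute paths under data_dir once the download and
# unpacking succeed.
# ---------------------------------------------------------------------------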
+QUF|+hDlPWJ n.}g+CYAzIdSHmx%'r{Zbc:9[p}$JRtŽ&y| 3qrGi#,0TLE{}hWb|i9nR݉Vh޷ wփ;`gu3#+= [}֫ Ӟޮqzs5|DL~EK)k;U ➥SeuqĺnpO[kD!=N/$OQE,rQDe]ޫ||,J;|ά;TU:y󂧂NUo9Op:sc{i`ے5ZK9-yww|F9TKc& 6 {5te+U˽Kd'x^EX>n$ /&d\ٛH"އkG{?)xbkr`SI< 96=2rbH"|DMlt_{֛1窘'8>ubXQt3w#3m~B.nꬴR:~?PAo:oTv&{?7w-[Ja< NlO2sª"~%a1-xGuPoj1ȞlE'"3;?0Ҟ(F [s`g'i\J[<+Bd_pSn Zj|FPgdC'h_U5Vݮ 7s.|u)#O~F}{jO$>fB#23EGXZee:RX`|xq6B!B!B!B!B!B!B!#8"pi_EK?-*%| ܑ~NLV xj<&Բwߏ',-p rr^@wpNYu W0Od;`&l߹q|^K0pj.WU4P؁>/dV`:-re$f8 A|u`URui x W5J1ebB*v~>ަ|x}b+wp3lF&PDlyjbY!'綴 ӢmCaV50mJ9YGS#āxx kEzDa,W0HLT<tb ުd*33,m6|^T*(q8Ƣܜ_:&BŠZΤK+s&G1ĒPV}]LՒkw06cȯԞQAuSdOdE''dSXz(:Jy%r5F0GVHQ|P =B<{<8 ^s*a~O4n8 5Vt/n~oS~ˇߡWN0605X KUɣt  BF>%$\ տr)) whg!5ܟK>i(1 H9y-ÃX3Wa_ ;`x|Fؠf-VEsZQK 5aڂ]3@lWbKqlWxx a5 @^} ϦzNUJ?YEE1ʤB Bp:@.[: ݣm(UGe5$OqC0Jtlİ\4e dʸ@?9^^Pijnl?W&שݸ\d)$8`w\}hw}u6jƽnʑj+x.92^TUMvNpW]3 `~z*)j*Tz8s>pOW =S`>m GG#W -A)a=#) njzNs1U^v:0̃-Y~4j5L-*J$QYdS&ǝpCVW.dx_:lYWJztmy7R@9=\Gg30Pn$QU: -p$鲲laVvab6U]݀[KudkøZUkqwww@pw%www]'_Η;wn$y{U]]]QH6yW:/US5vQ#Ֆ=*uZjJnGzd6·b&T*ZXTRs%oOEF4FWoh.w#ɫmTesh[5T$oR^E-/y)8vae=/b1f~"P;ET$LB\2\.'sTTfVohoWܛluC3S4 c>At="ɚlk%q$m 7E ,Yy+m~lA$iJ!#\/bDw,ztɼ/Pk4{^%d0r쉝:Tأ݅{;OLݴICd ׏>ֲ|*6Y~fr3 mvzO٩lȿcX=#1$3g{vM湱[TjE!}&zK֯0MMo)TWx&^ՙNhFAs6yqG(V94-k.1fǘRHGL[rʫfWZ={T C.b:' =j&_Tᬱ5fkkE%_k純EFxY**aVԫ @}!@k{v[փ@{1h&ݼl #XP S?^lJ;C.5X+^Bo<}zvUWlw[GFsw:0 6¬ԈUkuB ^QVy?Yawtw8SoU{/eZ=/g/dNOgUoR=Wc$h?8`4fa"g V J=T8ʩUTmUWWyh3|`9%sO{wJV+\jrg(/')y[ ZZht>ghb{#%<}Vv69̷F3{h]ո`&wN9 \@FkкTF5BuU+~UEjƗVmkYɱqZih~tgo`v/h|8 zƸ_} |˝VT+d:%U*US+ 2Ĭyk V9is;Cg>c)~Ъ#v# 'D=g N k-"N;?X7~}59fZ=ȟQꢼ(h4%κ}/d1Rh}2(m q6kbES{Xkx6%Fz\9tB43pUgm_żMY*Y[+2j!KV=!Rt愝5f\b T֎X=i>chQn|,/&2OѠI4# g)rT#w㼎κ**(ڑS9'46>E4x^h/v@{ҍL^Q>nl1i /7]=%]p R, ܎},MT ]PuT9b;jyك/ߚǗK$&p]n.vտUP֢ OTy>o@4Ҥ3Uwl"7meqr2ƿ#jFB{kȵ0-Kyhɱ~Bݒ) <.$L‘4Xux wtJO=4&ًE@(/΄dKYEYO&ϨV(Hڿю+*7!|j׶") GV|9[`EyLԢK^YrQ4t6U3׳0C.TQVʊtY,w.#XU-Jhׅ_^L;ѡx o~7r >tXWZ[*`zWfoq*ӻ;c?Ws'>v`m+a34K|G{L<>w bK_"9[ug^z7Yv_)4&qKZA5pZzv%h}rm$&Lj-8̩)xdt1:rK1+3XխHC6lX- NCCd=,i #ayS']+Қyr3:\f܆V7fJ4U&`и`|FIbN6 p'\Ǟnn?mp8z#?ZT[k5=>޹{o|A9Z':a"51/<:)^Z| -?AW^RAWTN?.hn[S|jb' slq,BrFݻj"&G f -  .~\ͣ"']7idh~0q%>&TnK*V>[WCq n')N%)E-<2X6WH#ph+҉-|#{娖6 ۹/dLDmV 땳FE'6_*aYk"@2ѓd vcFR56ݝ8U25 D~Eu(\@ Trr ɿwFgVwsЎFhȾt ]jX:^ E3!ʢ;DwucT{qd[:ݡ86khlh[j0+XNǩ$ fnށa*y*vʩbM , 9?Hh ?7Zu59f+߱;[FW;?fw*}mv )1Y'ױ]{?C};93}??~¶e3w1s\3?ȷ?*爈wd#SxvJlJb'ٮ~41nR"(u+yWggײ| WPֈ~oy]/OR ~}g>sbQ LWS/jDq4s|Wn+J+l'S9ts~ρ,T2+޺f^ zQ0+R~}aOi(oq*YL%Uk#~DWmfFBcY\$~'8Li*0La1'T| cCMe-uPmPpQ jsDW?< 8wF{_d" ؋?8B؅)i5%cYIW,]-zYd b&wGoa6Ou!LPUMatB?b\ x?pI*LE=ţjdKSPW-u_}+Cu6=d9 l3),g6&J.R< cڀ`ջSVA0fxv#L k2ʣjtjA| EU t]ԉQ|1+-a47A* p އl b~>PZNr1z}i 54>Bk0TymȂM:|X'd0)mVb)T}8b)RS+Ca(#$K=uq0oxO`2Ɣ=H/zP 8{7Tۉg(bo8넠4*& Z"܌)HqNٸ WstQKjqB_Eu!l5v`g,P7DG{6\G&;Paѷ#~_Bq\7t?4*%\C5řUg^\bx %ũ݃w|?"@ j77I" E梨A\Sj*,xý^WV񮲳کfj|M).į:< p0Qe!0; Ñ+w٣XQ%b^ S~FRJyӽ4>JyU9u#o]\:*|!:}Ɠb /pv'pm&[ \ ":.2yfzz#ZRPIFKhIdvى`g-P Sc عp:W(;Z"6$4Vw\&T 'UL* QT:LSuqR]RiUbyKONb`.2(p N12G/`h͒UlQ2ry87dɑjVB&Ra9BL4a_LEn='q8!Zg>Cp q(,=uT>a|9U{TYԁ<:zZͣ )!bVOɰgW0Ƌl.n48Ou]4I(Ctΐg$9ZF"uVUGlH$S!{[0FhtZ| lja"*$ELFXA({IqSY=k^\gpTѽ=ꩽZF*&MZlT|vYM攔C9(w<_ƾqX}fpz9Zj}iU]'PDUcon95WfT[UY/+j܄x3L|}dar{j<ŭX2/Ei Nul+w{FiZ'v{y,1 !}ݨ0'f2\R=2YOiE:ŖsKe' Y^}jmwv:} InTwYhHȃp^ =*]Њ#1ﯣ˝B5o6TCrɗ nw}ͤo5,M܁:kuӺ/?/wǮwDAy8K( (V`,aMiS2)eGiłB[h!f2뤝MHl4\|TrwY9>v78lX$8֤Zg:%A^ї:*B,#haJ*Do1_c;ezu7=NQDB~]p޻YEg,UiM q=%kK2c涁_)΀گ߅}<ډsv{u;sӟߓY^fWwi~'%{pQ< {lYS#N8=O$֋qe\]Ap|YFk;{}nrZ%{֝s:i8L|'O+f-lu@<."jf)1f$NN'[>d۝-sSx>چ}̺b< ;bW3Xew>Xݻ< y$5T}u@]VyӋ_x>l0֮@p/l-#Yw牽ɹ&X =S!8밨 ~ 5D}nY_YWnQQ?⪥ow* Zsy*DO Du+XIOx3y*.] 
+^%d]F(/YJwmQ:8iTP+]FuRk Gʵj<)R#|N6 >rD.jgfoa>Ώ"?-pDS k񌬿Z +,+B*fgr̦޹aS?b ^k qh*=W2WkNNnnS>1UuU輸;)yCI'𣑀j"ZwͲz!ת%J%V,;|>Vպwv4VcTHPIe{o%^wg.&VG{uU: 20U/6A1O#!eN C9VGsUSՕO'YMyuLTedukrCf)%G֊Uƕ5{.RUo?9xg9yRaՏؑ#=a+6U\~'i-o[6RdQx̻B5*#4Ϋ/'#접XQWyV)wgS+:/krڬcmv엹˘ab["3Sjvzx%'r,¤+6gvv g# &Lz|]?WL5'B|;A+q;h\ ؼ. mep2At8./c%ֱ[@ߠްȰan0.X{J<#Z}0H&OX "KY'ĵO:ʹ)*..pwݟ#Ni͓MV:c/.lzwcu|a4cxV.l]+3pC{{^@>{h(ϫ< :̴FصbͦJ)'׬SƘmV*v 5ӳÁv8h,d]bv0ԋDmVr:抰c1ueЗr5N4Ȃy1+uּbq3r'꺛EwLƗXk_vfFgm)pQ},mըw}E)(>p/X/uHA;kY-rvW8iq`-w/@qCBAZgs.de94QE>j4=q"*cWX+/^iЍfgf15N7fTۭdWnkXJkV7{U tz1#""""""""""""""""""""""""""""""""""vG{i70jzs9,s?#uaWx=;Tmh'}v}]rO.ȆJnVj'6 ~4P{Bx 'xG/yw{[;":+(68bKJ'OlvR Me1 Cb3?zWS@S %Řq}pI#X ݠHkj)JGjT4K@?[m6p QtjE~}iBSb^J:R^`@(y*0/X%ܡ(O16Ƶ83!3x3uEۚ^7@1S,ķ"r0ja6=7c8Z4d%NJnA,8)<Ex"JA ZHEUPB!%\Ct1Zz~(ʆQ8?}vGKqzE i" 41xE#xcʯgG7qNîb-O:|/jZH4 2X2Ypv'{;]ؘVR+D3@stԂY*/s|M)FLrBf O!N@s@%>" N5bAjϩyRAa"1CuO){2JPݐa*CuMok܃ue_]*^.ү|F(&@QDJHFy<׬/:$K|p a>v,+S1 |r3ؓH)PjPR8,ʼnHOx]dxF-mG!kz,{qQ+yWRAUG.VZ*,)Pk)p0e9gO"3xC?Np˺"M5JK5_'j$oyH;ɾwK6XXɖ4mLNZ Ef%8/Z_Z's1sB{ҏAmd38O8u饘iD3;?ﲛtW"X[yE";"%bA`,7[*zw LL8m M;vN9RE wԟ2~lca-d9Q~砘'tAk4Y9 0@<ڳvp7f| Z'FQ5'ПSivo=P$jE/E#^𵢺plo]~]=Rͥ{ •Hl[8va:(**1]rŵOA޹QlHK hL#l[d - ݷ@Ƌ|(CZ@t#>YVio5ýVjba/ŮX"a&'J:J 8B/cHG;sȉ"ڼ(a'O}R*"HbKxGUSYD=QJ/z+ )]|0s˸+Fv3-ť+ȳTt2\')t37Xn7{uc7^4%Ĩ%UQ3Fmbs^uf# i`ؠ_sVCR:o!)VNPT=_Qw5U?jXKuĔtk8qNpp^w]Z(j[Z;ˣ"c],ޕ/]%fNC(kyTzUy3{w_#Tt.n_<A;#He)V=UOlz@{FyR/9Xƥp/Y(WENj<‡En/L F;dPVٽF&ZSw˰f7+弴p^Qmj̮ga_|#7r˿Pe$uVm/TS(~eMJe[ηG4Jlku:hRwJCfbk_3dCT47:e%ϮkmSkO|vS ;=M~y?!uR] Bd[<~SIWYD>ʹ[Q ~? {1{U^,ZFJT}YJ1STiմceﴪ#G0aYۅCVk>Vw#_ =+)? c"Q^uZQôY>1o tralYpaIyZ°ܪdp lumjy26Ts0!?EMBL[D#vheU}~*Y6b -O0,B1j]J1i-7[q6k=:kgspvs*\Ň>*쁩*]hs(ߐ@$&2"hޠv1D~'">`a,/{h9<[L͈1+f.3]v8 +PK^ U?EwF/h;ܡsˍ0 F:4c:9@O]wxV5UVʧ`%[m I}`".˲[sspc, >NR޷  o!7hկ!⼛N\'vwn Fx"8KT`PP8 ] >HٛM?rO1{IU?,82 2!>9Y~VkeE>̔~O4@J|?>`P8  Vn^ s#"`Z?~go$ UH[]?nsSo;u؊ͧrZM3oDž8sB]Zy#r"O.]:O=ٯ({CQ.8ɳY?tyWȰ J}4kMBB4'~7鲪g hJT]9 8Lσ*i+J7lu>vfN7)̣dC8X`P; 2V8sHFgP{V<s]GihQ8ΉckUn68 `[ub7)i(7_]|> U`}?>7f9'VMqܚbv\5:Xy ^0K' x}g bf_EH+1 9cן~ ~g5+[3=^9IVX43wh"5W^:wEQBp,1ihųD離`PvۧNcsҸ\kYSpq*QN~)zߘ9!OIfr/8Vxp}]N`K3<2,sî&d]2xwנoPAϻ{NBW􌉾b8$2C54M{Q)+㝍{)—fRqH{̟ nfVMhY@37}ռYdRt>ڈ&39Eys]klA#5WAXaFsXAa|Úz=1l=8oV$' <9aDZwƧb1+8ɿ}#-O`+k?vx_3xe pSn1{DžZ#c1/ZP [$^Ǭͦk]7F +9kܶxR:`QqW׬!Vxk.l-ָiB,wur~8-tFmMF;|=uXFc,yZ.c3K_=s상 A3Y\;vm|mu;]fyW^ˎ "&`O\d?rg<0 ut0쟜'ݑ[0( ISs9V5o.Q2U}L!~cQŸ%椄#l6Ri 5rzLsſg=+!6߆u )U$bˡ2>S͚?& 쒖^K!dOnS,;U;8KUuu[!O#*`sR‡qncnC~ӈ\_~YTfrxTeJ7Ĺq[̑QHvM';jkE\y~c E>ӿ{~kl=*ݨpc1/C~X~|?Fo|sQtf{ .bB~ p(p +0>>lyCl+;Eߧ!!L_{SFcYjFn\o##G8p#G8p#G8K*~eK;Nm6}p;I~FtVdf`֑%`X Vmfm|.nڶ7+'kAui2yꑕ %" dWXǷw2'&7T~Χ8<$ڲ,H?)o/o0 ţQ/s/t|D5BY< ;Y}koog5JĤߩ+ta L}s~N-vB6T ԁP|De9P>ITTXv*ϫ!SXYlA iQ9zC1v sb|k2SKBÎ{"YG.;e+Yrl+¾|vi> q) U5t.DSOq1ow$gsȁ7#ٲ!ʧ4:D6Mg!;cd۝?' 
' 8ԛ1 טcA#^nG`uEjIIdOÉ\rYa#pčXFfC)N4QS^^gD"i 6bh sC_{}ٱ><*Ҳ.ךXq%` Ev\x,:b4 ca2䃑5֔Fmu& k =+=4# r|D:-!-k[Ȭ?Kl.fF42ݠH.} I1UN-'؋SmI/&%i(2jʊ2bΣvԜyJLr'qd"_b;,Zo&`Y٬~vaYSQ Z22oh+夬BrYii9 }bWj_3Vn#JEOZNdk_.8Ovʰ/o/7fN['֯v94%1LH5 osxʢ_JItB2N㫽$)2?7elhF2U,nobim2}eݝ7VYqj;v[xNG}J1fcb?N/1:~%#ʫ4SRug_Pgcj\iTg =1 73Tv=یς{റ_|վ*<Ũ&>18yalGӨѯA1sN=Tst*}\-1PuTԛ-5ssa3'QK @>op"I,&XǦ~ծEK1:FwB@I*g,X 0=uT%z_\fѣRUX k"˨*Z\wp P+T@e4O/3yxCW֒5p;EXJj2I88BxqhJ~QŔ[( ט^9a)x+-b9q+@&笂|ǃV}EWi.L, y!6_ 8֢ zYYf:0wcq㥏Abz_y/QM(Vr~"?'2vA0?V_mJ̸>w'RTGSݩ:.=y5Q$QAM11%%I&C#a@fG|3{:& ) ؆z9xqh͖*:,@e3ck >;cPQdix"p(.BE'B3s+L'hKBC>Y ~`a&sPJ]ER;˜r6*a<ɢa&x8r.dv_WV+)VCK9YEwE}ށLJpWZd?1$Cdv&]p԰)G^Z4\8jdd8#،mVC9o2HT 7;TݤSɁo(c/;M5y"fc<u"lޚD{ 5V\x M:f~ z!Y8IG(:2޷ȓz=2x G$MM&V~9],c#k{TDKޡj (nO.hs ?=ʩɛBsP@7L//]4[$lp>`W=/ZD*z^wZL[1 s>*֯;dj\d*T]A՞;ìP>ʈTl3/C2*s9\m5J_-"<:*F=~\>ZF됞sz/z:!l oKTj(Cntgnk}|f_&7zfjD]SXJݞn7 ߉b9| =w}hnAG&NOWo<x8?0ڨHSzS-Oi,%CR[qK)0=;ȟyE7XL\pwA>*ȼWlHǖ"KqIާvoR7O]]ֻtMs)ٗӪ$wAd G 6PGv{ wmcU׬NzJ)h!>mF{HL*>V۷yޮخ}gu:]@;O%>6.d7( 9E"1=]Jgؕ^gUb^+e,4 :gEi? VrZ#苪W3 twYnj>W?i̇&-cy_.ΟPգUSZܵBJ:DI$t7.|itl=h?/^{5鴺>NїѫOg5v1ȥg֮vWT낺)>aF < L3Άv׸ZL [`j-+@m8Ҹɑ u=7mS7#K;먨T:S}3~Źe溔NУVe|6*C5 3f֦k7.>:ج}XNT0_[e}/|`4>d~B^U[OfO;,!2>%E1$r=M'E-Y;>#rUȊe?A@gMj(]W:qQ0W@>|Sxkm$ꊮnAB^%iqM;_b_JLĩT^5q藸 =C}j;C8}|kB)9ީZw!=GDG Y <<}|+=缥|<ͽ=K"0y!0X %NĠ:ȑjFNy`*Ep 3.g661!746!r~lx@&Cz|&pmި[0Qt0k$:8 1ξ#ZG hg\&, q>=e~ k(M43'*o 66G_:e5dQ``a vlhh.nh7xޤxQdY=Mqem~a4п Mz%84;8)b"oǪptٯE:OMѼ̋LVTLF8a&qZ瑾",3˪_fuv apM`RXz[xLcs52>9x6Z:.z|+fcFC#}?L%}܎#"_J/e8vbЇu~JÓX~Dx߲VGS:ߝ؊F}gp8+0nsQYSD+i(Cҍ\QS:.ED2Y}. a;JÞuRRYT8)G*Q uSgnk.X0R H~H~|t6n5 S?QȪV,d{8{c/%vTy(/$w?( ?'+ړWݥ?,?_Adcj6!5y +ò؃aH !5{~S~w+b@./G8p#G8p#G8}cp^gK҉a:{N*'忽'@nj׷eNEg[+ҰJ,k|3{hߴv;#N}ynߴ˜!Kog;=P7a?L WVuhuWndž%oO\l8K]v:KDbNV6?a1&RԀRm I߼q/yoU%V([͎,:º;Q6V_,vXjJu :Z< M`BK#).- VA$Q7bT,6 rCJ(R8""BE19n0Y,5{=;םiau:+E8 ދ4j>#|'PFGN.h ~x$PD@!x66a7uw lOTgXR@6(7c |,5m [hafu/1Fym0r1yJ+nr-XgP$"m a,픔@rWQgb{F_|KDkݚsDQJcTKn*J*4~.pŒ?r'{l $;y >Yz瘵.jf=<ūG,OIw9[2,.NhWEx( GcEh?%eà#T.sMd uۛ|}uߴ쮵Zk_wn;iX9FrA61]!ȱtb>s0JU'8C&>b:HC9^휒Nkg4V5q_27'w7*m_uyCe*&lJRQ=LkRrjAi2k#6 @XI{=ծiǴ5'Whs'h[)$c:E_>C:d4[/%QC<"; ¹x `=zHfŽXS,Z`k7?^ceYۿAu}0;}ґFc/=eQQFV4=6E]Ia2$'ƑP3sup22z0L}Nfv5eIMN{v;u:YS'+[bvAJӥSSN9M54*D 8F]a:+J;h#Q`XMBWXt1D$)yW8LOK / ք;ʐ):n3(,Q4㝶*z^nzO bL|i£b}cG=.PX *U{:HtަƟb<,<\cEON6պjª, ;E-#:Prʦ UYKHuEƑq >W4K5X y/u1kgO1MJJ/fvAJ.*샻\VR9YOGK3%e=c>Bq9ߨz},!f&U5 4tS$Am# Ͽ#'@*|rQ]v]*\4Ay(|H+(*- EJH2EYŷTHWuۨL$fb!¼0Lh4)-)'Iz< ffxjjD&|b8F82ETRuPgC,fi%^BtP%&Uge2hԲBiU%TUC{wdY$W%H^ͧc^* S{#' WLoWA.?ʄ(MxSGtUCTDmzyZR7eeYꂬNSHo?13Jq TQ{_?6S/ꐯ[ˀڪ/R|XϣRqG /&ꈜꞺޭm `o[ͩcTTucSX +7ʨ-,`a>5z(ɿ()f㽞nkk.wǩ ڎtD]ӓt"Q[:=;)zk]lޣʋw3G?EΕ̆e0Nc?k5wm>AsWZQ4*~psJ*˸8\K]TJO4&ӭMO,έ:,tPP99Oc([6x2_E.xSY.w(eQRH & ,7km[ͥ|%a\@\r_ KuDЏχ;fq;漦T܇@M>̊iS@. ^wFԘ`[N'K*0t@ Mpg`[x_ QQ׬V2Aa'B^Ǡ'uB@f{ƙ7Z U^ybRIegV(`r`f9G.T;݁A. 
lZ>셔0O`p[[ǚnd i}`޴7F<a2,T_UC=^)Z.vzF*SyVfDq`NyhoŒL+>Nb}ΞYiczx.X}U|x I1bur" njfUWŇNx`8)v11nB7|?:2Ѕo+w5L}g;q`X'4y````O%$v'`4_ٝnt'WfN(R1,?aX]:FKyRbYa)*wK;Os]s<ӎbzmh4xjRjDJ/ O#jO[nMҰJkRcRTk@95FS]+0׭kC#Ep=vz'tʤ!tSF'@#]MIU>?RH\[Ϥ.tM&u3%ntLN!/TD[ 0̵ VA fV`lݷ[ 1F,\9d%B!xYEL"?/6Auj$oq*`rr{qʊIJ 4'jor*6 R=eEgcg|=w}B^NK\[E.\g[xQl/DuF",Jlԧ8.㵭yά6řꢛ@/p=O}p09*!G~;қ=E**yg3SzXQ S=Ii9)!V_%Sq8&FcpȎU }Nx$XX\;yiy͟KV)8;b5zniY:tœQlKKOkd;T{dz` gaG2b"{| -k~Z-:~/џ{VS>GHEO&HOT;E]n|k?tG8p#G8p#G8ۼX*^ +üx{97SԑI0v-4$I!wjj|"+^QKD>G*% ӻcq9ӬVN{Sr}DZm!2]fN[>B|]N'Uz[ke^~77!u 9i3 {†9`cl< )YeS=|1KW1 hj%.wD|\p,,|h_(h(-!Ltڲy:6C%*n<_hm2_7~5d}YXw>9cr;UX+6b|&_*BwI(= tR/xv#ȁ դʔa5 l&[oV]3w_QrȬ%kJ*NV ekYdIzDLх/0;ԇS%ԃg9'6239MKEvtN wr, ,B)l$棚,/XX xX12ԗZSr+<':6gu!vo^nf|dSy'*%$S[Mj:86x K9Ka 0| fݾx1筩;,e\y3ffu:qm"G1 ab%jybIĶx l#5rVhw"Y=Xʊ|d7nh2 `JF0Wa ޣp7#]"%E5c±o}f"_d ~g's':sY>K2l>_+Kэ#m&+eӜ lbx( 3| n, G+v*8AFtvQ>)Z ["ƥtnb*P)NqTJ>ǜCe f3[y&V8@7HBdsU@BKb-ZJVˊ~pd؜|ETFeW6mXlUFޢӘ8B4n385A%ph)EU)|銼͸WO<ү c͜1ixë$4c8b܉h ]dĮ4HVT𚨝@OTvCCG*.;$ ]`] GjzF` (+ iIpQ䖔`ay.Hu)GfZ{܏e!!'=pO4IVCtKvyijj&O xEխ ;x'։bҝ*ٺqs2)ݝݡ wiqw]~7/b4숮.KվkV̼0Fג&S"JG>f Ű+z?tUjaFyicY":b/ȼ2Xo#nU8'c|2 {XC)&~ bI@9.$åx21w3zrB7XfHlFGrcmWbG0h0LvB{'))G`>pu$WT@\3tV$R$73~^P_OUT+R[Z{|>Aa,gY-6*dN,EKqXMAPW" Y0,l~#f~gk4T݂j:{Up2~z{LTdcaheC,cWD=ݗ;v"';? 驮%d_W;ZQTuY~L;(1U#d47uRA*ixV[kx N[L82Nt.wdG#<,-]Frl=әmt@ G1EtG~&xCD-]/Z>vwq&Z->ȪnS~GQq8ب \z\sd2lP^E:1CD,9ɗk'c!2w?_KbcJIuyqܡj<"jt :JTK#t9GW:dg/f2^\mƅUЍ , - "R6'1Iq)`~/s> <~%*|,(4BƖD&qGfeYܲ^9TV vTsje+j'Bɇ8o 4ft%mtywX)/ tp5 &R2-e ?猍ExFׅ|\zg{u1 va =L?8FTZ3ƐY4QA$eRY@S9c8/]]<%s7SV? lu돔~5(>\sx 4KV^z-܄f̺+}FߥP'&⺾խ<<YOeRV'.d ^cY s[-꺬"6a;pr%k/V}V|??:{A./fu0%ܜBT]N5eBY .ZQT5rDd uLT &|jb$E3' B1ޠ*B/S1yPw,̿PόY[fmm~j=cg xmpӧw㧋Ʒ۰osZyYyTе` J>ЧH uzs.cZ/j-M f+8-o*N4H{^PJ/8d};rWp-ă{<4 Ok&vm ] EUBuuRpx`Z 91aa]wyT8۠s.8qR5Qvxk^f[!7d/8GA(XE ? ihUf7YVq[6,xx _SUCW V3$x+.S~(}泽O@ՃܫTw{KlꭔONnE/a;|bWeCee+zb-5p#qoy+lӡI>do u*S&yl?;A6319{/@rtF`́):􂛼6oI  >  >:VxcܾX?{E'[d7_0UͻMxCڥ&W`Qw_vڼ $1uaGҫ$ﶗ;;8,8=,,ר0wj$C29xXThղkkXwMbѩg nЊؿtGhkƳ#'Nq;Q]JA_ģ2d !3'HK^ȌJe%tD/f4ep6 ;&2nE%/q 1IKM ̮Z0F` Éz셏,%_bxZ~۞:>${uh;sY,YhjSY<71Y7qBS8+ HL*bR/^︛wu,'=a,UUaz$_;Mf!oW!Ymo#1r߅B!V2LUyAcOnI},6)uT+72[wJQP' ĢT'jN#I5˺X_(Z_RL5W9b K˳v!̬[n +Ava3!H'}ֳ@/ՇM|zA~˙9`!YŜ_+,q5P6 m/iNU+U>UM!P"jUT' {(=϶wϺ;cuX$d=Jy5XXx>f 7_*wHāAj_'u=oC Ӄ^*ea(CgoJNv.b(;>,rr {c贐Pl6 PPWLߊv/ 0ڋhC}p5kAGZ*&Tl>')6Џ* ~*ED!1^J0NJr_n].b.Ԁf[j(ںa5KIT4zY~juGVS6XTԤq =#~(x8ț0rF**\wVDZd58B?Vpq R+VdXQ Vu#KyD8;(d,W=^'T{d狍N.(+R*@?hy!+r4sxUՔÍR]"[T~]DŐ;:Y)0U諟\/b4 FK("v⤮b2 }<Όu-S4(ep[x{x&;rHG 6?3}E}sBFWZ!T CQ.|,C5ƾ,}:\%|'s{ !4{3~oahQ>[X)@.Zj{UKrv*mCw@Q{9}(AE\aT$Xvo~Oq6T:(&kF @O5~꾪 bvPOc+WzWmFtRMd1< aNBND Z\fo]bUˎ5bQw357HE s"+"x-kc7ͻWg2ƙUL*W^} w؏3:ѯ:-Si^d%Zk\`Y DTs:oe'S3EUgƋuݤ:ma;H&򦹵Vfʶbm´nZD<U>zwDL'([hw#O|UhtWəCew3}xGWrw2+~1,+UkQ 0[qnENBt3qO1XOտ{ |JRFHbՅ ~҅tсAan[j%s^1т <2vT,'-\fpl߫j g͟+Z'#CG)&NdbwbK6%j¯l:6 pmz V^N%;XyeF{|={|==lϴ 7b wc'nE{kӜaް g[{i%6<2L#΂G9wxg=zbL;ft.1&rjz&yֆXi>CI;ylBX]S֩dux qx"dJ#S߸z̗B? T')kŞ㌱4NR{ ;x,,,SebU[SU=D(:$7l_xPM<}%ǫl3˞gU6X 'HEua",$ʟd%Kf'P Hyi'习V;;(%Ŧ4 7f^GmXsgt|jOTgi"ҢxI#i1;@ow+'/|_̢xH[h.fJ:}M"g?:F'p0Dyq6qQS\&.Ͽ&QmDգ+H͇0ź8VdYV?mN.@sH鎨!7bh+ Oߨ0kp5>YyrNWډocfgHTaSg"얳)ͼlWe[/0xfޕu8l_Eћ"qqo:+X6n*c*4aTNn6Oޘð T|K!!Pl3Ɇ, 4֦Esq6໙d2 sM8hZ~v: AI(ǭ|l p˗1u=+7&46˩U3pYe`I!t|" 2aq`4q,>/kQ<>êԜS<}¾պ.SM-X!8E Oya~`C^bS? 
x8y #qC3x1z0βm|5tPi5ţ|3~Pr2p~?b.ZHs5pj/ \]) =}T|d>l@p̍a<\P`pмxCSsL{ovjWAIr{ zQ G&X"Ä2F9Wxa~9sfYd lo$4v(F|SI$D,38_8VMEqMǦ Q\ٸ /3qւEB Z!BU_If 33!Oj23?RzF}-襖FdyEGgPr3a*?Wى`e{۪>!2v~F"*MTp5z4ÌBalt 3\oX˼o  +** 3AEsUK {y/@?|' .jj '0whآ;4~f!> i+M0%pm4~Wcny"S9 %@-h EbS+5L,bqFw©KM8ʣMLDf5V̸ Bq43Zǒ@Lj$̫+B؁S}-^.>G0(k,m%gTc^u0Vmu\e:k1IomD\q^gYpm K.{$sL2o˨em} x~E-u` XHgiV4|0( ZYGmTEDsj aL G˜]~&NjYE2=+T[䑝{N\KF75U'aau]"S{ѝu q LwM,Zixo$5p.a+\ {tK?ʩL2Bn?7c\k} \Sȋ秉⢁a#SEMMqanb^Ay@TW1jx9W0Q?C>#Fx9ނ,fVQdgDsKU D%yTW>MIh tFֳņ;cNQYmuq{,)*P1"EP-W"fH!Bz:hԂ^`|hX* ]L6]i'~M=z {p]tHC1^GP| xirAi;hj;U'u8%Ry*vYey4K1XqMtLW$hU;f;UTUeS3SK7hX+n`}ﹷKwbpl! =7vpRr7P7U%HWT77J|eZp p@yEº \rK YSR F oT1+,TA;G' `2 =5YJ8/LdK}]:{W /UB7=tx %oY|%n1-݆h(Ѻ /a3agՂ+`|ħP{7Ľ^-ne ^ yF@m{8/La`(#KB|N}/SY#+wB >?3Fk|aq8n ӅEe6Y}*De[\VMdvQc_Q}z!Ujp` NM0;z< YҾ!sCʆyЯ灸,%n_&\)y8+6鹹SV`C!/%bgo:͢bk/Ħ2zǛyu 99! lM9d=_5F7!6zwԻ9#,׸i'r4 L}?WP ,^wս rک4aZ%|)˕á%2(5姛wۺt_Oǥ\X ^γTVЊ S ꢛ|zANK1fx~Oz2〙1O~[<$bj&[y^4- a'|_ya_ĦofOY8G~UVdCq6C`v$?r-_hv[}--!YIHAZMiq!^ kWS7ܠ̆R6߉Ͳuss./Ú8OӉD3ȅeqM h7¦l̶b>k>qɚ:uy»|ijOpα 8be))YO ygӊb<'4}0UxŃ9⩺) v[y}C9{}C;³X!HEd ]l}VS{'G=FT+}V_L 41򼘊݌AVGrĵIUqȳb*nms@6E4yi!Jcy>9bZ9pq} X*KeNId_ejl{U}ċy?Y:X` -9(.FT:\sqb a` ֮ɯe[1cfv* éI.yZq06Lcz/*He"J?DQDd:3~fgzYG/lXmxMP "7).rUL;/bQV.Ɩmp3 ;_QM. E]A1~-U7Zn%؁_}Obal@;ļܠ^n{^~H0:|;MBY𵮥K4P}#UџJ`-v w%l_̗'%nu6'檱eniFΣ:7K>%H~7cVN?R4N[+>;쒰ވ=2w뺳Tx]fSK+g:DNYxJtqoNEco;_<0s2nfT8l@['.0%`2 齾UPn#d\BaB}C#E2fI!f'cX/d9`;?y|uLP#(5Yr;UҖrރ.Ω\w= Fl"#N'U?(q'KMҩݩ,a򺈤*X qRd>>CvMyzu ٛ`Ü%(ҰvWQEZ ~d|$(v Wx2j{Vwmq‹l%rD܁٬&{ⴴE;bSwDww:cތp<z9SCGY:wAR0Mrbx{Yn{kURCqOgPxR<7恉.,@riqFRf7hH&VBUv.OD+<\ ee'(F`,H)+~Gu^ 8kq na/D6MԻVpn{Wm 2 Si+*U<2ZaJ:xo} 9c;O-Nm礽E40]TV? F fZzlzɻ3 n >PJ.S#ձ*N*q38|1 XToU=xˆ0FlFv"D"sP%U5tf^Y1Pk}vn}#٬WyuO v.9z^S=o;Y4Vd;/88mXUb ׅ?ߵԗ~⌃Dft_sxnXG= v@EnLu(\;{T R= V58W$[UUԿֲb9S)3~/z pDZaCzVkt`EAMkw8T׹T4+o]pB7֩z/5Ӊ1E3j;_o)3 ˄;Jߖ$~Y|orVv sTL6׏ j67p̬C/DZWzUl"_%G|,H!3v`XL%(c* !3Xr BmcP2K") XQ}O}={|={{"X`fde g24rc+6o{V.<[eUwbS%HB$Pq?y9;EDGO9sYlEqL]$,?):(jHY%JP'XUt-}DWTTy WYZ!(. WSUTKD)lĹf?Nj's-(A裺]tv7xљqK5PNIq5?`(q({Þ*QMQkP%U^IUtSE';VByC#SjEcQDF'2_:xTetFlsdCyK'XVNT).f$>huRU1GPSMPy@lո;; tX18**wQG i&p3ZUyuETtMz|rxMq*Ӝ07炟,+K0^rN=اi[Md`oEl FE,B>:*B]ITEI*qarޙj*5r9Zh|<,AUR|Z}#>cOT6)&>b."*sb'%VʩjZYN>v`%NEM.稾NSH-|2NYobcOJjtrAv>5dY6'ܪhxwv}'[2j<RM$6uIeDjw?H^7P"ORt\H5 Y-/W,?_#x#g͜dVksfj,~ q8& x]E^1b,_% uY ֪g@V#eqex?bO;x JN6h 6c Y\506rrdjgW@y?b'$ZR?2!PlXIJt<'w9<gcF%*QV]x{qQ7[R|૩VV;Zf9#.%l͓'x:ip=B3Y076sMzMF꓊PE`+ KZ28LfyqQ(6=qO1NR8Oq K #f,SELD'l*>&Kdt(Wً1Y(\$HI#/w4"3Nb!B ):Ӱd3#?u8x)Jԍ5ME? j'5"TRArȫ,UGJUZK7 ըi# HJ29@Fa۰ cBlLj4ڨ:0j3-1 f{f^E,ErKy-bjǏ_Xg'Yr"AUKEA좚#/-7lD-e^WH0|ZaoX?@<vj4͠zп2?̀yt2}8w#/Ej*v`תr^`#=\M%-> 濱$X%G.Y/9gIM 8!'}B4:bnTMLMtRF$wt}U0H脬wcJMdQTـq2>#O܁5BHD)v(1HSE&3|s_h/&| Wߚe_}BkPa5S$6>Sp1]f.Nx?DQ 7c ['ajQ: : aP9b԰de3zݦn^uS4 sNe>[C%D /7\yR)h(*cɤ*&Rũ+ATMܳ*Ȍxgp8(n*.wɸ18>WSoѹu{buퟝ"N$8`Exy/`ػՖ=s['V 2o~RSi!~35pW[@Y,kTsޙ)i m]hܽݡi)(Fww̝y3ͥY8kPNdf߷32ruBPcHEV]`òM;"=}k`K`{B+v:phй3<3^߶ʊCfhN(2z+|):8nYk9+0̠Ib|2Y̡f GU\g5LsV]Ibͪ:|gC:^ hN__?;>겋NQ.epڠ7r..03ϕ ӱ)><-|Z,5ox6t}n߷NW؟*7ܷ/~@}ɴ}N ]"V=Oݝ|3.Cȯcf#VݜZFTեݘk~2N*qygE+w#^!.['ҟ)͛RbH(|/+ڙCXjp@ 0#:$r'g^J/!p|ׯ3QEE~s2=~.7vQnZD<9N3(kes'&55U0ZB/#Dg\ o '̯˺ݷ/,k +>)d,({P‸2zۭˈmT>P1"+_ $zL,z!yyYVijʒEFuSN嬠Q;lNrSyW^%_*̟E>ɄtN7c WmnzGJ0Ma~v%23s@YgܤKh,r|pSTEa8VUޣn(P}ܣ^```.2F"#>"qv~J_1OV:wkw_zSll:mJ&ٓXmVƓ5BD*x%֋OblVjd4ÃP 4Ow_ .^w+;^'vjRɨ~r^ګ]pOkTl=a!;G0 d5YNn3/"H@ZN]ЬWo]xYVcF~V>$y^-^Pu3~=IJ^wM$p|l(P or70vo"+AYAhv9Kpt:n9x]Q'uW2Gx-i9zN*^_U݅refʱug)v T 8GTd#=~b2&cl%xBFowt?ms8ĎRΗ**'}&8Ǫ||IoRV`z ʉrj_ꕪxPAK6>β/:ĈdX9laQ|. ^ڟo+=uFE |t?9Jwg(^XPl-܃J!5,e^A^ +׺،a(a! 
nb5Qe[_dXhnwؔ#WG_뀕VfCq)n2o{He/`ˏ/U ZN jNuz1͸9,qﺡNYi4E7hWn.!*hG#V\zKOQ?W!`G37{?FᎾgalo*g˼ dYcJ0L2 ʊ0CM΃!?dYDžwtql04xL7west+4B? +,b_3qR x`Qb`H֣ʾLN%[f#pa1jx'탛ܔ!7,jB;c_-i7W=O e!r2c(K*Wll K9(O$8juI2irx%ڧK5IvYӿ޸qh luC%וX%X(,qRNx(0TYWTTFJgCl{"T&=X=#򒼥# b at;3t/8O.XռJ$"d,+۩q4cj:nf9o$A,։|rXiaq0mA] 檸;b&v*a;D.e+Ͳ\+TgäoW:J&!#_axjNBîo}9$EfMN뫷$|x!Ԟdq0j2,Sϋ4j%򔌫I"`ߗ ςU1.5>7hL->)&0O㤊#;$ KqO%⊭<,IGѽ[&+Y S<//6]|?^(3El16 ڰY w8fE;iVp%,-8ZD岩%(F-P$2NEGXVTfSYr8;q/Ta%hUͩgv|'6b|D#^ea.:<f6 @5 5/jԉt_D@L8ydyA_Vmg4%Ҹ0b/3x .SGT1EE-?gIY|g*ʨ<E9SKp[{V .pnФpб H:mEڑ]c`&\\|;ħBzn;2Xi2bx۰wC4{1|;2n(t},<-,%5̸~'7%wR,biCx|^F_ aye9S=PUlWtƲ)kX,t9D Ҙց$~f'$tvv6V9U=[-x8NeU:r>ZwrΫl0D, vVNOhHOְ~ONdsd"BuPDkꔪKbYXF]I [?h$5J [ʫ:$,=B'iMX{[9H:۝4$'!Z *'L[>S-SCu @1w50cZ'vd:>zDl|dä7k> Xsք͢^Ҟ#iqȢ[jQ w4< KJ 0oT;y5~6*G].Sa1hiD|#c8h~6`by9 yKifXs>Z-0'Xu>VƤlޟƒvSxiw~Ej!xi e>N+P+6ɩL&ҽ&Fp#mq&i]}N\͍KDn[j[:IMqkDbb'u[qw^ 0"\,|\B&FzGX cI\; t*Mo5'#B/˫""zj&/oA˳6Qlعc_b&b<T?ULSe5[PE~x| u e25ޟdGHJutrMQF#k$eP41Ov8K8D|7+"N 'G:J 1rr 4Yml3㔨ae!*D83 |r/3 ⹨0Q'{kg=]'W/yf2^Y',z*(&k'̡$W ( Sj&>^򦪺;hJtUU Y&T䑳imj|@g%Y]o'y+ΪɌki ZrzZ}:UL}vL6|U%z 'U];b)L:Y۩B-aIYvdY6^DЕXrܒT,[_9Xj`XC&y*)413#l1jrg".r|FV'=^I'g}.c`ѵf }iZA>6)n{(hG:[E]0Y0w*mlNK:\ ?3W4&S3C@|^.oQ3 wR'eVdK i;ĊE@r`ghw۳XA{YNg V :~`qKq+7}dg{ \=F]GDrIS~T<*m-bt5=4{HШJ!CQN70gj<'8u-L j&tg)2d!uC;{Fd%\ :Z$iJ'@s:_?Q_'__u1YAAx@'AmA2תz֐#.d4C*F>^ m$ע( .ϽN^a pXuVebc:Nso uȿ섳LYs T&',Y9s\~?aQjgq9#@|z]ytk|8 Bfʲ_BX?4wV[!a-a +ga0~AE܍p G ^r_Wüh!Tqȱ@Z B4t{ݻ&zռngM$R.yK~'T`sY{S?ʋab,+-uw@ݑ奔}`RԣܝnNow]##y9U˼~D{ K , A@/!j,j*@bN=CEXEt&[>qGi=[K|Bzy@ %xbޓsO<,d{yn*y70xuTbGY)&Yʇr"F+zD وO5AQrEW8ߋNu9>̰3 Qlڨͪ>nyw=f-!;pF^f% 9<&j1[0'򘢹8.6 ad}5 l <j/nz0^P{t"B 2bBùHzܑK;]{Or^oufgs6vbV\C3X ;ZNJn23}ys2Hqu@l^V"GRtǽ^sșa)e2thf}uü&nTS7P:G뼪&b09B)Ek\VkLdg7qOߡM*CR(|kyKD@pky^N~1+/{}V$Տum7U/ y850Շ%@(贰j*CDVqOeuȝ"S"\V属өNL!d^w;R[]حST'FŐ^FU4W4^T7Y\våVWp`G,%a/qe,9â!R"Ev}C쉹`^u d^B}L^JUGGyl̃#jXYL Iw pr:%; j%|2)ape,l QN3w"650/}`d@,-7\@Otnj rnj=Suv ~3o_:l8& L/oY< r>ku9{X46M[5>g$ܤ7XKac3 LRTFWf[E-d|? ?H1Ae:iHR&PRdNLr S pQ2UiO&,X^v4sJlDޚ^;EdsR䭹=a_ ^0f;U_zVz>9vG>FC4jǝ}MuLZӏaWԳ NιmrRFޖS^bNF˹"3G+vS+2'?)˩VB"h`6؂uۿ9mW?YhY|?FV=>GY[1KW @GW>3iW}j[l_J7kXOuRkCa)P<<*lVTTઽ 97%vyO~"47rl&fb5m]r]jBrb|'C/&xDAFp6y@SNgtXG}1CKu ,K +x5ȇQY8}BZ~(-"!>icN&q$/ 7|)CK<\ W}5̚9I㒫V6_Ѝeҙu_Otl=QEy8>XRAʗhƿ[:_*Goݝ愋YDo'QQxR<a<{DZX?; SԲxKBIc9?JD: ` hwFPUpމI {fCJp-"H#~Ǒ |4]6M겭8Ql3:0URmTeQ݌z!NvD\^sjepсY!6 =cC1%gdK~ͲPq d XZhi5"J*ަt<0ɱH1}drQv蘪 GY[v&aNQFQv3>|k1 $Nw)}¡,bXWWHw)" h!V ˰D<*OlXNEܥ\ uܴ/[Fp0z*-,6|Ϛ8I]|] {NN̯5].kɠ (dU6ϐ1瑼 XǤ;[[ωd=Hq02B1[WRU,wǫb3ǽ'Ovr4O 'Rw.*B 9–|Ǻnuz2وcyGx羽LKR.]@WQ@,ք ._ l*ީMޭ`j^J yТ?8/hF8UT`a%v'.fuGNcy|&P(E#ת &ʴ[0%nďrJma0,WHy=LB{9h竊ainzT>><fH dTY7V_6!s4n:"n@ְrڡ~s$uL}O&zv#ŌF/ayG-)0q54RO~ *N.`Qڰxj0^yCeXBy5ҦxWttpC{ǡ*ϊ^0Zwo%{񌜪YrOlˣ0h*fb.zQ^64&j9w.DQܧuaOs\nyUxKD-9T-TE|*7։r|/sx@YxNl%#^Qj%F;. ~@ܐc݌a+{yA1 HmV}}񫜋Fjbo^P>%Y!pZb(unLCͅ[6 [5xROqɿ(/>]D:_k Mf-ü 9h٦8VT 8a n_\J`Xk*Cm:n9̓;'U!$t@agꎈËc&>]$lPF_O5_y1}E O+"E \=6[( l jb [S1wD ^r?ٞЧN+.,m_ӻO68#M;8xכw'<y1$ c! h9@)Yщvp#|>%kXY[κ3N:%?Ȋr{aɵݤ@-au!)s:Hߔ VuH2dK.#㚫ުd&9KLǰ$W<[e{&Y CQs5gwcuQqX-+t=#2DRAy? 
/ibQZϝ wD;YT{l*_$JbD N.C1T<%gIc`w7^ǥH=;0ҬY;Wa 14dul/ϙLeIVϮHKv(?o;7f}o;7fBqC0߬oS :0}ϾCk>T hxwuhFV"xi`92:.wȕ$FggX9WmR翜x^87 *׋<ǽ˾> xJ7m؞dzߙI[mlxBYJ5G|"M6x_fYݰ Li"N*։cEPGE*ګzz$r I;6O05[5S5 \,vsDIC^{1ݲ>f:'tz^oa)-DBj*VsZ֑^ĞO⦘ xQ>*Lg=bQG}J|*?N!tbF~\/eUBvԼ"/LeYTNDeAkUFDp>%Dniύ ]Z|[}]hEh' l8lUIbym^sw|(Fꁪz fW6Ӛl.׼,O+/+*2)&5r*xllBݿ!,&(ͫž\r"j(Ϩ#f>9aLu<;/g=_@0ۗ8YxF\!bARǛ "f򽼤- 98G[& 'xg/ A"*&&b|5`lHJ~~Cc?b1R1pZq!óGqf,XJ'g(&}co xK.eef֥;`X iD|aeXLLѓݴn"r@&^Fus, MiV~3٘)>ܞKְXw?aSDW$˘{B"c\ǾqY ~bX-sG:Σ⩕R 7/!y0wz[ =mB{;}F؍7]^D%E?ކpbQҍ2(U`95FWe/{q0GqHw]'s~8ٻImi&cYi62Tan"%xVE[8 %GKgOV4O>@Ydii dwvgH Iu+EwB1"7ԩwh50D2Xl4[DF5]ykA{o$Ѵaw*+ {̒L333,b",҉qOĽ?^ iٜ[ FyN,R^rF|t$s.;d6n@y_ c^Jg-=KCCv]^QƲ jxƲY~t@.E[ds9IN^iTΧBKb')𙤅vGNQO KU1Ѹ>:bI_ģi6Y;(Uoq]|`ZF nfp#睩CAA91;ݩD7ڽH GRQBuS=TTj>4,`%ބ1[,HczYUX̳t&r>:P^uIBBZAmb4RӒdGtһmIp!ʊS*_v :C4X:&bD63p}]M0*kK'ЛtΙD{DN$B>z t< s*5i,V>ҺN( X'VV,^;Ѝ7XU T"3iMhWenBiP<#i@LohLH´!B:[/)r&Urne~Qu1;n 5#w*#<t|-h8:<ջv(ENsa mͤpIHy99VSt;'/8/1E;YrBJ,WKʧ`JbeiTF )nt:3h%F$JHʲzŪnalXkLB`0=K>VN7-w˄fUՓe&>2id&r̖B6(z^ZG*ЕVwzWirD:ڔvoHӐThϦ"XOG̼0T;XvP8r󒞁6+kIz,/f{Ijȵ،mÖCֻ,n`'} - uw!HDHVv߻IRvGvZ1# }0^Qi6*[ry3j)j* j򛆦4=HJL,Lܤ^0G0qhOd8Zt"gGN;Hk֯跽H:Ey=Ku S\WS"IZe;tTs_Tr%J=I|9LOM=UhZ:6klZn_I}@J+CC6K#Gל>Mϖcfjt=#\FSϬ63L>]_4|a=;p?Sb` yaS9U1< -UluY7b{] }LܿDr|"*R;!:nN^*c ;w0Mn!SbX`_'D+HM2sF"-* a8*C㑣^%s/ehOܢ1}H *jl-4" ZTo02#Plr߱}6#DGT}A{jv?;=ODUq3El5OeRQdkLyؘm[ Xу"(;-d=[fEy;+˻+T^u\Βdq~;Nvg| ȇدr{fzcl&?#-"1ş g//x>p#";ιp*?n\\EdƠp^M!/[gw|.lp?fyN-xΊ*v施~LS3Y4]4)e_m=7dNx*:|u\>og7$~K𜍾ng8$Co_TpzvG»nN#NZ0?mMVle+gɴ2 b$VQi%PJB [,ĩ)EP枸ȫrn+(-maCC[$ PInzU5{$4-j>YwVEd)g,1)N+RX|ұ,ΰ$KW$t]NOb.q|;S"&j́*N>V 'ǰ%^[ZsKRK9ߩ*oueZѓVV]eFш'Qdtt^8V8@1+"f=^Oe}9\oQ.SIBI`'f5CuY0gM2CuZ2YO>^xkn g )BLXMR,lMp)Wj2;vf-X\^]!ݼ/mN;id5Ce9Q\jFV}%I,l*D0[M|=Rx~7)l=e#^Q#("bpo(uFqFDv-ygWV7XYL 漙bZU KE%ـ3㊹3YNSd5DgA? HXI]`'g@G>ğ%KHWr?%oSx6'ljSҦjM/Y"O\߁׻Cu(\^ P -ltURgՃU/\~r;_ 2 CX-34m0?-'e2Ϋͺ6: ZJe7Zy eaW1Vf撁nmz*ʠ z(Yeqe"O+-7~9|3![_Dvn[(3"΢N Ml+=[fcN}>tt[ahGwۖT|rFc~Cn8%dۅ8l 5FJE& 塗eؼ%/ldI]4l$)bq#LwCfΑSNgY ߱t,T ZcoZ3vlj|6K0PR r1K;򐪣r(l"5p)Mr-Auc1Ӂ![Yr)$3Q>& 0,xC)sgmm'`%:xN:|<(ȝ8VbZɎrܙ*-8np7I-(yN5̝mWhB |5h]FWc>Xo`kޑOČ,2;JrYeUR\&9H4c t wאA8Ld4HoԲςI6ų95+ʰ,?7:0T,c ;y\ӺC-T%#B!LvbO긋UyNuUsQgbwoݬ,(J[i[ڬKn5Vh P^!: a,Ξ@'F9\hjk`bd.[usf*`n(LUg7x&rk-#Srt:~rƇИ)d6+qgD"5_C76ߥ{nuFbH4wN(vPFsB([{J0K]%3 >r6'ܚygSl-6s5f =8;@ۻgݯ4eziv83Hϒ5"MHiiK7f0}p}>VqeBt{ x|57ȤcU[H+ynI Vu*Km1CtbW [fx,lΏGa_}"lѠ=`U)TF0hR،U-=%$@/6^awSܼTgql$F|{nv+*޹YC>C\DET:I|U4w 'k&I%&:v.y.u{:uz]P5OUiyh?xQ :|A{=45eLWS$ݭ[z+xL>QrzMgPIFzy B^YjjPct1\mUY1~8Ef s=~ߓKяd^W;; Jx^Ғ BJ'>%/]Ǭӫ ^.8X TvllrD!䙭WTghcĻ5VudO 'ڇ22w>1Ue'#;HE֗Y EYj?KC\_68Ɠ%8i]VC;ثMѥUj{izlᛦ kMe, "xX-RL/=d ,HWA6Q)g+u?;ʺ|5Gp һt6e0 W^:و-q,H,ib r7~`OnTrf]IB닛x 9N2[wu5E\pMH4+, AxY$7a =CS1eN^A|zJ,ve4#QSU3^Cɍb.w~wsL7 2'*J#gMx X-Ve {C;el;CwB_%W7ľb<Tl8Ufarc ֈʈAK.kZY5Q (̦&!"jƇOxoh9FqebyGuO{#9 1HuV_l߼)ZНVtп{; j6HHN?6=8VDghlL̄հW%~WO-GC`%`kϧH`O[X Ƃf[XE\"6!9J= +iXJ,b(#ΒXb"1ܯfl?Kl{x?J~n){>Йe| ;6e;xIYXOb;?qPM睰 [CzveT /N,Kb F)3r||c`yG Tb2$".у΃آO:N mNҍu3PWM4<;wg1p:'e*5D<|DLuE-Wd1cu⃣!z.,gS'srxnbR͍B Ab#cf:{̺n󝖗ϸ5{,9QIYEJ!t_Qd{YIV|&p(r*)|Ś29 +,> qo`Yh G6Sy{VD1<pGV)8:[q;hDW SUy9w"č$kYFq@46 q/(wqb^Edgj3n`l#M}:&cϪbUM voO;o2SΣkx EЈ>v8Q~r#lʤǪ!,JayS^ujRGΊ.}vui~dzDy,o3|~t -A^m,)jqq36e&^'չ6:,}a OǢ_{s+"$84 fZn/uQ rNή趪"HϹQ :}dIixl9I5kVr!&`=$|MsCgwщJ@NxIS#T iB/cv %SuF)}"c-A}mvc2 C翥*䰌fh Heq*5W,j~ ]VghHȞbhC1108xKnu?)(7҂`sWˁP AX\jZr/-ĉXRS$ cT!럶.) 
H .QR$eT J0=LeHg1z,Y;qKt<՗/`+\Kl ol5]םEuY":F=%dq5KԴX TYf .:}Nh W 2"IN sĘd:-Kj>G[3z'ry y2qZLl+D:+K`qIkk$wM"#w+4 W LgBҀއ|ک.jE%9MVԼ-{ _ۗ9a*X겜lHuJbG.{MJC$KМP5+i-gQ(6FC-ڏDT.J6}Yz,m9_jv$HBZw]jU8m=t7| F}Vb;B$Ƈ*myM%P1T&ի%7zެP}`q){$LD|0=ܰn4R NTh c~b4j]pJjc-6۞J!uL}uYLd{6v5@ SϼJZ2(&'8$uapG3p!s2Rhro*(0LtobYFv}OIfb/;zI.9w[]qO\sO얿nI4b{b|7zT8E]Zط?"GVWBU![9]G3L/LCT]j"|~NigT4Ad曷&W`]9 tdZ2C @ _/E|Q"l#G26;泬.c~ڑTq뺥nپ9kV>F<:$ظX梿}Q^u.3Aŋ3t)3H$Q!?"ؘ'hAG#/ocq.E|Chy* |9fZ֒[ 1cگ{i~@:,9O,u[z,,G  *fIlDOk kj*{ȗ}QN!E+>.#T)XуjG\n 3[`/Ų(@Z~OSx]o3E qŪ@"( #O|!D)W[CY;>۟,-Ĵ2;F<|]y} 87Db;QO. t3sA*zzZ5ng ⢪҈|/"׊oD1c0udǖ߱)*96ӫ,ﮐA_̇U.>EGu bO7mqĥ`Rg)cmuJc,T3FNoU\eD%9渗r:&SA0l碱Vh05:65cvmF|9fKIL,jjV%W92 Tl/UuzC1l+b7 *K#k(#+/s".bG[E:(S2Eb˅cJʲ,v߁-&Vc/)$mh=G7Z&dbt.}Eα =pD*:b%֊-6nrx.&hQOxHUIeX7`3I ~mǂ8;qpeB—A@Q2>Xu&u_aw:!Buk4jYD'#)L͖Gq>+4决2Ea3%DRYi5YV1E=L}0 ^j?3ΞP\'"<&nA2bm:i6q>qMMq81Cxx'DmWO#"lnqn'`u\ YkF:Í?qH>x6B݉R:v#܉TkhK)쬲>qX&6Zݐ8.h"N*A+DZ+l\𧼰Z._C"b$cNzӲNlM/ifG2(=A0+G0-=bŨ6==jUu3YN {C(') {"+uGR%q3/xfl{;iA7Ovx9#@6t&6Yg:3Gaap.B,|?O RHvu[aсZd3U/-8QŽN 0E `ډTTUF&4{>9OmHnDCl2^ZgSɤ*^~iMgo{Et?c]7<Rc2 6"="8Aȇdx[' m3#>bC/LcLė`1X)j\R%_ŭòZ<(c;h7@(1dZ9Qr=p͹K[~PuQK+1*V`G6޽P@jJG ٰ_Q֟u*c9;(yP҉]E:rTKdoˌpy"?70;\Ws|Yi<Ú<,'I6z` ,pGZ0VIe-Y1~LCeW95hy?3~ᑪv{Ci &-4 -#U%"ylrۨJl74LJ* 6>ʱa;;b+YQ2eY<+7G/')YR~;wh`3i% &kɠ(mݬYbl&KG[8G\F `a|ŘSJ@0s洛6'j4ށ2dYE v8VQ-U ۓiSu|=$%NEkyR,U\RXFjq9JG{2hzl91]AgfE ESxluO86-py8Z7xeWUpmDr84=]xy=MWu sf[EV/q&<XX6,z|&?u[m9N6—N`BJ%T%UCW[tyuO`g2t!p=W#W? b:g#N_$m*!YO z:jȹa5]QG1tDNsLfs;ۜ| *o aQ"-N;Z'yzzUf)a*{<48/qcV栩y;?h8B3t(tS:CWqΑ# gXl<2ycz8^s46V'ɝ d=ȅ<3&:k1MmYR<ó& /r{ӝlX 4H-7['ohpdӉ~h}^Z/ x/taN[pҪIU\eRd#~誾6$ 6Mn4JWu~ۜ4K=3;U~R{14 fӔs.<#xý$7˔4MKҜu=@Q 5@+!ֵ jcK!^mw$1JUF o7]@fhKэJ.{ǽ}e[S]m m:.FIN]Y|BA6k|;'DUQSߞP{^,rД閶z"et8&%~lɡb"cj)Dxz4e x/QX0>0I~+2Ę |Cl,&妡֪l5C/fZ‰ͭJn^G[Ӎub".b%leoUo!<} w]W\+xgWLXgeBykH[qMY|6y5ŝ^&, XںIYE9s 󦩘f UWٮ + p76rmV25TJ/YV "QyVf,v.W$ӽgeauXo RO5DOpbuaU41iyDRgu]yOk0)\a}u ew@>VVRv=|'6ESfTE~Zg5EXQúD. e#D}mz ê٣W;8%uj]QNu^֑Lnj:"7agDmo7L01δ׏dNWZa++Uu]TI-7z$Řr5/2J#H4U%E$Te,Y̌33333h1[ ^zw^8ϒe?}fz">@@M]Ӝw;zzw0k` owWGBu.ʅ"AK#oZeE# WySXNWa(D<-$x:]&.Sb3< Ok@G ?z/w{N],<%ɻ| "XAaY%I.(p;Ɛ=pfvG6_!'UJQ(xQrplUh`9zc[ EQh)_l*WxDy)_8##ԋW%EvQ W',$/٪~Bi=hA9Gf0ҍtfSoVA*|:q !<==$6`(b<#+Po?A\L)WYP+G]UQ31UGeq,%0@h5CyKFW Z]NQ qװz%;~BN6Re''<Ω<`$>;ӛ-8 e~Qs X NK<Voe]~UEQq"zή;(a9K@Cyb?BL~Xy|",xaeyU1_#Jv[BTU)#4.n"b#YT00ot0; Z&H"`DYUp#B)N^"վ!]#h_e#zb:*2DaxZm0;ld'`l}Z!Ʈ<2̻`jSӱ'x 0gL2 DcEllTЅU}5XUV냛C)t'$Bt39O$%~D+D{c\\'^}7?D:,vzZ${t!+j5+UOy);&Dl޿ͪ8 ;1J-m?YIN[⑈]>/.no}#:E_*N2zb"AYX=bH:$GϾY" m>.b|մ-8D93E("Zu6w Ip*㝲2aćAV9oD;m|[I_w94 lHeʎt5+Dڔwl/!fd4Rq։3qC,>QUSVļpX4Jփt? Qڎ-Fa^`ӌ]q܊L9J@e A<6Ƥ$ PXo,ll U|8Ut\}SVuMd+>[bAH`eygg MEY)hSAlV)M c UjƁ:F̡]K(}&9IDz|9 RUZE2dbsɏt eI U HLz,#WaЫ@ng'r"ڲuyX9fΏ~, oJ&a?jlb .=&ʟ%LB ,I‚Ez>swѤK"d8C=xkEzo"5H*|| ͏Z҃DDNVMp CUTfiAǘ}| H1ڜr^97R)rtq- );=Y?QHDe}HOU:ɣBK^amX`_^g i@PCÙbϯ,8 -&(a<{S rGXvǽ:e H,_hA"WL68FY )F ;,LWV_#&;aX"bZl32;L~'=ɱ {hKIpە`iҟ"Ǎv t^}>rU@DF,GV Vegx"ج @b|Nΐdao)ؐ!7I2Gf0^c[d=F 9N&BFR֯GI371]ɼ+]Uw{5[gD9~}bY&v ٽr`-]LTP 3tF+ۄt|w%n0GncҀtud%MBKкbdI7h³Rz.(;Q@xɳ_8 =֕eΨ(][Qu3GQgS&[hl Xqg9j(yH|e`P#Tz9wtl3 U}Dodo6h E\-Eڬ'pY5M}32u]Ia&r?Ca`~p;̗="y iF l$wC""euMBST2 YfC,7i᳽Ziar譆{ٽ&ٯvQ-8pW{}|/ l! 
VagIYEVnhFcSVuWeȳA aIֈef񳼠ϛl^Zoys" dQLW~,3'MCsBҋuVŭV!O⳷{6SL+=5>\5ٴq7YO֕ (@{w_Gx_k'G1)%>MNGp,ԂV%]5`:zykw$3q\x&ƈ_vb,=_ƓPHV[Ll/߻b.f)?\ޫ j./С,;K[?:܍8;%U,˄E/ddn~Zg4ϋ֦n+Al1/9٫-Ҋ`t3L7WMx}>Bij\f-&Q]&x]7}-lw[5944b`` xb#t+^Ln:S1u V3>Ǻc?or֟1I蠾f~3uLt$K˂ֳ\:ⰻ8bvэꬳ t#nG'{i+cW`&)7f.CqWr?PTuUdW}Y3MTxLO=Ms:$P"8*4$Cl]txiQ6 }͹>myvUk@U8s̥j&7飿5e `|"U:n*r\!>vr&;I%q*3܁H|t0O7|".JDiKt3X5PMoY 1&d?8 e|C9kEXJ^!8dXR<V׭2a>"I >ĂIFa L?HR` bG2ڈ?HX/GZnp=ݞ`8T3@2V 1ȋw<>|\pu=o<629Xʾt 43aKb,šݲa>f yV,qS\|8'>iq` 렋H`K=Cx>=j oz+s*>J Oql!#A>Ɉ~HST]Mfbjm^昖3=Gig◾x>;MMu`R05B7 Y!9OPRaZXL.۹u$*p+ȍWmObUY^}RTdUֲxU?m(+z<, f;~Ic )tq\ kHow]2I~_ sqwڼFelyK0<91,+,rq>$* zNDP TViiڑaɾ@u Gh9de7cA є߀jdSe}Gfec[-r*_`O54 p5yFgkgo=ฬ(gg\=R$yIFr즥h/ן+䵛w:崢ho+vO Z.ea5@@|,aYmW"GOJR!5PJ=%vVK3q^vF#y*K{JҒ+EۓI/BEnMw7IN_p#S( !w 3d>(&@clhKJfS'mFBiP"1Ģc6Rձ\=\UU]Pj.0qh93x`Qi=;~e{ER_tNV%Ek {X濆-uP -^C9\J>3OA^YZlxxp'b=Y1`hYW,)'; ;dtUB$Cֿ[SX-6l7ޗy܅1`3{-hqa*D-EqN!5@}L~)ϪZA`Qڃmr),b$t; eUl+Y֨$"*Z4;=V4 giyk֊_W"}1IEgw"9>~!77&n<=:-hRvr Ƴ}]d[x^S;z &ǎpA{0jGȢ @`ajRw;]Ԇ: }zbL.`Y|nDe'dyXvxVGp/#8oq 2DY\֍ AQ]x.}9И9b5x)paY%|$Sw2VwDپj}G?H2>`)y3'|˗CD,[Όnw#P?BG`4腃TI)OǫZM=r/~s:kopECh{ڭiUAv0w%%huk?Sz< "2F3Jc{>%3iQZ̝hOO;B)X(5'4nvk;[BKXOR]b#zb]:C#amm6c]ȄfuC'9)xwyZ" U>eU s2-NKn&'ItB"]&TU!l&^|(˂RgU&Va9p_9ePz7ubr, Hag2e#, Jly#[U4>]A>hPwˊUBfy]fC I+RI .6 2oQZѬ _`+iSfN+~Ǡj yM7[ebрG (7k D'+]*2zFxcXyk{vG0:⯪i19yթU<+IfQB,7rDw5To-xuZO^_o&x5S~i1fF\n{^?/ќ0L4MTSX]Yfݯzsjd(1hjx< {;h{9k&zÿy^6o*֍,v֕'T:/e8E81Io&nRy5a_r7zT^FsQ%Py\VVt/h8w8ażÞema8Mp-^YҕCu1L5 |b_؈o6i\qDă8F4 JaCqguܡx Shz>OE%T}vI{6fIWʱyi2V8E>6em1bg^'l ;*4-v6oSӝdqȧ{aGeU-൬oy#R8Ў}=/DVFVj^A] XOw)pp#3RX:ܷ!֌{^ROPG00Na7-G"!-/PD wϳy'뺏%$ۙ7Pd2w¿{D jګT*/g,*|ӫw$l/_;T%o$דuƋnVawA{:^Bg-hbbfq?{,1uBe?3;,?lx#>R' Ms/A8JxiNNuU;7J,C iN?Y G;!o7"՛u5eGM[Io~R~ Ue}Z& ^p0 2۰Z@.Եu&ʒzmn_493ǜR#qHGeDM<(BfY  7 7 o ? Ox}ڤ<YAj΢0<1:c8eq }>nigta=GV|?rO됏yR:lQųa{IYen c 0#hMv:q=%z=LMuWޖ115W(WxЫ佷a9d7\c,Tt 3$\(n)q^bo饹=#~GS|yTsL5U_XCbC:4?=.0W7›<4=}{gL]KOcӭEOP&rI_Fǎ.[ƤRNel _"܆,ʦ9*ɷ?&K{zф" y@kzb:qLS lԷ)OgX%ΈrPOʆjؘU"g!qlc}x(,*vH+QvS~:dexH, 9^8`_ܿjSEWqP4XtY*]5W^V)>r'U#e>5K}v}E_T0;E{pp ~Ⱦ#^<Ϗjnr). },%wj겛,ǩjzo ~b?Txjl#ȅr,zV&)c+A$yM ю#HKʒx ;U,r ǶdL@+=BuVdvi1oň+Qʌ1aSX'ՅuZV }Yđ-q%;!q" Ó2qE)d%5Xҝuu?'Pgo]\ur*7jnl*'(_bkqeXxqK j0 ~!j:~WUEy~3Dt wo%]r,V#]I ,ם񠢌cЉGccD[8y~ѕ ^x_6gWUS|wE|4?{[*ʰ ߔuV=pV*|zl yփD 9Fєki;T'ngh}> `B8ê,,x"'|8r=OĎ̢&8LW zA"TsR7R(Kw*STqxϳzl!)Uf~: cGYTq=!|%u]f3ƽkY-leߣen*99H>Rzl,&O vh0H?G/ /LWSt8#cz  8Yc77BC텶“u]FUE*֝#P |#87I(\D8Oo?F )a xWL.X{XuZt 1P E@lڨ`De1 Y;YEBV^Ē^q?O0:K/pH^<&ȏXF+>J'Z$3.v%V yg#'4h,8Q.Y[TDJ%2di-WbA~_&יX[L*>G$4?]?rb?K̮%=.[Xr2tE#Xl5(A4M3u4ʃA1~Fe 6 EKg9hMylx{6Yt>@۳e=j.(ή0_բ70b )f93[zbX k O vjcdS@$7LO4dNXLbf"=GUVOvj_%TqƉ+*{:<9ir8/C'|y:ِP`K !ࡩ~hu R5`x:t*}X{!\/w~+8Tʪ23Yn֖ay%446rud!7!w?iHow{~h"G HX8 Y{Uz.A*òhw<}]o$Eh)7Gb(#$sթDH=8xB\f97=Hw2T}C'^q gl6]MDPBKJN muyMȴ ,ǷPB^3;X>|d?i*ZJZ~ MT?( oѷ$@d2 3* 䦜g[XVr_5Q lrѾcz:ԛkJfʋ<(pR'1-tf7k30*[f#V _˝oES[f9do+0CzrYzS*C* g|-[тalr[&w*rbR'~۹ X<ğe߾43E'Xm<֋I]^j㦽E,xMWSe91 xTq9+C|us=qR)>ݤ#\"ݫ~Jb<:a;}'a=xԿ h/ F` [Pr:&vc6;F{, XPb6- bH>ĩ?ܤY{;%SXZ۫3zeꤊe'_%PIŢH  cz ͷ<N4|;MG4 arfoo?gh-<87QTq44Q 9qoe[uGüok : D7q.,[ͼַ? 
g`,dXoRd/ma#O:OD'{ęF6(8LTR}Gq=EK#=#{GX]o RKg'=DƄ^0Ku\yoM놳p xbY-oӈ|=d*]YW9dS^ԇt1sXn#I1:5Fࡈe>Š~QH4E!JF8fkjhY^"ܲ2d 6cRCPPT0 v~R[fU'#yM^Nd_r_C/!"x&, MdxtR4jY2Gj\qe+aVϣZt^!W'̬J7G1d91N-Z'>RcYxI~ Xw-"D劓(橾fj:YM-:&`o]"\ֵv# eRrZ풨_qcYZ3_zai/$C8Rf%X/صl0;Hх$ Yi0]NoL7FV}U_S V :/ĸ2܌'E)gcp2-?GΩgN:.'g W yXd]$Hoi,z !CVz| EqYşn4JA|9uoiwdzDx7 JNmB] #r\l{:|9}5]dlFRVVK8*tS)]{L?1cys oc4D$,iڡNR Z#< q͡M9ݖ;eUyb2g`-֑dDQU;U*SeE0=AwAb/Ƭ iGv 9:mJDz TY&pXcQ$xa=V GmK05a/#ΐĖDTdz9Y,4*, ݢ> ڟ>!l[ώ#&U+2*86[4b&CE܈19wTTGEb ,ObXV%ҷTFUZF ٰ>]DiNd公KꝪD#qه842xvIXl*1SmZ}g-E ('aEx ɀ*n3':N 2Z4EY~R8X(v/Dz9u9L`LW;1a^v59E+.d bt|92"7bH WB#,XOV.8(& Z2Eh/S?šʊgYx7܎"(#CPm RdMw5O<$WYpMֱ|!Xsh UKg' K9:Nߊ@zw>i Ss'dP @dXP>ą%_Æ@i`'z"s!78#O%Lˋ XDP?@3+[KNNW&r^8 BQgʏGvV-T*^G YEq;o|橳nw;Eh匱NPO< uǵV6cgϋv /{"x}Iln+EJzoV6̩G[WD'eM3XqN.n Հ6| ulMfxxW64 Ng܇?d  c1LEkq `)Q8JbD0,Y(;L!hb¡42,*)Lդ"o&'u`IT(PpL,Dr'qJE#lO˼+:"c}Q 1Q42aʶktӗ( ɿeG&!ρ'tZEoQJW1,kͶ}`2g{X}VawmEox&Kɭ [ޖMB"XV-w>9nw8} 2vw= LVO(zc$qu`ͭLJ"8JYdf=& j.΀Ki]/ #|ZÁ޾V&[N=dOeV9gO{S_LƙP^ 񈚎i(:,-LĞ?"~p~_k鈅*Rl"=nbU۹y*^~8x9ʦXT72Hc[4TUJj F,Yd-,ffY #ffF|Ss;iM懺W\AUfpǩ#Nsh Dp%җ+rnv߫n=U^j.Sx,P-|*CXOJdqX" L7\5nMoWċ%.uy(&:~-^r{N2@^(5Q(ʺ{`Io=w]QQ/q[z˽6^'o)籧$= ѝx9, F]w7Vi[}҉3y[N8Ae}7|=Ux,bR6Ir{^s{tZNj8.I:;vxzZ^sziV/W:ZUU3je\W>ϣ LEiYS6y wjV{;S d==Bӎtc}Ջxq$}tZ,oRoW k1}QvұvlO;b%-&KMʛp}jKH v ۼ AM^P7EyNjԫy-׉_ZyWGqBz7^hPyzIzïx_SE&zu2^ګpSv {~zez8t<^%&^P^/, ]):'ыvs&nGTtO˶o4uȫwDMgv򭢌L׍t; Hz]MV\,rd_ j)B'<n;L\dH@pVWm^PS7Ã⾚{F( >~$VZdH+<-{YwTK-z$PTª/'|nNi*~a+kKC A{cl?ghLuUwlz8}ȷIw_$xg>f 41 zj!aX-Y>DPlmF |!-H}K*˧;Vlp$/,H 3q"=[}Ld8H~t4icIt'=L&vi9H iD x߾ɍ6>:M <ƅOoY~KD{=}ʝ|M;"^s{ߴ)NudAN|2S al`G*]386X: OAWX/x GQKSpf:vXI-o~?{|={|=7E@@8?>82݃1J78E_ =a#ie[񐎟DGQNOJp x8_( x`oԗd[RlH/}2B~7: GDlYHGƐU^~d">F[T&&B< dy ~^].0sVWE$,ah3HLu9n32?Ap$:=<:\`} vF>B+jr< ȴz$+;fy1wۢ4%iR8+mz)3ߟY]ٟgFW#v~fK!3CD.q-6>']HK-_Oj EbX#{ISa+ OXfd[ nwj/]EyȲHcbV2\%,rd'a4cw(&SD ib5X;dxS sD L Y5{j3В S&:z&srlfsqҿ~Pd\h-:+PXTl6\| pNv|Ί H"D*/y*NCjh"@p05]b$ԆoUSQuo JLwɓ@43c[]}Eվ+ MO>wUKukVma#㼿r<fgY?Z,Gb"NJWh1[ѱݻ:%@^SB3M <6,&xfM>X8E[%srax2'HWz`i7W/z6zh,h^.EFo5swY[ 0d۾v:'{ZPv\ wijIt(OZ:*$9:@=LGLKb aI`5f^Lu}]AڎkуDŽ,y F 7kyRT$XqM.Nb9+KjS Iy+"~zy-˱Gyt>h r9NlH;ڄϠCP0d ud bDQGT͌Vǡ4rT5qMTskQ-{pn彆ijG&X[VZ@aty͘faOwYVSUc\ޔ}RJB #ǁ"*SX( ~j$@Hh80O^ڀ8\@ڑgE ^>Gņ-̠F}ZF> 1gN&9iIzXehU#T'NuWIU\q$d6yG`.%83tFŷPN+aݟYgMD\j wFU L>3mI|c\B*d,.JP?*zN'=Fė=U$DcÃǂaP FJvIƲpˌh]h=ONS%Mznvχ,'[Dۓ16114*Ƌb1UHc<"pur^cl9wmq5[kiKL/|. 
QYT<=Y!]G5J>ffBM p.j;bu&|TsGēůl7 ~kkjUAK'5ތgt`UWWT[BO3&fjH })bk\q^dU>A/4<~'!+KSB'Z<)T)M|}APʺW !wЈ7 O*ήߨ*ʬ ?7_54edU㥇?J7qtPW<5VД4| < aO?]vsoANoW}edyK Y2wv]| QSd:/Rܸg~Gp2pD̓e%)Fڕ}Nߴc|'xޗ  o|Yn!&w}߰'\2qHxO@lS鮰U"IҐ I@?rzQaUC.!?8'璣|P2uYEҲ3>kctdqElta v_>YI ?kao9} :*xL>&GoƼ>7nZ[/ qA2v}ԷђlHz'sag3|7̃0 BAg[0ξ `9yޥ@MzκnUw"}3%e츁UVJt\e?_2QE;{ϨjJe ֑^$vF+lonCyom6."Y5I[:3^Q]x.rN#04 /Fb&lI>MX0b죚!2$ʢ Dd=IEgzA"`__ )CzcK0)D"ttQg)3cUQ3gs ϧ_"hiu,u`mB:.#-8;$'CmƐ8 =Bl2O׽KHm2oYUF0Y ivƙ煡&BB>Nd Zyi`4|yb^HDtz]TTaJ6rx?nG߀O Ix~9;8U!1`*h683/PW^߂ЀE~"JKuQPI5P?-6WEǙ !ʊ'JoToa솸љw%I» !9_5VmUHR9B@wuQS6n WFL{Ћ@%lEO9Knᪿ`Tcg5^0|3+¶wG\DQo!OFpV8(/KB*|)oY9TOY[ 2$O47m#\32%?'bja쳘#țp0HER> <a[(9ֹ o8ߜ٨k\;?jvg40w]& $"Xӛqh` jtq_NYd?YJ%"Cߣ"p\5/ X*IhЙMJ]}IWڋU7HV/qQEGKl5NfYd&х_~YF"7e1a\5F|:*2 xf^G9hf%+L ma!<5%lO :U<ڰD|4مSߡ;_+&Le܆=| _ǻh2I m-L~d*7WsAvJ?i=ﰶc:xrvIAnl.X_P=Қa]96 l- re3'FRO7|j@$ {o:},~vP A0#[nx!i :q+|h3Zڸ,YI@2,%;ɈWhùIg *KdPF2ON"dDZ[:ήtwj{LWHtb }[}dYV݀7 4C+܇@-X^$=OD ZoYSÐ+MHJor"ldĎr89n8)}YWLg+l +V<7fPrܢPd>"'b M/g2y7 k۲FJ^'VLP`ӑlmaer"˨ɺĢ5T2ZQ3e6'Ȧ=U֖y[`q$ي n<剢S 90)P_JGETf,~Tky^IOM܄͉a1դB'_a'z-!5[OJ_fL򕎢+}*bHjt@'ꡜdFdVui=W"bVI9Y~6j#![`5ib VTlq?^1s֚e7^rltK\~UkuIm | @RTU2>{+h) :{R%i(9q%sŒUؓ(]ĉKø iS32-d2䄞xzz8 Dw*2 &ВdDZNV(O(Q(}XHxY|f{Yt-sk+2a*]̧;rP+}vTvd?{ʲ&T|,$;jc>qXKoM+|`V<`d7Ġq AWr6N&t @EcnDPP}S_b7`V\A=(]L(-wbYBlcR7%yiƅMe#p1J.2NwvM`o& J)uqEyYY 66WU*L  Ì3_C*RC*۩sSlDPnSk^q*3!0sHlu`i4)`IY16d0I7s;<` yոquk)tfq]fth~ys-vۣ=>]z_Doro2#'l 6&.u^yRB_mh}ox^ 4~Zx0Χ2^9<'+z'k외㸶>H C8ۉ芕1VUUa:o) Qb)'7go3Ky\,c$=Zo<?n`SĆ d041uMO|ޕ{`Iۏ?cGeCS"IvHam<$iZږ@C,wZ1P``W-vۦ{|={'@ Ur2G>_W P?ˬѪPw0ڐv^'udy~1A)a=7wG/",+j/2Ãe,%:"H?b׮`FB]h 9xYCMEb1 |9pWX?A6E;aL3x*H+-iruz?gBx G 8K͝'(v=S]YcdWɯNzkD`ݒ$ *e15gŊg;p]PU+)3OŲ^l-D,6.oQR+Mq$,K )U>f))VPHJSox8 ^IJyp'Ȳ2'fURi|x?ZRu?GU+>A<̃YK)@k^d1@Zpn!2*yT$ V)̷b?GT/G_U yI4d>9P 3O ZSJ*H Lre/ZI1S?l'{is8& .$?1J>xj@Ycڅg'##2iWGWF~UdS2J0% gyH]tE5_%)XEiyqx ᘓaY9Vf"\(Tz* *%ȐJqU {<#3J !4s{*J&u90dlQ|6ƒCaZC$]>iLZwЕGKT`I67xvKĔ{DFq/:{RoU UY-TTo@*闣DCਚ5$\t&Ჵl!-V{To}RSt}@US_dgXDcvwBg=-Ȟjo~:/̏g#vCn # M'{àfV2[wTRl"b5V:,]8mLz4ecULԛt}YWe/ gPjR pұ\G$<'vN[jGQN[! 
'P{˿̔[QL4r6bmfvT_T:Q]DQH~f[.yZiU Y~Zee?P]d9i2R9\iU7çKDe>!໸%q}N> Ҷ͒X!~'K(O3l^Zp {6E$nN\O#/$]Nmd/YfPTWU _MW& a /9<&1YZVU`u^u&wzifQM\7b=eXt֟0p4I^8Tb#QI}Tt|Ւ[7v/9܆ OȪEda =p C>몧) ˔2lnoXM^ & ,]d%zCSLRޮ*Y{6.$cIEd EIvcU3nXzAj2ʨe73ꂝٿ?k7tvxnÐ*j}[FQIӍtHדF$>~yk5`eyrqE$4Jg&-ޭWʯ"e6UPx!q-d7t6Z}%Q ֐W V(e'UL-Sdj>r60$#+T:-79' Md_DU2TR9UjS¦JB068Zn%M'2^,R/$j*WIJqjI?t=O EЋKojO #*fF2\'r54Ŀ 8v쾾h-agE tҧ=NmV9ƨpH<[a82JZ rV@\TS藺#G]|oO|8ׁN 3YaiɊS b`*j/zwFӴ{W|=-ԁ-tF!ǕKLP+t^ߑeCQhc6lVԙ{*xn\a,-Az/FvXWUt4PDr#<$E;0AFeSeY_NzTIoUL~iTPO{L[z~,.:XE.eF|)ip{?#g< 3֣`-RmuQPJ~8/Д;dFq-,&ʢs6heFNzN[~A%Y5 Hbԉ~39VÑu֚zeǬ1AOҲ,&ybQTva}碮є+D _)!WʢuN9)&l-SNģi)~t­gDp]2f c9|W"1j: R|!|N@Phi bJ.=wD27U;i?`]_:d5[E2%'%)N}NdzL37D F1>m@ϑdn͗Tq{a d5Ό]!}LLp?<ljM${wK?W~@UZڋb=R2^k}PTD淘d[XihǞ:5.IVw0~(;*Ϊ}}LQJ'U=:Dx8nsx&ހ}k/F?N)>xP_3ʭ{Fa cMYwW#',׳6fӢ,(Q֫긬2FV@e3Q4BQ7pDCqbg%%&ZʩDFq㣠7lІ &WE Y' -bQRI 5-ZRrn6b'bG*uJK}˷E ,pf(ҙz:t48Jx$*5Fl]ebWhE I\_MƟCp|(nxIu\]S̻_$9۲O:˽YW=2SsfEȯ'3'=.Q[o*#ͼ9!?tc'H`JJ S#&(4^ 3̶>BUTI"Dr3?4XL nimDMQgu&ssWE/yYUueWO[YQ,RaYx*Ǔ/n긜%Vm~RBgv 0Ix+˫0}dwJ v"pP'nz(㌾ 9ȧ`T515l\:*vѦ4åO+;(*++gjY}xG; Uȷ?pj56>`%Zඁ_&Ydx11^lOx G̗k6ь[ld ea ldgX:ݢ0LgXY6Da~|_e`v24)mD2b$ܩbGre׌FJ F5I^ 1sV$4~DEw DWшۆ&=N^; CQ|F7tdcJo ^0f QC/"I>6ʛۋ S=&" Vo2D7e+"ciEh rZ@+{=Ye\ !ͥp˩gUءߟȒ^bcˈG滧yeFPAϒ8$ ,b?œ<6(]Az v~e/8P+aQ_ALvB~d Η+ ޖO9&ydlG܃ƧJtX~L=4Ù6 'Vԇ-(NQqR6nMIAH [l`*Arٹwlb5+sJ fL3HAm= ?%kHub??›Ǘ0op" pZKSv&;B/dĊ=+Y<#4~0?RD/Y7v<4̡tN< Az$"iC2Iov> $gA{ qaρɯ&EUBdZtȷbh j06< M&+&g^^۽ͷ!5 /O3B7ζ"z BVxNpW]XczX$Te칦=zg%D,_2ؕ`I=:5HZEǂQ4( '9X<\anYs'|pfsiRBhi]B K`T 2X V uZ7\ws^Eέځv C}$J34+oP}f_6xs`dR!6 \(Il.SҤ'&fEjw}rHsU HKKNMrKt˓77ͫ%[~R"jG(m8=(M'D{AWYZ^ܭ^77ĭ{t[lЕOFY/d^VGxحz#J1,v84 c2h(F/d!aYtweT^שno7Ns_x^C,' ?Njno7 ιMn^LoY&581gu` .,9d_J1OSuw˽ ^oowțynG 99P:kSFK>{{m>auoیNF3ݝn>,G":En"Hrzr?Q6zǼGb1/<ů xٌKе+9E7l Qz;q{=e۫MzT5L3G[4?[o8):a^o/Bf|N$7%Tx"ޚ"e^a/[s} נ̨/zW1rXѰa_W^nqmD 9]\_h(ܧZߗOXc-"醴x|3&Tib]G0m3ͬ-BL5V?,Ra$lWNgwh]Y;oM0JϢG!*$ک{_O/3ֻkz$ٍG7(9 DQ$,iׅš% kTamD.mEDyJ b"K,xjYf=On 牷,?-HV(=:fxgEz v5 MLnys:H q|2\Ί("}@ereʬ#]Gs.:;W'djB")6.@ON8+]f;c9=~k0_v;x&FE|:z"\ԩVGN'+nz_+Js 7Em5?)2G("X2pt@9'AhNM7ѥ?<$ dO r_w&O` GZ̓'ܿ~a?~a?hブ]~l-fJiv1k}IQY<u$E#7o/3>VĒDI/ӯe~鼶X(z "k"@\NH@Ԟ~bLFɕb;CZfv {/]!W9!ZVEm1t(g2F@NBmx[.`tu"Wڍb  d٠ _IyNX0vП貓j&갛8=;x;bi$E pFX$Ca/N՚ٙ *,Q!-鎖6 Jُ8zB]4 9oiUF⺉wi:oʔCL6DU*Z 2!qYs6}f/ I A/^h'v3`+ӷì{b yuDMW;TCbC;8{,¨ʊ8 ~ASM%aMhM6 ֳ_)gAe)a3 z^cS|t)A7';4?6ch8.+Z&ʐբhGQ0f/GG?"NK*!΢,[l`_h)6*3m2FCgDD @%VVHkX^[et#5NVR}}H.,echmB۰eiN׉t"x6/}-] '|gi :\7u?ڢzjXkXAt@YrV=gLD׈ⲧPRi]1E|AJTJᚅEy]>-2ԃ|StqWT]TrQD'qEӊ}`!#Ey9Jd]F[F]CTT*IN=$ȍM ksY"--yA,'9Vp:תpKQ5Nf!RiPo/|9O..ArE֓eV5A]W5ZtLBP#!DԄQ%1*I^iUBfU#uSTPgE-NC'+|E􇦰L< RCVEd}HӋu+Mxw.Ieў܊>ޠ}Nכ2H?X:T5|mc&,\$!-є$RH.Gbl,sEfSEJOߗւn!%?aገN5IK$e2s$;꣐/OW\1 =ĩhBӑ&2rl$khjQuիL,_C?MI~ wiE |\?DZt@ӝtJ**Bpy\$%mHoiX3dBvd[u_V,DIY'T}8G@L$t&A8 ޏ4cnCK75PViTl@f0By@?V-&`JuAVEdv>O{v`l%u֕7S-nK%*2?֐ތZ4M#OxQ+R0j:0>,{}荪*kЏ3=|cbʖp 28%+^kW3˓d ]Ȗd*]ܷ'gz&`8 yH/跪~暴CU"X5SSr>ѣxu.4/]I~tvz4+x#\+zty$-*d֖ÇEɕ 2x@977&jn--UIA9Lf ei1CwBWC7SSp^+8BDDbXQTRc-XW›u GN;&QsLwEZ&Vs`vp/rX'"]>GUdi$]F=IvW.I@1LpzJ~#pv.{!x]| _!|ꅚj:G;t" ܱ:/HrdAEYmE'X)]YVuϫ*hJd4;@}wn-?.^//4Blrny߹BΉ.|HOQZF+`I`Мv]k/uB]0̒SuΎˍz=B7 ։O:E'p;TcRfbJm_L/6.w>|2YuNzt#Q@!!|WWj;t߅IM+UUu3P7WUdCyW͠>qK-tc,PPA{:; VQ7jl\Ɖ=ԏ9Mg)*wi`Od;&3˄,4zzjx[5S70)5lұߕ#{z/i}RoDy3kq@׾n'09x?Gϝo*NB҈X cNhJXӨۜPvqoYd2Q}Gt (a\9ttÞy.m|}ca0WlxI?h腵_zD&%h` nqijzZB  &${;GU|(%#3p&K&Cyv _@fRJO$ wq|lN5%NE2r3F44U2LSceAлTB/8_d}=*<[ ě[*Ko| ps&zJi$FKat yx;Z[0eꉚ.ZѸ>>}LJp&.8hv"#Cң,^Cs^̍Xg'Bd1I0H }NvLsJ;(>9) V/VSH !Dqn򔴠,%kCf(eMYaC+Ҙ&HVS_qF1A8):Us~w7mHQM'bPI7_IB4uDQ.| IGEq|q!(xk" WSq]`yUjqTjgZ4;84Ah=ފ\$$S̸KRjm*~c"ehn`@@$Gc% Xj:MG*hТj8 
mYw!yEME[7~)&k=>3h.+|:y*c"UWy.X$+07t0;UP1Ҏ4V~EaSIF>W[U;U@>DF{T+9Gh,Nʛ XױKdiA;t LPdUOu 9VULk,]`"ZR,{Q-!- "uS7Hړ7WtMuc {nMRV@F4IBŲD:",; 1Ay\uT||`kNnH;|%/":b|Sb:yQU0ZPvB%}s?dt=rDg`T:&Cf)G*̤fH<''F 9%*L볉P/'QēdoGm=8%yVV0 b!`.C{}c49[We!_vE/UVR%- 9@/vYt?[ ~EeI"8cHyx{ Y[Aj)vߑ >:*| g/vԟbD O<^LSjA}rOMg|-FCV5QbO=K ~V{NFA^aD@yUb\w6݉3t) y4#eNFviyS"(z1P:찻Џ+l]w[AVBuqDu-43>p l$b 2a؅T4&ˈ|ubV.XymSiY~C V,$и>áM)q;_a{5@-28yGvt'oyp &3qt96v;\ƀϠ33|v|q *]B=~FMDY򯐍7-a(Od2fN`N:5֓?4Ċv⣦&9ghsWHZ(s'Нfֆd"`RvBBoY azȜr6@"x1Q fB#)j2Y[ݯUfNG ߷O؋PқEzgg`t|,i:%#||p)r.:i>W/  _`7ʪN評nP͜E؝Y uXFmw"`O'`9Ty:u\5- {[eIB6 GR\öA{%?D 'NhӁ$ SҝBcZ5P򐖁,uމlpUt4"+ca'ܗƉXu.brA[., Ua9rӴY 侜>LFbIgTIUd7*\K 7\D2R=X=CN-h>LH@$+ʸ(Y G\`sI{ vdQiY(팢uIqiڗf-T4tu`硾+ۙ\-$vRtiX>oz+jdQ\`4>&:iOB=->9ISAM8/pCJ*e9.(@hT.$]\b#$HR,jFv>r]l)NS֦(ط7/8هx7(!NZr\iP2Kɦҳ.jD~lqrRlꍚ\2\]4]F;Qu _Gq#gSJrN @?nMvSi4e{NeKt͎9Fd ;I@;ABRH:>^gGnz[]!G@ 6w&W36toܶnv^F]06$3Q;sϠ%?l(2=PvE@wWء 7gfǭsw)Kn1i:Z)B;1(?Zbⴸ!) 򵢮,,QN}ķMkjɸ≾rT;jw_QEj58Nu﹮ }/Y4:J?XVf)ج(u@kz7h&CЫt"}Ҫnӱݱn*~%*j^&y ,xɆq8FvE #DfE׋i^%^O!yOL>U0^TVeMzݾ:^do"Pm*zYsO1z | h^9㉞FDgF5‘#*YEș8%b"Uݯn5zC[%N#ZQ׶Ż>,O7̇ #{bQ Ռ GE>y4;7_lU]ՍMt/Ѽ|^/+rzT^;C_Rd C}Mn/tA/$,n|e6Џҹ›&|ď;.m*"{ ^f(BOQIvܼ҄FzýV/kZxz<Y(ګ-:D08x=V^M{@CoIy/;B&;5Ǽ=Y5u9^ӋFNx۽Z7ݛU6Mw#81he[ە2m`m%*&#m6k n-e(g[2+c`Z+\a)’.Wkb(0RQ+O46;>@rR>Wݒaow;5&]|RapA;H\HPuZXXaϽG 셻S/,xEjEYڸ E*U_)\;*xM}m{[=k`+G ;`p E;fj9Dbyr? OR/:t;t3WspA9A%@ggҰhbڊ v)q%$) B K1+IќPE7Uf[B/;1S̷ZYM|, 0J ,κI&\E=V|_%> [ C #[r'tl_zo}Wc EbZh-^GT|muGt>9U|L:Mb!옻5҂i䲖86g2wlƓ20+I,7+T -Naa;Y|S]LgFx"CRQx45kO.[ܾvzE~euYr2 goUڨP!ڤ=. SyqVu{e{6q }>_p`a,jf As*1CW Rt]?$")S|Y@ӌ_fܴ8YXoh2h) 9ib{!+5"-A ZȎ#O@u L({֟@Yh;k"x$|/TgSIqZA]ԝ\$@bW~c *eu df}Gɽk(_a?~a?{jV{CBRYP3AjENt3C,~ f8*@( v*^"t?gJm[>8ο WpTGG^%TzuCIlX<1^_كXA>_|d65_Vdżf:T*D=x1κNe{5}bPEdn߀t1+B [DTt3IxG\"~ X<8y BEVT\C ]Ro}y H|ڮ>.t4 Ljv`k}QRv5>rLon<=>7:lpvv _nE& Hǜ Zl1N q)o[ίjALY~O4R>L?Yk\LA "JӽЉCxV^Z=Ֆ;^ =Ue0(z$ʱ?@*ib /#.2L%iY~_TU̽ꢼ'zN}2>ʺ=Z )\"d&q7we55\ 24ȅ;uX #I-¬2M1XuKuձ%i IQMYj%ddGgn]7-sj94Yz"/{lfiZ*Xrb*@vW~4Ѣ4n69NgHD{"jxˆ^G[+]؍VIH]ŐL,KUj(1\$-cx):Zj^ׅl,ē @_LH27e@|'EOwނd:Lsw~亥%֋>2YP&Ea Jod_l /Ƴ8*)|ʭC ^]i:N3Ev~?*V,(NHN'YVmQRΕ!FT+SH[29Ax)x[0@yHj-/ (:zNb7O1=py qqR5hph%"e#'㿲mf$XOԼ4\0dcr&- ]G*SfY?ߑ"7EdC(9A"ayM)& 'mTL=_FTxݹpR:, G|!&ٺNU 'd0v~#*ֆqIU:K wwwmqh4N#&5и'];swt=cd'YsϚS։/V<3{#mXYQocIE/x|7@#U=H֓ U!}q훾Pڨ0/zgLw.\C V.ku=0R( ֦5.ZFќbu>Kώ#*+[(/Lr [d>Yw^YCT=O-SQ1uiF46t&j?MnA%hա ԃ!K]u"ޕހ-${e3٘˳ׅ}v-79oaD*hez3r"\Mϛ*g;U!mz`h)ڦs SBudNl!G4CW`eg/9˱J(v.*cBg3j &,rZ"=[wPC3Ziv-`&ӫT90,2N*ET/@!_ؼ8 "d^>E׏j~цA 9"^ʌ8ތ|1NpJ6bYgEbuH TxT~lnFJ3RoЇrP B$/d'􂙪<}YeYRUZ,>"HLs0{#(Ht^H2Z4۫P5JdPdo1@PUJu\iE_ ^+͚ YYFUeDiC1HETwup~1geCP$by= dECSC >9!yKʼ*x d_ՃNi=p2dUx5ŏ:$_1YݭS+VPSWm"IlzLf"@_ "RDoNN0Idt.n rLp@&?DOUrj?D:q B8 \w`'[#qERMpirNE'}&ceRYݠn0R7]&Bt*&zzr<.@;y3sqa`_~gx\ I}-/櫝H^{|#QE<,79xk'];kTmD]r']OsL%3{olt& g!zmB"fjix;⻁V"4'"j"/2\0#`3@Z~ cUu(@v̞'XjoNlj1daB` Lt6x0M\ FFZV$<8":j5>-Ku|^^I.Ƥ"mG{38>`oEH}L5*&Ezf+46(nG.@vgM}K-6uQD?%zQ<Z0AHtB&*xy?yep ژ9^vc.4  IewYBWEK\)xkxt<'YEX-mby11Hlbk=gLׂ<-KK>ʬa ;lqyk0*mBV%xUbetzA6ٙ?Fr$*<*4I-o(&=#yAHgv>YкbNS)%'J9{|#_궬 M5NW''i7OKsB}Snf'4턻0ZDXw-)RpH9( LV0VQᰛʣ@H7Nur;$Xg h^P(ȣ VsdZFb#s0d`>=lHڙ BsŁfi>~1tnXEO]#xWmh*D| bt(b1%.dϳ(!Hg~+`R 4;774DT+}YM7 ci$kZml%E֦P}QEm$MR[털ލ(ZeX5š>qV^nD%}#[meեz[t)iw+NhPh @<=j<=uqMbHdzʯ*r֥CbiYRh;SPt?㤒xz 4󖺘ש|8gՑ EengPʷ$k V978DZQ b01M"FW: c[5*><6S"LX>Esi~;F|ir'b'.b(֪ ΋ޑt0mDw{;sYn 㼐EKta]R>SgG{=Ln-t:;M:%W[=qvOH$=J39isOS@MnցCJ(oRӛJz[8$y=pFgZ$j:,0Y K갺 ewg;=nqhpҕ$=ZJ6L(6D!BRrrmDPyOW]띈Ne)O]Y]?u<ꆸ;mrAfCby`>Φgx$wY]ݕ&=u"pٻ?| 2o |\hAQ~Zp;J6ZI9qƃQtn`o#al%o()7*$YHT.;N H*6=7uE@M[ ]k(ķ3M>qեa7N.g'1* \sʆ,t-|蛪C>% I=on> -zOi+ Z492¹ʕЀ`=IZ(˚ qS`̦N%n uJۚ{.t6;gR@P. ǧ9?mE&S%dyƪ. 
~ǝƕ%I`a9L4 u#ǧY[~~K[@1ޜ]fDFuQQys~wE?gf+6J(Al }OxkG# (xlE$>3'76ϗẒ:gxlztaCIHsd+},*ŗ}D/QNiIOyz\?;M=E?D!)o5r8-j!jOY0PUE4m9>Jeyzm=()ij#):PRέpqjC4٥IUR4Pd<{UN)</m5^чwўt#Wum7{7 ߦm7 D@:FyT)DFS-,k;;mI \ J鵪(= a+{HΓdpW) JN3&S@0=Y퓣E*ލ@8 &MFP,ҙ ƪ=pgFg )Nu ,}3Ux.HlTL,t&ikf,C ~ XmXF'_9Svt^޴WœHˡi;ڞl[p;BQ>kXZ <`[ B(cPaNAҔ^97[iu~7º+C= -7{jnD2c#Y>ֈ%iIHv<}Lx* N (+:<_pxYqVS5 yDdY1κWvò7\3:tfNc%qiV.ZSD@8X35̀X쉵w<𙎓1j'2#XMJ=$b n̪ƪU:9 z"{vJVGV0&&CqZ; ϏngzYk? Zj+>7ײ2Jv؞lWDeQ4=ްG,ʣZNIUJ}d?v6Bi* U, t(PvgsICi,eRør$]%( BLNQ?MYr.B"!a%ϊ@Ev.6R"1t6"ڀPnAԭ*q\C):Usyq)$'-Bs`z+ޚ,/_{:K qCx,VZ4MC듵).N樓URTy]S^6=;]2 В /E r4$~r46Mw妧&+aCb"Ex[GFI' A(WjxV>IJΗGu> $gtJڼ[)*#ˉ/uK $upRgkhO I@~2ҚX jdoLder?%^Ҍku\oCIp^3^}A+h xBV~c('Dls()@ iQnz" uIMU#dfC WSzhO$# 6_GH+:E'UJǬ('ၤ=A7ӃV55 +IkuΌ:&(+Bf'P߰zU՗EEKX›ӗZxI?:B2D}3ʪ'(mF]QIÓ0<~Au}].Uiϴg?f+y:M5I1]'H=DaNމ%#tF%k:&cey?]B\ ~$ E{*e߷=hmf=ɾa]S4D JSj^.ZU5+?P  / : #2I,gh[YoN8WKY+B8z.V"B zknyFTKFEGUATH-XV(9:{{^xF6k NG5JiT[~~kwϨX4I58iLnt!/&`q WE Zj*2as-`G>)GJ◸. 2 Ͱ:F_3|C1#UVR"qke:lq*Kb Iw᪴>;\ӰjQ5h1?@r:DxJR>YX.\/~szL&| kl u\O._ fȃg N <:ZĻEzQd)M:=o |&4M&mE%һ /b|gggz $UH" dCorEc+ 2;\M[lMt 0TǵqW<8/)BЙq@6ZG,KīH~2ɺ38#uw-޶ޗ( *r$, TEI Ix-|ꮘN2`fo7G4 ͸xȺZeaAӳƈPN# JSḬd=Sh"JWCܖQuq"uZ |apR?{z6FV*{SB h㢤KbPL%Z5RU~ghI!Ѩ%*jTvnJٟv>89iɤn{@*!.q{z~Mfl.g' #Jעh+*dyB - K ҥ5qRhffA,6#GP"k'Sp]Jޚ=W9yE/|43jQD)׫,{5H vVh{zCBs5ȨǤDvC93j=*:.#Mq~7XbXw*@Et,kBcԯjEUU[~wczq(jaXHbp{~_%58!%/!rDjz>l)xZ'UxDuS  Il!uiUrɚ_j՗ p kTZɞ6vhyNh*,:15Dw!EP5Jl3ʡzF,#4UeX^,DO<Ã=+Bl=L-PÕȶ¡\?=lS5UyT$mqȐ8L%UAZ`xJ<^2ŮECL.UT<'J V+h?y> $jq<34;L$礉a D Z]0QK cid_8 S؞W7Te]-r9 :vV4NblNriw;cx(;aZ%ÌJ+\X 3#K΁b'eu' fF0^SFp^e?`zYO5~EfW:^l=p+ ὈT4(^mF-n`2<KNJHU;_od+ {E`+)r(j ֪Jmg53QOMFyE#ˮb:9a;Soeecׇp_D.2_BV@5ܘ3ecHl&*h,qo&T3{3*{T9ZldSB ~ e||aoAR DU0]beIʮb $yfZN>q2E4擟^ NCC= B൤iDRɏ"OM~d| !iWҫ$o_\xd25M>e9Et5s6R3PܚDCJQL|NѰ8-Gm`> 3q=l#墾h*:F%s~_2wNVT#[!a#MV |PQ:|V`ǹXt0[4kI_|'5bDNyQ\0xY&/xr5L&9|))W.*PW4'!)< @n]%iV"m6q`='8Jl qA\L B"9_82'W`V$ 4=aAdVTI_ʔb`vF2p-Ùs+Ğ]x &_{a0WDYfQ%xY1J&XSi׆^l^&WRX X"|CV#NV bucs(v'+\ZqN"Xa$ t^SMhպ.k+"Ej ~ӈpMVSj /\ *Nz2S1f^D,𛼴a(* |)PԺ,鞙Il?ڥ'¾EZ84 J Yjw Dأ<}y)8/WӪ@/Z/rR)':,fG*T5h RPӛUKGI OI j'Z ?d(O'zO!/MX/>B&~=k*M$(2P>^UOm3%NE80:{jxn*j%y}=:σ4W'3R$IlLv4'i}ϖW H ZnG ImBaҪsW'x8|}Y)Ďp^" $w<ѷאqʨbpċP{2efѴKo4EU/{= bG{ZedV+G'zS顖vmcp=Jk~ xÝQngtR;An 7Ur+xe~D*iEԟt2g GXE+ OY1Uw;}Z51pKrbp;uyߑɧw.FzpOЕLr[:T^2YIm ^ɝ~>^" Ik;WsLOw'[x19,GU)Xa?D\iuϸOtM7N+'RЕ&9`ƨ_e%yK{E A #vS/%܀isT^XL<}q߹wܹn;|һ-wN2gJl_uVk6RHb(Bw1#];ԝ~t4#HT=Ecm'lѪ32~/pWl7 V&'Fj7qJkU+xN ælOd5Tw[kMn';->weo1RL Yi/l4/rs|pu{x )G^^j_[b$ ItҳlintOK]p7Ibe'pVB2"H)":!)B;G S@7ה2L|s}*ۧ gKlz1|vv܆f;iI"  Д@ l BU6zHÐXwye ]Tdy]eHH~+kynZu!=%bN[؍&0X}/x)x:fp9K'3FOW߫{ MMOڳ¨Z˳d* (˚+Uvμ 2Pl 1΂FCp7 ֋Nr\jCfJHGuO_hFˎ6/"zd|\:Vjn_PpVӓDg` QW܂![%K,1]k8cWC:Nӱ#57!$$J4sq5^:w|l6^QS49K 5X2ܙ:TGAwOFCrۆ,v邲.Mґ<.gIO9~$ %'}DI+[ P|T]:){]Қ9ϐv<$sÝժQMެ[40߲v{ D1<*r'즈|}Ju҈C%N")ٿu3=ȊҼ)?+_ d +ppnx[c 뭽mG,YA׿{}o;7f}o;=T?+-Z?foٹ#4?@oOx5? V0[" OqL&DV_UF'j߬]%"QYWEȊ?uJ9~dC,|}jGbwr/R.?>[w^h̏" E#oM Yq ɂj 7UIq[GWt¸$ǺKwIw4Hp. 
Np]wwn{{oݵ<Sge`ew'u" 3; 3`1 hX(NOYq=Zw,>Vz~/#y26b;|ApW9"N0|r/ˉTeqРSs-GMh26 .8 ]S7!8+r=|oh$aCBP9r a9:Mt5 k!xfy6^o7- yeZ㶘)7+ ʫ ݠjX>%fMbO*\&2Z} w 繗'$CqN~X5A6o}8 k!` W}eMhxcRΏ۰3PGVR&}Se/L)Vx%nSO-0a&?gr`N΂`WԦ}'C0gp:z PVDV$Ѵ VԇΣK|{#J\Ya*M\U+ @dUhr2ˎeF@X )5P'ׯDcuIJ $r]Ϟ@^+i6b<.fz!|zZϐ6t h5ܞId)/PDt#d#9V4+hj1%^rf/X:uAwBxi=Ze!'u;6yα!")oj]z~ZNꋼ Ya%M>*z8P?H#xV}tj&oxV6y< -Ii'~lg -v:TGF|7(eURcpq ԖuHWG՛^B$W F8 69_|9twF7j>z[%a-qLK,/`qH̓U 5UpN^@{N$@Hx|nZQ@"i]4 `z]TՆl+K (}E{_Rq '=7 w@5q2:3gJ>!/wau.ž23T{XFA>\4b%YtUzN(sV?Cqo$$ Zch!ކPMvnLzJ{l݅gMP8T,z;DdJ"&jw Z(Fd\C>.Z?ib>yOS򝢠N4iuOWя:]Ce= 6ra^&)Xs2@+헸;e|9jV[eutI9煫ި88 .FX|2"u[Twv8K,ee&%|}wP$E{e8d͵#ή[PlϘJ]%FتjQUDQDK>eݴo$/Ů-2B#5GiE9ƶ#.nS:t.74/%[qۻc*OW_OG?Vg]UeTue1QyފlaC5íI3zVcIa* +~NDBd_*Ծ V~h+p҈^WiS65ƒ6TkzFKC>ڑt>yh .IWXGy[4vc{PqT;Hàʊ{X!^ۏ= 3ڜ~p3Y_3T=L#QvxI6vMm6bjS.#SHmFJdzJC{)c7UIP&R“)q W ( Hf6'qtGBkPԝ[Fi%o'd\{܌1|.!a4DЗ\>"Ϊ539!gx"vٽȷ O32DwūK*=[eQ;e/ rh/JLycvo`7%t /o9r: /W^'|҄LK%Bx,6u*0EDPuYv&iW#d  {vU1nŰFMȒ mz?t)}EWuO Lg!qaɀK9-|?- z/L+ϙ~FZ#C+z7d9OJҏThoo.ϒXpwY6QijיK:7|9U=βl9{BfR7/lC+9Ld=>=~zZwrK|w1Q(6[GXvVUڮ\>c9t~b쁴r.Q2]B,W=O ܄]E &c]]ވ:Z2xTEm$>_.IAeOu 'S~bx)ȅRŪN,׼\\(x(ލ&y\$B+19F_S5`SܧFq<:WAY]s>3`YAh:n|y|})|]|dQ{E.'QUO/ZW D=UR+.(A:N$?iˡvC;5ʂ2 ꌾ,\V9!rT+tD;zzzz[iA*tn Iv3/к1bQWV? |$nӜ,iWh;Jp Gv0@P<O1"Y]Stǒ[%%ua6 DCp$NRēO`XBޚFYT)7J=z"3\P7c)1.#A1kjqnj|vhJv):m݌mB4i7ӡ| +JgMFu«I.YhiR Xb }YTQja"QC44֎d:%} `YvNhZul:&I ϼ҉})BkC!a$/Y?aǚ+=9mgS$K+!CoN}pM҉7lF9OeE5ZΒa%b-i͢\ cSd*b=_͇2P,;ʉ c| l(N[qw"\<'O DlAE/ٔì(TRH4ƦC#b,b%4J\*\&^%rHg k*̪$rr.|$\~ƣH4Џd-gN@NB\8y.+yۙQ3n([ KBb|;m?'jj+rmWb?bhxP yD7+ !yr֏R$^E8l'|;e+vZ[[ gEDYU\`qC29Ud78mv,vyPdY<荍:XF7 9" j< oҳC^F"4 -ҊK-x$"tRO@G":˪| }݈f{h.x8Oj:1Zo&WiD1NgEFCtm2~bX5Dʳ~!;fJ^R9ø:ⶈ'rdŇ}wP#Hf!dfFVOeV4%[+;J)CV+"+nMo]㸚;8Ӽkw[>>{).D+<$}3 7u @F mrq M%|Q1楕p-nΣK-dz-#֊"| $Y5qvtmH}SX-cx,;![Y}o]#,yPibS<Dqjgr5<7/^lz>? wEbS-e:@4R@= $GCx@;+c|#jgzeDpJv|x=`:0Z~^ 6"Us<{&64B팺(k5 daDv;Di&Da $;.b۴-ls:Պ+foݓ}ӬiMt(3[q.y+KA˪|!ωFG-E2[.bs E .FqE D2Rξ?O[e՞bw ? \}c|{=m+[,7 b(ie<\dGh 6l3 -*P&wvL+;H{]Af ϱoؙu_GROoW_4l*<3I0U+m>Fș6:m 7D$ P{TtUjPEO`QwD\ƍY{*˲>![Q#ޝ R9ȧAI^ t&ab/?єA.ZG2XpUU)Nx!x&q 'J]djXYFL&+xrC'[ޠg[xVISW DށZdz-#, ጬ!,,7.ٙW-p\c%gA(>~ %UKN_|~/YeiP(6$moN=9ZnW7 k`vu8,!}ۢDBb XHߣ=a8SHJqTb6AgxqCc O2~?rao!^&;9T78]=~n78 ?IbqߣK[Ϊ@F:Zcxbf*qH^SQ7 :M8ư`ɇZ?n![X,lh rUe 3iVN!'(Y6> P/Y~B*!dMo9fssM?sE4EV.cSD>?_wW/tH+&CʓZhupi_:ڊVONr=D6P "'zoX*,CW|(>-v`wN_'ACȏ~4)# 죔s_"(b/heb(eoyrfT4]Ioз̧rV!-V=d[p~2j *t^w㼰e<_vx߶+Q4!@Q,T6L>b3į圏.C {|2ON4AyZ%I|l:R23a\7>QfXd-,Q™r#ưB8:ZHlD˱[D kB*e"rC~վ1SA$<l>d3nd!}rN_uOTGt o%+p12 UFn,_X:SB `<VV\^դx;UIv 8TJ?UQ'w*+ e2X:b hҀ4NoyF 0z-%Jq$N& s@y MIz^73Tݩˎ<Po|:/T2F+TSz'nv7W*_lF6RG0<8Gw=| |F+H;<;Ydo]O$ ` dV+-{F~k7nmMJqd-ŧ,~F<\+ݯl7`>װHVh'W&fkFE+y+ p7FmL7:ho2Ǜ`oXs7λ?vlQFaz&.HRU\` ImV …[.(Ǫl(=≩\ `,;::- H铳D ư,;ҡs\ beyYK~ ƊF2VNXPGe7FDZ4 Pv^b9E%T#a'g][0f"oƯkE7Sbm=d{;BgN&K@ѫ*Xm ᨘ_ꏊz}do8fc $ŃY0x"S"PVTu7^M=!,5 hޚN-46wAjYPTmT1b9]uQoߢSfKmdiBnct]UOo֩tf9bch _PVqt/eW%¦|j*geޣOMH6Cp%6ՙx~K$f5Y_t,[r,O"lj+ns 꽄N,J_I};+}`ޕ/4Mfu$ ń&. 竁(G5L )S#jv]SU^WQa,n|n3{I(x= h^w8Nዤ}V7Z?p*rpM~t+4.OVl?5x&feI:M!TDq 7]ҐRռ&?>d`+l ~7@DC-tz'R5>m*P6qCJ:P_rwBr҄ӈ.~n#d-\T$#!ŗe^ 1LǷ=f<̅pꩩF6&&G֌G).a)gh jQnF{fB)hE[#N2,fR^'Y4V?0Ni$@-ɇ|3昨НfiF$ M\Nd4jc7vXryVBR|uq^JL |2&I8qp(~Gt6Y4VQ,F:{zWd8"~H)2ezhe@zl/Dhnr6.d2 }.y[N*sJR|:HϓY$]FO1~.L1_; +Lfh'uZ5ފv%yJ㳖Ɠ, +IC}Cm -ELJ}.[Fo-Uyֆ. 
xj?p.CM7ո)'T>`_]3z; 3*BDBYB=|h} "+Ȃ_$pNYsZsQ/iG19F8R Ugy-'[i}׍ }Zg vDS`lf|ytR 9s̭E}WڜIeUyvsniWD_xoR o2)= D 1ߠY&/$.>2i@:Y9_ntut\0T.3]{  c?ۙ%4`Tr=F=1FKq$0ycЏ]P5Ҋ,'Lb^_f4}.nS`NT\8Ƒ}/nw!=mx+חuѵ?0*5LWꁼ=y a߮8#NBBkv'xHIP>A97뀻Z-/UbH-&@QqNydgޫ9Jk4$Swu>{N,-I]haڂri8\ҘWou@wb \}sNBDl'+_b8b^s$e+ir.z[e`֪abPD X}GBN1@v(zt&I 'F^k-臺.m熾k*j<6²L#bXn"T.I`=ML?̻y(%~̶˃&aZn4BST,UVUYvw` GlF v7\/ś![n|9$Qt:M'spt%~z8]rgx8o3 &jPS: W0|Zd6Fx_[1[Cb{iyA8J,u Ǡ^c0#qr(%R ?fH< Fy{{yzۇp:˨ FudmTkjɡ]L_NRE Q U[YN/߉CR@V joɣMj.tgk&WU*$=VƵδG^'<a"5(l oo`lg|3-O-)BVM*)*@^ޚ2ӓ0MJîaXO2RMYeW ~/b r"k:/ aa z)'ڣq/4/"*z4=ǻzMr؍FtS-x C&ߟIR2ÒVRr Ya7PTl4,2!Xi|]1/Ln 61# :>p//A/"7gM5'~ AnϴFBfdh_ìezKg/GKvrZ+ Zܐ!E@@fYSP.mYfݴ.eaW Ir[V8b4mӅXom`lb(fx{MeSV d]agsH MN=xv < Gӌ7w eTڎt 9(zo^W1n5l4@!mcxS/BX~C6O޾Q d<:CSV"ͮ yv-'V; M}}gebRVȼk*427ygЅl,w"_dӇ#d .Έ!x/UdBۚbŊ$xЗعE_zz{)yC۳R|zQXj.߈:_yv#]qUG 4<" G8ޏ>i4"m;aWL}v3MUAu}pǨLeHe-q Ca)T]5_}ES, n* Cr> lbus D44k0P7F'0j MaW(_WTͅq%j&>hc}_A܂hF/?Ifѽ,6B~MwJ< LԘcJ*\oT^J\dyHDB2*V82b _9!:DѬA22;6,HڒZI=#γ9BQv; ]E?ꍳӻ,$6襯 YrSEe(s_R[X7o/-.m&챨ab}/ ,3~6cPFgub̤ m~j_ktc6)1pn[` wyYZ`)lGTԨ묩,0;'x=*N! ~#qo˜i^M}\?4֒[ qUlu^dcIWZ>\VHijLTfR6v|8:袺""64K~ojjJ2s?u~M bqEf2K^T"cp8T`ظlj%l'e3ZYtս0~S8Bj6JTy1ξR35lX-wpƙ%ՖUjQIhRzl "%w{=kt\J8#F=BD>Q4U6FW0B}beGvbRfa<-|_PwTj 3XJ5v&Y /vF aKϯ[Uul]JQfW Urt f{pZgO! Ud(?@KEO8Qw*}NcnM}ƽ/u|;a/\ C8wVo#s),)V,Ÿ-|kO府^2ǃx'KONj`T=XMA[t_߀XTXW>e]T}d-IfwóJ8}iM\uX$6FW7rZ4FP+ݒu~g,jִ3Z=jᎰ?*Z.hoX`TO3JqNre&t)<ڍ.ھ\ڙAqUMD9ZEM&vNQ 7+۫DLo>Igo@ry4'%Y!J|5I I,g.*x-ށ/0Ffh$ cz;=F(mEtv:fW7b|*ad.pu&gib<^c3]Lp*;+Jي5Z*m[Mp;sp;CM|97 (\L~קWúd ga:,z`IWN<ʝqsVkPXV Y:6&]qSwzSvnA7[-7[MVsPybHa;պ˯jT6WOqK{' rB/wџ.E }V'u,wݝNug}nV/=RtKJWb,Ngsh/ds%;zsIgF:Yb!2^gNl*ǽu7-X;d58n6{})%ckgE<- #șN27-놺^w#u8 y9 ~"`ŅY"~BK7oCN;s1NV4]O= -Fl"[en@CqyҔ&J~s"Dt'mQJt5p?V<]~ee7oH;EC 蠋*8yieGD whEUAVCR34F(kd=lG/"!wb![8aN 'S8b}=):݅tz @NƇ"\q{j2Ѥ3KYSp:  V ZfhK:*z,|ۍWʺOudM}WښT35׬tgMޒ[DmF+qZ3z%tLqԊ9uP%IvZ2W%u`EكGZ`ÔKe.ѝMA5}gQ.`$Ot삳xLלFnXO6гˮO␻m!X-*٫*\\fCA n)37lgRh)ADKy }s^Q4`Jj_鴪*~21sFGWS%ؾ[{ͮHUZl+{7t17V{TZ%QB$\5(-&mr('0SHK'%W$I1J捡ۉ2KYKO_xc&t398yOpvnO g?| gͼudv?~a?~ _8|(DQ|3a Fşc;Y\wy{QUh:|%?ܻkVM'.<1&ߞ ttXKgamO7SS^ 6)CixCZav]Z|#k΋ߠ&>%-PwG2z .tU%"1J5+` z-|_4%XTWw&r<&VISǚ b ;e}X(<"NLYu}[˷A찕Nm~Y]=UTN #Lדո]>B {Nj@'`\4^P)R D{Fmu<]eTWQޱ MEabM c F>IԐeoL\AC~=ثpoK#t&.ê~ˣb;Ml?K5{ɬ" &>O2P=`xa r^< Tdd'`:dMT]K'2mD6I %y Jb\,{o)׺F* @&zA,F ]o- r&W>f]FBa2Z߄sLxA[,,b <=V4Aӭ|AduߺHBHGqkem| 5:DWՕ%Gf _y%QQ8śri^cᢤڥC:ve0h]z`?X ZLo%2&KN<'(f)cQ/]P]0=u<9pW4)rV4\! i>vv1k嶳nx=z"_JA /u3WS/uw2gw9qvg,-.}u*^*"|"H?2f mSdKk G}O =R_3onn2gyDX8eM 쯋òZ''3M/^uP<Me7;cl" &q@.`ՠrH*ApkzcO, [,Ŗo|.'s^5M.K1 jW*)&}>v4UzК%ɩw|0M~&ib3v,-NOƤߪ(_询MY!J8ΪɗҒa<~&lɌZҶ JJj$=d%KXKfs:Σ *I :n&zŔD_ir #Z둲)t#n<3J9 8XZM5(=ʋV+ۼ8^GVJFV&8|ã"/G{2%G\ %_E,h idnH[OJH' :/FaB7C*vw5h֑%faX`aQ$K<i/IЋt2(:v%{%Z#y9s r-ʥ|?CkpZ%Q@u%Y2&X!>㧌. 
PKHQS$S$/nilearn/datasets/data/confounds_with_header.csv
csf constant linearTrend pc1 wm global motion-pitch motion-roll motion-yaw motion-x motion-y motion-z gm compcor1 compcor2 compcor3 compcor4 compcor5
1.229839302974849852e+04 1.000000000000000000e+00 0.000000000000000000e+00 -1.209729536732183641e-01 9.371661171813821056e+03 9.955469319526111576e+03 1.032000000000000001e-01 -6.370000000000000662e-02 -1.516000000000000125e-01 8.400000000000000522e-02 -3.760000000000000148e-02 -1.119999999999999989e-02 1.061830985554671679e+04 -3.998130120715670532e-02 -9.426659583461102776e-03 -7.456785131929072330e-02 8.386601617226326078e-03 -3.655864871360897661e-02
1.227561693822728921e+04 1.000000000000000000e+00 1.000000000000000000e+00 -1.327750198554419891e-01 9.361044879814751766e+03 9.947987149952519758e+03 9.529999999999999583e-02 -7.080000000000000182e-02 -1.562000000000000055e-01 7.220000000000000029e-02 -1.980000000000000163e-02 2.099999999999999870e-03 1.061151430137066563e+04 -3.036472243669530169e-02 -8.704616161306136246e-02 -2.379645927002238537e-02 -6.263368565484577627e-03 -7.439287526447796450e-02
1.223879397170608172e+04 1.000000000000000000e+00 2.000000000000000000e+00 -9.783709536959696307e-02 9.369707317259690171e+03 9.945132834526119041e+03 9.710000000000000575e-02 -7.950000000000000122e-02 -1.453000000000000125e-01 9.719999999999999474e-02 -4.390000000000000152e-02 -2.409999999999999989e-02 1.059199154806529441e+04 -2.553735949083452139e-03 6.901926327939383876e-02 1.039545770217302767e-01 -3.567045347484516793e-02 -5.337239841466561580e-02
1.226398615803303255e+04 1.000000000000000000e+00 3.000000000000000000e+00 -1.282148276715167068e-01 9.347113522522880885e+03 9.943648642214780921e+03 9.180000000000000659e-02 -6.069999999999999701e-02 -1.600999999999999923e-01 8.770000000000000018e-02 -4.179999999999999688e-02 -1.329999999999999932e-02 1.059190504575471095e+04 7.901905231774197791e-02 2.557508494258098342e-02 -1.037594358704445885e-01 6.786376123117258963e-02 -7.283346221118980335e-03
1.222940510041291236e+04 1.000000000000000000e+00 4.000000000000000000e+00 -1.297649066612099888e-01 9.343649311970804774e+03 9.925640884969254330e+03 8.730000000000000260e-02 -7.059999999999999609e-02 -1.481999999999999984e-01 7.119999999999999940e-02 -3.130000000000000143e-02 -1.179999999999999973e-02 1.057099247065995587e+04 7.595937788298047499e-02 -1.701211692836823719e-02 8.140232707714770144e-02 7.865477355516237168e-02 -2.113934620198187897e-02
1.219875179476351332e+04 1.000000000000000000e+00 5.000000000000000000e+00 -1.192224525413226738e-01 9.360618189681015792e+03 9.913370059972809031e+03 9.520000000000000684e-02 -5.979999999999999899e-02 -1.448000000000000120e-01 7.499999999999999722e-02 -4.540000000000000285e-02 -1.319999999999999993e-02 1.054844239335070779e+04 1.380512059013935811e-01 6.387776264023765915e-02 1.089006881341100796e-01 3.620700496714779337e-02 -3.116285509529148623e-02
1.225196261202608912e+04 1.000000000000000000e+00 6.000000000000000000e+00 -1.525859524065708850e-01 9.360218137067467978e+03 9.905494670353064066e+03 9.420000000000000595e-02 -6.759999999999999343e-02 -1.547000000000000042e-01 8.980000000000000482e-02 -5.519999999999999907e-02 -6.499999999999999702e-03 1.053491815413159020e+04 1.263047186645633591e-01 -6.671797544625615584e-03 -1.093880790352755664e-02 1.577659322379371398e-01 -1.040877365004760080e-01
1.220366839104729661e+04 1.000000000000000000e+00 7.000000000000000000e+00 -1.553091478122576863e-01 9.365525405537269762e+03 9.904751770197943188e+03 9.840000000000000135e-02 -7.580000000000000626e-02 -1.403999999999999970e-01 9.679999999999999716e-02 -4.499999999999999833e-02 -1.319999999999999993e-02 1.054768221173824895e+04 3.470513438703820563e-02 3.315839899596026690e-03 1.884226262343367542e-01 7.405489340766269191e-02 -6.623627011220842298e-02
1.224724174174174186e+04 1.000000000000000000e+00 8.000000000000000000e+00 -1.134852969241980786e-01 9.365742593190216212e+03 9.932068887960309439e+03 9.389999999999999736e-02 -7.149999999999999412e-02 -1.287999999999999978e-01 1.001999999999999974e-01 -5.170000000000000290e-02 -1.859999999999999848e-02 1.057895506512949396e+04 3.282741288916622702e-02 8.650120832370053470e-02 9.400330970274539677e-02 -1.091031899172504316e-01 9.759276814115985799e-02
1.229378653164883690e+04 1.000000000000000000e+00 9.000000000000000000e+00 -1.224205789609634221e-01 9.375814554451962977e+03 9.947690281855195281e+03 8.530000000000000082e-02 -4.719999999999999890e-02 -1.424999999999999878e-01 8.550000000000000655e-02 -4.859999999999999737e-02 -1.170000000000000033e-02 1.059464094618281524e+04 -3.032912058061220523e-02 -4.845769387985224448e-03 -5.807504498826625977e-02 -8.712754217657325828e-03 -5.129935386294508237e-02
1.229511844852665126e+04 1.000000000000000000e+00 1.000000000000000000e+01 -1.274827731372382977e-01 9.367010003456587583e+03 9.921651758171476104e+03 8.019999999999999352e-02 -5.210000000000000048e-02 -1.293999999999999873e-01 8.989999999999999380e-02 -5.240000000000000213e-02 -1.880000000000000074e-02 1.054438133654954618e+04 5.060063793803967758e-02 -8.791690433901930279e-02 8.077688814393965688e-02 6.598076275547939962e-03 -1.563682389136117901e-01
1.226899278282188425e+04 1.000000000000000000e+00 1.100000000000000000e+01 -1.188832904787095329e-01 9.369687636504624606e+03 9.917656502064744927e+03 9.500000000000000111e-02 -5.390000000000000346e-02 -1.218000000000000055e-01 1.067000000000000032e-01 -5.129999999999999838e-02 -3.599999999999999728e-02 1.055154646474671426e+04 1.007990452101989870e-01 9.320365786486643767e-02 3.026027595892693753e-02 -1.051321974860948277e-01 4.407716902418187810e-02
1.227125582125093933e+04 1.000000000000000000e+00 1.200000000000000000e+01 -1.469653439223530811e-01 9.374495311811740976e+03 9.935928532606074441e+03 9.619999999999999385e-02 -6.439999999999999891e-02 -1.048000000000000043e-01 1.343999999999999917e-01 -5.279999999999999971e-02 -3.459999999999999881e-02 1.058780487521049508e+04 6.302170310965170275e-02 -3.702287854849971305e-02 -2.158364686029662707e-02 9.348948216328349659e-02 4.105990383656753923e-02
1.226176392408033098e+04 1.000000000000000000e+00 1.300000000000000000e+01 -1.381165519819842757e-01 9.365160175368295313e+03 9.947698216956128817e+03 9.619999999999999385e-02 -4.630000000000000088e-02 -1.096000000000000030e-01 1.141999999999999960e-01 -3.880000000000000115e-02 -3.280000000000000276e-02 1.060281913872164660e+04 -4.549619282926359309e-02 -3.255920294803336251e-02 1.118065357936516013e-01 7.472800708968180272e-02 2.106075133136286678e-02
1.229780568752346153e+04 1.000000000000000000e+00 1.400000000000000000e+01 -1.026956893442365071e-01 9.362869594493318800e+03 9.951028801312719224e+03 9.439999999999999780e-02 -4.780000000000000221e-02 -8.630000000000000171e-02 1.087999999999999939e-01 -2.469999999999999973e-02 -2.290000000000000022e-02 1.060531437136936074e+04 -5.270080576924769666e-02 2.133417864412356924e-02 4.578118027763203313e-02 -3.786537247688900182e-02 1.155452935705533341e-01
1.230878348172391088e+04 1.000000000000000000e+00 1.500000000000000000e+01 -8.658189776469626953e-02 9.373217986415304040e+03 9.960922986161598601e+03 1.028000000000000025e-01 -4.639999999999999680e-02 -8.880000000000000393e-02 1.212000000000000022e-01 -4.569999999999999757e-02 -4.070000000000000007e-02 1.061191975773031663e+04 -4.549989157029257769e-02 9.935070588020183946e-02 -4.644874225100306675e-02 -8.675905843260728334e-02 1.073699690425763131e-01
1.231153409170889609e+04 1.000000000000000000e+00 1.600000000000000000e+01 -1.091849899866933621e-01 9.373108458851311298e+03 9.975759069908195670e+03 1.040999999999999981e-01 -4.689999999999999725e-02 -8.110000000000000542e-02 1.233999999999999958e-01 -3.289999999999999869e-02 -3.749999999999999861e-02 1.063640818788119032e+04 -5.874732692000298601e-02 -1.002430503070073087e-02 -1.016766338672729225e-01 5.468246640903851041e-02 2.379627294569736096e-02
1.230232685224286797e+04 1.000000000000000000e+00 1.700000000000000000e+01 -1.250002423990524691e-01 9.372832800187818066e+03 9.969332455375431891e+03 1.016999999999999987e-01 -5.099999999999999672e-02 -7.929999999999999549e-02 1.242000000000000048e-01 -2.409999999999999989e-02 -4.059999999999999720e-02 1.063930677416865001e+04 -8.278669208619780784e-02 -1.122620053837667942e-01 8.404358445148607873e-03 1.181995201928659062e-01 3.049903428896739294e-02
1.230076218796921967e+04 1.000000000000000000e+00 1.800000000000000000e+01 -9.435582054314632650e-02 9.391865898544561787e+03 9.959312934145693362e+03 8.659999999999999643e-02 -4.379999999999999866e-02 -7.349999999999999589e-02 1.015999999999999959e-01 -2.220000000000000098e-02 -2.000000000000000042e-02 1.063108415263396455e+04 -8.471337135996262480e-02 -1.510706671430129405e-02 7.623528181475512644e-02 -1.085478670521101419e-01 -3.730923364058631470e-03
1.232509056224192864e+04 1.000000000000000000e+00 1.900000000000000000e+01 -8.678985714180999234e-02 9.374806173756240241e+03 9.952753451946126006e+03 8.500000000000000611e-02 -4.590000000000000330e-02 -6.119999999999999746e-02 1.096000000000000030e-01 -4.220000000000000140e-02 -3.030000000000000054e-02 1.061827446384972063e+04 2.189630059529366554e-02 2.991428365082782117e-03 -8.900627996928600627e-02 -9.083486755464556528e-02 7.161804295605339599e-02
PKH@ $nilearn/datasets/data/power_2011.csv
ROI,X,Y,Z
1,-25,-98,-12
2,27,-97,-13
3,24,32,-18
4,-56,-45,-24
5,8,41,-24
6,-21,-22,-20
7,17,-28,-17
8,-37,-29,-26
9,65,-24,-19
10,52,-34,-27
11,55,-31,-17
12,34,38,-12
13,-7,-52,61
14,-14,-18,40
15,0,-15,47
16,10,-2,45
17,-7,-21,65
18,-7,-33,72
19,13,-33,75
20,-54,-23,43
21,29,-17,71
22,10,-46,73
23,-23,-30,72
24,-40,-19,54
25,29,-39,59
26,50,-20,42
27,-38,-27,69
28,20,-29,60
29,44,-8,57
30,-29,-43,61
31,10,-17,74
32,22,-42,69
33,-45,-32,47
34,-21,-31,61
35,-13,-17,75
36,42,-20,55
37,-38,-15,69
38,-16,-46,73
39,2,-28,60
40,3,-17,58
41,38,-17,45
42,-49,-11,35
43,36,-9,14
44,51,-6,32
45,-53,-10,24
46,66,-8,25
47,-3,2,53
48,54,-28,34
49,19,-8,64
50,-16,-5,71
51,-10,-2,42
52,37,1,-4
53,13,-1,70
54,7,8,51
55,-45,0,9
56,49,8,-1
57,-34,3,4
58,-51,8,-2
59,-5,18,34
60,36,10,1
61,32,-26,13
62,65,-33,20
63,58,-16,7
64,-38,-33,17
65,-60,-25,14
66,-49,-26,5
67,43,-23,20
68,-50,-34,26
69,-53,-22,23
70,-55,-9,12
71,56,-5,13
72,59,-17,29
73,-30,-27,12
74,-41,-75,26
75,6,67,-4
76,8,48,-15
77,-13,-40,1
78,-18,63,-9
79,-46,-61,21
80,43,-72,28
81,-44,12,-34
82,46,16,-30
83,-68,-23,-16
84,-58,-26,-15
85,27,16,-17
86,-44,-65,35
87,-39,-75,44
88,-7,-55,27
89,6,-59,35
90,-11,-56,16
91,-3,-49,13
92,8,-48,31
93,15,-63,26
94,-2,-37,44
95,11,-54,17
96,52,-59,36
97,23,33,48
98,-10,39,52
99,-16,29,53
100,-35,20,51
101,22,39,39
102,13,55,38
103,-10,55,39
104,-20,45,39
105,6,54,16
106,6,64,22
107,-7,51,-1
108,9,54,3
109,-3,44,-9
110,8,42,-5
111,-11,45,8
112,-2,38,36
113,-3,42,16
114,-20,64,19
115,-8,48,23
116,65,-12,-19
117,-56,-13,-10
118,-58,-30,-4
119,65,-31,-9
120,-68,-41,-5
121,13,30,59
122,12,36,20
123,52,-2,-16
124,-26,-40,-8
125,27,-37,-13
126,-34,-38,-16
127,28,-77,-32
128,52,7,-30
129,-53,3,-27
130,47,-50,29
131,-49,-42,1
132,-31,19,-19 133,-2,-35,31 134,-7,-71,42 135,11,-66,42 136,4,-48,51 137,-46,31,-13 138,-10,11,67 139,49,35,-12 140,8,-91,-7 141,17,-91,-14 142,-12,-95,-13 143,18,-47,-10 144,40,-72,14 145,8,-72,11 146,-8,-81,7 147,-28,-79,19 148,20,-66,2 149,-24,-91,19 150,27,-59,-9 151,-15,-72,-8 152,-18,-68,5 153,43,-78,-12 154,-47,-76,-10 155,-14,-91,31 156,15,-87,37 157,29,-77,25 158,20,-86,-2 159,15,-77,31 160,-16,-52,-1 161,42,-66,-8 162,24,-87,24 163,6,-72,24 164,-42,-74,0 165,26,-79,-16 166,-16,-77,34 167,-3,-81,21 168,-40,-88,-6 169,37,-84,13 170,6,-81,6 171,-26,-90,3 172,-33,-79,-13 173,37,-81,1 174,-44,2,46 175,48,25,27 176,-47,11,23 177,-53,-49,43 178,-23,11,64 179,58,-53,-14 180,24,45,-15 181,34,54,-13 182,-21,41,-20 183,-18,-76,-24 184,17,-80,-34 185,35,-67,-34 186,47,10,33 187,-41,6,33 188,-42,38,21 189,38,43,15 190,49,-42,45 191,-28,-58,48 192,44,-53,47 193,32,14,56 194,37,-65,40 195,-42,-55,45 196,40,18,40 197,-34,55,4 198,-42,45,-2 199,33,-53,44 200,43,49,-2 201,-42,25,30 202,-3,26,44 203,11,-39,50 204,55,-45,37 205,42,0,47 206,31,33,26 207,48,22,10 208,-35,20,0 209,36,22,3 210,37,32,-2 211,34,16,-8 212,-11,26,25 213,-1,15,44 214,-28,52,21 215,0,30,27 216,5,23,37 217,10,22,27 218,31,56,14 219,26,50,27 220,-39,51,17 221,2,-24,30 222,6,-24,0 223,-2,-13,12 224,-10,-18,7 225,12,-17,8 226,-5,-28,-4 227,-22,7,-5 228,-15,4,8 229,31,-14,2 230,23,10,1 231,29,1,4 232,-31,-11,0 233,15,5,7 234,9,-4,6 235,54,-43,22 236,-56,-50,10 237,-55,-40,14 238,52,-33,8 239,51,-29,-4 240,56,-46,11 241,53,33,1 242,-49,25,-1 243,-16,-65,-20 244,-32,-55,-25 245,22,-58,-23 246,1,-62,-18 247,33,-12,-34 248,-31,-10,-36 249,49,-3,-38 250,-50,-7,-39 251,10,-62,61 252,-52,-63,5 253,-47,-51,-21 254,46,-47,-17 255,47,-30,49 256,22,-65,48 257,46,-59,4 258,25,-58,60 259,-33,-46,47 260,-27,-71,37 261,-32,-1,54 262,-42,-60,-9 263,-17,-59,64 264,29,-5,54 PKHz֖(nilearn/datasets/data/dosenbach_2010.csvnumber,x,y,z,name,network 1,6,64,3,vmPFC,default 2,29,57,18,aPFC,fronto-parietal 3,-29,57,10,aPFC,fronto-parietal 4,0,51,32,mPFC,default 5,-25,51,27,aPFC,default 6,9,51,16,vmPFC,default 7,-6,50,-1,vmPFC,default 8,27,49,26,aPFC,cingulo-opercular 9,42,48,-3,vent aPFC,fronto-parietal 10,-43,47,2,vent aPFC,fronto-parietal 11,-11,45,17,vmPFC,default 12,39,42,16,vlPFC,fronto-parietal 13,8,42,-5,vmPFC,default 14,9,39,20,ACC,default 15,46,39,-15,vlPFC,default 16,40,36,29,dlPFC,fronto-parietal 17,23,33,47,sup frontal,default 18,34,32,7,vPFC,cingulo-opercular 19,-2,30,27,ACC,cingulo-opercular 20,-16,29,54,sup frontal,default 21,-1,28,40,ACC,fronto-parietal 22,46,28,31,dlPFC,fronto-parietal 23,-52,28,17,vPFC,fronto-parietal 24,-44,27,33,dlPFC,fronto-parietal 25,51,23,8,vFC,cingulo-opercular 26,38,21,-1,ant insula,cingulo-opercular 27,9,20,34,dACC,cingulo-opercular 28,-36,18,2,ant insula,cingulo-opercular 29,40,17,40,dFC,fronto-parietal 30,-6,17,34,basal ganglia,cingulo-opercular 31,0,15,45,mFC,cingulo-opercular 32,58,11,14,frontal,sensorimotor 33,-46,10,14,vFC,cingulo-opercular 34,44,8,34,dFC,fronto-parietal 35,60,8,34,dFC,sensorimotor 36,-42,7,36,dFC,fronto-parietal 37,-55,7,23,vFC,sensorimotor 38,-20,6,7,basal ganglia,cingulo-opercular 39,14,6,7,basal ganglia,cingulo-opercular 40,-48,6,1,vFC,cingulo-opercular 41,10,5,51,pre-SMA,sensorimotor 42,43,1,12,vFC,sensorimotor 43,0,-1,52,SMA,sensorimotor 44,37,-2,-3,mid insula,cingulo-opercular 45,53,-3,32,frontal,sensorimotor 46,58,-3,17,precentral gyrus,sensorimotor 47,-12,-3,13,thalamus,cingulo-opercular 48,-42,-3,11,mid insula,sensorimotor 49,-44,-6,49,precentral gyrus,sensorimotor 
50,-26,-8,54,parietal,sensorimotor 51,46,-8,24,precentral gyrus,sensorimotor 52,-54,-9,23,precentral gyrus,sensorimotor 53,44,-11,38,precentral gyrus,sensorimotor 54,-47,-12,36,parietal,sensorimotor 55,33,-12,16,mid insula,sensorimotor 56,-36,-12,15,mid insula,sensorimotor 57,-12,-12,6,thalamus,cingulo-opercular 58,11,-12,6,thalamus,cingulo-opercular 59,32,-12,2,mid insula,cingulo-opercular 60,59,-13,8,temporal,sensorimotor 61,-30,-14,1,mid insula,cingulo-opercular 62,-38,-15,59,parietal,sensorimotor 63,52,-15,-13,inf temporal,default 64,-47,-18,50,parietal,sensorimotor 65,46,-20,45,parietal,sensorimotor 66,-55,-22,38,parietal,sensorimotor 67,-54,-22,22,precentral gyrus,sensorimotor 68,-54,-22,9,temporal,sensorimotor 69,41,-23,55,parietal,sensorimotor 70,42,-24,17,post insula,sensorimotor 71,11,-24,2,basal ganglia,cingulo-opercular 72,-59,-25,-15,inf temporal,default 73,1,-26,31,post cingulate,default 74,18,-27,62,parietal,sensorimotor 75,-38,-27,60,parietal,sensorimotor 76,-30,-28,9,post insula,cingulo-opercular 77,-24,-30,64,parietal,sensorimotor 78,51,-30,5,temporal,cingulo-opercular 79,-41,-31,48,post parietal,sensorimotor 80,-4,-31,-4,post cingulate,cingulo-opercular 81,54,-31,-18,fusiform,cingulo-opercular 82,-41,-37,16,temporal,sensorimotor 83,-53,-37,13,temporal,sensorimotor 84,28,-37,-15,fusiform,default 85,-3,-38,45,precuneus,default 86,34,-39,65,sup parietal,sensorimotor 87,8,-40,50,precuneus,cingulo-opercular 88,-41,-40,42,IPL,fronto-parietal 89,58,-41,20,parietal,cingulo-opercular 90,-8,-41,3,post cingulate,default 91,-61,-41,-2,inf temporal,default 92,-28,-42,-11,occipital,default 93,-5,-43,25,post cingulate,default 94,9,-43,25,precuneus,default 95,43,-43,8,temporal,cingulo-opercular 96,54,-44,43,IPL,fronto-parietal 97,-55,-44,30,parietal,cingulo-opercular 98,-28,-44,-25,lat cerebellum,cerebellum 99,-35,-46,48,post parietal,fronto-parietal 100,42,-46,21,sup temporal,cingulo-opercular 101,-48,-47,49,IPL,fronto-parietal 102,-41,-47,29,angular gyrus,cingulo-opercular 103,-59,-47,11,temporal,cingulo-opercular 104,-53,-50,39,IPL,fronto-parietal 105,5,-50,33,precuneus,default 106,-18,-50,1,occipital,occipital 107,44,-52,47,IPL,fronto-parietal 108,-5,-52,17,post cingulate,default 109,-24,-54,-21,lat cerebellum,cerebellum 110,-37,-54,-37,inf cerebellum,cerebellum 111,10,-55,17,post cingulate,default 112,-6,-56,29,precuneus,default 113,-34,-57,-24,lat cerebellum,cerebellum 114,-32,-58,46,IPS,fronto-parietal 115,-11,-58,17,post cingulate,default 116,32,-59,41,IPS,fronto-parietal 117,51,-59,34,angular gyrus,default 118,-34,-60,-5,occipital,occipital 119,36,-60,-8,occipital,occipital 120,-6,-60,-15,med cerebellum,cerebellum 121,-25,-60,-34,inf cerebellum,cerebellum 122,32,-61,-31,inf cerebellum,cerebellum 123,46,-62,5,temporal,occipital 124,-48,-63,35,angular gyrus,default 125,-52,-63,15,TPJ,cingulo-opercular 126,-44,-63,-7,occipital,occipital 127,-16,-64,-21,med cerebellum,cerebellum 128,21,-64,-22,lat cerebellum,cerebellum 129,19,-66,-1,occipital,occipital 130,1,-66,-24,med cerebellum,cerebellum 131,-34,-67,-29,inf cerebellum,cerebellum 132,11,-68,42,precuneus,default 133,17,-68,20,occipital,occipital 134,-36,-69,40,IPS,default 135,39,-71,13,occipital,occipital 136,-9,-72,41,occipital,default 137,45,-72,29,occipital,default 138,-11,-72,-14,med cerebellum,cerebellum 139,29,-73,29,occipital,occipital 140,33,-73,-30,inf cerebellum,cerebellum 141,-2,-75,32,occipital,default 142,-29,-75,28,occipital,occipital 143,5,-75,-11,med cerebellum,cerebellum 144,14,-75,-21,med cerebellum,cerebellum 
145,-16,-76,33,occipital,occipital 146,-42,-76,26,occipital,default 147,9,-76,14,occipital,occipital 148,15,-77,32,occipital,occipital 149,20,-78,-2,occipital,occipital 150,-21,-79,-33,inf cerebellum,cerebellum 151,-6,-79,-33,inf cerebellum,cerebellum 152,-5,-80,9,post occipital,occipital 153,29,-81,14,post occipital,occipital 154,33,-81,-2,post occipital,occipital 155,18,-81,-33,inf cerebellum,cerebellum 156,-37,-83,-2,post occipital,occipital 157,-29,-88,8,post occipital,occipital 158,13,-91,2,post occipital,occipital 159,27,-91,2,post occipital,occipital 160,-4,-94,12,post occipital,occipital

nilearn/datasets/description/__init__.py

nilearn/datasets/description/ABIDE_pcp.rst

ABIDE

Notes
-----
The Autism Brain Imaging Data Exchange (ABIDE) dataset provides previously
collected resting-state functional magnetic resonance imaging datasets from
539 individuals with ASD and 573 typical controls, for the purpose of data
sharing in the broader scientific community. This grass-roots initiative
involved 16 international sites, sharing 20 samples yielding 1112 datasets
composed of both MRI data and an extensive array of phenotypic information
common across nearly all sites (see below).

Note that this is the preprocessed version of ABIDE provided by the
Preprocessed Connectomes Project (PCP).

Content
-------
    :'phenotypic': Behavioral information.

References
----------
For more information about this dataset's structure:
http://preprocessed-connectomes-project.github.io
http://www.childmind.org/en/healthy-brain-network/abide/

Nielsen, Jared A., et al. "Multisite functional connectivity MRI
classification of autism: ABIDE results." Frontiers in Human Neuroscience 7
(2013).

Licence: Consistent with the policies of the 1000 Functional Connectomes
Project, data usage is unrestricted for non-commercial research purposes.

nilearn/datasets/description/aal_SPM12.rst

AAL atlas for SPM 12

Notes
-----
This atlas is the result of an automated anatomical parcellation of the
spatially normalized single-subject high-resolution T1 volume provided by
the Montreal Neurological Institute (MNI) (D. L. Collins et al., 1998,
Trans. Med. Imag. 17, 463-468, PubMed). Using this parcellation method,
three procedures to perform the automated anatomical labeling of functional
studies are proposed: (1) labeling of an extremum defined by a set of
coordinates, (2) percentage of voxels belonging to each of the AVOI
intersected by a sphere centered on a set of coordinates, and (3) percentage
of voxels belonging to each of the AVOI intersected by an activated cluster.

Content
-------
    :"regions": str. Path to nifti file containing regions.
    :"labels": dict. Labels dictionary with their region id as key and name
               as value.

References
----------
For more information on this dataset's structure, see
http://www.gin.cnrs.fr/AAL-217?lang=en

Automated Anatomical Labeling of Activations in SPM Using a Macroscopic
Anatomical Parcellation of the MNI MRI Single-Subject Brain. N.
Tzourio-Mazoyer, B. Landeau, D. Papathanassiou, F. Crivello, O. Etard, N.
Delcroix, B. Mazoyer, and M. Joliot. NeuroImage 2002, 15:273-289.

Licence: unknown.
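A quick way to inspect this atlas is through nilearn's fetcher; a minimal
sketch (the first call downloads the files to the nilearn data directory,
and the printed attributes follow the fetcher's return values)::

    from nilearn import datasets

    # Fetch the SPM 12 flavor of the AAL atlas (downloads on first use)
    aal = datasets.fetch_atlas_aal(version='SPM12')
    print(aal.maps)         # path to the nifti file with the regions
    print(aal.labels[:5])   # first few region names
    print(aal.indices[:5])  # region ids matching the values in the image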
nilearn/datasets/description/adhd.rst

ADHD 200

Notes
-----
Part of the 1000 Functional Connectomes Project. Phenotypic information
includes: diagnostic status, dimensional ADHD symptom measures, age, sex,
intelligence quotient (IQ) and lifetime medication status. Preliminary
quality control assessments (usable vs. questionable) based upon visual
timeseries inspection are included for all resting-state fMRI scans.

Includes preprocessed data from 40 participants. The project was coordinated
by Michael P. Milham.

Content
-------
    :'func': Nifti images of the resting-state data
    :'phenotypic': Phenotypic information on the participants
    :'confounds': CSV files containing the nuisance variables

References
----------
For more information about this dataset's structure:
http://fcon_1000.projects.nitrc.org/indi/adhd200/index.html

Licence: usage is unrestricted for non-commercial research purposes.
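The confounds files pair naturally with the confounds argument of nilearn's
maskers (or with nilearn.signal.clean); a minimal sketch, assuming a network
connection for the first download::

    from nilearn import datasets
    from nilearn.input_data import NiftiMasker

    adhd = datasets.fetch_adhd(n_subjects=1)

    # Regress the nuisance variables out while extracting voxel time series
    masker = NiftiMasker(standardize=True, detrend=True)
    cleaned = masker.fit_transform(adhd.func[0],
                                   confounds=adhd.confounds[0])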
nilearn/datasets/description/brainomics_localizer.rst

Brainomics Localizer

Notes
-----
A protocol that captures the cerebral bases of auditory and visual
perception, motor actions, reading, language comprehension and mental
calculation at an individual level. Individual functional maps are reliable
and quite precise.

Content
-------
    :'func': Nifti images of the neural activity maps
    :'cmaps': Nifti images of contrast maps
    :'tmaps': Nifti images of corresponding t-maps
    :'masks': Structural images of the mask used for each subject.
    :'anats': Structural images of the anatomy of each subject

References
----------
For more information about this dataset's structure:
http://brainomics.cea.fr/localizer/

Pinel, Philippe, et al. "Fast reproducible identification and large-scale
databasing of individual functional cognitive networks." BMC Neuroscience
8.1 (2007): 91.

Licence: usage is unrestricted for non-commercial research purposes.

nilearn/datasets/description/craddock_2012.rst

Craddock 2012

Notes
-----
Collection of regions of interest (ROI) that have been generated by applying
spatially constrained clustering to resting-state data. Several clustering
statistics are used to compare methodological trade-offs as well as to
determine an adequate number of clusters. The proposed functional and random
parcellations perform equivalently for most of the metrics evaluated. The
online release also contains the scripts to derive these ROI atlases by
using spatially constrained Ncut spectral clustering.

Content
-------
    :'random': result of random clustering, for comparison
    :'scorr_2level': parcellation results when emphasizing spatial homogeneity
    :'scorr_mean': group-mean parcellation results when emphasizing spatial
                   homogeneity
    :'tcorr_2level': parcellation results when emphasizing temporal homogeneity
    :'tcorr_mean': group-mean parcellation results when emphasizing temporal
                   homogeneity

References
----------
For more information on this dataset's structure, see
http://www.nitrc.org/projects/cluster_roi/

Craddock, R. Cameron, G. Andrew James, Paul E. Holtzheimer, Xiaoping P. Hu,
and Helen S. Mayberg. "A Whole Brain fMRI Atlas Generated via Spatially
Constrained Spectral Clustering". Human Brain Mapping 33, no. 8 (2012):
1914-1928. doi:10.1002/hbm.21333.

Licence: Creative Commons Attribution Non-commercial Share Alike.

nilearn/datasets/description/nyu_rest.rst

NYU resting-state dataset

Notes
-----
Intersession (>5 months apart), intrasession (<1 h apart), and multiscan
(across all 3 scans) reliability and consistency for both region-of-interest
and voxel-wise analyses have been studied.

Content
-------
    :'func': Nifti images with BOLD data
    :'anat_anon': Nifti images with subject anatomy
    :'anat_skull': Nifti images with subject skull information
    :'session': Information on session

References
----------
For more information on this dataset's structure, see
http://cercor.oxfordjournals.org/content/19/10/2209.full

The Resting Brain: Unconstrained yet Reliable. Z. Shehzad, A.M.C. Kelly,
P.T. Reiss, D.G. Gee, K. Gotimer, L.Q. Uddin, S.H. Lee, D.S. Margulies,
A.K. Roy, B.B. Biswal, E. Petkova, F.X. Castellanos and M.P. Milham.

Licence: unknown.

nilearn/datasets/description/oasis1.rst

Oasis

Notes
-----
The Open Access Series of Imaging Studies (OASIS) is a project aimed at
making MRI data sets of the brain freely available to the scientific
community. OASIS is made available by the Washington University Alzheimer's
Disease Research Center, Dr. Randy Buckner at the Howard Hughes Medical
Institute (HHMI) at Harvard University, the Neuroinformatics Research Group
(NRG) at Washington University School of Medicine, and the Biomedical
Informatics Research Network (BIRN).

Content
-------
    :'gray_matter_maps': Nifti images with gray matter density probability maps
    :'white_matter_maps': Nifti images with white matter density probability
                          maps
    :'ext_vars': Behavioral information on the participants
    :'data_usage_agreement': Text file containing the data usage agreement

References
----------
For more information about this dataset's structure:
http://www.oasis-brains.org/

Open Access Series of Imaging Studies (OASIS): Cross-sectional MRI Data in
Young, Middle Aged, Nondemented, and Demented Older Adults. Marcus, D. S.
et al., 2007, Journal of Cognitive Neuroscience.

Licence: provided under an open access data use agreement (DUA).

nilearn/datasets/description/power_2011.rst

Power 2011 atlas

Notes
-----
264 ROIs obtained by meta-analysis.

Content
-------
    :'rois': Coordinates of ROIs in MNI space.

References
----------
Power, Jonathan D., et al. "Functional network organization of the human
brain." Neuron 72.4 (2011): 665-678.
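These coordinates are typically turned into a spheres masker to extract one
signal per ROI; a minimal sketch (the 'rois' record array exposes x, y and z
fields, mirroring the columns of the power_2011.csv file above)::

    import numpy as np
    from nilearn import datasets
    from nilearn.input_data import NiftiSpheresMasker

    power = datasets.fetch_coords_power_2011()
    coords = np.vstack((power.rois['x'],
                        power.rois['y'],
                        power.rois['z'])).T   # shape (264, 3)

    # One 5 mm sphere per ROI; fit_transform on a 4D image yields the series
    masker = NiftiSpheresMasker(seeds=coords, radius=5., standardize=True)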
nilearn/datasets/description/smith_2009.rst

Smith 2009 Atlas

Notes
-----
This atlas provides spatial maps of the major brain networks during
task-constrained brain activity and task-unconstrained (resting) brain
activity. They were derived from 6 minutes of resting-state time series from
36 subjects, as well as from the smoothed task activity coordinates of
healthy subjects stored in the BrainMap database.

Content
-------
    :'rsn20': 20 ICA maps derived from resting-state decomposition
    :'rsn10': 10 ICA maps from the above that matched across task and rest
    :'rsn70': 70 ICA maps derived from resting-state decomposition
    :'bm20': 20 ICA maps derived from decomposition of BrainMap task data
    :'bm10': 10 ICA maps from the above that matched across task and rest
    :'bm70': 70 ICA maps derived from decomposition of BrainMap task data

References
----------
For more information about this dataset's structure:
http://www.fmrib.ox.ac.uk/analysis/brainmap+rsns/

S.M. Smith, P.T. Fox, K.L. Miller, D.C. Glahn, P.M. Fox, C.E. Mackay, N.
Filippini, K.E. Watkins, R. Toro, A.R. Laird, and C.F. Beckmann.
Correspondence of the brain's functional architecture during activation and
rest. Proc Natl Acad Sci USA (PNAS), 106(31):13040-13045, 2009.

A.R. Laird, P.M. Fox, S.B. Eickhoff, J.A. Turner, K.L. Ray, D.R. McKay,
D.C. Glahn, C.F. Beckmann, S.M. Smith, and P.T. Fox. Behavioral
interpretations of intrinsic connectivity networks. Journal of Cognitive
Neuroscience, 2011.

Licence: unknown.

nilearn/datasets/description/yeo_2011.rst

Yeo 2011 Atlas

Notes
-----
This atlas provides a labeling of some cortical voxels in the MNI152 space.

Four versions of the atlas are available, according to the cortical model
(thick or thin cortical surface) and to the number of regions considered
(7 or 17).

Content
-------
    :'anat': Background anatomical image for reference and visualization
    :'thin_7': Cortical parcellation into 7 regions, thin cortical model
    :'thin_17': Cortical parcellation into 17 regions, thin cortical model
    :'thick_7': Cortical parcellation into 7 regions, thick cortical model
    :'thick_17': Cortical parcellation into 17 regions, thick cortical model
    :'colors_7': Text file for the coloring of the 7-region parcellation
    :'colors_17': Text file for the coloring of the 17-region parcellation

References
----------
For more information on this dataset's structure, see
http://surfer.nmr.mgh.harvard.edu/fswiki/CorticalParcellation_Yeo2011

Yeo BT, Krienen FM, Sepulcre J, Sabuncu MR, Lashkari D, Hollinshead M,
Roffman JL, Smoller JW, Zollei L., Polimeni JR, Fischl B, Liu H, Buckner RL.
The organization of the human cerebral cortex estimated by intrinsic
functional connectivity. J Neurophysiol 106(3):1125-65, 2011.

Licence: unknown.
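Any of the four parcellations can be displayed on the bundled anatomical
background; a minimal sketch (downloads the atlas on first use)::

    from nilearn import datasets, plotting

    yeo = datasets.fetch_atlas_yeo_2011()

    # Overlay the 7-network, thick-surface parcellation on the included anatomy
    plotting.plot_roi(yeo.thick_7, bg_img=yeo.anat,
                      title='Yeo 2011, 7 networks')
    plotting.show()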
nilearn/datasets/description/Megatrawls.rst

MegaTrawls Network Matrices HCP

Notes
-----
Contains network matrices data of two types, full correlation and partial
correlation, which were estimated using each subject's specific timeseries
signals extracted from a group of ICA nodes or parcellations. In total, 461
functional connectivity datasets were used to obtain these matrices, as part
of the HCP Megatrawls release.

The numbers of nodes available for download are 25, 50, 100, 200 and 300, in
combination with two variants of timeseries extraction methods: multiple
spatial regression (ts2) and eigen regression (ts3).

These matrices can be used to predict the relationships between subjects'
functional connectivity datasets and their behavioural measures. Both can be
downloaded from the HCP connectome website under certain conditions; see the
disclaimer below.

Content
-------
    :'dimensions': the input dimensionality used in fetching the data.
    :'timeseries': the specific timeseries method used in fetching the data.
    :'matrices': the specific type of matrices requested.
    :'correlation_matrices': the correlation network matrices data.

References
----------
For more technical details about predicting the measures, refer to:
Stephen Smith et al., HCP beta-release of the Functional Connectivity
MegaTrawl. April 2015 "HCP500-MegaTrawl" release.
https://db.humanconnectome.org/megatrawl/

Disclaimer
----------
IMPORTANT: This is open access data. You must agree to the Terms and
conditions of using this data before using it, available at:
http://humanconnectome.org/data/data-use-terms/open-access.html

Open Access Data (all imaging data and most of the behavioral data) is
available to those who register an account at ConnectomeDB and agree to the
Open Access Data Use Terms. This includes agreement to comply with
institutional rules and regulations. This means you may need the approval of
your IRB or Ethics Committee to use the data. The released HCP data are not
considered de-identified, since certain combinations of HCP Restricted Data
(available through a separate process) might allow identification of
individuals. Different national, state and local laws may apply and be
interpreted differently, so it is important that you consult with your IRB
or Ethics Committee before beginning your research. If needed and upon
request, the HCP will provide a certificate stating that you have accepted
the HCP Open Access Data Use Terms.

Please note that everyone who works with HCP open access data must review
and agree to these terms, including those who are accessing shared copies of
this data. If you are sharing HCP Open Access data, please advise your
co-researchers that they must register with ConnectomeDB and agree to these
terms.

Register and sign the Open Access Data Use Terms at ConnectomeDB:
https://db.humanconnectome.org/
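The fetcher mirrors these choices through its parameters; a minimal sketch
(the parameter values follow the node counts and timeseries variants listed
above)::

    from nilearn import datasets

    netmats = datasets.fetch_megatrawls_netmats(
        dimensionality=300,
        timeseries='eigen_regression',      # the 'ts3' variant
        matrices='partial_correlation')

    # Network matrices data, as described in the Content section
    partial_correlations = netmats.correlation_matrices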
nilearn/datasets/description/basc_multiscale_2015.rst

An atlas of multiscale brain parcellations

Content
-------
This work is a derivative of the Cambridge sample found in the 1000
Functional Connectomes Project
(http://fcon_1000.projects.nitrc.org/fcpClassic/FcpTable.html) (Liu et al.,
2009), originally released under Creative Commons -- Attribution
Non-Commercial. It includes group brain parcellations generated from
resting-state functional magnetic resonance images for about 200 young
healthy subjects. Multiple scales (numbers of networks) are available: 7,
12, 20, 36, 64, 122, 197, 325 and 444. The brain parcellations have been
generated using a method called bootstrap analysis of stable clusters (BASC,
Bellec et al., 2010), and the scales have been selected using a data-driven
method called MSTEPS (Bellec, 2013).

This release more specifically contains the following files:

:'description': a markdown (text) description of the release.
:'scale007', 'scale012', 'scale020', 'scale036', 'scale064', 'scale122',
 'scale197', 'scale325', 'scale444':
 brain_parcellation_cambridge_basc_multiscale_(sym,asym)_scale(NNN).nii.gz,
 a 3D volume in .nii format at 3 mm isotropic resolution, in the MNI
 non-linear 2009a space
 (http://www.bic.mni.mcgill.ca/ServicesAtlases/ICBM152NLin2009). Region
 number I is filled with the value I (background is filled with 0s).

Note that two versions of the template are available, ending with either
nii_sym or nii_asym. The asym flavor contains brain images that have been
registered in the asymmetric version of the MNI brain template (reflecting
that the brain is asymmetric), while with the sym flavor they have been
registered in the symmetric version of the MNI template. The symmetric
template has been forced to be symmetric anatomically, and is therefore
ideally suited to study homotopic functional connections in fMRI: finding
homotopic regions simply consists of flipping the x-axis of the template.

Preprocessing
-------------
The datasets were analysed using the NeuroImaging Analysis Kit (NIAK,
https://github.com/SIMEXP/niak) version 0.12.14, under CentOS version 6.3
with Octave (http://gnu.octave.org) version 3.8.1 and the Minc toolkit
(http://www.bic.mni.mcgill.ca/ServicesSoftware/ServicesSoftwareMincToolKit)
version 0.3.18. Each fMRI dataset was corrected for inter-slice differences
in acquisition time, and the parameters of a rigid-body motion were
estimated for each time frame. Rigid-body motion was estimated within as
well as between runs, using the median volume of the first run as a target.
The median volume of one selected fMRI run for each subject was coregistered
with a T1 individual scan using Minctracc (Collins and Evans, 1998), which
was itself non-linearly transformed to the Montreal Neurological Institute
(MNI) template (Fonov et al., 2011) using the CIVET pipeline (Ad-Dab'bagh et
al., 2006). The MNI symmetric template was generated from the ICBM152 sample
of 152 young adults, after 40 iterations of non-linear coregistration. The
rigid-body transform, fMRI-to-T1 transform and T1-to-stereotaxic transform
were all combined, and the functional volumes were resampled in the MNI
space at a 3 mm isotropic resolution.

The "scrubbing" method of Power et al. (2012) was used to remove the volumes
with excessive motion (frame displacement greater than 0.5 mm). A minimum
number of 60 unscrubbed volumes per run, corresponding to ~180 s of
acquisition, was then required for further analysis. The following nuisance
parameters were regressed out from the time series at each voxel: slow time
drifts (basis of discrete cosines with a 0.01 Hz high-pass cut-off), average
signals in conservative masks of the white matter and the lateral
ventricles, as well as the first principal components (95% energy) of the
six rigid-body motion parameters and their squares (Giove et al., 2009). The
fMRI volumes were finally spatially smoothed with a 6 mm isotropic Gaussian
blurring kernel.

Bootstrap Analysis of Stable Clusters
-------------------------------------
Brain parcellations were derived using BASC (Bellec et al., 2010). A region
growing algorithm was first applied to reduce the brain into regions of
roughly equal size, set to 1000 mm3. The BASC used 100 replications of a
hierarchical clustering with Ward's criterion on resampled individual time
series, using circular block bootstrap. A consensus clustering (hierarchical
with Ward's criterion) was generated across all the individual clustering
replications pooled together, hence generating group clusters. The
generation of group clusters was itself replicated by bootstrapping subjects
500 times, and a (final) consensus clustering (hierarchical, Ward's
criterion) was generated on the replicated group clusters. The MSTEPS
procedure (Bellec, 2013) was implemented to select a data-driven subset of
scales in the range 5-500, approximating the group stability matrices up to
5% residual energy, through linear interpolation over selected scales. Note
that the number of scales itself was selected by the MSTEPS procedure in a
data-driven fashion, and that the individual, group and final (consensus)
numbers of clusters were not necessarily identical.

References
----------
Ad-Dab'bagh Y, Einarson D, Lyttelton O, Muehlboeck J S, Mok K, Ivanov O,
Vincent R D, Lepage C, Lerch J, Fombonne E, Evans A C, 2006. The CIVET
Image-Processing Environment: A Fully Automated Comprehensive Pipeline for
Anatomical Neuroimaging Research. In: Corbetta, M. (Ed.), Proceedings of the
12th Annual Meeting of the Human Brain Mapping Organization. Neuroimage,
Florence, Italy.

Bellec P, Rosa-Neto P, Lyttelton O C, Benali H, Evans A C, Jul. 2010.
Multi-level bootstrap analysis of stable clusters in resting-state fMRI.
NeuroImage 51 (3), 1126-1139.
URL http://dx.doi.org/10.1016/j.neuroimage.2010.02.082

Bellec P, Jun. 2013. Mining the Hierarchy of Resting-State Brain Networks:
Selection of Representative Clusters in a Multiscale Structure. In: Pattern
Recognition in Neuroimaging (PRNI), 2013 International Workshop on,
pp. 54-57.

Collins D L, Evans A C, 1997. Animal: validation and applications of
nonlinear registration-based segmentation. International Journal of Pattern
Recognition and Artificial Intelligence 11, 1271-1294.

Fonov V, Evans A C, Botteron K, Almli C R, McKinstry R C, Collins D L, Jan.
2011. Unbiased average age-appropriate atlases for pediatric studies.
NeuroImage 54 (1), 313-327.
URL http://dx.doi.org/10.1016/j.neuroimage.2010.07.033

Giove F, Gili T, Iacovella V, Macaluso E, Maraviglia B, Oct. 2009.
Images-based suppression of unwanted global signals in resting-state
functional connectivity studies. Magnetic resonance imaging 27 (8),
1058-1064. URL http://dx.doi.org/10.1016/j.mri.2009.06.004

Liu H, Stufflebeam S M, Sepulcre J, Hedden T, Buckner R L, Dec. 2009.
Evidence from intrinsic activity that asymmetry of the human brain is
controlled by multiple factors. Proceedings of the National Academy of
Sciences 106 (48), 20499-20503.
URL http://dx.doi.org/10.1073/pnas.0908073106

Power J D, Barnes K A, Snyder A Z, Schlaggar B L, Petersen S E, Feb. 2012.
Spurious but systematic correlations in functional connectivity MRI networks
arise from subject motion. NeuroImage 59 (3), 2142-2154.
URL http://dx.doi.org/10.1016/j.neuroimage.2011.10.018
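Back in nilearn, the nine scales map directly onto the fetcher's keys, and
the symmetric template allows a cheap homotopic flip; a minimal sketch (the
left-right axis is assumed to be the first array axis, as is usual for these
MNI-space volumes)::

    import nibabel as nib
    import numpy as np
    from nilearn import datasets

    basc = datasets.fetch_atlas_basc_multiscale_2015(version='sym')
    img = nib.load(basc.scale064)   # the 64-network parcellation

    # Homotopic counterpart of every region: flip the x-axis of the data
    flipped = nib.Nifti1Image(np.flipud(img.get_data()), img.affine)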
nilearn/datasets/description/cobre.rst

COBRE datasets preprocessed using the NIAK 0.12.4 pipeline

Content
-------
This work is a derivative of the COBRE sample found in the International
Neuroimaging Data-sharing Initiative (INDI)
(http://fcon_1000.projects.nitrc.org/indi/retro/cobre.html), originally
released under Creative Commons -- Attribution Non-Commercial. It includes
preprocessed resting-state functional magnetic resonance images for 72
patients diagnosed with schizophrenia (58 males, age range = 18-65 yrs) and
74 healthy controls (51 males, age range = 18-65 yrs). The fMRI dataset for
each subject is a single nifti file (.nii.gz), featuring 150 EPI
blood-oxygenation level dependent (BOLD) volumes obtained in 5 min (TR = 2 s,
TE = 29 ms, FA = 75 degrees, 32 slices, voxel size = 3x3x4 mm3, matrix size =
64x64, FOV = mm2).

The COBRE preprocessed fMRI release more specifically contains the following
files:

:'description': a markdown (text) description of the release.
:'phenotypic': numpy array of comma-separated values, with the sz (1: patient
 with schizophrenia, 0: control), age, sex, and FD (frame displacement, as
 defined by Power et al., 2012) variables. Each column codes for one
 variable, starting with the label, and each line has the label of the
 corresponding subject.
:'func': list of filenames of functional datasets,
 fmri_szxxxSUBJECT_session1_run1.nii.gz: a 3D+t nifti volume at 3 mm
 isotropic resolution, in the MNI non-linear 2009a symmetric space
 (http://www.bic.mni.mcgill.ca/ServicesAtlases/ICBM152NLin2009). Note that
 the number of time samples may vary, as some samples have been removed if
 tagged with excessive motion. See the _extra.mat file for more info.
:'mat_files': list of filenames of .mat files,
 fmri_szxxxSUBJECT_session1_run1_extra.mat: a matlab/octave file for each
 subject. Each .mat file contains the following variables:

    * confounds: a TxK array. Each row corresponds to a time sample, and
      each column to one confound that was regressed out from the time
      series during preprocessing.
    * labels_confounds: cell of strings. Each entry is the label of a
      confound that was regressed out from the time series.
    * mask_suppressed: a T2x1 vector. T2 is the number of time samples in
      the raw time series (before preprocessing), T2=119. Each entry
      corresponds to a time sample, and is 1 if the corresponding sample was
      removed due to excessive motion (or to wait for magnetic equilibrium
      at the beginning of the series). Samples that were kept are tagged
      with 0s.
    * time_frames: a Tx1 vector. Each entry is the time of acquisition
      (in s) of the corresponding volume.
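On the Python side these .mat files load with scipy.io; a minimal sketch
(the filename follows the pattern documented above, with SUBJECT standing in
for an actual subject id)::

    from scipy.io import loadmat

    extra = loadmat('fmri_szxxxSUBJECT_session1_run1_extra.mat',
                    squeeze_me=True)
    confounds = extra['confounds']          # (T, K) regressed-out confounds
    labels = extra['labels_confounds']      # one label per confound column
    kept = extra['mask_suppressed'] == 0    # raw samples that survived scrubbing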
Preprocessing
-------------
The datasets were analysed using the NeuroImaging Analysis Kit (NIAK,
https://github.com/SIMEXP/niak) version 0.12.14, under CentOS version 6.3
with Octave (http://gnu.octave.org) version 3.8.1 and the Minc toolkit
(http://www.bic.mni.mcgill.ca/ServicesSoftware/ServicesSoftwareMincToolKit)
version 0.3.18. Each fMRI dataset was corrected for inter-slice difference
in acquisition time and the parameters of a rigid-body motion were estimated
for each time frame. Rigid-body motion was estimated within as well as
between runs, using the median volume of the first run as a target. The
median volume of one selected fMRI run for each subject was coregistered
with a T1 individual scan using Minctracc (Collins and Evans, 1998), which
was itself non-linearly transformed to the Montreal Neurological Institute
(MNI) template (Fonov et al., 2011) using the CIVET pipeline (Ad-Dab'bagh et
al., 2006). The MNI symmetric template was generated from the ICBM152 sample
of 152 young adults, after 40 iterations of non-linear coregistration. The
rigid-body transform, fMRI-to-T1 transform and T1-to-stereotaxic transform
were all combined, and the functional volumes were resampled in the MNI
space at a 3 mm isotropic resolution.

The "scrubbing" method of (Power et al., 2012) was used to remove the
volumes with excessive motion (frame displacement greater than 0.5 mm). A
minimum number of 60 unscrubbed volumes per run, corresponding to ~180 s of
acquisition, was then required for further analysis. For this reason, 16
controls and 29 schizophrenia patients were rejected from the subsequent
analyses. The following nuisance parameters were regressed out from the time
series at each voxel: slow time drifts (basis of discrete cosines with a
0.01 Hz high-pass cut-off), average signals in conservative masks of the
white matter and the lateral ventricles as well as the first principal
components (95% energy) of the six rigid-body motion parameters and their
squares (Giove et al., 2009). The fMRI volumes were finally spatially
smoothed with a 6 mm isotropic Gaussian blurring kernel.

References
----------
Ad-Dab'bagh Y, Einarson D, Lyttelton O, Muehlboeck J S, Mok K, Ivanov O,
Vincent R D, Lepage C, Lerch J, Fombonne E, Evans A C, 2006. The CIVET
Image-Processing Environment: A Fully Automated Comprehensive Pipeline for
Anatomical Neuroimaging Research. In: Corbetta M. (Ed.), Proceedings of the
12th Annual Meeting of the Human Brain Mapping Organization. Neuroimage,
Florence, Italy.

Bellec P, Rosa-Neto P, Lyttelton O C, Benali H, Evans A C, Jul. 2010.
Multi-level bootstrap analysis of stable clusters in resting-state fMRI.
NeuroImage 51 (3), 1126-1139.
URL http://dx.doi.org/10.1016/j.neuroimage.2010.02.082

Collins D L, Evans A C, 1997. Animal: validation and applications of
nonlinear registration-based segmentation. International Journal of Pattern
Recognition and Artificial Intelligence 11, 1271-1294.

Fonov V, Evans A C, Botteron K, Almli C R, McKinstry R C, Collins D L, Jan.
2011. Unbiased average age-appropriate atlases for pediatric studies.
NeuroImage 54 (1), 313-327.
URL http://dx.doi.org/10.1016/j.neuroimage.2010.07.033

Giove F, Gili T, Iacovella V, Macaluso E, Maraviglia B, Oct. 2009.
Images-based suppression of unwanted global signals in resting-state
functional connectivity studies. Magnetic resonance imaging 27 (8),
1058-1064. URL http://dx.doi.org/10.1016/j.mri.2009.06.004

Power J D, Barnes K A, Snyder A Z, Schlaggar B L, Petersen S E, Feb. 2012.
Spurious but systematic correlations in functional connectivity MRI networks
arise from subject motion. NeuroImage 59 (3), 2142-2154.
URL http://dx.doi.org/10.1016/j.neuroimage.2011.10.018

Other derivatives
-----------------
This dataset was used in a publication; see the link below.
https://github.com/SIMEXP/glm_connectome
nilearn/datasets/description/dosenbach_2010.rst

Dosenbach 2010 atlas

Notes
-----
160 regions of interest covering much of the cerebral cortex and cerebellum.
They were obtained from meta-analyses of fMRI activation studies and
assigned to 6 networks according to a modularity analysis of resting-state
data.

Content
-------
    :'rois': Coordinates of ROIs in MNI space.
    :'labels': ROI labels.
    :'networks': Network names.

References
----------
Dosenbach N.U., Nardos B., et al. "Prediction of individual brain maturity
using fMRI.", 2010, Science 329, 1358-1361.
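As with the Power atlas, these coordinates can feed a spheres masker, and
the 'networks' field lets one select a subsystem; a minimal sketch (network
names may load as byte strings on Python 3, hence the conversion)::

    import numpy as np
    from nilearn import datasets

    dosenbach = datasets.fetch_coords_dosenbach_2010()
    coords = np.vstack((dosenbach.rois['x'],
                        dosenbach.rois['y'],
                        dosenbach.rois['z'])).T   # shape (160, 3)

    # Keep only the cingulo-opercular ROIs
    networks = np.asarray(dosenbach.networks).astype(str)
    co_coords = coords[networks == 'cingulo-opercular']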
nilearn/datasets/tests/__init__.py

nilearn/datasets/tests/test_utils.py

"""
Test the datasets module
"""
# Author: Alexandre Abraham
# License: simplified BSD
import contextlib
import os
import shutil
import numpy as np
import zipfile
import tarfile
import gzip
from tempfile import mkdtemp, mkstemp

from nose import with_setup
from nose.tools import assert_true, assert_false, assert_equal

from nilearn import datasets
from nilearn._utils.testing import (mock_request, wrap_chunk_read_,
                                    FetchFilesMock, assert_raises_regex)


currdir = os.path.dirname(os.path.abspath(__file__))
datadir = os.path.join(currdir, 'data')
tmpdir = None
url_request = None
file_mock = None


def setup_tmpdata():
    # create temporary dir
    global tmpdir
    tmpdir = mkdtemp()


def setup_mock(utils_mod=datasets.utils, dataset_mod=datasets.utils):
    global original_url_request
    global mock_url_request
    mock_url_request = mock_request()
    original_url_request = utils_mod._urllib.request
    utils_mod._urllib.request = mock_url_request

    global original_chunk_read
    global mock_chunk_read
    mock_chunk_read = wrap_chunk_read_(utils_mod._chunk_read_)
    original_chunk_read = utils_mod._chunk_read_
    utils_mod._chunk_read_ = mock_chunk_read

    global original_fetch_files
    global mock_fetch_files
    mock_fetch_files = FetchFilesMock()
    original_fetch_files = dataset_mod._fetch_files
    dataset_mod._fetch_files = mock_fetch_files


def teardown_mock(utils_mod=datasets.utils, dataset_mod=datasets.utils):
    global original_url_request
    utils_mod._urllib.request = original_url_request

    global original_chunk_read
    # restore the private _chunk_read_ attribute that setup_mock replaced
    utils_mod._chunk_read_ = original_chunk_read

    global original_fetch_files
    dataset_mod._fetch_files = original_fetch_files


def teardown_tmpdata():
    # remove temporary dir
    global tmpdir
    if tmpdir is not None:
        shutil.rmtree(tmpdir)


@with_setup(setup_tmpdata, teardown_tmpdata)
def test_get_dataset_dir():
    # testing folder creation under different environments, enforcing
    # a custom clean install
    os.environ.pop('NILEARN_DATA', None)
    os.environ.pop('NILEARN_SHARED_DATA', None)

    expected_base_dir = os.path.expanduser('~/nilearn_data')
    data_dir = datasets.utils._get_dataset_dir('test', verbose=0)
    assert_equal(data_dir, os.path.join(expected_base_dir, 'test'))
    assert os.path.exists(data_dir)
    shutil.rmtree(data_dir)

    expected_base_dir = os.path.join(tmpdir, 'test_nilearn_data')
    os.environ['NILEARN_DATA'] = expected_base_dir
    data_dir = datasets.utils._get_dataset_dir('test', verbose=0)
    assert_equal(data_dir, os.path.join(expected_base_dir, 'test'))
    assert os.path.exists(data_dir)
    shutil.rmtree(data_dir)

    expected_base_dir = os.path.join(tmpdir, 'nilearn_shared_data')
    os.environ['NILEARN_SHARED_DATA'] = expected_base_dir
    data_dir = datasets.utils._get_dataset_dir('test', verbose=0)
    assert_equal(data_dir, os.path.join(expected_base_dir, 'test'))
    assert os.path.exists(data_dir)
    shutil.rmtree(data_dir)

    expected_base_dir = os.path.join(tmpdir, 'env_data')
    expected_dataset_dir = os.path.join(expected_base_dir, 'test')
    data_dir = datasets.utils._get_dataset_dir(
        'test', default_paths=[expected_dataset_dir], verbose=0)
    assert_equal(data_dir, os.path.join(expected_base_dir, 'test'))
    assert os.path.exists(data_dir)
    shutil.rmtree(data_dir)

    no_write = os.path.join(tmpdir, 'no_write')
    os.makedirs(no_write)
    os.chmod(no_write, 0o400)
    expected_base_dir = os.path.join(tmpdir, 'nilearn_shared_data')
    os.environ['NILEARN_SHARED_DATA'] = expected_base_dir
    data_dir = datasets.utils._get_dataset_dir('test',
                                               default_paths=[no_write],
                                               verbose=0)
    # Non writeable dir is returned because dataset may be in there.
    assert_equal(data_dir, no_write)
    assert os.path.exists(data_dir)
    os.chmod(no_write, 0o600)
    shutil.rmtree(data_dir)

    # Verify exception for a path which exists and is a file
    test_file = os.path.join(tmpdir, 'some_file')
    with open(test_file, 'w') as out:
        out.write('abcfeg')
    assert_raises_regex(OSError,
                        'Nilearn tried to store the dataset in the following '
                        'directories, but',
                        datasets.utils._get_dataset_dir,
                        'test', test_file, verbose=0)


def test_md5_sum_file():
    # Create dummy temporary file
    out, f = mkstemp()
    os.write(out, b'abcfeg')
    os.close(out)
    assert_equal(datasets.utils._md5_sum_file(f),
                 '18f32295c556b2a1a3a8e68fe1ad40f7')
    os.remove(f)


def test_read_md5_sum_file():
    # Create dummy temporary file
    out, f = mkstemp()
    os.write(out, b'20861c8c3fe177da19a7e9539a5dbac /tmp/test\n'
                  b'70886dcabe7bf5c5a1c24ca24e4cbd94 test/some_image.nii')
    os.close(out)
    h = datasets.utils._read_md5_sum_file(f)
    assert_true('/tmp/test' in h)
    assert_false('/etc/test' in h)
    assert_equal(h['test/some_image.nii'],
                 '70886dcabe7bf5c5a1c24ca24e4cbd94')
    assert_equal(h['/tmp/test'], '20861c8c3fe177da19a7e9539a5dbac')
    os.remove(f)


def test_tree():
    # Create a dummy directory tree
    parent = mkdtemp()

    open(os.path.join(parent, 'file1'), 'w').close()
    open(os.path.join(parent, 'file2'), 'w').close()
    dir1 = os.path.join(parent, 'dir1')
    dir11 = os.path.join(dir1, 'dir11')
    dir12 = os.path.join(dir1, 'dir12')
    dir2 = os.path.join(parent, 'dir2')
    os.mkdir(dir1)
    os.mkdir(dir11)
    os.mkdir(dir12)
    os.mkdir(dir2)
    open(os.path.join(dir1, 'file11'), 'w').close()
    open(os.path.join(dir1, 'file12'), 'w').close()
    open(os.path.join(dir11, 'file111'), 'w').close()
    open(os.path.join(dir2, 'file21'), 'w').close()

    tree_ = datasets.utils._tree(parent)

    # Check the tree
    # assert_equal(tree_[0]['dir1'][0]['dir11'][0], 'file111')
    # assert_equal(len(tree_[0]['dir1'][1]['dir12']), 0)
    # assert_equal(tree_[0]['dir1'][2], 'file11')
    # assert_equal(tree_[0]['dir1'][3], 'file12')
    # assert_equal(tree_[1]['dir2'][0], 'file21')
    # assert_equal(tree_[2], 'file1')
    # assert_equal(tree_[3], 'file2')
    assert_equal(tree_[0][1][0][1][0], os.path.join(dir11, 'file111'))
    assert_equal(len(tree_[0][1][1][1]), 0)
    assert_equal(tree_[0][1][2], os.path.join(dir1, 'file11'))
    assert_equal(tree_[0][1][3], os.path.join(dir1, 'file12'))
    assert_equal(tree_[1][1][0], os.path.join(dir2, 'file21'))
    assert_equal(tree_[2], os.path.join(parent, 'file1'))
    assert_equal(tree_[3], os.path.join(parent, 'file2'))

    # Clean
    shutil.rmtree(parent)


def test_movetree():
    # Create a dummy directory tree
    parent = mkdtemp()

    dir1 = os.path.join(parent, 'dir1')
    dir11 = os.path.join(dir1, 'dir11')
    dir12 = os.path.join(dir1, 'dir12')
    dir2 = os.path.join(parent, 'dir2')
    os.mkdir(dir1)
    os.mkdir(dir11)
    os.mkdir(dir12)
    os.mkdir(dir2)
    os.mkdir(os.path.join(dir2, 'dir12'))
    open(os.path.join(dir1, 'file11'), 'w').close()
    open(os.path.join(dir1, 'file12'), 'w').close()
    open(os.path.join(dir11, 'file111'), 'w').close()
    open(os.path.join(dir12, 'file121'), 'w').close()
    open(os.path.join(dir2, 'file21'), 'w').close()

    datasets.utils.movetree(dir1, dir2)

    assert_false(os.path.exists(dir11))
    assert_false(os.path.exists(dir12))
    assert_false(os.path.exists(os.path.join(dir1, 'file11')))
    assert_false(os.path.exists(os.path.join(dir1, 'file12')))
    assert_false(os.path.exists(os.path.join(dir11, 'file111')))
    assert_false(os.path.exists(os.path.join(dir12, 'file121')))

    dir11 = os.path.join(dir2, 'dir11')
    dir12 = os.path.join(dir2, 'dir12')

    assert_true(os.path.exists(dir11))
    assert_true(os.path.exists(dir12))
    assert_true(os.path.exists(os.path.join(dir2, 'file11')))
    assert_true(os.path.exists(os.path.join(dir2, 'file12')))
    assert_true(os.path.exists(os.path.join(dir11, 'file111')))
    assert_true(os.path.exists(os.path.join(dir12, 'file121')))


def test_filter_columns():
    # Create fake recarray
    value1 = np.arange(500)
    strings = np.asarray(['a', 'b', 'c'])
    value2 = strings[value1 % 3]
    values = np.asarray(list(zip(value1, value2)),
                        dtype=[('INT', int), ('STR', 'S1')])

    f = datasets.utils._filter_columns(values, {'INT': (23, 46)})
    assert_equal(np.sum(f), 24)

    f = datasets.utils._filter_columns(values, {'INT': [0, 9, (12, 24)]})
    assert_equal(np.sum(f), 15)

    value1 = value1 % 2
    values = np.asarray(list(zip(value1, value2)),
                        dtype=[('INT', int), ('STR', b'S1')])

    # No filter
    f = datasets.utils._filter_columns(values, [])
    assert_equal(np.sum(f), 500)

    f = datasets.utils._filter_columns(values, {'STR': b'b'})
    assert_equal(np.sum(f), 167)

    f = datasets.utils._filter_columns(values, {'INT': 1, 'STR': b'b'})
    assert_equal(np.sum(f), 84)

    f = datasets.utils._filter_columns(values, {'INT': 1, 'STR': b'b'},
                                       combination='or')
    assert_equal(np.sum(f), 333)


def test_uncompress():
    # Create dummy file
    fd, temp = mkstemp()
    os.close(fd)

    # Create a zipfile
    dtemp = mkdtemp()
    ztemp = os.path.join(dtemp, 'test.zip')
    with contextlib.closing(zipfile.ZipFile(ztemp, 'w')) as testzip:
        testzip.write(temp)
    datasets.utils._uncompress_file(ztemp, verbose=0)
    assert(os.path.exists(os.path.join(dtemp, temp)))
    shutil.rmtree(dtemp)

    dtemp = mkdtemp()
    ztemp = os.path.join(dtemp, 'test.tar')
    with contextlib.closing(tarfile.open(ztemp, 'w')) as tar:
        tar.add(temp)
    datasets.utils._uncompress_file(ztemp, verbose=0)
    assert(os.path.exists(os.path.join(dtemp, temp)))
    shutil.rmtree(dtemp)

    dtemp = mkdtemp()
    ztemp = os.path.join(dtemp, 'test.gz')
    f = gzip.open(ztemp, 'wb')
    f.close()
    datasets.utils._uncompress_file(ztemp, verbose=0)
    assert(os.path.exists(os.path.join(dtemp, temp)))
    shutil.rmtree(dtemp)

    os.remove(temp)


@with_setup(setup_mock, teardown_mock)
@with_setup(setup_tmpdata, teardown_tmpdata)
def test_fetch_file_overwrite():
    # overwrite non-existing file.
    fil = datasets.utils._fetch_file(url='http://foo/', data_dir=tmpdir,
                                     verbose=0, overwrite=True)
    assert_equal(len(mock_url_request.urls), 1)
    assert_true(os.path.exists(fil))
    with open(fil, 'r') as fp:
        assert_equal(fp.read(), '')

    # Modify content
    with open(fil, 'w') as fp:
        fp.write('some content')

    # Don't overwrite existing file.
    fil = datasets.utils._fetch_file(url='http://foo/', data_dir=tmpdir,
                                     verbose=0, overwrite=False)
    assert_equal(len(mock_url_request.urls), 1)
    assert_true(os.path.exists(fil))
    with open(fil, 'r') as fp:
        assert_equal(fp.read(), 'some content')

    # Overwrite existing file.
    fil = datasets.utils._fetch_file(url='http://foo/', data_dir=tmpdir,
                                     verbose=0, overwrite=True)
    assert_equal(len(mock_url_request.urls), 1)
    assert_true(os.path.exists(fil))
    with open(fil, 'r') as fp:
        assert_equal(fp.read(), '')


@with_setup(setup_mock, teardown_mock)
@with_setup(setup_tmpdata, teardown_tmpdata)
def test_fetch_files_overwrite():
    # overwrite non-existing file.
    files = ('1.txt', 'http://foo/1.txt')
    fil = datasets.utils._fetch_files(data_dir=tmpdir, verbose=0,
                                      files=[files + (dict(overwrite=True),)])
    assert_equal(len(mock_url_request.urls), 1)
    assert_true(os.path.exists(fil[0]))
    with open(fil[0], 'r') as fp:
        assert_equal(fp.read(), '')

    # Modify content
    with open(fil[0], 'w') as fp:
        fp.write('some content')

    # Don't overwrite existing file.
    fil = datasets.utils._fetch_files(data_dir=tmpdir, verbose=0,
                                      files=[files + (dict(overwrite=False),)])
    assert_equal(len(mock_url_request.urls), 1)
    assert_true(os.path.exists(fil[0]))
    with open(fil[0], 'r') as fp:
        assert_equal(fp.read(), 'some content')

    # Overwrite existing file.
    fil = datasets.utils._fetch_files(data_dir=tmpdir, verbose=0,
                                      files=[files + (dict(overwrite=True),)])
    assert_equal(len(mock_url_request.urls), 1)
    assert_true(os.path.exists(fil[0]))
    with open(fil[0], 'r') as fp:
        assert_equal(fp.read(), '')

nilearn/datasets/tests/test_atlas.py

""" Test the datasets module """ # Author: Alexandre Abraham # License: simplified BSD import os import shutil import csv import numpy as np import nibabel from nose import with_setup from nose.tools import assert_true, assert_equal, assert_not_equal from nilearn._utils.testing import assert_raises_regex from .
import test_utils as tst from nilearn._utils.compat import _basestring from nilearn.datasets import utils, atlas, struct def setup_mock(): return tst.setup_mock(utils, atlas) def teardown_mock(): return tst.teardown_mock(utils, atlas) @with_setup(tst.setup_tmpdata, tst.teardown_tmpdata) def test_get_dataset_dir(): # testing folder creation under different environments, enforcing # a custom clean install os.environ.pop('NILEARN_DATA', None) os.environ.pop('NILEARN_SHARED_DATA', None) expected_base_dir = os.path.expanduser('~/nilearn_data') data_dir = utils._get_dataset_dir('test', verbose=0) assert_equal(data_dir, os.path.join(expected_base_dir, 'test')) assert os.path.exists(data_dir) shutil.rmtree(data_dir) expected_base_dir = os.path.join(tst.tmpdir, 'test_nilearn_data') os.environ['NILEARN_DATA'] = expected_base_dir data_dir = utils._get_dataset_dir('test', verbose=0) assert_equal(data_dir, os.path.join(expected_base_dir, 'test')) assert os.path.exists(data_dir) shutil.rmtree(data_dir) expected_base_dir = os.path.join(tst.tmpdir, 'nilearn_shared_data') os.environ['NILEARN_SHARED_DATA'] = expected_base_dir data_dir = utils._get_dataset_dir('test', verbose=0) assert_equal(data_dir, os.path.join(expected_base_dir, 'test')) assert os.path.exists(data_dir) shutil.rmtree(data_dir) expected_base_dir = os.path.join(tst.tmpdir, 'env_data') expected_dataset_dir = os.path.join(expected_base_dir, 'test') data_dir = utils._get_dataset_dir( 'test', default_paths=[expected_dataset_dir], verbose=0) assert_equal(data_dir, os.path.join(expected_base_dir, 'test')) assert os.path.exists(data_dir) shutil.rmtree(data_dir) no_write = os.path.join(tst.tmpdir, 'no_write') os.makedirs(no_write) os.chmod(no_write, 0o400) expected_base_dir = os.path.join(tst.tmpdir, 'nilearn_shared_data') os.environ['NILEARN_SHARED_DATA'] = expected_base_dir data_dir = utils._get_dataset_dir('test', default_paths=[no_write], verbose=0) # Non writeable dir is returned because dataset may be in there. 
assert_equal(data_dir, no_write) assert os.path.exists(data_dir) # Set back write permissions in order to be able to remove the file os.chmod(no_write, 0o600) shutil.rmtree(data_dir) # Verify exception for a path which exists and is a file test_file = os.path.join(tst.tmpdir, 'some_file') with open(test_file, 'w') as out: out.write('abcfeg') assert_raises_regex(OSError, 'Nilearn tried to store the dataset ' 'in the following directories, but', utils._get_dataset_dir, 'test', test_file, verbose=0) @with_setup(tst.setup_tmpdata, tst.teardown_tmpdata) def test_fail_fetch_atlas_harvard_oxford(): # specify non-existing atlas item assert_raises_regex(ValueError, 'Invalid atlas name', atlas.fetch_atlas_harvard_oxford, 'not_inside') # specify existing atlas item target_atlas = 'cort-maxprob-thr0-1mm' target_atlas_fname = 'HarvardOxford-' + target_atlas + '.nii.gz' ho_dir = os.path.join(tst.tmpdir, 'fsl', 'data', 'atlases') os.makedirs(ho_dir) nifti_dir = os.path.join(ho_dir, 'HarvardOxford') os.makedirs(nifti_dir) target_atlas_nii = os.path.join(nifti_dir, target_atlas_fname) # Create false atlas atlas_data = np.zeros((10, 10, 10), dtype=int) # Create an interhemispheric map atlas_data[:, :2, :] = 1 # Create a left map atlas_data[:5, 3:5, :] = 2 # Create a right map, with one voxel on the left side atlas_data[5:, 7:9, :] = 3 atlas_data[4, 7, 0] = 3 nibabel.Nifti1Image(atlas_data, np.eye(4) * 3).to_filename( target_atlas_nii) dummy = open(os.path.join(ho_dir, 'HarvardOxford-Cortical.xml'), 'w') dummy.write("\n" "\n" '\n' '\n' '\n' "") dummy.close() ho = atlas.fetch_atlas_harvard_oxford(target_atlas, data_dir=tst.tmpdir, symmetric_split=True) assert_true(isinstance(ho.maps, nibabel.Nifti1Image)) assert_true(isinstance(ho.labels, list)) assert_equal(len(ho.labels), 5) assert_equal(ho.labels[0], "Background") assert_equal(ho.labels[1], "R1, left part") assert_equal(ho.labels[2], "R1, right part") assert_equal(ho.labels[3], "R2") assert_equal(ho.labels[4], "R3") @with_setup(setup_mock, teardown_mock) @with_setup(tst.setup_tmpdata, tst.teardown_tmpdata) def test_fetch_atlas_craddock_2012(): bunch = atlas.fetch_atlas_craddock_2012(data_dir=tst.tmpdir, verbose=0) keys = ("scorr_mean", "tcorr_mean", "scorr_2level", "tcorr_2level", "random") filenames = [ "scorr05_mean_all.nii.gz", "tcorr05_mean_all.nii.gz", "scorr05_2level_all.nii.gz", "tcorr05_2level_all.nii.gz", "random_all.nii.gz", ] assert_equal(len(tst.mock_url_request.urls), 1) for key, fn in zip(keys, filenames): assert_equal(bunch[key], os.path.join(tst.tmpdir, 'craddock_2012', fn)) assert_not_equal(bunch.description, '') @with_setup(setup_mock, teardown_mock) @with_setup(tst.setup_tmpdata, tst.teardown_tmpdata) def test_fetch_atlas_smith_2009(): bunch = atlas.fetch_atlas_smith_2009(data_dir=tst.tmpdir, verbose=0) keys = ("rsn20", "rsn10", "rsn70", "bm20", "bm10", "bm70") filenames = [ "rsn20.nii.gz", "PNAS_Smith09_rsn10.nii.gz", "rsn70.nii.gz", "bm20.nii.gz", "PNAS_Smith09_bm10.nii.gz", "bm70.nii.gz", ] assert_equal(len(tst.mock_url_request.urls), 6) for key, fn in zip(keys, filenames): assert_equal(bunch[key], os.path.join(tst.tmpdir, 'smith_2009', fn)) assert_not_equal(bunch.description, '') def test_fetch_coords_power_2011(): bunch = atlas.fetch_coords_power_2011() assert_equal(len(bunch.rois), 264) assert_not_equal(bunch.description, '') @with_setup(setup_mock, teardown_mock) @with_setup(tst.setup_tmpdata, tst.teardown_tmpdata) def test_fetch_atlas_destrieux_2009(): datadir = os.path.join(tst.tmpdir, 'destrieux_2009') os.mkdir(datadir) dummy = 
@with_setup(setup_mock, teardown_mock)
@with_setup(tst.setup_tmpdata, tst.teardown_tmpdata)
def test_fetch_atlas_destrieux_2009():
    datadir = os.path.join(tst.tmpdir, 'destrieux_2009')
    os.mkdir(datadir)
    dummy = open(os.path.join(
        datadir, 'destrieux2009_rois_labels_lateralized.csv'), 'w')
    dummy.write("name,index")
    dummy.close()
    bunch = atlas.fetch_atlas_destrieux_2009(data_dir=tst.tmpdir, verbose=0)

    assert_equal(len(tst.mock_url_request.urls), 1)
    assert_equal(bunch['maps'], os.path.join(
        tst.tmpdir, 'destrieux_2009',
        'destrieux2009_rois_lateralized.nii.gz'))

    dummy = open(os.path.join(
        datadir, 'destrieux2009_rois_labels.csv'), 'w')
    dummy.write("name,index")
    dummy.close()
    bunch = atlas.fetch_atlas_destrieux_2009(
        lateralized=False, data_dir=tst.tmpdir, verbose=0)

    assert_equal(len(tst.mock_url_request.urls), 1)
    assert_equal(bunch['maps'], os.path.join(
        tst.tmpdir, 'destrieux_2009', 'destrieux2009_rois.nii.gz'))


@with_setup(setup_mock, teardown_mock)
@with_setup(tst.setup_tmpdata, tst.teardown_tmpdata)
def test_fetch_atlas_msdl():
    datadir = os.path.join(tst.tmpdir, 'msdl_atlas')
    os.mkdir(datadir)
    os.mkdir(os.path.join(datadir, 'MSDL_rois'))
    data_dir = os.path.join(datadir, 'MSDL_rois', 'msdl_rois_labels.csv')
    csv = np.rec.array([(1.5, 1.5, 1.5, 'Aud', 'Aud'),
                        (1.2, 1.3, 1.4, 'DMN', 'DMN')],
                       dtype=[('x', '<f8'), ('y', '<f8'), ('z', '<f8'),
                              ('name', 'U12'), ('net name', 'U19')])
    np.savetxt(data_dir, csv, header=','.join(csv.dtype.names),
               delimiter=',', fmt='%s', comments='')
    dataset = atlas.fetch_atlas_msdl(data_dir=tst.tmpdir, verbose=0)
    assert_true(isinstance(dataset.maps, _basestring))
    assert_true(isinstance(dataset.labels, list))
    assert_equal(len(tst.mock_url_request.urls), 1)
    assert_not_equal(dataset.description, '')


@with_setup(setup_mock, teardown_mock)
@with_setup(tst.setup_tmpdata, tst.teardown_tmpdata)
def test_fetch_atlas_aal():
    ho_dir = os.path.join(tst.tmpdir, 'aal_SPM12', 'aal', 'atlas')
    os.makedirs(ho_dir)
    with open(os.path.join(ho_dir, 'AAL.xml'), 'w') as xml_file:
        xml_file.write("<?xml version='1.0' encoding='us-ascii'?>\n"
                       "<metadata>\n"
                       "</metadata>")
    dataset = atlas.fetch_atlas_aal(data_dir=tst.tmpdir, verbose=0)
    assert_true(isinstance(dataset.maps, _basestring))
    assert_true(isinstance(dataset.labels, list))
    assert_true(isinstance(dataset.indices, list))
    assert_equal(len(tst.mock_url_request.urls), 1)

    assert_raises_regex(ValueError,
                        'The version of AAL requested "FLS33"',
                        atlas.fetch_atlas_aal,
                        version="FLS33", data_dir=tst.tmpdir, verbose=0)

    assert_not_equal(dataset.description, '')


@with_setup(setup_mock, teardown_mock)
@with_setup(tst.setup_tmpdata, tst.teardown_tmpdata)
def test_fetch_atlas_basc_multiscale_2015():
    # default version='sym'
    data_sym = atlas.fetch_atlas_basc_multiscale_2015(data_dir=tst.tmpdir,
                                                      verbose=0)
    # version='asym'
    data_asym = atlas.fetch_atlas_basc_multiscale_2015(version='asym',
                                                       verbose=0,
                                                       data_dir=tst.tmpdir)

    keys = ['scale007', 'scale012', 'scale020', 'scale036', 'scale064',
            'scale122', 'scale197', 'scale325', 'scale444']

    dataset_name = 'basc_multiscale_2015'
    name_sym = 'template_cambridge_basc_multiscale_nii_sym'
    basenames_sym = ['template_cambridge_basc_multiscale_sym_' +
                     key + '.nii.gz' for key in keys]
    for key, basename_sym in zip(keys, basenames_sym):
        assert_equal(data_sym[key], os.path.join(tst.tmpdir, dataset_name,
                                                 name_sym, basename_sym))

    name_asym = 'template_cambridge_basc_multiscale_nii_asym'
    basenames_asym = ['template_cambridge_basc_multiscale_asym_' +
                      key + '.nii.gz' for key in keys]
    for key, basename_asym in zip(keys, basenames_asym):
        assert_equal(data_asym[key], os.path.join(tst.tmpdir, dataset_name,
                                                  name_asym, basename_asym))

    assert_equal(len(data_sym), 10)
    assert_raises_regex(
        ValueError,
        'The version of Brain parcellations requested "aym"',
        atlas.fetch_atlas_basc_multiscale_2015,
        version="aym", data_dir=tst.tmpdir, verbose=0)

    assert_equal(len(tst.mock_url_request.urls), 2)
    assert_not_equal(data_sym.description, '')
    assert_not_equal(data_asym.description, '')


def test_fetch_coords_dosenbach_2010():
    bunch = atlas.fetch_coords_dosenbach_2010()
    assert_equal(len(bunch.rois), 160)
    assert_equal(len(bunch.labels), 160)
    assert_equal(len(np.unique(bunch.networks)), 6)
    assert_not_equal(bunch.description, '')
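# This file and test_func.py below both patch nilearn's download machinery
# through the shared test_utils module (imported as `tst`), whose source is
# not part of this archive. The class below is a minimal sketch of that
# mocking pattern -- record the requested URLs and return an empty payload
# instead of touching the network. It is an assumption about the helper's
# shape, not a copy of it.
import io


class _MockUrlRequestSketch(object):
    """Illustrative stand-in for the urlopen-like object that
    tst.setup_mock installs."""

    def __init__(self):
        self.urls = []

    def urlopen(self, url):
        # Record the URL so tests can count downloads, and hand back an
        # empty buffer so callers can read() without network access.
        self.urls.append(url)
        return io.BytesIO(b'')

    def reset(self):
        self.urls = []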
nilearn/datasets/tests/test_func.py

"""
Test the datasets module
"""
# Author: Alexandre Abraham
# License: simplified BSD

import os
import numpy as np
import json
import nibabel
from sklearn.utils import check_random_state

from nose import with_setup
from nose.tools import (assert_true, assert_equal, assert_raises,
                        assert_not_equal)

from . import test_utils as tst

from nilearn.datasets import utils, func
from nilearn._utils.testing import assert_raises_regex
from nilearn._utils.compat import _basestring, _urllib


def setup_mock():
    return tst.setup_mock(utils, func)


def teardown_mock():
    return tst.teardown_mock(utils, func)


@with_setup(tst.setup_tmpdata, tst.teardown_tmpdata)
def test_fetch_haxby_simple():
    local_url = "file:" + _urllib.request.pathname2url(
        os.path.join(tst.datadir, "pymvpa-exampledata.tar.bz2"))
    haxby = func.fetch_haxby_simple(data_dir=tst.tmpdir, url=local_url,
                                    verbose=0)
    datasetdir = os.path.join(tst.tmpdir, 'haxby2001_simple',
                              'pymvpa-exampledata')
    for key, file in [
            ('session_target', 'attributes.txt'),
            ('func', 'bold.nii.gz'),
            ('conditions_target', 'attributes_literal.txt')]:
        assert_equal(haxby[key], [os.path.join(datasetdir, file)])
        assert_true(os.path.exists(os.path.join(datasetdir, file)))
    assert_equal(haxby['mask'], os.path.join(datasetdir, 'mask.nii.gz'))


@with_setup(tst.setup_tmpdata, tst.teardown_tmpdata)
def test_fail_fetch_haxby_simple():
    # Test a dataset fetching failure to validate sandboxing
    local_url = "file:" + _urllib.request.pathname2url(
        os.path.join(tst.datadir, "pymvpa-exampledata.tar.bz2"))
    datasetdir = os.path.join(tst.tmpdir, 'haxby2001_simple',
                              'pymvpa-exampledata')
    os.makedirs(datasetdir)
    # Create a dummy file. If sandboxing is successful, it won't be
    # overwritten
    dummy = open(os.path.join(datasetdir, 'attributes.txt'), 'w')
    dummy.write('stuff')
    dummy.close()

    path = 'pymvpa-exampledata'

    opts = {'uncompress': True}
    files = [
        (os.path.join(path, 'attributes.txt'), local_url, opts),
        # The following file does not exist. It will abort the fetching
        # procedure
        (os.path.join(path, 'bald.nii.gz'), local_url, opts)
    ]

    assert_raises(IOError, utils._fetch_files,
                  os.path.join(tst.tmpdir, 'haxby2001_simple'), files,
                  verbose=0)
    dummy = open(os.path.join(datasetdir, 'attributes.txt'), 'r')
    stuff = dummy.read(5)
    dummy.close()
    assert_equal(stuff, 'stuff')


@with_setup(setup_mock, teardown_mock)
@with_setup(tst.setup_tmpdata, tst.teardown_tmpdata)
def test_fetch_haxby():
    for i in range(1, 6):
        haxby = func.fetch_haxby(data_dir=tst.tmpdir, n_subjects=i,
                                 verbose=0)
        # subject_data + (md5 + mask if first subj)
        assert_equal(len(tst.mock_url_request.urls), 1 + 2 * (i == 1))
        assert_equal(len(haxby.func), i)
        assert_equal(len(haxby.anat), i)
        assert_equal(len(haxby.session_target), i)
        assert_true(haxby.mask is not None)
        assert_equal(len(haxby.mask_vt), i)
        assert_equal(len(haxby.mask_face), i)
        assert_equal(len(haxby.mask_house), i)
        assert_equal(len(haxby.mask_face_little), i)
        assert_equal(len(haxby.mask_house_little), i)
        tst.mock_url_request.reset()
        assert_not_equal(haxby.description, '')


@with_setup(setup_mock, teardown_mock)
@with_setup(tst.setup_tmpdata, tst.teardown_tmpdata)
def test_fetch_nyu_rest():
    # First session, all subjects
    nyu = func.fetch_nyu_rest(data_dir=tst.tmpdir, verbose=0)
    assert_equal(len(tst.mock_url_request.urls), 2)
    assert_equal(len(nyu.func), 25)
    assert_equal(len(nyu.anat_anon), 25)
    assert_equal(len(nyu.anat_skull), 25)
    assert_true(np.all(np.asarray(nyu.session) == 1))

    # All sessions, 12 subjects
    tst.mock_url_request.reset()
    nyu = func.fetch_nyu_rest(data_dir=tst.tmpdir, sessions=[1, 2, 3],
                              n_subjects=12, verbose=0)
    # Session 1 has already been downloaded
    assert_equal(len(tst.mock_url_request.urls), 2)
    assert_equal(len(nyu.func), 36)
    assert_equal(len(nyu.anat_anon), 36)
    assert_equal(len(nyu.anat_skull), 36)
    s = 
np.asarray(nyu.session) assert_true(np.all(s[:12] == 1)) assert_true(np.all(s[12:24] == 2)) assert_true(np.all(s[24:] == 3)) assert_not_equal(nyu.description, '') @with_setup(setup_mock, teardown_mock) @with_setup(tst.setup_tmpdata, tst.teardown_tmpdata) def test_fetch_adhd(): local_url = "file://" + tst.datadir sub1 = [3902469, 7774305, 3699991] sub2 = [2014113, 4275075, 1019436, 3154996, 3884955, 27034, 4134561, 27018, 6115230, 27037, 8409791, 27011] sub3 = [3007585, 8697774, 9750701, 10064, 21019, 10042, 10128, 2497695, 4164316, 1552181, 4046678, 23012] sub4 = [1679142, 1206380, 23008, 4016887, 1418396, 2950754, 3994098, 3520880, 1517058, 9744150, 1562298, 3205761, 3624598] subs = np.array(sub1 + sub2 + sub3 + sub4, dtype='i8') subs = subs.view(dtype=[('Subject', 'i8')]) tst.mock_fetch_files.add_csv( 'ADHD200_40subs_motion_parameters_and_phenotypics.csv', subs) adhd = func.fetch_adhd(data_dir=tst.tmpdir, url=local_url, n_subjects=12, verbose=0) assert_equal(len(adhd.func), 12) assert_equal(len(adhd.confounds), 12) assert_equal(len(tst.mock_url_request.urls), 13) # Subjects + phenotypic assert_not_equal(adhd.description, '') @with_setup(setup_mock, teardown_mock) @with_setup(tst.setup_tmpdata, tst.teardown_tmpdata) def test_miyawaki2008(): dataset = func.fetch_miyawaki2008(data_dir=tst.tmpdir, verbose=0) assert_equal(len(dataset.func), 32) assert_equal(len(dataset.label), 32) assert_true(isinstance(dataset.mask, _basestring)) assert_equal(len(dataset.mask_roi), 38) assert_true(isinstance(dataset.background, _basestring)) assert_equal(len(tst.mock_url_request.urls), 1) assert_not_equal(dataset.description, '') @with_setup(setup_mock, teardown_mock) @with_setup(tst.setup_tmpdata, tst.teardown_tmpdata) def test_fetch_localizer_contrasts(): local_url = "file://" + tst.datadir ids = np.asarray([('S%2d' % i).encode() for i in range(94)]) ids = ids.view(dtype=[('subject_id', 'S3')]) tst.mock_fetch_files.add_csv('cubicwebexport.csv', ids) tst.mock_fetch_files.add_csv('cubicwebexport2.csv', ids) # Disabled: cannot be tested without actually fetching covariates CSV file # All subjects dataset = func.fetch_localizer_contrasts(["checkerboard"], data_dir=tst.tmpdir, url=local_url, verbose=0) assert_true(dataset.anats is None) assert_true(dataset.tmaps is None) assert_true(dataset.masks is None) assert_true(isinstance(dataset.ext_vars, np.recarray)) assert_true(isinstance(dataset.cmaps[0], _basestring)) assert_equal(dataset.ext_vars.size, 94) assert_equal(len(dataset.cmaps), 94) # 20 subjects dataset = func.fetch_localizer_contrasts(["checkerboard"], n_subjects=20, data_dir=tst.tmpdir, url=local_url, verbose=0) assert_true(dataset.anats is None) assert_true(dataset.tmaps is None) assert_true(dataset.masks is None) assert_true(isinstance(dataset.cmaps[0], _basestring)) assert_true(isinstance(dataset.ext_vars, np.recarray)) assert_equal(len(dataset.cmaps), 20) assert_equal(dataset.ext_vars.size, 20) # Multiple contrasts dataset = func.fetch_localizer_contrasts( ["checkerboard", "horizontal checkerboard"], n_subjects=20, data_dir=tst.tmpdir, verbose=0) assert_true(dataset.anats is None) assert_true(dataset.tmaps is None) assert_true(dataset.masks is None) assert_true(isinstance(dataset.ext_vars, np.recarray)) assert_true(isinstance(dataset.cmaps[0], _basestring)) assert_equal(len(dataset.cmaps), 20 * 2) # two contrasts are fetched assert_equal(dataset.ext_vars.size, 20) # get_anats=True dataset = func.fetch_localizer_contrasts(["checkerboard"], data_dir=tst.tmpdir, url=local_url, get_anats=True, 
verbose=0) assert_true(dataset.masks is None) assert_true(dataset.tmaps is None) assert_true(isinstance(dataset.ext_vars, np.recarray)) assert_true(isinstance(dataset.anats[0], _basestring)) assert_true(isinstance(dataset.cmaps[0], _basestring)) assert_equal(dataset.ext_vars.size, 94) assert_equal(len(dataset.anats), 94) assert_equal(len(dataset.cmaps), 94) # get_masks=True dataset = func.fetch_localizer_contrasts(["checkerboard"], data_dir=tst.tmpdir, url=local_url, get_masks=True, verbose=0) assert_true(dataset.anats is None) assert_true(dataset.tmaps is None) assert_true(isinstance(dataset.ext_vars, np.recarray)) assert_true(isinstance(dataset.cmaps[0], _basestring)) assert_true(isinstance(dataset.masks[0], _basestring)) assert_equal(dataset.ext_vars.size, 94) assert_equal(len(dataset.cmaps), 94) assert_equal(len(dataset.masks), 94) # get_tmaps=True dataset = func.fetch_localizer_contrasts(["checkerboard"], data_dir=tst.tmpdir, url=local_url, get_tmaps=True, verbose=0) assert_true(dataset.anats is None) assert_true(dataset.masks is None) assert_true(isinstance(dataset.ext_vars, np.recarray)) assert_true(isinstance(dataset.cmaps[0], _basestring)) assert_true(isinstance(dataset.tmaps[0], _basestring)) assert_equal(dataset.ext_vars.size, 94) assert_equal(len(dataset.cmaps), 94) assert_equal(len(dataset.tmaps), 94) # all get_*=True dataset = func.fetch_localizer_contrasts(["checkerboard"], data_dir=tst.tmpdir, url=local_url, get_anats=True, get_masks=True, get_tmaps=True, verbose=0) assert_true(isinstance(dataset.ext_vars, np.recarray)) assert_true(isinstance(dataset.anats[0], _basestring)) assert_true(isinstance(dataset.cmaps[0], _basestring)) assert_true(isinstance(dataset.masks[0], _basestring)) assert_true(isinstance(dataset.tmaps[0], _basestring)) assert_equal(dataset.ext_vars.size, 94) assert_equal(len(dataset.anats), 94) assert_equal(len(dataset.cmaps), 94) assert_equal(len(dataset.masks), 94) assert_equal(len(dataset.tmaps), 94) assert_not_equal(dataset.description, '') # grab a given list of subjects dataset2 = func.fetch_localizer_contrasts(["checkerboard"], n_subjects=[2, 3, 5], data_dir=tst.tmpdir, url=local_url, get_anats=True, get_masks=True, get_tmaps=True, verbose=0) # Check that we are getting only 3 subjects assert_equal(dataset2.ext_vars.size, 3) assert_equal(len(dataset2.anats), 3) assert_equal(len(dataset2.cmaps), 3) assert_equal(len(dataset2.masks), 3) assert_equal(len(dataset2.tmaps), 3) np.testing.assert_array_equal(dataset2.ext_vars, dataset.ext_vars[[1, 2, 4]]) np.testing.assert_array_equal(dataset2.anats, np.array(dataset.anats)[[1, 2, 4]]) np.testing.assert_array_equal(dataset2.cmaps, np.array(dataset.cmaps)[[1, 2, 4]]) np.testing.assert_array_equal(dataset2.masks, np.array(dataset.masks)[[1, 2, 4]]) np.testing.assert_array_equal(dataset2.tmaps, np.array(dataset.tmaps)[[1, 2, 4]]) @with_setup(setup_mock, teardown_mock) @with_setup(tst.setup_tmpdata, tst.teardown_tmpdata) def test_fetch_localizer_calculation_task(): local_url = "file://" + tst.datadir ids = np.asarray(['S%2d' % i for i in range(94)]) ids = ids.view(dtype=[('subject_id', 'S3')]) tst.mock_fetch_files.add_csv('cubicwebexport.csv', ids) tst.mock_fetch_files.add_csv('cubicwebexport2.csv', ids) # Disabled: cannot be tested without actually fetching covariates CSV file # All subjects dataset = func.fetch_localizer_calculation_task(data_dir=tst.tmpdir, url=local_url, verbose=0) assert_true(isinstance(dataset.ext_vars, np.recarray)) assert_true(isinstance(dataset.cmaps[0], _basestring)) 
assert_equal(dataset.ext_vars.size, 1) assert_equal(len(dataset.cmaps), 1) # 20 subjects dataset = func.fetch_localizer_calculation_task(n_subjects=20, data_dir=tst.tmpdir, url=local_url, verbose=0) assert_true(isinstance(dataset.ext_vars, np.recarray)) assert_true(isinstance(dataset.cmaps[0], _basestring)) assert_equal(dataset.ext_vars.size, 20) assert_equal(len(dataset.cmaps), 20) assert_not_equal(dataset.description, '') @with_setup(setup_mock, teardown_mock) @with_setup(tst.setup_tmpdata, tst.teardown_tmpdata) def test_fetch_localizer_button_task(): local_url = "file://" + tst.datadir ids = np.asarray(['S%2d' % i for i in range(94)]) ids = ids.view(dtype=[('subject_id', 'S3')]) tst.mock_fetch_files.add_csv('cubicwebexport.csv', ids) tst.mock_fetch_files.add_csv('cubicwebexport2.csv', ids) # Disabled: cannot be tested without actually fetching covariates CSV file # All subjects dataset = func.fetch_localizer_button_task(data_dir=tst.tmpdir, url=local_url, verbose=0) assert_true(isinstance(dataset.ext_vars, np.recarray)) assert_true(isinstance(dataset.cmaps[0], _basestring)) assert_equal(dataset.ext_vars.size, 1) assert_equal(len(dataset.cmaps), 1) # 20 subjects dataset = func.fetch_localizer_button_task(n_subjects=20, data_dir=tst.tmpdir, url=local_url, verbose=0) assert_true(isinstance(dataset.ext_vars, np.recarray)) assert_true(isinstance(dataset.cmaps[0], _basestring)) assert_equal(dataset.ext_vars.size, 20) assert_equal(len(dataset.cmaps), 20) assert_not_equal(dataset.description, '') @with_setup(setup_mock, teardown_mock) @with_setup(tst.setup_tmpdata, tst.teardown_tmpdata) def test_fetch_abide_pcp(): local_url = "file://" + tst.datadir ids = [('50%03d' % i).encode() for i in range(800)] filenames = ['no_filename'] * 800 filenames[::2] = ['filename'] * 400 pheno = np.asarray(list(zip(ids, filenames)), dtype=[('subject_id', int), ('FILE_ID', 'U11')]) # pheno = pheno.T.view() tst.mock_fetch_files.add_csv('Phenotypic_V1_0b_preprocessed1.csv', pheno) # All subjects dataset = func.fetch_abide_pcp(data_dir=tst.tmpdir, url=local_url, quality_checked=False, verbose=0) assert_equal(len(dataset.func_preproc), 400) assert_not_equal(dataset.description, '') # Smoke test using only a string, rather than a list of strings dataset = func.fetch_abide_pcp(data_dir=tst.tmpdir, url=local_url, quality_checked=False, verbose=0, derivatives='func_preproc') def test__load_mixed_gambles(): rng = check_random_state(42) n_trials = 48 affine = np.eye(4) for n_subjects in [1, 5, 16]: zmaps = [] for _ in range(n_subjects): zmaps.append(nibabel.Nifti1Image(rng.randn(3, 4, 5, n_trials), affine)) zmaps, gain, _ = func._load_mixed_gambles(zmaps) assert_equal(len(zmaps), n_subjects * n_trials) assert_equal(len(zmaps), len(gain)) @with_setup(setup_mock) @with_setup(tst.setup_tmpdata, tst.teardown_tmpdata) def test_fetch_mixed_gambles(): local_url = "file://" + os.path.join(tst.datadir, "jimura_poldrack_2012_zmaps.zip") for n_subjects in [1, 5, 16]: mgambles = func.fetch_mixed_gambles(n_subjects=n_subjects, data_dir=tst.tmpdir, url=local_url, verbose=0, return_raw_data=True) datasetdir = os.path.join(tst.tmpdir, "jimura_poldrack_2012_zmaps") assert_equal(mgambles["zmaps"][0], os.path.join(datasetdir, "zmaps", "sub001_zmaps.nii.gz")) assert_equal(len(mgambles["zmaps"]), n_subjects) def test_check_parameters_megatrawls_datasets(): # testing whether the function raises the same error message # if invalid input parameters are provided message = "Invalid {0} input is provided: {1}." 
    for invalid_input_dim in [1, 5, 30]:
        assert_raises_regex(ValueError,
                            message.format('dimensionality',
                                           invalid_input_dim),
                            func.fetch_megatrawls_netmats,
                            dimensionality=invalid_input_dim)
    for invalid_input_timeserie in ['asdf', 'time', 'st2']:
        assert_raises_regex(ValueError,
                            message.format('timeseries',
                                           invalid_input_timeserie),
                            func.fetch_megatrawls_netmats,
                            timeseries=invalid_input_timeserie)
    for invalid_output_name in ['net1', 'net2']:
        assert_raises_regex(ValueError,
                            message.format('matrices',
                                           invalid_output_name),
                            func.fetch_megatrawls_netmats,
                            matrices=invalid_output_name)


@with_setup(tst.setup_tmpdata, tst.teardown_tmpdata)
def test_fetch_megatrawls_netmats():
    # smoke test to see that files are fetched and read properly
    # since we are loading data present in it
    files_dir = os.path.join(tst.tmpdir, 'Megatrawls',
                             '3T_Q1-Q6related468_MSMsulc_d100_ts3')
    os.makedirs(files_dir)
    with open(os.path.join(files_dir, 'Znet2.txt'), 'w') as net_file:
        net_file.write("1")

    files_dir2 = os.path.join(tst.tmpdir, 'Megatrawls',
                              '3T_Q1-Q6related468_MSMsulc_d300_ts2')
    os.makedirs(files_dir2)
    with open(os.path.join(files_dir2, 'Znet1.txt'), 'w') as net_file2:
        net_file2.write("1")

    megatrawl_netmats_data = func.fetch_megatrawls_netmats(
        data_dir=tst.tmpdir)

    # the output bunch should have the expected number of entries
    assert_equal(len(megatrawl_netmats_data), 5)
    # check that the returned bunch is not empty
    # dimensions
    assert_not_equal(megatrawl_netmats_data.dimensions, '')
    # timeseries
    assert_not_equal(megatrawl_netmats_data.timeseries, '')
    # matrices
    assert_not_equal(megatrawl_netmats_data.matrices, '')
    # correlation matrices
    assert_not_equal(megatrawl_netmats_data.correlation_matrices, '')
    # description
    assert_not_equal(megatrawl_netmats_data.description, '')

    # check that the dimensions, timeseries and matrices settings provided
    # by the user are echoed back in the output
    netmats_data = func.fetch_megatrawls_netmats(
        data_dir=tst.tmpdir, dimensionality=300,
        timeseries='multiple_spatial_regression',
        matrices='full_correlation')
    assert_equal(netmats_data.dimensions, 300)
    assert_equal(netmats_data.timeseries, 'multiple_spatial_regression')
    assert_equal(netmats_data.matrices, 'full_correlation')


@with_setup(setup_mock, teardown_mock)
@with_setup(tst.setup_tmpdata, tst.teardown_tmpdata)
def test_fetch_cobre():
    ids_sc = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 15, 16, 21, 22, 25,
              28, 29, 32, 34, 37, 39, 40, 41, 42, 44, 46, 47, 49, 59, 60,
              64, 71, 72, 73, 75, 77, 78, 79, 80, 81, 82, 84, 85, 88, 89,
              92, 94, 96, 97, 98, 99, 100, 101, 103, 105, 106, 108, 109,
              110, 112, 117, 122, 126, 132, 133, 137, 142, 143, 145]
    ids_con = [13, 14, 17, 18, 19, 20, 23, 24, 26, 27, 30, 31, 33, 35, 36,
               38, 43, 45, 48, 50, 51, 52, 53, 54, 55, 56, 57, 58, 61, 62,
               63, 65, 66, 67, 68, 69, 74, 76, 86, 87, 90, 91, 93, 95, 102,
               104, 107, 111, 113, 114, 115, 116, 118, 119, 120, 121, 123,
               124, 125, 127, 128, 129, 130, 131, 134, 135, 136, 138, 139,
               140, 141, 144, 146, 147]
    ids_sch = ['szxxx0040%03d' % i for i in ids_sc]
    ids_cont = ['contxxx0040%03d' % i for i in ids_con]
    ids = np.asarray(ids_sch + ids_cont, dtype='|U17')
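# Usage sketch for the fetcher exercised above: outside the mocked test
# environment the same call downloads the real HCP Megatrawls files. The
# parameter values are the ones the test asserts on; the package-level
# import assumes the fetcher is re-exported by nilearn.datasets like the
# other fetchers in this file.
#
#   from nilearn.datasets import fetch_megatrawls_netmats
#   netmats = fetch_megatrawls_netmats(
#       dimensionality=300,
#       timeseries='multiple_spatial_regression',
#       matrices='full_correlation')
#   # netmats.correlation_matrices then holds the requested matrices.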
nilearn/connectome/__init__.py

"""
Tools for computing functional connectivity matrices and an
implementation of an algorithm for sparse multi-subject learning of
Gaussian graphical models.
"""
from .connectivity_matrices import sym_to_vec, ConnectivityMeasure

from .group_sparse_cov import (GroupSparseCovariance,
                               GroupSparseCovarianceCV,
                               group_sparse_covariance)

__all__ = ['sym_to_vec', 'ConnectivityMeasure',
           'GroupSparseCovariance', 'GroupSparseCovarianceCV',
           'group_sparse_covariance']


nilearn/connectome/connectivity_matrices.py

import warnings
from math import sqrt

import numpy as np
from scipy import linalg
from sklearn.base import BaseEstimator, TransformerMixin, clone
from sklearn.covariance import LedoitWolf

from .. import signal
from .._utils.extmath import is_spd


def _check_square(matrix):
    """Raise a ValueError if the input matrix is not square.

    Parameters
    ----------
    matrix : numpy.ndarray
        Input array.
    """
    if matrix.ndim != 2 or (matrix.shape[0] != matrix.shape[-1]):
        raise ValueError('Expected a square matrix, got array of shape'
                         ' {0}.'.format(matrix.shape))


def _check_spd(matrix):
    """Raise a ValueError if the input matrix is not symmetric positive
    definite.

    Parameters
    ----------
    matrix : numpy.ndarray
        Input array.
    """
    if not is_spd(matrix, decimal=7):
        raise ValueError('Expected a symmetric positive definite matrix.')


def _form_symmetric(function, eigenvalues, eigenvectors):
    """Return the symmetric matrix with the given eigenvectors and
    eigenvalues transformed by function.

    Parameters
    ----------
    function : function numpy.ndarray -> numpy.ndarray
        The transform to apply to the eigenvalues.

    eigenvalues : numpy.ndarray, shape (n_features, )
        Input argument of the function.

    eigenvectors : numpy.ndarray, shape (n_features, n_features)
        Unitary matrix.

    Returns
    -------
    output : numpy.ndarray, shape (n_features, n_features)
        The symmetric matrix obtained after transforming the eigenvalues,
        while keeping the same eigenvectors.
    """
    return np.dot(eigenvectors * function(eigenvalues), eigenvectors.T)


def _map_eigenvalues(function, symmetric):
    """Matrix function, for real symmetric matrices.

    The function is applied to the eigenvalues of symmetric.

    Parameters
    ----------
    function : function numpy.ndarray -> numpy.ndarray
        The transform to apply to the eigenvalues.

    symmetric : numpy.ndarray, shape (n_features, n_features)
        The input symmetric matrix.

    Returns
    -------
    output : numpy.ndarray, shape (n_features, n_features)
        The new symmetric matrix obtained after transforming the
        eigenvalues, while keeping the same eigenvectors.

    Note
    ----
    If the input matrix is not real symmetric, no error is reported but
    the result will be wrong.
    """
    eigenvalues, eigenvectors = linalg.eigh(symmetric)
    return _form_symmetric(function, eigenvalues, eigenvectors)


def _geometric_mean(matrices, init=None, max_iter=10, tol=1e-7):
    """Compute the geometric mean of symmetric positive definite matrices.

    The geometric mean of n positive definite matrices M_1, ..., M_n is
    the minimizer of the sum of squared distances from an arbitrary
    matrix to each input matrix M_k

        gmean(M_1, ..., M_n) = argmin_X sum_{k=1}^n dist(X, M_k)^2

    where the distance used is related to the matrix logarithm

        dist(X, M_k) = ||log(X^{-1/2} M_k X^{-1/2})||

    In the case of positive (scalar) numbers, this mean is the usual
    geometric mean.
References ---------- See Algorithm 3 of: P. Thomas Fletcher, Sarang Joshi. Riemannian Geometry for the Statistical Analysis of Diffusion Tensor Data. Signal Processing, 2007. Parameters ---------- matrices : list of numpy.ndarray, all of shape (n_features, n_features) List of matrices whose geometric mean to compute. Raise an error if the matrices are not all symmetric positive definite of the same shape. init : numpy.ndarray, shape (n_features, n_features), optional Initialization matrix, default to the arithmetic mean of matrices. Raise an error if the matrix is not symmetric positive definite of the same shape as the elements of matrices. max_iter : int, optional Maximal number of iterations. tol : positive float or None, optional The tolerance to declare convergence: if the gradient norm goes below this value, the gradient descent is stopped. If None, no check is performed. Returns ------- gmean : numpy.ndarray, shape (n_features, n_features) Geometric mean of the matrices. """ # Shape and symmetry positive definiteness checks n_features = matrices[0].shape[0] for matrix in matrices: _check_square(matrix) if matrix.shape[0] != n_features: raise ValueError("Matrices are not of the same shape.") _check_spd(matrix) # Initialization matrices = np.array(matrices) if init is None: gmean = np.mean(matrices, axis=0) else: _check_square(init) if init.shape[0] != n_features: raise ValueError("Initialization has incorrect shape.") _check_spd(init) gmean = init norm_old = np.inf step = 1. # Gradient descent for n in range(max_iter): # Computation of the gradient vals_gmean, vecs_gmean = linalg.eigh(gmean) gmean_inv_sqrt = _form_symmetric(np.sqrt, 1. / vals_gmean, vecs_gmean) whitened_matrices = [gmean_inv_sqrt.dot(matrix).dot(gmean_inv_sqrt) for matrix in matrices] logs = [_map_eigenvalues(np.log, w_mat) for w_mat in whitened_matrices] logs_mean = np.mean(logs, axis=0) # Covariant derivative is # - gmean.dot(logms_mean) if np.any(np.isnan(logs_mean)): raise FloatingPointError("Nan value after logarithm operation.") norm = np.linalg.norm(logs_mean) # Norm of the covariant derivative on # the tangent space at point gmean # Update of the minimizer vals_log, vecs_log = linalg.eigh(logs_mean) gmean_sqrt = _form_symmetric(np.sqrt, vals_gmean, vecs_gmean) # Move along the geodesic gmean = gmean_sqrt.dot( _form_symmetric(np.exp, vals_log * step, vecs_log)).dot(gmean_sqrt) # Update the norm and the step size if norm < norm_old: norm_old = norm elif norm > norm_old: step = step / 2. norm = norm_old if tol is not None and norm / gmean.size < tol: break if tol is not None and norm / gmean.size >= tol: warnings.warn("Maximum number of iterations {0} reached without " "getting to the requested tolerance level " "{1}.".format(max_iter, tol)) return gmean def sym_to_vec(symmetric): """Return the flattened lower triangular part of an array, after multiplying above the diagonal elements by sqrt(2). Acts on the last two dimensions of the array if not 2-dimensional. .. versionadded:: 0.2 Parameters ---------- symmetric : numpy.ndarray, shape (..., n_features, n_features) Input array. Returns ------- output : numpy.ndarray, shape (..., n_features * (n_features + 1) / 2) The output flattened lower triangular part of symmetric. """ scaling = sqrt(2) * np.ones(symmetric.shape[-2:]) np.fill_diagonal(scaling, 1.) tril_mask = np.tril(np.ones(symmetric.shape[-2:])).astype(np.bool) return symmetric[..., tril_mask] * scaling[tril_mask] def _cov_to_corr(covariance): """Return correlation matrix for a given covariance matrix. 
    Parameters
    ----------
    covariance : 2D numpy.ndarray
        The input covariance matrix.

    Returns
    -------
    correlation : 2D numpy.ndarray
        The output correlation matrix.
    """
    diagonal = np.atleast_2d(1. / np.sqrt(np.diag(covariance)))
    correlation = covariance * diagonal * diagonal.T
    return correlation


def _prec_to_partial(precision):
    """Return partial correlation matrix for a given precision matrix.

    Parameters
    ----------
    precision : 2D numpy.ndarray
        The input precision matrix.

    Returns
    -------
    partial_correlation : 2D numpy.ndarray
        The 2D output partial correlation matrix.
    """
    partial_correlation = -_cov_to_corr(precision)
    np.fill_diagonal(partial_correlation, 1.)
    return partial_correlation
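# Worked example for the two helpers above (illustrative only, not part of
# the module's API). For two variables, the partial correlation computed
# from the precision matrix coincides with the plain correlation:
#
# >>> import numpy as np
# >>> from scipy import linalg
# >>> cov = np.array([[4., 2.],
# ...                 [2., 9.]])
# >>> _cov_to_corr(cov)                  # off-diagonal: 2 / (2 * 3)
# array([[ 1.        ,  0.33333333],
#        [ 0.33333333,  1.        ]])
# >>> _prec_to_partial(linalg.inv(cov))  # same value, unit diagonal
# array([[ 1.        ,  0.33333333],
#        [ 0.33333333,  1.        ]])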
""" if self.kind == 'correlation': covariances_std = [self.cov_estimator_.fit( signal._standardize(x, detrend=False, normalize=True) ).covariance_ for x in X] connectivities = [_cov_to_corr(cov) for cov in covariances_std] else: covariances = [self.cov_estimator_.fit(x).covariance_ for x in X] if self.kind == 'covariance': connectivities = covariances elif self.kind == 'tangent': connectivities = [_map_eigenvalues(np.log, self.whitening_.dot( cov).dot(self.whitening_)) for cov in covariances] elif self.kind == 'precision': connectivities = [linalg.inv(cov) for cov in covariances] elif self.kind == 'partial correlation': connectivities = [_prec_to_partial(linalg.inv(cov)) for cov in covariances] else: raise ValueError('Allowed connectivity kinds are ' '"correlation", ' '"partial correlation", "tangent", ' '"covariance" and "precision", got kind ' '"{}"'.format(self.kind)) return np.array(connectivities) PKlmHO.ZOJJ&nilearn/connectome/group_sparse_cov.py""" Implementation of algorithm for sparse multi-subjects learning of Gaussian graphical models. """ # Authors: Philippe Gervais # License: simplified BSD import warnings import collections import operator import itertools import numpy as np import scipy.linalg import sklearn.cross_validation import sklearn.covariance from sklearn.utils.extmath import fast_logdet from sklearn.covariance import empirical_covariance from sklearn.base import BaseEstimator from sklearn.externals.joblib import Memory, delayed, Parallel from .._utils import CacheMixin from .._utils import logger from .._utils.extmath import is_spd def compute_alpha_max(emp_covs, n_samples): """Compute the critical value of the regularization parameter. Above this value, the precisions matrices computed by group_sparse_covariance are diagonal (complete sparsity) This function also returns the value below which the precision matrices are fully dense (i.e. minimal number of zero coefficients). Parameters ---------- emp_covs : array-like, shape (n_features, n_features, n_subjects) covariance matrix for each subject. n_samples : array-like, shape (n_subjects,) number of samples used in the computation of every covariance matrix. n_samples.sum() can be arbitrary. Returns ------- alpha_max : float minimal value for the regularization parameter that gives a fully sparse matrix. alpha_min : float minimal value for the regularization parameter that gives a fully dense matrix. See also -------- The formula used in this function was derived using the same method as in: Duchi, John, Stephen Gould, and Daphne Koller. 'Projected Subgradient Methods for Learning Sparse Gaussians'. ArXiv E-prints 1206 (1 June 2012): 3249. """ A = np.copy(emp_covs) n_samples = np.asarray(n_samples).copy() n_samples /= n_samples.sum() for k in range(emp_covs.shape[-1]): # Set diagonal to zero A[..., k].flat[::A.shape[0] + 1] = 0 A[..., k] *= n_samples[k] norms = np.sqrt((A ** 2).sum(axis=-1)) return np.max(norms), np.min(norms[norms > 0]) def _update_submatrix(full, sub, sub_inv, p, h, v): """Update submatrix and its inverse. sub_inv is the inverse of the submatrix of "full" obtained by removing the p-th row and column. sub_inv is modified in-place. After execution of this function, it contains the inverse of the submatrix of "full" obtained by removing the n+1-th row and column. This computation is based on the Sherman-Woodbury-Morrison identity. 
""" n = p - 1 v[:n + 1] = full[:n + 1, n] v[n + 1:] = full[n + 2:, n] h[:n + 1] = full[n, :n + 1] h[n + 1:] = full[n, n + 2:] # change row: first usage of SWM identity coln = sub_inv[:, n:n + 1] # 2d array, useful for sub_inv below V = h - sub[n, :] coln = coln / (1. + np.dot(V, coln)) # The following line is equivalent to # sub_inv -= np.outer(coln, np.dot(V, sub_inv)) sub_inv -= np.dot(coln, np.dot(V, sub_inv)[np.newaxis, :]) sub[n, :] = h # change column: second usage of SWM identity rown = sub_inv[n:n + 1, :] # 2d array, useful for sub_inv below U = v - sub[:, n] rown = rown / (1. + np.dot(rown, U)) # The following line is equivalent to (but faster) # sub_inv -= np.outer(np.dot(sub_inv, U), rown) sub_inv -= np.dot(np.dot(sub_inv, U)[:, np.newaxis], rown) sub[:, n] = v # equivalent to sub[n, :] += U # Make sub_inv symmetric (overcome some numerical limitations) sub_inv += sub_inv.T.copy() sub_inv /= 2. def _assert_submatrix(full, sub, n): """Check that "sub" is the matrix obtained by removing the p-th col and row in "full". Used only for debugging. """ true_sub = np.empty_like(sub) true_sub[:n, :n] = full[:n, :n] true_sub[n:, n:] = full[n + 1:, n + 1:] true_sub[:n, n:] = full[:n, n + 1:] true_sub[n:, :n] = full[n + 1:, :n] np.testing.assert_almost_equal(true_sub, sub) def group_sparse_covariance(subjects, alpha, max_iter=50, tol=1e-3, verbose=0, probe_function=None, precisions_init=None, debug=False): """Compute sparse precision matrices and covariance matrices. The precision matrices returned by this function are sparse, and share a common sparsity pattern: all have zeros at the same location. This is achieved by simultaneous computation of all precision matrices at the same time. Running time is linear on max_iter, and number of subjects (len(subjects)), but cubic on number of features (subjects[0].shape[1]). Parameters ---------- subjects : list of numpy.ndarray input subjects. Each subject is a 2D array, whose columns contain signals. Each array shape must be (sample number, feature number). The sample number can vary from subject to subject, but all subjects must have the same number of features (i.e. of columns). alpha : float regularization parameter. With normalized covariances matrices and number of samples, sensible values lie in the [0, 1] range(zero is no regularization: output is not sparse) max_iter : int, optional maximum number of iterations. tol : positive float or None, optional The tolerance to declare convergence: if the duality gap goes below this value, optimization is stopped. If None, no check is performed. verbose : int, optional verbosity level. Zero means "no message". probe_function : callable or None This value is called before the first iteration and after each iteration. If it returns True, then optimization is stopped prematurely. The function is given as arguments (in that order): - empirical covariances (ndarray), - number of samples for each subject (ndarray), - regularization parameter (float) - maximum iteration number (integer) - tolerance (float) - current iteration number (integer). -1 means "before first iteration" - current value of precisions (ndarray). - previous value of precisions (ndarray). None before first iteration. precisions_init: numpy.ndarray initial value of the precision matrices. If not provided, a diagonal matrix with the variances of each input signal is used. debug : bool, optional if True, perform checks during computation. It can help find numerical problems, but increases computation time a lot. 
Returns ------- emp_covs : numpy.ndarray, shape (n_features, n_features, n_subjects) empirical covariances matrices precisions : numpy.ndarray, shape (n_features, n_features, n_subjects) estimated precision matrices Notes ----- The present algorithm is based on: Jean Honorio and Dimitris Samaras. "Simultaneous and Group-Sparse Multi-Task Learning of Gaussian Graphical Models". arXiv:1207.4255 (17 July 2012). http://arxiv.org/abs/1207.4255. """ emp_covs, n_samples = empirical_covariances( subjects, assume_centered=False) precisions = _group_sparse_covariance( emp_covs, n_samples, alpha, max_iter=max_iter, tol=tol, verbose=verbose, precisions_init=precisions_init, probe_function=probe_function, debug=debug) return emp_covs, precisions def _group_sparse_covariance(emp_covs, n_samples, alpha, max_iter=10, tol=1e-3, precisions_init=None, probe_function=None, verbose=0, debug=False): """Internal version of group_sparse_covariance. See its docstring for details. """ if tol == -1: tol = None if not isinstance(alpha, (int, float)) or alpha < 0: raise ValueError("Regularization parameter alpha must be a " "positive number.\n" "You provided: {0}".format(str(alpha))) n_subjects = emp_covs.shape[-1] n_features = emp_covs[0].shape[0] n_samples = np.asarray(n_samples) n_samples /= n_samples.sum() # essential for numerical stability # Check diagonal normalization. ones = np.ones(emp_covs.shape[0]) for k in range(n_subjects): if (abs(emp_covs[..., k].flat[::emp_covs.shape[0] + 1] - ones) > 0.1).any(): warnings.warn("input signals do not all have unit variance. This " "can lead to numerical instability.") break if precisions_init is None: # Fortran order make omega[..., k] contiguous, which is often useful. omega = np.ndarray(shape=emp_covs.shape, dtype=np.float, order="F") for k in range(n_subjects): # Values on main diagonals are far from zero, because they # are timeseries energy. omega[..., k] = np.diag(1. / np.diag(emp_covs[..., k])) else: omega = precisions_init.copy() # Preallocate arrays y = np.ndarray(shape=(n_subjects, n_features - 1), dtype=np.float) u = np.ndarray(shape=(n_subjects, n_features - 1), dtype=np.float) y_1 = np.ndarray(shape=(n_subjects, n_features - 2), dtype=np.float) h_12 = np.ndarray(shape=(n_subjects, n_features - 2), dtype=np.float) q = np.ndarray(shape=(n_subjects,), dtype=np.float) aq = np.ndarray(shape=(n_subjects,), dtype=np.float) # temp. array c = np.ndarray(shape=(n_subjects,), dtype=np.float) W = np.ndarray(shape=(omega.shape[0] - 1, omega.shape[1] - 1, omega.shape[2]), dtype=np.float, order="F") W_inv = np.ndarray(shape=W.shape, dtype=np.float, order="F") # Auxilliary arrays. v = np.ndarray((omega.shape[0] - 1,), dtype=np.float) h = np.ndarray((omega.shape[1] - 1,), dtype=np.float) # Optional. tolerance_reached = False max_norm = None omega_old = np.empty_like(omega) if probe_function is not None: # iteration number -1 means called before iteration loop. probe_function(emp_covs, n_samples, alpha, max_iter, tol, -1, omega, None) probe_interrupted = False # Start optimization loop. Variables are named following (mostly) the # Honorio-Samaras paper notations. # Used in the innermost loop. Computed here to save some computation. alpha2 = alpha ** 2 for n in range(max_iter): if max_norm is not None: suffix = (" variation (max norm): {max_norm:.3e} ".format( max_norm=max_norm)) else: suffix = "" if verbose > 1: logger.log("* iteration {iter_n:d} ({percentage:.0f} %){suffix}" " ...".format(iter_n=n, percentage=100. 
* n / max_iter, suffix=suffix), verbose=verbose) omega_old[...] = omega for p in range(n_features): if p == 0: # Initial state: remove first col/row W = omega[1:, 1:, :].copy() # stack of W(k) W_inv = np.ndarray(shape=W.shape, dtype=np.float) for k in range(W.shape[2]): # stack of W^-1(k) W_inv[..., k] = scipy.linalg.inv(W[..., k]) if debug: np.testing.assert_almost_equal( np.dot(W_inv[..., k], W[..., k]), np.eye(W_inv[..., k].shape[0]), decimal=10) _assert_submatrix(omega[..., k], W[..., k], p) assert(is_spd(W_inv[..., k])) else: # Update W and W_inv if debug: omega_orig = omega.copy() for k in range(n_subjects): _update_submatrix(omega[..., k], W[..., k], W_inv[..., k], p, h, v) if debug: _assert_submatrix(omega[..., k], W[..., k], p) assert(is_spd(W_inv[..., k], decimal=14)) np.testing.assert_almost_equal( np.dot(W[..., k], W_inv[..., k]), np.eye(W_inv[..., k].shape[0]), decimal=10) if debug: # Check that omega has not been modified. np.testing.assert_almost_equal(omega_orig, omega) # In the following lines, implicit loop on k (subjects) # Extract y and u y[:, :p] = omega[:p, p, :].T y[:, p:] = omega[p + 1:, p, :].T u[:, :p] = emp_covs[:p, p, :].T u[:, p:] = emp_covs[p + 1:, p, :].T for m in range(n_features - 1): # Coordinate descent on y # T(k) -> n_samples[k] # v(k) -> emp_covs[p, p, k] # h_22(k) -> W_inv[m, m, k] # h_12(k) -> W_inv[:m, m, k], W_inv[m+1:, m, k] # y_1(k) -> y[k, :m], y[k, m+1:] # u_2(k) -> u[k, m] h_12[:, :m] = W_inv[:m, m, :].T h_12[:, m:] = W_inv[m + 1:, m, :].T y_1[:, :m] = y[:, :m] y_1[:, m:] = y[:, m + 1:] c[:] = - n_samples * ( emp_covs[p, p, :] * (h_12 * y_1).sum(axis=1) + u[:, m] ) c2 = np.sqrt(np.dot(c, c)) # x -> y[:][m] if c2 <= alpha: y[:, m] = 0 # x* = 0 else: # q(k) -> T(k) * v(k) * h_22(k) # \lambda -> gamma (lambda is a Python keyword) q[:] = n_samples * emp_covs[p, p, :] * W_inv[m, m, :] if debug: assert(np.all(q > 0)) # x* = \lambda* diag(1 + \lambda q)^{-1} c # Newton-Raphson loop. Loosely based on Scipy's. # Tolerance does not seem to be important for numerical # stability (tolerance of 1e-2 works) but has an effect on # overall convergence rate (the tighter the better.) gamma = 0. # initial value # Precompute some quantities cc = c * c two_ccq = 2. * cc * q for _ in itertools.repeat(None, 100): # Function whose zero must be determined (fval) and # its derivative (fder). # Written inplace to save some function calls. aq = 1. + gamma * q aq2 = aq * aq fder = (two_ccq / (aq2 * aq)).sum() if fder == 0: msg = "derivative was zero." warnings.warn(msg, RuntimeWarning) break fval = - (alpha2 - (cc / aq2).sum()) / fder gamma = fval + gamma if abs(fval) < 1.5e-8: break if abs(fval) > 0.1: warnings.warn("Newton-Raphson step did not converge.\n" "This may indicate a badly conditioned " "system.") if debug: assert gamma >= 0., gamma y[:, m] = (gamma * c) / aq # x* # Copy back y in omega (column and row) omega[:p, p, :] = y[:, :p].T omega[p + 1:, p, :] = y[:, p:].T omega[p, :p, :] = y[:, :p].T omega[p, p + 1:, :] = y[:, p:].T for k in range(n_subjects): omega[p, p, k] = 1. 
/ emp_covs[p, p, k] + np.dot( np.dot(y[k, :], W_inv[..., k]), y[k, :]) if debug: assert(is_spd(omega[..., k])) if probe_function is not None: if probe_function(emp_covs, n_samples, alpha, max_iter, tol, n, omega, omega_old) is True: probe_interrupted = True logger.log("probe_function interrupted loop", verbose=verbose, msg_level=2) break # Compute max of variation omega_old -= omega omega_old = abs(omega_old) max_norm = omega_old.max() if tol is not None and max_norm < tol: logger.log("tolerance reached at iteration number {0:d}: {1:.3e}" "".format(n + 1, max_norm), verbose=verbose) tolerance_reached = True break if tol is not None and not tolerance_reached and not probe_interrupted: warnings.warn("Maximum number of iterations reached without getting " "to the requested tolerance level.") return omega class GroupSparseCovariance(BaseEstimator, CacheMixin): """Covariance and precision matrix estimator. Parameters ---------- alpha : float regularization parameter. With normalized covariances matrices and number of samples, sensible values lie in the [0, 1] range(zero is no regularization: output is not sparse) tol : positive float, optional The tolerance to declare convergence: if the dual gap goes below this value, iterations are stopped max_iter : int, optional maximum number of iterations. The default value (10) is rather conservative. verbose : int, optional verbosity level. Zero means "no message". memory : instance of joblib.Memory or string, optional Used to cache the masking process. By default, no caching is done. If a string is given, it is the path to the caching directory. memory_level : int, optional Caching aggressiveness. Higher values mean more caching. Attributes ---------- `covariances_` : numpy.ndarray, shape (n_features, n_features, n_subjects) empirical covariance matrices. `precisions_` : numpy.ndarraye, shape (n_features, n_features, n_subjects) precisions matrices estimated using the group-sparse algorithm. Notes ------ The model used has been introduced in: Gael Varoquaux, et al. `Brain Covariance Selection: Better Individual Functional Connectivity Models Using Population Prior `_'. The algorithm used is based on what is described in: Jean Honorio and Dimitris Samaras. "Simultaneous and Group-Sparse Multi-Task Learning of Gaussian Graphical Models". http://arxiv.org/abs/1207.4255. """ def __init__(self, alpha=0.1, tol=1e-3, max_iter=10, verbose=0, memory=Memory(cachedir=None), memory_level=0): self.alpha = alpha self.tol = tol self.max_iter = max_iter self.memory = memory self.memory_level = memory_level self.verbose = verbose def fit(self, subjects, y=None): """Fits the group sparse precision model according to the given training data and parameters. Parameters ---------- subjects : list of numpy.ndarray with shapes (n_samples, n_features) input subjects. Each subject is a 2D array, whose columns contain signals. Sample number can vary from subject to subject, but all subjects must have the same number of features (i.e. of columns). Returns ------- self : GroupSparseCovariance instance the object itself. Useful for chaining operations. 
""" logger.log("Computing covariance matrices", verbose=self.verbose) self.covariances_, n_samples = empirical_covariances( subjects, assume_centered=False) logger.log("Computing precision matrices", verbose=self.verbose) ret = self._cache(_group_sparse_covariance)( self.covariances_, n_samples, self.alpha, tol=self.tol, max_iter=self.max_iter, verbose=max(0, self.verbose - 1), debug=False) self.precisions_ = ret return self def empirical_covariances(subjects, assume_centered=False, standardize=False): """Compute empirical covariances for several signals. Parameters ---------- subjects : list of numpy.ndarray, shape for each (n_samples, n_features) input subjects. Each subject is a 2D array, whose columns contain signals. Sample number can vary from subject to subject, but all subjects must have the same number of features (i.e. of columns). assume_centered : bool, optional if True, assume that all input signals are centered. This slightly decreases computation time by avoiding useless computation. standardize : bool, optional if True, set every signal variance to one before computing their covariance matrix (i.e. compute a correlation matrix). Returns ------- emp_covs : numpy.ndarray, shape : (feature number, feature number, subject number) empirical covariances. n_samples : numpy.ndarray, shape: (subject number,) number of samples for each subject. dtype is np.float. """ if not hasattr(subjects, "__iter__"): raise ValueError("'subjects' input argument must be an iterable. " "You provided {0}".format(subjects.__class__)) n_subjects = [s.shape[1] for s in subjects] if len(set(n_subjects)) > 1: raise ValueError("All subjects must have the same number of " "features.\nYou provided: {0}".format(str(n_subjects)) ) n_subjects = len(subjects) n_features = subjects[0].shape[1] # Enable to change dtype here because depending on user, conversion from # single precision to double will be required or not. emp_covs = np.empty((n_features, n_features, n_subjects), order="F") for k, s in enumerate(subjects): if standardize: s = s / s.std(axis=0) # copy on purpose M = empirical_covariance(s, assume_centered=assume_centered) # Force matrix symmetry, for numerical stability # of _group_sparse_covariance emp_covs[..., k] = M + M.T emp_covs /= 2 n_samples = np.asarray([s.shape[0] for s in subjects], dtype=np.float) return emp_covs, n_samples def group_sparse_scores(precisions, n_samples, emp_covs, alpha, duality_gap=False, debug=False): """Compute scores used by group_sparse_covariance. The log-likelihood of a given list of empirical covariances / precisions. Parameters ---------- precisions : numpy.ndarray, shape (n_features, n_features, n_subjects) estimated precisions. n_samples : array-like, shape (n_subjects,) number of samples used in estimating each subject in "precisions". n_samples.sum() must be equal to 1. emp_covs : numpy.ndarray, shape (n_features, n_features, n_subjects) empirical covariance matrix alpha : float regularization parameter duality_gap : bool, optional if True, also returns a duality gap upper bound. debug : bool, optional if True, some consistency checks are performed to help solving numerical problems Returns ------- log_lik : float log-likelihood of precisions on the given covariances. This is the opposite of the loss function, without the regularization term objective : float value of objective function. This is the value minimized by group_sparse_covariance() duality_gap : float duality gap upper bound. 
The returned bound is tight: it vanishes for the optimal precision matrices """ n_features, _, n_subjects = emp_covs.shape log_lik = 0 for k in range(n_subjects): log_lik_k = - np.sum(emp_covs[..., k] * precisions[..., k]) log_lik_k += fast_logdet(precisions[..., k]) log_lik += n_samples[k] * log_lik_k l2 = np.sqrt((precisions ** 2).sum(axis=-1)) l12 = l2.sum() - np.diag(l2).sum() # Do not count diagonal terms objective = alpha * l12 - log_lik ret = (log_lik, objective) # Compute duality gap if requested if duality_gap is True: A = np.empty(precisions.shape, dtype=np.float, order="F") for k in range(n_subjects): # TODO: can be computed more efficiently using W_inv. See # Friedman, Jerome, Trevor Hastie, and Robert Tibshirani. # 'Sparse Inverse Covariance Estimation with the Graphical Lasso'. # Biostatistics 9, no. 3 (1 July 2008): 432-441. precisions_inv = scipy.linalg.inv(precisions[..., k]) if debug: assert is_spd(precisions_inv) A[..., k] = n_samples[k] * (precisions_inv - emp_covs[..., k]) if debug: np.testing.assert_almost_equal(A[..., k], A[..., k].T) # Project A on the set of feasible points alpha_max = np.sqrt((A ** 2).sum(axis=-1)) mask = alpha_max > alpha for k in range(A.shape[-1]): A[mask, k] *= alpha / alpha_max[mask] # Set zeros on diagonals. Essential to get an always positive # duality gap. A[..., k].flat[::A.shape[0] + 1] = 0 alpha_max = np.sqrt((A ** 2).sum(axis=-1)).max() dual_obj = 0 # dual objective for k in range(n_subjects): B = emp_covs[..., k] + A[..., k] / n_samples[k] dual_obj += n_samples[k] * (n_features + fast_logdet(B)) # The previous computation can lead to a non-feasible point, because # one of the Bs may not be positive definite. # Use another value in this case, that ensure positive definiteness # of B. The upper bound on the duality gap is not tight in the # following, but is smaller than infinity, which is better in any case. if not np.isfinite(dual_obj): for k in range(n_subjects): A[..., k] = - n_samples[k] * emp_covs[..., k] A[..., k].flat[::A.shape[0] + 1] = 0 alpha_max = np.sqrt((A ** 2).sum(axis=-1)).max() # the second value (0.05 is arbitrary: positive in ]0,1[) gamma = min((alpha / alpha_max, 0.05)) dual_obj = 0 for k in range(n_subjects): # add gamma on the diagonal B = ((1. - gamma) * emp_covs[..., k] + gamma * np.eye(emp_covs.shape[0])) dual_obj += n_samples[k] * (n_features + fast_logdet(B)) gap = objective - dual_obj ret = ret + (gap,) return ret def group_sparse_covariance_path(train_subjs, alphas, test_subjs=None, tol=1e-3, max_iter=10, precisions_init=None, verbose=0, debug=False, probe_function=None): """Get estimated precision matrices for different values of alpha. Calling this function is faster than calling group_sparse_covariance() repeatedly, because it makes use of the first result to initialize the next computation. Parameters ---------- train_subjs : list of numpy.ndarray list of signals. alphas : list of float values of alpha to use. Best results for sorted values (decreasing) test_subjs : list of numpy.ndarray list of signals, independent from those in train_subjs, on which to compute a score. If None, no score is computed. verbose : int verbosity level tol, max_iter, debug, precisions_init : Passed to group_sparse_covariance(). See the corresponding docstring for details. probe_function : callable This value is called before the first iteration and after each iteration. If it returns True, then optimization is stopped prematurely. 
The function is given as arguments (in that order): - empirical covariances (ndarray), - number of samples for each subject (ndarray), - regularization parameter (float) - maximum iteration number (integer) - tolerance (float) - current iteration number (integer). -1 means "before first iteration" - current value of precisions (ndarray). - previous value of precisions (ndarray). None before first iteration. Returns ------- precisions_list : list of numpy.ndarray estimated precisions for each value of alpha provided. The length of this list is the same as that of parameter "alphas". scores : list of float for each estimated precision, score obtained on the test set. Output only if test_subjs is not None. """ train_covs, train_n_samples = empirical_covariances( train_subjs, assume_centered=False, standardize=True) scores = [] precisions_list = [] for alpha in alphas: precisions = _group_sparse_covariance( train_covs, train_n_samples, alpha, tol=tol, max_iter=max_iter, precisions_init=precisions_init, verbose=max(0, verbose - 1), debug=debug, probe_function=probe_function) # Compute log-likelihood if test_subjs is not None: test_covs, _ = empirical_covariances( test_subjs, assume_centered=False, standardize=True) scores.append(group_sparse_scores(precisions, train_n_samples, test_covs, 0)[0]) precisions_list.append(precisions) precisions_init = precisions if test_subjs is not None: return precisions_list, scores else: return precisions_list class EarlyStopProbe(object): """Callable probe for early stopping in GroupSparseCovarianceCV. Stop optimizing as soon as the score on the test set starts decreasing. An instance of this class is supposed to be passed in the probe_function argument of group_sparse_covariance(). """ def __init__(self, test_subjs, verbose=0): self.test_emp_covs, _ = empirical_covariances(test_subjs) self.verbose = verbose def __call__(self, emp_covs, n_samples, alpha, max_iter, tol, iter_n, omega, prev_omega): log_lik, _ = group_sparse_scores( omega, n_samples, self.test_emp_covs, alpha) if iter_n > -1 and self.last_log_lik > log_lik: logger.log("Log-likelihood on test set is decreasing. " "Stopping at iteration %d" % iter_n, verbose=self.verbose) return True self.last_log_lik = log_lik class GroupSparseCovarianceCV(BaseEstimator, CacheMixin): """Sparse inverse covariance w/ cross-validated choice of the parameter. A cross-validated value for the regularization parameter is first determined using several calls to group_sparse_covariance. Then a final optimization is run to get a value for the precision matrices, using the selected value of the parameter. Different values of tolerance and of maximum iteration number can be used in these two phases (see the tol and tol_cv keyword below for example). Parameters ---------- alphas : integer initial number of points in the grid of regularization parameter values. Each step of grid refinement adds that many points as well. n_refinements : integer number of times the initial grid should be refined. cv : integer number of folds in a K-fold cross-validation scheme. If None is passed, defaults to 3. tol_cv : float tolerance used to get the optimal alpha value. It has the same meaning as the `tol` parameter in :func:`group_sparse_covariance`. max_iter_cv : integer maximum number of iterations for each optimization, during the alpha- selection phase. tol : float tolerance used during the final optimization for determining precision matrices value. max_iter : integer maximum number of iterations in the final optimization. 
verbose : integer verbosity level. 0 means nothing is printed to the user. n_jobs : integer maximum number of cpu cores to use. The number of cores actually used at the same time cannot exceed the number of folds in folding strategy (that is, the value of cv). debug : bool if True, activates some internal checks for consistency. Only useful for nilearn developers, not users. early_stopping : bool if True, reduce computation time by using a heuristic to reduce the number of iterations required to get the optimal value for alpha. Be aware that this can lead to slightly different values for the optimal alpha compared to early_stopping=False. Attributes ---------- `covariances_` : numpy.ndarray, shape (n_features, n_features, n_subjects) covariance matrices, one per subject. `precisions_` : numpy.ndarray, shape (n_features, n_features, n_subjects) precision matrices, one per subject. All matrices have the same sparsity pattern (if a coefficient is zero for a given matrix, it is also zero for every other.) `alpha_` : float penalization parameter value selected. `cv_alphas_` : list of floats all values of the penalization parameter explored. `cv_scores_` : numpy.ndarray, shape (n_alphas, n_folds) scores obtained on test set for each value of the penalization parameter explored. See also -------- GroupSparseCovariance, sklearn.covariance.GraphLassoCV Notes ----- The search for the optimal penalization parameter (alpha) is done on an iteratively refined grid: first the cross-validated scores on a grid are computed, then a new refined grid is centered around the maximum, and so on. """ def __init__(self, alphas=4, n_refinements=4, cv=None, tol_cv=1e-2, max_iter_cv=50, tol=1e-3, max_iter=100, verbose=0, n_jobs=1, debug=False, early_stopping=True): self.alphas = alphas self.n_refinements = n_refinements self.tol_cv = tol_cv self.max_iter_cv = max_iter_cv self.cv = cv self.tol = tol self.max_iter = max_iter self.verbose = verbose self.n_jobs = n_jobs self.debug = debug self.early_stopping = early_stopping def fit(self, subjects, y=None): """Compute cross-validated group-sparse precisions. Parameters ---------- subjects : list of numpy.ndarray with shapes (n_samples, n_features) input subjects. Each subject is a 2D array, whose columns contain signals. Sample number can vary from subject to subject, but all subjects must have the same number of features (i.e. of columns.) Returns ------- self: GroupSparseCovarianceCV the object instance itself. """ # Empirical covariances emp_covs, n_samples = \ empirical_covariances(subjects, assume_centered=False) n_subjects = emp_covs.shape[2] # One cv generator per subject must be created, because each subject # can have a different number of samples from the others. 
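        # Note: with an integer cv (say cv=3) and a subject with 90 samples,
        # sklearn.cross_validation.check_cv returns a KFold(90, 3) generator
        # yielding (train, test) index arrays; zipping the per-subject
        # generators below therefore yields one train/test split per subject
        # for each fold. (The numbers here are illustrative only.)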
        cv = []
        for k in range(n_subjects):
            cv.append(sklearn.cross_validation.check_cv(
                self.cv, subjects[k], None, classifier=False))

        path = list()  # List of (alpha, scores, covs)
        n_alphas = self.alphas
        if isinstance(n_alphas, collections.Sequence):
            alphas = list(self.alphas)
            n_alphas = len(alphas)
            n_refinements = 1
        else:
            n_refinements = self.n_refinements
            alpha_1, _ = compute_alpha_max(emp_covs, n_samples)
            alpha_0 = 1e-2 * alpha_1
            alphas = np.logspace(np.log10(alpha_0), np.log10(alpha_1),
                                 n_alphas)[::-1]

        covs_init = itertools.repeat(None)
        for i in range(n_refinements):
            # Compute the cross-validated loss on the current grid
            train_test_subjs = []
            for train_test in zip(*cv):
                assert(len(train_test) == n_subjects)
                train_test_subjs.append(
                    list(zip(*[(subject[train, :], subject[test, :])
                               for subject, (train, test)
                               in zip(subjects, train_test)])))
            if self.early_stopping:
                probes = [EarlyStopProbe(test_subjs,
                                         verbose=max(0, self.verbose - 1))
                          for _, test_subjs in train_test_subjs]
            else:
                probes = itertools.repeat(None)

            this_path = Parallel(n_jobs=self.n_jobs, verbose=self.verbose)(
                delayed(group_sparse_covariance_path)(
                    train_subjs, alphas, test_subjs=test_subjs,
                    max_iter=self.max_iter_cv, tol=self.tol_cv,
                    verbose=max(0, self.verbose - 1), debug=self.debug,
                    # Warm restart is useless with early stopping.
                    precisions_init=None if self.early_stopping else prec_init,
                    probe_function=probe)
                for (train_subjs, test_subjs), prec_init, probe
                in zip(train_test_subjs, covs_init, probes))

            # this_path[i] is a tuple (precisions_list, scores):
            # - scores: scores obtained with the i-th fold, for each value
            #   of alpha.
            # - precisions_list: corresponding precision matrices, for each
            #   value of alpha.
            precisions_list, scores = list(zip(*this_path))
            # now scores[i][j] is the score for the i-th fold, j-th value of
            # alpha (analogously for precisions_list)
            precisions_list = list(zip(*precisions_list))
            scores = [np.mean(sc) for sc in zip(*scores)]
            # scores[i] is the mean score obtained for the i-th value of alpha.
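            # Illustrative shapes, assuming 3 folds and 4 alphas: this_path
            # is a 3-element list of (precisions_list, scores) pairs; after
            # the two transpositions above, precisions_list has 4 entries
            # (one per alpha, each a 3-tuple of per-fold precision arrays)
            # and scores has 4 floats (one mean score per alpha).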
            path.extend(list(zip(alphas, scores, precisions_list)))
            path = sorted(path, key=operator.itemgetter(0), reverse=True)

            # Find the maximum score (avoid using the built-in 'max' function
            # to have a fully-reproducible selection of the smallest alpha in
            # case of equality)
            best_score = -np.inf
            last_finite_idx = 0
            for index, (alpha, this_score, _) in enumerate(path):
                if this_score >= .1 / np.finfo(np.float).eps:
                    this_score = np.nan
                if np.isfinite(this_score):
                    last_finite_idx = index
                if this_score >= best_score:
                    best_score = this_score
                    best_index = index

            # Refine the grid
            if best_index == 0:
                # We do not need to go back: we have chosen
                # the highest value of alpha for which there are
                # non-zero coefficients
                alpha_1 = path[0][0]
                alpha_0 = path[1][0]
                covs_init = path[0][2]
            elif (best_index == last_finite_idx
                    and not best_index == len(path) - 1):
                # We have non-converged models on the upper bound of the
                # grid, we need to refine the grid there
                alpha_1 = path[best_index][0]
                alpha_0 = path[best_index + 1][0]
                covs_init = path[best_index][2]
            elif best_index == len(path) - 1:
                alpha_1 = path[best_index][0]
                alpha_0 = 0.01 * path[best_index][0]
                covs_init = path[best_index][2]
            else:
                alpha_1 = path[best_index - 1][0]
                alpha_0 = path[best_index + 1][0]
                covs_init = path[best_index - 1][2]

            alphas = np.logspace(np.log10(alpha_1), np.log10(alpha_0),
                                 len(alphas) + 2)
            alphas = alphas[1:-1]
            if n_refinements > 1:
                logger.log("[GroupSparseCovarianceCV] Done refinement "
                           "% 2i out of %i" % (i + 1, n_refinements),
                           verbose=self.verbose)

        path = list(zip(*path))
        cv_scores_ = list(path[1])
        alphas = list(path[0])

        self.cv_scores_ = np.array(cv_scores_)
        self.alpha_ = alphas[best_index]
        self.cv_alphas_ = alphas

        # Finally, fit the model with the selected alpha
        logger.log("Final optimization", verbose=self.verbose)
        self.covariances_ = emp_covs
        self.precisions_ = _group_sparse_covariance(
            emp_covs, n_samples, self.alpha_, tol=self.tol,
            max_iter=self.max_iter, verbose=max(0, self.verbose - 1),
            debug=self.debug)
        return self

nilearn/connectome/tests/__init__.py

nilearn/connectome/tests/test_group_sparse_cov.py

from nose.tools import assert_equal, assert_true, assert_raises
import numpy as np
from nilearn._utils.testing import generate_group_sparse_gaussian_graphs
from nilearn.connectome.group_sparse_cov import (group_sparse_covariance,
                                                 group_sparse_scores)
from nilearn.connectome import GroupSparseCovariance, GroupSparseCovarianceCV


def test_group_sparse_covariance():
    # Run in debug mode: this should not fail. Without debug mode, the cost
    # must decrease.
    signals, _, _ = generate_group_sparse_gaussian_graphs(
        density=0.1, n_subjects=5, n_features=10,
        min_n_samples=100, max_n_samples=151,
        random_state=np.random.RandomState(0))

    alpha = 0.1

    # These executions must hit the tolerance limit
    emp_covs, omega = group_sparse_covariance(signals, alpha, max_iter=20,
                                              tol=1e-2, debug=True, verbose=0)
    emp_covs, omega2 = group_sparse_covariance(signals, alpha, max_iter=20,
                                               tol=1e-2, debug=True, verbose=0)
    np.testing.assert_almost_equal(omega, omega2, decimal=4)

    class Probe(object):
        def __init__(self):
            self.objective = []

        def __call__(self, emp_covs, n_samples, alpha, max_iter, tol, n,
                     omega, omega_diff):
            if n >= 0:
                _, objective = group_sparse_scores(omega, n_samples,
                                                   emp_covs, alpha)
                self.objective.append(objective)

    # Use a probe to test for number of iterations and decreasing objective.
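    # With tol=None the tolerance-based stopping criterion is disabled, so
    # the solver should run for exactly max_iter iterations and the probe
    # should record one objective value per iteration.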
probe = Probe() emp_covs, omega = group_sparse_covariance( signals, alpha, max_iter=4, tol=None, verbose=0, probe_function=probe) objective = probe.objective # check number of iterations assert_equal(len(objective), 4) # np.testing.assert_array_less is a strict comparison. # Zeros can occur in np.diff(objective). assert_true(np.all(np.diff(objective) <= 0)) assert_equal(omega.shape, (10, 10, 5)) # Test input argument checking assert_raises(ValueError, group_sparse_covariance, signals, "") assert_raises(ValueError, group_sparse_covariance, 1, alpha) assert_raises(ValueError, group_sparse_covariance, [np.ones((2, 2)), np.ones((2, 3))], alpha) # Check consistency between classes gsc1 = GroupSparseCovarianceCV(alphas=4, tol=1e-1, max_iter=20, verbose=0, early_stopping=True) gsc1.fit(signals) gsc2 = GroupSparseCovariance(alpha=gsc1.alpha_, tol=1e-1, max_iter=20, verbose=0) gsc2.fit(signals) np.testing.assert_almost_equal(gsc1.precisions_, gsc2.precisions_, decimal=4) PKHV::6nilearn/connectome/tests/test_connectivity_matrices.pyimport copy import warnings from math import sqrt, exp, log, cosh, sinh import numpy as np from scipy import linalg from numpy.testing import assert_array_almost_equal, assert_array_equal from nose.tools import assert_raises, assert_equal, assert_true from sklearn.utils import check_random_state from sklearn.covariance import EmpiricalCovariance, LedoitWolf from nilearn._utils.extmath import is_spd from nilearn.connectome.connectivity_matrices import ( _check_square, _check_spd, _map_eigenvalues, _form_symmetric, _geometric_mean, sym_to_vec, _prec_to_partial, ConnectivityMeasure) def grad_geometric_mean(mats, init=None, max_iter=10, tol=1e-7): """Return the norm of the covariant derivative at each iteration step of geometric_mean. See its docstring for details. Norm is intrinsic norm on the tangent space of the manifold of symmetric positive definite matrices. Returns ------- grad_norm : list of float Norm of the covariant derivative in the tangent space at each step. """ mats = np.array(mats) # Initialization if init is None: gmean = np.mean(mats, axis=0) else: gmean = init norm_old = np.inf step = 1. grad_norm = [] for n in range(max_iter): # Computation of the gradient vals_gmean, vecs_gmean = linalg.eigh(gmean) gmean_inv_sqrt = _form_symmetric(np.sqrt, 1. / vals_gmean, vecs_gmean) whitened_mats = [gmean_inv_sqrt.dot(mat).dot(gmean_inv_sqrt) for mat in mats] logs = [_map_eigenvalues(np.log, w_mat) for w_mat in whitened_mats] logs_mean = np.mean(logs, axis=0) # Covariant derivative is # - gmean.dot(logms_mean) norm = np.linalg.norm(logs_mean) # Norm of the covariant derivative on # the tangent space at point gmean # Update of the minimizer vals_log, vecs_log = linalg.eigh(logs_mean) gmean_sqrt = _form_symmetric(np.sqrt, vals_gmean, vecs_gmean) gmean = gmean_sqrt.dot( _form_symmetric(np.exp, vals_log * step, vecs_log)).dot(gmean_sqrt) # Update the norm and the step size if norm < norm_old: norm_old = norm if norm > norm_old: step = step / 2. norm = norm_old grad_norm.append(norm / gmean.size) if tol is not None and norm / gmean.size < tol: break return grad_norm def test_check_square(): non_square = np.ones((2, 3)) assert_raises(ValueError, _check_square, non_square) def test_check_spd(): non_sym = np.array([[0, 1], [0, 0]]) assert_raises(ValueError, _check_spd, non_sym) non_spd = np.ones((3, 3)) assert_raises(ValueError, _check_spd, non_spd) def test_map_eigenvalues(): # Test on exp map sym = np.ones((2, 2)) sym_exp = exp(1.) 
* np.array([[cosh(1.), sinh(1.)], [sinh(1.), cosh(1.)]]) assert_array_almost_equal(_map_eigenvalues(np.exp, sym), sym_exp) # Test on sqrt map spd_sqrt = np.array([[2., -1., 0.], [-1., 2., -1.], [0., -1., 2.]]) spd = spd_sqrt.dot(spd_sqrt) assert_array_almost_equal(_map_eigenvalues(np.sqrt, spd), spd_sqrt) # Test on log map spd = np.array([[1.25, 0.75], [0.75, 1.25]]) spd_log = np.array([[0., log(2.)], [log(2.), 0.]]) assert_array_almost_equal(_map_eigenvalues(np.log, spd), spd_log) def test_geometric_mean_couple(): n_features = 7 spd1 = np.ones((n_features, n_features)) spd1 = spd1.dot(spd1) + n_features * np.eye(n_features) spd2 = np.tril(np.ones((n_features, n_features))) spd2 = spd2.dot(spd2.T) vals_spd2, vecs_spd2 = np.linalg.eigh(spd2) spd2_sqrt = _form_symmetric(np.sqrt, vals_spd2, vecs_spd2) spd2_inv_sqrt = _form_symmetric(np.sqrt, 1. / vals_spd2, vecs_spd2) geo = spd2_sqrt.dot(_map_eigenvalues(np.sqrt, spd2_inv_sqrt.dot(spd1).dot( spd2_inv_sqrt))).dot(spd2_sqrt) assert_array_almost_equal(_geometric_mean([spd1, spd2]), geo) def test_geometric_mean_diagonal(): n_matrices = 20 n_features = 5 diags = [] for k in range(n_matrices): diag = np.eye(n_features) diag[k % n_features, k % n_features] = 1e4 + k diag[(n_features - 1) // (k + 1), (n_features - 1) // (k + 1)] = \ (k + 1) * 1e-4 diags.append(diag) geo = np.prod(np.array(diags), axis=0) ** (1 / float(len(diags))) assert_array_almost_equal(_geometric_mean(diags), geo) def test_geometric_mean_geodesic(): n_matrices = 10 n_features = 6 sym = np.arange(n_features) / np.linalg.norm(np.arange(n_features)) sym = sym * sym[:, np.newaxis] times = np.arange(n_matrices) non_singular = np.eye(n_features) non_singular[1:3, 1:3] = np.array([[-1, -.5], [-.5, -1]]) spds = [] for time in times: spds.append(non_singular.dot(_map_eigenvalues(np.exp, time * sym)).dot( non_singular.T)) gmean = non_singular.dot(_map_eigenvalues(np.exp, times.mean() * sym)).dot( non_singular.T) assert_array_almost_equal(_geometric_mean(spds), gmean) def random_diagonal(p, v_min=1., v_max=2., random_state=0): """Generate a random diagonal matrix. Parameters ---------- p : int The first dimension of the array. v_min : float, optional (default to 1.) Minimal element. v_max : float, optional (default to 2.) Maximal element. random_state : int or numpy.random.RandomState instance, optional random number generator, or seed. Returns ------- output : numpy.ndarray, shape (p, p) A diagonal matrix with the given minimal and maximal elements. """ random_state = check_random_state(random_state) diag = random_state.rand(p) * (v_max - v_min) + v_min diag[diag == np.amax(diag)] = v_max diag[diag == np.amin(diag)] = v_min return np.diag(diag) def random_spd(p, eig_min, cond, random_state=0): """Generate a random symmetric positive definite matrix. Parameters ---------- p : int The first dimension of the array. eig_min : float Minimal eigenvalue. cond : float Condition number, defined as the ratio of the maximum eigenvalue to the minimum one. random_state : int or numpy.random.RandomState instance, optional random number generator, or seed. Returns ------- ouput : numpy.ndarray, shape (p, p) A symmetric positive definite matrix with the given minimal eigenvalue and condition number. 
""" random_state = check_random_state(random_state) mat = random_state.randn(p, p) unitary, _ = linalg.qr(mat) diag = random_diagonal(p, v_min=eig_min, v_max=cond * eig_min, random_state=random_state) return unitary.dot(diag).dot(unitary.T) def random_non_singular(p, sing_min=1., sing_max=2., random_state=0): """Generate a random nonsingular matrix. Parameters ---------- p : int The first dimension of the array. sing_min : float, optional (default to 1.) Minimal singular value. sing_max : float, optional (default to 2.) Maximal singular value. random_state : int or numpy.random.RandomState instance, optional random number generator, or seed. Returns ------- output : numpy.ndarray, shape (p, p) A nonsingular matrix with the given minimal and maximal singular values. """ random_state = check_random_state(random_state) diag = random_diagonal(p, v_min=sing_min, v_max=sing_max, random_state=random_state) mat1 = random_state.randn(p, p) mat2 = random_state.randn(p, p) unitary1, _ = linalg.qr(mat1) unitary2, _ = linalg.qr(mat2) return unitary1.dot(diag).dot(unitary2.T) def test_geometric_mean_properties(): n_matrices = 40 n_features = 15 spds = [] for k in range(n_matrices): spds.append(random_spd(n_features, eig_min=1., cond=10., random_state=0)) input_spds = copy.copy(spds) gmean = _geometric_mean(spds) # Generic assert_true(isinstance(spds, list)) for spd, input_spd in zip(spds, input_spds): assert_array_equal(spd, input_spd) assert(is_spd(gmean, decimal=7)) # Invariance under reordering spds.reverse() spds.insert(0, spds[1]) spds.pop(2) assert_array_almost_equal(_geometric_mean(spds), gmean) # Invariance under congruent transformation non_singular = random_non_singular(n_features, random_state=0) spds_cong = [non_singular.dot(spd).dot(non_singular.T) for spd in spds] assert_array_almost_equal(_geometric_mean(spds_cong), non_singular.dot(gmean).dot(non_singular.T)) # Invariance under inversion spds_inv = [linalg.inv(spd) for spd in spds] init = linalg.inv(np.mean(spds, axis=0)) assert_array_almost_equal(_geometric_mean(spds_inv, init=init), linalg.inv(gmean)) # Gradient norm is decreasing grad_norm = grad_geometric_mean(spds, tol=1e-20) difference = np.diff(grad_norm) assert_true(np.amax(difference) <= 0.) # Check warning if gradient norm in the last step is less than # tolerance max_iter = 1 tol = 1e-20 with warnings.catch_warnings(record=True) as w: warnings.simplefilter("always") gmean = _geometric_mean(spds, max_iter=max_iter, tol=tol) assert_equal(len(w), 1) grad_norm = grad_geometric_mean(spds, max_iter=max_iter, tol=tol) assert_equal(len(grad_norm), max_iter) assert_true(grad_norm[-1] > tol) # Evaluate convergence. 
A warning is printed if tolerance is not reached for p in [.5, 1.]: # proportion of badly conditionned matrices spds = [] for k in range(int(p * n_matrices)): spds.append(random_spd(n_features, eig_min=1e-2, cond=1e6, random_state=0)) for k in range(int(p * n_matrices), n_matrices): spds.append(random_spd(n_features, eig_min=1., cond=10., random_state=0)) if p < 1: max_iter = 30 else: max_iter = 60 gmean = _geometric_mean(spds, max_iter=max_iter, tol=1e-5) def test_geometric_mean_errors(): n_features = 5 # Non square input matrix mat1 = np.ones((n_features, n_features + 1)) assert_raises(ValueError, _geometric_mean, [mat1]) # Input matrices of different shapes mat1 = np.eye(n_features) mat2 = np.ones((n_features + 1, n_features + 1)) assert_raises(ValueError, _geometric_mean, [mat1, mat2]) # Non spd input matrix assert_raises(ValueError, _geometric_mean, [mat2]) def test_sym_to_vec(): sym = np.ones((3, 3)) vec = np.array([1., sqrt(2), 1., sqrt(2), sqrt(2), 1.]) assert_array_almost_equal(sym_to_vec(sym), vec) def test_prec_to_partial(): prec = np.array([[2., -1., 1.], [-1., 2., -1.], [1., -1., 1.]]) partial = np.array([[1., .5, -sqrt(2.) / 2.], [.5, 1., sqrt(2.) / 2.], [-sqrt(2.) / 2., sqrt(2.) / 2., 1.]]) assert_array_almost_equal(_prec_to_partial(prec), partial) def test_connectivity_measure_errors(): # Raising error for input subjects not iterable conn_measure = ConnectivityMeasure() assert_raises(ValueError, conn_measure.fit, 1.) # Raising error for input subjects not 2D numpy.ndarrays assert_raises(ValueError, conn_measure.fit, [np.ones((100, 40)), np.ones((10,))]) # Raising error for input subjects with different number of features assert_raises(ValueError, conn_measure.fit, [np.ones((100, 40)), np.ones((100, 41))]) def test_connectivity_measure_outputs(): n_subjects = 10 n_features = 49 n_samples = 200 # Generate signals and compute covariances emp_covs = [] ledoit_covs = [] signals = [] random_state = check_random_state(0) ledoit_estimator = LedoitWolf() for k in range(n_subjects): signal = random_state.randn(n_samples, n_features) signals.append(signal) signal -= signal.mean(axis=0) emp_covs.append((signal.T).dot(signal) / n_samples) ledoit_covs.append(ledoit_estimator.fit(signal).covariance_) kinds = ["correlation", "tangent", "precision", "partial correlation"] # Check outputs properties for cov_estimator, covs in zip([EmpiricalCovariance(), LedoitWolf()], [emp_covs, ledoit_covs]): input_covs = copy.copy(covs) for kind in kinds: conn_measure = ConnectivityMeasure(kind=kind, cov_estimator=cov_estimator) connectivities = conn_measure.fit_transform(signals) # Generic assert_true(isinstance(connectivities, np.ndarray)) assert_equal(len(connectivities), len(covs)) for k, cov_new in enumerate(connectivities): assert_array_equal(input_covs[k], covs[k]) assert(is_spd(covs[k], decimal=7)) # Positive definiteness if expected and output value checks if kind == "tangent": assert_array_almost_equal(cov_new, cov_new.T) gmean_sqrt = _map_eigenvalues(np.sqrt, conn_measure.mean_) assert(is_spd(gmean_sqrt, decimal=7)) assert(is_spd(conn_measure.whitening_, decimal=7)) assert_array_almost_equal(conn_measure.whitening_.dot( gmean_sqrt), np.eye(n_features)) assert_array_almost_equal(gmean_sqrt.dot( _map_eigenvalues(np.exp, cov_new)).dot(gmean_sqrt), covs[k]) elif kind == "precision": assert(is_spd(cov_new, decimal=7)) assert_array_almost_equal(cov_new.dot(covs[k]), np.eye(n_features)) elif kind == "correlation": assert(is_spd(cov_new, decimal=7)) d = np.sqrt(np.diag(np.diag(covs[k]))) if 
cov_estimator == EmpiricalCovariance(): assert_array_almost_equal(d.dot(cov_new).dot(d), covs[k]) assert_array_almost_equal(np.diag(cov_new), np.ones((n_features))) elif kind == "partial correlation": prec = linalg.inv(covs[k]) d = np.sqrt(np.diag(np.diag(prec))) assert_array_almost_equal(d.dot(cov_new).dot(d), -prec + 2 * np.diag(np.diag(prec)))

nilearn/regions/__init__.py

"""
The :mod:`nilearn.regions` module includes procedures for extracting regions
from 4D statistical or atlas maps, and the associated functions.
"""
from .region_extractor import connected_regions, RegionExtractor
from .signal_extraction import (
    img_to_signals_labels, signals_to_img_labels,
    img_to_signals_maps, signals_to_img_maps,
)

__all__ = [
    'connected_regions', 'RegionExtractor',
    'img_to_signals_labels', 'signals_to_img_labels',
    'img_to_signals_maps', 'signals_to_img_maps',
]

nilearn/regions/region_extractor.py

"""
Better brain parcellations for Region of Interest analysis
"""

import numbers
import numpy as np

from scipy.ndimage import label
from scipy.stats import scoreatpercentile

from sklearn.externals.joblib import Memory

from .. import masking
from ..input_data import NiftiMapsMasker
from .._utils import check_niimg, check_niimg_4d
from ..image import new_img_like, resample_img
from ..image.image import _smooth_array, threshold_img
from .._utils.niimg_conversions import concat_niimgs, _check_same_fov
from .._utils.niimg import _safe_get_data
from .._utils.compat import _basestring
from .._utils.ndimage import _peak_local_max
from .._utils.segmentation import _random_walker


def _threshold_maps_ratio(maps_img, threshold):
    """ Automatic thresholding of atlas maps image.

    Interprets the given threshold as a ratio of the total number of voxels
    in the brain volume: across all maps together, only the corresponding
    number of most intense nonzero voxels is kept.

    Parameters
    ----------
    maps_img: Niimg-like object
        an image of brain atlas maps.
    threshold: float
        Used as a ratio to n_voxels to determine the number of voxels to
        keep when thresholding the image. The value should be positive and
        not exceed the number of maps (i.e. n_maps in the 4th dimension).

    Returns
    -------
    threshold_maps_img: Nifti1Image
        the thresholded image.
    """
    maps = check_niimg(maps_img)
    n_maps = maps.shape[-1]
    if not isinstance(threshold, numbers.Real) or threshold <= 0 \
            or threshold > n_maps:
        raise ValueError("threshold given as a ratio to the number of "
                         "voxels must be a positive real number not greater "
                         "than the total number of maps, i.e. n_maps={0}. "
                         "You provided {1}".format(n_maps, threshold))
    else:
        ratio = threshold

    maps_data = np.nan_to_num(maps.get_data())

    abs_maps = np.abs(maps_data)
    # thresholding
    cutoff_threshold = scoreatpercentile(
        abs_maps, 100. - (100. / n_maps) * ratio)
    maps_data[abs_maps < cutoff_threshold] = 0.

    threshold_maps_img = new_img_like(maps, maps_data)

    return threshold_maps_img
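To make the thresholding rule above concrete, here is a minimal,
self-contained sketch of the same percentile computation on plain numpy
arrays (the toy array shapes and the expected keep-count are illustrative
assumptions, not part of the module):

import numpy as np
from scipy.stats import scoreatpercentile

rng = np.random.RandomState(0)
maps_data = rng.randn(1000, 4)          # 4 toy "maps" over 1000 voxels
n_maps, ratio = maps_data.shape[-1], 1.

abs_maps = np.abs(maps_data)
# Keep the (100. / n_maps) * ratio percent most intense entries over all
# maps together; with ratio=1. this is roughly one map's worth of voxels.
cutoff = scoreatpercentile(abs_maps, 100. - (100. / n_maps) * ratio)
maps_data[abs_maps < cutoff] = 0.
print((maps_data != 0).sum())           # roughly 1000 nonzero entries remain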
def connected_regions(maps_img, min_region_size=1350,
                      extract_type='local_regions', smoothing_fwhm=6,
                      mask_img=None):
    """ Extract connected brain regions into separate, single-region images.

    Note: the region size is defined in mm^3. See the documentation for
    more details.

    .. versionadded:: 0.2

    Parameters
    ----------
    maps_img: Niimg-like object
        an image of brain activation or atlas maps to be extracted into a
        set of separate brain regions.

    min_region_size: int, default 1350 mm^3, optional
        Minimum volume in mm^3 for a region to be kept. For example, if the
        voxel size is 3x3x3 mm then the volume of a voxel is 27 mm^3; the
        default of 1350 mm^3 therefore corresponds to a minimum size of
        1350 / 27 = 50 voxels.

    extract_type: str {'connected_components', 'local_regions'} \
        default 'local_regions', optional
        If 'connected_components', each connected component in the image is
        labelled and extracted automatically. If 'local_regions', local
        maxima define seed markers, and a random walker segmentation
        algorithm is then run on these markers to separate the regions.

    smoothing_fwhm: scalar, default 6mm, optional
        Smoothing applied to the image before seed detection, favouring the
        extraction of smoother, better-separated regions. This parameter is
        passed to `_smooth_array` and is used only for extract_type
        'local_regions'.

    mask_img: Niimg-like object, default None
        If given, the mask image is applied to the input data.
        If None, no masking is applied.

    Returns
    -------
    regions_extracted_img: Nifti1Image
        4D image of the extracted brain regions; each 3D volume contains
        exactly one separated region.

    index_of_each_map: numpy array
        array of indices mapping each extracted region back to the brain
        map it was extracted from.
    """
    all_regions_imgs = []
    index_of_each_map = []
    maps_img = check_niimg(maps_img, atleast_4d=True)
    maps = _safe_get_data(maps_img).copy()
    affine = maps_img.get_affine()
    min_region_size = min_region_size / np.prod(np.diag(abs(affine[:3])))

    allowed_extract_types = ['connected_components', 'local_regions']
    if extract_type not in allowed_extract_types:
        message = ("'extract_type' should be one of {0}. "
                   "You provided extract_type='{1}'").format(
                       allowed_extract_types, extract_type)
        raise ValueError(message)

    if mask_img is not None:
        if not _check_same_fov(maps_img, mask_img):
            mask_img = resample_img(mask_img,
                                    target_affine=maps_img.get_affine(),
                                    target_shape=maps_img.shape[:3],
                                    interpolation="nearest")
        mask_data, _ = masking._load_mask_img(mask_img)
        # Set to 0 the values which are outside of the mask
        maps[mask_data == 0.] = 0.

    for index in range(maps.shape[-1]):
        regions = []
        map_3d = maps[..., index]
        # Mark the seeds using random walker
        if extract_type == 'local_regions':
            smooth_map = _smooth_array(map_3d, affine=affine,
                                       fwhm=smoothing_fwhm)
            seeds = _peak_local_max(smooth_map)
            seeds_label, seeds_id = label(seeds)
            # Assign -1 to zero-valued voxels so they are ignored
            seeds_label[map_3d == 0.] = -1
            rw_maps = _random_walker(map_3d, seeds_label)
            # Now replace -1 with 0 to separate the regions
            rw_maps[rw_maps == -1] = 0.
            label_maps = rw_maps
        else:
            # Connected component extraction
            label_maps, n_labels = label(map_3d)

        # Compute the size of each labelled region
        labels_size = np.bincount(label_maps.ravel())
        # Set the background (label zero) size to zero
        labels_size[0] = 0.
        for label_id, label_size in enumerate(labels_size):
            if label_size > min_region_size:
                region_data = (label_maps == label_id) * map_3d
                region_img = new_img_like(maps_img, region_data)
                regions.append(region_img)

        index_of_each_map.extend([index] * len(regions))
        all_regions_imgs.extend(regions)

    regions_extracted_img = concat_niimgs(all_regions_imgs)

    return regions_extracted_img, index_of_each_map
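A minimal usage sketch (assuming some 4D atlas or maps image is already
loaded as `maps_img`; the variable names here are illustrative, not part of
the module):

# Illustrative only: split a 4D maps image into single-region volumes.
from nilearn.regions import connected_regions

regions_img, region_to_map = connected_regions(
    maps_img,                       # any 4D Niimg-like object
    min_region_size=1350,           # mm^3: about 50 voxels at 3x3x3 mm
    extract_type='local_regions')   # or 'connected_components'
# regions_img is 4D with one region per 3D volume; region_to_map[i] is the
# index of the input map from which region i was extracted.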
class RegionExtractor(NiftiMapsMasker):
    """Class for brain region extraction.

    Region extraction is a post-processing technique that automatically
    segments each brain atlas map into a set of separate activated regions,
    so that each decomposed brain map can be used for a targeted Region of
    Interest analysis.

    .. versionadded:: 0.2

    Parameters
    ----------
    maps_img: 4D Niimg-like object
        Image containing a set of whole brain atlas maps or statistically
        decomposed brain maps.

    mask_img: Niimg-like object or None, default None, optional
        Mask to be applied to input data, passed to NiftiMapsMasker.
        If None, no masking is applied.

    min_region_size: int, default 1350 mm^3, optional
        Minimum volume in mm^3 for a region to be kept. For example, if the
        voxel size is 3x3x3 mm then the volume of a voxel is 27 mm^3; the
        default of 1350 mm^3 therefore corresponds to a minimum size of
        1350 / 27 = 50 voxels.

    threshold: number, default 1., optional
        Threshold value, interpreted according to the chosen
        `thresholding_strategy`: a voxel ratio for 'ratio_n_voxels', an
        intensity value for 'img_value' or a percentile for 'percentile'.

    thresholding_strategy: str {'ratio_n_voxels', 'img_value', 'percentile'}, optional
        If the default 'ratio_n_voxels', the most intense nonzero brain
        voxels are kept across all maps; the value given in `threshold`
        indicates the ratio of voxels to keep relative to the number of
        voxels in the brain volume, n_voxels (e.g. threshold=2. means that
        all maps together keep 2. x n_voxels nonzero voxels). If set to
        'percentile', images are thresholded at the score of the given
        percentile of the data, and only the voxel intensities above this
        score are kept. If set to 'img_value', thresholding is based on the
        nonzero voxel intensities across all maps: only voxels whose
        intensities exceed the `threshold` value are kept.

    extractor: str {'connected_components', 'local_regions'} \
        default 'local_regions', optional
        If 'connected_components', each connected component in the image is
        labelled and extracted automatically. If 'local_regions', local
        maxima define seed markers, and a random walker segmentation
        algorithm is then run on these markers to separate the regions.

    standardize: bool, True or False, default False, optional
        If True, the time series signals are centered and normalized to
        zero mean and unit variance. Recommended to set to True if the
        signals are not already standardized. Passed to class
        NiftiMapsMasker.

    detrend: bool, True or False, default False, optional
        Indicates whether to detrend the time series signals or not; passed
        to nilearn.signal.clean through class NiftiMapsMasker.

    low_pass: float, default None, optional
        Low-pass filter cutoff frequency, applied to the signals via
        signal.clean. Please see the related documentation of signal.clean
        for more details. Passed to class NiftiMapsMasker.

    high_pass: float, default None, optional
        High-pass filter cutoff frequency, applied to the signals via
        signal.clean. Please see the related documentation of signal.clean
        for more details. Passed to NiftiMapsMasker.

    t_r: float, default None, optional
        Repetition time in sec. This value is given to signal.clean. Please
        see the related documentation for details. Passed to
        NiftiMapsMasker.

    memory: instance of joblib.Memory, string, default None, optional
        Used to cache the masking process. If a string is given, it is used
        as the path to the caching folder. Passed to NiftiMapsMasker.
    memory_level: int, default 0, optional
        Aggressiveness of memory caching. The higher the number, the more
        functions will be cached. Zero means no caching. Passed to
        NiftiMapsMasker.

    verbose: int, default 0, optional
        Verbosity level. Zero means that nothing is printed.

    Attributes
    ----------
    `index_` : numpy array
        array of indices mapping each separate region to the brain map it
        was extracted from.

    `regions_img_` : Nifti1Image
        List of separated regions, each region lying in one volume of a
        concatenated 4D image.

    References
    ----------
    * Abraham et al. "Region segmentation for sparse decompositions:
      better brain parcellations from rest fMRI", Sparsity Techniques in
      Medical Imaging, Sep 2014, Boston, United States. pp.8

    """
    def __init__(self, maps_img, mask_img=None, min_region_size=1350,
                 threshold=1., thresholding_strategy='ratio_n_voxels',
                 extractor='local_regions', standardize=False, detrend=False,
                 low_pass=None, high_pass=None, t_r=None,
                 memory=Memory(cachedir=None), memory_level=0, verbose=0):
        super(RegionExtractor, self).__init__(
            maps_img=maps_img, mask_img=mask_img,
            standardize=standardize, detrend=detrend, low_pass=low_pass,
            high_pass=high_pass, t_r=t_r, memory=memory,
            memory_level=memory_level, verbose=verbose)
        self.maps_img = maps_img
        self.min_region_size = min_region_size
        self.thresholding_strategy = thresholding_strategy
        self.threshold = threshold
        self.extractor = extractor

    def fit(self, X=None, y=None):
        """ Prepare the data and set up for the region extraction
        """
        maps_img = check_niimg_4d(self.maps_img)

        list_of_strategies = ['ratio_n_voxels', 'img_value', 'percentile']
        if self.thresholding_strategy not in list_of_strategies:
            message = ("'thresholding_strategy' should be "
                       "one of {0}").format(list_of_strategies)
            raise ValueError(message)

        if self.threshold is None or isinstance(self.threshold, _basestring):
            raise ValueError("The given input to threshold is not valid. "
                             "Please provide a valid number, appropriate "
                             "for one of the strategies in {0}"
                             .format(list_of_strategies))
        elif isinstance(self.threshold, numbers.Number):
            # foreground extraction
            if self.thresholding_strategy == 'ratio_n_voxels':
                threshold_maps = _threshold_maps_ratio(maps_img,
                                                       self.threshold)
            else:
                if self.thresholding_strategy == 'percentile':
                    self.threshold = "{0}%".format(self.threshold)
                threshold_maps = threshold_img(maps_img,
                                               mask_img=self.mask_img,
                                               threshold=self.threshold)

        # connected component extraction
        self.regions_img_, self.index_ = connected_regions(
            threshold_maps, self.min_region_size, self.extractor)

        self.maps_img = self.regions_img_
        super(RegionExtractor, self).fit()

        return self

nilearn/regions/signal_extraction.py

"""
Functions for extracting region-defined signals.

Two ways of defining regions are supported: as labels in a single 3D image,
or as weights in one image per region (maps).
"""
# Author: Philippe Gervais
# License: simplified BSD

import numpy as np
from scipy import linalg, ndimage

from .. import _utils
from .. import masking
from ..image import new_img_like

# FIXME: naming scheme is not really satisfying. Any better idea appreciated.


def img_to_signals_labels(imgs, labels_img, mask_img=None,
                          background_label=0, order="F"):
    """Extract region signals from image.

    This function is applicable to regions defined by labels.

    labels, imgs and mask shapes and affines must fit. This function
    performs no resampling.
Parameters ---------- imgs: 4D Niimg-like object See http://nilearn.github.io/manipulating_images/input_output.html. input images. labels_img: Niimg-like object See http://nilearn.github.io/manipulating_images/input_output.html. regions definition as labels. By default, the label zero is used to denote an absence of region. Use background_label to change it. mask_img: Niimg-like object See http://nilearn.github.io/manipulating_images/input_output.html. Mask to apply to labels before extracting signals. Every point outside the mask is considered as background (i.e. no region). background_label: number number representing background in labels_img. order: str ordering of output array ("C" or "F"). Defaults to "F". Returns ------- signals: numpy.ndarray Signals extracted from each region. One output signal is the mean of all input signals in a given region. If some regions are entirely outside the mask, the corresponding signal is zero. Shape is: (scan number, number of regions) labels: list or tuple corresponding labels for each signal. signal[:, n] was extracted from the region with label labels[n]. See also -------- nilearn.regions.signals_to_img_labels nilearn.regions.img_to_signals_maps """ labels_img = _utils.check_niimg_3d(labels_img) # TODO: Make a special case for list of strings (load one image at a # time). imgs = _utils.check_niimg_4d(imgs) target_affine = imgs.get_affine() target_shape = imgs.shape[:3] # Check shapes and affines. if labels_img.shape != target_shape: raise ValueError("labels_img and imgs shapes must be identical.") if abs(labels_img.get_affine() - target_affine).max() > 1e-9: raise ValueError("labels_img and imgs affines must be identical") if mask_img is not None: mask_img = _utils.check_niimg_3d(mask_img) if mask_img.shape != target_shape: raise ValueError("mask_img and imgs shapes must be identical.") if abs(mask_img.get_affine() - target_affine).max() > 1e-9: raise ValueError("mask_img and imgs affines must be identical") # Perform computation labels_data = labels_img.get_data() labels = list(np.unique(labels_data)) if background_label in labels: labels.remove(background_label) if mask_img is not None: mask_data = mask_img.get_data() labels_data = labels_data.copy() labels_data[np.logical_not(mask_data)] = background_label data = imgs.get_data() signals = np.ndarray((data.shape[-1], len(labels)), order=order) for n, img in enumerate(np.rollaxis(data, -1)): signals[n] = np.asarray(ndimage.measurements.mean(img, labels=labels_data, index=labels)) # Set to zero signals for missing labels. Workaround for Scipy behaviour missing_labels = set(labels) - set(np.unique(labels_data)) labels_index = dict([(l, n) for n, l in enumerate(labels)]) for l in missing_labels: signals[:, labels_index[l]] = 0 return signals, labels def signals_to_img_labels(signals, labels_img, mask_img=None, background_label=0, order="F"): """Create image from region signals defined as labels. The same region signal is used for each voxel of the corresponding 3D volume. labels_img, mask_img must have the same shapes and affines. Parameters ---------- signals: numpy.ndarray 2D array with shape: (scan number, number of regions in labels_img) labels_img: Niimg-like object See http://nilearn.github.io/manipulating_images/input_output.html. Region definitions using labels. mask_img: Niimg-like object, optional Boolean array giving voxels to process. integer arrays also accepted, In this array, zero means False, non-zero means True. background_label: number label to use for "no region". 
order: str ordering of output array ("C" or "F"). Defaults to "F". Returns ------- img: nibabel.Nifti1Image Reconstructed image. dtype is that of "signals", affine and shape are those of labels_img. See also -------- nilearn.regions.img_to_signals_labels nilearn.regions.signals_to_img_maps """ labels_img = _utils.check_niimg_3d(labels_img) signals = np.asarray(signals) target_affine = labels_img.get_affine() target_shape = labels_img.shape[:3] if mask_img is not None: mask_img = _utils.check_niimg_3d(mask_img) if mask_img.shape != target_shape: raise ValueError("mask_img and labels_img shapes " "must be identical.") if abs(mask_img.get_affine() - target_affine).max() > 1e-9: raise ValueError("mask_img and labels_img affines " "must be identical") labels_data = labels_img.get_data() labels = list(np.unique(labels_data)) if background_label in labels: labels.remove(background_label) if mask_img is not None: mask_data = mask_img.get_data() labels_data = labels_data.copy() labels_data[np.logical_not(mask_data)] = background_label # nditer is not available in numpy 1.3: using multiple loops. # Using these loops still gives a much faster code (6x) than this one: ## for n, label in enumerate(labels): ## data[labels_data == label, :] = signals[:, n] data = np.zeros(target_shape + (signals.shape[0],), dtype=signals.dtype, order=order) labels_dict = dict([(label, n) for n, label in enumerate(labels)]) # optimized for "data" in F order. for k in range(labels_data.shape[2]): for j in range(labels_data.shape[1]): for i in range(labels_data.shape[0]): label = labels_data[i, j, k] num = labels_dict.get(label, None) if num is not None: data[i, j, k, :] = signals[:, num] return new_img_like(labels_img, data, target_affine) def img_to_signals_maps(imgs, maps_img, mask_img=None): """Extract region signals from image. This function is applicable to regions defined by maps. Parameters ---------- imgs: Niimg-like object See http://nilearn.github.io/manipulating_images/input_output.html. Input images. maps_img: Niimg-like object See http://nilearn.github.io/manipulating_images/input_output.html. regions definition as maps (array of weights). shape: imgs.shape + (region number, ) mask_img: Niimg-like object See http://nilearn.github.io/manipulating_images/input_output.html. mask to apply to regions before extracting signals. Every point outside the mask is considered as background (i.e. outside of any region). order: str ordering of output array ("C" or "F"). Defaults to "F". Returns ------- region_signals: numpy.ndarray Signals extracted from each region. Shape is: (scans number, number of regions intersecting mask) labels: list maps_img[..., labels[n]] is the region that has been used to extract signal region_signals[:, n]. See also -------- nilearn.regions.img_to_signals_labels nilearn.regions.signals_to_img_maps """ maps_img = _utils.check_niimg_4d(maps_img) imgs = _utils.check_niimg_4d(imgs) affine = imgs.get_affine() shape = imgs.shape[:3] # Check shapes and affines. 
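    # (Shapes must match exactly; affines may differ by at most 1e-9 in any
    # coefficient, i.e. the images must already be on the same grid.)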
if maps_img.shape[:3] != shape: raise ValueError("maps_img and imgs shapes must be identical.") if abs(maps_img.get_affine() - affine).max() > 1e-9: raise ValueError("maps_img and imgs affines must be identical") maps_data = maps_img.get_data() if mask_img is not None: mask_img = _utils.check_niimg_3d(mask_img) if mask_img.shape != shape: raise ValueError("mask_img and imgs shapes must be identical.") if abs(mask_img.get_affine() - affine).max() > 1e-9: raise ValueError("mask_img and imgs affines must be identical") maps_data, maps_mask, labels = \ _trim_maps(maps_data, mask_img.get_data(), keep_empty=True) maps_mask = _utils.as_ndarray(maps_mask, dtype=np.bool) else: maps_mask = np.ones(maps_data.shape[:3], dtype=np.bool) labels = np.arange(maps_data.shape[-1], dtype=np.int) data = imgs.get_data() region_signals = linalg.lstsq(maps_data[maps_mask, :], data[maps_mask, :])[0].T return region_signals, list(labels) def signals_to_img_maps(region_signals, maps_img, mask_img=None): """Create image from region signals defined as maps. region_signals, mask_img must have the same shapes and affines. Parameters ---------- region_signals: numpy.ndarray signals to process, as a 2D array. A signal is a column. There must be as many signals as maps. In pseudo-code: region_signals.shape[1] == maps_img.shape[-1] maps_img: Niimg-like object See http://nilearn.github.io/manipulating_images/input_output.html. Region definitions using maps. mask_img: Niimg-like object, optional See http://nilearn.github.io/manipulating_images/input_output.html. Boolean array giving voxels to process. integer arrays also accepted, zero meaning False. Returns ------- img: nibabel.Nifti1Image Reconstructed image. affine and shape are those of maps_img. See also -------- nilearn.regions.signals_to_img_labels nilearn.regions.img_to_signals_maps """ maps_img = _utils.check_niimg_4d(maps_img) maps_data = maps_img.get_data() shape = maps_img.shape[:3] affine = maps_img.get_affine() if mask_img is not None: mask_img = _utils.check_niimg_3d(mask_img) if mask_img.shape != shape: raise ValueError("mask_img and maps_img shapes must be identical.") if abs(mask_img.get_affine() - affine).max() > 1e-9: raise ValueError("mask_img and maps_img affines must be " "identical.") maps_data, maps_mask, _ = _trim_maps(maps_data, mask_img.get_data(), keep_empty=True) maps_mask = _utils.as_ndarray(maps_mask, dtype=np.bool) else: maps_mask = np.ones(maps_data.shape[:3], dtype=np.bool) assert(maps_mask.shape == maps_data.shape[:3]) data = np.dot(region_signals, maps_data[maps_mask, :].T) return masking.unmask(data, new_img_like(maps_img, maps_mask, affine)) def _trim_maps(maps, mask, keep_empty=False, order="F"): """Crop maps using a mask. No consistency check is performed (esp. on affine). Every required check must be performed before calling this function. Parameters ---------- maps: numpy.ndarray Set of maps, defining some regions. mask: numpy.ndarray Definition of a mask. The shape must match that of a single map. keep_empty: bool If False, maps that lie completely outside the mask are dropped from the output. If True, they are kept, meaning that maps that are completely zero can occur in the output. order: "F" or "C" Ordering of the output maps array (trimmed_maps). Returns ------- trimmed_maps: numpy.ndarray New set of maps, computed as intersection of each input map and mask. Empty maps are discarded if keep_empty is False, thus the number of output maps is not necessarily the same as the number of input maps. 
        shape: mask.shape + (number of output maps,). Data ordering depends
        on the "order" parameter.

    maps_mask: numpy.ndarray
        Union of the supports of all output maps. One non-zero value in
        this array guarantees that there is at least one output map that is
        non-zero at this voxel. shape: mask.shape. Order is always C.

    indices: numpy.ndarray
        indices of regions that have a non-empty intersection with the
        given mask. len(indices) == trimmed_maps.shape[-1]
    """
    maps = maps.copy()
    sums = abs(maps[_utils.as_ndarray(mask, dtype=np.bool), :]).sum(axis=0)

    if keep_empty:
        n_regions = maps.shape[-1]
    else:
        n_regions = (sums > 0).sum()
    trimmed_maps = np.zeros(maps.shape[:3] + (n_regions, ),
                            dtype=maps.dtype, order=order)
    # use int8 instead of np.bool for Nifti1Image
    maps_mask = np.zeros(mask.shape, dtype=np.int8)

    # iterate on maps
    p = 0
    mask = _utils.as_ndarray(mask, dtype=np.bool, order="C")
    for n, m in enumerate(np.rollaxis(maps, -1)):
        if not keep_empty and sums[n] == 0:
            continue
        trimmed_maps[mask, p] = maps[mask, n]
        maps_mask[trimmed_maps[..., p] > 0] = 1
        p += 1

    if keep_empty:
        return trimmed_maps, maps_mask, np.arange(trimmed_maps.shape[-1],
                                                  dtype=np.int)
    else:
        return trimmed_maps, maps_mask, np.where(sums > 0)[0]

nilearn-0.2.5.dist-info/DESCRIPTION.rst

.. -*- mode: rst -*-

.. image:: https://travis-ci.org/nilearn/nilearn.svg?branch=master
    :target: https://travis-ci.org/nilearn/nilearn
    :alt: Travis Build Status

.. image:: https://ci.appveyor.com/api/projects/status/github/nilearn/nilearn?branch=master&svg=true
    :target: https://ci.appveyor.com/project/nilearn-ci/nilearn
    :alt: AppVeyor Build Status

.. image:: https://coveralls.io/repos/nilearn/nilearn/badge.svg?branch=master
    :target: https://coveralls.io/r/nilearn/nilearn

nilearn
=======

Nilearn is a Python module for fast and easy statistical learning on
NeuroImaging data.

It leverages the `scikit-learn <http://scikit-learn.org>`_ Python toolbox
for multivariate statistics with applications such as predictive modelling,
classification, decoding, or connectivity analysis.

This work is made available by a community of people, amongst which the
INRIA Parietal Project Team and the scikit-learn folks, in particular
P. Gervais, A. Abraham, V. Michel, A. Gramfort, G. Varoquaux, F. Pedregosa,
B. Thirion, M. Eickenberg, C. F. Gorgolewski, D. Bzdok, L. Estève and
B. Cipollini.

Important links
===============

- Official source code repo: https://github.com/nilearn/nilearn/
- HTML documentation (stable release): http://nilearn.github.io/

Dependencies
============

The required dependencies to use the software are:

* Python >= 2.6,
* setuptools
* Numpy >= 1.6.1
* SciPy >= 0.9
* Scikit-learn >= 0.13 (Some examples require 0.14 to run)
* Nibabel >= 1.1.0

If you are using nilearn plotting functionalities or running the examples,
matplotlib >= 1.1.1 is required.

If you want to run the tests, you need nose >= 1.2.1 and coverage >= 3.6.

Install
=======

First make sure you have installed all the dependencies listed above.
Then you can install nilearn by running the following command in
a command prompt::

    pip install -U --user nilearn

More detailed instructions are available at
http://nilearn.github.io/introduction.html#installation.
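A quick way to check the installation is to print the installed version
from Python (a simple sanity check, not an official requirement)::

    python -c "import nilearn; print(nilearn.__version__)"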
Development
===========

Detailed instructions on how to contribute are available at
http://nilearn.github.io/contributing.html

nilearn-0.2.5.dist-info/metadata.json

{"classifiers": ["Intended Audience :: Science/Research", "Intended Audience :: Developers", "License :: OSI Approved", "Programming Language :: C", "Programming Language :: Python", "Topic :: Software Development", "Topic :: Scientific/Engineering", "Operating System :: Microsoft :: Windows", "Operating System :: POSIX", "Operating System :: Unix", "Operating System :: MacOS", "Programming Language :: Python :: 2", "Programming Language :: Python :: 2.6", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3.3", "Programming Language :: Python :: 3.4"], "download_url": "http://nilearn.github.io", "extensions": {"python.details": {"contacts": [{"email": "gael.varoquaux@normalesup.org", "name": "Gael Varoquaux", "role": "author"}], "document_names": {"description": "DESCRIPTION.rst"}, "project_urls": {"Home": "http://nilearn.github.io"}}}, "extras": [], "generator": "bdist_wheel (0.26.0)", "license": "new BSD", "metadata_version": "2.0", "name": "nilearn", "run_requires": [{"requires": ["nibabel (>=1.1.0)"]}], "summary": "Statistical learning for neuroimaging in Python", "version": "0.2.5"}

nilearn-0.2.5.dist-info/top_level.txt

nilearn

nilearn-0.2.5.dist-info/WHEEL

Wheel-Version: 1.0
Generator: bdist_wheel (0.26.0)
Root-Is-Purelib: true
Tag: py2-none-any
Tag: py3-none-any

nilearn-0.2.5.dist-info/METADATA

Metadata-Version: 2.0
Name: nilearn
Version: 0.2.5
Summary: Statistical learning for neuroimaging in Python
Home-page: http://nilearn.github.io
Author: Gael Varoquaux
Author-email: gael.varoquaux@normalesup.org
License: new BSD
Download-URL: http://nilearn.github.io
Platform: UNKNOWN
Classifier: Intended Audience :: Science/Research
Classifier: Intended Audience :: Developers
Classifier: License :: OSI Approved
Classifier: Programming Language :: C
Classifier: Programming Language :: Python
Classifier: Topic :: Software Development
Classifier: Topic :: Scientific/Engineering
Classifier: Operating System :: Microsoft :: Windows
Classifier: Operating System :: POSIX
Classifier: Operating System :: Unix
Classifier: Operating System :: MacOS
Classifier: Programming Language :: Python :: 2
Classifier: Programming Language :: Python :: 2.6
Classifier: Programming Language :: Python :: 2.7
Classifier: Programming Language :: Python :: 3.3
Classifier: Programming Language :: Python :: 3.4
Requires-Dist: nibabel (>=1.1.0)
Thirion, M. Eickenberg, C. F. Gorgolewski, D. Bzdok, L. Estève and B. Cipollini. Important links =============== - Official source code repo: https://github.com/nilearn/nilearn/ - HTML documentation (stable release): http://nilearn.github.io/ Dependencies ============ The required dependencies to use the software are: * Python >= 2.6, * setuptools * Numpy >= 1.6.1 * SciPy >= 0.9 * Scikit-learn >= 0.13 (Some examples require 0.14 to run) * Nibabel >= 1.1.0 If you are using nilearn plotting functionalities or running the examples, matplotlib >= 1.1.1 is required. If you want to run the tests, you need nose >= 1.2.1 and coverage >= 3.6. Install ======= First make sure you have installed all the dependencies listed above. Then you can install nilearn by running the following command in a command prompt:: pip install -U --user nilearn More detailed instructions are available at http://nilearn.github.io/introduction.html#installation. Development =========== Detailed instructions on how to contribute are available at http://nilearn.github.io/contributing.html PKHGB??nilearn-0.2.5.dist-info/RECORDnilearn/__init__.py,sha256=_YYMLSYohlAa1ZoStlftvkrfJ5P2F1zTVy_5NRMAEUg,2627 nilearn/masking.py,sha256=n4R_KJ5NoRh-DxMb00-CMpHzRHP_CNqavWcY-a_Uv0U,26795 nilearn/signal.py,sha256=QYngqQrF5ehs6qm0_F81kmmW7uWM_6y398BC1-TzDMA,17826 nilearn/version.py,sha256=i_8qsRn1KfmK1qJrqK_SJIhQl4k9j0FYCM5Nr4kNSIE,3616 nilearn/_utils/__init__.py,sha256=2kqlJuW85hu3k6xHXpLy5yjBROtrrUSYZ7lbJwmmtwo,461 nilearn/_utils/cache_mixin.py,sha256=j1Kkjm66m1SuV6SsnsUrwk19MEBsXVxDUEQPYnNqzxs,10026 nilearn/_utils/class_inspect.py,sha256=pA08sFRDJ-m99L1rp1WVxZMZKjRQvKmmcbroip4bCBs,2794 nilearn/_utils/compat.py,sha256=ZSi-Yf397f6IJX7vAYQItnuQ5iy3eUjpLqJwzCCtw-U,1400 nilearn/_utils/exceptions.py,sha256=lmm9VWinskrqFYX-IMRX-wmTXVAD5QM71JjMfp0q0Zo,2123 nilearn/_utils/extmath.py,sha256=hzTHwd1sTGc8OTKIq3KU5R4RaC-hTCi0Cm8Lpi2XGP4,2006 nilearn/_utils/logger.py,sha256=OxAFurXApUSpON2zFx5M2bh5ww4sbUtc_h63HzRaRGY,3596 nilearn/_utils/ndimage.py,sha256=et6trSQX6YprQYDf7MtfOxMDTqUoxBtkHCFPB5ay79E,4108 nilearn/_utils/niimg.py,sha256=5nuxHsrzWtjCvz72cC2ZnZovm3lxKzlWsklR2aKCy80,4748 nilearn/_utils/niimg_conversions.py,sha256=1i5vqoJTedCT-a4Z4PpiW15JfUXStP67K6dC24En_1o,17690 nilearn/_utils/numpy_conversions.py,sha256=VBBAJGl0mR-wLawfIiOl6EWRBL_BUMlOh-YFazVkfQY,6237 nilearn/_utils/param_validation.py,sha256=m7gRSO0zdubkYk5R1XVxcWuhTUV6WOJcLp9BeQO7unU,2426 nilearn/_utils/segmentation.py,sha256=Mg8PaSCLLDDMh7DLvaJNYgkwAxtF8yf6wP-Xr0pZWiw,11707 nilearn/_utils/testing.py,sha256=gHDlFb7CGezVBVS6cXNddx1OD7qV3fimv56yB-HY9NA,24431 nilearn/_utils/fixes/__init__.py,sha256=CHUesWdzex1ZLbrywCeyVb66xAUB_4Gcn78i6WgOuLw,704 nilearn/_utils/fixes/matplotlib_backports.py,sha256=N_N5BXucrkAa4YXhDDBqyTW-uyLlIHCUvYwAL8-Gn_c,1092 nilearn/_utils/fixes/sklearn_f_regression_nosparse.py,sha256=xT6tzTVfjbV-Rwz6-WCC26QxElUsn5yzxw-pTsZRCt4,1850 nilearn/connectome/__init__.py,sha256=YiiwmNZc8R_-BcSbFep6TEveCdxWOiuIy3rs6jvOr94,490 nilearn/connectome/connectivity_matrices.py,sha256=fxnf6ZjWEgOBg8JCAloRKPH-V-wage3M6rbrtIgAkJ8,13284 nilearn/connectome/group_sparse_cov.py,sha256=woEnSxoyNfGGVdM3_YOw5v5lBPylkIMpmD-6k8fQVqg,41546 nilearn/connectome/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 nilearn/connectome/tests/test_connectivity_matrices.py,sha256=igcTVlTXo-cV1vYZMr4WG4O2TZ7VBZWbnI4p9q9W0RE,15074 nilearn/connectome/tests/test_group_sparse_cov.py,sha256=YVRn0wp6TMweCB8Hc2_URBVVc-EJkwTOGfHyg5nsmdc,2832 
nilearn/datasets/__init__.py,sha256=7BDQgXzW_RniXn9WG2x4dCq0SYHTJ2M-pWbOA67Jwwg,1980 nilearn/datasets/atlas.py,sha256=jao0C_GZSF8EGFDg4M9cD0k2ZOjUGwr6xkd0XOurHuc,27881 nilearn/datasets/func.py,sha256=wUbxQHQD02j7ItcCPYwzz9yWNqEE9xxIOvjB5VgkXrY,63814 nilearn/datasets/struct.py,sha256=ZjIy3eaxsiOaWI5--oTKSyA9x81wwvchCRxfybazgok,16526 nilearn/datasets/utils.py,sha256=kKmUYNpJ_fUW_Mj_8c_M4jNuXcXpj1tO-nrqFD-cRZc,27749 nilearn/datasets/data/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 nilearn/datasets/data/avg152T1_brain.nii.gz,sha256=CcklrRZd--a0G1codtZ-m7-MXiZRfngX-bQ6glYif1M,414185 nilearn/datasets/data/confounds_with_header.csv,sha256=9R4MfTYsnNE_w6r3UVV2qK6vaH65He_WpC5FhkL1B2I,9299 nilearn/datasets/data/dosenbach_2010.csv,sha256=PP69OqqwTQyaaRzRGmrndLBKISn50xT1xDfq7w4fhh4,5802 nilearn/datasets/data/power_2011.csv,sha256=oFKkIx1yc0ikT4ulXmZjy4vu3E9bBBWzVjvrgvFvlWQ,3579 nilearn/datasets/description/ABIDE_pcp.rst,sha256=YirlE-4jsResnp1_NMQOeHAYF1-4_y8xJhB2TU1YTWA,1161 nilearn/datasets/description/Megatrawls.rst,sha256=pEmYzEQHnL5ELtm8YZaU74iFaOwOWVng2V5IrO5PX2A,2854 nilearn/datasets/description/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 nilearn/datasets/description/aal_SPM12.rst,sha256=njli9lacPVa3pqvxD4xLxZaWHl0GSMFcoAEP_BCQC10,1263 nilearn/datasets/description/adhd.rst,sha256=D4TWRaf3mUkX5NdpbC4WLDSFi0-eLXZRWzJLbbOQMjE,872 nilearn/datasets/description/basc_multiscale_2015.rst,sha256=6-Y2i9d5BBwU2evlneq5KbKTJYZrgsrptxI0G7CvMzI,7291 nilearn/datasets/description/brainomics_localizer.rst,sha256=cT7tZzRKQVteEa4k6vaXfLyKYMjDTYEZSRwLTYcWkmc,908 nilearn/datasets/description/cobre.rst,sha256=_R6HLo63MHOFa4vdTkTYVBc4ArKtL4AijnPhkY3CknE,6602 nilearn/datasets/description/craddock_2012.rst,sha256=PEnLB6oxORptfe07eovsOMGL7e59gLLmDut3wuugEnw,1382 nilearn/datasets/description/dosenbach_2010.rst,sha256=pEUoJnl0CogVdbhF44k2k-_6NSl2J-jXbaOwdbtxW4c,533 nilearn/datasets/description/haxby2001.rst,sha256=cWX2ONfcUkP2dgkh_KOeQj-YcojmhnLrJT5Sa9nzGlM,1447 nilearn/datasets/description/icbm152_2009.rst,sha256=g2rDYbcgcsP4Rig4ilnp6pw185kzWWNXklzUKP6e9pA,1609 nilearn/datasets/description/miyawaki2008.rst,sha256=6d5XHSrbC2gNcYNaVFWZWwqW9KxxqZI51yH9wGGN0tQ,1022 nilearn/datasets/description/msdl_atlas.rst,sha256=vAOGTCtXwmYT-gCthnw5L25_3l95P6VUB61EWKbvthE,966 nilearn/datasets/description/nyu_rest.rst,sha256=2TG1IlNXGWfD-fDFuUyZ2CZdD1OLq9xWPOZhc_5zAto,1014 nilearn/datasets/description/oasis1.rst,sha256=utGcrD7pDETqVuE8CdhrspmJ5n5s_uVg2qINimw2wIg,1177 nilearn/datasets/description/power_2011.rst,sha256=iJL9CX7_RH3VTqKmqO8zjDrQrrQPnFQ16XYp6AKmblI,266 nilearn/datasets/description/smith_2009.rst,sha256=iUkB8Y4_bYj7lLytOvBGJSZrdA5Ki44AQ29UoMKRawo,1473 nilearn/datasets/description/yeo_2011.rst,sha256=S50FF4vfmU6JTyGllIpJLd-EMNDtve64KIxijwYBSBA,1240 nilearn/datasets/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 nilearn/datasets/tests/test_atlas.py,sha256=lhYNJlSBizbcEKEYuuNGnATeu5QGTmRgEusrOP1Ej-Y,12951 nilearn/datasets/tests/test_func.py,sha256=X0VuvM44U4u2Gtf2rfkZdvIkstryKADKdMNUY0cPLLE,24896 nilearn/datasets/tests/test_struct.py,sha256=Q5jsFa3MoLGOiKlS7Zi4cj8yPH_L_yUyKFYhJYro1CE,6467 nilearn/datasets/tests/test_utils.py,sha256=PPgkSZ4PhxH7p_SvaY1qXeneNptdoSM_FyoHbrYF1ys,12666 nilearn/datasets/tests/data/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 nilearn/datasets/tests/data/__init__.pyc,sha256=vBUFsiiORtq0hYReJy9g3Po7U2DL2bWPqB-sgvSeMpg,156 
nilearn/datasets/tests/data/confounds_with_header.csv,sha256=9R4MfTYsnNE_w6r3UVV2qK6vaH65He_WpC5FhkL1B2I,9299
nilearn/datasets/tests/data/mmap.dat,sha256=ehLlYTYzhenf7qsyY2hzHAMO1LN05_WJesgZFZ0ohMU,400
nilearn/datasets/tests/data/pymvpa-exampledata.tar.bz2,sha256=ueG1v_2BzX4s_EA56OwrNAZXw2iOgW65f6BBXA8tz1M,582
nilearn/datasets/tests/data/spm_confounds.txt,sha256=XXWmQHLkMTF6fu3nE24Czp4s2fVvL_Zd4pPVob0PZAk,1940
nilearn/datasets/tests/data/test.mgz,sha256=7g0JAi-jDcoMFpWadpiTFBEi_owEsZcI4iZFdtK3puk,680
nilearn/decoding/__init__.py,sha256=N1SasCuVmzxgesbHPAITOkGscm4-lIgDP8YzAMffi_Q,208
nilearn/decoding/fista.py,sha256=pPZwEZWhl4YYZ5r4gHl9XfAC4cBNWzuKpHuzk1O_0m4,8443
nilearn/decoding/objective_functions.py,sha256=OBc9NJ6WLA8SKFt4fpXwk0MXk6OtnQYhHFHL_6gHrBk,7768
nilearn/decoding/proximal_operators.py,sha256=Ec3eWk93ZwCZLe4yzFou-VmfZ7sd1s8UMNi2VCecdkk,10824
nilearn/decoding/searchlight.py,sha256=RacZ-mcfOg7hSUZ2gF2ISx1Ax_R_X2wi3R_sbtQUAi8,10780
nilearn/decoding/space_net.py,sha256=AFmsp3Bu1nrutU0wIxfjdamV7QNPc4z9WP4JCUMy-f0,50943
nilearn/decoding/space_net_solvers.py,sha256=BRRYaVG7wVqfnk98_vkTBmnoxRcgntax49s6PL9VrMs,17369
nilearn/decoding/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
nilearn/decoding/tests/simulate_graph_net_data.py,sha256=jnaDBrz9GfLnpAX6O2d3sk1aO7Vyi6j_muc9lQOx280,1611
nilearn/decoding/tests/test_fista.py,sha256=FgI3-i0-oK7reTMrE8yRdCQPD6NhvB62s-ObkPOKgDE,2740
nilearn/decoding/tests/test_graph_net.py,sha256=sstUyBsTeAAV7l4tgAafK8sOj8A23hXRtVtCkIahIDI,9857
nilearn/decoding/tests/test_objective_functions.py,sha256=sM_4lg4QQQgWYXy7eGmPFFO2jtiuGSsOp1CRITDK7wE,3088
nilearn/decoding/tests/test_operators.py,sha256=jWSprth73OQdnCYq5QcltvTYtnza5V2M6YcIx_Gx1Pc,1577
nilearn/decoding/tests/test_same_api.py,sha256=ufn_bkC64PDETQvX1Oo1LlZombebYySxwugxPrq3fkU,7959
nilearn/decoding/tests/test_searchlight.py,sha256=AL9wgPQ12JeMttyV3oCKGyUTwdsjOPbXNvcfgsTIsOo,2267
nilearn/decoding/tests/test_sklearn_compatibility.py,sha256=thGrIVie395dWb7qudWyQwnt2Y3ZPKXc0Vfn-Li203E,1101
nilearn/decoding/tests/test_space_net.py,sha256=-1uGVGNiLmHrHPpfcFWz_D4G1G1c5hW-0Yt37R6emrs,13556
nilearn/decoding/tests/test_tv.py,sha256=mYKA6xWQ2PYvflTiA5GEjKpAJMbBRI6AdqFk_KE7sfI,1411
nilearn/decomposition/__init__.py,sha256=CshFWduVwN-QtQc-irUqL1e3_olWrZwdtSR671CFNdg,219
nilearn/decomposition/base.py,sha256=OStcdOAmWWSBQjgLnt2NfVmpwo-0_KYcrz8MZwnJtS8,18036
nilearn/decomposition/canica.py,sha256=pBUCgFi9SLcPcFzqgaIigkJTye1TKJSG9-7DsQhIWBw,6917
nilearn/decomposition/dict_learning.py,sha256=3DKf6-u9pkIGf5hrDbzeJZ_rGqN_IwRzlce751HagQE,10494
nilearn/decomposition/multi_pca.py,sha256=QVZLoAx2cMuuo7o-wGahFNzmtRx-c0txF6u1nemLcuo,6921
nilearn/decomposition/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
nilearn/decomposition/tests/test_base.py,sha256=mlxfrXRN_G0iijPpRROUWwIVD9PSPePBp1UHnme9q68,4124
nilearn/decomposition/tests/test_canica.py,sha256=H8twRZcyNzoSmopWA4Bj_jiiXC4tjLBRkZGfCPu_hh4,4875
nilearn/decomposition/tests/test_dict_learning.py,sha256=3dDq1y8LgF3WDkxTWbBAWOoi4b-p228_vSDoFCTrEcY,3158
nilearn/decomposition/tests/test_multi_pca.py,sha256=69WP-0KOoJkhg0YlsggAjtCEhLEexLVV7iEGuGLv5mQ,4105
nilearn/image/__init__.py,sha256=PT8nV9FXodX1a_n55wxmw-uQahgl9Rqi_6kAdC1I7-k,851
nilearn/image/image.py,sha256=qx2_BM-dXPnSKJHENslerwonpI1Ch_52oMqitkIFTb0,29461
nilearn/image/resampling.py,sha256=BT8iBzLtJJESW9H6GKL2vaSpCzqlCKxLaema3Fn-i6M,22589
nilearn/image/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
nilearn/image/tests/test_image.py,sha256=CaPhp-FUyXaNJSf4sLlOa71-QFL5AAk9j9Q1Vk-sc04,19576
nilearn/image/tests/test_resampling.py,sha256=R_jTaOuFWs6K0bwteo1zjePcmYPzmX-buldoBeX25B8,22372
nilearn/image/tests/data/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
nilearn/image/tests/data/test.mgz,sha256=7g0JAi-jDcoMFpWadpiTFBEi_owEsZcI4iZFdtK3puk,680
nilearn/input_data/__init__.py,sha256=wDvYcS-H7oLfxtRUy8O9cDy0gaFRZaJFAE4xVX52B1o,481
nilearn/input_data/base_masker.py,sha256=GPq4oURBqL6iRX7b6LQ1TvlKLahdylWkE8Gz5IVa4kQ,8570
nilearn/input_data/masker_validation.py,sha256=GMBnE_kcwjcA5KBAi1jCjvZNpoyfTqWF7kewhM3g86g,3302
nilearn/input_data/multi_nifti_masker.py,sha256=Xzu-EzhbLtOPYZVS1j0yPrOZYms4UyOt8VD5T2UnmNE,12153
nilearn/input_data/nifti_labels_masker.py,sha256=cxNfY4Pt2DG424vpi-SScPWT-JdJ132HJtpW7aYrzn8,11120
nilearn/input_data/nifti_maps_masker.py,sha256=AOx95I2wfWhkQKGjVRDMDQ6KOl2eIEpJo3RgP1qEZ8s,13129
nilearn/input_data/nifti_masker.py,sha256=sfiqqSOtvQGIbpSHCF_jauBVrfUyqehDbbCqkucohVY,11549
nilearn/input_data/nifti_spheres_masker.py,sha256=qTkS-U6O8WA3WIc4fZRpSkzLVQ6PMnHozMiiOGbn9G0,11852
nilearn/input_data/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
nilearn/input_data/tests/test_base_masker.py,sha256=KWIGDTyH-dNwQPw99g6S5_DEHAs4IMBQP3umT4dpEWc,1677
nilearn/input_data/tests/test_masker_validation.py,sha256=AlSQbtfZBp13ZioMiCNrxB_f_JZ4fxe899VWRwSDDV0,3023
nilearn/input_data/tests/test_multi_nifti_masker.py,sha256=oENkwMXkK6pYfSp9xVDHEwLS56npjt3xxqqpXRjll4Y,4619
nilearn/input_data/tests/test_nifti_labels_masker.py,sha256=WzCOwxgtgg_MJgUAOeD3rLmUVzinh_8dozCSgNNF-H4,8825
nilearn/input_data/tests/test_nifti_maps_masker.py,sha256=85Dds24KpmUy13qEuTyhJiL_G4acTXaUs0adryFn0yg,11120
nilearn/input_data/tests/test_nifti_masker.py,sha256=FWhkqS4SQMYlqgHdwwfYGe4ux4hyIEVVBEfXxOMBHK0,10896
nilearn/input_data/tests/test_nifti_spheres_masker.py,sha256=YDYAItg4k3ByuBoRWdxFnS0CH92_eaU8beriIQN2iuw,4372
nilearn/mass_univariate/__init__.py,sha256=MAI804K94bPJfxNdt-_T50Uc4evleCJlcew3x4qz_f0,170
nilearn/mass_univariate/permuted_least_squares.py,sha256=M-Z2dA-o6vjvRHvXDRGZHKlPFB0XT77nQ8mvb8ekvMY,19442
nilearn/mass_univariate/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
nilearn/mass_univariate/tests/test_permuted_least_squares.py,sha256=MwwPfCUMcbB0OIZJkoXR-nvgpY155QTXApbYVWLFGrQ,23016
nilearn/plotting/__init__.py,sha256=7a4z_46Ns_Z_qt0gwnOEQEptOJIDAhHFILimfNMAUQs,1891
nilearn/plotting/cm.py,sha256=SbFD1-OXqliLmsBzH4U4nIHhZh2_bVZC_y4wo2-J7Jg,10086
nilearn/plotting/displays.py,sha256=Kb9EDT5UMhpmov_inU91BmIoiMUrMiNvl-fzwvp2rRk,54073
nilearn/plotting/edge_detect.py,sha256=NSaMbLpE5Y1aeVYF478Q2p2PdDPlOsFgbLzmX-ArYSI,4639
nilearn/plotting/find_cuts.py,sha256=CJqLm0_BNxXdrwTp9kBw9MlcCy1Szwe8en494384kCk,11029
nilearn/plotting/glass_brain.py,sha256=yA9u9WlsqF9ow9v7zGOFmGhcxc_7ksV8qLeblI4HyDE,6002
nilearn/plotting/img_plotting.py,sha256=p0gdpGvKmd5wpLeZ8bnkNbZ1UfCk4MYjzcEEhWWcK20,54865
nilearn/plotting/glass_brain_files/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
nilearn/plotting/glass_brain_files/brain_schematics_front.json,sha256=xSVwjdIS8ysWSF7opg7SV5T8vMbt0c_7WteroGF-sgA,138283
nilearn/plotting/glass_brain_files/brain_schematics_side.json,sha256=db2ZBPuyarBHPywNlBTV_5tv4NV6cP-YyjOvyejzFyw,68135
nilearn/plotting/glass_brain_files/brain_schematics_top.json,sha256=Nqpi_ktT_LmS_Yu1fM3Pc27LkUNlzM8VtvRIz1LzuW4,155623
nilearn/plotting/glass_brain_files/plot_align_svg.py,sha256=DbTh6LBvyU6udoMVcA4vJYaGP28OAK4rBJi87v-GjRo,2204
nilearn/plotting/glass_brain_files/svg_to_json_converter.py,sha256=voT5zuOsiUjt4hISi59gXhIYQ6OAL5mqaTLstgp4EQs,4680
nilearn/plotting/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
nilearn/plotting/tests/test_cm.py,sha256=V9oq-3V3r1c9bkjmLFwVaAWC3eK2hEIfvnzk_rs1QjQ,647
nilearn/plotting/tests/test_displays.py,sha256=3fnIza7JGBHR6lmbMhP1nqxNy-Vek_d4pF0sQN6xoew,1859
nilearn/plotting/tests/test_edge_detect.py,sha256=Ljdmk9hR92lvdPoOiP-CdZwsxPL1GYZPw0l71upAm-s,481
nilearn/plotting/tests/test_find_cuts.py,sha256=jtzIyNQTEAY3t2SqZtzNAW3mXIq3FZzq4qR0NcUyK8I,6214
nilearn/plotting/tests/test_img_plotting.py,sha256=G7HgH_SdYRu4BJP9V87fdHp90EvY4NIu7eYFXsdnA6A,29722
nilearn/regions/__init__.py,sha256=oqckUfX03ynxDbTD1LgNCoJTACkyxWaydORKsIgYPzg,493
nilearn/regions/region_extractor.py,sha256=qwgIwEMjW7uqPfRzmfa1cWxevzejg7yGUM4xGx7Rjys,14752
nilearn/regions/signal_extraction.py,sha256=iCMsh0gWezhzMrV_Pi4fevUH_OwRLPKaVo4yeZbmdIs,14226
nilearn/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
nilearn/tests/test_cache_mixin.py,sha256=CTz-QhoeUhJHa06i1L_EKn4JbG9mfxfY02HV4oyf-mI,5261
nilearn/tests/test_class_inspect.py,sha256=7Etl4iyFXihxc3lYq9S-aCmxZ3bsBYIt4QxD-7KFrGs,1801
nilearn/tests/test_extmath.py,sha256=lxzn3XS-bNcT5IDTkeHXkZbgGGeBb4Hx4wE7_OoZ0FQ,1553
nilearn/tests/test_logger.py,sha256=2Cha1OLU6OeiIU7BplRPKtIslZrjcqwExB8ETAbNbps,2354
nilearn/tests/test_masking.py,sha256=wgEVgA__tNGcLqsxfQ12ONjJj_WbXmNqhaDhq0nfLMs,14796
nilearn/tests/test_ndimage.py,sha256=WIYU1jh2kMLm0ZCl-pFnYu1bE7yPnzTl5Nnp7mB9pNw,1791
nilearn/tests/test_niimg.py,sha256=m3HywXE1ArQu6vci3qE7VsE0moF43ViUHV8B0WRXZxI,968
nilearn/tests/test_niimg_conversions.py,sha256=kRCDuHC5c6ZEekBmR601GImha3pa050xtSheSC_0CgI,18560
nilearn/tests/test_numpy_conversions.py,sha256=Vk00IxVPf2Aa4zIewJ5kHAMXwZALrzPxGC0n2EirN9Y,8795
nilearn/tests/test_param_validation.py,sha256=9W3DkEniurvNXZ0b9ZQcFdL8NEPu-C00Vbp0qkAUeko,2142
nilearn/tests/test_segmentation.py,sha256=2x_tR1TVSdUqOHrG0nUhov0N1D5Jy08D9EV48LD6CcQ,2301
nilearn/tests/test_signal.py,sha256=G02kOfGXTRM3VlBqS27wDUQvZK1jxP4cxGdojzG4Oz4,17370
nilearn/tests/test_testing.py,sha256=PXQzG9oZCDlh__mQ19D9M6tkNP9S0c8vua9pEuzfXiw,2424
nilearn/tests/data/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
nilearn/tests/data/__init__.pyc,sha256=jKJufSZh--KlW8z_9Kw8IgAEwshBbqqGTioQPmQmVP8,147
nilearn/tests/data/confounds_with_header.csv,sha256=9R4MfTYsnNE_w6r3UVV2qK6vaH65He_WpC5FhkL1B2I,9299
nilearn/tests/data/mmap.dat,sha256=ehLlYTYzhenf7qsyY2hzHAMO1LN05_WJesgZFZ0ohMU,400
nilearn/tests/data/pymvpa-exampledata.tar.bz2,sha256=ueG1v_2BzX4s_EA56OwrNAZXw2iOgW65f6BBXA8tz1M,582
nilearn/tests/data/spm_confounds.txt,sha256=XXWmQHLkMTF6fu3nE24Czp4s2fVvL_Zd4pPVob0PZAk,1940
nilearn-0.2.5.dist-info/DESCRIPTION.rst,sha256=8JWCd24HrJFZZy7RlPjgD4rr-DT1idNmRsMk2t5G4rU,2116
nilearn-0.2.5.dist-info/METADATA,sha256=DhLQisjY2JncYcaNQBG9rfjaPiLPhTXCY0kPd3alumM,3146
nilearn-0.2.5.dist-info/RECORD,,
nilearn-0.2.5.dist-info/WHEEL,sha256=GrqQvamwgBV4nLoJe0vhYRSWzWsx7xjlt74FT0SWYfE,110
nilearn-0.2.5.dist-info/metadata.json,sha256=u2NmJOJE0viCzGvtVsN3mCo4lAdbcS1y5Nte7so6SlA,1130
nilearn-0.2.5.dist-info/top_level.txt,sha256=PVJEwQK7DHvlmivTJeqY-C9uAIs-L1s4Gdr6Ne1x1_w,8
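The RECORD file above is the wheel's installed-files manifest: each row reads
``path,sha256=<digest>,<size>``, where the digest is the unpadded, URL-safe
base64 encoding of the file's SHA-256 hash, and the RECORD lists itself with
the hash and size fields left empty. A minimal sketch of how such a manifest
can be verified (this helper is illustrative only, not part of nilearn, and
``root`` is assumed to be whatever directory the wheel was unpacked into)::

    import base64
    import csv
    import hashlib
    import os

    def verify_record(record_path, root="."):
        """Yield (path, matches) for every hashed entry in a wheel RECORD."""
        with open(record_path) as records:
            for path, hash_spec, _size in csv.reader(records):
                if not hash_spec:  # the RECORD row itself carries no hash
                    continue
                algorithm, _, expected = hash_spec.partition("=")
                with open(os.path.join(root, path), "rb") as f:
                    digest = hashlib.new(algorithm, f.read()).digest()
                actual = base64.urlsafe_b64encode(digest).rstrip(b"=").decode()
                yield path, actual == expected

    # Example (hypothetical install location):
    # for path, ok in verify_record("nilearn-0.2.5.dist-info/RECORD",
    #                               root="/path/to/site-packages"):
    #     print(path, "OK" if ok else "MISMATCH")

As a sanity check of the encoding, the digest ``47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU``
that recurs throughout the manifest is exactly this base64 form of the SHA-256
hash of an empty file, matching the many zero-byte ``__init__.py`` files.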