======== pirt/README.md ========

# pirt

This is the root of the package PIRT.

- pirt.apps: small apps related to deformations. Serve as nice examples,
  and can also be used for user interaction.
- pirt.interp: the code related to interpolation, agnostic about deformations.
- pirt.splinegrid: implements B-spline grids.
- pirt.deform: classes to work with deformations.
- pirt.reg: the registration algorithms.


# ======== pirt/__init__.py ========

# flake8: noqa
""" Pirt - Python Image Registration Toolkit """

__version__ = '2.1.0'

# Check compat
import sys
if sys.version_info < (3, 4):
    raise RuntimeError('Pirt requires at least Python 3.4')

# Imports
from ._utils import Aarray, PointSet, Parameters
from .gaussfun import (gaussiankernel, gfilter, gfilter2,
                       diffusionkernel, diffuse, diffuse2)
from .pyramid import ScaleSpacePyramid
from .interp import (get_cubic_spline_coefs, meshgrid,
                     warp, project, awarp, aproject,
                     deform_backward, deform_forward,
                     resize, imresize, zoom, imzoom,
                     make_samples_absolute, SliceInVolume)
from .splinegrid import GridInterface, SplineGrid, GridContainer, FieldDescription, FD
from .deform import (Deformation, DeformationIdentity,
                     DeformationGrid, DeformationField,
                     DeformationGridForward, DeformationFieldForward,
                     DeformationGridBackward, DeformationFieldBackward)
from .reg import *

# Some utils need to be imported to use them
from .randomdeformations import RandomDeformations, create_random_deformation

# from . import experiment
# from . import deformvis
# from . import testing

# Clean up
del sys


# ======== pirt/_utils.py ========

""" Small utility classes.
"""

import re

import numpy as np

try:  # pragma: no cover
    from collections import OrderedDict as _dict  # noqa
except ImportError:
    _dict = dict

# Vendored code, copied as module
from .new_pointset import PointSet


def isidentifier(s):
    # http://stackoverflow.com/questions/2544972/
    if not isinstance(s, str):
        return False
    return re.match(r'^\w+$', s, re.UNICODE) and re.match(r'^[0-9]', s) is None


# Copied with changes from Pyzo/zon
class Parameters(_dict):
    """ A dict in which the items can be get/set as attributes.
    """

    __reserved_names__ = dir(_dict())  # Also from OrderedDict
    __pure_names__ = dir(dict())

    __slots__ = []

    def __repr__(self):
        identifier_items = []
        nonidentifier_items = []
        for key, val in self.items():
            if isidentifier(key):
                identifier_items.append('%s=%r' % (key, val))
            else:
                nonidentifier_items.append('(%r, %r)' % (key, val))
        if nonidentifier_items:
            return 'Parameters([%s], %s)' % (', '.join(nonidentifier_items),
                                             ', '.join(identifier_items))
        else:
            return 'Parameters(%s)' % (', '.join(identifier_items))

    def __str__(self):
        # Get alignment value
        c = 0
        for key in self:
            c = max(c, len(key))
        # How many chars left (to print on less than 80 columns)
        charsLeft = 79 - (c + 6)
        s = '<%i parameters>\n' % len(self)
        for key in self.keys():
            valuestr = repr(self[key])
            if len(valuestr) > charsLeft:
                valuestr = valuestr[:charsLeft - 3] + '...'
            s += key.rjust(c + 4) + ": %s\n" % (valuestr)
        return s

    def __getattribute__(self, key):
        try:
            return object.__getattribute__(self, key)
        except AttributeError:
            if key in self:
                return self[key]
            else:
                raise

    def __setattr__(self, key, val):
        if key in self.__class__.__reserved_names__:
            # Either let OrderedDict do its work, or disallow
            if key not in self.__class__.__pure_names__:
                return _dict.__setattr__(self, key, val)
            else:
                raise AttributeError('Reserved name, this key can only ' +
                                     'be set via ``d[%r] = X``' % key)
        else:
            # if isinstance(val, dict): val = Dict(val) -> no, makes a copy!
            self[key] = val

    def __dir__(self):
        names = [k for k in self.keys() if isidentifier(k)]
        return self.__class__.__reserved_names__ + names
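
# A minimal usage sketch (added for illustration; not part of the original
# module): Parameters is a dict whose items double as attributes.
#
#     p = Parameters()
#     p.sigma = 2.0               # equivalent to p['sigma'] = 2.0
#     assert p['sigma'] == p.sigma
#     print(p)                    # "<1 parameters>" plus an aligned listing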

# Vendored class from Visvis
class Aarray(np.ndarray):
    """ Aarray(shape_or_array, sampling=None, origin=None, fill=None,
               dtype='float32', **kwargs)

    Anisotropic array; inherits from numpy.ndarray and adds a sampling
    and origin property which gives the sample distance and offset for
    each dimension.

    Parameters
    ----------
    shape_or_array : shape-tuple or numpy.ndarray
        Specifies the shape of the produced array. If an array instance
        is given, the returned Aarray is a view of the same data (i.e.
        no data is copied).
    sampling : tuple of ndim elements
        Specifies the sample distance (i.e. spacing between elements) for
        each dimension. Default is all ones.
    origin : tuple of ndim elements
        Specifies the world coordinate at the first element for each
        dimension. Default is all zeros.
    fill : scalar (optional)
        If given, and the first argument is not an existing array,
        fills the array with this given value.
    dtype : any valid numpy data type
        The type of the data.

    All extra arguments are fed to the constructor of numpy.ndarray.

    Implemented properties and methods
    ----------------------------------
    * sampling - The distance between samples as a tuple
    * origin - The origin of the data as a tuple
    * get_start() - Get the origin of the data as a Point instance
    * get_end() - Get the end of the data as a Point instance
    * get_size() - Get the size of the data as a Point instance
    * sample() - Sample the value at the given point
    * point_to_index() - Given a point, returns the index in the array
    * index_to_point() - Given an index, returns the world coordinate

    Slicing
    -------
    This class is aware of slicing. This means that when obtaining a part
    of the data (for example 'data[10:20,::2]'), the origin and sampling
    of the resulting array are set appropriately.

    When applying mathematical operations to the data, or applying
    functions that do not change the shape of the data, the sampling and
    origin are copied to the new array. If a function does change the
    shape of the data, the origin and sampling are reset to all zeros
    and all ones, respectively.

    World coordinates vs tuples
    ---------------------------
    World coordinates are expressed as Point instances (except for the
    "origin" property). Indices as well as the "sampling" and "origin"
    attributes are expressed as tuples in z,y,x order.
    """

    _is_Aarray = True

    def __new__(cls, shapeOrArray, sampling=None, origin=None, fill=None,
                dtype='float32', **kwargs):
        if isinstance(shapeOrArray, np.ndarray):
            shape = shapeOrArray.shape
            ob = shapeOrArray.view(cls)
            if sampling is None and hasattr(shapeOrArray, 'sampling'):
                sampling = shapeOrArray.sampling
            if origin is None and hasattr(shapeOrArray, 'origin'):
                origin = shapeOrArray.origin
        else:
            shape = shapeOrArray
            ob = np.ndarray.__new__(cls, shape, dtype=dtype, **kwargs)
            if fill is not None:
                ob.fill(fill)

        # Init sampling and origin
        ob._sampling = tuple([1.0 for i in ob.shape])
        ob._origin = tuple([0.0 for i in ob.shape])

        # Set them
        if sampling:
            ob.sampling = sampling
        if origin:
            ob.origin = origin

        # Return
        return ob
""" #if hasattr(ob, '_sampling') and hasattr(ob, '_origin'): if isinstance(ob, Aarray): if self.shape == ob.shape: # Copy sampling and origin for math operation self._sampling = tuple( [i for i in ob._sampling] ) self._origin = tuple( [i for i in ob._origin] ) else: # Don't bother (__getitem__ will set them after this) # Other functions that change the shape cannot be trusted. self._sampling = tuple( [1.0 for i in self.shape] ) self._origin = tuple( [0.0 for i in self.shape] ) elif isinstance(self, Aarray): # This is an Aarray, but we do not know where it came from self._sampling = tuple( [1.0 for i in self.shape] ) self._origin = tuple( [0.0 for i in self.shape] ) def __getslice__(self, i, j): # Called only when indexing first dimension and without a step # Call base getitem method ob = np.ndarray.__getslice__(self, i, j) # Perform sampling and origin corrections sampling, origin = self._correct_sampling(slice(i,j)) ob.sampling = sampling ob.origin = origin # Done return ob def __getitem__(self, index): # Call base getitem method ob = np.ndarray.__getitem__(self, index) if isinstance(index, np.ndarray): # Masked or arbitrary indices; sampling and origin irrelevant ob = np.asarray(ob) elif isinstance(ob, Aarray): # If not a scalar, perform sampling and origin corrections # This means there is only a very small performance penalty sampling, origin = self._correct_sampling(index) if sampling: ob.sampling = sampling ob.origin = origin # Return return ob def __array_wrap__(self, out, context=None): """ So that we return a native numpy array (or scalar) when a reducting ufunc is applied (such as sum(), std(), etc.) """ if not out.shape: return out.dtype.type(out) # Scalar elif out.shape != self.shape: return out.view(type=np.ndarray) else: return out # Type Aarray def _correct_sampling(self, index): """ _correct_sampling(index) Get the new sampling and origin when slicing. 
""" # Init origin and sampling _origin = self._origin _sampling = self._sampling origin = [] sampling = [] # Get index always as a tuple and complete index2 = [None]*len(self._sampling) if not isinstance(index, (list,tuple)): index2[0] = index else: try: for i in range(len(index)): index2[i] = index[i] except Exception: index2[0] = index # Process for i in range(len(index2)): ind = index2[i] if isinstance(ind, slice): #print(ind.start, ind.step) if ind.start is None: origin.append( _origin[i] ) else: origin.append( _origin[i] + ind.start*_sampling[i] ) if ind.step is None: sampling.append(_sampling[i]) else: sampling.append(_sampling[i]*ind.step) elif ind is None: origin.append( _origin[i] ) sampling.append(_sampling[i]) else: pass # singleton dimension that pops out # Return return sampling, origin def _set_sampling(self,sampling): if not isinstance(sampling, (list,tuple)): raise ValueError("Sampling must be specified as a tuple or list.") if len(sampling) != len(self.shape): raise ValueError("Sampling given must match shape.") for i in sampling: if i <= 0: raise ValueError("Sampling elements must be larger than zero.") # set tmp = [float(i) for i in sampling] self._sampling = tuple(tmp) def _get_sampling(self): l1, l2 = len(self._sampling), len(self.shape) if l1 < l2: tmp = list(self._sampling) tmp.extend( [1 for i in range(l2-l1)] ) return tuple( tmp ) elif l1 > l2: tmp = [self._sampling[i] for i in range(l2)] return tuple(tmp) else: return self._sampling sampling = property(_get_sampling, _set_sampling, None, "A tuple with the sample distance for each dimension.") def _set_origin(self,origin): if not isinstance(origin, (list,tuple)): raise ValueError("Origin must be specified as a tuple or list.") if len(origin) != len(self.shape): raise ValueError("Origin given must match shape.") # set tmp = [float(i) for i in origin] self._origin = tuple(tmp) def _get_origin(self): l1, l2 = len(self._origin), len(self.shape) if l1 < l2: tmp = list(self._origin) tmp.extend( [0 for i in range(l2-l1)] ) return tuple( tmp ) elif l1 > l2: tmp = [self._origin[i] for i in range(l2)] return tuple(tmp) else: return self._origin origin = property(_get_origin, _set_origin, None, "A tuple with the origin for each dimension.") def point_to_index(self, point, non_on_index_error=False): """ point_to_index(point, non_on_index_error=False) Given a point returns the sample index (z,y,x,..) closest to the given point. Returns a tuple with as many elements as there are dimensions. If the point is outside the array an IndexError is raised by default, and None is returned when non_on_index_error == True. """ point = tuple(point.flat) # check if len(point) != len(self.shape): raise ValueError("Given point must match the number of dimensions.") # calculate indices ii = [] for i in range(len(point)): s = self.shape[i] p = ( point[-(i+1)] - self._origin[i] ) / self._sampling[i] p = int(p+0.5) if p<0 or p>=s: ii = None break ii.append(p) # return if ii is None and non_on_index_error: return None elif ii is None: raise IndexError("Sample position out of range: %s" % str(point)) else: return tuple(ii) def sample(self, point, default=None): """ sample(point, default=None) Take a sample of the array, given the given point in world-coordinates, i.e. transformed using sampling. By default raises an IndexError if the point is not inside the array, and returns the value of "default" if it is given. 
""" tmp = self.point_to_index(point,True) if tmp is None: if default is None: ps = str(point) raise IndexError("Sample position out of range: %s" % ps) else: return default return self[tmp] def index_to_point(self, *index): """ index_to_point(*index) Given a multidimensional index, get the corresponding point in world coordinates. """ # check if len(index)==1: index = index[0] if not hasattr(index,'__len__'): index = [index] if len(index) != len(self.shape): raise IndexError("Invalid number of indices.") # init point as list pp = [] # convert for i in range(len(self.shape)): ii = index[i] if ii<0: ii = self.shape[i] - ii p = ii * self._sampling[i] + self._origin[i] pp.append(p) # return pp.reverse() return PointSet(pp) def get_size(self): """ get_size() Get the size (as a vector) of the array expressed in world coordinates. """ pp = [] for i in range(len(self.shape)): pp.append( self._sampling[i] * self.shape[i] ) pp.reverse() return PointSet(pp) def get_start(self): """ get_start() Get the origin of the array expressed in world coordinates. Differs from the property 'origin' in that this method returns a point rather than indices z,y,x. """ pp = [i for i in self.origin] pp.reverse() return PointSet(pp) def get_end(self): """ get_end() Get the end of the array expressed in world coordinates. """ pp = [] for i in range(len(self.shape)): pp.append( self._origin[i] + self._sampling[i] * self.shape[i] ) pp.reverse() return PointSet(pp) PKc4K ^1^1pirt/deformvis.py""" deformVis module Module for making textures and meshes move using a deformation field. Visualization based on visvis. """ import visvis as vv import numpy as np ## The GLSL code SH_MV_DEFORM = vv.shaders.ShaderCodePart('deform', 'mesh', """ >>--uniforms-- uniform vec2 scaleBiasDeform; uniform sampler3D deformation; uniform vec3 deformOrigin; uniform vec3 deformSampling; uniform vec3 deformShape; //--uniforms-- >>vec4 vertex = vec4(gl_Vertex); vec4 vertex = vec4(gl_Vertex); //vec3 loc = vertex.xyz / deformSampling - deformOrigin; vec3 loc = (vertex.xyz - deformOrigin) / (deformSampling * deformShape); vec3 dloc = texture3D( deformation, loc ).xyz; dloc = ( dloc - scaleBiasDeform[1] ) / scaleBiasDeform[0]; vertex.xyz += dloc; """) SH_3F_DEFORM = vv.shaders.ShaderCodePart('deform', '3D', """ >>--uniforms-- uniform vec2 scaleBiasDeform; uniform sampler3D deformation; //--uniforms-- >>--in-loop-- // loc should be scale-biassed too :/ vec3 dloc = texture3D( deformation, loc ).xyz; dloc = ( dloc - scaleBiasDeform[1] ) / scaleBiasDeform[0]; dloc /= extent; loc = loc + dloc; //--in-loop-- >>--post-loop-- //--post-loop-- // Color based on deformation //vec3 loc2 = edgeLoc + float(iter_depth) * ray; //vec3 dloc2 = texture3D( deformation, loc2 ).xyz; //dloc2 = ( dloc2 - scaleBiasDeform[1] ); //gl_FragColor.rg *= (1.0-length(dloc2)); """) ## The classes class DeformTexture(vv.textures.TextureObjectToVisualize): """ DeformTexture(deforms) Texture to manage a deformation that can be applied to a texture or mesh. """ def __init__(self, deform): ndim = deform.shape[-1] vv.textures.TextureObjectToVisualize.__init__(self, ndim, deform, True) # Interpolate deformation ofcourse! 

# ======== pirt/deformvis.py ========

""" deformVis module

Module for making textures and meshes move using a deformation field.
Visualization based on visvis.
"""

import visvis as vv
import numpy as np


## The GLSL code

SH_MV_DEFORM = vv.shaders.ShaderCodePart('deform', 'mesh',
"""
    >>--uniforms--
    uniform vec2 scaleBiasDeform;
    uniform sampler3D deformation;
    uniform vec3 deformOrigin;
    uniform vec3 deformSampling;
    uniform vec3 deformShape;
    //--uniforms--

    >>vec4 vertex = vec4(gl_Vertex);
    vec4 vertex = vec4(gl_Vertex);
    //vec3 loc = vertex.xyz / deformSampling - deformOrigin;
    vec3 loc = (vertex.xyz - deformOrigin) / (deformSampling * deformShape);
    vec3 dloc = texture3D( deformation, loc ).xyz;
    dloc = ( dloc - scaleBiasDeform[1] ) / scaleBiasDeform[0];
    vertex.xyz += dloc;
""")

SH_3F_DEFORM = vv.shaders.ShaderCodePart('deform', '3D',
"""
    >>--uniforms--
    uniform vec2 scaleBiasDeform;
    uniform sampler3D deformation;
    //--uniforms--

    >>--in-loop--
    // loc should be scale-biased too :/
    vec3 dloc = texture3D( deformation, loc ).xyz;
    dloc = ( dloc - scaleBiasDeform[1] ) / scaleBiasDeform[0];
    dloc /= extent;
    loc = loc + dloc;
    //--in-loop--

    >>--post-loop--
    //--post-loop--
    // Color based on deformation
    //vec3 loc2 = edgeLoc + float(iter_depth) * ray;
    //vec3 dloc2 = texture3D( deformation, loc2 ).xyz;
    //dloc2 = ( dloc2 - scaleBiasDeform[1] );
    //gl_FragColor.rg *= (1.0-length(dloc2));
""")


## The classes

class DeformTexture(vv.textures.TextureObjectToVisualize):
    """ DeformTexture(deforms)

    Texture to manage a deformation that can be applied to a
    texture or mesh.
    """

    def __init__(self, deform):
        ndim = deform.shape[-1]
        vv.textures.TextureObjectToVisualize.__init__(self, ndim, deform, True)

        # Interpolate deformation, of course!
        self._interpolate = True

        # Store
        self.SetData(deform)

    def SetData(self, data):
        # Overload to reset climRef

        # Set self._climRef
        minmax = vv.textures.minmax
        self._climRef.Set(*minmax(data))

        # Store shape, origin, sampling of the deform field
        self._deform_shape = data.shape
        if hasattr(data, 'origin'):
            self._deform_origin = data.origin
        else:
            self._deform_origin = [0.0 for s in data.shape]
        if hasattr(data, 'sampling'):
            self._deform_sampling = data.sampling
        else:
            self._deform_sampling = [1.0 for s in data.shape]

        # Update data
        vv.textures.TextureObjectToVisualize.SetData(self, data)

    def _ScaleBias_get(self):
        """ Given clim, get scale and bias to apply in shader.

        In this case, we want to map [0 1] to the full range, expressed
        in world coordinates. In the shader, we use the extent (in world
        units) to convert to texture coords.

        Data to OpenGL: texData = data*scale + bias
        In shader: data_val = (texData-bias) / scale
        """
        # Code as in _ScaleBias_init
        ran = self._climRef.range
        if ran == 0:
            ran = 1.0
        scale = 1.0 / ran  # No need for data-type-correction
        bias = -self._climRef.min / ran
        # Done
        return scale, bias
""" # Get ndim ndim = self._ndim # Quick check if not deforms or (len(deforms)==1 and deforms[0] is None): self._deforms = [] # Init deform props origin, sampling = None, None # Check deforms and make each deform a single array deforms2 = [] for deform in deforms: if isinstance(deform, np.ndarray): if deform.ndim == ndim+1 and deform.shape[-1] == ndim: new_deform = deform shape = deform.shape[:-1] if hasattr(deform, 'sampling'): sampling = deform.sampling[:-1] if hasattr(deform, 'origin'): origin = deform.origin[:-1] else: raise ValueError('Number of dimensions for deform is incorrect.') #elif isinstance(deform, (tuple, list)): elif hasattr(deform, '__len__'): if len(deform) != ndim: raise ValueError('Number of arrays for deform is incorrect.') # Build single texture new_shape = deform[0].shape+(ndim,) new_deform = np.zeros(new_shape, 'float32') for i in range(ndim): if ndim==1: new_deform[:,i] = deform[i] elif ndim==2: new_deform[:,:,i] = deform[i] elif ndim==3: new_deform[:,:,:,i] = deform[i] else: raise ValueError('DeformTexture only supports 1D, 2D and 3D.') # shape = deform[0].shape if hasattr(deform[0], 'sampling'): sampling = deform[0].sampling if hasattr(deform[0], 'origin'): origin = deform[0].origin else: raise ValueError('Deforms must be tuple or array.') deforms2.append(new_deform) # Store deformations self._deforms = deforms2 # Pick first and create deform texture if self._deforms: self._deformTexture = DeformTexture(self._deforms[0]) else: self._deformTexture = None # To update shader self._UpdateDeformShaderAfterSetDeforms(origin, sampling, shape) def _UpdateDeformShaderAfterSetDeforms(self, origin, sampling, shape): pass @vv.misc.Property def motionAmplitude(): """ Get/set the relative amplitude of the deformation (default 1). Note that values higher than 1 can cause foldings in the deformation. Also note that because the deformation is backward mapping, changing the amplitude introduces errors in the deformation. """ def fget(self): return self._motionAmplitude def fset(self, value): self._motionAmplitude = float(value) self.motionIndex = self.motionIndex return locals() class DeformableTexture3D(vv.Texture3D, DeformableMixin): """ DeformableTexture3D(parent, data) This class represents a 3D texture that can be deformed using a 3D deformation field, i.e. a vector field that specifies the displacement of the texture. This deformation field can (and probably should) be of lower resolution than the texture. By supplying multiple deformation fields (via SetDeforms()), the texture becomes a moving texture subject to the given deformations. Note that the motion is interpolated. The deformations should be backward mapping. Note that this means that interpolation between the deformations and increasing the amplitude will yield not the exact expected deformations. 
""" def __init__(self, *args, **kwargs): vv.Texture3D.__init__(self, *args, **kwargs) DeformableMixin.__init__(self) self._ndim = 3 def _UpdateDeformShaderAfterSetDeforms(self, origin, sampling, shape): if self._deformTexture is None: self.shader.vertex.Remove(SH_3F_DEFORM) self.Draw() return # Ignore origin and sampling; it is assumed that the deformation # exactly matches with the texture self.shader.fragment.AddOrReplace(SH_3F_DEFORM, before='renderstyle') self.shader.SetStaticUniform('deformation', self._deformTexture) self.shader.SetStaticUniform('scaleBiasDeform', self._deformTexture._ScaleBias_get) self.Draw() class DeformableMesh(vv.Mesh, DeformableMixin): """ DeformableMesh(parent, vertices, faces=None, normals=None, values=None, verticesPerFace=3) This class represents a mesh that can be deformed using a 3D deformation field, i.e. a vector field that specifies the displacement of a region of the 3D space. By supplying multiple deformation fields (via SetDeforms()), the mesh becomes a moving mesh subject to the given deformations. Note that the motion is interpolated. The deformations should be forward mapping; interpolation and changing the amplitude can be done safely. However, the normals are always the same, so for extreme deformations the lighting might become incorrect. """ def __init__(self, *args, **kwargs): vv.Mesh.__init__(self, *args, **kwargs) DeformableMixin.__init__(self) self._ndim = 3 def _UpdateDeformShaderAfterSetDeforms(self, origin, sampling, shape): if self._deformTexture is None: for shader in [self.faceShader, self.shapeShader]: shader.vertex.Remove(SH_MV_DEFORM) self.Draw() return # Reverse three props (i.e. make x-y-z) and make floats if None in [origin, sampling, shape]: raise ValueError('Need origin and sampling to determine location of the deformation.') origin = [float(i) for i in reversed(origin)] sampling = [float(i) for i in reversed(sampling)] shape = [float(i) for i in reversed(shape)] for shader in [self.faceShader, self.shapeShader]: shader.vertex.AddOrReplace(SH_MV_DEFORM) shader.SetStaticUniform('deformation', self._deformTexture) shader.SetStaticUniform('scaleBiasDeform', self._deformTexture._ScaleBias_get) shader.SetStaticUniform('deformOrigin', origin) shader.SetStaticUniform('deformSampling', sampling) shader.SetStaticUniform('deformShape', shape) self.Draw() PKa_4KfTTpirt/experiment.py""" Module experiment. Provides a means to perform experiments (not limited to registration). This module provides two classes: * Database - A generic way to store data using ssdf. * Experiment - A base class that represents an experiment. """ # todo: move to pyzolib? or somewhere else? import os import hashlib import numpy as np from visvis import ssdf class Database: """ Database(fname=None) Database for storing objects using keys. These keys can be strings (restricted similar to Python variable names) or ssdf structs. When a filename is given, the database is loaded from that file (if it exists) and save() will save the database to this file. Objects to store should be classes natively supported by SSDF, or SSDF-compatible classes (i.e. classes that implement the __to_ssdf__ and __from_ssdf__ methods). See the documentation of SSDF for more information. """ def __init__(self, fname=None): # Store filename self._fname = fname # Load if fname and os.path.exists(fname): self._db = ssdf.load(fname) else: self._db = ssdf.new() # Key of last added entry self._lastkey = '' @property def db(self): """ Get the underlying ssdf struct. 
""" return self._db def set(self, key, object, save_now=True): """ set(key, object, save_now=True) Add an object to the database, using the given key. If save_now is True, will save the database to disk now. """ # Correct key key = self._key2key(key) #print 'set', key # Test object if ssdf.isstruct(object): pass if isinstance(object, (int, float, str, np.ndarray, tuple, list)): pass # Default ssdf compatible elif ssdf.is_compatible_class(object): pass # Compatible else: raise ValueError('The given object is not ssdf compatible.') # Store last key self._lastkey = key # Store object in db self._db[key] = object # Save? if save_now and self._fname: ssdf.save(self._fname, self._db) def get(self, key): """ get(key) Get the stored object using the given key. Returns None if no object is stored under the given key. """ # Correct key key = self._key2key(key) #print 'get', key # Get object try: return self._db[key] except KeyError: return None def save(self, fname=None): """ save(fname=None) Save the results to disk now. If fname is not given, the last used fname is used. If there is no fname available, will raise a runtime error. """ # Use given or stored fname? if fname is None: fname = self._fname else: self._fname = fname # Check if there is an fname available if fname is None: raise RuntimeError('No known filename to write to.') # Save ssdf.save(fname, self._db) def _key2key(self, key): """ _key2key(key) Get real key from given key. """ # Hash key? if isinstance(key, str): key = key.replace(' ', '_') else: key = self._hasher(key) return key def _hasher(self, object): """ _hasher(object) Create a hash string from the given object. This object can be a string, dict or ssdf struct. """ # Init h = hashlib.sha256() # Hash if isinstance(object, str): h.update(object) elif ssdf.isstruct(object): h.update(ssdf.saves(object)) elif isinstance(object, dict): for key in params: s = key + '=' + str(params[key]) h.update(s) else: raise ValueError('I do not know how to hash this object.') # Done return '_' + h.hexdigest() NAME_SERIES_PARAMS = '_series_params' NAME_SERIES_RESULTS = '_series_results' class Experiment: """ Experiment(params, database=None) Base class to perform experiments. This class enables users to do experiments. It uses the concept of series to enable sweeping different parameters. It can use a database to store and reuse experiment results. This class promotes the separation between the experiment and its quantitative analysis. The get_result() method, can be used to obtain the raw experiment results, which can then be processed before displaying them. Series ------ When doing an experiment, one deals with statistics. Therefore an experiment should often be performed multiple times. Further, when testing an algorithm, one often wants to see how a specific parameter of the algorithm affects the performance. Therefore, one often wants to run a series of experiments. Sometimes one wants to run a series of series of experiments, and sometimes, one even wants to run a series of series of series of experiments ... This class uses the notion of series. Each series has a number, starting from zero. Series 0 simply means that the experiment is performed multiple times (while varying a parameter). Series 1 means that series 0 is performed multiple times (while varying another parameter). Series 2 means that series 1 is performed multiple times, etc. The number of series is limited by the maximum recursion depth of Python. 

NAME_SERIES_PARAMS = '_series_params'
NAME_SERIES_RESULTS = '_series_results'


class Experiment:
    """ Experiment(params, database=None)

    Base class to perform experiments.

    This class enables users to do experiments. It uses the concept of
    series to enable sweeping different parameters. It can use a
    database to store and reuse experiment results.

    This class promotes the separation between the experiment and its
    quantitative analysis. The get_result() method can be used to obtain
    the raw experiment results, which can then be processed before
    displaying them.

    Series
    ------
    When doing an experiment, one deals with statistics. Therefore an
    experiment should often be performed multiple times. Further, when
    testing an algorithm, one often wants to see how a specific
    parameter of the algorithm affects the performance. Therefore, one
    often wants to run a series of experiments. Sometimes one wants to
    run a series of series of experiments, and sometimes, one even wants
    to run a series of series of series of experiments ...

    This class uses the notion of series. Each series has a number,
    starting from zero. Series 0 simply means that the experiment is
    performed multiple times (while varying a parameter). Series 1 means
    that series 0 is performed multiple times (while varying another
    parameter). Series 2 means that series 1 is performed multiple
    times, etc. The number of series is limited by the maximum recursion
    depth of Python.

    In pseudocode:
    {{{
    for parameter_changes in series2:
        for parameter_changes in series1:
            for parameter_changes in series0:
                experiment()
    }}}

    Parameters
    ----------
    This class uses an ssdf struct to store the experiment parameters.
    Which parameter is changed in a series, and what values that
    parameter should take in that series, can be set using
    set_series_params().

    Buffering
    ---------
    This class can make use of a Database instance. When set, it will
    store all results produced by the experiment() method. This way,
    when an experiment is repeated with the same parameters, the
    experiment results can be obtained from the database. The database's
    save() method is called at the end of each series 1.

    Note that in order to use a database, the results produced by
    experiment() must be SSDF compatible, and if the result is a custom
    class, this class needs to be registered to SSDF.

    Usage
    -----
    To use this class, subclass it and implement the following methods:
      * experiment() - This method accepts one argument (a params
        struct) and should return a result object.
      * quantify_results() - To process the results, making them ready
        for presentation (using get_result()).
      * show_results() - Show the results (using quantify_results()).

    The quantify_results() and show_results() are not required to run
    the experiments, but function as a guide to process the experiment
    results.

    To run a single experiment with the given parameters, use
    do_experiment(). To do a series of experiments, use
    do_experiment_series(). There is an example at the bottom of the
    file (experiment.py) that defines this class.
    """

    def __init__(self, params, database=None):
        # Store parameters
        self._params = params

        # Init one result
        self._one_result = None

        # Series params are stored as NAME_SERIES_PARAMS_x
        # Series results are stored as NAME_SERIES_RESULTS_x

        # Attach database
        self._database = database
        self._database_overwrite = False
        self._save_next_result = False

    def set_database(self, database=None):
        """ set_database(database)

        Set the database to use to buffer experiment results. Call
        without arguments to disable using a database.

        Special database attributes
        ---------------------------
        The attribute _database_overwrite can be used to overwrite the
        database (for example when you changed your algorithm).

        The attribute _save_next_result can be set to False in
        experiment() to signal that the result that it produces should
        not be stored. This variable is reset each time before
        experiment() is called.
        """
        self._database = database

    @property
    def params(self):
        """ Get the original parameters as given during initialization.
        """
        return self._params
""" # Test arguments if not isinstance(series_nr, int) or series_nr < 0: raise ValueError('series_nr must be an integer >= zero.') if not isinstance(param, str): raise ValueError('param must be a parameter name (as a string).') if isinstance(values, np.ndarray): values = [val for val in values] if not isinstance(values, (list, tuple)): raise ValueError('values must be a list or tuple.') # Get list (resets the parameter list) L = self._get_list_member(NAME_SERIES_PARAMS, series_nr, clear=True) # Set if None not in [param, values]: L.append(param) L.append(values) def get_series_params(self, series_nr): """ get_series_params(seriesNr) Get which parameter is varied for the given series, and what values are iterated. Returns a tuple (param, values). Raises a RuntimeError if no series params have been defined for the given series_nr. """ # Test arguments if not isinstance(series_nr, int) or series_nr < 0: raise ValueError('series_nr must be an integer >= zero.') # Get list try: L = self._get_list_member(NAME_SERIES_PARAMS, series_nr) except KeyError: raise RuntimeError( 'Series params have not been set for series %i.' % series_nr) # Return return L[0], L[1] def do_experiment_series(self, series_nr, params=None): """ do_experiment_series(series_nr) Perform a series of experiments. Returns a list of results (which may contain lists of results, etc.). The results can also be accesed using the get_result() method. """ # Get params (copy) if params is None: params = self.params params = ssdf.copy(params) # Get series parameter param, values = self.get_series_params(series_nr) # Init results series_results = self._get_list_member(NAME_SERIES_RESULTS, series_nr, True) for value in values: # Print info tmp = '=' * 2 * (series_nr+1) rv = repr(value) print('%s SERIES %i: %s = %s %s' % (tmp, series_nr, param, rv, tmp)) # Set value for this iteration params[param] = value # Perform experiment, or another series of experiments if series_nr == 0: result = self.do_experiment(params) else: result = self.do_experiment_series(series_nr-1, params) # Store result series_results.append(result) # Store database if series_nr is 1 if self._database and self._database._fname and series_nr==1: self._database.save() # Done return series_results def do_experiment(self, params=None): """ do_experiment() Perform a single experiment. The resulting result is stored returned. The result can also be accesed using the get_result() method. """ # Get params (copy when calling experiment) if params is None: params = self.params # Try getting result from database result = None if self._database and not self._database_overwrite: result = self._database.get(params) # Get result by running experiment if result: print('\b' + ' (result in db)') else: # Perform experiment self._save_next_result = True result = self.experiment(ssdf.copy(params)) # Check if result is None: raise RuntimeError('experiment should not return None.') # Store in db if self._database and self._save_next_result: self._database.set(params, result, False) # Store result and return self._one_result = result return result def get_result(self, *args): """ get_result(series0_index, series1_index, series2_index, ...) Get the result (as returned by the overloaded experiment()) for the given series indices. If not arguments are given, returns the last result. If one index is given, returns the result corresponding to the last series 0. etc. 
    def get_result(self, *args):
        """ get_result(series0_index, series1_index, series2_index, ...)

        Get the result (as returned by the overloaded experiment()) for
        the given series indices. If no arguments are given, returns the
        last result. If one index is given, returns the result
        corresponding to the last series 0, etc.

        None can be given for up to one series index, in which case a
        list of results is returned corresponding to the range of values
        given in set_series_params().
        """
        # The last result
        if len(args) == 0:
            return self._one_result

        # The length of the args determines the series_nr
        series_nr = len(args) - 1

        # Get result list
        try:
            L0 = self._get_list_member(NAME_SERIES_RESULTS, series_nr)
        except KeyError:
            raise ValueError('More indices supplied than series available.')

        # Get amount of None's
        none_count = list(args).count(None)

        if none_count == 0:
            # Return single result object
            L = L0
            for arg in reversed(args):
                if isinstance(arg, int):
                    L = L[arg]
                else:
                    raise ValueError(
                        'get_experiment_result only accepts integers.')
            result = L

        elif none_count == 1:
            # Return list of result objects. Find objects one by one.
            # Init
            result = []
            i = -1
            while i > -2:  # i is set to -9 (to stop) when out of range
                i += 1
                # Find scalar value, while filling in an index where it
                # is None
                L = L0
                for arg in reversed(args):
                    if arg is None:
                        if i >= len(L):
                            i = -9
                            break
                        arg = i
                    if isinstance(arg, int):
                        L = L[arg]
                    else:
                        raise ValueError(
                            'get_experiment_result only accepts integers.')
                else:
                    result.append(L)

        else:
            # Not possible
            raise ValueError('Can only use one None value.')

        # Return result (which should now be an object as returned
        # by experiment)
        return result

    def experiment(self, params):
        """ experiment(params)

        This is the method that needs to be implemented in order for
        the experiment to do what you want.
        """
        # NotImplemented is a value, not an exception; raise the error class
        raise NotImplementedError("This method needs to be implemented.")

    def quantify_results(self):
        """ quantify_results()

        Implement this method to perform post-processing, to quantify
        the results. This might not be necessary in all situations.

        Hint
        ----
        When implementing this method, use get_result() to obtain
        results and return them in a way that allows easy representation.
        """
        raise NotImplementedError("This method needs to be implemented.")

    def show_results(self):
        """ show_results()

        Implement this method to show the results.

        Hint
        ----
        When implementing this method, use quantify_results() to obtain
        results ready for displaying and then display the result as
        appropriate.
        """
        raise NotImplementedError("This method needs to be implemented.")


if __name__ == '__main__':
    # Test experiment

    # Let's say we want to estimate for which value of x some magic process
    # is maximum. We simulate this process using noise. By using a seed,
    # the experiments are repeatable.

    import time
    np.random.seed(1234)

    params = ssdf.new()
    params.noise_level = 3
    params.x = 3

    class TestExperiment(Experiment):
        """ In this experiment we will use series 0 to repeat the process
        (using different instantiations of the noise). We will use
        series 1 to vary the parameter x of which we want to know the
        optimum value.
        """

        def experiment(self, params):
            """ experiment(params)

            Our magic box. Given params.x, it calculates a value. In a
            real experiment, this calculation may be unknown to us.
            """
            t0 = time.time()
            noise = np.random.normal(0, params.noise_level)
            value = -1 * params.x**2 + 10 * params.x + noise
            t1 = time.time()
            # Return value and elapsed time to calculate
            return float(value), t1 - t0
""" # Get params for different series param_x, values_x = self.get_series_params(1) param_iter, values_iter = self.get_series_params(0) # Set result index if what=='value': result_index = 0 elif what=='time': result_index = 1 else: raise ValueError('Not sure what you want to quantify.') # Collect results for different x (by averaging over multiple iters) results = [] for i in range(len(values_x)): result = 0 for j in range(len(values_iter)): raw = self.get_result(j, i) result += raw[result_index] results.append( float(result)/len(values_iter) ) # Average # Done return results def show_results(self, what='value'): """ show_results() Show the results. """ # Get params for x param_x, values_x = self.get_series_params(1) # Get results results = self.quantify_results(what) # Plot or show try: import visvis as vv vv.figure(); vv.plot(values_x, results) vv.xlabel(param_x); vv.ylabel('values') except ImportError: print('param', param_x, ': values') for x, r in zip(values_x, results): print(x, ':', r) # Create experiment db = Database() exp = TestExperiment(params, db) # Set series exp.set_series_params(0, 'noise_iter', range(100, 110)) exp.set_series_params(1, 'x', range(10)) # Run experiment and show results exp.do_experiment_series(1) exp.show_results() PKRp4Kd77pirt/fitting.pyimport numpy as np import scipy.linalg from pirt import PointSet if True: # Precalculate a matrix for fitting a quadratic polynom to three points # to find the subpixel maximum value in an 1D image. # a x x + b x + c _precalculated_A1 = np.matrix(" 1 -1 1; 0 0 1; 1 1 1") # quadratic 3 points 1D _precalculated_Ai1 = scipy.linalg.pinv(_precalculated_A1) # Precalculate a matrix for fitting a quadratic polynom to five points # to find the subpixel maximum value in an 2D image. Note that this is the # same as fitting two times a 1D polynom. # a x x + b y y + c x + d y + e = value tmp = '0 1 0 -1 1 ;' # x = 0, y = -1 tmp += '1 0 -1 0 1 ;' # x = -1, y = 0 tmp += '0 0 0 0 1 ;' # x = 0, y = 0 tmp += '1 0 1 0 1 ;' # x = 1, y = 0 tmp += '0 1 0 1 1 ' # x = 0, y = 1 _precalculated_A2 = np.matrix(tmp) _precalculated_Ai2 = scipy.linalg.pinv(_precalculated_A2) del tmp def fit_lq1(pp): """ fit_lq1(points) -> t_max, [a,b,c] Fit quadratic polynom to three points in 1D. If more than three points are given, the result is the least squares solution. points can be a 2D ndarray object, resulting in a general solution. If only 3 values are given (as list of numpy array), the point locations are assumed at t=(-1,0,1). """ if isinstance(pp, np.ndarray) and pp.ndim == 2: # Arbitraty position of values. Apply general approach # Prepare A A = PointSet(3) for x in pp[:,0]: A.append(x**2, x, 1) Ai = scipy.linalg.pinv(np.matrix(A)) # Prepare B B = np.matrix(pp[:,1]) # Solve X = Ai * B.T # Find extreme P = X.A.ravel().tolist() x = -0.5 * P[1]/P[0] # Done return x, P else: # Values defined at fixed positions (-1,0,1) # Make suitable form multiplication B = np.matrix(pp[0:3]).transpose() # Solve X = _precalculated_Ai1 * B # Find extreme P = X.A.ravel().tolist() x = -0.5 * P[1]/P[0] # done return x, P def fit_lq2(patch, sample=False): """ fit_lq2(patch) --> x_max, y_max Quadratic (least squares) 2d fitting and subsequent finding of real max. Patch is a matrix of the 9 pixels around the extreme. Uses the centre and its 4 direct neighours, as specified in patch. fit_lq2(patch, True) will also return a 300x300 image illustrating the surface in the 3x3 area. 
""" # Get measurements at the points in A patch = patch.ravel() I = [1,3,4,5,7] # to correspond with the order of the eqs above... B = np.matrix(patch[I]).transpose() # Calculate X, which is [a,b,c,d,e] X = _precalculated_Ai2 * B # Make array, such that ravel works, and convert to list P = np.array(X).ravel().tolist() # Make tuple, so we can unpack! a,b,c,d,e = tuple(P) # Differentiating the equation to x: x = -0.5*c/a y = -0.5*d/b # Produce a sample of the fitted surface if sample: vals = np.zeros((300,300)) for ix in range(300): for iy in range(300): xx = ix/100.0-1.5 yy = iy/100.0-1.5 vals[iy,ix] = a*xx*xx + b*yy*yy + c*xx + d*yy + e #print a*x*x + b*y*y + c*x + d*y + e return x,y, vals else: return x,y PKf4KlZ88pirt/gaussfun.py""" The gaussfun module implements functions for diffusion and Gaussian derivatives for data of any dimension. Contents of this module: * gaussiankernel - Create a Gaussian kernel * gaussiankernel2 - Create a 2D Gaussian kernel * diffusionkernel - Create a discrete analog to the Gaussian kernel * gfilter - Filter data with Gaussian (derivative) kernels * diffuse - Filter data with true discrete diffusion kernels * gfilter2 - Filter data by specifying sigma in world coordinates * diffuse2 - Diffuse data by specifying sigma in world coordinates """ # SHORTED VERSION WITHOUT PYRAMID STUFF import numpy as np import scipy.ndimage from . import Aarray ## Kernels def _gaussiankernel(sigma, order, t): """ _gaussiankernel(sigma, order, t) Calculate a Gaussian kernel of the given sigma and with the given order, using the given t-values. """ # if sigma 0, kernel is a single 1. if sigma==0: return np.array([1.0]) # precalculate some stuff sigma2 = sigma**2 sqrt2 = np.sqrt(2) # Calculate the gaussian, it is unnormalized. We'll normalize at the end. basegauss = np.exp(- t**2 / (2*sigma2) ) # Scale the t-vector, what we actually do is H( t/(sigma*sqrt2) ), # where H() is the Hermite polynomial. x = t / (sigma*sqrt2) # Depending on the order, calculate the Hermite polynomial (physicists # notation). We let Mathematica calculate these, and put the first 20 # orders in here. 
    if order < 0:
        raise Exception("The order should not be negative!")
    elif order == 0:
        part = 1
    elif order == 1:
        part = 2 * x
    elif order == 2:
        part = -2 + 4 * x**2
    elif order == 3:
        part = -12 * x + 8 * x**3
    elif order == 4:
        part = 12 - 48 * x**2 + 16 * x**4
    elif order == 5:
        part = 120 * x - 160 * x**3 + 32 * x**5
    elif order == 6:
        part = -120 + 720 * x**2 - 480 * x**4 + 64 * x**6
    elif order == 7:
        part = -1680 * x + 3360 * x**3 - 1344 * x**5 + 128 * x**7
    elif order == 8:
        part = 1680 - 13440 * x**2 + 13440 * x**4 - 3584 * x**6 + 256 * x**8
    elif order == 9:
        part = (30240 * x - 80640 * x**3 + 48384 * x**5 - 9216 * x**7 +
                512 * x**9)
    elif order == 10:
        part = (-30240 + 302400 * x**2 - 403200 * x**4 + 161280 * x**6 -
                23040 * x**8 + 1024 * x**10)
    elif order == 11:
        part = (-665280 * x + 2217600 * x**3 - 1774080 * x**5 +
                506880 * x**7 - 56320 * x**9 + 2048 * x**11)
    elif order == 12:
        part = (665280 - 7983360 * x**2 + 13305600 * x**4 - 7096320 * x**6 +
                1520640 * x**8 - 135168 * x**10 + 4096 * x**12)
    elif order == 13:
        part = (17297280 * x - 69189120 * x**3 + 69189120 * x**5 -
                26357760 * x**7 + 4392960 * x**9 - 319488 * x**11 +
                8192 * x**13)
    elif order == 14:
        part = (-17297280 + 242161920 * x**2 - 484323840 * x**4 +
                322882560 * x**6 - 92252160 * x**8 + 12300288 * x**10 -
                745472 * x**12 + 16384 * x**14)
    elif order == 15:
        part = (-518918400 * x + 2421619200 * x**3 - 2905943040 * x**5 +
                1383782400 * x**7 - 307507200 * x**9 + 33546240 * x**11 -
                1720320 * x**13 + 32768 * x**15)
    elif order == 16:
        part = (518918400 - 8302694400 * x**2 + 19372953600 * x**4 -
                15498362880 * x**6 + 5535129600 * x**8 - 984023040 * x**10 +
                89456640 * x**12 - 3932160 * x**14 + 65536 * x**16)
    else:
        raise Exception("This order is not implemented!")

    # Apply Hermite polynomial to gauss
    k = (-1)**order * part * basegauss

    ## Normalize

    # By calculating the normalization factor by integrating the gauss,
    # rather than using the expression 1/(sigma*sqrt(2pi)), we know that
    # the KERNEL volume is 1 when the order is 0.
    norm_default = 1 / basegauss.sum()  # == 1 / ( sigma * sqrt(2*pi) )

    # Here's another normalization term that we need because we use the
    # Hermite polynomials.
    norm_hermite = 1 / (sigma * sqrt2)**order

    # A note on Gaussian derivatives: as sigma increases, the resulting
    # image (when smoothed) will have smaller intensities. To correct for
    # this (if this is necessary) a diffusion normalization term can be
    # applied: sigma**2

    # Normalize and return
    return k * (norm_default * norm_hermite)

def gaussiankernel(sigma, order=0, N=None, returnt=False, warn=True):
    """ gaussiankernel(sigma, order=0, N=None, returnt=False, warn=True)

    Creates a 1D gaussian derivative kernel with the given sigma
    and the given order. (An order of 0 is a "regular" Gaussian.)

    The returned kernel is a column vector, thus working in the first
    dimension (in images, this often is y).

    The returned kernel is odd by default. Using N one can specify the
    full kernel size (if not int, the ceil operator is applied). By
    specifying a negative value for N, the tail length (number of
    elements on both sides of the center element) can be specified.
    The total kernel size then becomes ceil(-N)*2+1. Though the method
    to supply it may be a bit obscure, this measure can be handy, since
    the tail length is often related to the sigma. If not given, the
    optimal N is determined automatically, depending on sigma and order.

    If the given scale is too small for the given order, a warning is
    produced (unless warn==False).

    ----- Used Literature:

    Koenderink, J. J.
    The structure of images.
    Biological Cybernetics 50, 5 (1984), 363-370.

    Lindeberg, T.
    Scale-space for discrete signals.
    IEEE Transactions on Pattern Analysis and Machine Intelligence 12, 3
    (1990), 234-254.

    Ter Haar Romeny, B. M., Niessen, W. J., Wilting, J., and Florack, L. M. J.
    Differential structure of images: Accuracy of representation.
    In First IEEE International Conference on Image Processing,
    (Austin, TX) (1994).
    """
    # Check inputs
    if not N:
        # Calculate ratio that is small, but large enough to prevent errors
        ratio = 3 + 0.25 * order - 2.5 / ((order - 6)**2 + (order - 9)**2)
        # Calculate N
        N = int(np.ceil(ratio * sigma)) * 2 + 1
    elif N > 0:
        if not isinstance(N, int):
            N = int(np.ceil(N))
    elif N < 0:
        N = -N
        if not isinstance(N, int):
            N = int(np.ceil(N))
        N = N * 2 + 1

    # Check whether given sigma is large enough
    sigmaMin = 0.5 + order**(0.62) / 5
    if warn and sigma < sigmaMin:
        print('WARNING: The scale (sigma) is very small for the given '
              'order, better use a larger scale!')

    # Create t vector which indicates the x-position
    t = np.arange(-N / 2.0 + 0.5, N / 2.0, 1.0, dtype=np.float64)

    # Get kernel
    k = _gaussiankernel(sigma, order, t)

    # Done
    if returnt:
        return k, t
    else:
        return k


def gaussiankernel2(sigma, ox, oy, N=None):
    """ gaussiankernel2(sigma, ox, oy, N=-3*sigma)

    Create a 2D Gaussian kernel.
    """
    # Default N
    if N is None:
        N = -3 * sigma

    # Calculate kernels
    k1 = gaussiankernel(sigma, ox, N)
    k2 = gaussiankernel(sigma, oy, N)

    # Matrix multiply
    k = np.matrix(k1).T * np.matrix(k2)
    return k.A


def diffusionkernel(sigma, N=4, returnt=False):
    """ diffusionkernel(sigma, N=4, returnt=False)

    A discrete analog to the continuous Gaussian kernel,
    as proposed by Toni Lindeberg.

    N is the tail length factor (relative to sigma).
    """
    # Make sure sigma is float
    sigma = float(sigma)

    # Often referred to as the scale parameter, or t
    sigma2 = sigma * sigma

    # Where we start, from which we go backwards
    # This is also the tail length
    if N > 0:
        nstart = int(np.ceil(N * sigma)) + 1
    else:
        nstart = abs(N) + 1

    # Allocate kernel and times
    t = np.arange(-nstart, nstart + 1, dtype='float64')
    k = np.zeros_like(t)

    # Make a start
    n = nstart  # Center (t[nstart]==0)
    k[n + nstart] = 0
    n = n - 1
    k[n + nstart] = 0.01

    # Iterate!
    for n in range(nstart - 1, 0, -1):
        # Calculate previous
        k[(n - 1) + nstart] = 2 * n / sigma2 * k[n + nstart] + k[(n + 1) + nstart]

    # The part at the left can be erroneous, so let's use the right part only
    k[:nstart] = np.flipud(k[-nstart:])

    # Remove the tail, which is zero
    k = k[1:-1]
    t = t[1:-1]

    # Normalize
    k = k / k.sum()

    # The function T that we look for is T = e^(-sigma2) * I(n,sigma2).
    # We found I(n,sigma2), and because we normalized it, the
    # normalization term e^(-sigma2) is no longer necessary.

    # Done
    if returnt:
        return k, t
    else:
        return k
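
# A small sanity check (added for illustration): an order-0 kernel is
# normalized to unit sum, so smoothing preserves the mean intensity.
#
#     k = gaussiankernel(2.0)
#     # k.size == 13 and abs(k.sum() - 1.0) < 1e-12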

## Filters

def gfilter(L, sigma, order=0, mode='constant', warn=True):
    """ gfilter(L, sigma, order=0, mode='constant', warn=True)

    Gaussian filtering and Gaussian derivative filters.

    Parameters
    ----------
    L : np.ndarray
        The input data to filter
    sigma : scalar or list-of-scalars
        The smoothing parameter, can be given for each dimension
    order : int or list-of-ints
        The order of the derivative, can be given for each dimension
    mode : {'reflect','constant','nearest','mirror', 'wrap'}
        Determines how edge effects are handled. (see
        scipy.ndimage.convolve1d)
    warn : boolean
        Whether to show a warning message if the sigma is too small to
        represent the required derivative.

    Notes
    =====
    Makes use of the separability property of the Gaussian by convolving
    1D kernels in each dimension.

    Example
    =======
    # Calculate the second order derivative with respect to x (Lxx)
    # (if the first dimension of the image is Y).
    result1 = gfilter( im, 2, [0,2] )
    # Calculate the first order derivative with respect to y and z (Lyz).
    result2 = gfilter( volume, 3, [0,1,1] )

    """
    # Store original
    Lo = L

    # Make sigma ok
    try:
        sigma = [sig for sig in sigma]
    except TypeError:
        sigma = [sigma for i in range(L.ndim)]

    # Same for order
    if order is None:
        order = 0
    try:
        order = [o for o in order]
    except TypeError:
        order = [order for i in range(L.ndim)]

    # Test sigma
    if len(sigma) != L.ndim:
        tmp = "the amount of sigmas given must match the dimensions of L!"
        raise Exception(tmp)
    # Test order
    if len(order) != L.ndim:
        tmp = "the amount of orders given must match the dimensions of L!"
        raise Exception(tmp)

    for d in range(L.ndim):
        # Get kernel
        k = gaussiankernel(sigma[d], order[d], warn=warn)
        # Convolve
        L = scipy.ndimage.convolve1d(L, k, d, mode=mode)

    # Make Aarray if we can
    if hasattr(Lo, 'sampling') and hasattr(Lo, 'origin'):
        L = Aarray(L, Lo.sampling, Lo.origin)

    # Done
    return L


def diffuse(L, sigma, mode='nearest'):
    """ diffuse(L, sigma)

    Diffusion using a discrete variant of the diffusion operator.

    Parameters
    ----------
    L : np.ndarray
        The input data to filter
    sigma : scalar or list-of-scalars
        The smoothing parameter, can be given for each dimension

    Details
    -------
    In the continuous domain, the Gaussian is the only true diffusion
    operator. However, by using a sampled Gaussian kernel in the
    discrete domain, errors are introduced, particularly for small
    sigma. This implementation uses a discrete variant of the diffusion
    operator, which is based on modified Bessel functions. This results
    in a better approximation of the diffusion process, particularly
    when applying the diffusion recursively.
    There are also advantages for calculating derivatives, see below.

    Based on:
    Lindeberg, T. "Discrete derivative approximations with scale-space
    properties: A basis for low-level feature extraction",
    J. of Mathematical Imaging and Vision, 3(4), pp. 349--376, 1993.

    Calculating derivatives
    -----------------------
    Because this implementation applies diffusion using a discrete
    representation of the diffusion kernel, one can calculate true
    derivatives using small-support derivative operators. For 1D:
      * Lx  = 0.5 * ( L[x+1] - L[x-1] )
      * Lxx = L[x+1] - 2*L[x] + L[x-1]
    """
    # Store original
    Lo = L

    # Make sigma ok
    try:
        sigma = [sig for sig in sigma]
    except TypeError:
        sigma = [sigma for i in range(L.ndim)]

    # Test sigma
    if len(sigma) != L.ndim:
        tmp = "the amount of sigmas given must match the dimensions of L!"
        raise Exception(tmp)

    # Diffuse
    for d in range(L.ndim):
        # Get kernel
        k = diffusionkernel(sigma[d])
        # Convolve
        L = scipy.ndimage.convolve1d(L, k, d, mode=mode)

    # Make Aarray if we can
    if hasattr(Lo, 'sampling') and hasattr(Lo, 'origin'):
        L = Aarray(L, Lo.sampling, Lo.origin)

    # Done
    return L


def gfilter2(L, scale, order=0, mode='reflect', warn=True):
    """ gfilter2(L, scale, order=0, mode='reflect', warn=True)

    Apply Gaussian filtering by specifying a scale in world coordinates
    rather than a sigma. This function determines the sigmas to apply,
    based on the sampling of the elements.

    See gfilter for more information.

    (If L is not an Aarray, this function yields the same result as
    gfilter.)
    """
    # Determine sigmas
    if hasattr(L, 'sampling'):
        sigmas = [float(scale) / s for s in L.sampling]
    else:
        sigmas = float(scale)

    # Filter
    return gfilter(L, sigmas, order, mode, warn)
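
# A minimal sketch (added for illustration): with anisotropic sampling,
# gfilter2 converts a world-coordinate scale into per-axis sigmas.
#
#     im = Aarray(np.zeros((64, 64), 'float32'), sampling=(0.5, 2.0))
#     smooth = gfilter2(im, scale=2.0)   # sigmas of (4.0, 1.0) elements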

def diffuse2(L, scale, mode='nearest'):
    """ diffuse2(L, scale, mode='nearest')

    Apply diffusion by specifying a scale in world coordinates rather
    than a sigma. This function determines the sigmas to apply, based
    on the sampling of the elements.

    See diffuse for more information.

    (If L is not an Aarray, this function yields the same result as
    diffuse.)
    """
    # Determine sigmas
    if hasattr(L, 'sampling'):
        sigmas = [float(scale) / s for s in L.sampling]
    else:
        sigmas = float(scale)

    # Filter
    return diffuse(L, sigmas, mode)


# ======== pirt/new_pointset.py ========

import time  # noqa - kept from the original, though unused here

import numpy as np

# Visvis contains a Pointset and Point class, which proved useful, but had
# downsides, mostly because they do not inherit from ndarray, but wrap it.
# This new version subclasses ndarray so that it can be used much more
# generically. This copy was taken from stentseg. But that should not
# matter, duck typing for the win.

# todo: Awesome! but now where do I put this ...
# todo: rename to avoid name clashes with vv.Pointset?
# PointSet is a bit ugly, but "pointset" is nice because it does not imply
# plural. Vector seems more general (points are subset of vectors?) but it
# pronounces less nicely.

class PointSet(np.ndarray):
    """ The PointSet class can be used to represent sets of points or
    vectors, as well as singleton points. The dimensionality of the
    vectors in the pointset can be anything, and the dtype can be any
    of those supported by numpy.

    This class inherits from np.ndarray, which makes it very flexible;
    you can treat it as a regular array, and also pass it to functions
    that require a numpy array. The shape of the array is NxD, with N
    the number of points, and D the dimensionality of each point.

    This class has a __repr__ that displays a pointset-aware
    description. To see the underlying array, use print, or use
    pointset[...] to convert to a pure numpy array.

    Parameters
    ----------
    input : various
        If input is an integer, it specifies the dimensionality of the
        array, and an empty pointset is created. If input is a list, it
        specifies a point, with which the pointset is initialized. If
        input is a numpy array, the pointset is a view on that array
        (ndim must be 2).
    dtype : dtype description
        The data type of the numpy array. If not given, the result will
        be float32.
    """

    def __new__(cls, input, dtype=np.float32):
        if isinstance(input, int):
            # ndim is given, start empty pointset
            return np.ndarray.__new__(cls, (0, input), dtype=dtype)

        elif isinstance(input, (tuple, list, reversed)):
            # Point is given, initialize with that point
            input = list(input)
            if not all([isinstance(i, (float, int)) for i in input]):
                raise ValueError('Lists given to PointSet must be all scalars.')
            a = np.array(input, dtype=dtype)
            a.shape = 1, len(a)
            return a.view(cls)

        elif isinstance(input, np.ndarray):
            # Array is given, turn into pointset
            if not input.ndim == 2:
                raise ValueError('Arrays given to PointSet must have ndim=2.')
            if input.dtype != np.dtype(dtype):
                input = input.astype(dtype)
            return input.view(cls)

        else:
            # Don't know what to do
            raise ValueError('Invalid type to instantiate PointSet with (%r)'
                             % type(input))

    def __str__(self):
        """ print() shows elements as normal. """
        return self[...].__str__()

    def __repr__(self):
        """ Return short (one line) string representation of the pointset.
        (The descriptive strings below are reconstructed; the angle
        brackets were lost in the original dump.)
        """
        if len(self) == 0:
            return "<empty PointSet for points of %i dimensions>" % (
                self.shape[1], )
        elif len(self) == 1:
            r = ', '.join(['%1.4g' % i for i in self[0, :]])
            return "<PointSet with 1 point: %s>" % r
        else:
            return "<PointSet with %i points of %i dimensions>" % (
                len(self), self.shape[1])
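
    # A small usage sketch (added; not part of the original class):
    #
    #     pp = PointSet(2)        # empty set of 2D points
    #     pp.append(1.0, 2.0)
    #     pp.append(3.0, 4.0)
    #     repr(pp)                # "<PointSet with 2 points of 2 dimensions>"
    #     pp.norm()               # array([ 2.236...,  5.  ])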
""" try: self.resize(self.shape[0]+1, self.shape[1], refcheck=False) except Exception: return False else: self.resize(self.shape[0]-1, self.shape[1], refcheck=False) return True def __array_wrap__(self, out, context=None): """ So that we return a native numpy array (or scalar) when a reducting ufunc is applied (such as sum(), std(), etc.) """ if not out.shape: return out.dtype.type(out) # Scalar elif out.shape != self.shape: return np.asarray(out) else: return out # Type Image def ravel(self, *args, **kwargs): # Return numpy array on ravel return np.ndarray.ravel(self,*args, **kwargs)[...] def __getitem__(self, index): """ Get a point or part of the pointset. """ # Single index from numpy scalar if isinstance(index, np.ndarray) and index.size==1: index = int(index) if isinstance(index, tuple): # Multiple indexes: return as array return np.asarray(self)[index] elif isinstance(index, slice): # Slice: return subset return np.ndarray.__getitem__(self, index) elif isinstance(index, int): # Single index: return point a = np.ndarray.__getitem__(self, index) a.shape = 1, len(a) return a else: # Probably some other form of subslicing return np.asarray(self)[index] def append(self, *p): """ Append a point to this pointset. One can give the elements of the points as separate arguments. Alternatively, a tuple or numpy array can be given. """ p = self._as_point(*p) # resize self.resize((self.shape[0]+1, self.shape[1]), refcheck=False) # append point self[-1] = p def extend(self, data): """ Extend the point set with more points. The shape[1] of the given data must match with that of this array. """ # Turn data into PointSet, which will do some checks for us pp = PointSet(data) # Check dimensions if pp.shape[1] != self.shape[1]: raise ValueError("Cannot extend pointset, because vector sizes does not match.") # Store current length curlen = len(self) # Resize newlen = curlen + len(pp) self.resize((newlen, self.shape[1]), refcheck=False) # Insert new data self[curlen:,:] = pp def insert(self, index, *p): """ Insert a point at the given index. """ # check index if index < 0: index = len(self) + index if index<0 or index>len(self): raise IndexError("Index to insert point out of range.") # make sure p is a point p = self._as_point(*p) # Get data from index to end tmp = self[index:,:].copy() # Resize self.resize((self.shape[0]+1, self.shape[1]), refcheck=False) # Insert point tmp = self[index:,:].copy() self[index] = p self[index+1:,:] = tmp def contains(self, *p): """ Check whether the given point is already in this set. """ if not len(self): return False # make sure p is a point p = self._as_point(*p) mask = np.zeros((len(self),),dtype='uint8') for i in range(self.shape[1]): mask += self[:,i]==p[i] if mask.max() >= self.shape[1]: return True else: return False def remove(self, *p, **kwargs): """ Remove the given point from the point set. Produces an error if such a point is not present. If the keyword argument `all` is given and True, all occurances of that point are removed. Otherwise only the first occurance is removed. 
""" # Parse kwargs all = kwargs.pop('all', False) if kwargs: raise ValueError('Invalid keyword arguments: %r' % list(kwargs.keys()) ) # make sure p is a point p = self._as_point(*p) # calculate mask mask = np.zeros((len(self),),dtype='uint8') for i in range(self.shape[1]): mask += self[:,i] == p[i] # find points given I, = np.where(mask==self.shape[1]) # produce error if not found if len(I) == 0: raise ValueError("Given point to remove was not found.") # remove first point if all: # remove all points (backwards!) for i in reversed(I): del self[i] else: del self[ I[0] ] def __delitem__(self, index): """ Remove one or multiple points from the pointset. """ # If tuple, not valid if isinstance(index, tuple): raise IndexError("Can only remove points using 1D slicing.") # Get start/stop/step if isinstance(index, slice): start, stop, step = index.indices(self._len) else: start, stop, step = index, index+1, 1 # if stepping, do it the slow way if step > 1: indices = [i for i in range(start,stop, step)] for i in reversed(indices): del self[i] return # move latter block forward tmp = self[stop:] self[start:start+len(tmp)] = tmp # reduce length newlen = start + len(tmp) self.resize((newlen, self.shape[1]), refcheck=False) def pop(self, index=-1): """ Remove and returns a point from the pointset. Removes the last by default (which is more efficient than popping from anywhere else). """ # check index index2 = index if index < 0: index2 = len(self)+ index if index2<0 or index2>len(self): raise IndexError("Index to insert point out of range.") # get point p = self[index] # remove it if index == -1: # easy self.resize((self.shape[0]-1, self.shape[1]), refcheck=False) else: # The hard way del self[index] # return return p def _as_point(self, *p): """ Return as something that can be applied to a row in the array. Check whether the point-dimensions match with this point set. """ # the point directly given? if len(p)==1 and not isinstance(p[0], (float, int)): p = p[0] if isinstance(p, np.ndarray): p = p.ravel() elif not isinstance(p, (tuple, list)): raise ValueError('Invalid point') # check whether we can append it if len(p) != self.shape[1]: tmp = "Given point does not match dimension of pointset." raise ValueError(tmp) # done return p ## Math stuff def norm(self): """ Calculate the norm (length) of the vector. This is the same as the distance to the origin, but implemented a bit faster. """ # we could do something like: # return self.distance(Point(0,0,0)) # but we don't have to perform checks now, which is faster... dists = np.zeros( (self.shape[0],), np.float64) for i in range(self.shape[1]): dists += self[:,i].astype(np.float64)**2 return np.sqrt(dists) def normalize(self): """ Return normalized vector (to unit length). """ # calculate factor array f = 1.0/self.norm() f.shape = f.size,1 f = f.repeat(self.shape[1],1) # apply return self * f def normal(self): """ Calculate the normalized normal of a vector. Use (p1-p2).normal() to calculate the normal of the line p1-p2. Only works on 2D points. For 3D points use cross(). """ # check dims if self.shape[1] != 2: raise ValueError("Normal can only be calculated for 2D points.") # prepare a = self.copy() f = 1.0/self.norm() # swap xy, y goes minus tmp = a[:,0].copy() a[:,0] = a[:,1] * f a[:,1] = -tmp * f return a ## Math stuff on two vectors def _check_and_sort(self, p1, p2, what='something'): """ _check_and_sort(p1,p2, what='something') Check if the two things (self and a second point/pointset) can be used to calculate stuff. 
        Returns (p1, p2); if one is a point, p1 is it.
        """
        
        # if one is a singleton pointset, put it in p1
        if len(p2) == 1:
            p2, p1 = p1, p2
        
        # only pointsets of equal length can be used
        if len(p1) != len(p2) and len(p1) > 1 and len(p2) > 1:
            # define errors
            err = "To calculate %s with two pointsets, " % what
            err += "one must be singleton, or both must have the same size."
            raise ValueError(err)
        
        # check dimensions
        if p1.shape[1] != p2.shape[1]:
            tmp = "To calculate %s between two pointsets, " % what
            err = tmp + "their dimensions must be equal."
            raise ValueError(err)
        
        return p1, p2
    
    def distance(self, *p):
        """ Calculate the Euclidean distance between two points or
        pointsets. Use norm() to calculate the length of a vector.
        """
        
        # the point directly given?
        if len(p) == 1:
            p = p[0]
        
        # Make p a PointSet
        if not isinstance(p, PointSet):
            p = PointSet(p)
        
        # If one of the pointsets is singleton, put it in p1
        p1, p2 = self._check_and_sort(self, p, 'distance')
        
        # Calculate
        dists = np.zeros((p2.shape[0],), np.float64)
        for i in range(self.shape[1]):
            tmp = p1[:, i] - p2[:, i]
            dists += tmp.astype(np.float64)**2
        return np.sqrt(dists)
    
    def angle(self, *p):
        """ Calculate the angle (in radians) between two vectors. For 2D
        uses the arctan2 method so the angle has a sign. For 3D the
        angle is the smallest angle between the two vectors. If no
        point is given, the angle is calculated relative to the
        positive x-axis.
        """
        
        # the point directly given?
        if len(p) == 1:
            p = p[0]
        elif len(p) == 0:
            # use default point
            p = PointSet([0 for i in range(self.shape[1])])
            p[0, 0] = 1
        
        # Make p a PointSet
        if not isinstance(p, PointSet):
            p = PointSet(p)
        
        # check. Keep the correct order!
        self._check_and_sort(self, p, 'angle')
        p1, p2 = self, p
        
        if p1.shape[1] == 2:
            # calculate 2D case
            angs1 = np.arctan2(p1[:, 1], p1[:, 0])
            angs2 = np.arctan2(p2[:, 1], p2[:, 0])
            dangs = angs1 - angs2
            # make number between -pi and pi
            I = np.where(dangs < -np.pi)
            dangs[I] += 2*np.pi
            I = np.where(dangs > np.pi)
            dangs[I] -= 2*np.pi
            return dangs
        
        elif p1.shape[1] == 3:
            # calculate 3D case
            p1, p2 = p1.normalize(), p2.normalize()
            data = np.inner(p1, p2)  # Not np.dot()!
            # correct for round off errors (or we will get NANs)
            data[data > 1.0] = 1.0
            data[data < -1.0] = -1.0
            return np.arccos(data)
        
        else:
            # not possible
            raise ValueError("Can only calculate angle for 2D and 3D vectors.")
    
    def angle2(self, *p):
        """ Calculate the angle (in radians) of the vector between
        two points.
        
        Say we have p1=(3,4) and p2=(2,1). ``p1.angle(p2)`` returns the
        difference of the angles of the two vectors:
        ``0.142 = 0.927 - 0.785``. ``p1.angle2(p2)`` returns the angle
        of the difference vector ``(1,3)``:
        ``p1.angle2(p2) == (p1-p2).angle()``
        """
        
        # the point directly given?
        if len(p) == 1:
            p = p[0]
        elif len(p) == 0:
            raise ValueError("Function angle2() requires another point.")
        
        # Make p a PointSet
        if not isinstance(p, PointSet):
            p = PointSet(p)
        
        # check. Keep the correct order!
        self._check_and_sort(self, p, 'angle')
        p1, p2 = self, p
        
        if p1.shape[1] in [2, 3]:
            # subtract and use angle()
            return (p1-p2).angle()
            # meaning: dangs = np.arctan2(data2[:,1]-data1[:,1], data2[:,0]-data1[:,0])
        else:
            # not possible
            raise ValueError("Can only calculate angle for 2D and 3D vectors.")
    
    def dot(self, *p):
        """ Calculate the dot product of two pointsets. The dot product
        is the standard inner product of the orthonormal Euclidean
        space. The sizes of the point sets should match, or one point
        set should be singleton.
        """
        
        # the point directly given?
        if len(p) == 1:
            p = p[0]
        
        # Make p a PointSet
        if not isinstance(p, PointSet):
            p = PointSet(p)
        
        # check
        p1, p2 = self._check_and_sort(self, p, 'dot')
        
        # calculate
        data = np.zeros((p2.shape[0],), np.float64)
        for i in range(p1.shape[1]):
            tmp = p1[:, i] * p2[:, i]
            data += tmp
        return data
    
    def cross(self, *p):
        """ Calculate the cross product of two 3D vectors. Given two
        vectors, returns the vector that is orthogonal to both vectors.
        The right hand rule is applied; this vector is the middle
        finger, the argument the index finger, the returned vector
        points in the direction of the thumb.
        """
        
        # the point directly given?
        if len(p) == 1:
            p = p[0]
        
        if not self.shape[1] == 3:
            raise ValueError("Cross product only works for 3D vectors!")
        
        # Make p a PointSet
        if not isinstance(p, PointSet):
            p = PointSet(p)
        
        # check (note that we use the original order for calculation)
        p1, p2 = self._check_and_sort(self, p, 'cross')
        
        # calculate
        a, b = self, p
        data = np.zeros(p2.shape, np.float64)
        data[:, 0] = a[:, 1]*b[:, 2] - a[:, 2]*b[:, 1]
        data[:, 1] = a[:, 2]*b[:, 0] - a[:, 0]*b[:, 2]
        data[:, 2] = a[:, 0]*b[:, 1] - a[:, 1]*b[:, 0]
        
        # return
        return PointSet(data)


if __name__ == '__main__':
    import visvis as vv
    
    a = PointSet(2, np.float32)
    b = vv.Pointset(2)
    
    pp = np.random.uniform(0, 1, (100, 2))
    
    t0 = time.time()
    for i in range(len(pp)):
        a.append(pp[i])
    print('numpy resize: %1.3f s' % (time.time()-t0))
    
    t0 = time.time()
    for i in range(len(pp)):
        b.append(pp[i])
    print('PointSet append: %1.3f s' % (time.time()-t0))


# ===== pirt/pyramid.py =====

import numpy as np

import pirt
from .gaussfun import diffuse2
from . import Aarray


class BasePyramid:
    pass  # When implementing HaarPyramid, maybe use a base class

class HaarPyramid:
    pass  # Implement using Haar wavelets.


class ScaleSpacePyramid:
    """ ScaleSpacePyramid(data, min_scale=None, scale_offset=0,
    use_buffer=False, level_factor=2)
    
    The scale space pyramid class provides a way to manage a scale
    space pyramid. Given an input image (of arbitrary dimension), it
    provides two simple methods to obtain the image at a specified
    scale or level.
    
    Parameters
    ----------
    data : numpy array
        An array of any dimension. Should preferably be of float type.
    min_scale : scalar, optional
        The minimum scale to sample from the pyramid. If not given,
        scale_offset is used. If larger than zero, the image is smoothed
        to this scale before creating the zeroth level. If the
        smoothness is sufficient, the data is also downsampled. This
        makes a registration algorithm much faster, because the image
        data for the final scales does not have an unnecessarily high
        resolution.
    scale_offset : scalar
        The scale of the given data. Use this if the data is already
        smooth. Be careful not to set this value too high, as aliasing
        artifacts may be introduced. Default zero.
    use_buffer : bool
        Whether a result obtained with get_scale() is buffered for
        later use. Only one image is buffered. Default False.
    level_factor : scalar
        The scale distance between two levels. A larger number means
        saving a bit of memory in exchange for speed. You're probably
        fine with 2.0.
    
    Notes
    -----
    Note that this scale space representation handles anisotropic arrays
    and that scale is expressed in world units. Note that images at
    higher levels do not always have a factor 2 sampling difference with
    the original! This is because the first and last pixel are kept the
    same, and the number of pixels is decreased with factors of two (or
    almost a factor of two if the number is uneven). The images always
    have the same offset though.
    
    We adopt the following concepts:
    
    * level: the level in the pyramid.
Each level is a factor two smaller in size (in each dimension) than the previous. * scale: the scale in world coordinates """ def __init__(self, data, min_scale=None, scale_offset=0, use_buffer=False, level_factor=2): # Make sure data is an anisotropic array if not hasattr(data, 'sampling'): data = Aarray(data) # Check scale_offset scale_offset = float(scale_offset) if scale_offset < 0.0: raise ValueError('scale_offset should be >= 0.') # Check min_scale if min_scale is None: min_scale = scale_offset else: min_scale = float(min_scale) if min_scale < scale_offset: raise ValueError('min_scale should be >= scale_offset.') # Set lowest level image self._initialize_level0(data, min_scale, scale_offset) # Store level factor self._level_factor = float(level_factor) if self._level_factor <= 1: raise ValueError('Level factor must be > 1.') # Buffer to store image for a specific scale self._use_buffer = bool(use_buffer) self._buffer = None def _initialize_level0(self, data, min_scale, scale_offset): """ _initialize_level0(data, min_scale, scale_offset) Smooth the input image if necessary so it is at min_scale. The data is resampled at lower resolution if the scale is high enough. """ # Make image float if data.dtype not in [np.float32, np.float64]: data = data.astype('float32') # Calculate sigma (in world coordinates): amount of smoothing to apply sigma1 = scale_offset # scale that the image already has sigma2 = min_scale # scale that we want the image to have sigma = (sigma2**2 - sigma1**2)**0.5 # Smooth if sigma > 0: data = diffuse2(data, sigma) # Get scale in pixel coords pixel_scales = [min_scale/s for s in data.sampling] # Sample at lower rate? # This will make the data more isotropic if it was not if min_scale > 0: # Get zoom factors (should only be <= 1) zoom_factors = [min(1, 1.0/s) for s in pixel_scales] # Only resample if one dim can be reduced by more than 10% if min(zoom_factors) < 0.9: data = pirt.interp.zoom(data, zoom_factors, order=3, prefilter=False) # Set properties data._pyramid_scale = min_scale data._pyramid_level = 0 # Store self._levels = [data] def calculate(self, levels=None, min_shape=None): """ calculate(levels=None, min_shape=None) Create the image pyramid now. Specify either the amount of levels, or the minimum shape component of the highest level. If neither levels nor min_shape is given, uses min_shape=8. Returns (max_level, max_sigma) of the current pyramid. """ # Check if None not in [levels, min_shape]: raise ValueError('You cannot specify both levels and min_shape') if levels is None and min_shape is None: min_shape = 8 # Add levels if levels is None: while min(self._levels[-1].shape) >= min_shape*2: self._add_Level() else: while len(self._levels) < levels: self._add_Level() # Return maxLevel = len(self._levels)-1 maxSigma = self._levels[-1]._pyramid_scale return maxLevel, maxSigma def get_scale(self, scale=None): """ get_scale(scale) Get the image at the specified scale (expressed in world units). For higher scales, the image has a smaller shape than the original image. If min_scale and scale_offset are not used, a scale of 0 represents the original image. To calculate the result, the image at the level corresponding to the nearest lower scale is obtained, and diffused an extra bit to obtain the requested scale. The result is buffered (if the pyramid was instantiated with use_buffer=True), such that calling this function multiple times with the same scale is much faster. Only buffers the last used scale. 
        The returned image has two added properties: _pyramid_scale and
        _pyramid_level, which specify the image scale and level in the
        pyramid.
        
        """
        
        # Check
        min_scale = self._levels[0]._pyramid_scale
        if scale is None:
            scale = min_scale
        if scale < min_scale:
            raise ValueError("Scale should be at least min_scale (%1.2f)." % min_scale)
        
        # Can we use the buffer?
        if self._buffer and self._buffer[0] == scale:
            return self._buffer[1]
        
        # Determine level offset. We loop until we are one level too
        # high and then use the level below that.
        level = -1
        baseScale = -1
        while baseScale <= scale:
            level += 1
            baseScale = self.get_level(level)._pyramid_scale
        
        # Correct (we went one level too far)
        level = max(level-1, 0)
        
        # Get data
        data = self.get_level(level)
        
        # Calculate sigma (in world coordinates)
        sigma1 = data._pyramid_scale
        sigma2 = scale
        sigma = (sigma2**2 - sigma1**2)**0.5
        
        # Smooth a bit more
        if sigma > 0:
            data = diffuse2(data, sigma)
            data._pyramid_scale = scale
            data._pyramid_level = level
        
        # Set buffer and return
        if self._use_buffer:
            self._buffer = scale, data
        return data
    
    def get_level(self, level):
        """ get_level(level)
        
        Get the image at the specified (integer) level, zero being the
        lowest level (the original image). Each level is approximately
        a factor two smaller in size than the previous level. All
        levels are buffered.
        
        The returned image has two added properties: _pyramid_scale and
        _pyramid_level, which specify the image scale and level in the
        pyramid.
        
        """
        # Get integer level number and delta
        level_i = int(level)
        
        # Add levels if required
        while level_i >= len(self._levels):
            self._add_Level()
        
        # Get integer level data
        return self._levels[level_i]
    
    def _add_Level(self):
        """ _add_Level()
        
        Add a level to the scale space pyramid.
        
        """
        # Get data
        data = self._levels[-1]
        
        # Calculate scales (in world coords) needed to make the pixel-scales 2.0
        scales = [self._level_factor*s for s in data.sampling]
        
        # Calculate the amount of required smoothing (in world coords)
        sigma1 = data._pyramid_scale
        sigma2 = max(max(scales), sigma1*2.0)
        sigma = (sigma2**2 - sigma1**2)**0.5
        
        # Smooth
        data = diffuse2(data, sigma)
        
        # Downsample (do not take every other sample, because then we will
        # lose the last pixel if the shape is even!)
        if min(data.shape) > 8:
            factor = 1.0/self._level_factor
            data = pirt.interp.zoom(data, factor, order=3, prefilter=False)
        
        # Insert in levels
        data._pyramid_scale = sigma2
        data._pyramid_level = len(self._levels)
        self._levels.append(data)


# ===== pirt/randomdeformations.py =====

""" Module randomdeformations

Provides a means to produce random deformations. Defines a function
create_random_deformation() and a class to keep a set of random
deformations: RandomDeformations.

By setting the seed of the random number generator, the (pseudo) random
deformations can be repeated.
"""

import numpy as np

import pirt


# Due to the from_field_multiscale thingy at the end to make the deform
# injective, this function becomes really slow.

def create_random_deformation_gaussian(im, amplitude=1, min_sigma=10,
                                       nblobs=50, seed=None):
    """ create_random_deformation_gaussian(im, amplitude=1, min_sigma=10,
    nblobs=50, seed=None)
    
    Create a random deformation using Gaussian blobs of different
    scales. Returns a DeformationField instance.
    
    See also the class RandomDeformations.
    
    Parameters
    ----------
    im : numpy array
        The image to create a deformation field for.
    amplitude : scalar
        The relative amplitude of the deformations.
    min_sigma : scalar
        The smallest sigma to create Gaussian blobs for. The largest
        sigma is a quarter of the maximum shape element of the image.
    nblobs : integer
        The number of Gaussian blobs to compose the deformation with.
    seed : int or None
        Seed for the random numbers to draw. If you want to repeat the
        same deformations, apply the same seed multiple times.
    """
    
    # Seed generator
    np.random.seed(seed)
    
    fields = []
    for dimension in range(len(im.shape)):
        
        # Make field that preserves sampling if it is an Aarray, and is
        # expressed using float32 dtype.
        field = pirt.Aarray(im.shape, fill=0.0, dtype='float32')
        if hasattr(im, 'sampling'):
            field.sampling = im.sampling
        
        for iter in range(nblobs):
            
            # Get randomly sampled variables
            if True:
                
                # Get sigma
                max_sigma = max(field.shape)/4
                t = 2**np.random.uniform(0, 1)  # between 1 and 2
                sigma = (t-1) * (max_sigma-min_sigma) + min_sigma
                
                # Get amplitude
                amp = np.random.uniform(-1, 1) * sigma**0.5 * amplitude
                
                # Get position
                pos = []
                for d in range(field.ndim):
                    tmp = np.random.uniform(0, field.shape[d])
                    pos.append(int(tmp))
            
            # Create patch
            patch = pirt.gaussfun.gaussiankernel2(sigma, 0, 0)
            patch = amp * patch / patch.max()
            
            # Get tail
            tail = int(np.ceil(patch.shape[0]/2))
            
            # Put the patch in
            if True:
                
                # Get upper right and lower left
                pos1, pos2 = [], []
                for d in range(field.ndim):
                    pos1.append(pos[d] - tail)
                    pos2.append(pos[d] + tail)
                
                # Get patch indices
                pos3, pos4 = [], []
                for d in range(field.ndim):
                    pos3.append(0)
                    pos4.append(tail*2)
                
                # Correct indices
                for d in range(field.ndim):
                    if pos1[d] < 0:
                        pos3[d] = -pos1[d]
                        pos1[d] = 0
                    if pos2[d] >= field.shape[d]:
                        pos4[d] = field.shape[d] - pos2[d] - 2
                        pos2[d] = field.shape[d] - 1
                
                # Build slice objects
                slices_field = []
                slices_patch = []
                for d in range(field.ndim):
                    slices_field.append(slice(pos1[d], pos2[d]+1))
                    slices_patch.append(slice(pos3[d], pos4[d]+1))
                
                # Put patch in
                field[tuple(slices_field)] += patch[tuple(slices_patch)]
        
        # Store field
        fields.append(field)
    
    # Make sure the deform is injective, and has frozen edges if required
    gridsampling = min_sigma / 2.0
    deform = pirt.DeformationFieldBackward.from_field_multiscale(
        fields, gridsampling, injective=True, frozenedge=True)
    
    # Apply a bit of Gaussian diffusion
    fields = []
    for field in deform:
        fields.append(pirt.diffuse(field, 1.0))
    
    # Done
    return pirt.DeformationFieldBackward(*fields)


def create_random_deformation(im, amplitude=20, scale=50, n=50,
                              frozenedge=True, mapping='backward', seed=None):
    """ create_random_deformation(im, amplitude=20, scale=50, n=50,
    frozenedge=True, mapping='backward', seed=None)
    
    Create a random deformation by creating two random sets of
    deformation vectors which are then converted to an injective
    deformation field using Lee and Choi's method.
    
    See also the class RandomDeformations.
    
    Parameters
    ----------
    im : numpy array or pirt.FieldDescription
        The image to create a deformation field for, or anything that
        can be converted to a FieldDescription instance.
    amplitude : scalar
        The relative amplitude of the deformations. The deformation
        vectors are randomly chosen with a maximum norm of this
        amplitude.
    scale : scalar
        The smallest resolution of the B-spline grid to regularize the
        deformation. Default 50.
    n : integer
        The number of vectors to generate. Default 50.
    frozenedge : bool
        Whether the edges remain fixed or not (default True).
    mapping : {'forward', 'backward'}
        Whether the generated deformation uses forward or backward
        mapping. Default backward.
    seed : int or None
        Seed for the random numbers to draw. If you want to repeat the
        same deformations, apply the same seed multiple times.
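    
    Example
    -------
    A minimal sketch (assumes ``im`` is a 2D numpy array; the argument
    values are illustrative):
    
        deform = create_random_deformation(im, amplitude=10, scale=40, seed=1234)
        im2 = deform.apply_deformation(im)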
""" # Seed generator np.random.seed(seed) # Get field description fd = pirt.FD(im) # Init pointsets pp1 = pirt.PointSet(fd.ndim) pp2 = pirt.PointSet(fd.ndim) # Generate random points for iter in range(n): # Generate random deformation vector p1, p2 = [],[] for i in range(fd.ndim): d = fd.ndim - i - 1 # Get range and amplitude: in world coords! ran = fd.shape[d] * fd.sampling[d] amp = float(amplitude) # Generate numbers loc1 = np.random.uniform(0, ran) loc2 = np.random.uniform( max(0, loc1-amp), min(ran, loc1+amp)) p1.append(loc1) p2.append(loc2) # Store pp1.append(*p1) pp2.append(*p2) # Get deformationfield class if mapping.lower() == 'forward': DeformationField = pirt.DeformationFieldForward elif mapping.lower() == 'backward': DeformationField = pirt.DeformationFieldBackward else: raise ValueError('Invalid mapping.') # Create field # mmm, it might not really matter whether we regularize the deform # using a forward or backward deform, does it? gridsampling = scale / 1.0 deform = DeformationField.from_points_multiscale(fd, gridsampling, pp1, pp2, injective=True, frozenedge=frozenedge) # Apply a bit of Gaussian diffusion fields = [] for field in deform: fields.append( pirt.diffuse(field, 1.0) ) # Done return DeformationField(*fields) class RandomDeformations: """ RandomDeformations(im, amplitude=20, scale=50, n=50, frozenedge=True, mapping='backward', seed=None) Represents a collection of random deformations. This can be used in experiments to test multiple methods on the same set of random deformations. It creates (and stores) random deformations on the fly when requested. The seed given to create_random_deformation is (seed + 100*index). It can be used to produce a random, yet repeatable set of deformations. For the sake of science, let's use a seed when doing experiments that are going to be published in a scientific medium, so they can be reproduced by others. For the sake of simplicity, let's agree to use the arbitrary seeds listed here: 1234 for training sets, 5678 for test sets. See also the function create_random_deformation() Example ------- rd = RandomDeformations(im) deform1 = rd[0] deform2 = rd[1] # etc. """ def __init__(self, im, amplitude=20, scale=50, n=50, frozenedge=True, mapping='backward', seed=None): # Store image description self._fd = pirt.FD(im) # Store other parameters self._amplitude = float(amplitude) self._scale = float(scale) self._n = int(n) # self._frozenedge = bool(frozenedge) self._mapping = mapping self._seed = seed # Store dict with deforms self._deforms = {} def __getitem__(self, index): """ Get the deformation at the specified index. If it does not yet exist, will create a new deformation using arguments specified during initialization. """ # Check if not isinstance(index, int) or index<0: raise ValueError('Can only index using (non-negative) integers.') # Create item if not already there if index not in self._deforms.keys(): seed = None if self._seed is not None: seed = self._seed + index*100 deform = create_random_deformation( self._fd, self._amplitude, self._scale, self._n, self._frozenedge, self._mapping, seed ) self._deforms[index] = deform # Return return self._deforms[index] def get(self, index, *args, **kwargs): """ get(index, *args, **kwargs) Get the deformation at the specified index. If it does not yet exist, will create a new deformation using the specified arguments. Note that this will not take the seed into account. 
""" # Create item if not already there if index not in self._deforms.keys(): deform = create_random_deformation(self._fd, *args, **kwargs) self._deforms[index] = deform # Return return self._deforms[index] PKl+K" pirt/testing.py# -*- coding: utf-8 -*- # Copyright (c) 2016, Almar Klein # Distributed under the (new) BSD License. """ Functionality used for testing, based on pytest. This module is designed to just work, without modification, in most projects. Write your tests like this: from yourproject.xxx.testing import run_tests_if_main, raises, skipif ... run_tests_if_main() Then you can run the test file as a script, which will run all tests and report coverage. Magic! """ import os import sys import inspect import pytest PACKAGE_NAME = __name__.split('.')[0] # Get project root dir THIS_DIR = os.path.abspath(os.path.dirname(__file__)) ROOT_DIR = THIS_DIR for i in range(9): ROOT_DIR = os.path.dirname(ROOT_DIR) if os.path.basename(ROOT_DIR) == PACKAGE_NAME: ROOT_DIR = os.path.dirname(ROOT_DIR) break else: print('testing.py could not find project root dir, ' 'using testing.py directory instead.') ROOT_DIR = THIS_DIR # Inject some function names so they can be obtained with one import raises = pytest.raises skipif = pytest.mark.skipif skip = pytest.skip def run_tests_if_main(show_coverage=False): """ Run tests in a given file if it is run as a script Coverage is reported for running this single test. Set show_coverage to launch the report in the web browser. """ local_vars = inspect.currentframe().f_back.f_locals if not local_vars.get('__name__', '') == '__main__': return # we are in a "__main__" os.chdir(ROOT_DIR) fname = str(local_vars['__file__']) _clear_our_modules() _enable_faulthandler() pytest.main(['-v', '-x', '--color=yes', '--cov', PACKAGE_NAME, '--cov-config', '.coveragerc', '--cov-report', 'html', fname]) if show_coverage: import webbrowser fname = os.path.join(ROOT_DIR, 'htmlcov', 'index.html') webbrowser.open_new_tab(fname) def _enable_faulthandler(): """ Enable faulthandler (if we can), so that we get tracebacks on segfaults. """ try: import faulthandler faulthandler.enable() print('Faulthandler enabled') except Exception: print('Could not enable faulthandler') def _clear_our_modules(): """ Remove ourselves from sys.modules to force an import. """ for key in list(sys.modules.keys()): if key.startswith(PACKAGE_NAME) and 'testing' not in key: del sys.modules[key] PK4K)(pirt/apps/__init__.py""" The apps module implements a few app classes for demo purposes. In some cases the apps can be used to collect user input. To use, the apps should be imported explicitly. """ PK|o4Kv..pirt/apps/deform_by_hand.pyimport time import visvis as vv from pirt import FieldDescription from pirt import (DeformationGridForward, DeformationFieldForward, DeformationGridBackward, DeformationFieldBackward) from visvis import Point, Pointset class DeformByHand: """ DeformByHand(im, grid_sampling=40) Demo application to deform a 2D image by hand using a spline grid. Use the grid property to obtain the deformation grid. Use the run() method to wait for the user to close the figure. 
""" def __init__(self, im, grid_sampling=40): # Store image self._im = im # Setup visualization self._fig = fig = vv.figure() self._a1 = a1 = vv.subplot(231); self._a2 = a2 = vv.subplot(232); self._a3 = a3 = vv.subplot(233); self._a4 = a4 = vv.subplot(234); self._a5 = a5 = vv.subplot(235); self._a6 = a6 = vv.subplot(236); # Text objects self._text1 = vv.Label(fig) self._text1.position = 5, 2 self._text2 = vv.Label(fig) self._text2.position = 5, 20 # Move axes a1.parent.position = 0.0, 0.1, 0.33, 0.45 a2.parent.position = 0.33, 0.1, 0.33, 0.45 a3.parent.position = 0.66, 0.1, 0.33, 0.45 a4.parent.position = 0.0, 0.55, 0.33, 0.45 a5.parent.position = 0.33, 0.55, 0.33, 0.45 a6.parent.position = 0.66, 0.55, 0.33, 0.45 # Correct axes, share camera cam = vv.cameras.TwoDCamera() for a in [a1, a2, a3, a4, a5, a6]: a.axis.visible = False a.camera = cam # Show images im0 = im*0 self._t1 = vv.imshow(im, axes=a1) self._t2 = vv.imshow(im, axes=a2) self._t3 = vv.imshow(im, axes=a3) self._t4 = vv.imshow(im0, axes=a4) self._t5 = vv.imshow(im0, axes=a5) self._t6 = vv.imshow(im0, axes=a6) # Init pointsets self._pp1 = Pointset(2) self._pp2 = Pointset(2) self._active = None self._lines = [] # Init lines to show all deformations tmp = vv.Pointset(2) self._line1 = vv.plot(tmp, ls='', ms='.', mc='c', axes=a2) self._line2 = vv.plot(tmp, ls='+', lc='c', lw='2', axes=a2) # Init grid properties self._sampling = grid_sampling self._levels = 5 self._multiscale = True self._injective = 0.5 self._frozenedge = 1 self._forward = True # Init grid self.DeformationField = DeformationFieldForward self._field1 = self.DeformationField(FieldDescription(self._im)) self._field2 = self.DeformationField(FieldDescription(self._im)) # Bind to events a2.eventMouseDown.Bind(self.on_down) a2.eventMouseUp.Bind(self.on_up) a2.eventMotion.Bind(self.on_motion) fig.eventKeyDown.Bind(self.on_key_down) #a1.eventDoubleClick.Bind(self.OnDone) # Apply self.apply() def on_key_down(self, event): # Update level if event.key == vv.KEY_UP: self._sampling += 2 elif event.key == vv.KEY_DOWN: self._sampling -= 2 elif event.key == vv.KEY_RIGHT: self._levels += 1 elif event.key == vv.KEY_LEFT: self._levels -= 1 # elif event.text.upper() == 'M': self._multiscale = not self._multiscale elif event.text.upper() == 'I': self._injective += 0.4 if self._injective > 0.8: self._injective = -0.8 elif event.text.upper() == 'E': self._frozenedge = not self._frozenedge elif event.text.upper() == 'F': self._forward = not self._forward # reset global field if self._forward: self.DeformationField = DeformationFieldForward else: self.DeformationField = DeformationFieldBackward self._field1 = self.DeformationField(FieldDescription(self._im)) # elif event.key == vv.KEY_ESCAPE: self._pp1.clear() self._pp2.clear() self.apply() elif event.text == ' ': self.apply_deform() # else: return # Correct if self._sampling < 1: self._sampling = 1 self.apply() def on_down(self, event): if event.button != 1: return False if not vv.KEY_SHIFT in event.modifiers: return False # Store location self._active = Point(event.x2d, event.y2d) # Clear any line object for l in self._lines: l.Destroy() # Create line objects tmp = Pointset(2) tmp.append(self._active) tmp.append(self._active) l1 = vv.plot(tmp, lc='g', lw='3', axes=self._a2, axesAdjust=0) l2 = vv.plot(tmp[:1], ls='', ms='.', mc='g', axes=self._a2, axesAdjust=0) self._lines = [l1, l2] # Draw self._a2.Draw() # Prevent dragging by indicating the event needs no further handling return True def on_motion(self, event): if self._active and 
                self._lines:
            # Update line
            tmp = Pointset(2)
            tmp.append(self._active)
            tmp.append(event.x2d, event.y2d)
            l1 = self._lines[0]
            l1.SetPoints(tmp)
            # Draw
            self._a2.Draw()
    
    def on_up(self, event):
        if self._active is None:
            return False
        
        # Get points
        p1 = self._active
        p2 = Point(event.x2d, event.y2d)
        
        # Add!
        self._pp1.append(p1)
        self._pp2.append(p2)
        
        # We're done with this one
        self._active = None
        
        # Clear any line object
        for l in self._lines:
            l.Destroy()
        
        # Apply
        self.apply()
    
    def apply_deform(self):
        # Apply current point-wise deformation
        
        # Compose deformations
        self._field1 = self._field1.compose(self._field2)
        
        # Clear points
        self._pp1.clear()
        self._pp2.clear()
        
        # Update
        self.apply()
    
    def apply(self):
        
        # Get sampling
        grid_sampling = self._sampling, self._sampling*2**self._levels
        
        # Init field and deform
        if not self._pp1:
            # Unit deform
            deform = self.DeformationField(FieldDescription(self._im))
        elif self._multiscale:
            deform = self.DeformationField.from_points_multiscale(
                self._im, grid_sampling,
                self._pp1.data, self._pp2.data,
                injective=self._injective, frozenedge=self._frozenedge)
        else:
            DeformationGrid = DeformationGridForward
            if not self._forward:
                DeformationGrid = DeformationGridBackward
            grid = DeformationGrid.from_points(
                self._im, self._sampling,
                self._pp1.data, self._pp2.data,
                injective=self._injective, frozenedge=self._frozenedge)
            deform = grid.as_deformation_field()
        
        # Store grid
        field0 = self.DeformationField(FieldDescription(self._im))
        self._field2 = deform
        field3 = self._field1.compose(self._field2)
        
        # Deform
        im2 = self._field1.apply_deformation(self._im)
        im3 = field3.apply_deformation(self._im)
        
        # Update images
        self._t2.SetData(im2)
        self._t3.SetData(im3)
        
        # Update grids
        for a, field in zip([self._a4, self._a5, self._a6],
                            [field0, self._field1, field3]):
            a.Clear()
            field.show(a, False)
            a.axis.visible = False
        
        # Update lines
        tmp = Pointset(2)
        for i in range(len(self._pp1)):
            tmp.append(self._pp1[i])
            tmp.append(self._pp2[i])
        self._line1.SetPoints(self._pp1)
        self._line2.SetPoints(tmp)
        
        # Draw
        self._a2.Draw()
        
        # Show text
        text1 = 'B-spline (S) with sampling %i (U/D) and %i levels (L/R).' % (
            self._sampling, self._levels)
        text2 = ('Multiscale %i (M), injective %1.1f (I), frozen edge %i (E), '
                 'forward %i (F).'
                 % (self._multiscale, self._injective,
                    self._frozenedge, self._forward))
        
        # Display and apply
        self._text1.text = text1
        self._text2.text = text2
    
    @property
    def field(self):
        return self._field1
    
    def run(self):
        
        # Setup detecting closing of figure
        self._closed = False
        def callback(event):
            self._closed = True
        self._fig.eventClose.Bind(callback)
        
        while not self._closed:
            time.sleep(0.02)
            vv.processEvents()
            self.apply_deform()


if __name__ == '__main__':
    
    d = DeformByHand(vv.imread('astronaut.png')[:,:,2].astype('float32'))
    
    WHAT = 'twist'
    
    if WHAT == 'twist':
        d._pp1.append(108.27, 416.57)
        d._pp1.append(330.78, 385.58)
        d._pp1.append(220.08, 393.32)
        d._pp1.append(406.06, 266.02)
        d._pp1.append(389.45, 186.31)
        d._pp1.append(347.38, 122.1)
        d._pp1.append(401.63, 332.44)
        d._pp1.append(290.92, 395.54)
        #
        d._pp2.append(249.96, 449.78)
        d._pp2.append(414.91, 333.55)
        d._pp2.append(295.35, 363.44)
        d._pp2.append(409.38, 207.34)
        d._pp2.append(351.81, 88.892)
        d._pp2.append(272.11, 68.966)
        d._pp2.append(404.95, 280.41)
        d._pp2.append(316.39, 388.9)
    
    elif WHAT == 'outward':
        # outward
        d._pp1.append(265, 265)
        d._pp2.append(225, 265)
        d._pp1.append(328, 267)
        d._pp2.append(368, 267)
    
    elif WHAT == 'inward':
        # inward
        d._pp1.append(265, 265)
        d._pp2.append(328, 267)
        d._pp1.append(328, 267)
        d._pp2.append(265, 265)
    
    elif WHAT == 'enlarge':
        d._pp1.append(200, 200)
        d._pp2.append(100, 100)
        d._pp1.append(300, 200)
        d._pp2.append(400, 100)
        d._pp1.append(200, 300)
        d._pp2.append(100, 400)
        d._pp1.append(300, 300)
        d._pp2.append(400, 400)
    
    elif WHAT == 'pinch':
        d._pp1.append(100, 100)
        d._pp2.append(200, 200)
        d._pp1.append(200, 100)
        d._pp2.append(200, 100)
        d._pp1.append(100, 200)
        d._pp2.append(100, 200)
        #
        d._pp1.append(300, 100)
        d._pp2.append(300, 100)
        d._pp1.append(400, 100)
        d._pp2.append(300, 200)
        d._pp1.append(400, 200)
        d._pp2.append(400, 200)
        #
        d._pp1.append(100, 300)
        d._pp2.append(100, 300)
        d._pp1.append(100, 400)
        d._pp2.append(200, 300)
        d._pp1.append(200, 400)
        d._pp2.append(200, 400)
        #
        d._pp1.append(300, 400)
        d._pp2.append(300, 400)
        d._pp1.append(400, 400)
        d._pp2.append(300, 300)
        d._pp1.append(400, 300)
        d._pp2.append(400, 300)
    
    else:
        # Identity transform
        d._pp1.append(100, 100)
        d._pp2.append(100, 100)
    
    if d:
        d.apply()
    
    if False:
        ##
        a = d._a3
        t = a.wobjects[1]
        im = t._texture1._dataRef
        vv.imwrite('~/warped.jpg', im[::1,::1]/255)


# ===== pirt/apps/spline_by_hand.py =====

import numpy as np
import visvis as vv

from pirt import SplineGrid
from pirt import interp
from visvis import Point, Pointset


class SplineByHand:
    """ SplineByHand()
    
    Demo application to influence a 1D spline grid using control points.
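    
    Example
    -------
    A sketch of intended use (requires visvis):
    
        v = SplineByHand()
        vv.use().Run()  # enter the event loop; double-click to add points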
""" def __init__(self): # Setup visualization self._fig = fig = vv.figure() self._a1 = a1 = vv.subplot(111) # Init pointset with control points self._pp = Pointset(2) # Init lines to show control points self._line1 = vv.plot(self._pp, ls='', ms='.', mc='r', mw=11, axes=a1) self._line1.hitTest = True # Init grid properties self._fieldSize = 100 self._sampling = 10 self._levels = 5 # Member to indicate the point being dragged (as an index) self._active = None # Bind to events a1.eventDoubleClick.Bind(self.on_doubleclick) self._line1.eventDoubleClick.Bind(self.on_doubleclick_line) self._line1.eventMouseDown.Bind(self.on_down_line) self._line1.eventMouseUp.Bind(self.on_up_line) a1.eventMotion.Bind(self.on_motion) fig.eventKeyDown.Bind(self.on_key_down) print('Use left/right to control #levels and up/down to control grid sampling.') # Init self.apply() a1.daspectAuto = False a1.SetLimits() a1.axis.showGrid = True def on_key_down(self, event): # Update level if event.key == vv.KEY_UP: self._sampling += 1 elif event.key == vv.KEY_DOWN: self._sampling -= 1 elif event.key == vv.KEY_RIGHT: self._levels += 1 elif event.key == vv.KEY_LEFT: self._levels -= 1 else: return # Correct if self._sampling < 1: self._sampling = 1 # Apply and print print('Using B-spline grid with %i sampling and %i levels.' % ( self._sampling, self._levels)) self.apply() def on_doubleclick(self, event): # On axes # Get new point p = Point(event.x2d, event.y2d) # Add to pointset self._pp.append(p) self._line1.SetPoints(self._pp) # Apply self.apply() def on_doubleclick_line(self, event): # On axes # Get closest point dists = Point(event.x2d, event.y2d).distance(self._pp) I, = np.where( dists == dists.min() ) if not len(I): return False # Remove from pointset self._pp.Pop(I[0]) self._line1.SetPoints(self._pp) # Apply self._a1.Draw() self.apply() def on_down_line(self, event): # On line instance if event.button != 1: return False # Get closest point dists = Point(event.x2d, event.y2d).distance(self._pp) I, = np.where( dists == dists.min() ) if not len(I): return False # Store self._active = I[0] # Prevent dragging by indicating the event needs no further handling return True def on_motion(self, event): if self._active is None: return False # Update line self._pp[self._active] = Point(event.x2d, event.y2d) self._line1.SetPoints(self._pp) # Draw self._a1.Draw() def on_up_line(self, event): if self._active is None: return False # Update point self.on_motion(event) # Deactivate self._active = None # Apply self.apply() def apply(self, event=None): # Get axes a1 = self._a1 # Get sampling grid_sampling = self._sampling, self._sampling *2**self._levels # Create grid tmp = self._pp[:,0] tmp.shape = (tmp.size,1) pp = Pointset(tmp) grid1 = SplineGrid.from_points_multiscale((self._fieldSize,), grid_sampling, pp.data, self._pp[:,1]) # Get copy grid2 = grid1.copy() # Freeze edges self.freeze_edges(grid1) # # Create second grid # grid2 = SplineGrid.from_field(grid.get_field(), grid.grid_sampling) # grid3 = SplineGrid.from_field_multiscale(grid.get_field(), grid.grid_sampling) # Get grid points ppg1 = Pointset(2) ppg2 = Pointset(2) for gx in range(grid1.grid_shape[0]): ppg1.append( (gx-1)* grid1.grid_sampling, grid1.knots[gx] ) ppg2.append( (gx-1)* grid2.grid_sampling, grid2.knots[gx] ) # Get field field = grid1.get_field() #field2 = grid2.get_field() #field3 = grid3.get_field() # Delete objects in scene for ob in a1.wobjects: if ob is not self._line1 and not isinstance(ob, vv.axises.BaseAxis): ob.Destroy() # Draw vv.plot(ppg1, ls='', ms='x', 
                mc='b', mw=9, axes=a1, axesAdjust=False)
        vv.plot(ppg2, ls='', ms='x', mc='c', mw=9, axes=a1, axesAdjust=False)
        vv.plot(np.arange(0, field.size), field, ls='-', lc='g', lw=3,
                axes=a1, axesAdjust=False)
        #vv.plot(field2, ls=':', lc=(0,0.5,0), lw=6, axes=a1, axesAdjust=False)
        #vv.plot(field3, ls=':', lc=(0,0.75,0), lw=6, axes=a1, axesAdjust=False)
    
    def freeze_edges(self, grid):
        
        # Store grid for debugging
        self._grid = grid
        
        # Freeze left edge
        grid.knots[1] = 0
        grid.knots[0] = -grid.knots[2]
        
        # c0, c1, c2, c3 = pirt.get_cubic_spline_coefs(0, 'B')
        # k1, k2, k3 = grid.knots[0], grid.knots[1], grid.knots[2]
        # grid.knots[0] = 0
        # grid.knots[1] = 0
        # grid.knots[2] = )
        
        # Calculate t factor
        field_edge = (grid.field_shape[0]-1) * grid.field_sampling[0]
        grid_edge = (grid.grid_shape[0]-4) * grid.grid_sampling
        t = (field_edge - grid_edge) / grid.grid_sampling
        
        # Get coefficients
        c0, c1, c2, c3 = interp.get_cubic_spline_coefs(t, 'B')
        
        # Freeze right edge
        # grid.knots[-3] = t*grid.knots[-3]  # + (1-t)*0
        # k0, k1 = grid.knots[-4], grid.knots[-3]
        # grid.knots[-2] = t**2 * (k1-k0) + t*(2*k0-k1) - k0
        # k0, k1, k2, k3 = grid.knots[-4], grid.knots[-3], grid.knots[-2], grid.knots[-1]
        # grid.knots[-1] = -(k0*c0 + k1*c1 + k2*c2)/c3
        grid.knots[-3] = t*grid.knots[-3]  # + (1-t)*0
        grid.knots[-1] = 0
        k0, k1 = grid.knots[-4], grid.knots[-3]
        grid.knots[-2] = -(k0*c0 + k1*c1)/c2


if __name__ == '__main__':
    v = SplineByHand()
    vv.use().Run()


# ===== pirt/deform/__init__.py =====

# flake8: noqa
""" The deform module implements classes to represent deformations:
The ``DeformationGrid`` represents a deformation in world coordinates
using a spline grid, and the ``DeformationField`` represents a
deformation in world coordinates using an array for each dimension;
it describes the deformation for each pixel/voxel.

The aforementioned classes are actually base classes; one should use
``DeformationFieldBackward``, ``DeformationFieldForward``,
``DeformationGridBackward`` or ``DeformationGridForward``.
"""

from ._deformbase import Deformation
from ._deformgrid import DeformationGrid
from ._deformfield import DeformationField
from ._subs import (DeformationIdentity,
                    DeformationGridForward, DeformationGridBackward,
                    DeformationFieldForward, DeformationFieldBackward)


# ===== pirt/deform/_deformbase.py =====

import time  # noqa

import numpy as np

from .. import Aarray
from .. import interp
from ..splinegrid import FD


class Deformation(object):
    """ Deformation
    
    This class is an abstract base class for deformation grids and
    deformation fields.
    
    A deformation maps one image (1D, 2D or 3D) to another. A
    deformation can be represented using a B-spline grid, or using a
    field (i.e. array). A deformation is either forward mapping or
    backward mapping.
    
    """
    
    _forward_mapping = None  # Must be set to True or False in subclasses
    
    ## Base properties
    
    @property
    def forward_mapping(self):
        """ Returns True if this deformation is forward mapping. """
        assert self._forward_mapping is not None, (
            "You're using an abstract deformation class "
            "(but must be forward or backward)")
        return self._forward_mapping
    
    @property
    def is_identity(self):
        """ Returns True if this is an identity deform, or null deform;
        representing no deformation at all.
        """
        if isinstance(self, DeformationGrid):
            return self[0]._knots is None
        elif isinstance(self, DeformationField):
            return not bool(self._fields)
        elif isinstance(self, DeformationIdentity):
            return True
        else:
            raise ValueError('Unknown deformation class.')
    
    @property
    def ndim(self):
        """ The number of dimensions of the deformation.
""" return len(self._field_shape) @property def field_shape(self): """ The shape of the deformation field. """ return tuple(self._field_shape) @property def field_sampling(self): """ For each dim, the sampling (distance between pixels/voxels) of the field (all 1's if isotropic). """ return tuple(self._field_sampling) ## Modifying and combining deformations def __add__(self, other): return self.add(other) def __mul__(self, other): if isinstance(other, Deformation): # Compose that creates a new field return other.compose(self) else: # Scale factor = other return self.scale(factor) def copy(self): """ copy() Copy this deformation instance (deep copy). """ if self.is_identity: if isinstance(self, DeformationIdentity): return DeformationIdentity() else: return self.__class__(FD(self)) else: return self.scale(1.0) # efficient copy def scale(self, factor): """ scale(factor) Scale the deformation (in-place) with the given factor. Note that the result is diffeomorphic only if the original is diffeomorphic and the factor is between -1 and 1. """ if isinstance(self, DeformationGrid): # Create new grid fd = FD(self) if self.forward_mapping: newDeform = DeformationGridForward(fd, self.grid_sampling) else: newDeform = DeformationGridBackward(fd, self.grid_sampling) # Scale knots for d in range(self.ndim): if factor == 1.0: newDeform[d]._knots = self[d]._knots.copy() else: newDeform[d]._knots = self[d]._knots * float(factor) return newDeform elif isinstance(self, DeformationField): # Scale fields fields = [] for d in range(self.ndim): if factor == 1.0: fields.append( self[d].copy() ) else: fields.append( self[d] * factor ) # Create new field if self.forward_mapping: newDeform = DeformationFieldForward(*fields) else: newDeform = DeformationFieldBackward(*fields) return newDeform else: raise ValueError('Unknown deformation class.') def add(def1, def2): # noqa - flake8 wants first arg to be self """ add(other) Combine two deformations by addition. Returns a DeformationGrid instance if both deforms are grids. Otherwise returns deformation field. The mapping (forward/backward) is taken from the left deformation. Notes ----- Note that the resulting deformation is not necesarily diffeomorphic even if the individual deformations are. Although diffeomorphisms can in general not be averaged, the constraint of Choi used in this framework enables us to do so (add the individual deformations and scale with 1/n). This function can also be invoked using the plus operator. 
""" # Check input class if not isinstance(def1, Deformation): raise ValueError('Can only combine Deformations (left one is not).') if not isinstance(def2, Deformation): raise ValueError('Can only combine Deformations (right one is not).') if def1.is_identity: # That's easy return def2.copy() elif def2.is_identity: # That's easy return def1.copy() else: # Apply normally # Check shape match if def1.field_shape != def2.field_shape: raise ValueError('Can only combine deforms with same field shape.') # Check mapping match if def1.forward_mapping != def2.forward_mapping: raise ValueError('Can only combine deforms with the same mapping') ii = isinstance if ii(def1, DeformationGrid) and ii(def2, DeformationGrid): # Keep it a grid # Check sampling if def1.grid_sampling != def2.grid_sampling: raise ValueError('Cannot add: sampling of grids does not match.') # Create new grid fd = FD(def1) if def1.forward_mapping: newDeform = DeformationGridForward(fd, def1.grid_sampling) else: newDeform = DeformationGridBackward(fd, def1.grid_sampling) # Add knots for d in range(def1.ndim): newDeform[d]._knots = def1[d]._knots + def2[d]._knots return newDeform else: # Make it a field # Add fields fields = [] for d in range(def1.ndim): fields.append( def1.get_field(d) + def2.get_field(d) ) # Create new field if def1.forward_mapping: newDeform = DeformationFieldForward(*fields) else: newDeform = DeformationFieldBackward(*fields) return newDeform def compose(def1, def2): # noqa - flake8 wants first arg to be self """ compose(other): Combine two deformations by composition. The left is the "static" deformation, and the right is the "delta" deformation. Always returns a DeformationField instance. The mapping (forward/backward) of the result is taken from the left deformation. Notes ----- Let "h = f.compose(g)" and "o" the mathematical composition operator. Then mathematically "h(x) = g(f(x))" or "h = g o f". Practically, new deformation vectors are created by sampling in one deformation, at the locations specified by the vectors of the other. For forward mapping we sample in g at the locations of f. For backward mapping we sample in f at the locations of g. Since we cannot impose the field values for a B-spline grid without resorting to some multi-scale approach (which would use composition and is therefore infinitely recursive), the result is always a deformation field. If the deformation to sample in (f for forward mapping, g for backward) is a B-spline grid, the composition does not introduce any errors; sampling in a field introduces interpolation errors. Since the static deformation f is often a DeformationField, forward mapping is preferred with regard to the accuracy of composition. 
""" # Check input class if not isinstance(def1, Deformation): raise ValueError('Can only combine Deformations (left one is not).') if not isinstance(def2, Deformation): raise ValueError('Can only combine Deformations (right one is not).') if def1.is_identity: # That's easy return def2.copy().as_deformation_field() elif def2.is_identity: # That's easy return def1.copy().as_deformation_field() else: # Apply normally # Check shape match if def1.field_shape != def2.field_shape: raise ValueError('Can only combine deforms with same field shape.') # Check mapping match if def1.forward_mapping != def2.forward_mapping: raise ValueError('Can only combine deforms with the same mapping') if def1.forward_mapping: fields = def1._compose_forward(def2) return DeformationFieldForward(*fields) else: fields = def1._compose_backward(def2) return DeformationFieldBackward(*fields) def _compose_forward(def1, def2): # noqa - flake8 wants first arg to be self # Sample in def2 at the locations pointed to by def1 # Sample in the other at the locations pointed to by this field # Make sure the first is a deformation field def1 = def1.as_deformation_field() # Get sample positions in pixel coordinates sampleLocations = def1.get_deformation_locations() fields = [] if isinstance(def2, DeformationGrid): # Composition with a grid has zero error for d in range(def1.ndim): field1 = def1[d] grid2 = def2[d] field = grid2.get_field_in_samples(sampleLocations) field = Aarray(field1+field, def1.field_sampling) fields.append(field) elif isinstance(def2, DeformationField): # Composition with a field introduces interpolation artifacts for d in range(def1.ndim): field1 = def1[d] field2 = def2[d] # linear or cubic. linear faster and I've not seen cubic doing better field = interp.warp(field2, sampleLocations, 'linear') field = Aarray(field1+field, def1.field_sampling) fields.append(field) else: raise ValueError('Unknown deformation class.') # Done return fields def _compose_backward(def1, def2): # noqa - flake8 wants first arg to be self # Sample in def1 at the locations pointed to by the def2 return def2._compose_forward(def1) ## Getting and applying def get_deformation_locations(self): """ get_deformation_locations() Get a tuple of arrays (x,y,z order) that represent sample locations to apply the deformation. The locations are absolute and expressed in pixel coordinates. As such, they can be fed directly to interp() or project(). """ # Get as deformation field deform = self.as_deformation_field() # Reverse fields deltas = [s for s in reversed(deform)] # Make absolute and return return interp.make_samples_absolute(deltas) def get_field(self, d): """ get_field(d) Get the field corresponding to the given dimension. """ if isinstance(self, DeformationGrid): return self[d].get_field() elif isinstance(self, DeformationField): return self[d] else: raise ValueError('Unknown deformation class.') def get_field_in_points(self, pp, d, interpolation=1): """ get_field_in_points(pp, d, interpolation=1) Obtain the field for dimension d in the specied points. The interpolation value is used only if this is a deformation field. The points pp should be a point set (x-y-z order). 
""" assert isinstance(pp, np.ndarray) and pp.ndim == 2 if isinstance(self, DeformationGrid): return self[d].get_field_in_points(pp) elif isinstance(self, DeformationField): data = self[d] samples = [] samling_xyz = [s for s in reversed(self.field_sampling)] for i in range(self.ndim): s = pp[:,i] / samling_xyz[i] samples.append(s) return interp.interp(data, samples, order=interpolation) else: raise ValueError('Unknown deformation class.') def apply_deformation(self, data, interpolation=3): """ apply_deformation(data, interpolation=3) Apply the deformation to the given data. Returns the deformed data. Parameters ---------- data : numpy array The data to deform interpolation : {0,1,3} The interpolation order (if backward mapping is used). """ # Null deformation if self.is_identity: return data # Make sure it is a deformation field deform = self.as_deformation_field() # Need upsampling? deform = deform.resize_field(data) # Reverse (from z-y-x to x-y-z) samples = [s for s in reversed(deform)] # Deform! # t0 = time.time() if self.forward_mapping: result = interp.deform_forward(data, samples) # print 'forward deformation took %1.3f seconds' % (time.time()-t0) else: result = interp.deform_backward(data, samples, interpolation) # print 'backward deformation took %1.3f seconds' % (time.time()-t0) # Make Aarray and return result = Aarray(result, deform.field_sampling) return result def show(self, axes=None, axesAdjust=True): """ show(axes=None, axesAdjust=True) Illustrates 2D deformations. It does so by creating an image of a grid and deforming it. The image is displayed in the given (or current) axes. Returns the texture object of the grid image. Requires visvis. """ import visvis as vv # Test dimensions if self.ndim != 2: raise RuntimeError('Show only works for 2D data.') # Create image shape, sampling = self.field_shape, self.field_sampling im = Aarray(shape, sampling, fill=0.0, dtype=np.float32) # step = 10 mayorStep = step*5 # im[1::step,:] = 1 im[:,1::step] = 1 im[::mayorStep,:] = 1.2 im[1::mayorStep,:] = 1.2 im[2::mayorStep,:] = 1.2 im[:,::mayorStep] = 1.2 im[:,1::mayorStep] = 1.2 im[:,2::mayorStep] = 1.2 # Deform im2 = self.apply_deformation(im) # Draw t = vv.imshow(im2, axes=axes, axesAdjust=axesAdjust) # Done return t ## Converting def inverse(self): """ inverse() Get the inverse deformation. This is only valid if the current deformation is diffeomorphic. The result is always a DeformationField instance. """ # Identity deforms are their own inverse if self.is_identity: return self # Get deform as a deformation field deform = self.as_deformation_field() # Get samples samples = [s for s in reversed(deform)] # Get fields fields = [] for field in deform: fields.append( interp.deform_forward(-field, samples) ) # Done if self.forward_mapping: return DeformationFieldForward(*fields) else: return DeformationFieldBackward(*fields) def as_deformation_field(self): """ as_deformation_field() Obtain a deformation fields instance that represents the same deformation. If this is already a deformation field, returns self. """ if isinstance(self, DeformationField): return self elif isinstance(self, DeformationGrid): # Obtain deformation in each dimension. Keep in world coordinates. 
            deforms = [g.get_field() for g in self]
            # Make instance
            if self.forward_mapping:
                return DeformationFieldForward(deforms)
            else:
                return DeformationFieldBackward(deforms)
        else:
            raise ValueError('Unknown deformation class.')

    def as_other(self, other):
        """ as_other(other)

        Returns the deformation as a forward or backward mapping,
        so it matches the other deformation.
        """
        if other.forward_mapping:
            return self.as_forward()
        else:
            return self.as_backward()

    def as_forward(self):
        """ as_forward()

        Returns the same deformation as a forward mapping. Returns
        the original if already in forward mapping.
        """
        if self.forward_mapping:
            # Quick
            return self
        else:
            # Slow
            fields = [field for field in self.inverse()]
            return DeformationFieldForward(*fields)

    def as_forward_inverse(self):
        """ as_forward_inverse()

        Returns the inverse deformation as a forward mapping. Returns
        the inverse of the original if already in forward mapping.
        If in backward mapping, the data is the same, but wrapped in a
        Deformation{Field/Grid}Forward instance.

        Note: backward and forward mappings with the same data are
        each other's reverse.
        """
        if self.forward_mapping:
            # Slow
            return self.inverse()
        else:
            # Quick: same data wrapped in forward class
            if isinstance(self, DeformationGrid):
                fd = FD(self)
                deform = DeformationGridForward(fd, self.grid_sampling)
                for d in range(deform.ndim):
                    deform[d]._knots = self[d]._knots
                return deform
            elif isinstance(self, DeformationField):
                fields = [field for field in self]
                return DeformationFieldForward(*fields)

    def as_backward(self):
        """ as_backward()

        Returns the same deformation as a backward mapping. Returns
        the original if already in backward mapping.
        """
        if not self.forward_mapping:
            # Quick
            return self
        else:
            # Slow
            fields = [field for field in self.inverse()]
            return DeformationFieldBackward(*fields)

    def as_backward_inverse(self):
        """ as_backward_inverse()

        Returns the inverse deformation as a backward mapping. Returns
        the inverse of the original if already in backward mapping.
        If in forward mapping, the data is the same, but wrapped in a
        Deformation{Field/Grid}Backward instance.

        Note: backward and forward mappings with the same data are
        each other's reverse.
        """
        if not self.forward_mapping:
            # Slow
            return self.inverse()
        else:
            # Quick: same data wrapped in backward class
            if isinstance(self, DeformationGrid):
                fd = FD(self)
                deform = DeformationGridBackward(fd, self.grid_sampling)
                for d in range(deform.ndim):
                    deform[d]._knots = self[d]._knots
                return deform
            elif isinstance(self, DeformationField):
                fields = [field for field in self]
                return DeformationFieldBackward(*fields)


# Import at the end to work around recursion
from ._deformgrid import DeformationGrid
from ._deformfield import DeformationField
from ._subs import (DeformationIdentity,
                    DeformationGridForward, DeformationGridBackward,
                    DeformationFieldForward, DeformationFieldBackward)

pirt/deform/_deformfield.py

import numpy as np

from .. import Aarray
from .. import interp
from ..splinegrid import GridInterface, FD
from ..splinegrid import calculate_multiscale_sampling
from ._deformbase import Deformation


class DeformationField(Deformation):
    """ DeformationField(*fields)

    A deformation field represents a deformation using an array for
    each dimension, thus specifying the deformation at each pixel/voxel.
    The fields should be in z-y-x order. The deformation is represented
    in world units (not pixels, unless pixel units are used).
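
    Example (a minimal sketch; an all-zero 2D deformation built from
    explicit per-dimension arrays, given in z-y-x order):

        import numpy as np
        from pirt import DeformationFieldBackward

        field_y = np.zeros((64, 64), np.float32)
        field_x = np.zeros((64, 64), np.float32)
        deform = DeformationFieldBackward(field_y, field_x)
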
    Can be initialized with:

    * DeformationField(field_z, field_y, field_x)
    * DeformationField(image)  # Null deformation
    * DeformationField(3)  # Null deformation specifying only the ndims

    This class has functionality to reshape the fields. This can be useful
    during registration if using a scale space pyramid.
    """

    def __init__(self, *fields):
        if len(fields) == 1 and isinstance(fields[0], (list, tuple)):
            fields = fields[0]
        if not fields:
            raise ValueError('No arguments given to DeformationField().')

        if len(fields) == 1 and isinstance(fields[0], int):
            # Null deformation
            ndim = fields[0]
            self._field_shape = tuple([1 for i in range(ndim)])
            self._field_sampling = tuple([1.0 for i in range(ndim)])
            self._fields = []
        elif len(fields) == 1 and isinstance(fields[0], FD):
            # Null deformation with known dimensions
            self._field_shape = fields[0].shape
            self._field_sampling = fields[0].sampling
            self._fields = []
        else:
            # The fields themselves are given
            # Check
            if not self._check_fields_same_shape(fields):
                raise ValueError('Fields must all have the same shape.')
            if len(fields) != fields[0].ndim:
                raise ValueError('There must be a field for each dimension.')
            # Make Aarray if they are not
            fields2 = []
            for field in fields:
                if not hasattr(field, 'sampling'):
                    field = Aarray(field)
                fields2.append(field)
            # Store dimensions and fields
            self._field_shape = fields2[0].shape
            self._field_sampling = fields2[0].sampling
            self._fields = tuple(fields2)

    def __repr__(self):
        mapping = ['backward', 'forward'][self.forward_mapping]
        if self.is_identity:
            return '<DeformationField (%s mapping), %iD identity>' % (
                mapping, self.ndim)
        else:
            shapestr = 'x'.join([('%i' % s) for s in self.field_shape])
            samplingstr = 'x'.join([('%1.2f' % s) for s in self.field_sampling])
            return '<DeformationField (%s mapping), shape %s, sampling %s>' % (
                mapping, shapestr, samplingstr)

    def resize_field(self, new_shape):
        """ resize_field(new_shape)

        Create a new DeformationField instance, where the underlying field
        is resized. The parameter new_shape can be anything that can be
        converted to a FieldDescription instance.

        If the field is already of the correct size, returns self.
        """
        if isinstance(new_shape, DeformationIdentity):
            return self
        elif self.is_identity:
            return self.__class__(FD(self))
        else:
            fd1 = FD(self)
            fd2 = FD(new_shape)
            if (fd1.shape == fd2.shape) and self._sampling_equal(fd1, fd2):
                return self
            else:
                return self._resize_field(fd2)

    def _sampling_equal(self, fd1, fd2):
        sam_errors = [abs(s1 - s2) for s1, s2 in zip(fd1.sampling, fd2.sampling)]
        if max(sam_errors) > 0.0001 * min(fd1.sampling):  # 0.01% error
            return False
        else:
            return True

    def _resize_field(self, fd):
        """ _resize_field(fd)

        Create a new DeformationField instance, where the underlying field
        is reshaped. Requires a FieldDescription instance.
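
        The public entry point is resize_field(); an illustrative use
        (with new_shape given as a plain shape tuple):

            deform2 = deform.resize_field((256, 256))
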
""" # Interpolate (upscale/downscale the other) fields = [] for field1 in self: field2 = interp.resize(field1, fd.shape, 3, 'C', prefilter=False, extra=False) fields.append( field2 ) # Verify if not self._sampling_equal(field2, fd): if not fd.defined_sampling: #sum([(s==1) for s in fd.sampling]) == self.ndim: pass # Sampling probably not given else: raise ValueError('Given reference field sampling does not match.') # Return return self.__class__(fields) ## Sequence stuff def __len__(self): return len(self._fields) def __getitem__(self, item): if isinstance(item, int): if item>=0 and item shape[d]: tmp = 1 elif self.field_shape[d] < shape[d]: tmp = -1 else: tmp = 0 # Check if larger_smaller is None: larger_smaller = tmp elif larger_smaller != tmp: raise ValueError('The shapes of these arrays do not match.') # Done return larger_smaller ## Multiscale composition from points or a field @classmethod def from_field_multiscale(cls, field, sampling, weights=None, injective=True, frozenedge=True, fd=None): """ from_field_multiscale(field, sampling, weights=None, injective=True, frozenedge=True, fd=None) Create a DeformationGrid from the given deformation field (z-y-x order). Uses B-spline grids in a multi-scale approach to regularize the sparse known deformation. This produces a smooth field (minimal bending energy), similar to thin plate splines. The optional weights array can be used to individually weight the field elements. Where the weight is zero, the values are not evaluated. The speed can therefore be significantly improved if there are relatively few nonzero elements. Parameters ---------- field : list of numpy arrays These arrays describe the deformation field (one per dimension). sampling : scalar The smallest sampling of the B-spline grid that is used to create the field. weights : numpy array This array can be used to weigh the contributions of the individual elements. injective : bool or number Whether to prevent the grid from folding. An injective B-spline grid is diffeomorphic. When a number between 0 and 1 is given, the unfold constraint can be tightened to obtain smoother deformations. frozenedge : bool Whether the edges should be frozen. This can help the registration process. Also, when used in conjunction with injective, a truly diffeomorphic deformation is obtained: every input pixel maps to a point within the image boundaries. fd : field Field description to describe the shape and sampling of the underlying field to be deformed. Notes ----- The algorithmic is based on: Lee S, Wolberg G, Chwa K-yong, Shin SY. "Image Metamorphosis with Scattered Feature Constraints". IEEE TRANSACTIONS ON VISUALIZATION AND COMPUTER GRAPHICS. 1996;2:337--354. The injective constraint desctribed in this paper is not applied by this method, but by the DeformationGrid, since it is method specifically for deformations. 
""" if fd is None: fd = field[0] def setR(gridAdd, residu): gridAdd._set_using_field(residu, weights, injective, frozenedge) def getR(gridRef=None): if gridRef is None: return field else: # if frozenedge: # interp.fix_samples_edges(defField) residu = [] for d in range(gridRef.ndim): residu.append(field[d] - gridRef[d]) #residu.append(field[d] - gridRef[d].get_field()) return residu return cls._multiscale(setR, getR, fd, sampling) @classmethod def from_points_multiscale(cls, image, sampling, pp1, pp2, injective=True, frozenedge=True): """ from_points_multiscale(image, sampling, pp1, pp2, injective=True, frozenedge=True) Obtains the deformation field described by the two given sets of corresponding points. The deformation describes moving the points pp1 to points pp2. Note that backwards interpolation is used, so technically, the image is re-interpolated by sampling at the points in pp2 from pixels specified by the points in pp1. Uses B-spline grids in a multi-scale approach to regularize the sparse known deformation. This produces a smooth field (minimal bending energy), similar to thin plate splines. Parameters ---------- image : numpy array or shape The image (of any dimension) to which the deformation applies. sampling : scalar The sampling of the smallest grid to describe the deform. pp1 : PointSet, 2D ndarray The base points. pp2 : PointSet, 2D ndarray The target points. injective : bool or number Whether to prevent the grid from folding. An injective B-spline grid is diffeomorphic. When a number between 0 and 1 is given, the unfold constraint can be tightened to obtain smoother deformations. frozenedge : bool Whether the edges should be frozen. This can help the registration process. Also, when used in conjunction with injective, a truly diffeomorphic deformation is obtained: every input pixel maps to a point within the image boundaries. Notes ----- The algorithmic is based on: Lee S, Wolberg G, Chwa K-yong, Shin SY. "Image Metamorphosis with Scattered Feature Constraints". IEEE TRANSACTIONS ON VISUALIZATION AND COMPUTER GRAPHICS. 1996;2:337--354. The injective constraint desctribed in this paper is not applied by this method, but by the DeformationGrid, since it is method specifically for deformations. """ assert isinstance(pp1, np.ndarray) and pp1.ndim == 2 assert isinstance(pp2, np.ndarray) and pp2.ndim == 2 # Obtain reference points and vectors if cls._forward_mapping: # using the prop on cls would get the prop, not the value! pp = pp1 dd = pp2 - pp1 else: pp = pp2 dd = pp1 - pp2 # Init residu _residu = dd.copy() def setResidu(gridAdd, residu): gridAdd._set_using_points(pp, residu, injective, frozenedge) def getResidu(defField=None): if defField is None: return _residu else: for d in range(defField.ndim): i = defField.ndim - d - 1 tmp = defField.get_field_in_points(pp, d) _residu[:,i] = dd[:,i] - tmp return _residu # Use multiscale method grid = cls._multiscale(setResidu, getResidu, image, sampling) return grid @classmethod def _multiscale(cls, setResidu, getResidu, field, sampling): """ _multiscale(setResidu, getResidu, field, sampling) Method for generating a deformation field using a multiscale B-spline grid approach. from_field_multiscale() and from_points_multiscale() use this classmethod by each supplying appropriate setResidu and getResidu functions. 
""" # Get sampling tmp = GridInterface(field, 1) sMin, sMax = calculate_multiscale_sampling(tmp, sampling) s, sRef = sMax, sMin*0.9 # Init our running deformation field (identity deform at first) defField = cls(FD(field)) # Init residu residu = getResidu() # defField: working field # gridAdd: grid to add to working-field if cls._forward_mapping: # using the prop on cls would get the prop, not the value! DeformationGrid = DeformationGridForward else: DeformationGrid = DeformationGridBackward while s > sRef: # Init loop error = 9999999999999999999 oldError = error**2 deltaError = oldError-error alpha = 0.9 K = 2.046392675 th = alpha * ((1/K) * s)**2 # Create grid by combining refined grid of previous pass and # the gridAdd. iter, maxIter= 0, 16 while deltaError > th and iter sRef: # Get current values in the field and calculate residual # Use refGrid, as it does not *exactly* represent the # same field as grid. residu = getResidu(defField) # Done return defField ## Testing def test_jacobian(self, show=True): """ test_jacobian(show=True) Test the determinand of the field's Jacobian. It should be all positive for the field to be diffeomorphic. Returns the number of pixels where the Jacobian <= 0. If show==True, will show a figure with these pixels indicated. """ if self.ndim == 2: # Calculate gradients gradYY, gradYX = np.gradient(np.asarray(self[0])) gradXY, gradXX = np.gradient(np.asarray(self[1])) # Calculate determinants det = (gradYY+1)*(gradXX+1) - gradXY*gradYX if show: import visvis as vv vv.figure(); vv.imshow(det<=0) # Done return (det<=0).sum() else: raise ValueError('Cannot yet check if 3D field is diffeomorphic.') # Import at the end to work around recursion from ._subs import (DeformationIdentity, DeformationGridForward, DeformationGridBackward, #DeformationFieldForward, DeformationFieldBackward ) PKl4K^FDFDpirt/deform/_deformgrid.pyimport numpy as np from .. import PointSet, Aarray from .. import interp from ..splinegrid import GridInterface, GridContainer, SplineGrid from ..splinegrid import calculate_multiscale_sampling from ._deformbase import Deformation class DeformationGrid(Deformation, GridContainer): """ DeformationGrid(image, sampling=5) A deformation grid represents a deformation using a spline grid. The 'grids' property returns a tuple of SplineGrid instances (one for each dimension). These sub-grids can also obtained by indexing and are in z,y,x order. Parameters ---------- image : shape-tuple, numpy-array, Aarray, or FieldDescription A description of the field that this grid applies to. The image itself is not stored, only the field's shape and sampling are of interest. sampling : number The spacing of the knots in the field. (For anisotropic fields, the spacing is expressed in world units.) Usage ----- After normal instantiation, the grid describes a field with all zeros. Use the From* classmethods to obtain a grid which represents the given values. Limitations ----------- The grid can in principle be of arbitrary dimension, but this implementation currently only supports 1D, 2D and 3D. """ def __init__(self, *args, **kwargs): GridContainer.__init__(self, *args, **kwargs) # Create sub grids for d in range(self.ndim): grid = SplineGrid(*args, **kwargs) grid._thisDim = d self._grids.append(grid) def show(self, axes=None, axesAdjust=True, showGrid=True): """ show(axes=None, axesAdjust=True, showGrid=True) For 2D grids, illustrates the deformation and the knots of the grid. A grid image is made that is deformed and displayed in the given (or current) axes. 
By default the positions of the underlying knots are also shown using markers. Returns the texture object of the grid image. Requires visvis. """ import visvis as vv # Show grid using base method t = Deformation.show(self, axes, axesAdjust) if showGrid: # Get points for all knots pp = PointSet(2) for gy in range(self.grid_shape[0]): for gx in range(self.grid_shape[1]): x = (gx-1) * self.grid_sampling y = (gy-1) * self.grid_sampling pp.append(x, y) # Draw vv.plot(pp, ms='.', mc='g', ls='', axes=axes, axesAdjust=0) # Done return t ## Classmethods @classmethod def from_field(cls, field, sampling, weights=None, injective=True, frozenedge=True, fd=None): """ from_field(field, sampling, weights=None, injective=True, frozenedge=True, fd=None) Create a DeformationGrid from the given deformation field (z-y-x order). Also see from_field_multiscale() The optional weights array can be used to individually weight the field elements. Where the weight is zero, the values are not evaluated. The speed can therefore be significantly improved if there are relatively few nonzero elements. Parameters ---------- field : list of numpy arrays These arrays describe the deformation field (one per dimension). sampling : scalar The sampling of the returned grid weights : numpy array This array can be used to weigh the contributions of the individual elements. injective : bool Whether to prevent the grid from folding. This also penetalizes large relative deformations. An injective B-spline grid is diffeomorphic. frozenedge : bool Whether the edges should be frozen. This can help the registration process. Also, when used in conjunction with injective, a truly diffeomorphic deformation is obtained: every input pixel maps to a point within the image boundaries. fd : field Field description to describe the shape and sampling of the underlying field to be deformed. """ if fd is None: fd = field[0] grid = cls(fd, sampling) grid._set_using_field(field, weights, injective, frozenedge) return grid @classmethod def from_field_multiscale(cls, field, sampling, weights=None, fd=None): """ from_field_multiscale(field, sampling, weights=None, fd=None) Create a DeformationGrid from the given deformation field (z-y-x order). Applies from_field_multiscale() for each of its subgrids. Important notice ---------------- Note that this is not the best way to make a deformation, as it does not take aspects specific to deformations into account, such as injectivity, and uses addition to combine the sub-deformations instead of composition. See DeformationField.from_points_multiscale() for a sound alternative. Parameters ---------- field : list of numpy arrays These arrays describe the deformation field (one per dimension). sampling : scalar The sampling of the returned grid weights : numpy array This array can be used to weigh the contributions of the individual elements. fd : field Field description to describe the shape and sampling of the underlying field to be deformed. 
""" if fd is None: fd = field[0] # Get sampling tmp = GridInterface(fd, 1) sMin, sMax = calculate_multiscale_sampling(tmp, sampling) # Make grid grid = cls(fd, sMin) # Fill grids for d in range(grid.ndim): # i = grid.ndim - d - 1 tmp = SplineGrid.from_field_multiscale(Aarray(field[d], fd.sampling), sampling, weights) grid._grids[d] = tmp # Done return grid @classmethod def from_points(cls, image, sampling, pp1, pp2, injective=True, frozenedge=True): """ from_points(image, sampling, pp1, pp2, injective=True, frozenedge=True) Obtains the deformation field described by the two given sets of corresponding points. The deformation describes moving the points pp1 to points pp2. Note that backwards interpolation is used, so technically, the image is re-interpolated by sampling at the points in pp2 from pixels specified by the points in pp1. Parameters ---------- image : numpy array or shape The image (of any dimension) to which the deformation applies. sampling : scalar The sampling of the returned grid. pp1 : PointSet, 2D ndarray The base points. pp2 : PointSet, 2D ndarray The target points. injective : bool Whether to prevent the grid from folding. This also penetalizes large relative deformations. An injective B-spline grid is diffeomorphic. frozenedge : bool Whether the edges should be frozen. This can help the registration process. Also, when used in conjunction with injective, a truly diffeomorphic deformation is obtained: every input pixel maps to a point within the image boundaries. """ assert isinstance(pp1, np.ndarray) and pp1.ndim == 2 assert isinstance(pp2, np.ndarray) and pp2.ndim == 2 # Obtain reference points and vectors if cls._forward_mapping: # using the prop on cls would get the prop, not the value! pp = pp1 dd = pp2 - pp1 else: pp = pp2 dd = pp1 - pp2 # Go grid = cls(image, sampling) grid._set_using_points(pp, dd, injective, frozenedge) return grid @classmethod def from_points_multiscale(cls, image, sampling, pp1, pp2): """ from_points_multiscale(image, sampling, pp1, pp2) Obtains the deformation field described by the two given sets of corresponding points. The deformation describes moving the points pp1 to points pp2. Applies from_points_multiscale() for each of its subgrids. See DeformationField.from_points_multiscale() for a sound alternative. Important notice ---------------- Note that this is not the best way to make a deformation, as it does not take aspects specific to deformations into account, such as injectivity, and uses addition to combine the sub-deformations instead of composition. Parameters ---------- image : numpy array or shape The image (of any dimension) to which the deformation applies. sampling : scalar The sampling of the returned grid. pp1 : PointSet, 2D ndarray The base points. pp2 : PointSet, 2D ndarray The target points. """ assert isinstance(pp1, np.ndarray) and pp1.ndim == 2 assert isinstance(pp2, np.ndarray) and pp2.ndim == 2 # Obtain reference points and vectors if cls._forward_mapping: # using the prop on cls would get the prop, not the value! 
            pp = pp1
            dd = pp2 - pp1
        else:
            pp = pp2
            dd = pp1 - pp2

        # Get sampling
        tmp = GridInterface(image, 1)
        sMin, sMax = calculate_multiscale_sampling(tmp, sampling)

        # Make grid
        grid = cls(image, sMin)

        # Fill grids
        for d in range(grid.ndim):
            i = grid.ndim - d - 1
            tmp = SplineGrid.from_points_multiscale(image, sampling, pp, dd[:, i])
            grid._grids[d] = tmp

        # Done
        return grid

    ## Private methods to help getting/setting the grid

    def _set_using_field(self, deforms, weights=None, injective=True,
                         frozenedge=True):
        """ _set_using_field(deforms, weights=None, injective=True, frozenedge=True)

        Sets the deformation by specifying deformation fields for
        each dimension (z-y-x order). Optionally, an array with
        weights can be given to weight each deformation unit.
        """
        # Check number of deforms given
        errMsg = 'Deforms must be a list/tuple/DeformationField with a deform for each dim.'
        if not isinstance(deforms, (tuple, list, DeformationField)):
            raise ValueError(errMsg)
        elif len(deforms) != self.ndim:
            raise ValueError(errMsg)

        # Apply using SplineGrid's method
        for d in range(self.ndim):
            #i = self.ndim - d - 1
            self[d]._set_using_field(deforms[d], weights)

        # Diffeomorphic constraints
        if injective:
            self._unfold(injective)
        if frozenedge:
            self._freeze_edges()

    def _set_using_points(self, pp, dd, injective=True, frozenedge=True):
        """ _set_using_points(pp, dd, injective=True, frozenedge=True)

        Deform the lattices according to points pp with the deformations
        defined in dd.

        pp is the position to apply the deformation, dd is the relative
        position to sample the pixels from.

        By default performs folding and shearing prevention to obtain
        a grid that is injective (i.e. can be inverted).
        """
        assert isinstance(pp, np.ndarray) and pp.ndim == 2

        # Apply using SplineGrid's method
        for d in range(self.ndim):
            i = self.ndim - d - 1
            self[d]._set_using_points(pp, dd[:, i])

        # Diffeomorphic constraints
        if injective:
            self._unfold(injective)
        if frozenedge:
            self._freeze_edges()

    def _unfold(self, factor):
        """ _unfold(factor)

        Prevent folds in the grid, by putting a limit to the values
        that the knots may have.

        The factor determines how smooth the deformation should be. Zero
        is no deformation, 1 is *just* no folding. Better use a value of
        0.9 at the highest, to account for numerical errors.

        Based on:
        Choi, Yongchoel, and Seungyong Lee. 2000. "Injectivity conditions of
        2d and 3d uniform cubic b-spline functions". GRAPHICAL MODELS 62: 2000.

        But we apply a smooth mapping rather than simply truncating the
        values. Give a negative factor to use the truncated method.
        """
        # Check factor
        mode = 2
        if factor is False:
            mode = 0
            return  # Do not unfold
        elif factor is True:
            factor = 0.9  # Default
        elif factor < 0:
            mode = 1
            factor = -factor

        # Get K factor
        if self.ndim == 1:
            K = 2.0  # not sure about this
        elif self.ndim == 2:
            K = 2.046392675
        elif self.ndim == 3:
            K = 2.479472335
        else:
            return

        # Calculate limit, see Choi et al. 2000, Lee et al. 1996
        limit = (1.0 / K) * self.grid_sampling * factor

        for d in range(self.ndim):
            # Get knots
            knots = self[d].knots.ravel()
            if mode == 1:
                # Hard limit (as proposed by Lee et al.)
                I, = np.where(np.abs(knots) > limit)
                knots[I] = limit * np.sign(knots[I])
            elif mode == 2:
                # Apply smooth limit, see example in scripts directory
                f = np.exp(-np.abs(knots) / limit)
                knots[:] = limit * (f - 1) * -np.sign(knots)
            else:
                pass

    def _freeze_edges(self):
        """ _freeze_edges()

        Freeze the outer knots of the grid such that the deformation is
        zero at the edges of the image.

        This sets three rows of knots to zero at the top/left of the grid,
        and four rows at the bottom/right.
This is because at the left there is one knot extending beyond the image, while at the right there are two. """ def get_t_factor(grid, d): field_edge = (grid.field_shape[d]-1) * grid.field_sampling[d] grid_edge = (grid.grid_shape[d]-4) * grid.grid_sampling return 1.0 - (field_edge - grid_edge) / grid.grid_sampling for d in range(len(self)): grid = self[d] # Check if grid is large enough if grid._knots.shape[d] < 6: grid._knots[:] = 0 continue if d==0: # top/left # k1, k2 = knots[0], knots[1] grid._knots[0] = 0 grid._knots[1] = - 0.25*grid._knots[2] # Get t factor and coefficients t = get_t_factor(grid, d) c1, c2, c3, c4 = interp.get_cubic_spline_coefs(t, 'B') # bottom/right grid._knots[-3] = (1-t)*grid._knots[-3] grid._knots[-1] = 0 k3, k4 = grid._knots[-3], grid._knots[-4] grid._knots[-2] = -(k3*c3 + k4*c4)/c2 elif d==1: # top/left grid._knots[:,0] = 0 grid._knots[:,1] = - 0.25*grid._knots[:,2] # Get t factor and coefficients t = get_t_factor(grid, d) c1, c2, c3, c4 = interp.get_cubic_spline_coefs(t, 'B') # bottom/right grid._knots[:,-3] = (1-t)*grid._knots[:,-3] grid._knots[:,-1] = 0 k3, k4 = grid._knots[:,-3], grid._knots[:,-4] grid._knots[:,-2] = -(k3*c3 + k4*c4)/c2 elif d==2: # top/left grid._knots[:,:,0] = 0 grid._knots[:,:,1] = - 0.25*grid._knots[:,:,2] # Get t factor and coefficients t = get_t_factor(grid, d) c1, c2, c3, c4 = interp.get_cubic_spline_coefs(t, 'B') # bottom/right grid._knots[:,:,-3] = (1-t)*grid._knots[:,:,-3] grid._knots[:,:,-1] = 0 k3, k4 = grid._knots[:,:,-3], grid._knots[:,:,-4] grid._knots[:,:,-2] = -(k3*c3 + k4*c4)/c2 # Import at the end to work around recursion from ._deformfield import DeformationField PK3K.)GGpirt/deform/_subs.pyfrom ._deformbase import Deformation from ._deformgrid import DeformationGrid from ._deformfield import DeformationField class DeformationIdentity(Deformation): """ Abstract identity deformation. It is not a grid nor a field, nor is it forward or backward mapping. It is nothing more than a simple tool to initialize a deformation with. """ pass class DeformationGridForward(DeformationGrid): """ A deformation grid representing a forward mapping; to create the deformed image, the pixels are mapped to their new locations. """ _forward_mapping = True class DeformationGridBackward(DeformationGrid): """ A deformation grid representing a backward mapping; the field represents where the pixels in the deformed image should be sampled to in the original image. """ _forward_mapping = False class DeformationFieldForward(DeformationField): """ A deformation field representing a forward mapping; to create the deformed image, the pixels are mapped to their new locations. """ _forward_mapping = True class DeformationFieldBackward(DeformationField): """ A deformation field representing a backward mapping; the field represents where the pixels in the deformed image should be sampled to in the original image. """ _forward_mapping = False PK3K2Wkxxpirt/interp/__init__.py# flake8: noqa # Copyright 2014-2017(C) Almar Klein """ The interp module implements several functions for interpolation, implemented in Numba. 
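
Example (a minimal sketch of backward warping; the half-pixel shift
is arbitrary):

    import numpy as np
    from pirt import warp

    im = np.random.rand(64, 64).astype(np.float32)
    yy, xx = np.meshgrid(np.arange(64.0), np.arange(64.0), indexing='ij')
    shifted = warp(im, (xx + 0.5, yy), order=3)
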
""" # More low level functions from ._cubic import get_cubic_spline_coefs from ._misc import meshgrid from ._backward import warp, awarp from ._forward import project, aproject from ._misc import make_samples_absolute #, uglyRoot # More higher level functions from ._func import deform_backward, deform_forward from ._func import resize, imresize from ._func import zoom, imzoom # Special kinds of functionality from ._sliceinvolume import SliceInVolume # Aliases interp = warp PK3KrT rWrWpirt/interp/_backward.py""" Low level warp() function implemented with Numba to make it super fast. """ import numpy as np import numba from ._cubic import spline_type_to_id, set_cubic_spline_coefs from ._cubic import cubicsplinecoef_cardinal, cubicsplinecoef_quadratic @numba.jit(nopython=True, nogil=True) def floor(i): if i >= 0: return int(i) else: return int(i) - 1 def warp(data, samples, order=1, spline_type=0.0): """ warp(data, samples, order='linear', spline_type=0.0) Interpolate (sample) data at the positions specified by samples (pixel coordinates). Parameters ---------- data : array (float32 or float64) Data to interpolate, can be 1D, 2D or 3D. samples : tuple with numpy arrays Each array specifies the sample position for one dimension (in x-y-z order). Can also be a stacked array as in skimage's warp() (in z-y-x order). order : integer or string Order of interpolation. Can be 0:'nearest', 1:'linear', 2: 'quadratic', 3:'cubic'. spline_type : float or string Only for cubic interpolation. Specifies the type of spline. Can be 'Basis', 'Hermite', 'Cardinal', 'Catmull-rom', 'Lagrange', 'Lanczos', 'quadratic', or a float, specifying the tension parameter for the Cardinal spline. See the docs of get_cubic_spline_coefs() for more information. Returns ------- result : array The result is of the same type as the data array, and of the same shape of the samples arrays, which can be of any shape. This flexibility makes this function suitable as a base function for higher level "sampling functions". Notes ----------- The input data can have up to three dimensions. It can be of any dtype, but float32 or float64 is recommended in general. An order of interpolation of 2 would naturally correspond to quadratic interpolation. However, due to its uneven coefficients it reques the same support (and speed) as a cubic interpolant. This implementation adds the two quadratic polynomials. Note that you can probably better use order=3 with a Catmull-Rom spline, which corresponds to the linear interpolation of the two quadratic polynomials. It can be shown (see Thevenaz et al. 2000 "Interpolation Revisited") that interpolation using a Cardinal spline is equivalent to interpolating-B-spline interpolation. """ # Check data if not isinstance(data, np.ndarray): raise ValueError('data must be a numpy array.') elif data.ndim > 3: raise ValueError('can not interpolate data with such many dimensions.') # With Numba we can allow any dtype! # todo: I think we can support skimage-like samples out of the box # Check samples if isinstance(samples, tuple): pass elif isinstance(samples, list): samples = tuple(samples) elif isinstance(samples, np.ndarray) and samples.shape[0] == data.ndim and samples[0].ndim > 0: # skimage API, note that this is z-y-x order! 
samples = tuple(reversed([samples[i] for i in range(samples.shape[0])])) elif data.ndim == 1: samples = (samples,) else: raise ValueError("samples must be a tuple of arrays, or an ndim*X array.") if len(samples) != data.ndim: tmp = "samples must contain as many arrays as data has dimensions." raise ValueError(tmp) for s in samples: if not isinstance(s, np.ndarray): raise ValueError("values in samples must all be numpy arrays.") if s.shape != samples[0].shape: raise ValueError("sample arrays must all have the same shape.") # With Numba we can allow any dtype for the samples! # Check order orders = {'nearest': 0, 'linear': 1, 'quadratic': 2, 'cubic': 3} if isinstance(order, str): try: order = orders[order] except KeyError: raise ValueError('Unknown order of interpolation.') if order not in [0, 1, 2, 3]: raise ValueError('Invalid order of interpolation.') if order == 2: order = 3 spline_type = 'quadratic' # Prepare spline_id and empty result array spline_id = spline_type_to_id(spline_type) result = np.empty(samples[0].shape, data.dtype) # shape of samples, dtype of data # Enable cuda. Only implemented for 2D. Looks like its even slower, oddly enough. # cuda = False # if cuda: # nocov # threadsperblock = 64 # blockspergrid = (result.size + (threadsperblock - 1)) # threadperblock # samples = [cuda.to_device(s) for s in samples] # data = cuda.to_device(data) # warp2_cuda[blockspergrid, threadsperblock](data, # result.ravel(), samples[0].ravel(), samples[1].ravel(), order, spline_id) # # result_cuda_.copy_to_host(result) # only this array is copied back # return result # Go if data.ndim == 1: warp1(data, result.ravel(), samples[0].ravel(), order, spline_id) elif data.ndim == 2: warp2(data, result.ravel(), samples[0].ravel(), samples[1].ravel(), order, spline_id) elif data.ndim == 3: warp3(data, result.ravel(), samples[0].ravel(), samples[1].ravel(), samples[2].ravel(), order, spline_id) # Make Anisotropic array if input data was too # --> No: We do not know what the sample points are # Done return result def awarp(data, samples, *args, **kwargs): """ awarp(data, samples, order='linear', spline_type=0.0) Interpolation in anisotropic array. Like warp(), but the samples are expressed in world coordimates. """ # Check if not (hasattr(data, 'sampling') and hasattr(data, 'origin')): raise ValueError('awarp() needs the data to be an Aarray.') # Correct samples samples2 = [] ndim = len(samples) for i in range(ndim): d = ndim-i-1 origin = data.origin[d] sampling = data.sampling[d] samples2.append( (samples[i]-origin) / sampling ) # Interpolate return warp(data, samples2, *args, **kwargs) @numba.jit(nopython=True, nogil=True) def warp1(data_, result_, samplesx_, order, spline_id): Ni = samplesx_.size Nx = data_.shape[0] ccx = np.empty((4, ), np.float64) if order == 3: # Iterate over all samples for i in range(0, Ni): # Get integer sample location and t-factor dx = samplesx_[i]; ix = floor(dx); tx = dx-ix if ix >= 1 and ix < Nx-2: # Cubic interpolation # Get coefficients. if spline_id <= 1.0: # By far most common, make it fast! cubicsplinecoef_cardinal(tx, ccx, spline_id) # tension=spline_id elif spline_id == 99.0: cubicsplinecoef_quadratic(tx, ccx) else: # Select spline type, slower, but ok, # because these make less sense, or are slow anyway set_cubic_spline_coefs(tx, spline_id, ccx) val = data_[ix-1] * ccx[0] val += data_[ix ] * ccx[1] val += data_[ix+1] * ccx[2] val += data_[ix+2] * ccx[3] result_[i] = val elif dx>=-0.5 and dx<=Nx-0.5: # Edge effects # Get coefficients. Slower, but only needed at edges. 
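                # Near the boundary, part of the 4-tap support falls outside
                # the data; the approach below clips the tap range and then
                # renormalizes the remaining coefficients so they sum to one.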
set_cubic_spline_coefs(tx, spline_id, ccx) # Correct stuff: calculate offset (max 2) cx1, cx2 = 0, 4 # if ix<1: cx1+=1-ix; if ix>Nx-3: cx2+=(Nx-3)-ix; # Correct coefficients, so that the sum is one val = 0.0 for cx in range(cx1, cx2): val += ccx[cx] val = 1.0/val for cx in range(cx1, cx2): ccx[cx] *= val # Combine elements val = 0.0 for cx in range(cx1, cx2): val += data_[ix+cx-1] * ccx[cx] result_[i] = val else: # Out of range result_[i] = 0.0 elif order == 1: # Iterate over all samples for i in range(0, Ni): # Get integer sample location and t-factor dx = samplesx_[i]; ix = floor(dx); tx = dx-ix if ix >= 0 and ix < Nx-1: # Linear interpolation val = data_[ix] * (1.0-tx) val += data_[ix+1] * tx result_[i] = val elif dx>=-0.5 and dx<=Nx-0.5: if ix<0: tx+=ix; ix=0; if ix>Nx-2: tx+=ix-(Nx-2); ix=Nx-2; # Linear interpolation (edges) val = data_[ix] * (1.0-tx) val += data_[ix+1] * tx result_[i] = val else: # Out of range result_[i] = 0.0 else: # Iterate over all samples for i in range(0, Ni): # Get integer sample location dx = samplesx_[i]; ix = floor(dx+0.5) if ix >= 0 and ix < Nx: # Nearest neighbour interpolation result_[i] = data_[ix] else: # Out of range result_[i] = 0.0 @numba.jit(nopython=True, nogil=True) def warp2(data_, result_, samplesx_, samplesy_, order, spline_id): Ni = samplesx_.size Ny = data_.shape[0] Nx = data_.shape[1] ccx = np.empty((4, ), np.float64) ccy = np.empty((4, ), np.float64) if order == 3: # Iterate over all samples for i in range(0, Ni): # Get integer sample location and t-factor dx = samplesx_[i]; ix = floor(dx); tx = dx-ix dy = samplesy_[i]; iy = floor(dy); ty = dy-iy if ( ix >= 1 and ix < Nx-2 and iy >= 1 and iy < Ny-2 ): # Cubic interpolation # Get coefficients. if spline_id <= 1.0: # By far most common, make it fast! cubicsplinecoef_cardinal(tx, ccx, spline_id) # tension=spline_id cubicsplinecoef_cardinal(ty, ccy, spline_id) elif spline_id == 99.0: cubicsplinecoef_quadratic(tx, ccx) cubicsplinecoef_quadratic(ty, ccy) else: # Select spline type, slower, but ok, # because these make less sense, or are slow anyway set_cubic_spline_coefs(tx, spline_id, ccx) set_cubic_spline_coefs(ty, spline_id, ccy) # Apply val = 0.0 for cy in range(4): for cx in range(4): val += data_[iy+cy-1,ix+cx-1] * ccy[cy] * ccx[cx] result_[i] = val elif ( dx>=-0.5 and dx<=Nx-0.5 and dy>=-0.5 and dy<=Ny-0.5 ): # Edge effects # Get coefficients. Slower, but only needed at edges. set_cubic_spline_coefs(tx, spline_id, ccx) set_cubic_spline_coefs(ty, spline_id, ccy) # Correct stuff: calculate offset (max 2) cx1, cx2 = 0, 4 cy1, cy2 = 0, 4 # if ix<1: cx1+=1-ix; if ix>Nx-3: cx2+=(Nx-3)-ix; # if iy<1: cy1+=1-iy; if iy>Ny-3: cy2+=(Ny-3)-iy; # Correct coefficients, so that the sum is one val = 0.0 for cx in range(cx1, cx2): val += ccx[cx] val = 1.0/val for cx in range(cx1, cx2): ccx[cx] *= val # val = 0.0 for cy in range(cy1, cy2): val += ccy[cy] val = 1.0/val for cy in range(cy1, cy2): ccy[cy] *= val # Combine elements # No need to pre-calculate indices: the compiler is well # capable of making these optimizations. 
val = 0.0 for cy in range(cy1, cy2): for cx in range(cx1, cx2): val += data_[iy+cy-1,ix+cx-1] * ccy[cy] * ccx[cx] result_[i] = val else: # Out of range result_[i] = 0.0 elif order == 1: # Iterate over all samples for i in range(0, Ni): # Get integer sample location and t-factor dx = samplesx_[i]; ix = floor(dx); tx = dx-ix dy = samplesy_[i]; iy = floor(dy); ty = dy-iy if ( ix >= 0 and ix < Nx-1 and iy >= 0 and iy < Ny-1 ): # Linear interpolation val = data_[iy, ix ] * (1.0-ty) * (1.0-tx) val += data_[iy, ix+1] * (1.0-ty) * tx val += data_[iy+1,ix ] * ty * (1.0-tx) val += data_[iy+1,ix+1] * ty * tx result_[i] = val elif ( dx>=-0.5 and dx<=Nx-0.5 and dy>=-0.5 and dy<=Ny-0.5 ): # Edge effects if ix<0: tx+=ix; ix=0; if ix>Nx-2: tx+=ix-(Nx-2); ix=Nx-2; # if iy<0: ty+=iy; iy=0; if iy>Ny-2: ty+=iy-(Ny-2); iy=Ny-2; # Linear interpolation (edges) val = data_[iy, ix ] * (1.0-ty) * (1.0-tx) val += data_[iy, ix+1] * (1.0-ty) * tx val += data_[iy+1,ix ] * ty * (1.0-tx) val += data_[iy+1,ix+1] * ty * tx result_[i] = val else: # Out of range result_[i] = 0.0 else: # Iterate over all samples for i in range(0, Ni): # Get integer sample location dx = samplesx_[i]; ix = floor(dx+0.5) dy = samplesy_[i]; iy = floor(dy+0.5) if ( ix >= 0 and ix < Nx and iy >= 0 and iy < Ny ): # Nearest neighbour interpolation result_[i] = data_[iy,ix] else: # Out of range result_[i] = 0.0 @numba.jit(nopython=True, nogil=True) def warp3(data_, result_, samplesx_, samplesy_, samplesz_, order, spline_id): Ni = samplesx_.size Nz = data_.shape[0] Ny = data_.shape[1] Nx = data_.shape[2] ccx = np.empty((4, ), np.float64) ccy = np.empty((4, ), np.float64) ccz = np.empty((4, ), np.float64) if order == 3: # Iterate over all samples for i in range(0, Ni): # Get integer sample location and t-factor dx = samplesx_[i]; ix = floor(dx); tx = dx-ix dy = samplesy_[i]; iy = floor(dy); ty = dy-iy dz = samplesz_[i]; iz = floor(dz); tz = dz-iz if ( ix >= 1 and ix < Nx-2 and iy >= 1 and iy < Ny-2 and iz >= 1 and iz < Nz-2 ): # Cubic interpolation # Get coefficients. if spline_id <= 1.0: # By far most common, make it fast! cubicsplinecoef_cardinal(tx, ccx, spline_id) # tension=spline_id cubicsplinecoef_cardinal(ty, ccy, spline_id) cubicsplinecoef_cardinal(tz, ccz, spline_id) elif spline_id == 99.0: cubicsplinecoef_quadratic(tx, ccx) cubicsplinecoef_quadratic(ty, ccy) cubicsplinecoef_quadratic(tz, ccz) else: # Select spline type, slower, but ok, # because these make less sense, or are slow anyway set_cubic_spline_coefs(tx, spline_id, ccx) set_cubic_spline_coefs(ty, spline_id, ccy) set_cubic_spline_coefs(tz, spline_id, ccz) # Apply val = 0.0 for cz in range(4): for cy in range(4): for cx in range(4): val += data_[iz+cz-1,iy+cy-1,ix+cx-1] * ( ccz[cz] * ccy[cy] * ccx[cx] ) result_[i] = val elif ( dx>=-0.5 and dx<=Nx-0.5 and dy>=-0.5 and dy<=Ny-0.5 and dz>=-0.5 and dz<=Nz-0.5 ): # Edge effects # Get coefficients. Slower, but only needed at edges. 
set_cubic_spline_coefs(tx, spline_id, ccx) set_cubic_spline_coefs(ty, spline_id, ccy) set_cubic_spline_coefs(tz, spline_id, ccz) # Correct stuff: calculate offset (max 2) cx1, cx2 = 0, 4 cy1, cy2 = 0, 4 cz1, cz2 = 0, 4 # if ix<1: cx1+=1-ix; if ix>Nx-3: cx2+=(Nx-3)-ix; # if iy<1: cy1+=1-iy; if iy>Ny-3: cy2+=(Ny-3)-iy; # if iz<1: cz1+=1-iz; if iz>Nz-3: cz2+=(Nz-3)-iz; # Correct coefficients, so that the sum is one val = 0.0 for cx in range(cx1, cx2): val += ccx[cx] val = 1.0/val for cx in range(cx1, cx2): ccx[cx] *= val # val = 0.0 for cy in range(cy1, cy2): val += ccy[cy] val = 1.0/val for cy in range(cy1, cy2): ccy[cy] *= val # val = 0.0 for cz in range(cz1, cz2): val += ccz[cz] val = 1.0/val for cz in range(cz1, cz2): ccz[cz] *= val # Combine elements # No need to pre-calculate indices: the C compiler is well # capable of making these optimizations. val = 0.0 for cz in range(cz1, cz2): for cy in range(cy1, cy2): for cx in range(cx1, cx2): val += data_[iz+cz-1,iy+cy-1,ix+cx-1] * ccz[cz] * ccy[cy] * ccx[cx] result_[i] = val else: # Out of range result_[i] = 0.0 elif order == 1: # Iterate over all samples for i in range(0, Ni): # Get integer sample location and t-factor dx = samplesx_[i]; ix = floor(dx); tx = dx-ix dy = samplesy_[i]; iy = floor(dy); ty = dy-iy dz = samplesz_[i]; iz = floor(dz); tz = dz-iz if ( ix >= 0 and ix < Nx-1 and iy >= 0 and iy < Ny-1 and iz >= 0 and iz < Nz-1 ): # Linear interpolation val = data_[iz ,iy, ix ] * (1.0-tz) * (1.0-ty) * (1.0-tx) val += data_[iz ,iy, ix+1] * (1.0-tz) * (1.0-ty) * tx val += data_[iz ,iy+1,ix ] * (1.0-tz) * ty * (1.0-tx) val += data_[iz ,iy+1,ix+1] * (1.0-tz) * ty * tx # val += data_[iz+1,iy, ix ] * tz * (1.0-ty) * (1.0-tx) val += data_[iz+1,iy, ix+1] * tz * (1.0-ty) * tx val += data_[iz+1,iy+1,ix ] * tz * ty * (1.0-tx) val += data_[iz+1,iy+1,ix+1] * tz * ty * tx result_[i] = val elif ( dx>=-0.5 and dx<=Nx-0.5 and dy>=-0.5 and dy<=Ny-0.5 and dz>=-0.5 and dz<=Nz-0.5 ): # Edge effects if ix<0: tx+=ix; ix=0; if ix>Nx-2: tx+=ix-(Nx-2); ix=Nx-2; # if iy<0: ty+=iy; iy=0; if iy>Ny-2: ty+=iy-(Ny-2); iy=Ny-2; # if iz<0: tz+=iz; iz=0; if iz>Nz-2: tz+=iz-(Nz-2); iz=Nz-2; # Linear interpolation (edges) val = data_[iz ,iy, ix ] * (1.0-tz) * (1.0-ty) * (1.0-tx) val += data_[iz ,iy, ix+1] * (1.0-tz) * (1.0-ty) * tx val += data_[iz ,iy+1,ix ] * (1.0-tz) * ty * (1.0-tx) val += data_[iz ,iy+1,ix+1] * (1.0-tz) * ty * tx # val += data_[iz+1,iy, ix ] * tz * (1.0-ty) * (1.0-tx) val += data_[iz+1,iy, ix+1] * tz * (1.0-ty) * tx val += data_[iz+1,iy+1,ix ] * tz * ty * (1.0-tx) val += data_[iz+1,iy+1,ix+1] * tz * ty * tx result_[i] = val else: # Out of range result_[i] = 0.0 else: # Iterate over all samples for i in range(0, Ni): # Get integer sample location dx = samplesx_[i]; ix = floor(dx+0.5) dy = samplesy_[i]; iy = floor(dy+0.5) dz = samplesz_[i]; iz = floor(dz+0.5) if ( ix >= 0 and ix < Nx and iy >= 0 and iy < Ny and iz >= 0 and iz < Nz ): # Nearest neighbour interpolation result_[i] = data_[iz,iy,ix] else: # Out of range result_[i] = 0.0 PK3K9pirt/interp/_backward_cuda.py# Atempt at Cuda implementation, but so far it is slower than the normal one. 
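# A sketch of the intended launch configuration, mirroring the commented-out
# caller in _backward.py (the block size of 64 is that caller's choice, not a
# tuned value):
#
#   threadsperblock = 64
#   blockspergrid = (result.size + threadsperblock - 1) // threadsperblock
#   warp2_cuda[blockspergrid, threadsperblock](data, result.ravel(),
#       samples[0].ravel(), samples[1].ravel(), order, spline_id)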
import numba from numba import cuda @cuda.jit((numba.float64, ), device=True, inline=True) def cuda_floor(i): if i >= 0: return int(i) else: return int(i) - 1 @cuda.jit def warp2_cuda(data_, result_, samplesx_, samplesy_, order, spline_type): Ni = samplesx_.size Ny = data_.shape[0] Nx = data_.shape[1] ccx = cuda.local.array((4, ), numba.float64) ccy = cuda.local.array((4, ), numba.float64) if order == 3: # Iterate over all samples #for i in range(0, Ni): i = cuda.grid(1) if i < Ni: # Get integer sample location and t-factor dx = samplesx_[i]; ix = cuda_floor(dx); tx = dx-ix dy = samplesy_[i]; iy = cuda_floor(dy); ty = dy-iy if ( ix >= 1 and ix < Nx-2 and iy >= 1 and iy < Ny-2 ): # Get coefficients. ccx[0] = - 0.5*tx**3 + tx**2 - 0.5*tx ccx[1] = 1.5*tx**3 - 2.5*tx**2 + 1 ccx[2] = - 1.5*tx**3 + 2*tx**2 + 0.5*tx ccx[3] = 0.5*tx**3 - 0.5*tx**2 ccy[0] = - 0.5*ty**3 + ty**2 - 0.5*ty ccy[1] = 1.5*ty**3 - 2.5*ty**2 + 1 ccy[2] = - 1.5*ty**3 + 2*ty**2 + 0.5*ty ccy[3] = 0.5*ty**3 - 0.5*ty**2 # Apply val = 0.0 for cy in range(4): for cx in range(4): val += data_[iy+cy-1,ix+cx-1] * ccy[cy] * ccx[cx] result_[i] = val elif ( dx>=-0.5 and dx<=Nx-0.5 and dy>=-0.5 and dy<=Ny-0.5 ): # Edge effects # Get coefficients. Slower, but only needed at edges. ccx[0] = - 0.5*tx**3 + tx**2 - 0.5*tx ccx[1] = 1.5*tx**3 - 2.5*tx**2 + 1 ccx[2] = - 1.5*tx**3 + 2*tx**2 + 0.5*tx ccx[3] = 0.5*tx**3 - 0.5*tx**2 ccy[0] = - 0.5*ty**3 + ty**2 - 0.5*ty ccy[1] = 1.5*ty**3 - 2.5*ty**2 + 1 ccy[2] = - 1.5*ty**3 + 2*ty**2 + 0.5*ty ccy[3] = 0.5*ty**3 - 0.5*ty**2 # Correct stuff: calculate offset (max 2) cx1, cx2 = 0, 4 cy1, cy2 = 0, 4 # if ix<1: cx1+=1-ix; if ix>Nx-3: cx2+=(Nx-3)-ix; # if iy<1: cy1+=1-iy; if iy>Ny-3: cy2+=(Ny-3)-iy; # Correct coefficients, so that the sum is one val = 0.0 for cx in range(cx1, cx2): val += ccx[cx] val = 1.0/val for cx in range(cx1, cx2): ccx[cx] *= val # val = 0.0 for cy in range(cy1, cy2): val += ccy[cy] val = 1.0/val for cy in range(cy1, cy2): ccy[cy] *= val # Combine elements # No need to pre-calculate indices: the C compiler is well # capable of making these optimizations. 
val = 0.0 for cy in range(cy1, cy2): for cx in range(cx1, cx2): val += data_[iy+cy-1,ix+cx-1] * ccy[cy] * ccx[cx] result_[i] = val else: # Out of range result_[i] = 0.0 elif order == 1: # Iterate over all samples #for i in range(0, Ni): i = cuda.grid(1) if i < Ni: # Get integer sample location and t-factor dx = samplesx_[i]; ix = cuda_floor(dx); tx = dx-ix dy = samplesy_[i]; iy = cuda_floor(dy); ty = dy-iy if ( ix >= 0 and ix < Nx-1 and iy >= 0 and iy < Ny-1 ): # Linear interpolation val = data_[iy, ix ] * (1.0-ty) * (1.0-tx) val += data_[iy, ix+1] * (1.0-ty) * tx val += data_[iy+1,ix ] * ty * (1.0-tx) val += data_[iy+1,ix+1] * ty * tx result_[i] = val elif ( dx>=-0.5 and dx<=Nx-0.5 and dy>=-0.5 and dy<=Ny-0.5 ): # Edge effects if ix<0: tx+=ix; ix=0; if ix>Nx-2: tx+=ix-(Nx-2); ix=Nx-2; # if iy<0: ty+=iy; iy=0; if iy>Ny-2: ty+=iy-(Ny-2); iy=Ny-2; # Linear interpolation (edges) val = data_[iy, ix ] * (1.0-ty) * (1.0-tx) val += data_[iy, ix+1] * (1.0-ty) * tx val += data_[iy+1,ix ] * ty * (1.0-tx) val += data_[iy+1,ix+1] * ty * tx result_[i] = val else: # Out of range result_[i] = 0.0 else: # Iterate over all samples #for i in range(0, Ni): i = cuda.grid(1) if i < Ni: # Get integer sample location dx = samplesx_[i]; ix = cuda_floor(dx+0.5) dy = samplesy_[i]; iy = cuda_floor(dy+0.5) if ( ix >= 0 and ix < Nx and iy >= 0 and iy < Ny ): # Nearest neighbour interpolation result_[i] = data_[iy,ix] else: # Out of range result_[i] = 0.0 PK ,K m''pirt/interp/_cubic.py""" Cubic spline coefficients and lookup tables. """ import numpy as np from numpy import sin, pi # for Lanczos import numba # Keep a cache of calculated luts LUTS = {} def get_cubic_spline_coefs(t, spline_type=0.0): """ get_cubic_spline_coefs(t, spline_type='Catmull-Rom') Calculates the coefficients for a cubic spline and returns them as a tuple. t is the ratio between "left" point and "right" point on the lattice. If performance matters, consider using get_lut() instead. spline_type can be (case insensitive): : Gives a Cardinal spline with the specified number as its tension parameter. A Cardinal spline is a type of Hermite spline, where the tangents are calculated using points p0 and p3; the coefficients can be directly applied to p0 p1 p2 p3. 'Catmull-Rom' or 'Cardinal0': is a cardinal spline a tension of 0. An interesting note: if we would create two quadractic splines, that would fit a polynomial "f(t) = a*t*t + b*t + c" to the first three and last three knots respectively, and if we would then combine the two using linear interpolation, we would obtain a catmull-rom spline. I don't know whether this is how the spline was designed, or if this is a "side-effect". 'B-pline': Basis spline. Not an interpolating spline, but an approximating spline. Here too, the coeffs can be applied to p0-p3. 'Hermite': Gives the Hermite coefficients to be applied to p1 m1 p2 m2 (in this order), where p1 p2 are the closest knots and m1 m2 the tangents. 'Lagrange': The Lagrange spline or Lagrange polynomial is an interpolating spline. It is the same as Newton polynomials, but implemented in a different manner (wiki). The Newton implementation should be more efficient, but this implementation is much simpler, and is very similar to the B-spline implementation (only the coefficients are different!). Also, when for example interpolating an image, coefficients are reused often and can be precalculated to enhance speed. 'Lanczos': Lanczos interpolation (windowed sync funcion). 
Note that this is not really a spline, and that sum of the coefficients is not exactly 1. Often used in audio processing. The Lanczos spline is very similar to the Cardinal spline with a tension of -0.25. 'quadratic': Quadratic interpolation with a support of 4, essentially the addition of the two quadratic polynoms. 'linear': Linear interpolation. Effective support is 2. Added for completeness and testing. 'nearest': Nearest neighbour interpolation. Added for completeness and testing. """ spline_id = spline_type_to_id(spline_type) out = np.zeros((4, ), np.float64) set_cubic_spline_coefs(t, spline_id, out) return tuple(out) @numba.jit(nopython=True) def set_cubic_spline_coefs(t, spline_id, out): """ set_cubic_spline_coefs(t, spline_id, out) Calculate cubuc spline coefficients for the given spline_id, and store them in the given array. See get_cubic_spline_coefs() and spline_type_to_id() for details. """ #if spline_id == 0.0: # cubicsplinecoef_catmullRom(t, out) == cubicsplinecoef_cardinal(t, out, 0) if spline_id <= 1.0: cubicsplinecoef_cardinal(t, out, spline_id) # tension=spline_id elif spline_id == 2.0: cubicsplinecoef_basis(t, out) elif spline_id == 3.0: cubicsplinecoef_hermite(t, out) elif spline_id == 4.0: cubicsplinecoef_lagrange(t, out) elif spline_id == 5.0: cubicsplinecoef_lanczos(t, out) # common in Audio, but slow because of sine elif spline_id == 97.0: cubicsplinecoef_nearest(t, out) elif spline_id == 98.0: cubicsplinecoef_linear(t, out) elif spline_id == 99.0: cubicsplinecoef_quadratic(t, out) # Note: the lookup table was initially implemented to provide efficient # calculation of the coefficient in the warp functions. However, with Numba # it turned out to be more efficient to calculate the coefficients directly, # possibly by LLVM making use of SIMD and/or the overhead of array management # needed with a LUT. def get_lut(spline_type, n=32768): """ get_lut(spline_type, n=32768) Calculate the look-up table for the specified spline type with n entries. Returns a float64 1D array that has a size of (n + 2 * 4) that can be used in get_coef(). The spline_type can be 'cardinal' or a float between -1 and 1 for a Cardinal spline, or 'hermite', 'lagrange', 'lanczos', 'quadratic', 'linear', 'nearest'. Note that the last three are available for completeness; its inefficient to do nearest, linear or quadratic interpolation with a cubic kernel. """ spline_id = spline_type_to_id(spline_type) # Create lut if not existing yet key = spline_id, n if key not in LUTS.keys(): lut = np.zeros((n + 2) * 4, np.float64) _calculate_lut(lut, spline_id) LUTS[key] = lut return LUTS[key] @numba.jit def _calculate_lut(lut, spline_id): n = lut.size // 4 - 2 step = 1.0 / n t = 0.0 for i in range(n): t += step out = lut[i * 4:] # For each possible t, calculate the coefficients set_cubic_spline_coefs(t, spline_id, out) def spline_type_to_id(spline_type): """ spline_type_to_id(spline_type) Method to map a spline name to an integer ID. This is used so that set_cubic_spline_coefs() can be efficient. The spline_type can also be a number between -1 and 1, representing the tension for a Cardinal spline. 
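
    Example (illustrative):

        spline_type_to_id('cardinal')  # -> 0.0 (tension 0, i.e. Catmull-Rom)
        spline_type_to_id(0.5)         # -> 0.5 (Cardinal with tension 0.5)
        spline_type_to_id('hermite')   # -> 3.0
        get_cubic_spline_coefs(0.5, 'cardinal')
        # -> (-0.0625, 0.5625, 0.5625, -0.0625), which sums to 1.0
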
""" # Handle tension given for Cardinal spline tension = 0.0 if isinstance(spline_type, (float, int)): if spline_type >= -1 and spline_type <= 1: tension = float(spline_type) spline_type = 'Cardinal' else: raise ValueError('Tension parameter must be between -1 and 1.') # Get id if spline_type.lower() in ['c', 'card', 'cardinal', 'catmull–rom']: return tension # For catmull-rom, we use default tension 0 elif spline_type.lower() in ['b', 'basis', 'basic']: return 2.0 elif spline_type.lower() in ['herm', 'hermite']: return 3.0 elif spline_type.lower() in ['lag', 'lagrange']: return 4.0 elif spline_type.lower() in ['lanc', 'lanczos']: return 5.0 elif spline_type.lower() in ['near', 'nearest']: return 97.0 elif spline_type.lower() in ['lin', 'linear']: return 98.0 elif spline_type.lower() in ['quad', 'quadratic']: return 99.0 else: raise ValueError('Unknown spline type: ' + str(spline_type)) @numba.jit(nopython=True) def get_coef(lut, t): """ get_coef(lut, t) Get the coefficients for given value of t. This simply obtains the nearest coefficients in the table. For a more accurate result, use the AccurateCoef class. """ n = lut.size // 4 - 2 i1 = int(t * n + 0.5) * 4 return lut[i1:i1 + 4] ## The coefficient functions @numba.jit(nopython=True) def cubicsplinecoef_catmullRom(t, out): # See the doc for the catmull-rom spline, this is how the two splines # are combined by simply adding (and dividing by two) out[0] = - 0.5*t**3 + t**2 - 0.5*t out[1] = 1.5*t**3 - 2.5*t**2 + 1 out[2] = - 1.5*t**3 + 2*t**2 + 0.5*t out[3] = 0.5*t**3 - 0.5*t**2 @numba.jit(nopython=True) def cubicsplinecoef_cardinal(t, out, tension): tau = 0.5 * (1 - tension) out[0] = - tau * ( t**3 - 2*t**2 + t ) out[3] = tau * ( t**3 - t**2 ) out[1] = 2*t**3 - 3*t**2 + 1 - out[3] out[2] = - 2*t**3 + 3*t**2 - out[0] @numba.jit(nopython=True) def cubicsplinecoef_basis(t, out): out[0] = (1-t)**3 /6.0 out[1] = ( 3*t**3 - 6*t**2 + 4) /6.0 out[2] = (-3*t**3 + 3*t**2 + 3*t + 1) /6.0 out[3] = ( t)**3 /6.0 @numba.jit(nopython=True) def cubicsplinecoef_hermite(t, out): out[0] = 2*t**3 - 3*t**2 + 1 out[1] = t**3 - 2*t**2 + t out[2] = - 2*t**3 + 3*t**2 out[3] = t**3 - t**2 @numba.jit(nopython=True) def cubicsplinecoef_lagrange(t, out): k = -1.0 out[0] = (t )/(k ) * (t-1)/(k-1) * (t-2)/(k-2) k= 0 out[1] = (t+1)/(k+1) * (t-1)/(k-1) * (t-2)/(k-2) k= 1 out[2] = (t+1)/(k+1) * (t )/(k ) * (t-2)/(k-2) k= 2 out[3] = (t+1)/(k+1) * (t )/(k ) * (t-1)/(k-1) @numba.jit(nopython=True) def cubicsplinecoef_lanczos(t, out): tt= (1+t) out[0] = 2*sin(pi*tt)*sin(pi*tt/2) / (pi*pi*tt*tt) tt= (2-t) out[3] = 2*sin(pi*tt)*sin(pi*tt/2) / (pi*pi*tt*tt) if t!=0: tt= t out[1] = 2*sin(pi*tt)*sin(pi*tt/2) / (pi*pi*tt*tt) else: out[1] =1 if t!=1: tt= (1-t) out[2] = 2*sin(pi*tt)*sin(pi*tt/2) / (pi*pi*tt*tt) else: out[2] =1 @numba.jit(nopython=True) def cubicsplinecoef_nearest(t, out): out[0] = 0.0 out[1] = int(t < 0.5) out[2] = int(t >= 0.5) out[3] = 0.0 @numba.jit(nopython=True) def cubicsplinecoef_linear(t, out): out[0] = 0.0 out[1] = (1.0-t) out[2] = t out[3] = 0.0 @numba.jit(nopython=True) def cubicsplinecoef_quadratic(t, out): # This corresponds to adding the two quadratic polynoms, # thus keeping genuine quadratic interpolation. However, # it has the same support as a cubic spline, so why use this? # Catmull-rom is similar, except it linearly interpolates the two # quadratic functions instead of adding them. 
out[0] = 0.25*t**2 - 0.25*t out[1] = -0.25*t**2 - 0.75*t + 1 out[2] = -0.25*t**2 + 1.25*t out[3] = 0.25*t**2 - 0.25*t PK,KuT~88pirt/interp/_forward.py""" Low level project() function implemented with Numba to make it super fast. """ import numpy as np import numba @numba.jit(nopython=True, nogil=True) def floor(i): if i >= 0: return int(i) else: return int(i) - 1 @numba.jit(nopython=True, nogil=True) def ceil(i): if i >= 0: return int(i + 0.9999999999999999) else: return int(i) def project(data, samples): """ project(data, samples) Interpolate data to the positions specified by samples (pixel coordinates). In contrast to warp(), the project() function applies forward deformations, moving the pixels/voxels to the given locations, rather than getting the pixel values from the given locations. Although this may feel closer to how one would like to think about deformations, this function is slower and has no options to determine the interpolation, because there is no interpolation, but splatting. Parameters ---------- data : array (float32 or float64) Data to interpolate, can be 1D, 2D or 3D. samples : tuple with numpy arrays Each array specifies the sample position for one dimension (in x-y-z order). In contrast to warp(), each array must have the same shape as data. Can also be a stacked array as in skimage's warp() (in z-y-x order). Returns ------- result : array The result is of the same type and shape as the data array. """ # Check data if not isinstance(data, np.ndarray): raise ValueError('data must be a numpy array.') elif data.ndim > 3: raise ValueError('can not interpolate data with such many dimensions.') # With Numba we can allow any dtype! # Check samples if isinstance(samples, tuple): pass elif isinstance(samples, list): samples = tuple(samples) elif isinstance(samples, np.ndarray) and samples.shape[0] == data.ndim and samples[0].ndim > 0: # skimage API, note that this is z-y-x order! samples = tuple(reversed([samples[i] for i in range(samples.shape[0])])) elif data.ndim==1: samples = (samples,) else: raise ValueError("samples must be a tuple of arrays.") if len(samples) != data.ndim: tmp = "samples must contain as many arrays as data has dimensions." raise ValueError(tmp) for s in samples: if not isinstance(s, np.ndarray): raise ValueError("values in samples must all be numpy arrays.") if s.shape != data.shape: # note that this is quite a bit more restrictive than in warp() raise ValueError("sample arrays must all have the same shape as the data.") # With Numba we can allow any dtype for the samples! # Prepare empty result array - important to use zeros(), not empty()! result = np.zeros(samples[0].shape, data.dtype) # shape of samples, dtype of data # Go if data.ndim == 1: project1(data, result, samples[0]) elif data.ndim == 2: project2(data, result, samples[0], samples[1]) elif data.ndim == 3: project3(data, result, samples[0], samples[1], samples[2]) # Done return result def aproject(data, samples): """ aproject(data, samples) Interpolation in anisotropic array. Like project(), but the samples are expressed in world coordimates. 
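
    Example (a minimal sketch; splats a single bright pixel one position to
    the right, with world coordinates twice the pixel indices):

        import numpy as np
        from pirt import Aarray, aproject

        im = Aarray(np.zeros((5, 5), np.float32), sampling=(2.0, 2.0))
        im[2, 2] = 1.0
        yy, xx = np.meshgrid(np.arange(5.0), np.arange(5.0), indexing='ij')
        out = aproject(im, (xx * 2.0 + 2.0, yy * 2.0))
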
""" # Check if not (hasattr(data, 'sampling') and hasattr(data, 'origin')): raise ValueError('aproject() needs the data to be an Aarray.') # Correct samples samples2 = [] ndim = len(samples) for i in range(ndim): d = ndim-i-1 origin = data.origin[d] sampling = data.sampling[d] samples2.append( (samples[i]-origin) / sampling ) # Interpolate return project(data, samples2) @numba.jit(nopython=True, nogil=True) def project1(data_, result_, deformx_): Nx = data_.shape[0] coeff_ = np.zeros(data_.shape, dtype=np.float32) for ix1 in range(0, Nx): # Calculate location to map to x2 = deformx_[ix1] # Select where the surrounding pixels map to. # This defines the region that we should fill in. This region # is overestimated as it is assumed rectangular in the destination, # which is not true in general. x2_min = x2 x2_max = x2 for ix3 in range(-1,2): if ix3==0: continue if ix1+ix3 < 0: x2_min = x2 - 1000000.0 # Go minimal continue if ix1+ix3 >= Nx: x2_max = x2 + 1000000.0 # Go maximal continue # x border val = deformx_[ix1+ix3] x2_min = min(x2_min, val) x2_max = max(x2_max, val) # Limit to bounds and make integer x2_min = max(0, min(Nx-1, x2_min )) x2_max = max(0, min(Nx-1, x2_max )) # ix2_min = max(0, floor(x2_min) ) # use max/min again to be sure ix2_max = min(Nx-1, ceil(x2_max) ) # Calculate max range to determine kernel size rangeMax = 0.1 rangeMax = max( rangeMax, max(abs(x2_min-x2), abs(x2_max-x2)) ) rangeMax = 1.0 / rangeMax # pre-divide # Sample value val = data_[ix1] # Splat value in destination for ix2 in range(ix2_min, ix2_max+1): # Calculate weights and make sure theyre > 0 wx = 1.0 - rangeMax * abs(ix2 - x2) w = max(0.0, wx) # Assign values result_[ix2 ] += val * w coeff_[ ix2 ] += w # Divide by the coeffeicients for ix2 in range(Nx): c = coeff_[ix2] if c>0: result_[ix2] = result_[ix2] / c @numba.jit(nopython=True, nogil=True) def project2(data_, result_, deformx_, deformy_): Ny = data_.shape[0] Nx = data_.shape[1] coeff_ = np.zeros(data_.shape, dtype=np.float32) for iy1 in range(0, Ny): for ix1 in range(0, Nx): # Calculate location to map to y2 = deformy_[iy1,ix1] x2 = deformx_[iy1,ix1] # Select where the surrounding pixels map to. # This defines the region that we should fill in. This region # is overestimated as it is assumed rectangular in the destination, # which is not true in general. 
x2_min = x2 x2_max = x2 y2_min = y2 y2_max = y2 for iy3 in range(-1,2): for ix3 in range(-1,2): if iy3*ix3==0: continue if iy1+iy3 < 0: y2_min = y2 - 1000000.0 # Go minimal continue if ix1+ix3 < 0: x2_min = x2 - 1000000.0 # Go minimal continue if iy1+iy3 >= Ny: y2_max = y2 + 1000000.0 # Go maximal continue if ix1+ix3 >= Nx: x2_max = x2 + 1000000.0 # Go maximal continue # x border val = deformx_[iy1+iy3,ix1+ix3] x2_min = min(x2_min, val) x2_max = max(x2_max, val) # y border val = deformy_[iy1+iy3,ix1+ix3] y2_min = min(y2_min, val) y2_max = max(y2_max, val) # Limit to bounds and make integer x2_min = max(0, min(Nx-1, x2_min )) x2_max = max(0, min(Nx-1, x2_max )) y2_min = max(0, min(Ny-1, y2_min )) y2_max = max(0, min(Ny-1, y2_max )) # ix2_min = max(0, floor(x2_min) ) # use max/min again to be sure ix2_max = min(Nx-1, ceil(x2_max) ) iy2_min = max(0, floor(y2_min) ) iy2_max = min(Ny-1, ceil(y2_max) ) # Calculate max range to determine kernel size rangeMax = 0.1 rangeMax = max( rangeMax, max(abs(y2_min-y2), abs(y2_max-y2)) ) rangeMax = max( rangeMax, max(abs(x2_min-x2), abs(x2_max-x2)) ) #rangeMax_sum += rangeMax rangeMax = 1.0 / rangeMax # pre-divide # Sample value val = data_[iy1,ix1] # Splat value in destination for iy2 in range(iy2_min, iy2_max+1): for ix2 in range(ix2_min, ix2_max+1): # Calculate weights and make sure theyre > 0 wy = 1.0 - rangeMax * abs(iy2 - y2) wx = 1.0 - rangeMax * abs(ix2 - x2) w = max(0.0, wy) * max(0.0, wx) # Assign values result_[iy2 ,ix2 ] += val * w coeff_[ iy2 ,ix2 ] += w # Divide by the coeffeicients for iy2 in range(Ny): for ix2 in range(Nx): c = coeff_[iy2,ix2] if c>0: result_[iy2,ix2] /= c @numba.jit(nopython=True, nogil=True) def project3(data_, result_, deformx_, deformy_, deformz_): Nz = data_.shape[0] Ny = data_.shape[1] Nx = data_.shape[2] coeff_ = np.zeros(data_.shape, dtype=np.float32) for iz1 in range(0, Nz): for iy1 in range(0, Ny): for ix1 in range(0, Nx): # Calculate location to map to z2 = deformz_[iz1,iy1,ix1] y2 = deformy_[iz1,iy1,ix1] x2 = deformx_[iz1,iy1,ix1] # Select where the surrounding pixels map to. # This defines the region that we should fill in. This region # is overestimated as it is assumed rectangular in the destination, # which is not true in general. 
x2_min = x2 x2_max = x2 y2_min = y2 y2_max = y2 z2_min = z2 z2_max = z2 for iz3 in range(-1,2): for iy3 in range(-1,2): for ix3 in range(-1,2): if iz3*iy3*ix3==0: continue if iz1+iz3 < 0: z2_min = z2 - 1000000.0 # Go minimal continue if iy1+iy3 < 0: y2_min = y2 - 1000000.0 # Go minimal continue if ix1+ix3 < 0: x2_min = x2 - 1000000.0 # Go minimal continue if iz1+iz3 >= Nz: z2_max = z2 + 1000000.0 # Go maximal continue if iy1+iy3 >= Ny: y2_max = y2 + 1000000.0 # Go maximal continue if ix1+ix3 >= Nx: x2_max = x2 + 1000000.0 # Go maximal continue # x border val = deformx_[iz1+iz3,iy1+iy3,ix1+ix3] x2_min = min(x2_min, val) x2_max = max(x2_max, val) # y border val = deformy_[iz1+iz3,iy1+iy3,ix1+ix3] y2_min = min(y2_min, val) y2_max = max(y2_max, val) # z border val = deformz_[iz1+iz3,iy1+iy3,ix1+ix3] z2_min = min(z2_min, val) z2_max = max(z2_max, val) # Limit to bounds and make integer x2_min = max(0, min(Nx-1, x2_min )) x2_max = max(0, min(Nx-1, x2_max )) y2_min = max(0, min(Ny-1, y2_min )) y2_max = max(0, min(Ny-1, y2_max )) z2_min = max(0, min(Nz-1, z2_min )) z2_max = max(0, min(Nz-1, z2_max )) # ix2_min = max(0, floor(x2_min) ) # use max/min again to be sure ix2_max = min(Nx-1, ceil(x2_max) ) iy2_min = max(0, floor(y2_min) ) iy2_max = min(Ny-1, ceil(y2_max) ) iz2_min = max(0, floor(z2_min) ) iz2_max = min(Nz-1, ceil(z2_max) ) # Calculate max range to determine kernel size rangeMax = 0.1 rangeMax = max( rangeMax, max(abs(z2_min-z2), abs(z2_max-z2)) ) rangeMax = max( rangeMax, max(abs(y2_min-y2), abs(y2_max-y2)) ) rangeMax = max( rangeMax, max(abs(x2_min-x2), abs(x2_max-x2)) ) rangeMax = 1.0 / rangeMax # pre-divide # Sample value val = data_[iz1,iy1,ix1] # Splat value in destination for iz2 in range(iz2_min, iz2_max+1): for iy2 in range(iy2_min, iy2_max+1): for ix2 in range(ix2_min, ix2_max+1): # Calculate weights and make sure theyre > 0 wz = 1.0 - rangeMax * abs(iz2 - z2) wy = 1.0 - rangeMax * abs(iy2 - y2) wx = 1.0 - rangeMax * abs(ix2 - x2) w = max(0.0, wz) * max(0.0, wy) * max(0.0, wx) # Assign values result_[iz2, iy2, ix2] += val * w coeff_[ iz2, iy2, ix2] += w # Divide by the coeffeicients for iz2 in range(Nz): for iy2 in range(Ny): for ix2 in range(Nx): c = coeff_[iz2,iy2,ix2] if c>0: result_[iz2,iy2,ix2] = result_[iz2,iy2,ix2] / c PKO3KEJְ((pirt/interp/_func.pyimport numpy as np from ..gaussfun import diffuse from .. import Aarray from ._backward import warp, awarp # noqa from ._forward import project, aproject # noqa from ._misc import make_samples_absolute, meshgrid ## Deformations def deform_backward(data, deltas, order=1, spline_type=0.0): """ deform_backward(data, deltas, order=1, spline_type=0.0) Interpolate data according to the deformations specified in deltas. Deltas should be a tuple of numpy arrays similar to 'samples' in the warp() function. They represent the relative sample positions expressed in world coordinates. """ # todo: this function assumes that the shape and sampling of data and deltas is equal. # Either document, enforce, or fix that (e.g. by using awarp when needed?) # Check if len(deltas) != data.ndim: tmp = "Samples must contain as many arrays as data has dimensions." raise ValueError(tmp) # Create samples samples = make_samples_absolute(deltas) # Interpolate result = warp(data, samples, order, spline_type) # Make Aarray # todo: is this necessary? can the Aarray not do this itself? 
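    # Example (illustrative): a uniform shift of +2 world-units along x
    # for a 2D image could be expressed as:
    #
    #   deltas = (np.full(im.shape, 2.0, 'float32'),   # dx
    #             np.zeros(im.shape, 'float32'))       # dy
    #   shifted = deform_backward(im, deltas)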
    if hasattr(data, 'sampling'):
        result = Aarray(result, data.sampling)
    if hasattr(data, 'origin'):
        result.origin = data.origin

    # Done
    return result


def deform_forward(data, deltas):
    """ deform_forward(data, deltas)

    Like deform_backward(), but applied to project (forward deformation).
    """

    # Check
    if len(deltas) != data.ndim:
        tmp = "Samples must contain as many arrays as data has dimensions."
        raise ValueError(tmp)

    # Create samples
    samples = make_samples_absolute(deltas)

    # Interpolate
    result = project(data, samples)

    # Make Aarray
    # todo: is this necessary? can the Aarray not do this itself?
    if hasattr(data, 'sampling'):
        result = Aarray(result, data.sampling)
    if hasattr(data, 'origin'):
        result.origin = data.origin

    # Done
    return result


## Resize and zoom

def resize(data, new_shape, order=3, spline_type=0.0, prefilter=False, extra=False):
    """ resize(data, new_shape, order=3, spline_type=0.0, prefilter=False, extra=False)

    Resize the data to the specified new shape.

    Parameters
    ----------
    data : numpy array
        The data to resize.
    new_shape : tuple
        The new shape of the data (z-y-x order).
    order : {0,1,3} or {'nearest', 'linear', 'cubic'}
        The interpolation order to use.
    spline_type : float or string
        Only for cubic interpolation. Specifies the type of spline.
        Can be 'Basis', 'Hermite', 'Cardinal', 'Catmull-rom', 'Lagrange',
        'Lanczos', 'quadratic', or a float, specifying the tension
        parameter for the Cardinal spline. See the docs of
        get_cubic_spline_coefs() for more information.
    prefilter : bool
        Whether to apply (discrete Gaussian diffusion) anti-aliasing
        (when downsampling). Default False.
    extra : bool
        Whether to extrapolate the data a bit. In this case, each
        datapoint is seen as spanning a space equal to the distance
        between the data points. This is the method used when you resize
        an image using e.g. paint.net or photoshop. If False, the first
        and last datapoint are exactly on top of the original first and
        last datapoint (like scipy.ndimage.zoom). Default False.

    Notes on extrapolating
    ----------------------
    For the sake of simplicity, assume that the new shape is exactly
    twice that of the original. When extra is False, the sampling between
    the pixels is not a factor 2 of the original. When extra is True, the
    sampling decreases with a factor of 2, but the data now has an offset.
    Additionally, extrapolation is performed, which is less accurate than
    interpolation.
    """

    # Check new_shape
    if not isinstance(new_shape, (tuple, list)):
        raise ValueError('new_shape must be a tuple or list.')
    elif not len(new_shape) == len(data.shape):
        raise ValueError('new_shape must contain as many values as data has dimensions.')
    new_shape = [int(round(n)) for n in new_shape]

    # Get shape, sampling and origin
    shape = data.shape
    sampling = [1.0 for s in shape]
    origin = [0.0 for s in shape]
    if hasattr(data, 'sampling'):
        sampling = data.sampling
    if hasattr(data, 'origin'):
        origin = data.origin

    # Init lists
    ranges = []
    sampling2 = []
    origin2 = []

    for s, n, sam, ori in zip(shape, new_shape, sampling, origin):

        if extra:
            # Get full range (expressed in "pixels")
            dmin, dmax = -0.5, s - 0.5
            drange = dmax - dmin  # == s
            # Step size (n-1 steps in between the pixels, and 0.5 steps at each side)
            dstep = float(drange) / n
            dstep2 = 0.5 * dstep
            # Get sampling and offset
            sampling2.append(dstep * sam)
            origin2.append(ori + (dmin + dstep2) * sam)
            # Get range
            r = np.linspace(dmin + dstep2, dmax - dstep2, n)
            ranges.append(r)
        else:
            # Get full range (expressed in "pixels")
            dmin, dmax = 0, s - 1
            drange = dmax - dmin  # == s
            # Step size (the outer points are exactly at dmin and dmax)
            dstep = float(drange) / (n - 1)
            # Get sampling and offset
            sampling2.append(dstep * sam)
            origin2.append(ori)
            # Get range
            r = np.linspace(dmin, dmax, n)
            ranges.append(r)

    # Anti-aliasing
    def foo(x):
        if x < 1.0:
            return 0.8 / x  # 1.0 is better, but people like sharp images
        else:
            return 0.0
    factors = [float(s1) / s2 for s1, s2 in zip(new_shape, shape)]
    sigmas = [foo(f) for f in factors]
    if prefilter and sum(sigmas):
        data = diffuse(data, sigmas)

    # Interpolate (first make ranges x-y-z)
    ranges.reverse()
    grids = meshgrid(ranges)
    data2 = warp(data, grids, order, spline_type)

    # Make Aarray
    return Aarray(data2, sampling2, origin2)


def imresize(data, new_shape, order=3):
    """ imresize(data, new_shape, order=3)

    Convenience function to resize the image data (1D, 2D or 3D).

    This function uses pirt.resize() with 'prefilter' and 'extra' set to
    True. This makes it more suitable for generic image resizing. Use
    pirt.resize() for more fine-grained control.

    Parameters
    ----------
    data : numpy array
        The data to resize.
    new_shape : tuple
        The new shape of the data (z-y-x order).
    order : {0,1,3} or {'nearest', 'linear', 'cubic'}
        The interpolation order to use.
    """
    return resize(data, new_shape, order, 0.0, True, True)


def zoom(data, factor, order=3, spline_type=0.0, prefilter=False, extra=False):
    """ zoom(data, factor, order=3, spline_type=0.0, prefilter=False, extra=False)

    Resize the data with the specified factor. The default behavior is
    the same as scipy.ndimage.zoom(), but three times faster.

    Parameters
    ----------
    data : numpy array
        The data to resize.
    factor : scalar or tuple
        The resize factor, optionally for each dimension (z-y-x order).
    order : {0,1,3} or {'nearest', 'linear', 'cubic'}
        The interpolation order to use.
    spline_type : float or string
        Only for cubic interpolation. Specifies the type of spline.
        Can be 'Basis', 'Hermite', 'Cardinal', 'Catmull-rom', 'Lagrange',
        'Lanczos', 'quadratic', or a float, specifying the tension
        parameter for the Cardinal spline. See the docs of
        get_cubic_spline_coefs() for more information.
    prefilter : bool
        Whether to apply (discrete Gaussian diffusion) anti-aliasing
        (when downsampling). Default False.
    extra : bool
        Whether to extrapolate the data a bit.
        In this case, each datapoint is seen as spanning a space equal to
        the distance between the data points. This is the method used
        when you resize an image using e.g. paint.net or photoshop. If
        False, the first and last datapoint are exactly on top of the
        original first and last datapoint (like scipy.ndimage.zoom).
        Default False.

    Notes on extrapolating
    ----------------------
    For the sake of simplicity, assume a resize factor of 2. When extra
    is False, the sampling between the pixels is not a factor 2 of the
    original. When extra is True, the sampling decreases with a factor of
    2, but the data now has an offset. Additionally, extrapolation is
    performed, which is less accurate than interpolation.
    """

    # Process factor
    if isinstance(factor, np.ndarray) and factor.size == 1:
        factor = float(factor)
    if isinstance(factor, (float, int)):
        factor = [factor for i in data.shape]

    # Check factor
    if not isinstance(factor, (list, tuple)):
        raise ValueError('Factor must be a float or tuple/list.')
    if len(factor) != data.ndim:
        raise ValueError('Factor len does not match ndim of data.')

    # Calculate new shape
    new_shape = [float(f) * s for f, s in zip(factor, data.shape)]
    new_shape = [int(round(s)) for s in new_shape]

    # Resize
    return resize(data, new_shape, order, spline_type, prefilter, extra)


def imzoom(data, factor, order=3):
    """ imzoom(data, factor, order=3)

    Convenience function to resize the image data (1D, 2D or 3D) with the
    specified factor.

    This function uses pirt.interp.resize() with 'prefilter' and 'extra'
    set to True. This makes it more suitable for generic image resizing.
    Use pirt.resize() for more fine-grained control.

    Parameters
    ----------
    data : numpy array
        The data to resize.
    factor : scalar or tuple
        The resize factor, optionally for each dimension (z-y-x order).
    order : {0,1,3} or {'nearest', 'linear', 'cubic'}
        The interpolation order to use.
    """
    return zoom(data, factor, order, 0.0, True, True)


pirt/interp/_misc.py
import numpy as np
import numba


def meshgrid(*args):
    """ Meshgrid implementation for 1D, 2D, 3D and beyond.

    meshgrid(nx, ny) will create meshgrids with the specified shapes.

    meshgrid(ndarray) will create meshgrids corresponding to the shape
    of the given array (which must have 2 or 3 dimensions).

    meshgrid([2,3,4], [1,2,3], [4,1,2]) uses the supplied values to
    create the grids. These lists can also be numpy arrays.

    Returns a tuple with the grids in x-y-z order, with arrays of type
    float32.
    """

    # Test args
    if len(args) == 1:
        args = args[0]
    if isinstance(args, np.ndarray) and args.ndim in [2, 3]:
        args = tuple(reversed(args.shape))
    if not isinstance(args, (list, tuple)):
        raise ValueError('Invalid argument for meshgrid.')

    iterators = []
    for arg in args:
        if isinstance(arg, int):
            iterators.append(np.arange(arg, dtype=np.float32))
        elif isinstance(arg, list):
            iterators.append(np.array(arg, dtype=np.float32))
        elif isinstance(arg, np.ndarray):
            iterators.append(arg)
        else:
            raise ValueError('Invalid argument for meshgrid.')

    # Swizzle iterators because Numpy's meshgrid behaves differently
    iterators.reverse()
    if len(iterators) > 1:
        iterators[0], iterators[1] = iterators[1], iterators[0]

    # Use Numpy
    res = np.meshgrid(*iterators, indexing='xy')
    res = [a.astype(np.float32) for a in res]

    # Swizzle back and return
    if len(iterators) > 1:
        res[0], res[1] = res[1], res[0]
    return tuple(reversed(res))


# todo: this seems not to be used anymore
@numba.jit(nopython=True, nogil=True)
def uglyRoot(n):
    """ uglyRoot(n)

    Calculates an approximation of the square root using (a few)
    Newton iterations.
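
    For example, for n = 2.0 the three iterations below yield about
    1.4142, already close to the true square root.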
""" x = 1.0 x = x - (x * x - n) / (2.0 * x) x = x - (x * x - n) / (2.0 * x) x = x - (x * x - n) / (2.0 * x) return x def make_samples_absolute(samples): """ make_samples_absolute(samples) Note: this function is intended for sampes that represent a deformation; the number of dimensions of each array should match the number of arrays. Given a tuple of arrays that represent a relative deformation expressed in world coordinates (x,y,z order), returns a tuple of sample arrays that represents the absolute sample locations in pixel coordinates. It is assumed that the sampling of the data is the same as for the sample arrays. The origin property is not used. This process can also be done with relative ease by adding a meshgrid and then using awarp() or aproject(). But by combining it in one step, this process becomes faster and saves memory. Note that the deform_*() functions use this function. """ ndim = len(samples) absolute_samples = [] for i in range(ndim): # Get array and check sample_array = samples[i] if sample_array.ndim != ndim: raise ValueError("make_samples_absolute: the number of dimensions"+ " of each array should match the number of arrays.") # Get dimension corresponding to this sampling array d = ndim-i-1 # Get sampling sampling = 1.0 if hasattr(sample_array, 'sampling'): sampling = sample_array.sampling[d] # Instantiate result result = np.empty_like(sample_array) absolute_samples.append(result) # Apply if sample_array.ndim == 1: make_samples_absolute1(sample_array, result, sampling, d) if sample_array.ndim == 2: make_samples_absolute2(sample_array, result, sampling, d) if sample_array.ndim == 3: make_samples_absolute3(sample_array, result, sampling, d) # Done return tuple(absolute_samples) @numba.jit(nopython=True, nogil=True) def make_samples_absolute1(samples_, result_, sampling, dim=0): # Define variables sampling_i = 1.0/sampling Nx = samples_.shape[0] if dim == 0: for x in range(Nx): result_[x] = x + samples_[x] * sampling_i @numba.jit(nopython=True, nogil=True) def make_samples_absolute2(samples_, result_, sampling, dim=0): sampling_i = 1.0/sampling Ny = samples_.shape[0] Nx = samples_.shape[1] if dim == 0: for y in range(Ny): for x in range(Nx): result_[y,x] = y + samples_[y,x] * sampling_i elif dim == 1: for y in range(Ny): for x in range(Nx): result_[y,x] = x + samples_[y,x] * sampling_i @numba.jit(nopython=True, nogil=True) def make_samples_absolute3(samples_, result_, sampling, dim=0): # Define variables sampling_i = 1.0/sampling Nz = samples_.shape[0] Ny = samples_.shape[1] Nx = samples_.shape[2] if dim == 0: for z in range(Nz): for y in range(Ny): for x in range(Nx): result_[z,y,x] = z + samples_[z,y,x] * sampling_i elif dim == 1: for z in range(Nz): for y in range(Ny): for x in range(Nx): result_[z,y,x] = y + samples_[z,y,x] * sampling_i elif dim == 2: for z in range(Nz): for y in range(Ny): for x in range(Nx): result_[z,y,x] = x + samples_[z,y,x] * sampling_i PKp4K$"\!!pirt/interp/_sliceinvolume.py""" Functionality for sampling a 2D slice from a 3D volume. """ import numpy as np import numba from .. import PointSet, Aarray from ._backward import warp def get_span_vectors(normal, c, d): """ get_span_vectors(normal, prevA, prevB) -> (a,b) Given a normal, return two orthogonal vectors which are both orthogonal to the normal. The vectors are calculated so they match as much as possible the previous vectors. 
""" # Calculate a from previous b a1 = d.cross(normal) if a1.norm() < 0.001: # The normal and d point in same or reverse direction # -> Calculate b from previous a b1 = c.cross(normal) a1 = b1.cross(normal) # Consider the opposite direction a2 = -1 * a1 if c.distance(a1) > c.distance(a2): a1 = a2 # Ok, calculate b b1 = a1.cross(normal) # Consider the opposite b2 = -1 * b1 if d.distance(b1) > d.distance(b2): b1 = b2 # Done return a1.normalize(), b1.normalize() class SliceInVolume: """ SliceInVolume(self, pos, normal=None, previous=None) Defines a slice in a volume. The two span vectors are in v and u respectively. In other words, vec1 is up, vec2 is right. """ def __init__(self, pos, normal=None, previous=None): if isinstance(pos, (tuple, list)): pos = PointSet(pos) elif isinstance(pos, np.ndarray): pos = PointSet(pos) elif hasattr(pos, '_is_Point'): # visvis.Point pos = PointSet(pos.data) assert pos.ndim == 2 and pos.shape == (1, 3) # Init vectors self._pos = pos self._normal = None self._vec1 = None self._vec2 = None # Calculate normal if normal is None and previous is None: self._normal = PointSet((0,0,-1)) elif normal is None: # Normal is defined by difference in position, # and the previous normal self._normal = pos - previous._pos self._normal += previous._normal self._normal = self._normal.normalize() elif normal is not None: self._normal = normal.normalize() # Calculate vec1 and vec2 if previous is None: # Use arbitrary vector arbitrary = PointSet((1,0,0)) self._vec1 = arbitrary.cross(self._normal) if self._vec1.norm() < 0.0001: arbitrary = PointSet((0,1,0)) self._vec1 = arbitrary.cross(self._normal) # second self._vec2 = self._vec1.cross(self._normal) else: # Use previous tmp = get_span_vectors(self._normal, previous._vec1, previous._vec2) self._vec1, self._vec2 = tmp # Normalize self._vec1 = self._vec1.normalize() self._vec2 = self._vec2.normalize() def get_slice(self, volume, N=128, spacing=1.0): vec1 = self._vec1 * spacing vec2 = self._vec2 * spacing im = slice_from_volume(volume, self._pos, vec1, vec2, N) return Aarray(im, (spacing,spacing)) def convert_local_to_global(self, p2d, p3d): """ convert_local_to_global(p2d, p3d) Convert local 2D points to global 3D points. UNTESTED """ pos = self._pos.copy() # pos.x += p2d.y * self._vec1.x + p2d.x * self._vec2.x pos.y += p2d.y * self._vec1.y + p2d.x * self._vec2.y pos.z += p2d.y * self._vec1.z + p2d.x * self._vec2.z # return pos def slice_from_volume(data, pos, vec1, vec2, Npatch, order=3): """ slice_from_volume(data, pos, vec1, vec2, Npatch, order=3) Samples a square 2D slice from a 3D volume, using a center position and two vectors that span the patch. The length of the vectors specify the sample distance for the patch. 
""" # Prepare sampling, origin = (1.0, 1.0, 1.0), (0.0, 0.0, 0.0) if hasattr(data, 'sampling'): sampling, origin = data.sampling, data.origin # Generate sample positions x = _slice_samples_from_volume(data, sampling, origin, tuple(pos.flat), tuple(vec1.flat), tuple(vec2.flat), Npatch) samplesx, samplesy, samplesz = x # Sample in 3D volume return warp(data, [samplesx, samplesy, samplesz], order) @numba.jit(nopython=True, nogil=True) def _slice_samples_from_volume(data, sampling, origin, pos, vec1, vec2, Npatch, order=3): # Init sample arrays samplesx = np.empty((Npatch, Npatch), dtype=np.float32) samplesy = np.empty((Npatch, Npatch), dtype=np.float32) samplesz = np.empty((Npatch, Npatch), dtype=np.float32) Npatch2 = Npatch / 2.0 # Set start position x, y, z = pos # Get anisotropy factors sam_x = 1.0 / sampling[2] # Do the division here sam_y = 1.0 / sampling[1] sam_z = 1.0 / sampling[0] ori_x = origin[2] ori_y = origin[1] ori_z = origin[0] # Make vectors quick v1x, v1y, v1z = vec1 v2x, v2y, v2z = vec2 # Loop for v in range(Npatch): vd = v - Npatch2 for u in range(Npatch): ud = u - Npatch2 # Determine sample positions samplesx[v, u] = ( (x + vd*v1x + ud*v2x) - ori_x) * sam_x samplesy[v, u] = ( (y + vd*v1y + ud*v2y) - ori_y) * sam_y samplesz[v, u] = ( (z + vd*v1z + ud*v2z) - ori_z) * sam_z return samplesx, samplesy, samplesz PKqKrpirt/reg/__init__.py# flake8: noqa """ The reg module implements the various registration algorithms. """ from .reg_base import AbstractRegistration, NullRegistration, BaseRegistration, GDGRegistration from .reg_gravity import GravityRegistration from .reg_demons import OriginalDemonsRegistration, DiffeomorphicDemonsRegistration from .reg_elastix import (ElastixRegistration, ElastixGroupwiseRegistration, ElastixRegistration_rigid, ElastixRegistration_affine) PK~K ;e88pirt/reg/reg_base.py""" Defines the base registration object. """ import time import numpy as np from .. import Aarray, Parameters, ScaleSpacePyramid, SplineGrid from .. import (Deformation, DeformationField, DeformationIdentity, DeformationGridForward, DeformationFieldForward, DeformationGridBackward, DeformationFieldBackward) class classproperty(property): def __get__(self, cls, owner): return self.fget.__get__(None, owner)() def create_grid_image(shape, sampling=None, step=10, bigstep=None): """ create_grid_image(shape, sampling=None, step=10, bigstep=5*step) Create an image depicting a grid. The first argument can also be an array. """ step = int(step + 0.4999999) # Accept arrays if isinstance(shape, np.ndarray): im = shape shape = im.shape if hasattr(im, 'sampling'): sampling = im.sampling # Default sampling if sampling is None: sampling = [1 for s in shape] # Calculate bigstep if bigstep is None: bigstep = step * 5 # Create image im = Aarray(shape, sampling, fill=0.0, dtype=np.float32) # im[1::step,:] = 1 im[:,1::step] = 1 im[::bigstep,:] = 1.2 im[1::bigstep,:] = 1.2 im[2::bigstep,:] = 1.2 im[:,::bigstep] = 1.2 im[:,1::bigstep] = 1.2 im[:,2::bigstep] = 1.2 # return im class Progress(object): """ Progress() Allows an algorithm to display the progress to the user. """ def __init__(self): # Init variables to show progress self._progress_last_message = '' self._progress_iter = 0 self._progress_max_iters = 0 def start(self, message, max_iters=0): """ start(message, max_iters=0) Start a progress. The message should indicate what is being done. 
""" # Init progress variables self._progress_last_message = '' self._progress_iter = 0 self._progress_max_iters = max_iters print(message) def next(self, extra_info=''): """ next(extra_info='') Update progress to next iteration and show the new progress. Optionally a message with extra information can be displayed. """ self._progress_iter += 1 self.show(extra_info) def show(self, extra_info=''): """ show(extra_info='') Show current progress, and optional extra info. """ # Get series of backspaces to remove previous message rem = '\b' * (len(self._progress_last_message)+1) # Create message mes = '' if self._progress_max_iters: i1, i2 = self._progress_iter, self._progress_max_iters mes += 'iter %i/%i (%1.0f%%)' % (i1, i2, 100*(float(i1)/i2)) else: mes += 'iter %i' % self._progress_iter if extra_info: mes += ', ' + str(extra_info) # Store and print message self._progress_last_message = mes print(rem + mes) class Timer(object): """ Timer() Enables registration objects to time the different components. Can be used to optimize the speed of the registration algorithms, or to study the effect of a parameter on the speed. Multiple things can be timed simultaneously. Timers can also be started and stopped multiple times; the total time is measured. """ def __init__(self): # Init variables to perform timing self._timers = {} def start(self, id): """ start(id) Start a timer for the given id, which should best be a string. The timer can be started and stopped multiple times. In the end the total time spend on 'id' can be displayed using show(). """ # Get timers for this type if id in self._timers: timers = self._timers[id] else: timers = [] self._timers[id] = timers # Add start time timers.append( time.time() ) def stop(self, id): """ stop(id) Stop the timer for 'id'. """ # Get timers for this type if id in self._timers: timers = self._timers[id] else: return # No timer to stop # Stop the last timer t = timers[-1] if t > 946080000: # 30*365*24*60*60: a time after the year 2000 timers[-1] = time.time() - t else: pass # An already stopped timer def get(self, id): """ get(id) Get the total time spend in seconds on 'id'. Returns -1 if the given id is not valid. """ # Get timers for this type if id in self._timers: timers = self._timers[id] else: return -1# No timer to get # Sum all stopped timers (do not added running timers) t_sum = 0 for t in timers: if t < 946080000: # 30*365*24*60*60: less than 30 years t_sum += t # Done return t_sum def show(self, id=None): """ show(id=None) Show (print) the results for the total timings of 'id'. If 'id' is not given, will print the result of all timers. """ # Get what timers to display if id is None: ids = self._timers.keys() elif isinstance(id, (tuple,list)): ids = id else: ids = [id] # Display timers for id in ids: t = self.get(id) print('Total time spend on %s: %1.2f seconds' % (str(id), t)) class Visualizer(object): """ Visualize Tool to visualize the images during registration. """ def __init__(self): self._f = None def init(self, fig): """ init(fig) Initialize by giving a figure. """ self._f = fig if fig is not None: import visvis as vv # noqa - so importerror is raised if visvis not available @property def fig(self): """ Get the figure instance (or None) if init() was not called. """ return self._f def imshow(self, subplot, im, *args, **kwargs): """ imshow(subplot, im, *args, **kwargs) Show the given image in specified subplot. If possible, updates the previous texture object. 
""" # Check if we can show if not self.fig: return else: self._f.MakeCurrent() # Import visvis import visvis as vv # Get axes if isinstance(subplot, tuple): a = vv.subplot(*subplot) else: a = vv.subplot(subplot) # Get what's in there t = None if len(a.wobjects) == 2 and isinstance(a.wobjects[1], vv.BaseTexture): t = a.wobjects[1] # Reuse, or clear and replace if t is not None: t.SetData(im) else: a.Clear() t = vv.imshow(im, *args, **kwargs) # Done return t class AbstractRegistration(object): """ AbstractRegistration(*images, makeFloat=True) Base class for registration of 2 or more images. This class only provides a common interface for the user. This base class can for example be inherited by registration classes that wrap an external registration algorithm, such as Elastix. Also see :class:`pirt.BaseRegistration`. Implements: * progress, timer, and visualizer objects * properties to handle the mapping (forward or backward) * functionality to get and set parameters * basic functionality to get resulting deformations * functionality to show the result (2D only) Parameters ---------- None """ # Inherting methods should implement register() def __init__(self, *ims, makeFloat=True): # Init images self._ims = [] # Check number of images if len(ims) < 2: raise ValueError('Need at least two images at initialisation.') # Check all images for im in ims: # Check if numpy array if not isinstance(im, np.ndarray): raise ValueError('Images to register should be numpy arrays.') # Make float (if necessary) if makeFloat and im.dtype not in [np.float32, np.float64]: im = im.astype(np.float32) # Make anisotropic arrays (is view on same data, no data copying) if not isinstance(im, Aarray): im = Aarray(im) # Store self._ims.append(im) # Init deformations self._deforms = {} # Init params self._params = self._defaultParams() # Instantiate progress and timer self._progress = Progress() self._timer = Timer() self._visualizer = Visualizer() @classmethod def register_and_get_object(cls, *ims, **params): """ register_get_object(*ims, **params) Classmethod to register the given images with the given parameters, and return the resulting registration object (after the registration has been performed). """ # Create registration object ro = cls(*ims) # Set params for param in params.keys(): ro.params[param] = params[param] # Register ro.register() # Done return ro ## Methods and properties for the registration and results @property def progress(self): """ The progress object, can be used by the algorithm to indicate its progress. """ return self._progress @property def timer(self): """ The timer object, can be used by the algorithm to measure the processing time of the different steps in the registration algorithm. """ return self._timer @property def visualizer(self): """ The visualizer object, can be used by the algorithm to display the images as they are deformed during registration. """ return self._visualizer ## Properties to handle forward/backward mapping @property def forward_mapping(self): """ Whether forward (True) or backward (False) mapping is to be used internally. """ if self.params.mapping.lower() == 'forward': return True elif self.params.mapping.lower() == 'backward': return False else: raise ValueError('Invalid mapping.') @property def DeformationField(self): """ Depending on whether forward or backward mapping is used, returns the DeformationFieldForward or DeformationFieldBackward class. 
""" if self.forward_mapping: return DeformationFieldForward else: return DeformationFieldBackward @property def DeformationGrid(self): """ Depending on whether forward or backward mapping is used, returns the DeformationGridForward or DeformationGridBackward class. """ if self.forward_mapping: return DeformationGridForward else: return DeformationGridBackward ## Methods and props for params def _defaultParams(self): """ Overload to create all default params. """ params = Parameters() params._class_name = self.__class__.__name__ params.mapping = 'undefined' return params @classproperty @classmethod def defaultParams(cls): """ Class property to get the default params for this registration class. """ # Instantiate a dummy class to obtain the default params im = np.zeros((10,10), 'float32') reg = cls(im,im) return reg._defaultParams() @property def params(self): """ Get params structure (as a Parameters object). """ return self._params def set_params(self, params=None, **kwargs): """ set_params(params=None, **kwargs) Set any parameters. The parameters are updated with the given dict, Parameters object, and then with the parameters given via the keyword arguments. Note that the parameter structure can also be accessed directly via the 'params' propery. """ # Combine user input D = {} if params: for key in params: D[key] = params[key] if True: for key in kwargs: D[key] = kwargs[key] # Set style elements invalidKeys = [] for key in D: self._params[key] = D[key] if key not in self._params: invalidKeys.append(key) # Give warning for invalid keys if invalidKeys: print("Warning, invalid param given: " + ','.join(invalidKeys)) ## Methods to get result and show result def get_deform(self, i=0): """ get_deform(i=0) Get the DeformationField instance for image with index i. If groupwise registration was used, this deformation field maps image i to the mean shape. """ # Check if not isinstance(i, int): raise ValueError('The argument of get_deform must be an int.') try: return self._deforms[i] except KeyError: raise KeyError('The deformation for index %i' % i + 'is not available.') def get_final_deform(self, i=0, j=1, mapping=None): """ get_final_deform(i=0, j=1, mapping=None) Get the DeformationField instance that maps image with index i to the image with index j. If groupwise registration was used, the deform is a composition of deform 'i' with the inverse of deform 'j'. Parameters ---------- i : int The source image j : int The target image mapping : {'forward', 'backward', Deformation instance} Whether the result should be a forward or backward deform. When specified here, the result can be calculated with less errors than for example using result.as_forward(). If a Deformation object is given, the mapping of that deform is used. 
""" # Check for ij in [i,j]: if not isinstance(ij, int): raise ValueError('Both arguments must be indices to the images.') # Get individual deforms deform1 = self._deforms.get(i, None) deform2 = self._deforms.get(j, None) # Handle mapping if mapping is None: mapForward = self.forward_mapping elif isinstance(mapping, str): if mapping.lower() == 'forward': mapForward = True elif mapping.lower() == 'backward': mapForward = False else: raise ValueError('Invalid mapping specified.') elif isinstance(mapping, Deformation): mapForward = mapping.forward_mapping else: raise ValueError('Invalid mapping specified.') # Compose if deform1 and deform2: if mapForward: deform1 = deform1.as_forward() deform2i = deform2.as_forward_inverse() deform = deform1.compose(deform2i) else: deform1 = deform1.as_backward() deform2i = deform2.as_backward_inverse() deform = deform1.compose(deform2i) elif deform1: if mapForward: deform = deform1.as_forward() else: deform = deform1.as_backward() elif deform2: if mapForward: deform = deform2.as_forward_inverse() else: deform = deform2.as_backward_inverse() else: raise ValueError('No deform available. Run register().') # Done return deform def show_result(self, how=None, fig=None): """ show_result(self, how=None, fig=None) Convenience method to show the registration result. Only works for two dimensional data. Requires visvis. """ # Check if dimension ok if self._ims[0].ndim != 2: raise RuntimeError('show_result only works for 2D data.') # Check if result is set if not self._deforms: raise RuntimeError('The result is not available; run register().') # Make how lower if string if isinstance(how, str): how = how.lower() # Import visvis import visvis as vv # Create figure if fig is None: fig = vv.figure() else: fig = vv.figure(fig.nr) fig.Clear() if how in [None, 'grid', 'diff', 'dx', 'dy']: # Title map title_map = ['Moving', 'Static', 'Deformed', 'Grid'] # Get deform deform = self.get_final_deform(0, 1) # Create third image im1_ = deform.apply_deformation(self._ims[0]) # Create fourth image if how in [None, 'grid']: im2_ = create_grid_image(im1_.shape, im1_.sampling) im2_ = deform.apply_deformation(im2_) elif how == 'diff': im2_ = np.abs(im1_-self._ims[1]) title_map[3] = 'Diff' elif how == 'dx': im2_ = deform[1] title_map[3] = 'dx' elif how == 'dy': im2_ = deform[0] title_map[3] = 'dy' # Set images ims = [self._ims[0], self._ims[1], im1_, im2_] # Imshow all figures aa, tt = [],[] for i in range(len(ims)): a = vv.subplot(2,2,i+1) t = vv.imshow(ims[i]) vv.title(title_map[i]) a.axis.visible = False aa.append(a); tt.append(t) # Done return tuple(tt) ## Some basic registration methods (and its tools) def register(self, verbose=1, fig=None): """ register(verbose=1, fig=None) Perform the registration process. Parameters ---------- verbose : int Verbosity level. 0 means silent, 1 means print some, 2 means print a lot. fig : visvis figure or None If given, will display the registration progress in the given figure. """ self._register(verbose, fig) def _register(self, verbose, fig): """ Inheriting classes should overload this method. """ raise NotImplemented() class NullRegistration(AbstractRegistration): """ NullRegistration(*images) Inherits from :class:`pirt.AbstractRegistration`. A registration algorithm that does nothing. This can be usefull to test the result if no registration would be applied. Parameters ---------- None """ def _defaultParams(self): """ Overload to create all default params. 
""" params = AbstractRegistration._defaultParams(self) params.mapping = 'backward' return params def _register(self, *args, **kwargs): for i in range(len(self._ims)): shape = self._ims[i].shape fields = [np.zeros(shape, 'float32') for s in shape] self._deforms[i] = DeformationFieldForward(*fields) class BaseRegistration(AbstractRegistration): """ BaseRegistration(*images) Inherits from :class:`pirt.AbstractRegistration`. An abstract registration class that provides common functionality shared by almost all registration algorithms. This class maintains an image pyramid for each image, implements methods to set the delta deform, and get the deformed image at a specified scale. Further, this class implements the high level aspect of the registration algorithm that iterates through scale space. Parameters ---------- mapping : {'forward', 'backward'} Whether forward or backward mapping is used. Default forward. combine_deforms : {'compose', 'add'} How deformations are combined. Default compose. While add is used in some (older) registration algorithms, it is a coarse approximation. final_scale : scalar The minimum scale used during the registration process. This is the scale at which the registration ends. Default 1.0. Because calculating differentials suffer from more errors as the scale decreases, the minimum value is limited at 0.5. scale_levels : integer The amount of scale levels to use during the registration. Each level represents a factor of two in scale. The default (4) works for most images, but for large images or large deformations a larger value can be used. scale_sampling : scalar The amount of iterations for each level (i.e. between each factor two in scale). What values are reasonable depends on the specific algorithm. smooth_scale : bool Whether a smooth scale space should be used (default) or the scale is reduced with a factor of two each scale_sampling iterations. """ # Implements: # * _register() # * _set_delta_deform(i, deform) # * get_deformed_image(i) # * _set_buffered_data(key1, key2, data) # * _get_buffered_data(key1, key2) # # Inherting methods should implement: # * _deform_for_image(i, iterInfo) def __init__(self, *ims): AbstractRegistration.__init__(self, *ims) # Init container for scale space for the images self._pyramids = None # Buffer to store reusable data self._buffer = {} # Attributes to steer the registration process self._current_interp_order = 1 def _defaultParams(self): """ Overload to create all default params. """ # Get params struct params = AbstractRegistration._defaultParams(self) # Set default mapping params.mapping = 'backward' # How transformations are combined params.combine_deforms = 'compose' # Parameters related to scale params.smooth_scale = True # if False uses factors of two params.scale_levels = 5 params.final_scale = 1.0 params.scale_sampling = 15 # Done return params ## Methods and props to help the algorithm def _set_delta_deform(self, i, deform): """ _set_delta_deform(i, deform) Append the given delta deform for image i. It is combined with the current deformation for that image. """ # Assume null deform if deform is None: return elif deform.is_identity: return # Get how to combine combine = self.params.combine_deforms.lower() # Get current (or null-deform) current = self._deforms.get(i, None) if current is None: current = self.DeformationField(self._ims[0].ndim) self.timer.start('Setting delta deforms') # Reshape current = current.resize_field(deform) # Track deformation amplitude? 
if hasattr(self, '_deform_tracking'): ff = [] for f in deform: if not isinstance(f, np.ndarray): f = f.knots ff.append(f) f = (ff[0]**2 + ff[1]**2) ** 0.5 if i==0: self._deform_tracking.append(f.mean()) # Combine if combine == 'compose': totalDeform = current.compose(deform) elif combine == 'add': totalDeform = current.add(deform) else: raise RuntimeError('param combine_deforms is invalid: %s' % str(combine)) self.timer.stop('Setting delta deforms') # Store self._deforms[i] = totalDeform def get_deformed_image(self, i, s=0): """ get_deformed_image(i, s=0) Get the image i at scale s, deformed with its current deform. Mainly intended for the registration algorithms, but can be of interest during development/debugging. """ # Get image self.timer.start('getting raw images from pyramid') if not s: s = None im = self._pyramids[i].get_scale(s) self.timer.stop('getting raw images from pyramid') # Deform it self.timer.start('deforming raw images') deform = self._deforms.get(i, None) if deform is not None: deform = deform.resize_field(im) self._deforms[i] = deform # store, so we do not need to resize later im = deform.apply_deformation(im, self._current_interp_order) self.timer.stop('deforming raw images') # Done return im def _set_buffered_data(self, key1, key2, data): """ _set_buffered_data(key1, key2, data) Buffer the given data. key1 is where the data is stored under. key2 is a check. The most likely use case is using the image number as key1 and the scale as key2. Intended for the registration algorithm subclasses. """ self._buffer[key1] = key2, data def _get_buffered_data(self, key1, key2): """ _get_buffered_data(key1, key2) Retrieve buffered data. """ key2_data = self._buffer.get(key1, None) if key2_data and key2_data[0] == key2: return key2_data[1] else: return None ## The actual methods def _register(self, verbose=1, fig=None): # For an illustration of the scale sampling, see the script # _smooth_scale_sampling.py. 
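        # Example: with scale_sampling = 15, iter_factor = 0.5**(1.0/15) is
        # about 0.955, so multiplying the scale by iter_factor once per
        # iteration halves the scale over the course of one level.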
        # Init visualizer
        self.visualizer.init(fig)

        # Init progress display
        if verbose >= 1:
            self.progress.start('%s: ' % self.__class__.__name__)

        # Init interpolation order
        self._current_interp_order = 1

        # Scale parameters
        final_scale = float(self.params.final_scale)
        scale_sampling = int(self.params.scale_sampling)
        smooth_scale = bool(self.params.smooth_scale)

        # Calculate iter factor for smooth scale space
        iter_factor = 0.5**(1.0 / scale_sampling)

        # Check
        pixel_scales = [final_scale / s for s in self._ims[0].sampling]
        if min(pixel_scales) < 0.5:
            raise ValueError('final_scale expressed in pixel units should be at least 0.5.')

        # Create pyramids, using final_scale as an offset
        self._pyramids = [ScaleSpacePyramid(im, final_scale) for im in self._ims]

        # Calculate max scale
        ranges = [sh * sa for sh, sa in zip(self._ims[0].shape, self._ims[0].sampling)]
        self._max_scale = max_scale = max(ranges) * 0.25

        # Calculate scale levels
        scale_levels = 1
        while final_scale * 2**(scale_levels - 1) < max_scale:
            scale_levels += 1

        # Main loop
        for level in reversed(range(scale_levels)):

            # Set (initial) scale for this level
            scale = final_scale * 2**level
            if smooth_scale:
                scale *= 2 * iter_factor

            for iter in range(1, scale_sampling + 1):

                # Skip highest scale
                if smooth_scale and level >= scale_levels - 1:
                    continue

                # Set interpolation order higher in the final iterations
                if level == 0 and iter > 0.75 * scale_sampling:
                    self._current_interp_order = 3

                # Do one iteration
                iterInfo = (level, iter, scale)
                self._register_iteration(iterInfo)

                # Print iteration info
                if verbose == 1:
                    self.progress.next('(%i-%i) scale %1.2f' % iterInfo)
                elif verbose > 1:
                    print("Registration iter %i-%i at scale %1.2f" % iterInfo)

                # Next iteration. When using a smooth scale space the
                # final scale is reached sooner.
                if smooth_scale:
                    scale = max(final_scale, scale * iter_factor)

    def _register_iteration(self, iterInfo):
        """ _register_iteration(iterInfo)

        Apply one iteration of the registration algorithm at the
        specified scale (iterInfo[2]).
        """
        nims = len(self._ims)

        # Calculate deformation for each image
        deforms = []
        for i in range(nims):
            deform = self._deform_for_image(i, iterInfo)
            deforms.append(deform)

        # Apply deformations
        for i in range(nims):
            self._set_delta_deform(i, deforms[i])

    def _deform_for_image(self, i, iterInfo):
        """ _deform_for_image(i, iterInfo)

        Calculate the deform for the given image index.
        """
        raise NotImplementedError()


class GDGRegistration(BaseRegistration):
    """ GDGRegistration(*images)

    Inherits from :class:`pirt.BaseRegistration`.

    Generic Groupwise Diffeomorphic Registration. Abstract class that
    provides a generic way to perform diffeomorphic groupwise
    registration.

    Parameters
    ----------
    deform_wise : {'groupwise', 'pairwise'}
        Whether all images are deformed simultaneously, or only the first
        image is deformed. When registering more than 2 images,
        'groupwise' registration should be used. Default 'groupwise'.
    injective : bool
        Whether the injectivity constraint should be used. This value
        should only be set to False in specific (testing?) situations;
        the resulting deformations are only guaranteed to be
        diffeomorphic if injective=True.
    frozenEdge : bool
        Whether the deformation is set to zero at the edges. If True
        (default) the resulting deformation fields are *fully*
        diffeomorphic; no pixels are mapped from the inside to the
        outside nor vice versa.
    final_grid_sampling : scalar
        The grid sampling of the grid at the final level. During the
        registration process, the B-spline grid sampling scales along
        with the scale.
grid_sampling_factor : scalar between 0 and 1 To what extent the grid sampling scales with the scale. By making this value lower than 1, the grid is relatively fine at the the higher scales, allowing for more deformations. The default is 0.5. Note that setting this value to 1 when using 'frozenedge' can cause the image to be 'stuck' at higher scales. deform_limit : scalar If injective is True, the deformations at each iteration are constraint by a "magic" limit. By making this limit tighter (relative to the scale), the deformations stay in reasonable bounds. This feature helps a lot for convergence. Default value is 1. """ # Implements: # * _regularize_diffeomorphic(scale, deform, weight=None) # * get_final_deform(i, j, mapping) # * _deform_for_image(i, iterInfo) # # Inherting methods should implement: # * _deform_for_image_pair(i, i, iterInfo) def _defaultParams(self): """ Overload to create all default params. """ # Get params struct params = BaseRegistration._defaultParams(self) # Parameters related to diffeomorpic deforms and groupwise registration params.deform_wise = 'groupwise' params.injective = True params.frozenedge = True # Parameters related to B-spline based regularization params.final_grid_sampling = 16 params.grid_sampling_factor = 0.5 params.deform_limit = 1.0 # Done return params def _get_grid_sampling_old(self, scale): # This method is stupid, as changing the final_scale changes # the grid sampling in a very unexpected ways. This is why # the final_scale and grid_sampling were so dependent in previous # versions. # Get values from params final_grid_sampling = float(self.params.final_grid_sampling) grid_factor = float(self.params.grid_sampling_factor) # Get factor and bias gsf = grid_factor * final_grid_sampling / self.params.final_scale gsb = (1-grid_factor) * final_grid_sampling # Compute! grid_sampling = scale * gsf + gsb return grid_sampling def _get_grid_sampling(self, scale): # Get values from params final_grid_sampling = float(self.params.final_grid_sampling) grid_factor = float(self.params.grid_sampling_factor) final_scale = float(self.params.final_scale) # Get factor and bias gsf = grid_factor * final_grid_sampling gsb = final_grid_sampling # Compute! grid_sampling = (scale-final_scale) * gsf + gsb return grid_sampling def _get_grid_sampling_full(self, scale): # One can determine a max grid sampling from the image shape, and # scale linearly between this and the final_grid_sampling, # but this turns out not to work so nice, because it is so # dependent on image shape etc. # Calculate max sampling (4x max_scale seems nice because we have 4 knots) max_scale = self._max_scale max_grid_sampling = max_scale * 4.0 # Get values from params final_grid_sampling = float(self.params.final_grid_sampling) final_scale = float(self.params.final_scale) # Compute! t = max(0.0,scale-final_scale) / (max_scale - final_scale) grid_sampling = t*max_grid_sampling + (1.0-t)*final_grid_sampling return grid_sampling def _regularize_diffeomorphic(self, scale, deform, weight=None): """ _regularize_diffeomorphic(scale, deform, weight=None) Regularize the given DeformationField in a way that makes it diffeomorphic. Returns the result as a DeformationGrid. 
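
        For example (with the default final_grid_sampling 16,
        grid_sampling_factor 0.5 and final_scale 1.0), _get_grid_sampling()
        gives a grid sampling of 16 at the final scale and 40 at scale 4.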
""" # Check if not isinstance(deform, DeformationField): raise ValueError('make_diffeomorphic needs a DeformationField.') # Get grid sampling grid_sampling = self._get_grid_sampling(scale) # Calculate factor to limit the deformation injective, frozenedge = self.params.injective, self.params.frozenedge if injective: deform_limit = float(self.params.deform_limit) injective = deform_limit * scale / grid_sampling injective = min(injective, 0.9) # must not be higher than 1! # Regularize using a B-spline grid self.timer.start('regularizing') grid = self.DeformationGrid.from_field(deform, grid_sampling, weight, injective, frozenedge) self.timer.stop('regularizing') return grid def _deform_for_image(self, i, iterInfo): """ _deform_for_image(i, iterInfo) Calculate the deform for the given image index. """ scale = iterInfo[2] # Check whether the grid would be too small anyway if self.params.frozenedge: # Get grid sampling grid_sampling = self._get_grid_sampling(scale) # Create dummy grid testGrid = SplineGrid(self._ims[0], grid_sampling) # Check if all( [s<4 for s in testGrid.grid_shape] ): print('skip because grid too small') return None # Get wise deform_wise = self.params.deform_wise.lower() # Decide which routine to use if deform_wise in ['pairwise', 'pairwise1']: return self._deform_for_image_pairwise1(i, iterInfo) elif deform_wise == 'pairwise2': return self._deform_for_image_pairwise2(i, iterInfo) elif deform_wise == 'groupwise': return self._deform_for_image_groupwise(i, iterInfo) else: raise ValueError('Invalid value for param deform_wise: %s' % repr(deform_wise)) def _deform_for_image_groupwise(self, i, iterInfo): """ _deform_for_image_groupwise(i, iterInfo) Calculate the deform for the given image index. For each image, the deform between that image and all other images is calculated. The total deform is the average of these deforms. """ # todo: Suggestion by DJ: weight the contribution of the # "nearest" images more. # Init deform totalDeform = DeformationIdentity() nims = len(self._ims) count = 0 # Collect deform for j in range(nims): if i==j: continue # Get deform deform = self._deform_from_image_pair(i, j, iterInfo) # Add to total if deform is not None: count += 1 totalDeform = totalDeform + deform #print 'totalDeform', totalDeform.__class__ # Almost done if count > 1: totalDeform = totalDeform.scale(1.0/count) # Done return totalDeform def _deform_for_image_pairwise1(self, i, iterInfo): """ _deform_for_image_pairwise1(i, iterInfo) Get the deform for the image only if i is 0; the source image. This is what's classic registration does. """ # Only for source to target if i != 0: return None # Return deform for image 0 only return self._deform_from_image_pair(0, 1, iterInfo) def _deform_for_image_pairwise2(self, i, iterInfo): """ _deform_for_image_pairwise2(i, iterInfo) Get the deform for the image only if i is 1. """ # Only for source to target if i != 1: return None # Return deform for image 0 only return self._deform_from_image_pair(1, 0, iterInfo) PKKb88pirt/reg/reg_demons.py""" Demons registration algorithm """ import numpy as np import scipy.ndimage from .. import Aarray, diffuse2 from .reg_base import GDGRegistration, BaseRegistration, create_grid_image class BaseDemonsRegistration(object): """ BaseDemonsRegistration Abstract class that implements the base functionality of the Demons algorithm. """ def _get_derivative(self, im, d, o=1, edgeMode='constant'): """ _get_derivative(im, d, o=1) Calculate the derivative (of order o) of the given image in the given dimension. 
""" # Set edgeMode to constant, because when the image is deformed, # its (deformed) edge will give rise to high filter response any way. # We can apply differentiation using compact support kernels # because we use a scale space pyramid based on discrete # diffusion kernels as proposed by Tony Lindeberg. The # resulting differentiation is theoretically valid. # (Of course, for low scales, the results suffer from discretisation # errors, which is why the scale should be at least 0.5/1.0.) if o == 0: return im # No differentiation elif o == 1: k = np.array([0.5, 0, -0.5], dtype='float64') elif o == 2: k = np.array([1, -2, 1], dtype='float64') else: raise ValueError('Order of differentiation must be {0,1,2}.') # For o in [1,2] tmp = scipy.ndimage.convolve1d(im, k, d, mode=edgeMode) return Aarray(tmp, im.sampling) def _get_image_and_gradient(self, image_id, iterInfo): """ _get_image_and_gradient(image_id, iterInfo) Get the image and the gradient for the given image id. Returns a tuple (mass, (gradz, grady, gradx)) """ scale = iterInfo[2] # Use buffered? buffered = self._get_buffered_data(image_id, iterInfo) if buffered is not None: im = buffered else: # Get deformed image im = self.get_deformed_image(image_id, scale) # Buffer self._set_buffered_data(image_id, iterInfo, im) # Calculate gradient of image. gradient = [] for d in range(im.ndim): tmp = self._get_derivative(im, d, 1, 'nearest') gradient.append( tmp ) # Done return im, tuple(gradient) def _visualize(self, gridStep=10): """ _visualize(self) Visualize the registration process. """ if self.visualizer.fig is None: return import visvis as vv firstpass = not self.visualizer.fig.children if True: # Show # Apply deformation to image im1 = self.get_deformed_image(0, 0) im2 = self.get_deformed_image(1, 0) # Get grid images grid1 = create_grid_image(im1.shape, im1.sampling, gridStep) grid2 = grid1.copy() deform1, deform2 = self._deforms.get(0, None), self._deforms.get(1, None) if deform1 and deform2: grid1 = deform1.apply_deformation(grid1)# + 0.1*self._deforms[0][0]) grid2 = deform2.apply_deformation(grid2)# + 0.1*self._deforms[1][0]) # Get images ims = [ None, im1, grid1, None, im2, grid2] # Update textures for i in range(len(ims)): if ims[i] is not None: self.visualizer.imshow((2,3,i+1), ims[i]) if firstpass: # Init # Show originals self.visualizer.imshow(231, self._ims[0]) self.visualizer.imshow(234, self._ims[1]) # Show titles title_map = [ 'im 1', 'deformed 1', 'grid 1', 'im 2', 'deformed 2', 'grid 2'] for i in range(len(title_map)): a = vv.subplot(2,3,i+1) vv.title(title_map[i]) a.axis.visible = False # Draw now self.visualizer.fig.DrawNow() def _deform_from_image_pair(self, i, j, iterInfo): """ _deform_from_image_pair(i, j, iterInfo) Calculate the deform for image i to image j. 
""" # Extract iter info level, iter, scale = iterInfo # Try using buffered data # we can make good use of the fact that our delta deforms are symetric buffered = self._get_buffered_data((i,j), iterInfo) if buffered is not None: return buffered buffered = self._get_buffered_data((j,i), iterInfo) if buffered is not None: for grid in buffered: grid._knots = - grid._knots return buffered # Get images and their gradients self.timer.start('getting images') im1, grad1 = self._get_image_and_gradient(i, iterInfo) im2, grad2 = self._get_image_and_gradient(j, iterInfo) self.timer.stop('getting images') # # Prevent too high scales # if min(im1.shape) < 16: # return None self.timer.start('calculating vectors') # Calculate norms norm1 = im1 * 0.0 # copy norm2 = im2 * 0.0 for d in range(im1.ndim): norm1 += grad1[d]**2 norm2 += grad2[d]**2 # Calculate denumerators imd = im1-im2 imd2 = imd**2 alpha = float(self.params.noise_factor) denum1 = norm1 + alpha**2 * imd2 denum2 = norm2 + alpha**2 * imd2 del norm1, norm2, imd2 # Make sure the division doesnt cause us problems denum1[denum1==0] = np.inf denum2[denum2==0] = np.inf # Find deformation; use the gradient of both images. speed_factor = float(self.params.speed_factor) if not self.forward_mapping: speed_factor *= -1 dd_ = [] for d in range(imd.ndim): tmp = imd * ( grad1[d]/denum1 + grad2[d]/denum2 ) * speed_factor dd_.append(tmp) self.timer.stop('calculating vectors') if isinstance(self, GDGRegistration): # Regularize using a B-spline grid deformForce = self.DeformationField(*dd_) deform = self._regularize_diffeomorphic(scale, deformForce) else: # Regularize deformation field using diffusion self.timer.start('regularizing') for d in range(imd.ndim): # Get sigma final_scale = float(self.params.final_scale) final_smoothing = float(self.params.final_smoothing) sigma_reg = scale * final_smoothing / final_scale # Diffuse dd_[d] = diffuse2(dd_[d], sigma_reg) dd_[d] = dd_[d].astype('float32') self.timer.stop('regularizing') # Make deform deform = self.DeformationField(*dd_) # Show gridSize = 10 if hasattr(self, '_get_grid_sampling'): gridSize = self._get_grid_sampling(scale) if i==0 and j==1: self._visualize(gridSize) elif self.params.deform_wise.lower() == 'pairwise2' and i==1: self._visualize(gridSize) # Buffer B-spline grid and return self._set_buffered_data((i,j), iterInfo, deform) return deform class OriginalDemonsRegistration(BaseRegistration, BaseDemonsRegistration): """ OriginalDemonsRegistration(*images) Inherits from :class:`pirt.BaseRegistration`. The original version of the Demons algorithm. Uses Gaussian diffusion to regularize the deformation field. This is the implementation as proposed by He Wang et al. in 2005 "Validation of an accelerated 'demons' algorithm for deformable image registration in radiation therapy" See also :class:`pirt.DiffeomorphicDemonsRegistration`. The ``speed_factor`` and ``noise_factor`` parameters are specific to this algorithm. Other important parameters are also listed below. Parameters ---------- speed_factor : scalar The relative force of the transform. This one of the most important parameters to tune. Default 3.0. noise_factor : scalar The noise factor. Default 2.5. final_scale : scalar The minimum scale used during the registration process. This is the scale at which the registration ends. Default 1.0. Because calculating differentials suffer from more errors as the scale decreases, the minimum value is limited at 0.5. scale_levels : integer The amount of scale levels to use during the registration. 
Each level represents a factor of two in scale. The default (4) works for most images, but for large images or large deformations a larger value can be used. scale_sampling : scalar The amount of iterations for each level (i.e. between each factor two in scale). Values between 20 and 30 are reasonable in most situations. Default 25. Higher values yield better results in general. The speed of the algorithm scales linearly with this value. """ def _defaultParams(self): """ Overload to create all default params. """ params = BaseRegistration._defaultParams(self) # Iteration speed and noise params.speed_factor = 3.0 params.noise_factor = 2.5 # Change default values params.scale_sampling = 25 # Set some things to match the original algorithm params.smooth_scale = False params.combine_deforms = 'add' params.mapping = 'backward' # params.deform_wise = 'pairwise' belongs to GDGRegistration # Smoothing factor params.final_smoothing = 8 return params def _deform_for_image(self, i, iterInfo): # implement pairwise registration # Get deform_wise param deform_wise = 'pairwise' if 'deform_wise' in self.params: deform_wise = self.params.deform_wise.lower() # Decide which routine to use if deform_wise in ['pairwise', 'pairwise1']: if i==0: return self._deform_from_image_pair(0, 1, iterInfo) elif deform_wise == 'pairwise2': if i==1: return self._deform_from_image_pair(1, 0, iterInfo) elif deform_wise == 'groupwise': self._set_buffered_data((0,1), iterInfo, None) self._set_buffered_data((1,0), iterInfo, None) if i==0: return self._deform_from_image_pair(0, 1, iterInfo) elif i==1: return self._deform_from_image_pair(1, 0, iterInfo) else: raise ValueError('Classic Demons only supports 2 images.') else: raise ValueError('Invalid value for param deform_wise: %s' % repr(deform_wise)) class DiffeomorphicDemonsRegistration(GDGRegistration, BaseDemonsRegistration): """ DiffeomorphicDemonsRegistration(*images) Inherits from :class:`pirt.GDGRegistration`. A variant of the Demons algorithm that is diffeomorphic. Based on the generic diffeomorphic groupwise registration (GDGRegistration) method. See also :class:`pirt.OriginalDemonsRegistration`. The ``speed_factor`` parameter is specific to this algorithm. The ``noise_factor`` works best set at 1.0, effectively disabling its use; it is made redundant by the B-spline based regularization. Other important parameters are also listed below. Parameters ---------- speed_factor : scalar The relative force of the transform. This is one of the most important parameters to tune. Default 3.0. mapping : {'forward', 'backward'} Whether forward or backward mapping is used. Default forward. final_scale : scalar The minimum scale used during the registration process. This is the scale at which the registration ends. Default 1.0. Because calculating differentials suffers from more errors as the scale decreases, the minimum value is limited at 0.5. scale_levels : integer The amount of scale levels to use during the registration. Each level represents a factor of two in scale. The default (4) works for most images, but for large images or large deformations a larger value can be used. scale_sampling : scalar The amount of iterations for each level (i.e. between each factor two in scale). Values between 20 and 30 are reasonable in most situations. Default 25. Higher values yield better results in general. The speed of the algorithm scales linearly with this value. final_grid_sampling : scalar The grid sampling of the grid at the final level. During the registration process, the B-spline grid sampling scales along with the scale.
This parameter is usually best coupled to final_scale. (When increasing final scale, this value should often be increased accordingly.) grid_sampling_factor : scalar between 0 and 1 To what extent the grid sampling scales with the scale. By making this value lower than 1, the grid is relatively fine at the the higher scales, allowing for more deformations. The default is 0.5. Note that setting this value to 1 when using 'frozenedge' can cause the image to be 'stuck' at higher scales. """ def _defaultParams(self): """ Overload to create all default params. """ params = GDGRegistration._defaultParams(self) # Change default values params.scale_sampling = 25 # Iteration speed and noise params.speed_factor = 3.0 params.noise_factor = 1.0 return params PK&sK!&!&pirt/reg/reg_elastix.py""" Registration using the elastix registration toolkit. """ from .. import Parameters, DeformationFieldBackward from .reg_base import AbstractRegistration # Elastix is optional try: import pyelastix except ImportError: pyelastix = None NEED_ELASTIX = ('The Elastix registration algorithm needs the pyelastix library ' '(install with conda or pip).') class ElastixRegistration(AbstractRegistration): """ ElastixRegistration(im1, im2) Inherits from :class:`pirt.AbstractRegistration`. Registration class for registration using the Elastix toolkit. [http://elastix.isi.uu.nl/] This class performs a bspline transformation by default. See also the convenience subclasses. The params property this class returns a struct with a few common Elastix parameters. the params2 property contains another set of more advanced parameters. Note that any parameter known to elastix can be added to the parameter structures, which enables tuning the registration in great detail. Also note that two parameter structs can be combined by adding them. Because the parameters directly represent the parameters for the Elastix toolkit, their names do not follow the style of most other registration objects in this package. Here we lists some of the common parameters, for more details we refer to the elastix manual. Parameters ---------- FinalGridSpacingInPhysicalUnits : int When using the BSplineTransform, the final spacing of the grid. This controls the smoothness of the final deformation. NumberOfResolutions : int Most registration algorithms adopt a multiresolution approach to direct the solution towards a global optimum and to speed up the process. This parameter specifies the number of scales to apply the registration at. (default 4) MaximumNumberOfIterations : int Maximum number of iterations in each resolution level. 200-2000 works usually fine for nonrigid registration. The more, the better, but the longer computation time. This is an important parameter! (default 500) """ transformation_type = 'bspline' def __init__(self, *args): # Elastix available? if pyelastix is None: raise RuntimeError(NEED_ELASTIX) AbstractRegistration.__init__(self, *args, makeFloat=True) # Check if not isinstance(self, ElastixGroupwiseRegistration): if len(args)>2: raise ValueError('Can only register two images. ' 'Use ElastixGroupwiseRegistration instead.') self._params2 = Parameters(pyelastix.get_advanced_params().as_dict()) def _defaultParams(self): """ Overload to create all default params. 
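        For this class the defaults are pirt's ``mapping`` parameter plus
        the default parameters that pyelastix provides for the chosen
        ``transformation_type``. A minimal usage sketch (``im1`` and ``im2``
        are hypothetical 2D numpy arrays; the values are just examples)::

            reg = ElastixRegistration(im1, im2)
            reg.params.NumberOfResolutions = 4
            reg.params.MaximumNumberOfIterations = 500
            reg.register(verbose=1)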
""" params = AbstractRegistration._defaultParams(self) params.mapping = 'backward' params.update(pyelastix.get_default_params(self.transformation_type).as_dict()) return params @property def params2(self): return self._params2 def _register(self, verbose=1, fig=None): # Compile params params_elastix = pyelastix.Parameters() # this is not a dict! for key, val in self.params2.items(): setattr(params_elastix, key, val) for key, val in self.params.items(): setattr(params_elastix, key, val) # Get images if isinstance(self, ElastixGroupwiseRegistration): im1, im2 = self._ims, None else: im1, im2 = self._ims[0], self._ims[1] # Use elastix # todo: what about keyword exactparams? im, fields = pyelastix.register(im1, im2, params_elastix, verbose=verbose) # Field is a a tuple of arrays, or a list of tuple of arrays if not isinstance(self, ElastixGroupwiseRegistration): fields = [fields] # For each deformation in the potentially groupwise registration process ... for i in range(len(fields)): field = fields[i] # Reverse (Elastix uses x-y-z order) field = [f for f in reversed(field)] # Set resulting deforms self._deforms[i] = DeformationFieldBackward(*field) # Also set deformed image self._deformed_image = im def get_elastix_deformed_image(self): """ get_elastix_deformed_image() Get the deformed input image found as Elastix created it. This should be the same (except for small interpolation errors) to the image obtained using reg.get_deformed_image(0). """ # Check if result is set if not self._deforms: raise RuntimeError('The result is not available; run register().') # Return return self._deformed_image class ElastixRegistration_affine(ElastixRegistration): """ Registration class for registration using the Elastix toolkit. [http://elastix.isi.uu.nl/] This class performs an affine transformation by default. See :class:`pirt.ElastixRegistration` for more details. Parameters ---------- AutomaticScalesEstimation : bool When using a rigid or affine transform. Scales the affine matrix elements compared to the translations, to make sure they are in the same range. In general, it's best to use automatic scales estimation. AutomaticTransformInitialization : bool When using a rigid or affine transform. Automatically guess an initial translation by aligning the geometric centers of the fixed and moving. NumberOfResolutions : int Most registration algorithms adopt a multiresolution approach to direct the solution towards a global optimum and to speed up the process. This parameter specifies the number of scales to apply the registration at. (default 4) MaximumNumberOfIterations : int Maximum number of iterations in each resolution level. 200-2000 works usually fine for nonrigid registration. The more, the better, but the longer computation time. This is an important parameter! (default 500) """ transformation_type = 'affine' class ElastixRegistration_rigid(ElastixRegistration): """ Registration class for registration using the Elastix toolkit. [http://elastix.isi.uu.nl/] This class performs a rigid transformation by default. See :class:`pirt.ElastixRegistration` for more details. Parameters ---------- AutomaticScalesEstimation : bool When using a rigid or affine transform. Scales the affine matrix elements compared to the translations, to make sure they are in the same range. In general, it's best to use automatic scales estimation. AutomaticTransformInitialization : bool When using a rigid or affine transform. Automatically guess an initial translation by aligning the geometric centers of the fixed and moving. 
NumberOfResolutions : int Most registration algorithms adopt a multiresolution approach to direct the solution towards a global optimum and to speed up the process. This parameter specifies the number of scales to apply the registration at. (default 4) MaximumNumberOfIterations : int Maximum number of iterations in each resolution level. 200-2000 works usually fine for nonrigid registration. The more, the better, but the longer computation time. This is an important parameter! (default 500) """ transformation_type = 'rigid' class ElastixGroupwiseRegistration(ElastixRegistration): """ ElastixGroupwiseRegistration(*images) Inherits from :class:`pirt.ElastixRegistration`. Registration class for registration using the Elastix toolkit. [http://elastix.isi.uu.nl/] This variant uses the groupwise registration approach as proposed by Metz et al. "Nonrigid registration of dynamic medical imaging data using nD+t B-splines and a groupwise optimization approach" The params property this class returns a struct with a few common Elastix parameters. the params2 property contains another set of more advanced parameters. Note that any parameter known to elastix can be added to the parameter structures, which enables tuning the registration in great detail. Also note that two parameter structs can be combined by adding them. Because the parameters directly represent the parameters for the Elastix toolkit, their names do not follow the style of most other registration objects in this package. Parameters ---------- FinalGridSpacingInPhysicalUnits : int When using the BSplineTransform, the final spacing of the grid. This controls the smoothness of the final deformation. NumberOfResolutions : int Most registration algorithms adopt a multiresolution approach to direct the solution towards a global optimum and to speed up the process. This parameter specifies the number of scales to apply the registration at. (default 4) MaximumNumberOfIterations : int Maximum number of iterations in each resolution level. 200-2000 works usually fine for nonrigid registration. The more, the better, but the longer computation time. This is an important parameter! (default 500) """ pass PKIJK.,B,Bpirt/reg/reg_gravity.py""" Registration using gravity registration. """ import numba import numpy as np import scipy.ndimage from .. import Aarray, diffuse2 from .reg_base import GDGRegistration, create_grid_image @numba.jit(nopython=True, nogil=True) def near_root3(arr): """ near_root3(n) Calculates an approximation of the square root using (a few) Newton iterations. """ for z in range(arr.shape[0]): for y in range(arr.shape[1]): for x in range(arr.shape[2]): n = arr[z, y, x] v = 1.0 v = v - (v * v - n) / (2.0 * v) v = v - (v * v - n) / (2.0 * v) v = v - (v * v - n) / (2.0 * v) arr[z, y, x] = v @numba.jit(nopython=True, nogil=True) def near_exp3(arr): """ near_exp3(n) Calculates an approximation of the exp. """ for z in range(arr.shape[0]): for y in range(arr.shape[1]): for x in range(arr.shape[2]): v = arr[z, y, x] v = 1.0 + v / 256.0; v *= v; v *= v; v *= v; v *= v v *= v; v *= v; v *= v; v *= v arr[z, y, x] = v class GravityRegistration(GDGRegistration): """ GravityRegistration(*images) Inherits from :class:`pirt.GDGRegistration` A registration algorithm based on attraction between masses in both images, which is robust for large differences between the images. The most important parameters to tune the algorithm with are scale_sampling, speed_factor and final_grid_sampling. 
The ``speed_factor`` and ``mass_transforms`` parameters are specific to this algorithm. Other important parameters are also listed below. Parameters ---------- speed_factor : scalar The relative force of the transform. This one of the most important parameters to tune. Typical values are between 1 and 5. Default 1. mass_transforms : int or tuple of ints How the image is transformed to obtain the mass image. The number refers to the order of differentiation; 1 and 2 are gradient magnitude and Laplacian respectively. 0 only performs normalization to subtract the background. Can be specified for all images or for each image individually. Default 1. mapping : {'forward', 'backward'} Whether forward or backward mapping is used. Default forward. final_scale : scalar The minimum scale used during the registration process. This is the scale at which the registration ends. Default 1.0. Because calculating differentials suffer from more errors as the scale decreases, the minimum value is limited at 0.5. scale_levels : integer The amount of scale levels to use during the registration. Each level represents a factor of two in scale. The default (4) works for most images, but for large images or large deformations a larger value can be used. scale_sampling : scalar The amount of iterations for each level (i.e. between each factor two in scale). For the coarse implementation, this is the amount of iterations performed before moving to the next scale (which is always a factor of two smaller). Values between 10 and 20 are reasonable in most situations. Default 15. Higher values yield better results in general. The speed of the algorithm scales linearly with this value. final_grid_sampling : scalar The grid sampling of the grid at the final level. During the registration process, the B-spine grid sampling scales along with the scale. This parameter is usually best coupled to final_scale. (When increasing final scale, this value should often be increased accordingly.) grid_sampling_factor : scalar between 0 and 1 To what extent the grid sampling scales with the scale. By making this value lower than 1, the grid is relatively fine at the the higher scales, allowing for more deformations. The default is 0.5. Note that setting this value to 1 when using 'frozenedge' can cause the image to be 'stuck' at higher scales. """ def _defaultParams(self): """ Overload to create all default params. """ params = GDGRegistration._defaultParams(self) # The order of differentiation to calculate the mass images params.mass_transforms = 1 # Iteration speed params.speed_factor = 1.0 return params def _get_derivative(self, im, d, o=1, edgeMode='constant'): """ _get_derivative(im, d, o=1) Calculate the derivative (of order o) of the given image in the given dimension. """ # Set edgeMode to constant, because when the image is deformed, # its (deformed) edge will give rise to high filter response any way. # We can apply differentiation using compact support kernels # because we use a scale space pyramid based on discrete # diffusion kernels as proposed by Tony Lindeberg. The # resulting differentiation is theoretically valid. # (Of course, for low scales, the results suffer from discretisation # errors, which is why the scale should be at least 0.5/1.0.) 
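        # For reference: the o==1 kernel [0.5, 0, -0.5] below is the central
        # difference f'(x) ~= (f(x+1) - f(x-1)) / 2, and the o==2 kernel
        # [1, -2, 1] is the standard discrete approximation of f''(x).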
if o == 0: return im # No differentiation elif o == 1: k = np.array([0.5, 0, -0.5], dtype='float64') elif o == 2: k = np.array([1, -2, 1], dtype='float64') else: raise ValueError('Order of differentiation must be {0,1,2}.') # For o in [1,2] tmp = scipy.ndimage.convolve1d(im, k, d, mode=edgeMode) return Aarray(tmp, im.sampling) def _create_mass(self, image_id, im, scale): """ _create_mass(image_id, im, scale) Get the unnormalized mass image for the given image (which has the given image_id). This method can be overloaded to create the mass image in a custom way. """ # Determine order of differentiation to create the mass image if isinstance(self.params.mass_transforms, (tuple,list)): order = self.params.mass_transforms[image_id] else: order = self.params.mass_transforms if order==0: # Plain image (but background is made "black") # Flip the intensities such that the part # that is the background has the smallest values. mi, ma, me = im.min(), im.max(), im.mean() if (me-mi) > (ma-me): im = - im # And then, mass is the image mass = im elif order==1: # Gradient magnitude # Get squared derivative for each dimension massParts = [] for d in range(im.ndim): tmp = self._get_derivative(im, d, order, 'nearest') massParts.append(tmp**2) # Sum and take square root mass = np.add(*massParts)**0.5 # mass = np.add(*massParts) # near_root3(mass) # mmm, does not seem to matter much elif order==2: # Laplacian # Get second order derivative for each dimension # edgemode nearest prevents artifacts at edges, and really # improves performance. massParts = [] for d in range(im.ndim): tmp = self._get_derivative(im, d, 2, 'nearest') massParts.append(tmp) # Calculate Laplacian mass = np.add(*massParts) # Use only the positive part # Take abs, this results in kind of "sharp" mass data, but this # does not matter for the mass. The gravity field is smoothed # anyway. # Note that edges results in two masses that are very close, # resulting in a smooth single "mass" for the gravity field. # Lines result in one strong mass with two side-lobes, which # becomes a single structure in the gravity field. mass = np.abs(mass) else: raise ValueError("This order is not implemented.") # Done return mass def _normalize_mass(self, mass): """ _normalize_mass(mass) Normalize the mass image. This method can be overloaded to implement custom normalization. This normalization should preferably be such that repeated calls won't change the result. """ # The normalization is of crucial importance to this algorithm. # Sadly, it not trivial. mass *= (2/mass.std()) # i.e. make std of mass 2 mass += (0.0-mass.mean()) # i.e. move mean to 0.0 return mass def _get_mass_and_gradient(self, image_id, iterInfo): """ _get_mass_and_gradient(image_id, scale) Get the mass and the gradient for the given image id. Returns a tuple (mass, (gradz, grady, gradx)) """ scale = iterInfo[2] # Use buffered? buffered = self._get_buffered_data(image_id, iterInfo) if buffered is not None: mass = buffered else: # Get image, convert to mass, normalize im = self.get_deformed_image(image_id, scale) mass = self._create_mass(image_id, im, scale) mass = self._normalize_mass(mass) # Truncate the mass mass[mass<=0] = 0.0 self._soft_limit1(mass, 1.0) # Buffer this mass image # We could also buffer the gradient, but that costs an # awefull lot of memory (for 3D images). 
self._set_buffered_data(image_id, iterInfo, mass) # Smooth to get the gravity field exta_smoothing = scale*1.0 grav_field = diffuse2(mass, exta_smoothing) grav_field *= 1.0 / grav_field.mean() # Calculate gradient of mass fields. gradient = [] for d in range(mass.ndim): tmp = self._get_derivative(grav_field, d, 1, 'nearest') gradient.append( tmp ) # Done return mass, tuple(gradient) def _soft_limit1(self, data, limit): # Does not seem to be a bottleneck # if limit == 1: # data = -data # near_exp3(data) # data[:] = 1.0 - data # else: # data = -data/limit # near_exp3(data) # data[:] = -limit * (data-1) if limit == 1: data[:] = 1.0 - np.exp(-data) else: f = np.exp(-data/limit) data[:] = -limit * (f-1) def _soft_limit2(self, data, limit): f = np.exp(-np.abs(data)/limit) data[:] = -limit * (f-1) * np.sign(data) # def _grad_normalization(self, grad1, grad2, scale): # """ _grad_normalization(self, grad1, grad2, scale) # # Normalize the gradient vector field. This involves clipping # the values at the edges to prevent for bad values at the edges. # This is a practical issue caused by the edge effects during # convolution (convolution is ill-defined at the edges). # """ # # todo: do I need this with frozen edges? NO # return # # for grad in [grad1, grad2]: # for tmp in grad: # if tmp.ndim >= 1: # m = int(np.ceil(scale/tmp.sampling[0]))+1 # Margin # tmp[:m] = 0 # tmp[-m:] = 0 # tmp[m] *= 0.5 # tmp[-(m+1)] *= 0.5 # if tmp.ndim >= 2: # m = int(np.ceil(scale/tmp.sampling[1]))+1 # Margin # tmp[:,:m] = 0 # tmp[:,-m:] = 0 # tmp[:,m] *= 0.5 # tmp[:,-(m+1)] *= 0.5 # if tmp.ndim >= 3: # m = int(np.ceil(scale/tmp.sampling[2]))+1 # Margin # tmp[:,:,:m] = 0 # tmp[:,:,-m:] = 0 # tmp[:,:,m] *= 0.5 # tmp[:,:,-(m+1)] *= 0.5 def _visualize(self, mass1=None, mass2=None, gridStep=10): """ _visualize(self, mass1=None, mass2=None) Visualize the registration process. """ if self.visualizer.fig is None: return import visvis as vv firstpass = not self.visualizer.fig.children if True: # Show # Apply deformation to image im1 = self.get_deformed_image(0, 0) im2 = self.get_deformed_image(1, 0) # Get grid images grid1 = create_grid_image(im1.shape, im1.sampling, gridStep) grid2 = grid1.copy() deform1, deform2 = self._deforms.get(0, None), self._deforms.get(1, None) if deform1 and deform2: grid1 = deform1.apply_deformation(grid1)# + 0.1*self._deforms[0][0]) grid2 = deform2.apply_deformation(grid2)# + 0.1*self._deforms[1][0]) # Get images ims = [ None, im1, grid1, mass1, None, im2, grid2, mass2] # Update textures for i in range(len(ims)): if ims[i] is not None: t = self.visualizer.imshow((2,4,i+1), ims[i]) if i in [3, 7]: t.SetClim(0,1) if firstpass: # Init # Show originals self.visualizer.imshow(241, self._ims[0]) self.visualizer.imshow(245, self._ims[1]) # Show titles #title_map = ['im 1', 'im 2', 'deformed 1', 'deformed 2', 'mass 1', 'mass 2'] title_map = [ 'im 1', 'deformed 1', 'grid 1', 'mass 1', 'im 2', 'deformed 2', 'grid 2', 'mass 2'] for i in range(len(title_map)): a = vv.subplot(2,4,i+1) vv.title(title_map[i]) a.axis.visible = False # Draw now self.visualizer.fig.DrawNow() def _deform_from_image_pair(self, i, j, iterInfo): """ _deform_from_image_pair(i, j, iterInfo) Calculate the deform for image i to image j. 
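        The force computed below is (a sketch of what the body implements)::

            d = (grad(G_j) - grad(G_i)) * speed_factor * scale

        where G is the diffused ("gravity field") version of each mass
        image, and the sign is flipped for backward mapping. The force is
        then regularized into a diffeomorphic B-spline deform, weighted by
        the product of both mass images.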
""" # Extract iter info level, iter, scale = iterInfo # Try using buffered data # we can make good use of the fact that our delta deforms are symetric buffered = self._get_buffered_data((i,j), iterInfo) if buffered is not None: return buffered buffered = self._get_buffered_data((j,i), iterInfo) if buffered is not None: for grid in buffered: grid._knots = - grid._knots return buffered # Get mass and its gradient self.timer.start('getting mass images and gradient') mass1, grad1 = self._get_mass_and_gradient(i, iterInfo) mass2, grad2 = self._get_mass_and_gradient(j, iterInfo) self.timer.stop('getting mass images and gradient') # # Prevent too high scales # if min(mass1.shape) < 16: # print 'skip because image too small' # return None # Calculate force vectors self.timer.start('calculating vectors') factor = float(self.params.speed_factor) * scale if not self.forward_mapping: factor *= -1 dd_ = [] for d in range(mass1.ndim): #dd_.append( grad1[d] * (-1 * factor) ) #dd_.append( grad2[d] * (factor) ) dd_.append( (grad2[d]-grad1[d]) * (factor) ) self.timer.stop('calculating vectors') # Regularize using a B-spline grid deformForce = self.DeformationField(*dd_) deform = self._regularize_diffeomorphic(scale, deformForce, mass1*mass2) # Show if i==0 and j==1: self._visualize(mass1, mass2, self._get_grid_sampling(scale)) elif self.params.deform_wise.lower() == 'pairwise2' and i==1: self._visualize(mass1, mass2, self._get_grid_sampling(scale)) # Buffer B-spline grid and return self._set_buffered_data((i,j), iterInfo, deform) return deform PK3K7pirt/splinegrid/__init__.py# flake8: noqa """ The splinegrid module implements functionality for spline grids. A spline grid is used to representa field (i.e. data) using a grid. Essentially, spline grids allow the interpolation of sparse data in an optimally smooth way by adopting a multiscale approach. The only type of spline that makes sense for this purpose is the B-spline. In Pirt, spline grids are used to represent deformations, but any kind of data can be represented, like e.g. image data. The ``GridInterface`` class is the base class that enables basic grid properties and functionality. The ``SplineGrid`` class implements an actual grid that represents a scalar field. The ``GridContainer`` class can be used to wrap multiple ``SplineGrid`` instances in order to represent a vector/tensor field (such as color channels or deformations). """ from ._splinegridclasses import GridInterface, SplineGrid, GridContainer, FieldDescription, FD from ._splinegridclasses import calculate_multiscale_sampling PKm4KC>~~%pirt/splinegrid/_splinegridclasses.pyimport numpy as np from .. import PointSet, Aarray from . import _splinegridfuncs ## Helper classes and functions class FieldDescription: """ FieldDescription(*args) Describes the dimensions of a field (i.e. Aarray). It stores the following properties: shape, sampling, origin This class can for example be used to instantiate a new grid without requiring the actual field. This class can be instantiated with a shape and sampling tuple, or with any object that describes a field in a way that we know of (e.g. SplineGrid and DeformationField instances). 
Examples -------- * FieldDescription(shape, sampling=None, origin=None) * FieldDescription(grid) * FieldDescription(Aarray) * FieldDescription(np.ndarray) # assumes unit sampling and zero origin """ def __init__(self, shape, sampling=None, origin=None): if hasattr(shape, 'shape') and isinstance(shape.shape, (list, tuple)): # Field given field = shape shape, sampling, origin = field.shape, None, None if hasattr(field, 'sampling'): sampling = field.sampling if hasattr(field, 'origin'): origin = field.origin elif ( hasattr(shape, 'field_shape') and isinstance(shape.field_shape, (list, tuple)) ): # Grid or deformation field field = shape shape, sampling, origin = field.field_shape, None, None if hasattr(field, 'field_sampling'): sampling = field.field_sampling if hasattr(field, 'field_origin'): origin = field.field_origin # Init self._defined_samping = False self._defined_origin = False # Check and set shape if isinstance(shape, (list, tuple)): self._shape = tuple([int(s) for s in shape]) else: raise TypeError('Invalid argument for FieldDescription') # Check and set sampling if isinstance(sampling, (list, tuple)): self._defined_samping = True self._sampling = tuple([float(s) for s in sampling]) elif sampling is None: self._sampling = tuple([1.0 for i in self.shape]) else: raise TypeError('Invalid sampling for FieldDescription') # Check and set origin if isinstance(origin, (list, tuple)): self._defined_origin = True self._origin = tuple([float(s) for s in origin]) elif origin is None: self._origin = tuple([0.0 for i in self.shape]) else: raise TypeError('Invalid origin for FieldDescription') @property def ndim(self): """ The number of dimensions of the field. """ return len(self._shape) @property def shape(self): """ The shape of the field. """ return self._shape @property def sampling(self): """ The sampling between the pixels of the field. """ return self._sampling @property def origin(self): """ The origin (spatial offset) of the field. """ return self._origin @property def defined_sampling(self): """ Whether the sampling was explicitly defined. """ return self._defined_samping @property def defined_origin(self): """ Whether the origin was explicitly defined. """ return self._defined_origin # Crate shot-named alias FD = FieldDescription def calculate_multiscale_sampling(grid, sampling): """ calculate_multiscale_sampling(grid, sampling) Calculate the minimal and maximal sampling from user input. """ if isinstance(sampling, (float, int)): # Find maximal sampling for this grid # Set min and init max sMin = sMax = sampling # Determine minimal sampling tmp = [sh*sa for sh,sa in zip(grid.field_shape, grid.field_sampling)] minimalSampling = max(tmp) # Calculate max while sMax < minimalSampling: sMax *= 2 elif isinstance(sampling, (list,tuple)) and len(sampling)==2: # Two values given # Set min and init max sMin = sMax = min(sampling) # Increase sMax with powers of two untill we reach the given value sMax_ = max(sampling) while sMax < sMax_: sMax *= 2 # Select closest level if abs(sMax_ - sMax / 2) < abs(sMax_ - sMax): sMax /= 2 else: # nocov raise ValueError('For multiscale, sampling must have two values.') # Done return sMin, sMax ## The grid classes class GridInterface: """ GridInterface(field, sampling=5) Abstract class to define the interface of a spline grid. Implemented by the Grid and GridContainer classes. This class provides some generic methods and properties for grids in general. 
Most importantly, it handles initialization of the desctiption of the grid (dimensionality, shape and sampling of the underlying field, and the shape and sampling of the grid itself). Parameters ---------- field : shape-tuple, numpy-array, Aarray, or FieldDescription A description of the field that this grid applies to. The field itself is not stored, only the field's shape and sampling are of interest. sampling : number The spacing of the knots in the field. (For anisotropic fields, the spacing is expressed in world units.) """ def __init__(self, field, sampling): # Sets: # * _field_shape # * _field_sampling # * _grid_shape # * _grid_sampling # Check field if isinstance(field, FieldDescription): self._field_shape = field.shape self._field_sampling = field.sampling elif hasattr(field, 'sampling'): self._field_shape = field.shape self._field_sampling = field.sampling elif isinstance(field, np.ndarray): self._field_shape = field.shape self._field_sampling = [1.0 for i in field.shape] elif isinstance(field, (list, tuple)): for el in field: if not isinstance(el, int): raise ValueError('Shape must be a list/tuple of integers.') self._field_shape = [el for el in field] self._field_sampling = [1.0 for i in field] else: raise ValueError('field should be a numpy array, Aarray, or shape-tuple.') # Set spacing between knots in world units self._grid_sampling = sampling # Calculate number of rows and cols in the current lattice # +3 because we pad the field (we always pad extra!) # +1 because the first row AND last row contains a knot grid_sampling_in_pixels = self.grid_sampling_in_pixels self._grid_shape = [] for d in range(self.ndim): max_field_index = self._field_shape[d]-1 tmp = int(max_field_index / grid_sampling_in_pixels[d]) + 1 + 3 self._grid_shape.append(tmp) ## Dimensions of field and grid @property def ndim(self): """ The number of dimensions of this grid. """ return len(self._field_shape) @property def field_shape(self): """ The shape of the underlying field. (i.e. the size in each dim.) """ return tuple(self._field_shape) @property def field_sampling(self): """ For each dim, the sampling of the field, i.e. the distance (in world units) between pixels/voxels (all 1's if isotropic). """ return tuple(self._field_sampling) @property def grid_shape(self): """ The shape of the grid. (i.e. the size in each dim.) """ return tuple(self._grid_shape) @property def grid_sampling(self): """ A *scalar* indicating the spacing (in world units) between the knots. """ return self._grid_sampling @property def grid_sampling_in_pixels(self): """ For each dim, the spacing (in sub-pixels) between the knots. A dimension that has a low field sampling will have a high grid sampling in pixels (since the pixels are small, more fit between two knots). """ return tuple([self._grid_sampling/float(i) for i in self._field_sampling]) ## Methods to obtain derived grids def copy(self): """ copy() Return a deep copy of the grid. """ # Get new grid fd = FieldDescription(self) newGrid = self.__class__(fd, self.grid_sampling) if isinstance(self, GridContainer): # Copy all subgrids newGrid._map('copy', self) elif isinstance(self, SplineGrid): # Copy knots array newGrid._knots = self.knots.copy() # note the copy else: # nocov raise ValueError('Cannot copy: unknown grid class.') # Done return newGrid def refine(self): """ refine() Refine the grid, returning a new grid instance (of the same type) that represents the same field, but with half the grid_sampling. 
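        Example (an illustrative sketch, assuming a SplineGrid ``grid``
        with grid_sampling 8)::

            finer = grid.refine()
            assert finer.grid_sampling == 4.0
            # the represented field is unchanged; only the knot spacing is
            # halved (roughly doubling the number of knots per dimension)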
""" # Create new grid newSampling = self.grid_sampling / 2.0 fieldDes = FieldDescription(self) newGrid = self.__class__(fieldDes, newSampling) if isinstance(self, GridContainer): # Refine each of the subgrids and put in newGrid newGrid._map('refine', self) elif isinstance(self, SplineGrid): # Get knots array for the new grid newKnots = self._refine(self.knots) # Apply calculated knots, crop if necessary NS = newGrid.grid_shape if NS != newKnots.shape: pass #print('shape mismatch:', NS, newKnots.shape) # if self.ndim == 1: newGrid._knots = newKnots[:NS[0]] elif self.ndim == 2: newGrid._knots = newKnots[:NS[0], :NS[1]] elif self.ndim == 3: newGrid._knots = newKnots[:NS[0], :NS[1], :NS[2]] else: # nocov raise ValueError('Cannot refine: unknown grid class.') # Done return newGrid def add(self, other_grid): """ add(other_grid) Create a new grid by adding this grid and the given grid. """ # Check if not (self.field_shape == other_grid.field_shape and self.field_sampling == other_grid.field_sampling): # nocov raise ValueError('Can only add grids that have the same shape and sampling.') # Create empty grid with same shape as the other grid. fd = FieldDescription(self) newGrid = self.__class__(fd, self.grid_sampling) if isinstance(self, GridContainer): # Add each of the subgrids and put in newGrid newGrid._map('add', self, other_grid) elif isinstance(self, SplineGrid): # Add knots arrays newGrid._knots = self.knots + other_grid.knots else: # nocov raise ValueError('Cannot add: unknown grid class.') # Done return newGrid # # todo: result_grid? # def compose(self, other_grid, result_grid=None): # """ compose(other_grid, result_grid=None) # # Compose a new grid by calculating the field-values (of this grid) # at the knots of the given grid, and adding them to the values of # knots of the given grid. # # This method does not require the two grids to have the same shape # or sampling. # # If result_grid is given, the result is written to it. The resulting # grid is alwats returned. # # If this grid represents a deformation # ------------------------------------- # If both this grid and the given grid are C2 continuous and # injective, the result is also C2 continuous and injective (in # contrast two adding to grids). See Choi et al. 2000. # # """ # # # Create empty grid with same shape as the other grid. # if result_grid is None: # fd = FieldDescription(self) # newGrid = self.__class__(fd, self.grid_sampling) # else: # newGrid = result_grid # # if isinstance(self, GridContainer): # # Compose each of the subgrids and put in newGrid # newGrid._map('compose', self, other_grid) # elif isinstance(self, SplineGrid): # # Fill the new grid # _splinegridfuncs.get_field_grid(self, newGrid) # # Add delta to given grid # newGrid._knots += other_grid.knots # else: # raise ValueError('Cannot compose: unknown grid class.') # # # Done # return newGrid def resize_field(self, new_shape=None): # must be a keyword argument """ resize_field(new_shape) Create a new grid, where the underlying field is reshaped. The field is still the same; it only has a different shape and sampling. The parameter new_shape can be anything that can be converted to a FieldDescription instance. Note that the knots arrays are shallow copied (which makes this function very efficient). 
""" # Get description fd = FieldDescription(new_shape) # Create new grid newGrid = self.__class__(fd, self.grid_sampling) if isinstance(self, GridContainer): # reshape all subgrids newGrid._map('resize_field', self, new_shape=fd) elif isinstance(self, SplineGrid): # Simply copy the knots array newGrid._knots = self.knots else: # nocov raise ValueError('Cannot resize_field: unknown grid class.') # Done return newGrid ## Multiscale composition @classmethod def _multiscale(cls, setResidu, getResidu, field, sampling): """ _multiscale(setResidu, getResidu, field, sampling) General method for multiscale grid formation. from_field_multiscale() and from_points_multiscale() use this classmethod by each supplying appropriate setResidu and getResidu functions. """ # Set grid class GridClass = cls # Get sampling tmp = GridInterface(field, 1) sMin, sMax = calculate_multiscale_sampling(tmp, sampling) s, sRef = sMax, sMin*0.9 # Init refined grid (starts with highest sampling) grid = GridClass(field, s) # Init residu residu = getResidu() # grid: working grid # gridRef: refined grid # gridAdd: grid to add to working-grid while s > sRef: # Create addGrid using the residual values gridAdd = GridClass(field, s) setResidu(gridAdd, residu) # Create grid by combining refined grid of previous pass and # the gridAdd. grid = grid.add(gridAdd) # Prepare for next iter s /= 2.0 if s > sRef: # last round # Refine grid grid = grid.refine() # Get current values in the field and calculate residual # Use refGrid, as it does not *exactly* represent the # same field as grid. residu = getResidu(grid) # Done return grid class GridContainer(GridInterface): """ GridContainer(field, sampling=5) Abstract base class that represents multiple SplineGrid instances. Since each SplineGrid instance describes a field of scalar values, the GridContainer can be used to describe vectors/tensors. Examples are color and 2D/3D deformations. The implementing class should: * instantiate SplineGrid instances and append them to '_grids' * implement methods to set the grid accordingly, probably using classmethods such as from_points, from_field, etc. """ def __init__(self, *args, **kwargs): GridInterface.__init__(self, *args, **kwargs) # Init list of sub grids self._grids = [] def __len__(self): return len(self._grids) def __getitem__(self, item): if isinstance(item, int): if item>=0 and item=0) & (pp[:,d]=field_shape_R[d]) ) pp[I,:] = 0.0 # Done return pp, values def _set_using_field(self, field, weights=None): """ _set_using_field(field, weights=None) Set the grid using an existing field, optionally with weighting. """ # If not given, make unit weights if weights is None: weights = np.ones_like(field) # Go _splinegridfuncs.set_field(self, field, weights) def _set_using_points(self, pp, values): """ _set_using_points(pp, values) Set the grid using sparse data, defined at the points in pp. """ assert isinstance(pp, np.ndarray) and pp.ndim == 2 # Make sure values is an array if a list is given if not isinstance(values, np.ndarray): values = np.array(values) # Throw away points that are not inside the field pp, values = self._select_points_inside_field(pp, values) # Go _splinegridfuncs.set_field_sparse(self, pp, values) def _refine(self, knots): """ _refine(knots) Workhorse method to refine the knots array. Designed for B-splines. For other splines, this method introduces errors; the resulting grid does not exactly represent the original. Refines the grid to a new grid (newGrid). 
Let grid have (n+3)*(m+3) knots, then newGrid has (2n+3)*(2m+3) knots (sometimes the grid needs one row less, this is checked in the refine() method). In both grids, the second knot lies exactly on the first pixel of the image. Below is an illustration of a few knots: ( ) ( ) ( ) x x x x ( ): knots of grid ( ) x (x) x (x) ------------ x : knots of newGrid x x x x | image | On page 7 of "Lee et al. 1997 - Scattered Data Interpolation With Multilevel B-splines", Lee notes that there are several publications on how to create a lattice from another lattice in such a way that they describe the same deformation. Our case is relatively simple because we have about the same lattice, but with twice the accuracy. What we do here is based on what Lee says on page 7 of his article. Note that he handles the indices a bit differently than I do. For each knot in the grid we update four knots in the new grid. The indices of the knots to update in newGrid are calculated using a simple formula that can be derived from the illustration shown below: for each knot in grid ( ) we determine the 4 x's (in newGrid) from the 0's. 0 0 0 0 (x) x 0 x x 0 0 0 We can note a few things. * The loose x's are calculated using 2 neighbours in each dimension. * The x's inside ( ) are calculated using 3 neighbours in each dim. * (Knots can also be loose in one dim and not loose in another.) * The newGrid ALWAYS has its first knot between grid's first and second knot. * newGrid can have a smaller number of rows and/or cols than you would think. According to Lee the newGrid has 2*(lat.rows-3)+3 rows, but in our case this is not necessarily true. The image does not exactly fit an integer number of knots, so we need one knot extra. But when we go from a coarse lattice to a finer one, we might need one row/col of knots less. """ # For each dimension, refine the grid with a factor of two for d in range(knots.ndim): # Obtain reference knots if d == 0: knots1, knots2 = knots[:-1], knots[1:] knots3, knots4 = knots[:-2], knots[2:] knots5 = knots[1:-1] elif d == 1: knots1, knots2 = knots[:,:-1], knots[:,1:] knots3, knots4 = knots[:,:-2], knots[:,2:] knots5 = knots[:,1:-1] elif d == 2: knots1, knots2 = knots[:,:,:-1], knots[:,:,1:] knots3, knots4 = knots[:,:,:-2], knots[:,:,2:] knots5 = knots[:,:,1:-1] # Calculate edge knots (knots between original knots) eknots = 0.5 * (knots1 + knots2) # Calculate vertex knots (knots on original knots) vknots = 0.125 * (knots3 + 6*knots5 + knots4) # Init new knots array shape = [s for s in knots.shape] shape[d] = eknots.shape[d] + vknots.shape[d] knots = np.zeros(shape, dtype=knots.dtype) # Set values by interleaving eknots and vknots if d == 0: knots[::2] = eknots knots[1::2] = vknots elif d == 1: knots[:,::2] = eknots knots[:,1::2] = vknots elif d == 2: knots[:,:,::2] = eknots knots[:,:,1::2] = vknots # Done return knots PKm4K0kmm#pirt/splinegrid/_splinegridfuncs.py""" Low level functions for spline grids, implemented with Numba to make it super fast. Provides functionality for (cubic B-spline) grids. Copyright 2010-2012 (C) Almar Klein, University of Twente. Copyright 2012-2017 (C) Almar Klein A Note on pointsets: In this module, all pointsets are 2D numpy arrays shaped (N, ndim), which express world coordinates. The coordinates they represent should be expressed using doubles wx, wy, wz, in contrast to pixel coordinates, which are expressed using integers x, y, z.
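    For reference, the weights produced by ``cubicsplinecoef_basis(t, cc)``
    (with t the fractional position between the two nearest knots, in the
    range [0, 1)) are the standard uniform cubic B-spline basis functions::

        cc[0] = (1 - t)**3 / 6
        cc[1] = (3*t**3 - 6*t**2 + 4) / 6
        cc[2] = (-3*t**3 + 3*t**2 + 3*t + 1) / 6
        cc[3] = t**3 / 6

    These four weights are non-negative and sum to 1 for any t, which gives
    the smooth (C2 continuous), overshoot-free interpolation used throughout
    this module.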
""" import numpy as np import numba from ..interp._cubic import cubicsplinecoef_basis ## Functions to obtain the field that the grid represents def get_field(grid): """ get_field(grid) Sample the grid at all the pixels of the underlying field. """ # Init resulting array, make it float32, since for deformation-grids # the result is a sampler to be used in interp(). result = np.zeros(grid.field_shape, dtype=np.float32) # Decide what function to call if grid.ndim == 1: _get_field1(result, grid.grid_sampling_in_pixels, grid.knots) elif grid.ndim == 2: _get_field2(result, grid.grid_sampling_in_pixels, grid.knots) elif grid.ndim == 3: _get_field3(result, grid.grid_sampling_in_pixels, grid.knots) else: # nocov tmp = 'Grid interpolation not suported for this dimension.' raise RuntimeError(tmp) return result def get_field_sparse(grid, pp): """ get_field_sparse(grid, pp) Sparsely sample the grid at a specified set of points (which are in world coordinates). Also see get_field_at(). """ assert isinstance(pp, np.ndarray) and pp.ndim == 2 # Test dimensions if grid.ndim != pp.shape[1]: # nocov raise ValueError('Dimension of grid and pointset do not match.') # Create samples samples = [] for i in range(pp.shape[1]): samples.append(pp[:,i]) # Init result result = np.zeros_like(samples[0], dtype=np.float32) # Determine sampling grid_sampling_in_pixels = tuple([grid.grid_sampling for i in grid.grid_sampling_in_pixels]) # Decide what function to call if grid.ndim == 1: _get_field_at1(result.ravel(), grid_sampling_in_pixels, grid.knots, *[s.ravel() for s in samples]) elif grid.ndim == 2: _get_field_at2(result.ravel(), grid_sampling_in_pixels, grid.knots, *[s.ravel() for s in samples]) elif grid.ndim == 3: _get_field_at3(result.ravel(), grid_sampling_in_pixels, grid.knots, *[s.ravel() for s in samples]) else: # nocov tmp = 'Grid interpolation not suported for this dimension.' raise RuntimeError(tmp) return result def get_field_at(grid, samples): """ get_field_at(grid, samples) Sample the grid at specified sample locations (in pixels, x-y-z order), similar to pirt.interp.interp(). Also see get_field_sparse(). """ # Test dimensions if not isinstance(samples, (tuple, list)): # nocov raise ValueError('Samples must be list or tuple.') if len(samples) != grid.ndim: # nocov raise ValueError('Samples must contain one element per dimension.') sample0 = samples[0] for sample in samples: if sample0.shape != sample.shape: # nocov raise ValueError('Elements in samples must all have the same shape.') # Init result result = np.zeros_like(samples[0], dtype=np.float32) # Determine sampling grid_sampling_in_pixels = grid.grid_sampling_in_pixels # Decide what function to call if grid.ndim == 1: _get_field_at1(result.ravel(), grid_sampling_in_pixels, grid.knots, *[s.ravel() for s in samples]) elif grid.ndim == 2: _get_field_at2(result.ravel(), grid_sampling_in_pixels, grid.knots, *[s.ravel() for s in samples]) elif grid.ndim == 3: _get_field_at3(result.ravel(), grid_sampling_in_pixels, grid.knots, *[s.ravel() for s in samples]) else: # nocov tmp = 'Grid interpolation not suported for this dimension.' raise RuntimeError(tmp) return result ## Workhorse functions to get the field @numba.jit(nopython=True, nogil=True) def _get_field1(result, grid_sampling_in_pixels, knots): if result.ndim != 1: raise ValueError('This function can only sample 1D grids.') ccx = np.empty((4, ), np.float64) grid_xSpacing = grid_sampling_in_pixels[0] # For each pixel ... 
for x in range(result.shape[0]): # Calculate what is the leftmost (reference) knot on the grid, # and the ratio between closest and second closest knot. # Note the +1 to correct for padding. tmp = x / grid_xSpacing + 1 gx = int(tmp) tx = tmp - gx # Get coefficients cubicsplinecoef_basis(tx, ccx) # Init value val = 0.0 # For each knot ... ii = gx - 1 # x-location of first knot for i in range(4): # Calculate interpolated value. val += ccx[i] * knots[ii] ii += 1 # Store value in result array result[x] = val @numba.jit(nopython=True, nogil=True) def _get_field2(result, grid_sampling_in_pixels, knots): if result.ndim != 2: raise ValueError('This function can only sample 2D grids.') ccy = np.empty((4, ), np.float64) ccx = np.empty((4, ), np.float64) grid_ySpacing = grid_sampling_in_pixels[0] grid_xSpacing = grid_sampling_in_pixels[1] # For each pixel ... for y in range(result.shape[0]): for x in range(result.shape[1]): # Calculate what is the leftmost (reference) knot on the grid, # and the ratio between closest and second closest knot. # Note the +1 to correct for padding. tmp = y / grid_ySpacing + 1 gy = int(tmp) ty = tmp - gy # tmp = x / grid_xSpacing + 1 gx = int(tmp) tx = tmp - gx # Get coefficients cubicsplinecoef_basis(ty, ccy) cubicsplinecoef_basis(tx, ccx) # Init value val = 0.0 # For each knot ... jj = gy - 1 # y-location of first knot for j in range(4): ii = gx - 1 # x-location of first knot for i in range(4): # Calculate interpolated value. val += ccy[j] * ccx[i] * knots[jj, ii] ii += 1 jj += 1 # Store value in result array result[y, x] = val @numba.jit(nopython=True, nogil=True) def _get_field3(result, grid_sampling_in_pixels, knots): if result.ndim != 3: raise ValueError('This function can only sample 3D grids.') ccz = np.empty((4, ), np.float64) ccy = np.empty((4, ), np.float64) ccx = np.empty((4, ), np.float64) grid_zSpacing = grid_sampling_in_pixels[0] grid_ySpacing = grid_sampling_in_pixels[1] grid_xSpacing = grid_sampling_in_pixels[2] # For each pixel ... for z in range(result.shape[0]): for y in range(result.shape[1]): for x in range(result.shape[2]): # Calculate what is the leftmost (reference) knot on the grid, # and the ratio between closest and second closest knot. # Note the +1 to correct for padding. tmp = z / grid_zSpacing + 1 gz = int(tmp) tz = tmp - gz # tmp = y / grid_ySpacing + 1 gy = int(tmp) ty = tmp - gy # tmp = x / grid_xSpacing + 1 gx = int(tmp) tx = tmp - gx # Get coefficients cubicsplinecoef_basis(tz, ccz) cubicsplinecoef_basis(ty, ccy) cubicsplinecoef_basis(tx, ccx) # Init value val = 0.0 # For each knot ... kk = gz -1 # z-location of first knot for k in range(4): jj = gy - 1 # y-location of first knot for j in range(4): ii = gx - 1 # x-location of first knot for i in range(4): # Calculate interpolated value. val += ccz[k] * ccy[j] * ccx[i] * knots[kk, jj, ii] ii += 1 jj += 1 kk += 1 # Store value in result array result[z, y, x] = val @numba.jit(nopython=True, nogil=True) def _get_field_at1(result, grid_sampling_in_pixels, knots, samplesx_): assert samplesx_.ndim == 1 ccx = np.empty((4, ), np.float64) grid_xSpacing = grid_sampling_in_pixels[0] gridShapex = knots.shape[0] # For each point in the set for p in range(samplesx_.size): # Calculate wx wx = samplesx_[p] # Calculate what is the leftmost (reference) knot on the grid, # and the ratio between closest and second closest knot. # Note the +1 to correct for padding. 
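        # (illustration: with grid_xSpacing=2.0 and wx=3.0, tmp is 2.5, so
        # gx=2 and tx=0.5; the sample lies halfway between knots 2 and 3,
        # and knots 1..4 contribute with the cubic B-spline weights
        # evaluated at t=0.5)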
tmp = wx / grid_xSpacing + 1 gx = int(tmp) tx = tmp - gx # Check if within bounds of interpolatable domain if gx < 1 or gx >= gridShapex-2: result[p] = 0.0 continue # Get coefficients cubicsplinecoef_basis(tx, ccx) # Init value val = 0.0 # For each knot ... ii = gx - 1 # x-location of first knot for i in range(4): # Calculate interpolated value. val += ccx[i] * knots[ii] ii += 1 # Store result[p] = val @numba.jit(nopython=True, nogil=True) def _get_field_at2(result_, grid_sampling_in_pixels, knots, samplesx_, samplesy_): assert samplesx_.ndim == 1 assert samplesy_.ndim == 1 ccy = np.empty((4, ), np.float64) ccx = np.empty((4, ), np.float64) grid_ySpacing = grid_sampling_in_pixels[0] grid_xSpacing = grid_sampling_in_pixels[1] gridShapey = knots.shape[0] gridShapex = knots.shape[1] # For each point in the set for p in range(samplesx_.size): # Calculate wx and wy wx = samplesx_[p] wy = samplesy_[p] # Calculate what is the leftmost (reference) knot on the grid, # and the ratio between closest and second closest knot. # Note the +1 to correct for padding. tmp = wy / grid_ySpacing + 1 gy = int(tmp) ty = tmp - gy # tmp = wx / grid_xSpacing + 1 gx = int(tmp) tx = tmp - gx # Check if within bounds of interpolatable domain if ( (gy < 1 or gy >= gridShapey - 2) or (gx < 1 or gx >= gridShapex - 2) ): result_[p] = 0.0 continue # Get coefficients cubicsplinecoef_basis(ty, ccy) cubicsplinecoef_basis(tx, ccx) # Init value val = 0.0 # For each knot ... jj = gy - 1 # y-location of first knot for j in range(4): ii = gx - 1 # x-location of first knot for i in range(4): # Calculate interpolated value. val += ccy[j] * ccx[i] * knots[jj, ii] ii += 1 jj += 1 # Store result_[p] = val @numba.jit(nopython=True, nogil=True) def _get_field_at3(result_, grid_sampling_in_pixels, knots, samplesx_, samplesy_, samplesz_): assert samplesx_.ndim == 1 assert samplesy_.ndim == 1 assert samplesz_.ndim == 1 ccz = np.empty((4, ), np.float64) ccy = np.empty((4, ), np.float64) ccx = np.empty((4, ), np.float64) grid_zSpacing = grid_sampling_in_pixels[0] grid_ySpacing = grid_sampling_in_pixels[1] grid_xSpacing = grid_sampling_in_pixels[2] gridShapez = knots.shape[0] gridShapey = knots.shape[1] gridShapex = knots.shape[2] # For each point in the set for p in range(samplesx_.size): # Calculate wx and wy wx = samplesx_[p] wy = samplesy_[p] wz = samplesz_[p] # Calculate what is the leftmost (reference) knot on the grid, # and the ratio between closest and second closest knot. # Note the +1 to correct for padding. tmp = wz / grid_zSpacing + 1 gz = int(tmp) tz = tmp - gz # tmp = wy / grid_ySpacing + 1 gy = int(tmp) ty = tmp - gy # tmp = wx / grid_xSpacing + 1 gx = int(tmp) tx = tmp - gx # Check if within bounds of interpolatable domain if ( (gx < 1 or gx >= gridShapex - 2) or (gy < 1 or gy >= gridShapey - 2) or (gz < 1 or gz >= gridShapez - 2)): result_[p] = 0.0 continue # Get coefficients cubicsplinecoef_basis(tz, ccz) cubicsplinecoef_basis(ty, ccy) cubicsplinecoef_basis(tx, ccx) # Init value val = 0.0 # For each knot ... kk = gz - 1 # z-location of first knot for k in range(4): jj = gy - 1 # y-location of first knot for j in range(4): ii = gx - 1 # x-location of first knot for i in range(4): # Calculate interpolated value. 
@numba.jit(nopython=True, nogil=True)
def _get_field_at3(result_, grid_sampling_in_pixels, knots,
                   samplesx_, samplesy_, samplesz_):

    assert samplesx_.ndim == 1
    assert samplesy_.ndim == 1
    assert samplesz_.ndim == 1

    ccz = np.empty((4, ), np.float64)
    ccy = np.empty((4, ), np.float64)
    ccx = np.empty((4, ), np.float64)

    grid_zSpacing = grid_sampling_in_pixels[0]
    grid_ySpacing = grid_sampling_in_pixels[1]
    grid_xSpacing = grid_sampling_in_pixels[2]

    gridShapez = knots.shape[0]
    gridShapey = knots.shape[1]
    gridShapex = knots.shape[2]

    # For each point in the set
    for p in range(samplesx_.size):

        # Get wx, wy and wz
        wx = samplesx_[p]
        wy = samplesy_[p]
        wz = samplesz_[p]

        # Calculate the leftmost (reference) knot on the grid, and the
        # ratio between the closest and second-closest knot.
        # Note the +1 to correct for padding.
        tmp = wz / grid_zSpacing + 1
        gz = int(tmp)
        tz = tmp - gz
        #
        tmp = wy / grid_ySpacing + 1
        gy = int(tmp)
        ty = tmp - gy
        #
        tmp = wx / grid_xSpacing + 1
        gx = int(tmp)
        tx = tmp - gx

        # Check if within bounds of interpolatable domain
        if ((gx < 1 or gx >= gridShapex - 2) or
                (gy < 1 or gy >= gridShapey - 2) or
                (gz < 1 or gz >= gridShapez - 2)):
            result_[p] = 0.0
            continue

        # Get coefficients
        cubicsplinecoef_basis(tz, ccz)
        cubicsplinecoef_basis(ty, ccy)
        cubicsplinecoef_basis(tx, ccx)

        # Init value
        val = 0.0

        # For each knot ...
        kk = gz - 1  # z-location of first knot
        for k in range(4):
            jj = gy - 1  # y-location of first knot
            for j in range(4):
                ii = gx - 1  # x-location of first knot
                for i in range(4):
                    # Calculate interpolated value.
                    val += ccz[k] * ccy[j] * ccx[i] * knots[kk, jj, ii]
                    ii += 1
                jj += 1
            kk += 1

        # Store
        result_[p] = val


## Functions to set the grid using a field

@numba.jit(nopython=True, nogil=True)
def _set_field_using_num_and_dnum(knots_, num_, dnum_):
    for i in range(knots_.size):
        n = dnum_[i]
        if n > 0.0:
            knots_[i] = num_[i] / n


def set_field(grid, field, weights):
    """ set_field(grid, field, weights)

    Set the grid using the specified field and weights.
    """

    # Test dimensions
    if grid.field_shape != field.shape:  # nocov
        raise ValueError('Dimensions of grid-field and field do not match.')

    # Test dtype
    if field.dtype != weights.dtype:  # nocov
        raise ValueError('Field and weights must be of the same type.')

    # Apply proper function
    if grid.ndim == 1:
        num, dnum = _set_field1(grid.grid_sampling_in_pixels, grid.knots,
                                field, weights)
    elif grid.ndim == 2:
        num, dnum = _set_field2(grid.grid_sampling_in_pixels, grid.knots,
                                field, weights)
    elif grid.ndim == 3:
        num, dnum = _set_field3(grid.grid_sampling_in_pixels, grid.knots,
                                field, weights)
    else:  # nocov
        raise RuntimeError('This function does not support grids of that dimension.')

    # Apply
    _set_field_using_num_and_dnum(grid.knots.ravel(), num.ravel(), dnum.ravel())


def set_field_sparse(grid, pp, values):
    """ set_field_sparse(grid, pp, values)

    Set the grid by providing the field values at a set of points
    (which are in world coordinates).
    """

    assert isinstance(pp, np.ndarray) and pp.ndim == 2

    # Test dimensions
    if grid.ndim != pp.shape[1]:  # nocov
        raise ValueError('Dimension of grid and pointset do not match.')

    # Apply proper function
    if grid.ndim == 1:
        num, dnum = _set_field_sparse1(grid.grid_sampling, grid.knots, pp, values)
    elif grid.ndim == 2:
        num, dnum = _set_field_sparse2(grid.grid_sampling, grid.knots, pp, values)
    elif grid.ndim == 3:
        num, dnum = _set_field_sparse3(grid.grid_sampling, grid.knots, pp, values)
    else:  # nocov
        raise RuntimeError('This function does not support grids of that dimension.')

    # Apply
    _set_field_using_num_and_dnum(grid.knots.ravel(), num.ravel(), dnum.ravel())
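
# --- Illustrative sketch, not used by pirt ---------------------------------
# For a single sample, the Lee et al. update has a useful property: the
# fitted spline reproduces the sample value exactly at the sample location.
# A hypothetical 1D round-trip check; the sizes are made up, and it assumes
# world coordinates coincide with pixel coordinates (sampling of 1 unit per
# pixel), so the same spacing can be passed to both workhorse functions:

def _example_single_point_roundtrip():
    spacing = 4.0
    knots = np.zeros(10, np.float64)
    pp = np.array([[13.0]])       # one point, world coordinates
    values = np.array([2.5])
    # Fit the grid to the single sample ...
    num, dnum = _set_field_sparse1(spacing, knots, pp, values)
    _set_field_using_num_and_dnum(knots.ravel(), num.ravel(), dnum.ravel())
    # ... and sample the fitted field back at the same location
    result = np.empty(1, np.float64)
    _get_field_at1(result, (spacing,), knots, np.array([13.0]))
    assert abs(result[0] - values[0]) < 1e-9
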
## Workhorse functions to set the field

@numba.jit(nopython=True, nogil=True)
def _set_field1(grid_sampling_in_pixels, knots, field, weights):

    ccx = np.empty((4, ), np.float64)

    grid_xSpacing = grid_sampling_in_pixels[0]

    # Create temporary arrays the same size as the grid
    num = np.zeros_like(knots)
    dnum = np.zeros_like(knots)

    # For each pixel ...
    for x in range(field.shape[0]):

        # Get value and weight
        val = field[x]
        weight = weights[x]

        # Evaluate this one?
        if weight <= 0.0:
            continue

        # Calculate the leftmost (reference) knot on the grid, and the
        # ratio between the closest and second-closest knot.
        # Note the +1 to correct for padding.
        tmp = x / grid_xSpacing + 1
        gx = int(tmp)
        tx = tmp - gx

        # Get coefficients
        cubicsplinecoef_basis(tx, ccx)

        # Pre-normalize value
        omsum = 0.0
        for i in range(4):
            omsum += ccx[i] * ccx[i]
        val_n = val / omsum

        # For each knot that this point influences ...
        # Following Lee et al. we update a numerator and a denominator for
        # each knot.
        for i in range(4):
            ii = i + gx - 1
            #
            omega = ccx[i]
            omega2 = weight * omega * omega
            num[ii] += omega2 * (val_n * omega)
            dnum[ii] += omega2

    # Done
    return num, dnum


@numba.jit(nopython=True, nogil=True)
def _set_field2(grid_sampling_in_pixels, knots, field, weights):

    ccy = np.empty((4, ), np.float64)
    ccx = np.empty((4, ), np.float64)

    grid_ySpacing = grid_sampling_in_pixels[0]
    grid_xSpacing = grid_sampling_in_pixels[1]

    # Create temporary arrays the same size as the grid
    num = np.zeros_like(knots)
    dnum = np.zeros_like(knots)

    # For each pixel ...
    for y in range(field.shape[0]):
        for x in range(field.shape[1]):

            # Get value and weight
            val = field[y, x]
            weight = weights[y, x]

            # Evaluate this one?
            if weight <= 0.0:
                continue

            # Calculate the leftmost (reference) knot on the grid, and the
            # ratio between the closest and second-closest knot.
            # Note the +1 to correct for padding.
            tmp = y / grid_ySpacing + 1
            gy = int(tmp)
            ty = tmp - gy
            #
            tmp = x / grid_xSpacing + 1
            gx = int(tmp)
            tx = tmp - gx

            # Get coefficients
            cubicsplinecoef_basis(ty, ccy)
            cubicsplinecoef_basis(tx, ccx)

            # Pre-normalize value
            omsum = 0.0
            for j in range(4):
                for i in range(4):
                    om = ccy[j] * ccx[i]
                    omsum += om * om
            val_n = val / omsum

            # For each knot that this point influences ...
            # Following Lee et al. we update a numerator and a denominator
            # for each knot.
            for j in range(4):
                jj = j + gy - 1
                for i in range(4):
                    ii = i + gx - 1
                    #
                    omega = ccy[j] * ccx[i]
                    omega2 = weight * omega * omega
                    num[jj, ii] += omega2 * (val_n * omega)
                    dnum[jj, ii] += omega2

    # Done
    return num, dnum


@numba.jit(nopython=True, nogil=True)
def _set_field3(grid_sampling_in_pixels, knots, field, weights):

    ccz = np.empty((4, ), np.float64)
    ccy = np.empty((4, ), np.float64)
    ccx = np.empty((4, ), np.float64)

    grid_zSpacing = grid_sampling_in_pixels[0]
    grid_ySpacing = grid_sampling_in_pixels[1]
    grid_xSpacing = grid_sampling_in_pixels[2]

    # Create temporary arrays the same size as the grid
    num = np.zeros_like(knots)
    dnum = np.zeros_like(knots)

    # For each pixel ...
    for z in range(field.shape[0]):
        for y in range(field.shape[1]):
            for x in range(field.shape[2]):

                # Get value and weight
                val = field[z, y, x]
                weight = weights[z, y, x]

                # Evaluate this one?
                if weight <= 0.0:
                    continue

                # Calculate the leftmost (reference) knot on the grid, and
                # the ratio between the closest and second-closest knot.
                # Note the +1 to correct for padding.
                tmp = z / grid_zSpacing + 1
                gz = int(tmp)
                tz = tmp - gz
                #
                tmp = y / grid_ySpacing + 1
                gy = int(tmp)
                ty = tmp - gy
                #
                tmp = x / grid_xSpacing + 1
                gx = int(tmp)
                tx = tmp - gx

                # Get coefficients
                cubicsplinecoef_basis(tz, ccz)
                cubicsplinecoef_basis(ty, ccy)
                cubicsplinecoef_basis(tx, ccx)

                # Pre-normalize value
                omsum = 0.0
                for k in range(4):
                    for j in range(4):
                        for i in range(4):
                            om = ccz[k] * ccy[j] * ccx[i]
                            omsum += om * om
                val_n = val / omsum

                # For each knot that this point influences ...
                # Following Lee et al. we update a numerator and a
                # denominator for each knot.
                for k in range(4):
                    kk = k + gz - 1
                    for j in range(4):
                        jj = j + gy - 1
                        for i in range(4):
                            ii = i + gx - 1
                            #
                            omega = ccz[k] * ccy[j] * ccx[i]
                            omega2 = weight * omega * omega
                            num[kk, jj, ii] += omega2 * (val_n * omega)
                            dnum[kk, jj, ii] += omega2

    # Done
    return num, dnum
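
# --- Illustrative sketch, not used by pirt ---------------------------------
# The weights act as a mask/confidence map: pixels with weight <= 0 are
# skipped entirely, and positive weights scale each pixel's contribution to
# the num/dnum accumulators. A hypothetical 1D fit that ignores half the
# field; the array sizes and the knot-count formula are made up for this
# sketch (pirt's grid classes decide the real number of knots):

def _example_weighted_fit():
    spacing = 5.0                       # grid sampling in pixels
    field = np.linspace(0.0, 1.0, 50)   # values to approximate
    weights = np.ones(50, np.float64)
    weights[25:] = 0.0                  # right half contributes nothing
    nknots = int(50 / spacing) + 4      # assumed: covers the field plus padding
    knots = np.zeros(nknots, np.float64)
    num, dnum = _set_field1((spacing,), knots, field, weights)
    _set_field_using_num_and_dnum(knots.ravel(), num.ravel(), dnum.ravel())
    # Knots whose support lies wholly in the masked half keep dnum == 0
    # and thus stay at 0.0
    return knots
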
@numba.jit(nopython=True, nogil=True)
def _set_field_sparse1(grid_sampling, knots, pp, values):

    ccx = np.empty((4, ), np.float64)

    wGridSampling = grid_sampling

    # Create num, dnum
    num = np.zeros_like(knots)
    dnum = np.zeros_like(knots)

    # For each point ...
    for p in range(pp.shape[0]):

        # Get wx
        wx = pp[p, 0]

        # Calculate the leftmost (reference) knot on the lattice, and the
        # ratios of influence between the lattice points.
        # Note the +1 to correct for padding.
        tmp = wx / wGridSampling + 1
        gx = int(tmp)
        tx = tmp - gx

        # Get coefficients
        cubicsplinecoef_basis(tx, ccx)

        # Precalculate omsum (denominator of eq 4 in Lee 1996)
        omsum = 0.0
        for i in range(4):
            omsum += ccx[i] * ccx[i]

        # Get val
        val = values[p]

        # For each knot that this point influences ...
        # Following Lee et al. we update a numerator and a denominator for
        # each knot.
        ii = gx - 1  # x-location of first knot
        for i in range(4):
            #
            omega = ccx[i]
            omega2 = omega * omega
            num[ii] += omega2 * (val * omega / omsum)
            dnum[ii] += omega2
            #
            ii += 1

    # Done
    return num, dnum


@numba.jit(nopython=True, nogil=True)
def _set_field_sparse2(grid_sampling, knots, pp, values):

    ccy = np.empty((4, ), np.float64)
    ccx = np.empty((4, ), np.float64)

    wGridSampling = grid_sampling

    # Create num, dnum
    num = np.zeros_like(knots)
    dnum = np.zeros_like(knots)

    # For each point ...
    for p in range(pp.shape[0]):

        # Get wx and wy
        wx = pp[p, 0]
        wy = pp[p, 1]

        # Calculate the leftmost (reference) knot on the lattice, and the
        # ratios of influence between the lattice points.
        # Note the +1 to correct for padding.
        tmp = wy / wGridSampling + 1
        gy = int(tmp)
        ty = tmp - gy
        #
        tmp = wx / wGridSampling + 1
        gx = int(tmp)
        tx = tmp - gx

        # Get coefficients
        cubicsplinecoef_basis(ty, ccy)
        cubicsplinecoef_basis(tx, ccx)

        # Precalculate omsum (denominator of eq 4 in Lee 1996)
        omsum = 0.0
        for j in range(4):
            for i in range(4):
                om = ccy[j] * ccx[i]
                omsum += om * om

        # Get val
        val = values[p]

        # For each knot that this point influences ...
        # Following Lee et al. we update a numerator and a denominator for
        # each knot.
        jj = gy - 1  # y-location of first knot
        for j in range(4):
            ii = gx - 1  # x-location of first knot
            for i in range(4):
                #
                omega = ccy[j] * ccx[i]
                omega2 = omega * omega
                num[jj, ii] += omega2 * (val * omega / omsum)
                dnum[jj, ii] += omega2
                #
                ii += 1
            jj += 1

    # Done
    return num, dnum
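
# --- Illustrative sketch, not used by pirt ---------------------------------
# Fitting a grid to scattered 2D samples with the workhorse above; pp holds
# one (x, y) world coordinate per row, matching how _set_field_sparse2 reads
# pp[p, 0] and pp[p, 1]. The names and sizes are made up for illustration:

def _example_sparse_fit2():
    grid_sampling = 10.0
    knots = np.zeros((12, 12), np.float64)
    pp = np.array([[35.0, 40.0],
                   [62.0, 18.0]])      # (x, y) per row
    values = np.array([1.0, -0.5])
    num, dnum = _set_field_sparse2(grid_sampling, knots, pp, values)
    _set_field_using_num_and_dnum(knots.ravel(), num.ravel(), dnum.ravel())
    return knots  # each point only touches its 4x4 patch of knots
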
@numba.jit(nopython=True, nogil=True)
def _set_field_sparse3(grid_sampling, knots, pp, values):

    ccz = np.empty((4, ), np.float64)
    ccy = np.empty((4, ), np.float64)
    ccx = np.empty((4, ), np.float64)

    wGridSampling = grid_sampling

    # Create num, dnum
    num = np.zeros_like(knots)
    dnum = np.zeros_like(knots)

    # For each point ...
    for p in range(pp.shape[0]):

        # Get wx, wy and wz
        wx = pp[p, 0]
        wy = pp[p, 1]
        wz = pp[p, 2]

        # Calculate the leftmost (reference) knot on the lattice, and the
        # ratios of influence between the lattice points.
        # Note the +1 to correct for padding.
        tmp = wz / wGridSampling + 1
        gz = int(tmp)
        tz = tmp - gz
        #
        tmp = wy / wGridSampling + 1
        gy = int(tmp)
        ty = tmp - gy
        #
        tmp = wx / wGridSampling + 1
        gx = int(tmp)
        tx = tmp - gx

        # Get coefficients
        cubicsplinecoef_basis(tz, ccz)
        cubicsplinecoef_basis(ty, ccy)
        cubicsplinecoef_basis(tx, ccx)

        # Precalculate omsum (denominator of eq 4 in Lee 1996)
        omsum = 0.0
        for k in range(4):
            for j in range(4):
                for i in range(4):
                    om = ccz[k] * ccy[j] * ccx[i]
                    omsum += om * om

        # Get val
        val = values[p]

        # For each knot that this point influences ...
        # Following Lee et al. we update a numerator and a denominator for
        # each knot.
        kk = gz - 1  # z-location of first knot
        for k in range(4):
            jj = gy - 1  # y-location of first knot
            for j in range(4):
                ii = gx - 1  # x-location of first knot
                for i in range(4):
                    #
                    omega = ccz[k] * ccy[j] * ccx[i]
                    omega2 = omega * omega
                    num[kk, jj, ii] += omega2 * (val * omega / omsum)
                    dnum[kk, jj, ii] += omega2
                    #
                    ii += 1
                jj += 1
            kk += 1

    # Done
    return num, dnum

pirt-2.1.0.dist-info/LICENSE.txt

Pirt licensing terms
--------------------

Pirt is licensed under the terms of the (new) BSD license:

Copyright (c) 2014, Almar Klein

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

* Redistributions of source code must retain the above copyright notice,
  this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
  this list of conditions and the following disclaimer in the documentation
  and/or other materials provided with the distribution.
* Neither the name of the Pirt Development Team nor the names of its
  contributors may be used to endorse or promote products derived from this
  software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.