 
 from nilearn import EXPAND_PATH_WILDCARDS
 from joblib import Memory
+from nilearn import datasets
 from nilearn._utils.niimg_conversions import _resolve_globbing
 from nilearn.input_data import NiftiMasker
 from nilearn.input_data.masker_validation import check_embedded_nifti_masker
 from nilearn.decomposition.base import BaseDecomposition
 
-import bascpp as bpp
+import dypac.bascpp as bpp
+from dypac.embeddings import Embedding
 
 
-class dypac(BaseDecomposition):
+class Dypac(BaseDecomposition):
     """
     Perform Stable Dynamic Cluster Analysis.
 
@@ -69,7 +71,10 @@ class dypac(BaseDecomposition):
     grey_matter: Niimg-like object or MultiNiftiMasker instance, optional
         A voxel-wise estimate of grey matter partial volumes.
         If provided, this mask is used to give more weight to grey matter in the
-        replications of functional clusters.
+        replications of functional clusters. Use None to skip.
+        By default, uses the ICBM152_2009 probabilistic grey matter segmentation.
+        Note that the segmentation will be smoothed with the same kernel as the
+        functional images.
 
     std_grey_matter: float (1 <= .)
         The standard deviation of voxels will be adjusted to
@@ -159,8 +164,8 @@ def __init__(
         threshold_sim=0.3,
         random_state=None,
         mask=None,
-        grey_matter=None,
-        std_grey_matter=1,
+        grey_matter="MNI",
+        std_grey_matter=3,
         smoothing_fwhm=None,
         standardize=True,
         detrend=True,
@@ -215,6 +220,32 @@ def _check_components_(self):
215220 "been called."
216221 )
217222
223+ def _sanitize_imgs (self , imgs , confounds ):
224+ """Check that provided images are in the correct format."""
225+ # Base fit for decomposition estimators : compute the embedded masker
226+ if isinstance (imgs , str ):
227+ if EXPAND_PATH_WILDCARDS and glob .has_magic (imgs ):
228+ imgs = _resolve_globbing (imgs )
229+
230+ if isinstance (imgs , str ) or not hasattr (imgs , "__iter__" ):
231+ # these classes are meant for list of 4D images
232+ # (multi-subject), we want it to work also on a single
233+ # subject, so we hack it.
234+ imgs = [imgs ]
235+
236+ if len (imgs ) == 0 :
237+ # Common error that arises from a null glob. Capture
238+ # it early and raise a helpful message
239+ raise ValueError (
240+ "Need one or more Niimg-like objects as input, "
241+ "an empty list was given."
242+ )
243+
244+ # if no confounds have been specified, match length of imgs
245+ if confounds is None :
246+ confounds = list (itertools .repeat (confounds , len (imgs )))
247+ return imgs , confounds
248+
218249 def fit (self , imgs , confounds = None ):
219250 """
220251 Compute the mask and the dynamic parcels across datasets.
@@ -237,25 +268,8 @@ def fit(self, imgs, confounds=None):
             Returns the instance itself. Contains attributes listed
             at the object level.
         """
-        # Base fit for decomposition estimators: compute the embedded masker
-        if isinstance(imgs, str):
-            if EXPAND_PATH_WILDCARDS and glob.has_magic(imgs):
-                imgs = _resolve_globbing(imgs)
-
-        if isinstance(imgs, str) or not hasattr(imgs, "__iter__"):
-            # these classes are meant for list of 4D images
-            # (multi-subject), we want it to work also on a single
-            # subject, so we hack it.
-            imgs = [imgs]
-
-        if len(imgs) == 0:
-            # Common error that arises from a null glob. Capture
-            # it early and raise a helpful message
-            raise ValueError(
-                "Need one or more Niimg-like objects as input, "
-                "an empty list was given."
-            )
         self.masker_ = check_embedded_nifti_masker(self)
+        imgs, confounds = self._sanitize_imgs(imgs, confounds)
 
         # Avoid warning with imgs != None
         # if masker_ has been provided a mask_img
@@ -266,6 +280,10 @@ def fit(self, imgs, confounds=None):
         self.mask_img_ = self.masker_.mask_img_
 
         # Load grey_matter segmentation
+        if self.grey_matter == "MNI":
+            mni = datasets.fetch_icbm152_2009()
+            self.grey_matter = mni.gm
+
         if self.grey_matter is not None:
             masker_anat = NiftiMasker(
                 mask_img=self.mask_img_, smoothing_fwhm=self.smoothing_fwhm
@@ -276,11 +294,7 @@ def fit(self, imgs, confounds=None):
                 1 - grey_matter
             ) + self.std_grey_matter * grey_matter
         else:
-            self.grey_matter_ = None
-
-        # if no confounds have been specified, match length of imgs
-        if confounds is None:
-            confounds = list(itertools.repeat(confounds, len(imgs)))
+            self.weights_grey_matter_ = None
 
         # Control random number generation
         self.random_state = check_random_state(self.random_state)
@@ -303,6 +317,9 @@ def fit(self, imgs, confounds=None):
         # Return components
         self.components_ = stab_maps
         self.dwell_time_ = dwell_time
+
+        # Create embedding
+        self.embedding = Embedding(stab_maps.todense())
         return self
 
     def _mask_and_reduce_batch(self, imgs, confounds=None):
@@ -345,17 +362,17 @@ def _mask_and_reduce(self, imgs, confounds=None):
         dwell_time: ndarray
             dwell time of each state.
         """
-
         onehot_list = []
         for ind, img, confound in zip(range(len(imgs)), imgs, confounds):
             this_data = self.masker_.transform(img, confound)
             # Now get rid of the img as fast as possible, to free a
             # reference count on it, and possibly free the corresponding
             # data
-            this_data = np.multiply(this_data, self.weights_grey_matter_)
             del img
             # Scale grey matter voxels to give them more weight in the
             # classification
+            if self.weights_grey_matter_ is not None:
+                this_data = np.multiply(this_data, self.weights_grey_matter_)
             onehot = bpp.replicate_clusters(
                 this_data.transpose(),
                 subsample_size=self.subsample_size,
@@ -390,17 +407,116 @@ def _mask_and_reduce(self, imgs, confounds=None):
 
         return stab_maps, dwell_time
 
-    def transform_sparse(self, img, confound=None):
-        """Transform a 4D dataset in a component space."""
+    def load_img(self, img, confound=None):
+        """
+        Load a 4D image using the same preprocessing as model fitting.
+
+        Parameters
+        ----------
+        img : Niimg-like object.
+            See http://nilearn.github.io/manipulating_images/input_output.html
+            An fMRI dataset
+
+        Returns
+        -------
+        img_p : Niimg-like object.
+            Same as input, after the preprocessing steps used in the model
+            have been applied.
+        """
         self._check_components_()
-        this_data = self.masker_.transform(img, confound)
+        tseries = self.masker_.transform(img, confound)
+        return self.masker_.inverse_transform(tseries)
+
+    def transform(self, img, confound=None):
+        """
+        Transform a 4D dataset in a component space.
+
+        Parameters
+        ----------
+        img : Niimg-like object.
+            See http://nilearn.github.io/manipulating_images/input_output.html
+            An fMRI dataset
+        confound : CSV file or 2D matrix, optional.
+            Confound parameters, to be passed to nilearn.signal.clean.
+
+        Returns
+        -------
+        weights : numpy array of shape [n_samples, n_states + 1]
+            The fMRI tseries after projection in the parcellation
+            space. Note that the first coefficient corresponds to the intercept,
+            and not one of the parcels.
+        """
+        self._check_components_()
+        tseries = self.masker_.transform(img, confound)
         del img
-        reg = LinearRegression().fit(
-            self.components_.transpose(), this_data.transpose()
-        )
-        return reg.coef_
+        return self.embedding.transform(tseries)
+
+    def inverse_transform(self, weights):
+        """
+        Transform component weights as a 4D dataset.
+
+        Parameters
+        ----------
+        weights : numpy array of shape [n_samples, n_states + 1]
+            The fMRI tseries after projection in the parcellation
+            space. Note that the first coefficient corresponds to the intercept,
+            and not one of the parcels.
+
+        Returns
+        -------
+        img : Niimg-like object.
+            The 4D fMRI dataset corresponding to the weights.
+        """
+        self._check_components_()
+        return self.masker_.inverse_transform(self.embedding.inverse_transform(weights))
 
-    def inverse_transform_sparse(self, weights):
-        """Transform component weights as a 4D dataset."""
+    def compress(self, img, confound=None):
+        """
+        Provide the approximation of a 4D dataset after projection in parcellation space.
+
+        Parameters
+        ----------
+        img : Niimg-like object.
+            See http://nilearn.github.io/manipulating_images/input_output.html
+            An fMRI dataset
+        confound : CSV file or 2D matrix, optional.
+            Confound parameters, to be passed to nilearn.signal.clean.
+
+        Returns
+        -------
+        img_c : Niimg-like object.
+            The 4D fMRI dataset corresponding to the input, compressed in the parcel space.
+        """
         self._check_components_()
-        self.masker_.inverse_transform(weights * self.components_)
+        tseries = self.masker_.transform(img, confound)
+        del img
+        return self.masker_.inverse_transform(self.embedding.compress(tseries))
+
+    def score(self, img, confound=None):
+        """
+        R2 map of the quality of the compression.
+
+        Parameters
+        ----------
+        img : Niimg-like object.
+            See http://nilearn.github.io/manipulating_images/input_output.html
+            An fMRI dataset
+        confound : CSV file or 2D matrix, optional.
+            Confound parameters, to be passed to nilearn.signal.clean.
+
+        Returns
+        -------
+        score : Niimg-like object.
+            A 3D map of the R2 score of the quality of the compression.
+
+        Note
+        ----
+        The R2 score map is the fraction of the variance of fMRI time series captured
+        by the parcels at each voxel. A score of 1 means perfect approximation.
+        The score can be negative, in which case the parcellation approximation
+        performs worse than the average of the signal.
+        """
+        self._check_components_()
+        tseries = self.masker_.transform(img, confound)
+        del img
+        return self.masker_.inverse_transform(self.embedding.score(tseries))
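
For context, a minimal usage sketch of the renamed Dypac class and the new embedding-based API introduced by this diff. The import path and the example dataset (nilearn's ADHD sample) are assumptions made for illustration, not part of the commit; constructor parameters are left at their defaults.

# Hypothetical usage sketch (not part of this commit); the import path and
# dataset below are assumptions for illustration.
from nilearn import datasets
from dypac.dypac import Dypac

adhd = datasets.fetch_adhd(n_subjects=1)

# Fit dynamic parcels on a single run, passing the matching confounds file.
model = Dypac()
model.fit([adhd.func[0]], confounds=[adhd.confounds[0]])

# Project the run into parcel space; the first column of `weights` is the intercept.
weights = model.transform(adhd.func[0], confound=adhd.confounds[0])

# Map weights back to a 4D image, or compress the run in one call.
img_back = model.inverse_transform(weights)
img_c = model.compress(adhd.func[0], confound=adhd.confounds[0])

# Voxel-wise R2 map of the quality of the compression.
r2_map = model.score(adhd.func[0], confound=adhd.confounds[0])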
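
The Embedding helper imported from dypac.embeddings is not shown in this diff. Judging from the removed transform_sparse code (a LinearRegression of the data on the components) and from the docstrings above (weights of shape [n_samples, n_states + 1] with the intercept first), it amounts to a least-squares projection onto the stability maps plus an intercept. A rough sketch of that idea, under those assumptions and not the actual implementation:

import numpy as np

class EmbeddingSketch:
    """Rough stand-in for dypac.embeddings.Embedding (assumption, not the real class)."""

    def __init__(self, maps):
        # maps: array of shape [n_states, n_voxels]; prepend an intercept regressor
        maps = np.asarray(maps)
        self.basis = np.vstack([np.ones((1, maps.shape[1])), maps])

    def transform(self, tseries):
        # Least-squares weights, shape [n_samples, n_states + 1]; column 0 is the intercept
        weights, *_ = np.linalg.lstsq(self.basis.T, np.asarray(tseries).T, rcond=None)
        return weights.T

    def inverse_transform(self, weights):
        # Back to voxel space, shape [n_samples, n_voxels]
        return np.asarray(weights) @ self.basis

    def compress(self, tseries):
        # Project into parcel space and back: the parcel-level approximation of tseries
        return self.inverse_transform(self.transform(tseries))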