From ec08f482e676545ac32fa7607461d4b127c629c3 Mon Sep 17 00:00:00 2001
From: justinehansen
Date: Fri, 10 Mar 2023 16:34:52 -0800
Subject: [PATCH 1/4] [ENH] Add option to return nulls in

---
 neuromaps/stats.py | 25 +++++++++++++++++++------
 1 file changed, 19 insertions(+), 6 deletions(-)

diff --git a/neuromaps/stats.py b/neuromaps/stats.py
index 63a5e5c0..7abb205a 100644
--- a/neuromaps/stats.py
+++ b/neuromaps/stats.py
@@ -17,7 +17,7 @@
 
 
 def compare_images(src, trg, metric='pearsonr', ignore_zero=True, nulls=None,
-                   nan_policy='omit'):
+                   nan_policy='omit', return_nulls=False):
     """
     Compares images `src` and `trg`
 
@@ -44,6 +44,9 @@ def compare_images(src, trg, metric='pearsonr', ignore_zero=True, nulls=None,
         the nan values to the callable metric (will return nan if the metric
         is `spearmanr` `or pearsonr`), 'raise' throws an error, 'omit' performs
         the calculations ignoring nan values. Default: 'omit'
+    return_nulls : bool, optional
+        Whether to return the null distribution of comparisons. Can only be set
+        to `True` if `nulls` is not None. Default: False
 
     Returns
     -------
@@ -62,6 +65,9 @@
         raise ValueError('Provided callable `metric` must accept two '
                          'inputs and return single value.')
 
+    if return_nulls and nulls is None:
+        raise ValueError('`return_nulls` cannot be True when `nulls` is None.')
+
     srcdata, trgdata = load_data(src), load_data(trg)
 
     # drop NaNs (if nan_policy==`omit`) and zeros (if ignore_zero=True)
@@ -90,13 +96,14 @@
             n_perm = nulls.shape[-1]
             nulls = nulls[mask]
         return permtest_metric(srcdata, trgdata, metric, n_perm=n_perm,
-                               nulls=nulls, nan_policy=nan_policy)
+                               nulls=nulls, nan_policy=nan_policy,
+                               return_nulls=return_nulls)
 
     return metric(srcdata, trgdata)
 
 
 def permtest_metric(a, b, metric='pearsonr', n_perm=1000, seed=0, nulls=None,
-                    nan_policy='propagate'):
+                    nan_policy='propagate', return_nulls=False):
     """
     Generates non-parameteric p-value of `a` and `b` for `metric`
 
@@ -128,6 +135,8 @@ def permtest_metric(a, b, metric='pearsonr', n_perm=1000, seed=0, nulls=None,
         Defines how to handle when inputs contain nan. 'propagate' returns nan,
         'raise' throws an error, 'omit' performs the calculations ignoring nan
         values. Default: 'propagate'
+    return_nulls : bool, optional
+        Whether to return the null distribution of comparisons. Default: False
 
     Returns
     -------
@@ -176,15 +185,19 @@ def nan_wrap(a, b, nan_policy='propagate'):
     abs_true = np.abs(true_sim)
 
     permutations = np.ones(true_sim.shape)
+    nulldist = np.zeros((n_perm, ))
     for perm in range(n_perm):
         # permute `a` and determine whether correlations exceed original
         ap = a[rs.permutation(len(a))] if nulls is None else nulls[:, perm]
-        permutations += np.abs(
-            compfunc(ap, b, nan_policy=nan_policy)
-        ) >= abs_true
+        nullcomp = compfunc(ap, b, nan_policy=nan_policy)
+        permutations += np.abs(nullcomp) >= abs_true
+        nulldist[perm] = nullcomp
 
     pvals = permutations / (n_perm + 1)  # + 1 in denom accounts for true_sim
 
+    if return_nulls:
+        return true_sim, pvals, nulldist
+
     return true_sim, pvals
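A minimal usage sketch of the new `return_nulls` option (not part of the patch; the toy data, the fsaverage-10k atlas choice, and the `alexander_bloch` null model are illustrative assumptions):

```python
import numpy as np
from neuromaps import nulls, stats

rng = np.random.default_rng(1234)
# two toy surface "maps" with one value per fsaverage-10k vertex
# (10242 vertices per hemisphere, 20484 total)
src = rng.standard_normal(20484)
trg = src + rng.standard_normal(20484)

# spatial-autocorrelation-preserving permutations of `src`
rotated = nulls.alexander_bloch(src, atlas='fsaverage', density='10k',
                                n_perm=100, seed=1234)

# with return_nulls=True the null comparisons come back alongside r and p
corr, pval, null_corrs = stats.compare_images(src, trg, nulls=rotated,
                                              return_nulls=True)
print(corr, pval, null_corrs.shape)  # null_corrs -> (100,)
```

Calls that pass `nulls` without `return_nulls=True` keep returning the usual `(similarity, pvalue)` pair, so existing code is unaffected.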
From 085f0f4bc7e5483d7c6c5f3265e4de593a804b0b Mon Sep 17 00:00:00 2001
From: justinehansen
Date: Thu, 30 Mar 2023 11:49:53 -0700
Subject: [PATCH 2/4] [FIX] Update permtest_metric to work for multi-column
 arrays

---
 neuromaps/stats.py | 9 ++++++---
 1 file changed, 6 insertions(+), 3 deletions(-)

diff --git a/neuromaps/stats.py b/neuromaps/stats.py
index 7abb205a..654270a9 100644
--- a/neuromaps/stats.py
+++ b/neuromaps/stats.py
@@ -144,6 +144,9 @@ def permtest_metric(a, b, metric='pearsonr', n_perm=1000, seed=0, nulls=None,
         Similarity metric
     pvalue : float
         Non-parametric p-value
+    nulls : (n_perm, ) array)like
+        Null distribution of similarity metrics. Only returned if
+        `return_nulls` is True.
 
     Notes
     -----
@@ -185,18 +188,18 @@ def nan_wrap(a, b, nan_policy='propagate'):
     abs_true = np.abs(true_sim)
 
     permutations = np.ones(true_sim.shape)
-    nulldist = np.zeros((n_perm, ))
+    nulldist = []
     for perm in range(n_perm):
         # permute `a` and determine whether correlations exceed original
         ap = a[rs.permutation(len(a))] if nulls is None else nulls[:, perm]
         nullcomp = compfunc(ap, b, nan_policy=nan_policy)
         permutations += np.abs(nullcomp) >= abs_true
-        nulldist[perm] = nullcomp
+        nulldist.append(nullcomp)
 
     pvals = permutations / (n_perm + 1)  # + 1 in denom accounts for true_sim
 
     if return_nulls:
-        return true_sim, pvals, nulldist
+        return true_sim, pvals, np.array(nulldist)
 
     return true_sim, pvals

From b606da3cefa43b69e5ea4f2a91c06978f813f1f1 Mon Sep 17 00:00:00 2001
From: justinehansen
Date: Thu, 30 Mar 2023 11:51:04 -0700
Subject: [PATCH 3/4] [FIX] Bug fix in spins.py

---
 neuromaps/nulls/spins.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/neuromaps/nulls/spins.py b/neuromaps/nulls/spins.py
index b9b0c5e0..57d2717d 100644
--- a/neuromaps/nulls/spins.py
+++ b/neuromaps/nulls/spins.py
@@ -125,7 +125,7 @@ def get_parcel_centroids(surfaces, parcellation=None, method='surface',
         vertices, faces = load_gifti(surf).agg_data()
         if parc is not None:
             labels = load_gifti(parc).agg_data()
-            labeltable = parc.labeltable.get_labels_as_dict()
+            labeltable = load_gifti(parc).labeltable.get_labels_as_dict()
             for lab in np.unique(labels):
                 if labeltable.get(lab) in drop:
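For context on PATCH 3/4: presumably `parc` may still be a file path at that point, and only the loaded GiftiImage exposes the label table, so the dictionary has to come from `load_gifti(parc)`. A rough nibabel-only illustration (the file name is hypothetical):

```python
import nibabel as nib

parc = 'lh.parcellation.label.gii'  # hypothetical GIFTI label file
gii = nib.load(parc)

labels = gii.agg_data()                           # per-vertex parcel ids
labeltable = gii.labeltable.get_labels_as_dict()  # e.g. {0: '???', 1: 'regionA', ...}

# `parc.labeltable` would raise AttributeError here, because `parc`
# is a plain string rather than a GiftiImage
```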
From 09f29f38a4bfeed767afc32d4ed3f9a47e527484 Mon Sep 17 00:00:00 2001
From: justinehansen
Date: Tue, 4 Apr 2023 10:27:21 -0700
Subject: [PATCH 4/4] [FIX] Fix docs and update permtest_metric() to be more
 efficient

---
 neuromaps/stats.py | 11 +++++++----
 1 file changed, 7 insertions(+), 4 deletions(-)

diff --git a/neuromaps/stats.py b/neuromaps/stats.py
index 654270a9..d2ba500b 100644
--- a/neuromaps/stats.py
+++ b/neuromaps/stats.py
@@ -54,6 +54,9 @@ def compare_images(src, trg, metric='pearsonr', ignore_zero=True, nulls=None,
         Comparison metric between `src` and `trg`
     pvalue : float
         The p-value of `similarity`, if `nulls` is not None
+    nulls : (n_perm, ) array_like
+        Null distribution of similarity metrics. Only returned if
+        `return_nulls` is True.
 
     """
     methods = ('pearsonr', 'spearmanr')
@@ -144,7 +147,7 @@ def permtest_metric(a, b, metric='pearsonr', n_perm=1000, seed=0, nulls=None,
         Similarity metric
     pvalue : float
         Non-parametric p-value
-    nulls : (n_perm, ) array)like
+    nulls : (n_perm, ) array_like
         Null distribution of similarity metrics. Only returned if
         `return_nulls` is True.
 
     Notes
     -----
@@ -188,18 +191,18 @@ def nan_wrap(a, b, nan_policy='propagate'):
     abs_true = np.abs(true_sim)
 
     permutations = np.ones(true_sim.shape)
-    nulldist = []
+    nulldist = np.zeros(((n_perm, ) + true_sim.shape))
     for perm in range(n_perm):
         # permute `a` and determine whether correlations exceed original
         ap = a[rs.permutation(len(a))] if nulls is None else nulls[:, perm]
         nullcomp = compfunc(ap, b, nan_policy=nan_policy)
         permutations += np.abs(nullcomp) >= abs_true
-        nulldist.append(nullcomp)
+        nulldist[perm] = nullcomp
 
     pvals = permutations / (n_perm + 1)  # + 1 in denom accounts for true_sim
 
     if return_nulls:
-        return true_sim, pvals, np.array(nulldist)
+        return true_sim, pvals, nulldist
 
     return true_sim, pvals
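A small numpy-only sketch of why PATCH 4/4 preallocates `nulldist` with shape `(n_perm, ) + true_sim.shape` (the toy values stand in for the metric; nothing here is from the patch itself): when the comparison is computed column-wise, each permutation yields one value per column, and the same indexed assignment also covers the single-column (scalar) case.

```python
import numpy as np

n_perm = 5
rng = np.random.default_rng(0)

# scalar similarity vs. one similarity per column
for true_sim in (np.asarray(0.3), np.asarray([0.3, -0.1, 0.7])):
    nulldist = np.zeros((n_perm, ) + true_sim.shape)
    for perm in range(n_perm):
        nullcomp = rng.uniform(-1, 1, size=true_sim.shape)  # stand-in for compfunc(ap, b)
        nulldist[perm] = nullcomp   # works for shape () and shape (3,)
    print(nulldist.shape)           # (5,) then (5, 3)
```

Compared with PATCH 2/4, this avoids growing a Python list and converting it with `np.array` on every permutation-tested call.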