diff --git a/README.md b/README.md
index b679210..94870f7 100644
--- a/README.md
+++ b/README.md
@@ -25,23 +25,26 @@ stellar models, it is configured for two by default:
 and [Bayestar](https://arxiv.org/pdf/1401.1508.pdf).
 
 #### Zero-points
-Zero-point offsets in several bands have been derived using Gaia data
-and can be included during runtime.
+Zero-point offsets in several bands have been estimated using Gaia data
+and can be included during runtime.
+**These are currently not thoroughly vetted, so use at your own risk.**
 
 #### Dust Map
 `brutus` is able to incorporate a 3-D dust prior. The current prior is
-based on the "Bayestar17" dust map from
-[Green et al. (2018)](https://arxiv.org/abs/1801.03555).
+based on the "Bayestar19" dust map from
+[Green et al. (2019)](https://arxiv.org/abs/1905.02734).
 
 #### Generating SEDs
 `brutus` contains built-in SED generation utilities based on the MIST
 stellar models, modeled off of
 [`minesweeper`](https://github.com/pacargile/MINESweeper).
 These are optimized for either generating photometry from stellar mass
-tracks or for a single-age stellar isochrone, and are based on
+tracks or for a single-age stellar isochrone based on
 artificial neural networks trained on bolometric correction tables.
+An empirical correction table to the models derived using several clusters
+is also provided, which improves the models down to ~0.5 solar masses.
+**These are currently not thoroughly vetted, so use at your own risk.**
 
 Please contact Phil Cargile (pcargile@cfa.harvard.edu)
 and Josh Speagle (jspeagle@cfa.harvard.edu)
 for more information on the provided data files.
diff --git a/brutus/__init__.py b/brutus/__init__.py
index 87689bd..5f88186 100644
--- a/brutus/__init__.py
+++ b/brutus/__init__.py
@@ -2,9 +2,8 @@
 # -*- coding: utf-8 -*-
 
 from __future__ import (division, print_function)
-from six.moves import range
 
 from .fitting import *
 from .utils import *
 
-__version__ = "0.6.9"
+__version__ = "0.7.0"
diff --git a/brutus/cluster.py b/brutus/cluster.py
index 1b35750..d20d89d 100644
--- a/brutus/cluster.py
+++ b/brutus/cluster.py
@@ -7,16 +7,5 @@
 """
 
 from __future__ import (print_function, division)
-import six
-from six.moves import range
-
-import sys
-import os
-import warnings
-import math
-import numpy as np
-import warnings
-
-from .utils import get_seds, add_mag
 
 __all__ = [""]
diff --git a/brutus/dust.py b/brutus/dust.py
index c329872..62dea96 100644
--- a/brutus/dust.py
+++ b/brutus/dust.py
@@ -8,15 +8,8 @@
 """
 
 from __future__ import (print_function, division)
-import six
-from six.moves import range
 
-import sys
-import os
-import warnings
-import math
 import numpy as np
-import warnings
 
 import h5py
 import astropy.coordinates as coordinates
@@ -287,9 +280,6 @@ def query(self, coords):
 
         """
 
-        # Get number of coordinates requested.
-        n_coords_ret = coords.shape[0]
-
         # Extract the correct angular pixel(s).
         try:
             pix_idx = self._find_data_idx(coords.l.deg, coords.b.deg)
diff --git a/brutus/filters.py b/brutus/filters.py
index 596c207..564ab65 100644
--- a/brutus/filters.py
+++ b/brutus/filters.py
@@ -7,8 +7,6 @@
 """
 
 from __future__ import (print_function, division)
-import six
-from six.moves import range
 
 # Define the set of filters available for the provided MIST models.
 # The Bayestar models are only defined for PanSTARRS `grizy` and 2MASS.
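The README changes above describe the SED-generation utilities that the `brutus/seds.py` hunks below refactor. As orientation, here is a minimal sketch of how those utilities are typically driven. Only the `get_sed` keywords are confirmed by this patch (see the `brutus/seds.py` hunks); the `SEDmaker` class name, the `filters` attribute names, and the `mistfile` path are assumptions, so treat this as illustrative rather than canonical.

```python
# Illustrative sketch only -- not part of this patch. `SEDmaker`, the filter
# list names, and the data-file path are assumptions; the `get_sed` keywords
# match the signature shown in the brutus/seds.py hunks below.
from brutus import filters
from brutus.seds import SEDmaker  # assumed import path

# Build a generator over PanSTARRS grizy + 2MASS (per brutus/filters.py).
mist = SEDmaker(filt=filters.ps + filters.tmass,  # assumed attribute names
                mistfile="MIST_1.2_EEPtrk.h5")    # assumed data file

# Predict photometry for a 1 solar-mass, solar-metallicity star at EEP 350,
# reddened by A_V = 0.5 with R_V = 3.3.
sed, params, params2 = mist.get_sed(mini=1., eep=350., feh=0.,
                                    av=0.5, rv=3.3)
```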
diff --git a/brutus/fitting.py b/brutus/fitting.py
index 50a315e..5a2ee55 100644
--- a/brutus/fitting.py
+++ b/brutus/fitting.py
@@ -7,14 +7,10 @@
 """
 
 from __future__ import (print_function, division)
-from six.moves import range
 
 import sys
-import os
 import warnings
-import math
 import numpy as np
-import warnings
 import h5py
 import time
 from scipy.special import xlogy, gammaln
@@ -24,8 +20,10 @@
 except ImportError:
     from scipy.misc import logsumexp
 
-from .pdf import *
-from .utils import *
+from .pdf import imf_lnprior, ps1_MrLF_lnprior
+from .pdf import parallax_lnprior, scale_parallax_lnprior
+from .pdf import gal_lnprior, dust_lnprior
+from .utils import _function_wrapper, _inverse3, magnitude, get_seds
 
 __all__ = ["loglike", "_optimize_fit", "BruteForce", "_lnpost"]
 
@@ -608,9 +606,9 @@ def __init__(self, models, models_labels, labels_mask, pool=None):
 
     def fit(self, data, data_err, data_mask, data_labels, save_file,
             phot_offsets=None, parallax=None, parallax_err=None,
-            Nmc_prior=100, avlim=(0., 6.), av_gauss=None,
+            Nmc_prior=50, avlim=(0., 6.), av_gauss=None,
             rvlim=(1., 8.), rv_gauss=(3.32, 0.18), binary_frac=0.5,
-            lnprior=None, wt_thresh=1e-3, cdf_thresh=2e-4, Ndraws=2000,
+            lnprior=None, wt_thresh=1e-3, cdf_thresh=2e-4, Ndraws=500,
             apply_agewt=True, apply_grad=True, lndistprior=None,
             lndustprior=None, dustfile='bayestar2017_v1.h5',
             apply_dlabels=True, data_coords=None, logl_dim_prior=True,
@@ -837,9 +835,9 @@ def fit(self, data, data_err, data_mask, data_labels, save_file,
         try:
             # Try reading in parallel-friendly way if possible.
             try:
-                ft = h5py.File(dustfile, 'r', libver='latest', swmr=True)
+                h5py.File(dustfile, 'r', libver='latest', swmr=True)
             except:
-                ft = h5py.File(dustfile, 'r')
+                h5py.File(dustfile, 'r')
                 pass
         except:
             raise ValueError("The default dust prior is being used but "
@@ -1152,13 +1150,12 @@ def _fit(self, data, data_err, data_mask,
 
         """
 
-        Ndata, Nmodels = len(data), self.NMODEL
+        Ndata = len(data)
         models = np.array(self.models)
         if wt_thresh is None and cdf_thresh is None:
             wt_thresh = -np.inf  # default to no clipping/thresholding
         if rstate is None:
             rstate = np.random
-        mvn = rstate.multivariate_normal
         if parallax is not None and parallax_err is None:
             raise ValueError("Must provide both `parallax` and "
                              "`parallax_err`.")
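Since the `fit` defaults above change (`Nmc_prior`: 100 → 50; `Ndraws`: 2000 → 500), runs that relied on the old defaults will now use fewer Monte Carlo draws unless the values are passed explicitly. A call-shape sketch follows, with toy inputs standing in for a real photometric catalog; the model grid is loaded elsewhere, and the array layouts here are assumptions rather than the documented API.

```python
import numpy as np
from brutus.fitting import BruteForce

# Toy stand-ins purely to illustrate the call; real inputs come from a
# photometric catalog and a precomputed model grid (layouts assumed here).
Nobj, Nfilt = 5, 8
rng = np.random.RandomState(56)
data = rng.uniform(1., 2., size=(Nobj, Nfilt))   # fluxes
data_err = np.full((Nobj, Nfilt), 0.05)          # flux errors
data_mask = np.ones((Nobj, Nfilt), dtype=bool)   # bands to use per object
data_labels = np.arange(Nobj)                    # object IDs

# `models`, `models_labels`, `labels_mask` come from the model-grid file
# (loading step omitted; not specified by this patch).
BF = BruteForce(models, models_labels, labels_mask)
BF.fit(data, data_err, data_mask, data_labels, "demo_out",
       Nmc_prior=100, Ndraws=2000)  # pass the old defaults explicitly if desired
```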
@@ -1189,9 +1186,9 @@ def _fit(self, data, data_err, data_mask,
         try:
             # Try reading in parallel-friendly way if possible.
             try:
-                ft = h5py.File(dustfile, 'r', libver='latest', swmr=True)
+                h5py.File(dustfile, 'r', libver='latest', swmr=True)
             except:
-                ft = h5py.File(dustfile, 'r')
+                h5py.File(dustfile, 'r')
                 pass
         except:
             raise ValueError("The default dust prior is being used but "
diff --git a/brutus/los.py b/brutus/los.py
index ffb62e5..51df623 100644
--- a/brutus/los.py
+++ b/brutus/los.py
@@ -7,15 +7,9 @@
 """
 
 from __future__ import (print_function, division)
-import six
-from six.moves import range
 
-import sys
-import os
 import warnings
-import math
 import numpy as np
-import warnings
 
 from scipy.stats import truncnorm
 
@@ -23,8 +17,7 @@
 except ImportError:
     from scipy.misc import logsumexp
 
-__all__ = ["LOS_clouds_priortransform", "LOS_clouds_loglike_bin",
-           "LOS_clouds_loglike_samples",
+__all__ = ["LOS_clouds_priortransform", "LOS_clouds_loglike_samples",
            "kernel_tophat", "kernel_gauss", "kernel_lorentz"]
 
 
@@ -125,134 +118,9 @@ def LOS_clouds_priortransform(u, rlims=(0., 6.), dlims=(4., 19.),
 
     return x
 
 
-def LOS_clouds_loglike_bin(theta, cdfs, xedges, yedges, interpolate=True):
-    """
-    Compute the log-likelihood for the cumulative reddening along the
-    line of sight (LOS) parameterized by `theta`, given input binned
-    stellar posteriors/bounds generated by `.pdf.bin_pdfs_distred`. Assumes
-    a uniform outlier model in distance and reddening across our binned
-    posteriors.
-
-    Parameters
-    ----------
-    theta : `~numpy.ndarray` of shape `(Nparams,)`
-        A collection of parameters that characterizes the cumulative
-        reddening along the LOS. Contains the fraction of outliers `P_b`
-        followed by the foreground reddening `fred` and a series of
-        `(dist, red)` pairs for each "cloud" along the LOS.
-
-    cdfs : `~numpy.ndarray` of shape `(Nobj, Nxbin, Nybin)`
-        Binned versions of the CDFs.
-
-    xedges : `~numpy.ndarray` of shape `(Nxbin+1,)`
-        The edges defining the bins in distance.
-
-    yedges : `~numpy.ndarray` of shape `(Nybin+1,)`
-        The edges defining the bins in reddening.
-
-    interpolate : bool, optional
-        Whether to linearly interpolate between bins (defined by their central
-        positions). Default is `True`.
-
-    Returns
-    -------
-    loglike : float
-        The computed log-likelihood.
-
-    """
-
-    # Grab parameters.
-    pb = theta[0]
-    reds, dists = np.atleast_1d(theta[1::2]), np.atleast_1d(theta[2::2])
-
-    # Convert to bin coordinates.
-    dx, dy = xedges[1] - xedges[0], yedges[1] - yedges[0]
-    Nxedges, Nyedges = len(xedges), len(yedges)
-    Nxbin, Nybin = Nxedges - 1, Nyedges - 1
-    x_ctrs = np.arange(0.5, Nxbin, 1.)
-    y_ctrs = np.arange(0.5, Nybin, 1.)
-    x = np.concatenate(([0], (dists - xedges[0]) / dx, [Nxbin]))
-    y = (reds - yedges[0]) / dy
-
-    # Find (x,y) neighbors in bin space.
-    x1 = np.array(np.floor(x - 0.5), dtype='int')
-    x2 = np.array(np.ceil(x - 0.5), dtype='int')
-    y1 = np.array(np.floor(y - 0.5), dtype='int')
-    y2 = np.array(np.ceil(y - 0.5), dtype='int')
-
-    # Threshold values to bin edges.
-    x1[np.where(x1 < 0)] = 0
-    x1[np.where(x1 >= Nxbin)] = Nxbin - 1
-    x2[np.where(x2 < 0)] = 0
-    x2[np.where(x2 >= Nxbin)] = Nxbin - 1
-    y1[np.where(y1 < 0)] = 0
-    y1[np.where(y1 >= Nybin)] = Nybin - 1
-    y2[np.where(y2 < 0)] = 0
-    y2[np.where(y2 >= Nybin)] = Nybin - 1
-
-    # Threshold (x,y) to edges (and shift to deal with centers).
-    x[np.where(x < 0.5)] = 0.5
-    x[np.where(x > Nxbin - 0.5)] = Nxbin - 0.5
-    y[np.where(y < 0.5)] = 0.5
-    y[np.where(y > Nybin - 0.5)] = Nybin - 0.5
-
-    # Define "left" and "right" versions for xs.
-    x1l, x1r = x1[:-1], x1[1:]
-    x2l, x2r = x2[:-1], x2[1:]
-    xl, xr = x[:-1], x[1:]
-
-    # Compute integral along LOS using the provided CDFs.
-    if interpolate:
-        # Interpolate between bins (left side).
-        # Define values q(x_i, y_i).
-        q11, q12, q21, q22 = (cdfs[:, x1l, y1], cdfs[:, x1l, y2],
-                              cdfs[:, x2l, y1], cdfs[:, x2l, y2])
-        # Compute areas.
-        dx1, dx2 = (xl - x_ctrs[x1l]), (x_ctrs[x2l] - xl)
-        dy1, dy2 = (y - y_ctrs[y1]), (y_ctrs[y2] - y)
-        # Interpolate in x.
-        qp1, qp2 = (q11 * dx2 + q21 * dx1), (q12 * dx2 + q22 * dx1)
-        xsel = np.where(~((dx1 > 0.) & (dx2 > 0.)))  # deal with edges
-        qp1[:, xsel], qp2[:, xsel] = q11[:, xsel], q12[:, xsel]  # replace
-        # Interpolate in y.
-        cdf_left = qp1 * dy2 + qp2 * dy1
-        ysel = np.where(~((dy1 > 0.) & (dy2 > 0.)))  # deal with edges
-        cdf_left[ysel] = qp1[ysel]  # replace edges
-
-        # Interpolate between the bins (right side).
-        # Define values q(x_i, y_i).
-        q11, q12, q21, q22 = (cdfs[:, x1r, y1], cdfs[:, x1r, y2],
-                              cdfs[:, x2r, y1], cdfs[:, x2r, y2])
-        # Compute areas.
-        dx1, dx2 = (xr - x_ctrs[x1r]), (x_ctrs[x2r] - xr)
-        dy1, dy2 = (y - y_ctrs[y1]), (y_ctrs[y2] - y)
-        # Interpolate in x.
-        qp1, qp2 = (q11 * dx2 + q21 * dx1), (q12 * dx2 + q22 * dx1)
-        xsel = np.where(~((dx1 > 0.) & (dx2 > 0.)))  # deal with edges
-        qp1[:, xsel], qp2[:, xsel] = q11[:, xsel], q12[:, xsel]  # replace
-        # Interpolate in y.
-        cdf_right = qp1 * dy2 + qp2 * dy1
-        ysel = np.where(~((dy1 > 0.) & (dy2 > 0.)))  # deal with edges
-        cdf_right[ysel] = qp1[ysel]  # replace edges
-    else:
-        # Just use the values from the bin we're currently in.
-        cdf_left, cdf_right = cdfs[:, x1l, y1], cdfs[:, x1r, y1]
-
-    # Compute likelihood.
-    likes = np.sum(cdf_right - cdf_left, axis=1)
-
-    # Add in outlier mixture model. Assume uniform in (x, y) with `pb` weight.
-    likes = pb * (1. / (Nybin * Nxbin)) + (1. - pb) * likes
-
-    # Compute total log-likelihood.
-    loglike = np.sum(np.log(likes))
-
-    return loglike
-
-
 def LOS_clouds_loglike_samples(theta, dsamps, rsamps, kernel='gauss',
                                rlims=(0., 6.), template_reds=None,
-                               Ndraws=25):
+                               Ndraws=25, additive_foreground=False):
     """
     Compute the log-likelihood for the cumulative reddening along the
     line of sight (LOS) parameterized by `theta`, given a set of input
@@ -293,6 +161,10 @@ def LOS_clouds_loglike_samples(theta, dsamps, rsamps, kernel='gauss',
     Ndraws : int, optional
         The number of draws to use for each star. Default is `25`.
 
+    additive_foreground : bool, optional
+        Whether the foreground is treated as just another value or added
+        to all background values. Default is `False`.
+
     Returns
     -------
     loglike : float
@@ -336,6 +208,10 @@ def LOS_clouds_loglike_samples(theta, dsamps, rsamps, kernel='gauss',
     if template_reds is not None:
         reds[1:] *= template_reds[None, :, None]  # reds[1:] are rescalings
 
+    # Adjust reddenings after the foreground if needed.
+    if additive_foreground:
+        reds[1:] += reds[0]  # add foreground to background
+
     # Define kernel parameters (mean, sigma) per LOS chunk.
     kparams = np.array([(r, rsmooth) for r in reds])
     kparams[0][1] = rsmooth0
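The new `additive_foreground` flag changes how the cloud reddenings in `reds` are interpreted: instead of each cloud's value standing on its own, the foreground `reds[0]` is stacked underneath every background component. A tiny self-contained sketch of the arithmetic (toy scalar values; in the real code `reds` is built per star and per draw):

```python
import numpy as np

# Toy kernel means for a foreground plus two clouds. In the real code these
# are per-star, per-draw arrays; scalars suffice to show the arithmetic.
reds = np.array([0.1, 0.4, 0.7])  # reds[0] is the foreground reddening

additive_foreground = True
if additive_foreground:
    reds[1:] += reds[0]  # clouds now sit on top of the foreground

print(reds)  # [0.1  0.5  0.8]
```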
diff --git a/brutus/pdf.py b/brutus/pdf.py
index 4a8d16c..61ff4c4 100644
--- a/brutus/pdf.py
+++ b/brutus/pdf.py
@@ -7,15 +7,11 @@
 """
 
 from __future__ import (print_function, division)
-import six
-from six.moves import range
 
 import sys
 import os
 import warnings
-import math
 import numpy as np
-import warnings
 
 from astropy import units
 from astropy.coordinates import SkyCoord
 from astropy.coordinates import CylindricalRepresentation as CylRep
@@ -648,7 +644,8 @@ def gal_lnprior(dists, coord, labels=None, R_solar=8., Z_solar=0.025,
 
 
 def dust_lnprior(dists, coord, avs, dustfile='bayestar2017_v1.h5',
-                 offset=0., scale=1., smooth=3., return_components=False):
+                 offset=0., scale=1., smooth=1., scatter=0.2,
+                 return_components=False):
     """
     Log-prior for a 3-D galactic dust model.
 
@@ -681,7 +678,13 @@ def dust_lnprior(dists, coord, avs, dustfile='bayestar2017_v1.h5',
 
     smooth : float, optional
         The factor used to inflate the derived uncertainties and smooth out
-        the dust map. Default is `3.` (3x larger errors).
+        the dust map. Applied *before* `scatter` is added.
+        Default is `1.`.
+
+    scatter : float, optional
+        Additional scatter that is added in quadrature with the derived
+        uncertainties (*after* `smooth` has been applied)
+        from the 3-D dust map. Default is `0.2`.
 
     return_components : bool, optional
         Whether to also return the components of the reddening LOS fit.
@@ -715,6 +718,7 @@ def dust_lnprior(dists, coord, avs, dustfile='bayestar2017_v1.h5',
     # Interpolate at corresponding distances.
     av_mean = scale * np.interp(dists, av_dist, av_mean) + offset
     av_err = smooth * scale * np.interp(dists, av_dist, av_err)
+    av_err = np.sqrt(av_err**2 + scatter**2)
 
     # Evaluate Gaussian prior.
     chi2 = (avs - av_mean)**2 / av_err**2  # chi2
@@ -915,9 +919,9 @@ def bin_pdfs_distred(data, cdf=False, ebv=False, dist_type='distance_modulus',
 
     # Generate parallax and Av realizations.
     for i, stuff in enumerate(zip(scales, avs, rvs, covs_sar,
-                                  parallaxes, parallax_errors, coords)):
+                                  parallaxes, parallax_errors, coord)):
         (scales_obj, avs_obj, rvs_obj, covs_sar_obj,
-         parallax, parallax_err, coord) = stuff
+         parallax, parallax_err, crd) = stuff
 
         # Print progress.
         if verbose:
@@ -933,7 +937,7 @@ def bin_pdfs_distred(data, cdf=False, ebv=False, dist_type='distance_modulus',
             dmdraws = 5. * np.log10(ddraws) + 10.
 
             # Re-apply distance and parallax priors to realizations.
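The reworked `dust_lnprior` error model applies `smooth` as a multiplicative inflation first and then adds `scatter` in quadrature, so `scatter` acts as a floor on the A_V uncertainty where the map errors are small. A worked example with the new defaults (toy map uncertainty):

```python
import numpy as np

# Map-derived A_V uncertainty at some distance (toy value), plus the new
# `dust_lnprior` defaults introduced by this patch.
av_err_map, scale = 0.15, 1.0
smooth, scatter = 1.0, 0.2

av_err = smooth * scale * av_err_map      # multiplicative inflation first
av_err = np.sqrt(av_err**2 + scatter**2)  # then the quadrature scatter floor

print(av_err)  # sqrt(0.15**2 + 0.2**2) = 0.25
```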
-            lnp_draws = lndistprior(ddraws, coord)
+            lnp_draws = lndistprior(ddraws, crd)
             if parallax is not None and parallax_err is not None:
                 lnp_draws += parallax_lnprior(pdraws, parallax, parallax_err)
             lnp = logsumexp(lnp_draws, axis=1)
diff --git a/brutus/plotting.py b/brutus/plotting.py
index bfc40bb..4c67768 100644
--- a/brutus/plotting.py
+++ b/brutus/plotting.py
@@ -7,13 +7,10 @@
 """
 
 from __future__ import (print_function, division)
-import six
 from six.moves import range
 
-import sys
-import os
 import warnings
-import math
+import logging
 import numpy as np
 from matplotlib import pyplot as plt
 from matplotlib.ticker import MaxNLocator, NullLocator
@@ -30,14 +27,9 @@
 except ImportError:
     from scipy.misc import logsumexp
 
-try:
-    str_type = types.StringTypes
-    float_type = types.FloatType
-    int_type = types.IntType
-except:
-    str_type = str
-    float_type = float
-    int_type = int
+str_type = str
+float_type = float
+int_type = int
 
 __all__ = ["cornerplot", "dist_vs_red", "posterior_predictive",
            "photometric_offsets", "photometric_offsets_2d", "_hist2d"]
 
 
@@ -47,10 +39,11 @@ def cornerplot(idxs, data, params, lndistprior=None, coord=None,
                avlim=(0., 6.), rvlim=(1., 8.), weights=None, parallax=None,
                parallax_err=None, Nr=500, applied_parallax=True,
                pcolor='blue', parallax_kwargs=None, span=None,
-               quantiles=[0.025, 0.5, 0.975], color='black', smooth=0.035,
+               quantiles=[0.025, 0.5, 0.975], color='black', smooth=10,
                hist_kwargs=None, hist2d_kwargs=None, labels=None,
                label_kwargs=None, show_titles=False, title_fmt=".2f",
-               title_kwargs=None, truths=None, truth_color='red',
+               title_kwargs=None, title_quantiles=[0.025, 0.5, 0.975],
+               truths=None, truth_color='red',
                truth_kwargs=None, max_n_ticks=5, top_ticks=False,
                use_math_text=False, verbose=False, fig=None, rstate=None):
     """
@@ -135,9 +128,9 @@ def cornerplot(idxs, data, params, lndistprior=None, coord=None,
         The standard deviation (either a single value or a different value for
         each subplot) for the Gaussian kernel used to smooth the 1-D and 2-D
         marginalized posteriors, expressed as a fraction of the span.
-        Default is `0.035` (3.5% smoothing). If an integer is provided instead,
-        this will instead default to a simple (weighted) histogram with
-        `bins=smooth`.
+        If an integer is provided instead, this will default
+        to a simple (weighted) histogram with `bins=smooth`.
+        Default is `10` (10 bins).
 
     hist_kwargs : dict, optional
         Extra keyword arguments to send to the 1-D (smoothed) histograms.
@@ -156,8 +149,10 @@ def cornerplot(idxs, data, params, lndistprior=None, coord=None,
 
     show_titles : bool, optional
         Whether to display a title above each 1-D marginalized posterior
-        showing the 0.5 quantile along with the upper/lower bounds associated
-        with the 0.025 and 0.975 (95%/2-sigma credible interval) quantiles.
+        showing the quantiles specified by `title_quantiles`. By default,
+        this will show the median (0.5 quantile) along with the upper/lower
+        bounds associated with the 0.025 and 0.975 (95%/2-sigma credible
+        interval) quantiles.
         Default is `True`.
 
     title_fmt : str, optional
@@ -168,6 +163,11 @@ def cornerplot(idxs, data, params, lndistprior=None, coord=None,
         Extra keyword arguments that will be sent to the
         `~matplotlib.axes.Axes.set_title` command.
 
+    title_quantiles : iterable, optional
+        A list of 3 fractional quantiles displayed in the title, ordered
+        from lowest to highest. Default is `[0.025, 0.5, 0.975]`
+        (spanning the 95%/2-sigma credible interval).
+
     truths : iterable with shape `(ndim,)`, optional
         A list of reference values that will be overplotted on the traces
         and marginalized 1-D posteriors as solid horizontal/vertical lines.
@@ -427,7 +427,7 @@ def cornerplot(idxs, data, params, lndistprior=None, coord=None,
             if show_titles:
                 title = None
                 if title_fmt is not None:
-                    ql, qm, qh = quantile(x, [0.025, 0.5, 0.975], weights=weights)
+                    ql, qm, qh = quantile(x, title_quantiles, weights=weights)
                     q_minus, q_plus = qm - ql, qh - qm
                     fmt = "{{0:{0}}}".format(title_fmt).format
                     title = r"${{{0}}}_{{-{1}}}^{{+{2}}}$"
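With `title_quantiles` decoupled from `quantiles`, the dashed lines drawn on the 1-D marginals and the quantiles quoted in the titles can now differ. A call sketch: `idxs`, `data`, and `params` are placeholders for real fit outputs, and the `(fig, axes)` return value is assumed to follow the usual corner-plot convention rather than confirmed by this patch.

```python
from brutus import plotting as bplot

# `idxs`, `data`, and `params` are placeholders for real brutus fit outputs;
# see the `cornerplot` docstring for their expected shapes.
fig, axes = bplot.cornerplot(idxs, data, params,
                             show_titles=True,
                             title_quantiles=[0.16, 0.5, 0.84],  # 1-sigma titles
                             quantiles=[0.025, 0.5, 0.975],      # 2-sigma lines
                             smooth=10)  # new default: 10-bin histograms
```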
diff --git a/brutus/seds.py b/brutus/seds.py
index 30226f1..ff563ec 100644
--- a/brutus/seds.py
+++ b/brutus/seds.py
@@ -8,15 +8,11 @@
 """
 
 from __future__ import (print_function, division)
-import six
 from six.moves import range
 
 import sys
-import os
 import warnings
-import math
 import numpy as np
-import warnings
 import time
 from copy import deepcopy
 from itertools import product
@@ -111,6 +107,7 @@ def __init__(self, mistfile=None, labels=["mini", "eep", "feh"],
         # Construct age weights.
         if ageweight:
             self.add_age_weights()
+        self._ageidx = self.predictions.index("loga")
 
         # Construct grid.
         self.build_interpolator()
@@ -183,7 +180,7 @@ def add_age_weights(self, verbose=True):
         assert ("loga" in self.predictions)
 
         # Loop over tracks.
-        age_ind = self.predictions.index("loga")
+        age_ind = self._ageidx
         ageweights = np.zeros(len(self.libparams))
         for i, m in enumerate(self.gridpoints["mini"]):
             for j, z in enumerate(self.gridpoints["feh"]):
@@ -506,7 +503,6 @@ def get_sed(self, mini=1., eep=350., feh=0., av=0., rv=3.3, smf=0.,
         params2 = dict(zip(self.predictions, params_arr2))
 
         # Generate SED.
-        aidx = np.where(np.array(self.predictions) == 'loga')[0][0]
         mini_min = max(self.mini_bound, mini_bound)
         loga = params['loga']
         if loga <= loga_max:
@@ -520,7 +516,7 @@ def get_sed(self, mini=1., eep=350., feh=0., av=0., rv=3.3, smf=0.,
             # Generate predictions for secondary binary component.
             if eep2 is None:
                 # Convert loga to EEP.
-                eep2 = self.get_eep(loga, aidx, mini=mini, eep=eep,
+                eep2 = self.get_eep(loga, mini=mini, eep=eep,
                                     feh=feh, smf=smf, tol=tol)
             labels2 = {'mini': mini * smf, 'eep': eep2, 'feh': feh}
             labels2 = np.array([labels2[l] for l in self.labels])
@@ -548,7 +544,7 @@ def get_sed(self, mini=1., eep=350., feh=0., av=0., rv=3.3, smf=0.,
         else:
             return sed, params, params2
 
-    def get_eep(self, loga, aidx, mini=1., eep=350., feh=0., smf=0., tol=1e-3):
+    def get_eep(self, loga, mini=1., eep=350., feh=0., smf=0., tol=1e-3):
         """
         Compute the corresponding EEP for a particular age.
 
@@ -557,10 +553,6 @@ def get_eep(self, loga, aidx, mini=1., eep=350., feh=0., smf=0., tol=1e-3):
         loga : float
             The base-10 logarithm of the age.
 
-        aidx : int
-            The integer corresponding to the index of the `loga` prediction
-            (from `get_predictions`).
-
         mini : float, optional
             Initial mass in units of solar masses. Default is `1.`.
 
@@ -584,6 +576,8 @@ def get_eep(self, loga, aidx, mini=1., eep=350., feh=0., smf=0., tol=1e-3):
 
         """
 
+        aidx = self._ageidx  # index of age variable
+
         # Define loss function.
         def loss(x):
             return (self.get_predictions([mini * smf, x, feh])[aidx] - loga)**2
@@ -714,7 +708,6 @@ def make_grid(self, mini_grid=None, eep_grid=None, feh_grid=None,
                                                 feh_grid, smf_grid])),
                               dtype=ltype)
         Ngrid = len(self.grid_label)
-        Nsmf = len(smf_grid)
 
         # Generate SEDs on the grid.
         ptype = np.dtype([(n, np.float) for n in self.predictions])
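Because the `loga` prediction index is now cached as `self._ageidx` at construction, `get_eep` callers no longer look it up and pass it in themselves. A before/after sketch, where `mist` is a placeholder instance of the SED-making class in `brutus/seds.py`:

```python
# `mist` is a placeholder instance of the SED-making class in brutus/seds.py.
# Before this patch, callers located the `loga` index and passed it in:
#   aidx = np.where(np.array(mist.predictions) == 'loga')[0][0]
#   eep2 = mist.get_eep(9.7, aidx, mini=1., eep=350.)
# After this patch, the index is cached internally as `self._ageidx`:
eep2 = mist.get_eep(9.7, mini=1., eep=350., feh=0., smf=0., tol=1e-3)
```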
diff --git a/brutus/utils.py b/brutus/utils.py
index 1376be4..56b7d88 100644
--- a/brutus/utils.py
+++ b/brutus/utils.py
@@ -7,15 +7,11 @@
 """
 
 from __future__ import (print_function, division)
-import six
 from six.moves import range
 
 import sys
-import os
 import warnings
-import math
 import numpy as np
-import warnings
 import h5py
 
 from scipy.special import xlogy, gammaln
@@ -837,7 +833,6 @@ def photometric_offsets(phot, err, mask, models, idxs, reds, dreds, dists,
 
     # Initialize values.
     Nobj, Nfilt = phot.shape
-    Nmodels = len(models)
     Nsamps = idxs.shape[1]
     if sel is None:
         sel = np.ones(Nobj, dtype='bool')