diff --git a/OCL/src/OCLDetectorConstruction.cc b/OCL/src/OCLDetectorConstruction.cc
index a309894..4ea9058 100644
--- a/OCL/src/OCLDetectorConstruction.cc
+++ b/OCL/src/OCLDetectorConstruction.cc
@@ -300,6 +300,9 @@ void OCLDetectorConstruction::SetPlacementParameters()
 {
 	// G4double distColltoDet = 10*mm; // Distance between Collimator and Detector (surface to surface)
 
+	// Note that there is some machine imprecision here...
+	// Example: 359.999847*deg should of course have been 0.
+
 	//Beamline
 	// fOCLLaBr3_presence[0] = false;
 	// fOCLCollimator_presence[0] = false;
diff --git a/data/AnalyseSims.C b/data/AnalyseSims.C
index cc07d69..0b64edc 100644
--- a/data/AnalyseSims.C
+++ b/data/AnalyseSims.C
@@ -43,6 +43,7 @@ void AnalyseSims::Loop()
 
    if (fChain == 0) return; //aborts if it cannot find the tree/chain of trees
 
    TH1D *h1 = new TH1D("h1","Simulated electron energy deposition",4200,0,21);
+   TH2D *h1_all = new TH2D("h1_all","Simulated electron energy deposition per detector",4200,0,21,30,1,31);
    TH1D *h2 = new TH1D("h2","Simulated and folded energy deposition",4200,0,21);
 
    Double_t cSmooth[] = {2.03936976e-04, 6.82322078e-23, 3.76053110e-05}; // make sure cal is in same units (MeV or keV)
@@ -89,6 +90,37 @@ void AnalyseSims::Loop()
    if(EdepInCrystal29 > 0.) h1->Fill(EdepInCrystal29);
    if(EdepInCrystal30 > 0.) h1->Fill(EdepInCrystal30);
 
+   if(EdepInCrystal1 > 0.) h1_all->Fill(EdepInCrystal1, 1);
+   if(EdepInCrystal2 > 0.) h1_all->Fill(EdepInCrystal2, 2);
+   if(EdepInCrystal3 > 0.) h1_all->Fill(EdepInCrystal3, 3);
+   if(EdepInCrystal4 > 0.) h1_all->Fill(EdepInCrystal4, 4);
+   if(EdepInCrystal5 > 0.) h1_all->Fill(EdepInCrystal5, 5);
+   if(EdepInCrystal6 > 0.) h1_all->Fill(EdepInCrystal6, 6);
+   if(EdepInCrystal7 > 0.) h1_all->Fill(EdepInCrystal7, 7);
+   if(EdepInCrystal8 > 0.) h1_all->Fill(EdepInCrystal8, 8);
+   if(EdepInCrystal9 > 0.) h1_all->Fill(EdepInCrystal9, 9);
+   if(EdepInCrystal10 > 0.) h1_all->Fill(EdepInCrystal10, 10);
+   if(EdepInCrystal11 > 0.) h1_all->Fill(EdepInCrystal11, 11);
+   if(EdepInCrystal12 > 0.) h1_all->Fill(EdepInCrystal12, 12);
+   if(EdepInCrystal13 > 0.) h1_all->Fill(EdepInCrystal13, 13);
+   if(EdepInCrystal14 > 0.) h1_all->Fill(EdepInCrystal14, 14);
+   if(EdepInCrystal15 > 0.) h1_all->Fill(EdepInCrystal15, 15);
+   if(EdepInCrystal16 > 0.) h1_all->Fill(EdepInCrystal16, 16);
+   if(EdepInCrystal17 > 0.) h1_all->Fill(EdepInCrystal17, 17);
+   if(EdepInCrystal18 > 0.) h1_all->Fill(EdepInCrystal18, 18);
+   if(EdepInCrystal19 > 0.) h1_all->Fill(EdepInCrystal19, 19);
+   if(EdepInCrystal20 > 0.) h1_all->Fill(EdepInCrystal20, 20);
+   if(EdepInCrystal21 > 0.) h1_all->Fill(EdepInCrystal21, 21);
+   if(EdepInCrystal22 > 0.) h1_all->Fill(EdepInCrystal22, 22);
+   if(EdepInCrystal23 > 0.) h1_all->Fill(EdepInCrystal23, 23);
+   if(EdepInCrystal24 > 0.) h1_all->Fill(EdepInCrystal24, 24);
+   if(EdepInCrystal25 > 0.) h1_all->Fill(EdepInCrystal25, 25);
+   if(EdepInCrystal26 > 0.) h1_all->Fill(EdepInCrystal26, 26);
+   if(EdepInCrystal27 > 0.) h1_all->Fill(EdepInCrystal27, 27);
+   if(EdepInCrystal28 > 0.) h1_all->Fill(EdepInCrystal28, 28);
+   if(EdepInCrystal29 > 0.) h1_all->Fill(EdepInCrystal29, 29);
+   if(EdepInCrystal30 > 0.) h1_all->Fill(EdepInCrystal30, 30);
+
    // use the random number generator to sample from a gaussian with the parameters below
    if(EdepInCrystal1 > 0.) h2->Fill(r3->Gaus(EdepInCrystal1, sqrt(cSmooth[0] + cSmooth[1]*EdepInCrystal1 + cSmooth[2]*EdepInCrystal1*EdepInCrystal1)));
   if(EdepInCrystal2 > 0.) h2->Fill(r3->Gaus(EdepInCrystal2, sqrt(cSmooth[0] + cSmooth[1]*EdepInCrystal2 + cSmooth[2]*EdepInCrystal2*EdepInCrystal2)));
diff --git a/data/analysis/README.md b/data/analysis/README.md
new file mode 100644
index 0000000..f781ec5
--- /dev/null
+++ b/data/analysis/README.md
@@ -0,0 +1,27 @@
+# Response functions generated from the Oscar OCL model
+
+## General structure
+- It is assumed that the response functions are processed from the root trees
+  to a single histogram for all detectors combined, see the `from_geant` folder.
+  One might for example use `export_hist.C` for that. The naming convention is
+  `grid_{incidentEnergy}keV_n{numberEventsSimulated}.root.m`.
+  Note that the files might be too large (too many bins) for mama to read
+  properly; the postprocessing was therefore done with OMpy.
+- Postprocessing in `spec_to_matrix.py`: gets efficiencies, folds response functions
+  and generates response matrices
+- `figs/`: generated figures
+- `response_export`: exported response functions, in several formats
+- `efficiencies.csv`: efficiency results from `spec_to_matrix.py`
+- The `compare[...]` scripts were used to compare the experimental
+  spectra to the geant4 simulations. The code is not *cleaned up*.
+
+
+## Naming convention for response matrices:
+- `_unnorm`: plain response function as calculated from geant4
+  (however, here and below, folded with the energy response)
+- `_norm_efficiency`: spectra for each incident energy are normalized to the
+  total efficiency (equals division by the number of incident events; see the
+  sketch below)
+- `_norm_1`: spectra for each incident energy are normalized to 1
+- `squarecut_50keV_10.000keV`: square response matrix, with a lower (upper) cut
+  of 50 keV (10 MeV). These are also small enough to be loaded into mama.
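+
+A minimal sketch of these conventions (assuming the response is a 2D numpy
+array with one row per incident energy; `response` and `n_incident` are
+placeholder names, not taken from the scripts above):
+
+```python
+import numpy as np
+
+def normalize(response: np.ndarray, n_incident: int):
+    unnorm = response                          # `_unnorm`
+    norm_efficiency = response / n_incident    # `_norm_efficiency`
+    # `_norm_1`: each incident-energy spectrum (row) sums to 1
+    norm_1 = response / response.sum(axis=1, keepdims=True)
+    return unnorm, norm_efficiency, norm_1
+```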
diff --git a/data/analysis/compare.py b/data/analysis/compare.py
new file mode 100644
index 0000000..f4a6f0d
--- /dev/null
+++ b/data/analysis/compare.py
@@ -0,0 +1,869 @@
+import numpy as np
+import matplotlib.pyplot as plt
+from scipy.optimize import curve_fit
+from scipy.ndimage import gaussian_filter1d
+from scipy.interpolate import interp1d
+from scipy.special import erfc
+from tabulate import tabulate
+from typing import Dict, Tuple, Optional, Callable
+
+import fnmatch
+import os
+
+# from uncertainties import ufloat
+from uncertainties import unumpy, ufloat
+
+from ompy import Matrix
+
+
+class Recalibrate:
+    def __init__(self):
+        self._popt = self.get_coefficients()
+
+    def __call__(self, x):
+        return self.func(x, *self._popt)
+
+    @staticmethod
+    def func(x, *p):
+        pol = np.poly1d(p[0:3])
+        x_less = pol(x)
+        pol = np.poly1d(p[3:5])
+        x_larger = pol(x)
+        return np.where(x < 500, x_less, x_larger)
+
+    @staticmethod
+    def get_coefficients(plot=False):
+        data = np.loadtxt("coords_Eu.txt")
+        x1 = data[1::2, 0]
+        y1 = data[0::2, 0]
+        if plot:
+            fig, ax = plt.subplots()
+            ax.plot(x1, y1, "o")
+
+        data = np.loadtxt("coords_Co.txt")
+        x2 = data[1::2, 0]
+        y2 = data[0::2, 0]
+        if plot:
+            ax.plot(x2, y2, "o")
+
+        x = np.append(x1, x2)
+        y = np.append(y1, y2)
+
+        x = np.sort(x)
+        y = np.sort(y)
+
+        res = np.polyfit(x, y, 1)
+        p = np.poly1d(res)
+        # print(p)
+        if plot:
+            ax.plot(x, p(x), "-", alpha=0.2)
+
+        res = np.polyfit(x, y, 2)
+        p = np.poly1d(res)
+        # print(p)
+        if plot:
+            ax.plot(x, p(x), "--", alpha=0.2)
+
+        popt, pcov = curve_fit(Recalibrate.func, x, y, p0=[-7e7, 1, -1, 1, 1])
+        # def func(x, *p):
+        #     pol = np.poly1d(p[0:3])
+        #     x_less = pol(x) + p[3]*np.sqrt(x)
+        #     pol = np.poly1d(p[4:6])
+        #     x_larger = pol(x)
+        #     return np.where(x<500, x_less, x_larger)
+
+        # popt, pcov = curve_fit(func, x, y, p0=[-7e7, 1, -1, 1e-1, 1, 1])
+
+        # print(popt)
+        if plot:
+            ax.plot(x, Recalibrate.func(x, *popt), "-", alpha=0.2)
+
+        return popt
+
+
+calibrate = Recalibrate()
+
+
+class SpectrumComparison:
+
+    def __init__(self):
+        pass
+
+    def get_data(self, fname_sim, fname_exp, fname_bg, fwhm_pars,
+                 measure_time_exp, measure_time_bg, idet=0,
+                 recalibrate=True):
+
+        sim_all = Matrix(path=fname_sim)
+        sim = np.zeros((len(sim_all.Eg), 2))
+        sim[:, 0] = sim_all.Eg
+        sim[:, 0] *= 1000  # MeV to keV
+        if isinstance(idet, int):
+            sim[:, 1] = sim_all.values[idet, :]
+        else:
+            sim[:, 1] = sim_all.values[idet, :].sum(axis=0)
+        sim = do_smooth(sim, fwhm_pars)
+        self.fwhm_pars = fwhm_pars
+
+        # exp_and_bg = np.loadtxt(fname_exp)
+        bg_all = Matrix(path=fname_bg)
+        bg = np.zeros((len(bg_all.Eg), 2))
+        bg[:, 0] = bg_all.Eg
+        if isinstance(idet, int):
+            bg[:, 1] = bg_all.values[idet, :]
+        else:
+            bg[:, 1] = bg_all.values[idet, :].sum(axis=0)
+
+        # subtract bg from experiment
+        exp_all = Matrix(path=fname_exp)
+        exp = np.zeros((len(exp_all.Eg), 2))
+        exp[:, 0] = exp_all.Eg
+        xexp = exp_all.Eg
+        if isinstance(idet, int):
+            yexp = exp_all.values[idet, :]
+        else:
+            yexp = exp_all.values[idet, :].sum(axis=0)
+
+        if recalibrate:
+            xexp = calibrate(xexp)
+            exp[:, 0] = xexp
+
+        # exp = np.copy(exp_and_bg)
+        # xexp = exp[:, 0]
+        # yexp = exp[:, 1]
+        uyexp = unumpy.uarray(yexp, np.sqrt(yexp))
+        uybg = unumpy.uarray(bg[:, 1], np.sqrt(bg[:, 1]))
+
+        bg_ratio = measure_time_exp / measure_time_bg
+        uyexp = uyexp - uybg * bg_ratio
+        exp[:, 1] = unumpy.nominal_values(uyexp)
+
+        # fig, ax = plt.subplots()
+        # ax.semilogy(xexp, exp[:, 1]/70)
+        # ax.plot(sim[:, 0], sim[:, 1])
+
+        self.sim = sim
+        self.exp = exp
+        self.xexp = xexp
+        self.xsim = sim[:, 0]
+        self.uyexp = uyexp
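+
+    # Typical use (cf. get_fom in compare_all_sources.py / compare_inbeam.py):
+    #   sc = SpectrumComparison()
+    #   sc.get_data(fname_sim, fname_exp, fname_bg, fwhm_pars, t_exp, t_bg)
+    #   sc.scale_sim_to_exp_area(E1, E2)  # or ..._fit / ..._manual
+    #   chi2, rel_diff, rel_diff_smooth = sc.fom(Elow, Ehigh)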
+
+    def scale_sim_to_exp_fit(self, E1, E2):
+        exp = self.exp
+        sim = self.sim
+        (popt_exp, pcov_exp), xfit = do_fit(E1, E2, exp,
+                                            fwhm_pars=self.fwhm_pars)
+        (popt_sim, pcov_sim), xfit = do_fit(E1, E2, sim,
+                                            fwhm_pars=self.fwhm_pars)
+
+        ratio = popt_exp / popt_sim
+        ratio = ratio[0]
+        # print(popt_exp[0])
+        # alternative: scale the number of counts (above some threshold)
+        # print("ratio; scaling all counts", ratio,
+        #       exp[100:, 1].sum()/sim[20:, 1].sum()/5)
+        # ratio = exp[100:, 1].sum()/sim[20:, 1].sum()/5
+
+        # print(sim)
+        # xsim = sim[:, 0]
+        ysim = sim[:, 1]
+        uysim_scaled = unumpy.uarray(ysim, np.sqrt(ysim))
+        uysim_scaled *= ratio
+
+        self.uysim_scaled = uysim_scaled
+        self.fsim_scaled = uinterp1D(self.xsim, uysim_scaled)
+        self.fexp = uinterp1D(self.xexp, self.uyexp)
+
+        self.xfit = {"peak_fit": xfit}
+        self.scale_factor = ratio
+        self.popt_sim = popt_sim
+        self.popt_exp = popt_exp
+
+    def scale_sim_to_exp_area(self, E1, E2):
+        exp = self.exp
+        sim = self.sim
+
+        counts_exp = self.get_area(exp, E1, E2)
+        counts_sim = self.get_area(sim, E1, E2)
+
+        binwidth_sim = sim[1, 0] - sim[0, 0]
+        binwidth_exp = exp[1, 0] - exp[0, 0]
+        ratio = counts_exp / counts_sim * (binwidth_exp/binwidth_sim)
+
+        # print(sim)
+        # xsim = sim[:, 0]
+        ysim = sim[:, 1]
+        uysim_scaled = unumpy.uarray(ysim, np.sqrt(ysim))
+        uysim_scaled *= ratio
+
+        self.uysim_scaled = uysim_scaled
+        self.fsim_scaled = uinterp1D(self.xsim, uysim_scaled)
+        self.fexp = uinterp1D(self.xexp, self.uyexp)
+
+        self.scale_factor = ratio
+        self.xfit = {"area": [E1, E2]}
+
+    @staticmethod
+    def get_area(arr, E1, E2):
+        iE1 = np.abs(arr[:, 0]-E1).argmin()
+        iE2 = np.abs(arr[:, 0]-E2).argmin()
+        return arr[iE1:iE2+1, 1].sum()
+
+    def scale_sim_to_exp_manual(self, ratio):
+        exp = self.exp
+        sim = self.sim
+
+        # print(sim)
+        # xsim = sim[:, 0]
+        ysim = sim[:, 1]
+        uysim_scaled = unumpy.uarray(ysim, np.sqrt(ysim))
+        uysim_scaled *= ratio
+
+        self.uysim_scaled = uysim_scaled
+        self.fsim_scaled = uinterp1D(self.xsim, uysim_scaled)
+        self.fexp = uinterp1D(self.xexp, self.uyexp)
+
+        self.scale_factor = ratio
+        self.xfit = {"ratio": ratio}
+
+    def get_chi2(self):
+        xexp = self.xexp
+        fexp = self.fexp
+        fsim_scaled = self.fsim_scaled
+
+        yexp = fexp(xexp)
+        # yexp[yexp == 0] = np.nan
+
+        chi2 = (unumpy.nominal_values(yexp) -
+                unumpy.nominal_values(fsim_scaled(xexp)))**2
+        chi2 /= unumpy.std_devs(yexp)**2 \
+            + unumpy.std_devs(fsim_scaled(xexp))**2
+        self.chi2 = chi2
+        return chi2
+
+    def get_rel_diff(self, smooth_window_keV=20):
+        xexp = self.xexp
+        fexp = self.fexp
+        fsim_scaled = self.fsim_scaled
+
+        yexp = fexp(xexp)
+        yexp[unumpy.nominal_values(yexp) == 0] = ufloat(np.nan, np.nan)
+
+        rel_diff = (yexp - fsim_scaled(xexp)) / yexp * 100
+        rel_diff_err = unumpy.std_devs(rel_diff)
+        rel_diff = unumpy.nominal_values(rel_diff)
+
+        rel_diff_smooth = unumpy.nominal_values(yexp - fsim_scaled(xexp))
+        window_len = smooth_window_keV / (xexp[1] - xexp[0])
+        rel_diff_smooth = smooth(rel_diff_smooth, window_len=int(window_len-1))
+        rel_diff_smooth /= unumpy.nominal_values(yexp) / 100
+
+        self.rel_diff = rel_diff
+        self.rel_diff_err = rel_diff_err
+        self.rel_diff_smooth = rel_diff_smooth
+        return rel_diff, rel_diff_smooth
+
+    def plots(self, xmax=None, ymin=1.2e2, ymax=1e6, title=None,
+              plot_smoothed=True):
+
+        xexp = self.xexp
+
+        if not hasattr(self, 'rel_diff'):
+            self.get_rel_diff()
+
+        # fig, (ax, ax2) = plt.subplots(2, 1, sharex=True,
+        #                               gridspec_kw={'hspace': 0.0})
+        fig = plt.figure(figsize=[6.4, 3.5], dpi=200)
+        ax = plt.subplot2grid((3, 1), (0, 0), rowspan=2)
+        ax2 = plt.subplot2grid((3, 1), (2, 0), sharex=ax)
+        fig.subplots_adjust(hspace=0, bottom=0.145)
+        plt.setp(ax.get_xticklabels(), visible=False)
+
+        ax.semilogy(xexp, self.exp[:, 1], label="exp.")
+        ax.plot(self.xsim, unumpy.nominal_values(self.uysim_scaled),
+                label="sim., scaled")
+
+        if "peak_fit" in self.xfit:
+            xfit = self.xfit["peak_fit"]
+            ax.plot(xfit, gaus_with_expbg(xfit, *self.popt_exp),
+                    label="fit_to_exp")
+            ax.plot(xfit,
+                    gaus_with_expbg(xfit, *self.popt_sim) * self.scale_factor,
+                    label="fit_to_sim")
+        elif "area" in self.xfit:
+            E1, E2 = self.xfit["area"]
+            ax.axvspan(E1, E2, color="k", alpha=0.2, lw=0,
+                       label="scaling area")
+        elif "ratio" in self.xfit:
+            pass
+
+        # y0 = unumpy.nominal_values(self.rel_diff)
+        # yerr = unumpy.std_devs(self.rel_diff)
+        y0 = self.rel_diff
+        yerr = self.rel_diff_err
+        ax2.fill_between(xexp, y0 - yerr, y0 + yerr, color="k", alpha=0.2)
+        ax2.plot(xexp, y0, c="k", alpha=0.6)
+        if plot_smoothed:
+            ax2.plot(xexp, self.rel_diff_smooth, "C5--", alpha=0.8,
+                     label="smoothed")
+
+        ax2.axhline(y=0, color="r", lw=1)
+        # ax2.set_yscale('symlog')
+        # ax2.set_xlim(0, 1400)
+
+        ax.set_yscale("log")
+        ax.set_xlabel("Energy [keV]")
+        ax.set_ylabel("counts / bin")
+        # ax.legend(loc="best")
+        ax.set_xlim(0, xmax)
+        ax.set_ylim(ymin, ymax)
+
+        # ax2.legend(loc="best")
+        ax2.set_ylim(-18, 18)
+        ax2.set_xlabel("Energy [keV]")
+        ax2.set_ylabel(r"$\frac{\mathrm{exp.}-\mathrm{sim.}}"
+                       r"{\mathrm{exp.}} [\%]$")
+
+        ax2.tick_params(labelbottom=True, top=True)
+
+        if title is not None:
+            fig.suptitle(title, fontsize=12)
+        return fig, (ax, ax2)
+
+    @staticmethod
+    def cut_array(xarr, yarr, Elow, Ehigh):
+        assert Elow < Ehigh, "Elow has to be smaller than Ehigh"
+        idx1 = np.abs(xarr - Elow).argmin()
+        idx2 = np.abs(xarr - Ehigh).argmin()
+        return yarr[idx1:idx2 + 1]
+
+    def fom(self, Ecompare_low, Ecompare_high, printout=True):
+        if not hasattr(self, 'rel_diff'):
+            self.get_rel_diff()
+
+        self.get_chi2()
+        chi2 = np.copy(self.chi2)
+        chi2 = self.cut_array(self.xexp, chi2, Ecompare_low, Ecompare_high)
+        chi2 = chi2.sum()
+        rel_diff = self.cut_array(self.xexp, self.rel_diff, Ecompare_low,
+                                  Ecompare_high)
+        rel_diff = abs(rel_diff).mean()
+        rel_diff_smooth = self.cut_array(self.xexp, self.rel_diff_smooth,
+                                         Ecompare_low, Ecompare_high)
+        rel_diff_smooth = abs(rel_diff_smooth).mean()
+
+        if printout:
+            print("chi2 between {} and {} keV: {}".
+                  format(Ecompare_low, Ecompare_high, chi2))
+            print("Mean abs(rel_diff) between {} and {} keV: {:.2f}%"
+                  .format(Ecompare_low, Ecompare_high, rel_diff))
+            print("Mean smoothed abs(rel_diff) between {} and {} keV: {:.2f}%"
+                  .format(Ecompare_low, Ecompare_high, rel_diff_smooth))
+
+        return chi2, rel_diff, rel_diff_smooth
+
+
+def fFWHM(E, p):
+    # FWHM in keV
+    return np.sqrt(p[0] + p[1] * E + p[2] * E**2)
+
+
+def fsigma(E, p):
+    # sigma in keV; FWHM = 2*sqrt(2*ln 2)*sigma
+    return fFWHM(E, p) / (2 * np.sqrt(2 * np.log(2)))
+
+
+def do_smooth(data, pars):
+    data = np.copy(data)
+    Ebins = data[:, 0]
+    counts = data[:, 1]
+    nbins = len(data)
+    smoothed = np.zeros(nbins)
+    binwidth = Ebins[1] - Ebins[0]
+    for i, E in enumerate(Ebins):
+        if E > 0:
+            # FWHM = fFWHM(E, pars)/binwidth
+            # sigma = np.sqrt(pars[0] + pars[1]*E + pars[2]*E**2)
+            # sigma = FWHM/(2*np.sqrt(2*np.log(2)))
+            sigma = fsigma(E, pars) / binwidth
+            # smooth each bin in the spectrum: create new "spectrum",
+            # which is filled only at E
+            if counts[i] != 0:  # smooth this!
+                counts_ = np.zeros(nbins)
+                counts_[i] = counts[i]
+                smoothed += gaussian_filter1d(counts_, sigma)
+    data[:, 1] = smoothed
+    return data
+
+
+def exp_scaled(x, scale, decay_const):
+    return scale * np.exp(-decay_const * x)
+
+
+def gauss(x, scale, loc, sigma):
+    return scale / (2. * np.pi * sigma) * np.exp(-(x - loc)**2 / (2 * sigma**2))
+
+
+def gaus_with_expbg(x, scale, loc, sigma, bg_scale, bg_decay):
+    line = gauss(x, scale, loc, sigma)
+    bg = exp_scaled(x, bg_scale, bg_decay)
+    return line + bg
+
+
+def guess_p0(data, fitfun=gaus_with_expbg, fwhm_pars=None):
+    assert fitfun == gaus_with_expbg, "Other Bg estimation not implemented"
+
+    data = np.copy(data)
+    # gaus_with_expbg
+    # scale, loc, sigma, bg_scale, bg_decay
+    x1, y1 = data[0, :]
+    x2, y2 = data[-1, :]
+    if y1 < 1e-4:  # workaround for exp data with too much bg
+        y1 = 1e-4
+    if y2 < 1e-4:  # workaround for exp data with too much bg
+        y2 = 1e-4
+    bg_decay = (np.log(y2) - np.log(y1)) / (x2 - x1)
+    bg_decay = -bg_decay
+    bg_scale = y1 / np.exp(-bg_decay * x1)
+
+    bg = exp_scaled(data[:, 0], bg_scale, bg_decay)
+    data[:, 1] -= bg
+
+    scale = data[:, 1].max()
+    loc = data[data[:, 1] == scale].flatten()[0]
+    sigma = fsigma(loc, fwhm_pars)
+
+    # peak height to area
+    scale *= (2. * np.pi * sigma)
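+    # (gauss() above divides by 2*pi*sigma, so `scale` here is the peak
+    #  height times 2*pi*sigma; note this is the model's own convention,
+    #  not the usual Gaussian area, which would use sqrt(2*pi)*sigma)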
+    return [scale, loc, sigma, bg_scale, bg_decay]
+
+
+def do_fit(Elow, Ehigh, data,
+           p0=None,
+           fitfunc=gaus_with_expbg, bounds="positive",
+           fwhm_pars=None):
+    idx1 = np.abs(data[:, 0] - Elow).argmin()
+    idx2 = np.abs(data[:, 0] - Ehigh).argmin()
+    xdata = data[idx1:idx2 + 1, 0]
+    ydata = data[idx1:idx2 + 1, 1]
+
+    if p0 is None:
+        p0 = guess_p0(np.c_[xdata, ydata], fwhm_pars=fwhm_pars)
+        # plt.plot(xdata, ydata)
+        # plt.plot(xdata, fitfunc(xdata, *p0))
+        # plt.show()
+
+    if bounds == "positive":
+        p_up = np.copy(p0)
+        p_up[:] = np.inf
+        bounds = (np.zeros(len(p0)), p_up)
+        bounds[0][1] = Elow
+        bounds[1][1] = Ehigh
+
+    # convert sigma to bin-width
+    # binwidth = data[1, 0] - data[0, 0]
+    # p0[2] /= binwidth
+    return curve_fit(fitfunc, xdata, ydata, p0=p0, bounds=bounds), xdata
+
+
+def uinterp1D(x, uvar, fill_value="extrapolate"):
+    fnom = interp1d(x, unumpy.nominal_values(uvar), fill_value=fill_value)
+    fstd = interp1d(x, unumpy.std_devs(uvar), fill_value=fill_value)
+
+    def f(var):
+        std = fstd(var)
+        std[std < 0] = np.nan
+        return unumpy.uarray(fnom(var), std)
+    return f
+
+
+def smooth(x, window_len, window='hanning', trim=True):
+    """smooth the data using a window with requested size.
+
+    This method is based on the convolution of a scaled window with the signal.
+    The signal is prepared by introducing reflected copies of the signal
+    (with the window size) in both ends so that transient parts are minimized
+    in the beginning and end part of the output signal.
+
+    input:
+        x: the input signal
+        window_len: the dimension of the smoothing window;
+            should be an odd integer
+        window: the type of window from 'flat', 'hanning',
+            'hamming', 'bartlett', 'blackman'
+            flat window will produce a moving average smoothing.
+        trim: trim output so same length as input
+
+    output:
+        the smoothed signal
+
+    example:
+
+    t = np.arange(-2, 2, 0.1)
+    x = np.sin(t) + np.random.randn(len(t))*0.1
+    y = smooth(x, window_len=11)
+
+    see also:
+
+    numpy.hanning, numpy.hamming, numpy.bartlett,
+    numpy.blackman, numpy.convolve
+    scipy.signal.lfilter
+
+    TODO: the window parameter could be the window itself if an array instead
+        of a string
+    NOTE: length(output) != length(input), to correct this:
+        return y[(window_len/2-1):-(window_len/2)] instead of just y.
+    """
+
+    if x.ndim != 1:
+        raise ValueError("smooth only accepts 1 dimension arrays.")
+
+    if x.size < window_len:
+        raise ValueError("Input vector needs to be bigger than window size.")
+
+    if window_len < 3:
+        return x
+
+    if window not in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']:
+        raise ValueError(
+            "Window must be one of 'flat', 'hanning', 'hamming', 'bartlett',"
+            " 'blackman'")
+
+    s = np.r_[x[window_len - 1:0:-1], x, x[-2:-window_len - 1:-1]]
+    # print(len(s))
+    if window == 'flat':  # moving average
+        w = np.ones(window_len, 'd')
+    else:
+        w = getattr(np, window)(window_len)
+
+    y = np.convolve(w / w.sum(), s, mode='valid')
+    if trim:
+        if int(window_len) % 2:  # odd
+            upper = -int(window_len / 2) - 1
+        else:
+            upper = -int(window_len / 2)
+        return y[int(window_len / 2 - 1):upper]
+    else:
+        return y
+
+
+class PeakFitter:
+    def __init__(self, x: np.ndarray, y: np.ndarray,
+                 yerr: Optional[np.ndarray] = None):
+        self.x = x.copy()
+        self.y = y.copy()
+
+        if yerr is None:
+            self.yerr = None
+        else:
+            self.yerr = yerr.copy()
+            self.yerr[self.y == 0] = np.inf
+
+        self._xfit = None  # make it easier to retrieve current fit x values
+
+    def fitGausBg(self, pre_region: Tuple[float, float],
+                  post_region: Tuple[float, float]
+                  ) -> Tuple[np.ndarray, np.ndarray]:
+        """Fit a gamma peak with a gaussian on top of a const. bg
+
+        Args:
+            pre_region: Energy region with linear spectra on the left
+                side of the peak
+            post_region: Energy region with linear spectra on the right
+                side of the peak
+
+        Returns:
+            popt, cov: optimal parameters and covariance
+        """
+        x = self.x
+        y = self.y
+
+        pre_slice, peak_slice, post_slice, fit_slice = \
+            self.get_fit_slices(pre_region, post_region)
+
+        # pre_spec = y[pre_slice]
+        peak_spec = y[peak_slice]
+        # post_spec = y[post_slice]
+
+        # Estimate the mean, std and constant
+        peak_mean = np.sum(x[peak_slice]*peak_spec)/np.sum(peak_spec)
+        peak_var = np.sum(peak_spec*x[peak_slice]**2)/np.sum(peak_spec)
+        peak_std = np.sqrt(peak_var - peak_mean**2)
+
+        # We estimate the gauss constant from the height found at the mean
+        peak_const = np.max(peak_spec) / self.gaus(peak_mean, 1., peak_mean,
+                                                   peak_std)
+
+        # pol_estimate = np.polyfit(np.append(x[pre_slice], x[post_slice]),
+        #                           np.append(y[pre_slice], y[post_slice]), 0)
+
+        # Calculate the curve fitting
+        initial_guess = [peak_const, peak_mean, peak_std,
+                         y[post_slice].mean()]
+        yerr = self.yerr[fit_slice] if self.yerr is not None else None
+        popt, cov = curve_fit(self.gaus_bg, x[fit_slice], y[fit_slice],
+                              p0=initial_guess, sigma=yerr)
+        return popt, cov
+
+    def fitDoubleGausBg(self, pre_region: Tuple[float, float],
+                        post_region: Tuple[float, float],
+                        p0_E: Tuple[float, float],
+                        p0_sigma=np.array([10., 10.]),
+                        ) -> Tuple[np.ndarray, np.ndarray]:
+        """Fit two gaussians on top of a const. bg
+
+        Args:
+            pre_region: Energy region with linear spectra on the left
+                side of the peak
+            post_region: Energy region with linear spectra on the right
+                side of the peak
+            p0_E: Initial guess for peaks
+            p0_sigma: Initial guess for sigma
+
+        Returns:
+            popt, cov: optimal parameters and covariance
+        """
+        x = self.x
+        y = self.y
+
+        pre_slice, peak_slice, post_slice, fit_slice = \
+            self.get_fit_slices(pre_region, post_region)
+
+        peak_spec = y[peak_slice]
+
+        # We estimate the constant from the height found at the mean
+        p0_const = np.max(peak_spec) / self.gaus(p0_E[0], 1., p0_E[0],
+                                                 p0_sigma[0])
+        p0_const2 = np.max(peak_spec) / self.gaus(p0_E[1], 1., p0_E[1],
+                                                  p0_sigma[1])
+
+        # pol_estimate = np.polyfit(np.append(x[pre_slice], x[post_slice]),
+        #                           np.append(y[pre_slice], y[post_slice]), 0)
+
+        # Calculate the curve fitting
+        initial_guess = [p0_const, p0_E[0], p0_sigma[0],
+                         p0_const2, p0_E[1], p0_sigma[1],
+                         y[post_slice].mean()]
+        yerr = self.yerr[fit_slice] if self.yerr is not None else None
+        popt, cov = curve_fit(self.doublegaus_bg, x[fit_slice], y[fit_slice],
+                              p0=initial_guess, sigma=yerr)
+        return popt, cov
+
+    def fitGausBgStep(self, pre_region: Tuple[float, float],
+                      post_region: Tuple[float, float]
+                      ) -> Tuple[np.ndarray, np.ndarray]:
+        """Fit a gamma peak with a gaussian on top of a const. bg + step fct
+
+        Args:
+            pre_region: Energy region with linear spectra on the left
+                side of the peak
+            post_region: Energy region with linear spectra on the right
+                side of the peak
+        """
+        x = self.x
+        y = self.y
+
+        pre_slice, peak_slice, post_slice, fit_slice = \
+            self.get_fit_slices(pre_region, post_region)
+
+        popt_gaus, _ = self.fitGausBg(pre_region, post_region)
+        # Calculate the curve fitting
+        p0_step_constant = 1000
+        initial_guess = [*popt_gaus, p0_step_constant]
+        yerr = self.yerr[fit_slice] if self.yerr is not None else None
+        popt, cov = curve_fit(self.gaus_bg_step, x[fit_slice], y[fit_slice],
+                              p0=initial_guess, sigma=yerr,
+                              # bounds=[0, np.inf]
+                              )
+        return popt, cov
+
+    def fitGausStep(self, pre_region: Tuple[float, float],
+                    post_region: Tuple[float, float]
+                    ) -> Tuple[np.ndarray, np.ndarray]:
+        """Fit a gamma peak with a gaussian and a step fct (no const. bg)
+
+        Args:
+            pre_region: Energy region with linear spectra on the left
+                side of the peak
+            post_region: Energy region with linear spectra on the right
+                side of the peak
+        """
+        x = self.x
+        y = self.y
+
+        pre_slice, peak_slice, post_slice, fit_slice = \
+            self.get_fit_slices(pre_region, post_region)
+
+        popt_gaus, _ = self.fitGausBg(pre_region, post_region)
+        # Calculate the curve fitting
+        p0_step_constant = 1000
+        initial_guess = [*popt_gaus[:-1], p0_step_constant]
+        yerr = self.yerr[fit_slice] if self.yerr is not None else None
+        popt, cov = curve_fit(self.gaus_step, x[fit_slice], y[fit_slice],
+                              p0=initial_guess, sigma=yerr,
+                              # bounds=[0, np.inf]
+                              )
+        return popt, cov
+
+    def fitDoubleGausBgStep(self, pre_region: Tuple[float, float],
+                            post_region: Tuple[float, float],
+                            p0_E: Tuple[float, float],
+                            p0_sigma=np.array([10., 10.]),
+                            ) -> Tuple[np.ndarray, np.ndarray]:
+        """Fit two gaussians on top of a const. bg + step fct
+
+        Args:
+            pre_region: Energy region with linear spectra on the left
+                side of the peak
+            post_region: Energy region with linear spectra on the right
+                side of the peak
+            p0_E: Initial guess for peaks
+            p0_sigma: Initial guess for sigma
+        """
+        x = self.x
+        y = self.y
+
+        pre_slice, peak_slice, post_slice, fit_slice = \
+            self.get_fit_slices(pre_region, post_region)
+
+        popt_gaus, _ = self.fitDoubleGausBg(pre_region, post_region,
+                                            p0_E, p0_sigma)
+        # Calculate the curve fitting
+        p0_step_constant = 0.1
+        initial_guess = [*popt_gaus, p0_step_constant]
+        yerr = self.yerr[fit_slice] if self.yerr is not None else None
+
+        popt, cov = curve_fit(self.doublegaus_bg_step,
+                              x[fit_slice], y[fit_slice],
+                              p0=initial_guess, sigma=yerr,
+                              # bounds=[0, np.inf]
+                              )
+        return popt, cov
+
+    def fitGausLinBgStep(self, pre_region: Tuple[float, float],
+                         post_region: Tuple[float, float]
+                         ) -> Tuple[np.ndarray, np.ndarray]:
+        """Fit a gamma peak with a gaussian on top of a linear bg + step fct
+
+        Args:
+            pre_region: Energy region with linear spectra on the left
+                side of the peak
+            post_region: Energy region with linear spectra on the right
+                side of the peak
+        """
+        x = self.x
+        y = self.y
+
+        pre_slice, peak_slice, post_slice, fit_slice = \
+            self.get_fit_slices(pre_region, post_region)
+
+        popt_gaus, _ = self.fitGausBgStep(pre_region, post_region)
+        # Calculate the curve fitting
+        pol_estimate = np.polyfit(np.append(x[pre_slice], x[post_slice]),
+                                  np.append(y[pre_slice], y[post_slice]), 1)[0]
+
+        initial_guess = [*popt_gaus[:-1], pol_estimate, popt_gaus[-1]]
+        yerr = self.yerr[fit_slice] if self.yerr is not None else None
+        popt, cov = curve_fit(self.gaus_linbg_step, x[fit_slice], y[fit_slice],
+                              p0=initial_guess, sigma=yerr,
+                              # bounds=[0, np.inf]
+                              )
+        return popt, cov
+
+    def get_fit_slices(self, pre_region: Tuple[float, float],
+                       post_region: Tuple[float, float]):
+        """ Extract fit slices from tuples of Emin/Emax before / after peak
+
+        Args:
+            pre_region: Energy region with linear spectra on the left
+                side of the peak
+            post_region: Energy region with linear spectra on the right
+                side of the peak
+
+        Returns:
+            pre_slice, peak_slice, post_slice, fit_slice
+        """
+        x = self.x
+
+        def ix(x0):
+            return (np.abs(x-x0)).argmin()
+
+        pre_slice = slice(ix(pre_region[0]), ix(pre_region[1])+1)
+        peak_slice = slice(ix(pre_region[1])+1, ix(post_region[0]))
+        post_slice = slice(ix(post_region[0]), ix(post_region[1])+1)
+        fit_slice = slice(ix(pre_region[0]), ix(post_region[1])+1)
+
+        self._xfit = x[fit_slice]
+        return pre_slice, peak_slice, post_slice, fit_slice
+
+    @staticmethod
+    def gaus(x, const, mean, std):
+        """ Gaussian """
+        return const/(std*np.sqrt(2*np.pi)) * np.exp(-0.5*((x-mean)/std)**2)
+
+    @staticmethod
+    def smoothstep(x, const, mean, std):
+        """ Smooth step """
+        return const * erfc((x - mean) / (np.sqrt(2)*std))
+
+    @staticmethod
+    def gaus_bg(x, gaus_const, mean, std, offset):
+        """ Gauss and constant background """
+        return PeakFitter.gaus(x, gaus_const, mean, std) + offset
+
+    @staticmethod
+    def doublegaus_bg(x, gaus_const, mean, std,
+                      gaus_const2, mean2, std2, offset):
+        """ Two Gaussians and constant background """
+        return (PeakFitter.gaus(x, gaus_const, mean, std)
+                + PeakFitter.gaus(x, gaus_const2, mean2, std2) + offset)
+
+    @staticmethod
+    def gaus_linbg(x, gaus_const, mean, std, offset, bgslope):
+        """ Gauss and linear background """
+        return PeakFitter.gaus(x, gaus_const, mean, std) + x*bgslope + offset
+
+    @staticmethod
+    def gaus_bg_step(x, gaus_const, mean, std, offset, step_const):
+        """ Gauss and constant background and step function """
+        gaus_bg = PeakFitter.gaus_bg(x, gaus_const, mean, std, offset)
+        step = PeakFitter.smoothstep(x, step_const, mean, std)
+        return gaus_bg + step
+
+    @staticmethod
+    def gaus_step(x, gaus_const, mean, std, step_const):
+        """ Gauss and step function """
+        gaus = PeakFitter.gaus(x, gaus_const, mean, std)
+        step = PeakFitter.smoothstep(x, step_const, mean, std)
+        return gaus + step
+
+    @staticmethod
+    def doublegaus_bg_step(x, gaus_const, mean, std, gaus_const2, mean2, std2,
+                           offset, step_const):
+        """ Two Gaussians and constant background and step function """
+        doublegaus_bg = PeakFitter.doublegaus_bg(x, gaus_const, mean, std,
+                                                 gaus_const2, mean2, std2,
+                                                 offset)
+        step = PeakFitter.smoothstep(x, step_const, mean, std)
+        return doublegaus_bg + step
+
+    @staticmethod
+    def gaus_linbg_step(x, gaus_const, mean, std, offset, bgslope,
+                        step_const):
+        """ Gauss and linear background and step function """
+        gaus_linbg = PeakFitter.gaus_linbg(x, gaus_const, mean, std, offset,
+                                           bgslope)
+        step = PeakFitter.smoothstep(x, step_const, mean, std)
+        return gaus_linbg + step
diff --git a/data/analysis/compare_all_sources.py b/data/analysis/compare_all_sources.py
new file mode 100644
index 0000000..449914a
--- /dev/null
+++ b/data/analysis/compare_all_sources.py
@@ -0,0 +1,640 @@
+import numpy as np
+import matplotlib.pyplot as plt
+from tabulate import tabulate
+import fnmatch
+import os
+import re
+from tqdm import tqdm
+from datetime import datetime
+import pandas as pd
+
+from uncertainties import unumpy, ufloat, correlated_values
+import sys
+
+# sys.path.append("../exp")
+
+from compare import SpectrumComparison, fFWHM, PeakFitter
+
+from scipy.optimize import curve_fit
+
+
+def save_coords_from_click(fig, fname="coords.txt"):
+    try:
+        coords = np.loadtxt(fname).tolist()
+        print("loading file (instead)")
+        # plt.show()
+    except OSError:
+        coords = []
+
+    def onclick(event):
+        """ depends on the enclosing `coords` list """
+        ix, iy = event.xdata, event.ydata
+        print(f'x = {ix:.0f}, y = {iy:.0f}')
+        coords.append((ix, iy))
+        return coords
+
+    cid = fig.canvas.mpl_connect('button_press_event', onclick)
+    plt.show()
+    fig.canvas.mpl_disconnect(cid)
+
+    np.savetxt(fname, np.array(coords),
+               header="x y")
+    coords = np.array(coords)
+    coords = coords.astype(int)
+    return coords
+
+
+def get_fom(fnisotope,
+            fname_exp, fname_bg, fwhm_pars,
+            measure_time_exp, measure_time_bg, idets,
+            Efit_low, Efit_high,
+            do_plot=True, printout=False,
+            manual_ratio=None, fitpeaks=None,
+            xmax=1600, figtext=None):
+    """ get figure of merit
+
+    fnisotope: str, like "60Co", or "152Eu"
+    """
+    fname_sims = []
+    for file in os.listdir('mama_spectra/root_files'):
+        if fnmatch.fnmatch(file, f'grid_-5_*{fnisotope}*_all.m'):
+            print("matching:", file)
+            fname_sims.append(os.path.join("mama_spectra/root_files", file))
+    fname_sims.sort()
+    grid_points = np.full_like(fname_sims, np.nan, dtype=float)
+    foms = np.zeros((len(fname_sims), 3))
+
+    for i, fname_sim in enumerate(tqdm(fname_sims)):
+        if i > 6:
+            break
+        if printout:
+            print("fitting: ", fname_sim)
+
+        grid_point = int(re.search(r"grid_(-\d*)_", fname_sim).groups()[0])
+        grid_points[i] = grid_point
+
+        sc = SpectrumComparison()
+        sc.get_data(fname_sim, fname_exp, fname_bg, fwhm_pars,
+                    measure_time_exp, measure_time_bg, idet=idets,
+                    recalibrate=True)
+
+        if manual_ratio is None:
+            sc.scale_sim_to_exp_area(Efit_low, Efit_high)
+        else:
+            sc.scale_sim_to_exp_manual(manual_ratio)
+
+        fit_peaks(fitpeaks, fnisotope, sc, grid_point)
+
+        # compare_plain_counts(sc, manual_ratio, Elimits)
+        # difference_total(sc)
+
+        sc.get_chi2()
+        rel_diff, rel_diff_smooth = sc.get_rel_diff(smooth_window_keV=20)
+
+        foms[i, :] = sc.fom(Ecompare_low, Ecompare_high, printout=False)
+
+        if do_plot:
+            fig, (ax1, ax2) = sc.plots(title=fname_sim, xmax=xmax,
+                                       plot_smoothed=False)
+
+            ax1.text(0.5, 0.9, figtext, horizontalalignment='center',
+                     verticalalignment='center', transform=ax1.transAxes,
+                     fontsize="large")
+
+            fig.savefig(f"figs/{fnisotope}_{grid_point:.0f}_noleg.png")
+
+            ax1.legend(loc="best")
+            # ax2.legend(loc="best")
+
+            fig.savefig(f"figs/{fnisotope}_{grid_point:.0f}.png")
+
+            add_peaks_plot(ax1, fnisotope, fitpeaks, grid_point)
+
+            ax1.legend()
+            fig.savefig(f"figs/{fnisotope}_{grid_point:.0f}_fits.png")
+            # plt.show()
+            plt.close(fig)
+
+    if printout:
+        ltab = [[name, *foms[i, :]] for i, name in enumerate(fname_sims)]
+        print("\nComparisons between {} and {} keV:"
+              .format(Ecompare_low, Ecompare_high))
+        print(tabulate(ltab,
+                       headers=["Name", "chi2", "rel_diff[%]",
+                                "rel_diff_smoothed[%]"],
+                       floatfmt=".2f"))
+    df = pd.DataFrame(foms, columns=[f"chi2_{fnisotope}",
+                                     f"rel_diff_{fnisotope}",
+                                     f"rel_diff_smoothed_{fnisotope}"])
+    df["grid_point"] = grid_points
+    df = df[df.grid_point.notnull()]  # workaround if going through whole loop
+    df = df.astype({"grid_point": 'int'}, copy=False)
+
+    return df
+
+
+def fit_peaks(fitpeaks, fnisotope, sc, grid_point):
+    """ Fit peaks by different functions
+
+    Note: changes the dictionary fitpeaks in place!
+    """
+    fitpeak = fitpeaks[fnisotope] if fitpeaks is not None else {}
+    for peak, specs in fitpeak.items():
+        pf = PeakFitter(sc.exp[:, 0], sc.exp[:, 1],
+                        unumpy.std_devs(sc.uyexp))
+        _fit_peaks(pf, specs, f"exp_{grid_point}", grid_point)
+
+        pf = PeakFitter(sc.sim[:, 0],
+                        unumpy.nominal_values(sc.uysim_scaled),
+                        unumpy.std_devs(sc.uysim_scaled))
+        _fit_peaks(pf, specs, f"sim_{grid_point}", grid_point)
+
+
+def _fit_peaks(pf, specs, key, grid_point):
+    specs[key] = {}
+    if isinstance(specs["E"], float):
+        popt, pcov = pf.fitGausBgStep(specs["pre_region"],
+                                      specs["post_region"])
+        print(popt)
+        print(pcov[0, 0])
+
+        specs[key]["area"] = ufloat(popt[0], np.sqrt(pcov[0, 0]))
+
+    elif len(specs["E"]) == 2:
+        popt, pcov = pf.fitDoubleGausBgStep(specs["pre_region"],
+                                            specs["post_region"],
+                                            p0_E=specs["E"])
+        specs[key]["area"] = [ufloat(popt[0], np.sqrt(pcov[0, 0])),
+                              ufloat(popt[3], np.sqrt(pcov[3, 3]))]
+    # stored for add_peaks_plot (needed for single and double peaks alike)
+    specs[key]["popt"] = popt
+    specs[key]["xfit"] = pf._xfit
+
+
+def add_peaks_plot(ax, fnisotope, fitpeaks, grid_point):
+    fitpeak = fitpeaks[fnisotope] if fitpeaks is not None else {}
+    for peak, specs in fitpeak.items():
+        for item in [f"exp_{grid_point}", f"sim_{grid_point}"]:
+            x = specs[item]["xfit"]
+            popt = specs[item]["popt"]
+            if isinstance(specs["E"], float):
+                ffit = PeakFitter.gaus_bg_step
+            elif len(specs["E"]) == 2:
+                ffit = PeakFitter.doublegaus_bg_step
+            ax.plot(x,
+                    ffit(x, *popt), "--", label=f"fit_{peak}_{item}")
+
+
+def compare_plain_counts(sc, manual_ratio, Elimits, grid_point=None):
+    """ compares counts within Elimits directly """
+    ncounts_Elims = np.zeros((len(Elimits), 2))
+    for j, (E1, E2) in enumerate(Elimits):
+        ncounts_Elims[j, 0] = sc.get_area(sc.exp, E1, E2)
+        sim_scaled = np.c_[sc.xsim, unumpy.nominal_values(sc.uysim_scaled)]
+        ncounts_Elims[j, 1] = sc.get_area(sim_scaled, E1, E2)
+
+    fig, (ax1, ax2) = plt.subplots(2, 1, sharex=True)
+    fig.suptitle(f"grid_point {grid_point}")
+    counts_exp = unumpy.uarray(ncounts_Elims[:, 0],
+                               np.sqrt(ncounts_Elims[:, 0]))
+    counts_sim = unumpy.uarray(ncounts_Elims[:, 1]/manual_ratio,
+                               np.sqrt(ncounts_Elims[:, 1])/manual_ratio)
+    counts_sim *= manual_ratio
+    counts_sim *= 5  # *5 due to binwidth
+    diff = (counts_exp-counts_sim)/counts_exp * 100
+    ax1.errorbar(Elimits.mean(axis=1), unumpy.nominal_values(counts_exp),
+                 yerr=unumpy.std_devs(counts_exp), fmt="o")
+    ax1.errorbar(Elimits.mean(axis=1), unumpy.nominal_values(counts_sim),
+                 yerr=unumpy.std_devs(counts_sim), fmt="x")
+    ax2.errorbar(Elimits.mean(axis=1), unumpy.nominal_values(diff),
+                 yerr=unumpy.std_devs(diff), fmt="x")
+
+    ax2.axhline(0, c="k", ls="--")
+    ax2.set_ylim(-20, 20)
+    return fig, (ax1, ax2)
+
+
+def difference_total(sc):
+    """ total differences between sim and exp in some area """
+    sim_scaled = np.c_[sc.xsim, unumpy.nominal_values(sc.uysim_scaled)]
+    tot_exp = sc.get_area(sc.exp, 100, 1300)
+    tot_sim = sc.get_area(sim_scaled, 100, 1300)
+    tot_sim *= 5  # 5 due to binwidth
+    tot_diff = (tot_exp-tot_sim)/tot_exp * 100
+    print(f"Tot diff Elims1 [%]: {tot_diff:.2f}")
+
+    tot_exp = sc.get_area(sc.exp, 50, 200)
+    tot_sim = sc.get_area(sim_scaled, 50, 200)
+    tot_sim *= 5  # 5 due to binwidth
+    tot_diff = (tot_exp-tot_sim)/tot_exp * 100
+    print(f"Tot diff Elims2 [%]: {tot_diff:.2f}")
+
+
+def ph_efficiency(x, p):
+    """ also from gf3 """
+    lneff_low = ln_pheff_per_region(x, *p[0:3], Ec=100)
+    lneff_high = ln_pheff_per_region(x, *p[3:6], Ec=1000)
+    inner = lneff_low**(-p[6]) + lneff_high**(-p[6])
+    return np.exp(inner**(-1/p[6]))
+
+
+def ph_efficiency_low(x, *p):
+    """ also from gf3 """
+    lneff_low = ln_pheff_per_region(x, *p, Ec=1)
+    return np.exp(lneff_low)
+
+
+def ph_efficiency_low_unumpy(x, *p):
+    """ also from gf3 """
+    lneff_low = ln_pheff_per_region(x, *p, Ec=1)
+    return unumpy.exp(lneff_low)
+
+
+def ln_pheff_per_region(x, p0, p1, p2, Ec=1):
+    """ p0 + p1*log(x/Ec) + p2*log(x/Ec)**2, i.e. a polynomial on a
+    log-log plot """
+    logeff = p0 + p1*np.log(x/Ec) + p2*np.log(x/Ec)**2
+    return logeff
+
+
+def plot_photoeff(fitpeaks, grid_point, xmax=1500):
+    fmt_list = ["<", ">", "^", "o", "x", "+", "."]
+    fig, ax = plt.subplots(dpi=200)
+    fig.suptitle(f"grid_point {grid_point}")
+
+    effs_exp = []
+    effs_sim = []
+    # for name, group in grouped:
+    #     ax.errorbar(group["E"]+3, group["eff"], yerr=group["sigma_eff"],
+    #                 label=f"{name}_Frank_jitter", fmt="o", mfc="None",
+    #                 alpha=0.2)
+
+    for i_isotope, (isotope, disotope) in enumerate(fitpeaks.items()):
+        try:
+            ndecays = files[isotope]["ndecays"]
+        except KeyError:
+            continue
+        fmt = fmt_list[i_isotope % len(fmt_list)]
+
+        for i, (peak, values) in enumerate(disotope.items()):
+            try:
+                val = values[f"exp_{grid_point}"]
+
+                print(peak, values["E"])
+                label = f"{isotope}" if i == 0 else None
+                if isinstance(values["E"], float):
+                    eff = val["area"]/values["intensity"] / ndecays / frac_dets
+
+                    disotope[peak]["eff_exp"] = eff
+                    ax.errorbar(values["E"], eff.nominal_value,
+                                yerr=eff.std_dev,
+                                c="C0", label=label, fmt=fmt, mfc="None")
+
+                elif len(values["E"]) == 2:
+                    eff = [area/intensity / ndecays / frac_dets
+                           for area, intensity in zip(val["area"],
+                                                      values["intensity"])]
+
+                    disotope[peak]["eff_exp"] = eff
+                    ax.errorbar(values["E"],
+                                [p.nominal_value for p in eff],
+                                yerr=[p.std_dev for p in eff],
+                                c="C0", label=label, fmt=fmt, mfc="None")
+
+                val = values[f"sim_{grid_point}"]
+
+                # label = f"sim_{isotope}" if i == 0 else None
+                label = None
+                if isinstance(values["E"], float):
+                    eff = (val["area"]/values["intensity"] / ndecays
+                           / frac_dets)
+
+                    disotope[peak]["eff_sim"] = eff
+                    ax.errorbar(values["E"], eff.nominal_value,
+                                yerr=eff.std_dev,
+                                c="C1", label=label, fmt=fmt, mfc="None")
+                elif len(values["E"]) == 2:
+                    eff = [area/intensity / ndecays / frac_dets
+                           for area, intensity in zip(val["area"],
+                                                      values["intensity"])]
+
+                    disotope[peak]["eff_sim"] = eff
+                    ax.errorbar(values["E"],
+                                [p.nominal_value for p in eff],
+                                yerr=[p.std_dev for p in eff],
+                                c="C1", label=label, fmt=fmt,
+                                mfc="None")
+                effs_exp.append([values["E"], disotope[peak]["eff_exp"]])
+                effs_sim.append([values["E"], disotope[peak]["eff_sim"]])
+            except KeyError:
+                print("passing this:", grid_point, peak, values.keys())
+
+    arr = np.array([[energy, y.nominal_value, y.std_dev]
+                    for (energy, y) in effs_exp])
+    arr = arr[arr[:, 0].argsort()]
+    x = arr[:, 0]
+    p0 = [60, 0.5, 2e-4]
+    popt, pcov = curve_fit(ph_efficiency_low, x, arr[:, 1], p0=p0,
+                           sigma=arr[:, 2])
+    print(f"popt exp gp{grid_point}:", popt)
+    print(f"pcov exp gp{grid_point}:\n",
+          tabulate(np.c_[[r"$p_0$", r"$p_1$", r"$p_2$"], pcov],
+                   tablefmt="latex_booktabs", floatfmt=".1e",
+                   headers=[r"$p_0$", r"$p_1$", r"$p_2$"]))
+    x = np.linspace(200, xmax, num=100)
+
+    poptcorr = correlated_values(popt, pcov)
+    print(f"popt exp gp: {poptcorr}")
+    y = ph_efficiency_low_unumpy(x, *poptcorr)
+    nom = unumpy.nominal_values(y)
+    std = unumpy.std_devs(y)
+    ax.plot(x, nom, "C0-", alpha=0.7)
+    ax.fill_between(x, nom-std, nom+std, color="C0", alpha=0.15,
+                    label=r"fit to $\epsilon_\mathrm{fe, exp}$")
+
+    arr = np.array([[energy, y.nominal_value, y.std_dev]
+                    for (energy, y) in effs_sim])
+    arr = arr[arr[:, 0].argsort()]
+    x = arr[:, 0]
+    p0 = [60, 0.5, 2e-4]
+    popt, pcov = curve_fit(ph_efficiency_low, x, arr[:, 1], p0=p0,
+                           sigma=arr[:, 2])
+    print(f"popt sim gp{grid_point}:", popt)
+    print(f"pcov sim gp{grid_point}:\n",
+          tabulate(np.c_[[r"$p_0$", r"$p_1$", r"$p_2$"], pcov],
+                   tablefmt="latex_booktabs", floatfmt=".1e",
+                   headers=[r"$p_0$", r"$p_1$", r"$p_2$"]))
+    x = np.linspace(200, xmax, num=100)
+
+    poptcorr = correlated_values(popt, pcov)
+    print(f"popt sim gp: {poptcorr}")
+    y = ph_efficiency_low_unumpy(x, *poptcorr)
+    nom = unumpy.nominal_values(y)
+    std = unumpy.std_devs(y)
+    ax.plot(x, nom, "C1-", alpha=0.7)
+    ax.fill_between(x, nom-std, nom+std, color="C1", alpha=0.15,
+                    label=r"fit to $\epsilon_\mathrm{fe, sim}$")
+
+    ax.legend()
+    ax.set_xlabel("Energy [keV]")
+    ax.set_ylabel(r"Full-energy peak efficiency $\epsilon_\mathrm{fe}$")
+    return fig, ax
+
+
+if __name__ == "__main__":
+    # fwhm_pars = np.array([73.2087, 0.50824, 9.62481e-05])
+    # Frank June 2020
+    fwhm_pars = np.array([60.6499, 0.458252, 0.000265552])
+
+    # print(fFWHM(80, fwhm_pars))
+    # sys.exit()
+
+    files = {
+        "133Ba": {"t": 1049.48, "ndecays": 4.28917e+07},
+        "60Co": {"t": 1123.57, "ndecays": 2.74162e+07},
+        "152Eu": {"t": 1065.10, "ndecays": 3.45591e+07},
+        "137Cs": {"t": 676.307, "ndecays": 2.77619e+07},
+        "241Am": {"t": 969.774},
+        "Bg": {"t": 1432.19}}
+
+    for key, file in files.items():
+        try:
+            file["ndecays"] = ufloat(file["ndecays"], 0.03*file["ndecays"])
+        except KeyError:
+            continue
+
+    ndecays_sim = 2e5
+    diff_binwidth = 5
+
+    # dets with better resolution (internal peak structure visible)
+    idets = [1, 2, 6, 8, 10, 11, 12,
+             14, 15, 16, 17, 18, 19, 20, 21, 22,
+             24, 25, 27, 29]
+    frac_dets = len(idets)/30
+
+    # # 60Co
+    Elimits = np.array([
+        [1110, 1230],
+        [1270, 1350]  # very short due to Bg subtraction problem
+        ])
+
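+    # Each fitpeaks[isotope][label] entry below holds "E" (one energy or a
+    # pair), "intensity", and the "pre_region"/"post_region" passed to
+    # PeakFitter; fit_peaks() later adds "exp_<grid_point>"/"sim_<grid_point>"
+    # entries with the fitted areas used for the efficiency points.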
+    fitpeaks = {name: {} for name in files.keys()}
+    peak_energy = [1173.228, 1332.492]
+    peak_intensity = [.9985, .999826]
+    peak = {"E": peak_energy[0], "intensity": peak_intensity[0],
+            "pre_region": [1075, 1100], "post_region": [1240, 1270]}
+    fitpeaks["60Co"][int(peak["E"])] = peak
+    peak = {"E": peak_energy[1], "intensity": peak_intensity[1],
+            "pre_region": [1235, 1270], "post_region": [1400, 1470]}
+    fitpeaks["60Co"][int(peak["E"])] = peak
+
+    fname_exp = "exp/60Co.txt"
+    fname_bg = "exp/Bg.txt"
+    measure_time_exp = files["60Co"]["t"]  # seconds
+    measure_time_bg = files["Bg"]["t"]  # seconds
+    Efit_low = 1173 - 50 - 50
+    Efit_high = 1173 + 50 + 50
+    Ecompare_low = 50
+    Ecompare_high = 2000
+
+    df = get_fom("60Co",
+                 fname_exp, fname_bg, fwhm_pars,
+                 measure_time_exp, measure_time_bg, idets,
+                 Efit_low, Efit_high,
+                 do_plot=True, printout=True,
+                 manual_ratio=files["60Co"]["ndecays"].nominal_value/ndecays_sim/diff_binwidth,
+                 fitpeaks=fitpeaks,
+                 xmax=1500,
+                 figtext=r"$^{60}$Co")
+
+    # df_all = df
+
+    # 152Eu
+    fname_exp = "exp/152Eu.txt"
+    fname_bg = "exp/Bg.txt"
+    measure_time_exp = files["152Eu"]["t"]  # seconds
+    measure_time_bg = files["Bg"]["t"]  # seconds
+    Efit_low = 720
+    Efit_high = 830
+    Ecompare_low = 50
+    Ecompare_high = 1000
+
+    # for "direct" comparison of counts (no peakfitting...)
+    Elimits = np.array([
+        [105, 130],
+        [220, 260],
+        [315, 360],
+        [390, 420],
+        [420, 460],
+        [660, 700],
+        [740, 810],
+        [830, 900],
+        [910, 1000],
+        [1030, 1160],
+        [1170, 1240],
+        [1260, 1342]])
+
+    # Photo-efficiency fits
+    # Energy, intensity, pre_Elow, pre_Ehigh, post_Elow, post_Ehigh
+    peaks = np.array(
+        [[244.6974, 0.0755, 200, 220, 260, 280],     # noqa
+         [344.2785, 0.2659, 300, 315, 378, 391],     # noqa
+         [778.9045, 0.1293, 720, 740, 805, 830],     # noqa
+         [867.38, 0.0423, 810, 830, 900, 930],       # noqa
+         [964.057, 0.1451, 900, 920, 995, 1005],     # noqa
+         [1408.013, 0.2087, 1340, 1350, 1550, 1650]  # bad fit
+         ])  # noqa
+
+    def peak_dict_from_arr(arr):
+        peak = {"E": arr[0], "intensity": arr[1],
+                "pre_region": [arr[2], arr[3]],
+                "post_region": [arr[4], arr[5]]}
+        return peak
+
+    for i in range(len(peaks)):
+        peak = peak_dict_from_arr(peaks[i, :])
+        fitpeaks["152Eu"][int(peak["E"])] = peak
+
+    def doublepeak_dict_from_arr(arr):
+        peak = {"E": arr[:, 0], "intensity": arr[:, 1],
+                "pre_region": [arr[0, 2], arr[0, 3]],
+                "post_region": [arr[0, 4], arr[0, 5]]}
+        return peak
+
+    # # Energy, intensity, pre_Elow, pre_Ehigh, post_Elow, post_Ehigh
+    # doublepeaks = np.array(
+    #     [[411.1165, .02237, 380, 390, 460, 470],              # noqa
+    #      [443.9606, .02827, np.nan, np.nan, np.nan, np.nan],  # noqa
+    #      ])  # noqa
+    # fitpeaks["152Eu"]["411_444_double"] = doublepeak_dict_from_arr(doublepeaks)
+
+    df = get_fom("152Eu",
+                 fname_exp, fname_bg, fwhm_pars,
+                 measure_time_exp, measure_time_bg, idets,
+                 Efit_low, Efit_high,
+                 do_plot=True, printout=True,
+                 manual_ratio=files["152Eu"]["ndecays"].nominal_value/ndecays_sim/diff_binwidth,
+                 fitpeaks=fitpeaks,
+                 xmax=1600,
+                 figtext=r"$^{152}$Eu")
+    # # df_all = df_all.merge(df, on="grid_point", how="outer")
+
+    # 133Ba
+    fname_exp = "exp/133Ba.txt"
+    fname_bg = "exp/Bg.txt"
+    measure_time_exp = files["133Ba"]["t"]  # seconds
+    measure_time_bg = files["Bg"]["t"]  # seconds
+    Efit_low = 285
+    Efit_high = 320
+    # Efit_low = 330
+    # Efit_high = 400
+    Ecompare_low = 50
+    Ecompare_high = 300
+
+    # # Energy, intensity, pre_Elow, pre_Ehigh, post_Elow, post_Ehigh
+    # doublepeaks = np.array(
+    #     [[276.3989, .0716, 245, 255, 320, 330],               # noqa
+    #      [302.8508, .1834, np.nan, np.nan, np.nan, np.nan],  # noqa
+    #      ])  # noqa
+    # fitpeaks["133Ba"]["276_302_double"] = doublepeak_dict_from_arr(doublepeaks)
+
+    # # Energy, intensity, pre_Elow, pre_Ehigh, post_Elow, post_Ehigh
+    # doublepeaks = np.array(
+    #     [[356.0129, .6205, 320, 330, 410, 425],               # noqa
+    #      [383.8485, .0894, np.nan, np.nan, np.nan, np.nan],   # noqa
+    #      ])  # noqa
+    # fitpeaks["133Ba"]["356_383_double"] = doublepeak_dict_from_arr(doublepeaks)
+
+    df = get_fom("133Ba",
+                 fname_exp, fname_bg, fwhm_pars,
+                 measure_time_exp, measure_time_bg, idets,
+                 Efit_low, Efit_high,
+                 do_plot=True, printout=False,
+                 manual_ratio=files["133Ba"]["ndecays"].nominal_value/ndecays_sim/diff_binwidth,
+                 xmax=460,
+                 figtext=r"$^{133}$Ba"
+                 # fitpeaks=fitpeaks
+                 )
+    # # df_all = df_all.merge(df, on="grid_point", how="outer")
+
+    # # 137Cs
+    fname_exp = "exp/137Cs.txt"
+    fname_bg = "exp/Bg.txt"
+    measure_time_exp = files["137Cs"]["t"]  # seconds
+    measure_time_bg = files["Bg"]["t"]  # seconds
+    Efit_low = 600
+    Efit_high = 700
+    Ecompare_low = 50
+    Ecompare_high = 300
+
+    peak = {"E": 661.657, "intensity": .851,
+            "pre_region": [580, 615], "post_region": [705, 740]}
+    fitpeaks["137Cs"][int(peak["E"])] = peak
+
+    df = get_fom("137Cs",
+                 fname_exp, fname_bg, fwhm_pars,
+                 measure_time_exp, measure_time_bg, idets,
+                 Efit_low, Efit_high,
+                 do_plot=True, printout=True,
+                 manual_ratio=files["137Cs"]["ndecays"].nominal_value/ndecays_sim/diff_binwidth,
+                 fitpeaks=fitpeaks,
+                 xmax=800,
+                 figtext=r"$^{137}$Cs")
+
+    df = pd.read_csv('exp/labr_eff_fit_export.dat')
+    new_name = df.columns[0].split("# ")[1]
+    df = df.rename(columns={df.columns[0]: new_name})
+    grouped = df.groupby("Isotope")
+
+    for grid_point in [-5]:
+        fig, ax = plot_photoeff(fitpeaks, grid_point)
+        fig.savefig(f"figs/photoeff_{grid_point}.png")
+
+    for grid_point in [-5]:
+        fig, ax = plot_photoeff(fitpeaks, grid_point, xmax=15e3)
+
+        # photopeak eff from geant (direct, not fitted)
+        df = pd.read_csv("response/efficiencies.csv")
+        df.plot(ax=ax, x="E", y="fe", label="geant4", color="k",
+                linestyle="--")
+
+        # Fits from Wanja, 6 Aug 2020
+        # 28Si
+        eff = {"E": [1778.969, 2838.29, 3200.7],
+               "eff": [0.13462, 0.11538, 0.1053],
+               "dE": [0.011, 0.15, 0.5],
+               "deff": [0.13462*0.135, 0.11538*0.135, 0.1053*0.135]}
+        ax.errorbar(x=eff["E"], xerr=eff["dE"], y=eff["eff"], yerr=eff["deff"],
+                    label="28Si, Wanja", fmt="C3o")
+        ax.errorbar(x=4439.82, xerr=0.21, y=0.08262, yerr=0.08262*0.135,
+                    label="12C, Wanja", fmt="C4o")
+
+        # Frank's fits
+        popt = [-0.29138, -0.0283581, -0.0271264]
+        pcov = [[0.0135246, -0.00290798, 0.000121002],
+                [-0.00290798, 0.000922994, -7.2436e-05],
+                [0.000121002, -7.2436e-05, 8.42127e-06]]
+        poptcorr = correlated_values(popt, pcov)
+        x = np.linspace(200, 15e3, num=100)
+        y = ph_efficiency_low_unumpy(x, *poptcorr)
+        nom = unumpy.nominal_values(y)
+        std = unumpy.std_devs(y)
+        ax.plot(x, nom, "C4-", alpha=0.7)
+        ax.fill_between(x, nom-std, nom+std, color="C4", alpha=0.15,
+                        label=r"Frank's fit to $\epsilon_\mathrm{fe, exp}$")
+
+        ax.legend()
+        fig.savefig(f"figs/photoeff_{grid_point}_with_geant.png")
+        ax.set_xlim(None, 5e3)
+        ax.set_ylim(None, 0.32)
+        fig.savefig(f"figs/photoeff_{grid_point}_with_geant_zoom.png")
+
+    plt.show()
+    # # df_all = df_all.merge(df, on="grid_point", how="outer")
+
+    # now = datetime.now()
+    # df_all.to_pickle(f'chi2_df_{now.strftime("%y%d%m_%H%M%S")}.pickle')
+    # print(df_all[:8])
diff --git a/data/analysis/compare_inbeam.py b/data/analysis/compare_inbeam.py
new file mode 100644
index 0000000..c02c863
--- /dev/null
+++ b/data/analysis/compare_inbeam.py
@@ -0,0 +1,353 @@
+import numpy as np
+import matplotlib.pyplot as plt
+from tabulate import tabulate
+import fnmatch
+import os
+import re
+from tqdm import tqdm
+from datetime import datetime
+
+import sys
+from uncertainties import unumpy
+
+import pandas as pd
+from ompy import Matrix, Vector
+# sys.path.append("../exp")
+
+from compare import SpectrumComparison, do_smooth
+
+
+class SpectrumComparisonInBeam(SpectrumComparison):
+    def __init__(self):
+        pass
+
+    def get_data(self, exp, bg, bg_ratio, sim, fwhm_pars):
+        self.fwhm_pars = fwhm_pars
+
+        xexp = exp[:, 0]
+        yexp = exp[:, 1]
+        uyexp = unumpy.uarray(yexp, np.sqrt(yexp))
+        uybg = unumpy.uarray(bg[:, 1], np.sqrt(bg[:, 1]))
+
+        uyexp = uyexp - uybg * bg_ratio
+        exp[:, 1] = unumpy.nominal_values(uyexp)
+
+        sim = do_smooth(sim, fwhm_pars)
+
+        self.sim = sim
+        self.exp = exp
+        self.xexp = exp[:, 0]
+        self.xsim = sim[:, 0]
+        self.uyexp = uyexp
+
+    def plot_current(self):
+        fig, ax = plt.subplots()
+        ax.semilogy(self.exp[:, 0], self.exp[:, 1])
+        ax.plot(self.sim[:, 0], self.sim[:, 1])
+        plt.show()
+
+
+def get_fom(fnamesim,
+            exp, bg, fwhm_pars,
+            bg_ratio,
+            Efit_low, Efit_high,
+            do_plot=True, printout=False,
+            manual_ratio=None, xmax_plot=1400,
+            ymin=1.2e3, ymax=1e7):
+    """ get figure of merit
+
+    fnamesim: str, like "60Co", "152Eu", or "1777keV"
+    """
+    fname_sims = []
+    for file in os.listdir('mama_spectra/root_files'):
+        if fnmatch.fnmatch(file, f'inbeam_grid_*{fnamesim}*.m'):
+            if "_all.m" in file:
+                continue
+            fname_sims.append(os.path.join("mama_spectra/root_files", file))
+    fname_sims.sort()
+    grid_points = np.full_like(fname_sims, np.nan, dtype=float)
+    foms = np.zeros((len(fname_sims), 3))
+
+    for i, fname_sim in enumerate(tqdm(fname_sims)):
+        # if i > 2:
+        #     break
+        if printout:
+            print("fitting: ", fname_sim)
+        sc = SpectrumComparisonInBeam()
+
+        sim = Vector(path=fname_sim)
+        sim = np.c_[sim.E*1000, sim.values]
+
+        sc.get_data(exp.copy(), bg.copy(), bg_ratio, sim, fwhm_pars)
+
+        if manual_ratio is None:
+            sc.scale_sim_to_exp_area(Efit_low, Efit_high)
+        else:
+            sc.scale_sim_to_exp_manual(manual_ratio)
+
+        # sc.plot_current()
+        chi2 = sc.get_chi2()
+        rel_diff, rel_diff_smooth = sc.get_rel_diff(smooth_window_keV=20)
+
+        foms[i, :] = sc.fom(Ecompare_low, Ecompare_high, printout=False)
+        # print(sc.fom(Ecompare_low, Ecompare_high))
+        grid_points[i] = int(re.search(r"grid_(-*\d*)_", fname_sim)[1])
+
+        if do_plot:
+            fig, (ax1, ax2) = sc.plots(title=fname_sim, xmax=xmax_plot,
+                                       plot_smoothed=False)
+            ax1.set_ylim(ymin, ymax)
+            fig.savefig(f"figs_inbeam/{fnamesim}_{grid_points[i]:.0f}_noleg.png")
+            ax1.legend()
+            fig.savefig(f"figs_inbeam/{fnamesim}_{grid_points[i]:.0f}.png")
+            # plt.show()
+            plt.close(fig)
+
+
+def arr_from_py(fname, Emin, Emax, remove_negative=False):
+    mat = Matrix(path=fname)
+    # mat.plot(vmin=1, vmax=1e4)
+    if remove_negative:
+        mat.remove_negative()
+    values, E = mat.projection("Eg", Emin, Emax)
+
+    # mat1 = mat.copy()
+    # mat2 = mat.copy()
+    # mat1.cut("Eg", 550, 550)
+    # mat2.cut("Eg", 1778-10, 1778+10)
+
+    # mat1.rebin("Ex", factor=5)
+    # mat2.rebin("Ex", factor=5)
+    # fig, ax = plt.subplots()
+    # # mat1 /= mat2
+    # values1, E = mat1.projection("Ex", Emin=1000, Emax=2000)
+    # values2, E = mat2.projection("Ex", Emin=1000, Emax=2000)
+    # ax.plot(E, values1/values2)
+    # fig, ax = plt.subplots()
+
+    # ax.plot(E, values1)
+    # ax.plot(E, values2)
+    # plt.show()
+
+    return np.c_[E, values]
+
+
+if __name__ == "__main__":
+    # fwhm_pars = np.array([73.2087, 0.50824, 9.62481e-05])
+    # Frank June 2020
+    fwhm_pars = np.array([60.6499, 0.458252, 0.000265552])
+
+    # # 144Nd -- Ex = 696 keV
+    # fname_exp = "exp/inbeam/144nd/alfna.m"
+    # fname_bg = "exp/inbeam/144nd/alfna_bg.m"
+    # Efit_low = 650
+    # Efit_high = 720
+    # Ecompare_low = 50
+    # Ecompare_high = 1000
+    # bg_ratio = 0
+
+    # exp = arr_from_py(fname_exp, 640-40, 640+40, remove_negative=True)
+    # bg = arr_from_py(fname_bg, 640-40, 640+40, remove_negative=True)
+    # # fig, ax = plt.subplots()
+    # # ax.plot(exp[:, 0], exp[:, 1], label="exp")
+    # # # ax.plot(bg[:, 0], bg[:, 1], label="bg")
+
+    # # # exp = arr_from_py(fname_exp, 640-20, 640+20)
+    # # # ax.plot(exp[:, 0], exp[:, 1], label="exp2")
+
+    # # # exp = arr_from_py(fname_exp, 640-10, 640+10)
+    # # # ax.plot(exp[:, 0], exp[:, 1], label="exp3")
+
+    # # ax.set_yscale("log")
+    # # ax.legend()
+    # # plt.show()
+
+    # get_fom("696",
+    #         exp, bg, fwhm_pars, bg_ratio,
+    #         Efit_low, Efit_high,
+    #         do_plot=True, printout=True)
+
+    # 28Si -- Ex = 1.78 MeV
+    fname_exp = "exp/inbeam/si-run1/28si/alfna.m"
+    fname_bg = "exp/inbeam/si-run1/28si/alfna_bg.m"
+    Efit_low = 1779-80
+    Efit_high = 1779+35
+    Ecompare_low = 50
+    Ecompare_high = 1000
+    bg_ratio = 0
+
+    exp = arr_from_py(fname_exp, 1600-40, 1600+40, remove_negative=True)
+    bg = arr_from_py(fname_bg, 1600-40, 1600+40, remove_negative=True)
+    # exp = arr_from_py(fname_exp, 1516, 1575, remove_negative=True)
+    # bg = arr_from_py(fname_bg, 1516, 1575, remove_negative=True)
+    # exp = arr_from_py(fname_exp, 1575, 1625, remove_negative=True)
+    # bg = arr_from_py(fname_bg, 1575, 1625, remove_negative=True)
+
+    # small recalibration
+    exp[:, 0] -= 2
+    bg[:, 0] -= 2
+
+    # test
+    # bg[0, 1] /= 0
+
+    # fig, ax = plt.subplots()
+    # ax.plot(exp[:, 0], exp[:, 1], label="exp")
+    # ax.plot(bg[:, 0], bg[:, 1], label="bg")
+
+    # fig, ax = plt.subplots()
+    # ax.plot(exp[:, 0], exp[:, 1], label="exp")
+    # ax.plot(bg[:, 0], bg[:, 1], label="bg")
+
+    # exp = arr_from_py(fname_exp, 1700-100, 1700+100, remove_negative=True)
+    # ax.plot(exp[:, 0], exp[:, 1], label="exp2")
+
+    # exp = arr_from_py(fname_exp, 1700-150, 1700+150, remove_negative=True)
+    # ax.plot(exp[:, 0], exp[:, 1], label="exp3")
+
+    # ax.set_yscale("log")
+    # ax.legend()
+    # plt.show()
+
+    get_fom("1779",
+            exp, bg, fwhm_pars, bg_ratio,
+            Efit_low, Efit_high,
+            do_plot=True, printout=True, xmax_plot=2200,
+            ymin=1.2e2, ymax=1e5)
+
+    """
+    # 28Si -- Ex = ~4.6 MeV
+    fname_exp = "exp/inbeam/si-run1/28si/alfna.m"
+    fname_bg = "exp/inbeam/si-run1/28si/alfna_bg.m"
+    Efit_low = 2750
+    Efit_high = 2870
+    Ecompare_low = 50
+    Ecompare_high = 1000
+    bg_ratio = 0
+
+    exp = arr_from_py(fname_exp, 4300-40, 4300+40, remove_negative=True)
+    bg = arr_from_py(fname_bg, 4300-40, 4300+40, remove_negative=True)
+
+    # exp = arr_from_py(fname_exp, 3800, 4200, remove_negative=True)
+    # bg = arr_from_py(fname_bg, 3800, 4200, remove_negative=True)
+
+    # small recalibration
+    exp[:, 0] -= 2
+    bg[:, 0] -= 2
+
+    fig, ax = plt.subplots()
+    ax.plot(exp[:, 0], exp[:, 1], label="exp")
+    ax.plot(bg[:, 0], bg[:, 1], label="bg")
+
+    # exp = arr_from_py(fname_exp, 4300-100, 4300+100, remove_negative=True)
+    # ax.plot(exp[:, 0], exp[:, 1], label="exp2")
+
+    # exp = arr_from_py(fname_exp, 4300-150, 4300+150, remove_negative=True)
+    # ax.plot(exp[:, 0], exp[:, 1], label="exp3")
+
+    # ax.set_yscale("log")
+    # ax.legend()
+
+    from scipy.stats import norm
+    from scipy.optimize import curve_fit
+
+    def fitnorm(x, c, loc, scale, offset):
+        return c * norm.pdf(x, loc, scale) + offset
+
+    iE1 = np.abs(exp[:, 0] - 1720).argmin()
+    iE1 = np.abs(exp[:, 0] - 1720).argmin()
+    iE2 = np.abs(exp[:, 0] - 1890).argmin()
+    xfit = exp[iE1:iE2+1, 0]
+    yfit = exp[iE1:iE2+1, 1]
+    p0 = (20*np.max(yfit), 1778, 20, 400)
+    bounds = (0, [np.inf, np.inf, np.inf, 410])
+
+    popt, pcov = curve_fit(fitnorm, xfit, yfit, p0=p0,
+                           bounds=bounds)
+    ax.plot(xfit, fitnorm(xfit, *popt))
+
+    iE1 = np.abs(exp[:, 0] - (2837-60)).argmin()
+    iE2 = np.abs(exp[:, 0] - (2837+140)).argmin()
+    xfit1 = exp[iE1:iE2+1, 0]
+    yfit1 = exp[iE1:iE2+1, 1]
+    p0 = (20*np.max(yfit1), 2837, 20, 5)
+
+    bounds = (0, [np.inf, np.inf, np.inf, 10])
+
+    popt1, pcov1 = curve_fit(fitnorm, xfit1, yfit1, p0=p0, bounds=bounds)
+    ax.plot(xfit1, fitnorm(xfit1, *popt1))
+    ax.set_yscale("log")
+
+    fig, ax = plt.subplots()
+    ax.plot(xfit, yfit-fitnorm(xfit, *popt))
+
+    fig, ax = plt.subplots()
+    ax.plot(xfit1, yfit1-fitnorm(xfit1, *popt1))
+
+    plt.show()
+
+    get_fom("4617",
+            exp, bg, fwhm_pars, bg_ratio,
+            Efit_low, Efit_high,
+            do_plot=True, printout=True, xmax_plot=4800)
+    """
+
+    # # 12C -- Ex = 4.4 MeV
+    fname_exp = "exp/inbeam/12C/h_bg_subtracted.m"
+    # fname_bg = "exp/inbeam/si-run1/28si/alfna_bg.m"
+    Efit_low = 4440-80
+    Efit_high = 4440+35
+    Ecompare_low = 50
+    Ecompare_high = 1000
+    bg_ratio = 0
+
+    # especially 21, 26, 30 have "double peaks"
+    # slight imprecision: at the moment not removed from the simulations,
+    # but the spectra there are almost the same for each detector
+
+    good_dets = np.arange(30)
+
+    # bad_dets = [3, 5, 7, 8, 18, 21, 26, 30]
+    # bad_dets.sort()
+    # bad_dets = np.array(bad_dets)
+    # good_dets = \
+    #     good_dets[bad_dets[np.searchsorted(bad_dets, good_dets)] != good_dets]
+
+    exp_mat = Matrix(path=fname_exp)
+    # exp_mat.plot()
+
+    exp = np.zeros((len(exp_mat.Eg), 2))
+    exp[:, 0] = exp_mat.Eg
+    exp[:, 1] = exp_mat.values[good_dets, :].sum(axis=0)
+    exp[:, 1][exp[:, 1] < 0] = 0  # remove negative counts for comparison
+
+    exp_all = np.zeros((len(exp_mat.Eg), 2))
+    exp_all[:, 0] = exp_mat.Eg
+    exp_all[:, 1] = exp_mat.values[:, :].sum(axis=0)
+
+    # dummy bg
+    bg = exp.copy()
+
+    fig, ax = plt.subplots()
+    ax.plot(exp[:, 0], exp[:, 1], label="exp")
+    ax.plot(exp_all[:, 0], exp_all[:, 1], label="all detectors")
+    # ax.plot(bg[:, 0], bg[:, 1], label="bg")
+
+    # fig, ax = plt.subplots()
+    # ax.plot(exp[:, 0], exp[:, 1], label="exp")
+    # ax.plot(bg[:, 0], bg[:, 1], label="bg")
+
+    # exp = arr_from_py(fname_exp, 1700-100, 1700+100, remove_negative=True)
+    # ax.plot(exp[:, 0], exp[:, 1], label="exp2")
+
+    # exp = arr_from_py(fname_exp, 1700-150, 1700+150, remove_negative=True)
+    # ax.plot(exp[:, 0], exp[:, 1], label="exp3")
+
+    # ax.set_yscale("log")
+    # ax.legend()
+    # plt.show()
+
+    get_fom("4440",
+            exp, bg, fwhm_pars, bg_ratio,
+            Efit_low, Efit_high,
+            do_plot=True, printout=True, xmax_plot=5000)
diff --git a/data/analysis/spec_to_matrix.py b/data/analysis/spec_to_matrix.py
new file mode 100644
index 0000000..1d33985
--- /dev/null
+++ b/data/analysis/spec_to_matrix.py
@@ -0,0 +1,246 @@
+import matplotlib.pyplot as plt
+from pathlib import Path
+import numpy as np
+import pandas as pd
+from tqdm import tqdm
+
+import ompy as om
+
+
+def fFWHM(E, p):
+    # FWHM in keV
+    return np.sqrt(p[0] + p[1] * E + p[2] * E**2)
+
+
+def get_area(vec, E1, E2):
+    return vec[vec.index(E1):vec.index(E2)+1].sum()
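For orientation, the resolution parameterization in `fFWHM` can be checked in isolation; a minimal sketch using the "Frank June 2020" parameters from above:

```python
import numpy as np

def fFWHM(E, p):
    # FWHM in keV for energy E in keV
    return np.sqrt(p[0] + p[1] * E + p[2] * E**2)

fwhm_pars = np.array([60.6499, 0.458252, 0.000265552])
for E in [662, 1332, 5000]:
    fwhm = fFWHM(E, fwhm_pars)
    print(f"E = {E:5d} keV: FWHM = {fwhm:5.1f} keV ({100*fwhm/E:.1f} %)")
# ~22 keV (3.3 %) at 662 keV; ~34 keV (2.5 %) at 1332 keV
```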
"sharp" energy + ext_bg: extend where bg should be taken into account + """ + + if ext_bg is None: + ext_bg = extend+5 + + bg_below = get_area(vec, energy-ext_bg, energy-ext_bg+extend) + bg_above = get_area(vec, energy+extend, energy+ext_bg) + bg_avg = (bg_below + bg_above) / (2*(ext_bg-extend)) + + peak_counts = get_area(vec, energy-extend, energy+extend) + peak_wo_bg = peak_counts - bg_avg*extend + + return peak_wo_bg + + +def get_efficiencies(vec, energy): + """ Get total, photopeak (...) efficiencies + + Args: + vec: vector to analyze, should not be folded with energy resolution yet + energy: Energy of the FE peak + + """ + df = pd.Series() + df["E"] = energy + df["tot>50keV"] = vec.values[vec.index(50):].sum() / nevent + df["tot>200keV"] = vec.values[vec.index(200):].sum() / nevent + df["tot>500keV"] = vec.values[vec.index(500):].sum() / nevent + + eff_photo = get_counts_bgsubtract(vec, energy, extend=10) / nevent + df["fe"] = eff_photo + + if energy > 2*511: + eff_ = get_counts_bgsubtract(vec, energy-511, extend=10) / nevent + df["se"] = eff_ + + eff_ = get_counts_bgsubtract(vec, energy-511*2, extend=10) / nevent + df["de"] = eff_ + + eff_ = get_counts_bgsubtract(vec, 511, extend=5) / nevent + df["511"] = eff_ + else: + df["se"] = 0 + df["de"] = 0 + df["511"] = 0 + return df + + +def efficiency_plots(efficiencies: pd.DataFrame, energy_grid): + fig, ax = plt.subplots() + # eff.plot(x="E", ax=ax) + ax.plot(energy_grid, eff["tot>50keV"], "C0-", label="tot > 50 keV") + ax.plot(energy_grid, eff["tot>200keV"], "C0--", label="tot > 200 keV") + ax.plot(energy_grid, eff["tot>500keV"], "C0-.", label="tot > 500 keV") + ax.plot(energy_grid, eff["fe"], "C1", label="full energy") + ax.plot(energy_grid, eff["se"], "C1", label="single escape", + ls=(0, (5, 5))) + ax.plot(energy_grid, eff["de"], "C1", label="double escape", + ls=(0, (3, 5, 1, 5))) + ax.plot(energy_grid, eff["511"], "C2", label="annihilation", + ls=(0, (3, 1, 1, 1))) + ax.set_xlabel("Energy [keV]") + ax.set_ylabel(r"efficiency $\epsilon$") + ax.set_xlim(50, None) + + d = 16.3 + eff_geom = 30*(np.pi*(8.98/2)**2) / (4*np.pi*d**2) # noqa + ax.axhline(eff_geom, color="k", ls="--", alpha=0.5) + + ax.legend(loc="upper left", ncol=2) + + fig.tight_layout(pad=0.02) + return fig, ax + + +if __name__ == "__main__": + figs_dir = Path("figs") + figs_dir.mkdir(exist_ok=True) + + response_outdir = Path("response_export") + response_outdir.mkdir(exist_ok=True) + + energy_grid = np.arange(50, 1e4, 10, dtype=int) + nevents = np.linspace(6e5, 3e6, len(energy_grid), dtype=np.int) + + energy_grid = np.append(energy_grid, [int(1.2e4), int(1.5e4), int(2e4)]) + nevents = np.append(nevents, [int(3e6), int(3e6), int(3e6)]) + + fwhm_pars = np.array([60.6499, 0.458252, 0.000265552]) + + energy_out = np.arange(energy_grid[0], 21000, 10) + energy_out_uncut = np.arange(0, 21000, 10) + # response mat. 
+def efficiency_plots(efficiencies: pd.DataFrame, energy_grid):
+    fig, ax = plt.subplots()
+    # efficiencies.plot(x="E", ax=ax)
+    ax.plot(energy_grid, efficiencies["tot>50keV"], "C0-", label="tot > 50 keV")
+    ax.plot(energy_grid, efficiencies["tot>200keV"], "C0--", label="tot > 200 keV")
+    ax.plot(energy_grid, efficiencies["tot>500keV"], "C0-.", label="tot > 500 keV")
+    ax.plot(energy_grid, efficiencies["fe"], "C1", label="full energy")
+    ax.plot(energy_grid, efficiencies["se"], "C1", label="single escape",
+            ls=(0, (5, 5)))
+    ax.plot(energy_grid, efficiencies["de"], "C1", label="double escape",
+            ls=(0, (3, 5, 1, 5)))
+    ax.plot(energy_grid, efficiencies["511"], "C2", label="annihilation",
+            ls=(0, (3, 1, 1, 1)))
+    ax.set_xlabel("Energy [keV]")
+    ax.set_ylabel(r"efficiency $\epsilon$")
+    ax.set_xlim(50, None)
+
+    d = 16.3
+    # geometric (solid-angle) coverage of the 30 detector openings
+    eff_geom = 30*(np.pi*(8.98/2)**2) / (4*np.pi*d**2)  # noqa
+    ax.axhline(eff_geom, color="k", ls="--", alpha=0.5)
+
+    ax.legend(loc="upper left", ncol=2)
+
+    fig.tight_layout(pad=0.02)
+    return fig, ax
+
+
+if __name__ == "__main__":
+    figs_dir = Path("figs")
+    figs_dir.mkdir(exist_ok=True)
+
+    response_outdir = Path("response_export")
+    response_outdir.mkdir(exist_ok=True)
+
+    energy_grid = np.arange(50, 1e4, 10, dtype=int)
+    nevents = np.linspace(6e5, 3e6, len(energy_grid), dtype=int)
+
+    energy_grid = np.append(energy_grid, [int(1.2e4), int(1.5e4), int(2e4)])
+    nevents = np.append(nevents, [int(3e6), int(3e6), int(3e6)])
+
+    fwhm_pars = np.array([60.6499, 0.458252, 0.000265552])
+
+    energy_out = np.arange(energy_grid[0], 21000, 10)
+    energy_out_uncut = np.arange(0, 21000, 10)
+    # response matrix with x: incident energy; y: outgoing energy
+    respmat = om.Matrix(values=np.zeros((len(energy_grid), len(energy_out))),
+                        Ex=energy_grid,
+                        Eg=energy_out)
+
+    eff = pd.DataFrame(columns=["E", "tot>50keV", "tot>200keV", "tot>500keV",
+                                "fe", "se", "de", "511"])
+    eff["E"] = energy_grid
+
+    specdir = Path("from_geant")
+    # fig, ax = plt.subplots()
+    # fig_plain, ax_plain = plt.subplots()
+
+    for i, (energy, nevent) in enumerate(zip(tqdm(energy_grid), nevents)):
+
+        # if energy > 1000:  # just for testing
+        #     break
+
+        fn = specdir / f"grid_{energy}keV_n{nevent}.root.m"
+        if not fn.exists():
+            continue
+
+        vec = om.Vector(path=fn, units="MeV")
+        vec.to_keV()
+
+        eff.loc[i] = get_efficiencies(vec, eff["E"][i])
+
+        # rebin and smooth; rebin first to save time in the smoothing
+        vec.rebin(mids=energy_out_uncut)
+        vec.values = om.gauss_smoothing(vec.values, vec.E,
+                                        fFWHM(vec.E, fwhm_pars))
+        vec.rebin(mids=energy_out)
+
+        respmat.values[i, :] = vec.values
+
+        # # get before smoothing, not after -> large effect
+        # # eff_photo = get_area(vec, energy-10, energy+10) / nevent
+        # # print(eff_photo)
+
+        # vec.values /= i  # scale number of events
+        # # vec.save(f"export/resp_m{i}.m")
+        # vec.plot(ax=ax)
+
+    eff.to_csv("efficiencies.csv")
+
+    # different versions of the response matrix
+    fn = "response_unnorm"
+    _, ax, fig = respmat.plot(title="response unnormalized",
+                              scale="log", vmin=1)
+    ax.set_ylabel(r"$E_{incident}$ [keV]")
+    fig.savefig(figs_dir/f"{fn}.png")
+    # respmat.save(response_outdir/f"{fn}.m")  # too large for mama
+    respmat.save(response_outdir/f"{fn}.txt")
+    fig.clear()
+    plt.close(fig)
+
+    fn = "response_norm_efficiency"
+    mat = respmat.copy()
+    mat.values /= nevents[:, np.newaxis]
+    _, ax, fig = mat.plot(title="response normalized to total eff.",
+                          scale="log", vmin=1e-5, vmax=1e-1)
+    ax.set_ylabel(r"$E_{incident}$ [keV]")
+    fig.savefig(figs_dir/f"{fn}.png")
+    # mat.save(response_outdir/f"{fn}.m")  # too large for mama
+    mat.save(response_outdir/f"{fn}.txt")
+    fig.clear()
+    plt.close(fig)
+
+    fn = "response_norm_1"
+    mat = respmat.copy()
+    mat.values /= mat.values.sum(axis=1)[:, np.newaxis]
+    _, ax, fig = mat.plot(title="response normalized to 1",
+                          scale="log", vmin=1e-5, vmax=1e-1)
+    ax.set_ylabel(r"$E_{incident}$ [keV]")
+    fig.savefig(figs_dir/f"{fn}.png")
+    # mat.save(response_outdir/f"{fn}.m")  # too large for mama
+    mat.save(response_outdir/f"{fn}.txt")
+    fig.clear()
+    plt.close(fig)
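The `_unnorm`, `_norm_efficiency` and `_norm_1` variants exported here differ only in the row normalization; a toy sketch of the two normalizations (made-up numbers):

```python
import numpy as np

# toy response: 3 incident energies x 4 outgoing bins (made-up counts)
counts = np.array([[100., 20., 5., 0.],
                   [80., 40., 10., 2.],
                   [60., 50., 20., 5.]])
nevents = np.array([1000., 1000., 2000.])  # simulated events per row

per_incident = counts / nevents[:, None]           # rows sum to the total eff.
unit_norm = counts / counts.sum(axis=1)[:, None]   # rows sum to exactly 1

print(per_incident.sum(axis=1))  # [0.125  0.132  0.0675] = total efficiency
print(unit_norm.sum(axis=1))     # [1. 1. 1.]
```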
+ mat.cut(axis="Ex", Emin=50, Emax=10e3) + mat.cut(axis="Eg", Emin=50, Emax=10e3) + _, ax, fig = mat.plot(title="response (square cut 50keV - 10 MeV\n" + "normalized to total efficiency", + scale="log", vmin=1e-5, vmax=1e-1) + ax.set_ylabel(r"$E_{incident}$ [KeV]") + fig.savefig(figs_dir/f"{fn}.png") + mat.save(response_outdir/f"{fn}.m") + mat.save(response_outdir/f"{fn}.txt") + fig.clear() + plt.close(fig) + + # comparison fig as spectra + fig, ax = plt.subplots() + for Ein in [0.5, 1, 2, 3, 5, 9]: + mat.plot_projection(axis="Eg", ax=ax, Emin=Ein*1e3, Emax=Ein*1e3, + label=f"E_inc = {Ein} MeV", + scale="log") + ax.set_ylim(1e-5, 1e-1) + ax.set_ylabel("response/ bin /n_incident") + ax.set_xlabel("Energy [keV]") + ax.legend() + fig.savefig(figs_dir/"response_functions.png") + + fn = "response_squarecut_50keV_10.000keV_norm_1" + mat = respmat.copy() + mat.cut(axis="Ex", Emin=50, Emax=10e3) + mat.cut(axis="Eg", Emin=50, Emax=10e3) + mat.values /= mat.values.sum(axis=1)[:, np.newaxis] + _, ax, fig = mat.plot(title="response (square cut 50keV - 10 MeV\n" + "response normalized to 1", + scale="log", vmin=1e-5, vmax=1e-1) + ax.set_ylabel(r"$E_{incident}$ [KeV]") + fig.savefig(figs_dir/f"{fn}.png") + mat.save(response_outdir/f"{fn}.m") + mat.save(response_outdir/f"{fn}.txt") + fig.clear() + plt.close(fig) + + # efficiency plots + fig, ax = efficiency_plots(eff, energy_grid) + fig.savefig(figs_dir/"eff.png", dpi=200) + + # plt.show() diff --git a/data/export_hist.C b/data/export_hist.C new file mode 100644 index 0000000..fedc8f2 --- /dev/null +++ b/data/export_hist.C @@ -0,0 +1,85 @@ +// Retreives the number of counts in the peaks and background of the spectra +// and writes this to file +// additional files to be read can be inserted via the /fnames.insert/ function + +#include +#include +#include +#include + +#include +// #include +#include +#include +#include +#include "TFile.h" + +#include "AnalyseSims.C" +#ifdef MAKECINT +#pragma link C++ defined_in “AnalyseSims.h”; +#pragma link C++ defined_in “AnalyseSims.C”; +#endif + +#include "th1_to_mama.C" +#include "th22mama.c" + +#include +using namespace std; + + +///////////////////////////////////////// +///////////////////////////////////////// + +void export_hist(){ + +TH1D* hmama; // output spectrum for mama +TH1D* hmama_raw; // before smoothing + + +std::vector fnames; + +string fout_name = "Peaks.dat"; +system("xterm -e 'mkdir mama_spectra'"); // create dir for mama spectra, if not already existend +string outdir = "mama_spectra"; + +////////////////////// + +// get list of files matching certain criterium + +string fname_tmp; +system("xterm -e 'find root_files/*.root > tmp.txt'"); +ifstream tmpfile("tmp.txt", ios::in); +while(tmpfile>>fname_tmp) +{ + fnames.push_back(fname_tmp); //adding data in to the vector +} + +for(auto fname : fnames) +{ + cout << "Working on " << fname << endl; + + // read in root file + auto fname_rootStr = fname; //+ ".root"; + const char *fname_root = fname_rootStr.c_str(); + TFile *file_root = new TFile(fname_root, "READ"); + + TTree *OSCARhits; + file_root->GetObject("OSCARhits",OSCARhits); + + // execute the macro that combines all detectors + AnalyseSims t(OSCARhits); + t.Loop(); + + // get the histogram from AnalyseSims + + TH1D *h1 = (TH1D*)gDirectory->GetList()->FindObject("h1"); + auto fname_mamaStr = outdir + "/" + fname + ".m"; + const char *fname_mama = fname_mamaStr.c_str(); + th1_to_mama(h1, fname_mama); + + // TH2D *h1_all = (TH2D*)gDirectory->GetList()->FindObject("h1_all"); + // fname_mamaStr = outdir 
+ "/" + fname + "_all.m"; + // const char *fname_mama_all = fname_mamaStr.c_str(); + // th22mama(h1_all, fname_mama_all); + } +} diff --git a/data/mama_spectra/RunSmooth_and_Multiply.py b/data/mama_spectra/RunSmooth_and_Multiply.py deleted file mode 100755 index 5d58ad1..0000000 --- a/data/mama_spectra/RunSmooth_and_Multiply.py +++ /dev/null @@ -1,30 +0,0 @@ -#!/usr/bin/python - -import glob, os - -multFactor = 1.15 # multiplication factor for cmp_bg - -def createFolder(directory): - try: - if not os.path.exists(directory): - os.makedirs(directory) - except OSError: - print('Error: Creating directory. ' + directory) - -outfolder = "../mama_spectra_smooth_{:.2f}/".format(multFactor) - -createFolder(outfolder) - -for file in glob.glob("cmp*"): - os.system("cp %s %s/%s" % (file, outfolder, file)) - command = "./Smooth_and_Multiply.sh %s %s %f" % (outfolder, file, multFactor) - os.system(command) - - # correct for some mistakes in writing the mama file - command ="""vim -E -s {} <<-EOF -:%s/\\n\\n!CAL/\\r!CAL/g -:x -EOF -""".format(outfolder+file) - # print command - os.system(command) \ No newline at end of file diff --git a/data/mama_spectra/Smooth_and_Multiply.sh b/data/mama_spectra/Smooth_and_Multiply.sh deleted file mode 100755 index 90de870..0000000 --- a/data/mama_spectra/Smooth_and_Multiply.sh +++ /dev/null @@ -1,85 +0,0 @@ -#!/usr/bin/expect - -set SleepTime 0.5 - -set multFactor [lindex $argv 2]; -set filename [lindex $argv 1]; -set prefix [lindex $argv 0]; -set outfile "$prefix$filename" - -set timeout 2 - -spawn mama -sleep 1.5 -expect "mama>" -sleep $SleepTime -send "re\r" -expect "Destination spectrum <1>:" -send "1\r" -sleep $SleepTime -expect "Filename :" -send "$filename\r" -#expect "mama>" -#send "ds2\r" - -# Applying the smoothing -expect "mama>" -# sleep $SleepTime -send "sm\r" -expect "Destination spectrum <2>:" -# sleep $SleepTime -send "2\r" -expect "Source spectrum <1>:" -# sleep $SleepTime -send "1\r" -expect "Dimension of spectrum" -# sleep $SleepTime -send "\r" -expect "Write FWHMx (ch) around ch " -# sleep $SleepTime -send "20\r" -expect "Write FWHMx (ch) around ch " -# sleep $SleepTime -send "30\r" -expect "Smoothing-matrix OK? (y/n) :" -sleep $SleepTime -send "y\r" -expect "mama>" - -# multiplication by "arb." factor -sleep $SleepTime -send "ar\r" -expect "Write your expression:" -sleep $SleepTime -send "2=2*$multFactor \r" -expect "mama>" - -# write file -sleep $SleepTime -send "wr\r" -expect "Spectrum to write <2>:" -# sleep $SleepTime -send "2\r" -expect "Please, choose your type <1>:" -# sleep $SleepTime -send "1\r" -expect "Cal. coeff. a0 (keV) on x-axis < 0.0>:" -# sleep $SleepTime -send "\r" -expect "Cal. coeff. a1 (keV/ch) on x-axis " -# sleep $SleepTime -send "\r" -expect "Cal. coeff. a2 (keV/ch2) on x-axis < 0.0000E+00>:" -# sleep $SleepTime -send "\r" -expect "Length of output-spectrum" -# sleep $SleepTime -send "\r" -expect "Filename :" -# sleep $SleepTime -send "$outfile\r" -expect "mama>" -sleep $SleepTime -send "st\r" -expect "Are you sure you want to exit? 
(y/n)" -send "y\r" \ No newline at end of file diff --git a/docs/documentation/detector_housing1.pdf b/docs/documentation/detector_housing1.pdf new file mode 100644 index 0000000..ae62c3b Binary files /dev/null and b/docs/documentation/detector_housing1.pdf differ diff --git a/docs/documentation/detector_housing2.pdf b/docs/documentation/detector_housing2.pdf new file mode 100644 index 0000000..ce749a3 Binary files /dev/null and b/docs/documentation/detector_housing2.pdf differ diff --git a/OCL/documentation/files/BrilLanCe 380 Data Sheet.pdf b/docs/documentation/files/BrilLanCe 380 Data Sheet.pdf similarity index 100% rename from OCL/documentation/files/BrilLanCe 380 Data Sheet.pdf rename to docs/documentation/files/BrilLanCe 380 Data Sheet.pdf diff --git a/OCL/documentation/files/Glass_information.pdf b/docs/documentation/files/Glass_information.pdf similarity index 100% rename from OCL/documentation/files/Glass_information.pdf rename to docs/documentation/files/Glass_information.pdf diff --git a/OCL/documentation/files/High_energy_PMT_TPMO0007E-1.pdf b/docs/documentation/files/High_energy_PMT_TPMO0007E-1.pdf similarity index 100% rename from OCL/documentation/files/High_energy_PMT_TPMO0007E-1.pdf rename to docs/documentation/files/High_energy_PMT_TPMO0007E-1.pdf diff --git a/OCL/documentation/files/LaBr_2009_SaintGobain-Flamanc_Milan_L.pdf b/docs/documentation/files/LaBr_2009_SaintGobain-Flamanc_Milan_L.pdf similarity index 100% rename from OCL/documentation/files/LaBr_2009_SaintGobain-Flamanc_Milan_L.pdf rename to docs/documentation/files/LaBr_2009_SaintGobain-Flamanc_Milan_L.pdf diff --git a/OCL/documentation/files/NIMA-D-13-00318R1 NIM-D-13-00318R1.pdf b/docs/documentation/files/NIMA-D-13-00318R1 NIM-D-13-00318R1.pdf similarity index 100% rename from OCL/documentation/files/NIMA-D-13-00318R1 NIM-D-13-00318R1.pdf rename to docs/documentation/files/NIMA-D-13-00318R1 NIM-D-13-00318R1.pdf diff --git a/OCL/documentation/files/SGC BrilLanCe Scintillators Performance Summary.pdf b/docs/documentation/files/SGC BrilLanCe Scintillators Performance Summary.pdf similarity index 100% rename from OCL/documentation/files/SGC BrilLanCe Scintillators Performance Summary.pdf rename to docs/documentation/files/SGC BrilLanCe Scintillators Performance Summary.pdf diff --git a/OCL/documentation/files/truncated_icosahedron.wrl b/docs/documentation/files/truncated_icosahedron.wrl similarity index 100% rename from OCL/documentation/files/truncated_icosahedron.wrl rename to docs/documentation/files/truncated_icosahedron.wrl diff --git a/OCL/documentation/frame/Koord_ball.csv b/docs/documentation/frame/Koord_ball.csv similarity index 100% rename from OCL/documentation/frame/Koord_ball.csv rename to docs/documentation/frame/Koord_ball.csv diff --git a/OCL/documentation/frame/Koord_ball.xlsx b/docs/documentation/frame/Koord_ball.xlsx similarity index 100% rename from OCL/documentation/frame/Koord_ball.xlsx rename to docs/documentation/frame/Koord_ball.xlsx diff --git a/OCL/documentation/frame/convert_xyz_to_spheric.py b/docs/documentation/frame/convert_xyz_to_spheric.py similarity index 92% rename from OCL/documentation/frame/convert_xyz_to_spheric.py rename to docs/documentation/frame/convert_xyz_to_spheric.py index dd418d1..338ce7c 100644 --- a/OCL/documentation/frame/convert_xyz_to_spheric.py +++ b/docs/documentation/frame/convert_xyz_to_spheric.py @@ -64,13 +64,14 @@ def rotated_vector(vector, axis, theta): sph_coords = append_spherical_np(xyz) # sph_coords[:,4:] /= np.pi -# 
-# np.set_printoptions(precision=2,suppress=True)
-np.set_printoptions(suppress=True)
+np.set_printoptions(precision=1,suppress=True)
+# np.set_printoptions(suppress=True)
 
 #convert to degree
 sph_coords[:,-2:] *= 180/np.pi
-# print sph_coords[:,-2:] # print (theta,phi)
-# print sph_coords
+a = sph_coords[:, -2:]
+print(a[a[:, 0].argsort()[::-1]]) # print (theta,phi)
+# print(sph_coords)
 
 n_hexa = 20
 n_pent = 12
@@ -93,3 +94,5 @@ def rotated_vector(vector, axis, theta):
 \nOCLLaBr3_theta[{:2d}]\t\t\t= {:.5f}*deg;\
 \nOCLLaBr3_phi[{:2d}]\t\t\t= {:.5f}*deg;\n\
 ".format(n,n,n,"20*cm",n,sph_coords[n,-2],n,sph_coords[n,-1]))
+
+print("\n Note: numbering in this script seems wrong!")
diff --git a/docs/documentation/frame/convert_xyz_to_spheric.py.bak b/docs/documentation/frame/convert_xyz_to_spheric.py.bak
new file mode 100644
index 0000000..037635e
--- /dev/null
+++ b/docs/documentation/frame/convert_xyz_to_spheric.py.bak
@@ -0,0 +1,95 @@
+import numpy as np
+import math
+
+def append_spherical_np(xyz):
+    ptsnew = np.hstack((xyz, np.zeros(xyz.shape)))
+    # ptsnew = np.zeros(xyz.shape)
+    xy = xyz[:,0]**2 + xyz[:,1]**2
+    ptsnew[:,3] = np.sqrt(xy + xyz[:,2]**2)
+    ptsnew[:,4] = np.arctan2(np.sqrt(xy),xyz[:,2])  # theta (0,pi); from z axis down
+    ptsnew[:,5] = np.arctan2(xyz[:,1], xyz[:,0])    # phi (-pi,pi)
+
+    # map phi (-pi,pi) to (0,2*pi)
+    gt_idx = ptsnew[:,-1] < 0  # get rows where column [:,i] is negative
+    ptsnew[gt_idx,-1] = 2.*np.pi + ptsnew[gt_idx,-1]
+
+    return ptsnew
+
+def rotation_matrix(axis, theta):
+    """
+    Return the rotation matrix associated with counterclockwise rotation about
+    the given axis by theta radians.
+    """
+    axis = np.asarray(axis)
+    axis = axis/math.sqrt(np.dot(axis, axis))
+    a = math.cos(theta/2.0)
+    b, c, d = -axis*math.sin(theta/2.0)
+    aa, bb, cc, dd = a*a, b*b, c*c, d*d
+    bc, ad, ac, ab, bd, cd = b*c, a*d, a*c, a*b, b*d, c*d
+    return np.array([[aa+bb-cc-dd, 2*(bc+ad), 2*(bd-ac)],
+                     [2*(bc-ad), aa+cc-bb-dd, 2*(cd+ab)],
+                     [2*(bd+ac), 2*(cd-ab), aa+dd-bb-cc]])
+
+def rotated_vector(vector, axis, theta):
+    return np.dot(rotation_matrix(axis,theta), vector)
+
+
+# mm; Distance of global model "centre" to centre of ball/OSCAR
+dx_from_center = 267.655
+
+# load dataset
+data = np.loadtxt("Koord_ball.csv")
+
+# pick global xyz coordinates
+xyz = data[:,4:]
+# transform such that center of the ball is the origin
+xyz[:,0] -= dx_from_center
+
+# Rotate the axes such that it fits to our setup
+v = np.array([[1,0,0],[2,0,1]])
+
+xaxis = [1, 0 , 0]
+xtheta = np.pi/10. # rotation angle
+
+yaxis = [0, 1, 0]
+ytheta = -np.pi/2. # rotation angle
+
+xyz_org = np.copy(xyz)
+v_dummy = np.zeros((1,3))
+# print v_dummy
+for i, vec in enumerate(xyz_org):
+    v_dummy = rotated_vector(vec,xaxis,xtheta)
+    xyz[i] = rotated_vector(v_dummy,yaxis,ytheta)
+
+sph_coords = append_spherical_np(xyz)
+# sph_coords[:,4:] /= np.pi
+
+# np.set_printoptions(precision=2,suppress=True)
+np.set_printoptions(suppress=True)
+
+#convert to degree
+sph_coords[:,-2:] *= 180/np.pi
+# print sph_coords[:,-2:] # print (theta,phi)
+# print sph_coords
+
+n_hexa = 20
+n_pent = 12
+n_tot = n_pent + n_hexa
+for i in range(n_hexa):
+    print "frameHexagon_theta[%2i]\t= %f*deg;\
+    \nframeHexagon_phi[%2i]\t= %f*deg;\n\
+    " % (i,sph_coords[i,-2],i,sph_coords[i,-1],i,0)
+
+for n in range(n_pent):
+    i = n_tot - n_pent + n
+    print "framePentagon_theta[%2i]\t= %f*deg;\
+    \nframePentagon_phi[%2i]\t= %f*deg;\n\
+    " % (n,sph_coords[i,-2],n,sph_coords[i,-1],n,0)
+
+for n in range(n_tot):
+    print "OCLLaBr3_presence[%2i]\t\t= true;\
+    \nOCLCollimator_presence[%2i]\t= true;\
+    \nOCLLaBr3_Distance[%2i]\t\t= %s;\
+    \nOCLLaBr3_theta[%2i]\t\t\t= %f*deg;\
+    \nOCLLaBr3_phi[%2i]\t\t\t= %f*deg;\n\
+    " % (n,n,n,"20*cm",n,sph_coords[n,-2],n,sph_coords[n,-1])
\ No newline at end of file
diff --git a/OCL/documentation/frame/hexagons.txt b/docs/documentation/frame/hexagons.txt
similarity index 100%
rename from OCL/documentation/frame/hexagons.txt
rename to docs/documentation/frame/hexagons.txt
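As a cross-check of the Cartesian-to-spherical conversion in `append_spherical_np` (theta measured from the +z axis, phi mapped onto [0°, 360°)), a standalone sketch:

```python
import numpy as np

def to_spherical(xyz):
    # r, theta (from +z), phi in [0, 2*pi), as in append_spherical_np
    x, y, z = xyz.T
    r = np.sqrt(x**2 + y**2 + z**2)
    theta = np.arctan2(np.sqrt(x**2 + y**2), z)
    phi = np.arctan2(y, x) % (2 * np.pi)
    return np.c_[r, theta, phi]

pts = np.array([[1., 0., 0.],    # on +x: theta = 90, phi = 0
                [0., -1., 0.],   # on -y: theta = 90, phi = 270
                [0., 0., 1.]])   # on +z: theta = 0
print(np.degrees(to_spherical(pts)[:, 1:]))
# [[ 90.   0.]
#  [ 90. 270.]
#  [  0.   0.]]
```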