diff --git a/.gitignore b/.gitignore index d448285749..5c6960e124 100644 --- a/.gitignore +++ b/.gitignore @@ -29,6 +29,11 @@ smodels/lib/nllfast/nllfast-1.2/old/ smodels/lib/nllfast/nllfast-2.1/fort.21 smodels/lib/pythia6/fort.61 smodels/lib/pythia6/pythia_card.dat +smodels/lib/resummino/resummino-*/ +smodels/lib/resummino/resummino-*.zip +smodels/lib/resummino/lhapdf/ +smodels/lib/resummino/LHAPDF-*/ +smodels/lib/resummino/resummino_install/ debian/files debian/python-module-stampdir/ debian/smodels.debhelper.log @@ -53,6 +58,7 @@ docs/manual/source/recipes/compareUL.py docs/manual/source/recipes/runAsLibrary.py docs/manual/source/recipes/runWithParameterFile.py docs/manual/source/recipes/compute_likelihood.py +docs/manual/source/recipes/lheLLPExample.py docs/manual/source/recipes/lo_xsecs_from_slha.py docs/manual/source/recipes/load_database.py docs/manual/source/recipes/lookup_efficiency.py @@ -75,6 +81,7 @@ docs/manual/source/RunSModelS.rst docs/manual/source/SlhaChecker.rst docs/manual/source/ToolBox.rst docs/manual/source/XSecComputer.rst +docs/manual/source/XSecResummino.rst docs/manual/source/FixPermissions.rst docs/manual/source/InteractivePlots.rst docs/manual/source/images/iplots_parameters.py diff --git a/.readthedocs.yml b/.readthedocs.yml index 75db84705f..067ba463c9 100644 --- a/.readthedocs.yml +++ b/.readthedocs.yml @@ -1,8 +1,15 @@ # .readthedocs.yml +version: 2 + build: - image: latest + os: ubuntu-22.04 + tools: + python: "3.11" + +sphinx: + configuration: docs/manual/source/conf.py python: - version: 3.6 - setup_py_install: true + install: + - requirements: docs/manual/requirements.txt diff --git a/Makefile b/Makefile index a30f1e27b7..44c320b7ab 100644 --- a/Makefile +++ b/Makefile @@ -3,7 +3,7 @@ VER=$(shell cat smodels/version) HAS_FC := $(shell smodels/lib/check_fortran_compiler.sh 2> /dev/null) HAS_CXX := $(shell command -v $(CXX) 2> /dev/null) -all: resolve_deps externaltools +all: resolve_deps # make all just resolves dependencies check_compilers: .PHONY ifndef HAS_FC @@ -17,14 +17,16 @@ resolve_deps: ## resolve the deps via pip @echo "try to resolve the python dependencies via pip" smodels/installation.py -R -smodels: all tidy +smodels_externaltools: resolve_deps externaltools @echo @echo "done. you can now run the software directly from this source directory.\n" @echo "Try e.g. \n\n ./runSModelS.py --help\n" @echo "The latest SModelS documentation can be found at: http://smodels.readthedocs.io/en/latest/" @echo "For this version documentation go to: https://smodels.readthedocs.io/en/v$(VER)" -smodels_noexternaltools: resolve_deps tidy +smodels: resolve_deps + +smodels_noexternaltools: resolve_deps @echo @echo "done. you can now run the software directly from this source directory.\n" @echo "Try e.g. 
\n\n ./runSModelS.py --help\n" @@ -47,6 +49,12 @@ pythia6: pythia8: cd smodels/lib && make pythia8 +resummino: + cd smodels/lib && make resummino + +nllfast: + cd smodels/lib && make nllfast + cpp: .PHONY cd cpp && make @@ -60,13 +68,13 @@ buildrpm: builddeb: buildrpm cd dist && fakeroot alien smodels-$(VER)-1.x86_64.rpm -pypi: +pypi: clean ## pypi user is walten, repository is https://upload.pypi.org/legacy/ rm -rf dist python3 setup.py sdist bdist_wheel twine upload dist/smodels-*.tar.gz -testpypi: +testpypi: clean ## testpypi user is smodels, repository is https://test.pypi.org/legacy/ # to install from testpypi: # pip3 install --user --upgrade --index-url https://test.pypi.org/simple/ smodels diff --git a/ReleaseNotes b/ReleaseNotes index dce18daf60..7de27111c2 100644 --- a/ReleaseNotes +++ b/ReleaseNotes @@ -1,3 +1,12 @@ +Release v2.3.3, Tue 19 Dec 2023 +======================================================= + + * added resummino cross section computer + * fixed bug in computation of error on muhat, for pyhf likelihoods + (affects mostly only numpy backend) + * small change in initialisation of gradient descent method for computation of + combined mu_hat, to increase robustness of method + Release v2.3.2, Wed 30 Aug 2023 ======================================================= diff --git a/docs/manual/requirements.txt b/docs/manual/requirements.txt index 529f71ed5b..6387b4353d 100644 --- a/docs/manual/requirements.txt +++ b/docs/manual/requirements.txt @@ -1,11 +1,13 @@ ###### pythonic requirements for readthedocs.io ###### jupyter nbconvert -unum -pyslha -numpy -scipy sphinx_rtd_theme -plotly -pyhf -torch +docutils==0.16 +numpy>=1.22.0 +scipy>=1.0.0 +unum>=4.0.0 +requests>=2.0.0 +pyslha>=3.1.0 +pyhf>=0.6.1 +jsonpatch>=1.25 +jsonschema>=3.2.0 diff --git a/docs/manual/rstFromHelpFiles.py b/docs/manual/rstFromHelpFiles.py index 7ba89d3430..5759327ce8 100755 --- a/docs/manual/rstFromHelpFiles.py +++ b/docs/manual/rstFromHelpFiles.py @@ -63,4 +63,5 @@ def runSModelS (): run ( "interactive-plots", "InteractivePlots" ) run ( "toolbox", "ToolBox" ) run ( "fixpermissions", "FixPermissions" ) +run ( "xsecresummino", "XSecResummino" ) runSModelS() diff --git a/docs/manual/source/Installation.rst b/docs/manual/source/Installation.rst index 6e5d8a4ee8..be80c8c0ca 100644 --- a/docs/manual/source/Installation.rst +++ b/docs/manual/source/Installation.rst @@ -19,15 +19,16 @@ SModelS is a Python library that requires Python version 3.6 or later. It depend .. include:: dependencies.rst -For speed reasons, we moreover recommend pytorch>=1.8.0 as backend for pyhf. This is, however, optional: if pytorch is not available, SModelS will use the default backend. +For performance reasons, we moreover recommend pytorch>=1.8.0 as backend for pyhf. This is, however, optional: if pytorch is not available, SModelS will use the default backend. -In addition, the :ref:`cross section computer ` provided by :ref:`smodelsTools.py ` -requires: +In addition, the :ref:`cross section computers ` provided by :ref:`smodelsTools.py ` +require: - * `Pythia 8.3 `_ (requires a C++ compiler) or `Pythia 6.4.27 `_ (requires gfortran) - * `NLL-fast `_ 1.2 (7 TeV), 2.1 (8 TeV), and 3.1 (13 TeV) (requires a fortran compiler) + * `Pythia 8.3 `_ (needs a C++ compiler) or `Pythia 6.4.27 `_ (needs gfortran) + * `NLL-fast `_ 1.2 (7 TeV), 2.1 (8 TeV), and 3.1 (13 TeV) (needs a Fortran compiler) + * `Resummino `_ (needs a C++ compiler and gfortran).
Moreover, in rpm-based Linux distributions, this tool needs boost, boost-devel, gsl and gsl-devel. For deb-based distributions, libboost-dev and libgsl-dev are required. -These tools need not be installed separately, as the SModelS build system takes care of that. The current default is that both Pythia6 and Pythia8 are installed together with NLLfast. +The tools themselves, i.e. Pythia6|8, NLL-fast, and/or Resummino, need not be installed separately, as the SModelS build system takes care of that (see below). SModelS, however, expects the tools' dependencies (boost, gsl for Resummino, as well as the compilers) to be installed by the user. Finally, the :ref:`database browser ` provided by :ref:`smodelsTools.py ` requires `IPython `_, while the :ref:`interactive plotter ` requires `plotly `_ and `pandas `_. @@ -43,27 +44,30 @@ Installation Methods make smodels - in the top-level directory. The installation will remove redundant folders, install the required - dependencies (using pip install) and compile Pythia and NLL-fast. - If the MSSM cross section computer is not needed, one can install SModelS without Pythia and NLL-fast. To this end, run:: + in the top-level directory. SModelS will install the required dependencies (using pip install), but none of the external tools (Pythia6|8, NLL-fast, Resummino). + If the MSSM :ref:`cross section computers ` are needed, one can directly install SModelS together with its external tools. To this end, run:: - make smodels_noexternaltools + make smodels_externaltools instead of *make smodels*. - In case the Python libraries can not be successfully - installed, the user can install them separately using his/her preferred method. Pythia and NLL-fast can also be compiled separately + In case the Python libraries cannot be successfully + installed, the user can install them separately using his/her preferred method. Pythia, NLL-fast and Resummino can also be compiled separately running **make externaltools**. In case the Fortran compiler isn't found, try *make FCC= smodels* or *make FCC= externaltools*. - * If Python's *setuptools* is installed in your machine, SModelS and its dependencies - can also be installed without the use of pip. + * Every external tool can also be compiled individually; run e.g.:: + + make pythia6 pythia8 nllfast resummino + + Remember, though, that the compilers as well as Resummino's dependencies (boost, gsl, see above) need to be installed already. + + * Python's *setuptools*, if installed on your machine, can also be used for installing SModelS and its dependencies. After downloading the source from the `SModelS releases page `_ and extracting it, run:: - setup.py install - within the main smodels directory. If the python libraries are installed in a system folder (as is the default behavior), + within the main SModelS directory. If the python libraries are installed in a system folder (as is the default behavior), it will be necessary to run the install command with superuser privilege. Alternatively, one can run setup.py with the "--user" flag: :: @@ -73,16 +77,16 @@ Installation Methods manually and then rerun setup.py. For Ubuntu, SL6 machines and other platforms, a recipe is given below. - - Note that this installation method will install smodels into the default system or user directory (e.g. ~/.local/lib/python3/site-packages/). + Note that this installation method will install SModelS into the default system or user directory (e.g. ~/.local/lib/python3.10/site-packages/).
Depending on your platform, the environment variables $PATH, $PYTHONPATH, $LD_LIBRARY_PATH (or $DYLD_LIBRARY_PATH) might have to be set appropriately. + Note also that setup.py will *not* attempt to download and compile the external tools (Pythia6|8, NLL-fast, Resummino) at install time. + Instead, this will be done on the fly, at runtime, upon call of the :ref:`cross section computer(s) `. + The external tools will also be located in the above smodels installation directory (/lib/...). - - * Finally, SModelS is `indexed on pypi `_. Thus, if *pip3* (or *pip*) is installed in your machine, it is also possible to install SModelS directly without the need for - downloading the source code: :: + * Finally, SModelS is `indexed on pypi `_. Thus, if *pip3* (or *pip*) is installed on your machine, it is possible to install SModelS without downloading the source code: :: pip3 install smodels @@ -92,14 +96,17 @@ Installation Methods for user-specific installations. - - Note that this installation method will install smodels into the default system or user directory (e.g. ~/.local/lib/python3/site-packages/). + This installation method will install SModelS into the default system or user directory (e.g. ~/.local/lib/python3.10/site-packages/). Depending on your platform, the environment variables $PATH, $PYTHONPATH, $LD_LIBRARY_PATH (or $DYLD_LIBRARY_PATH) might have to be set appropriately. Be aware that the example files and the |parameters| discussed in the manual will also be located in your default system or user directory. Furthermore the database - folder is not included (see :ref:`database installation ` below). - This installation method is best suited for experienced python users. + folder is not included (see :ref:`database installation ` below). + + Moreover, pip will *not* attempt to download and compile the external tools (Pythia6|8, NLL-fast, Resummino) at install time. Instead, this will be done on the fly, at runtime, upon call of the :ref:`cross section computer(s) `. + The external tools will also be located in the above smodels installation directory (/lib/...). + + Generally, this installation method is best suited for experienced python users.
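+
+   As a minimal sketch of this pip-based workflow (assuming the smodelsTools.py script ends up on your $PATH; my_spectrum.slha is a placeholder file name), the first call of the cross section computer triggers the on-the-fly compilation described above: ::
+
+      pip3 install --user smodels
+      smodelsTools.py xseccomputer -s 13 -8 -p -f my_spectrum.slha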
There is also a diagnostic tool available: :: diff --git a/docs/manual/source/ReleaseUpdate.rst b/docs/manual/source/ReleaseUpdate.rst index e3fcaf41c5..2bff36aa0a 100644 --- a/docs/manual/source/ReleaseUpdate.rst +++ b/docs/manual/source/ReleaseUpdate.rst @@ -37,6 +37,15 @@ What's New ========== The major novelties of all releases since v1.0 are as follows: +New in Version 2.3.3: +^^^^^^^^^^^^^^^^^^^^^ + + * added :ref:`resummino cross section ` computer + * fixed bug in computation of error on muhat, for `pyhf likelihoods `_ + (affects mostly only numpy backend) + * small change in initialisation of gradient descent method for computation of + `combined mu_hat `_, to increase robustness of method + New in Version 2.3.2: ^^^^^^^^^^^^^^^^^^^^^ diff --git a/docs/manual/source/SModelSTools.rst b/docs/manual/source/SModelSTools.rst index c9e33d2b9c..a16779ba83 100644 --- a/docs/manual/source/SModelSTools.rst +++ b/docs/manual/source/SModelSTools.rst @@ -29,8 +29,8 @@ SModelS Tools Inside SModelS there is a number of tools that may be convenient for the user: -* a :ref:`cross section calculator ` based on `Pythia8 `_ (or `Pythia6 `_) and - `NLLfast `_, +* a :ref:`cross section calculator ` based on `Pythia8 `_ (or `Pythia6 `_), + `NLLfast `_, and one based on `Resummino `_, * :ref:`SLHA and LHE file checkers ` to check your input files for completeness and sanity, * a :ref:`database browser ` to provide easy access to the |database| of experimental results, * a plotting tool to make :ref:`interactive plots ` based on `plotly `_ (v1.1.3 onwards), @@ -39,15 +39,19 @@ Inside SModelS there is a number of tools that may be convenient for the user: .. _xsecCalc: -Cross Section Calculator ------------------------- +Cross Section Calculators +------------------------- -This tool computes LHC production cross sections for *MSSM particles* -and writes them out in :ref:`SLHA convention `. This can in particular be +These tools compute LHC production cross sections for *MSSM particles* +and write them out in :ref:`SLHA convention `. This is particularly convenient for adding cross sections to SLHA input files, see :doc:`Basic Input `. -The calculation is done at LO with `Pythia8 `_ or `Pythia6.4 `_ ; K-factors -for colored particles are computed with `NLLfast `_. Signal strength multipliers can optionally be supplied for each "mother" particle. However, use at your own risk! Make sure the -output is sensible and contains all cross sections for all production mechanisms + + +Pythia and NLLfast Cross Sections +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +In the basic "xseccomputer", the calculation is done at LO with `Pythia8 `_ or `Pythia6.4 `_ ; K-factors +for colored particles are computed with `NLLfast `_. Signal strength multipliers can optionally be supplied for each "mother" particle. However, use at your own risk! Make sure the output is sensible and contains all cross sections for all production mechanisms you are interested in! **The usage of the cross section calculator is:** .. include:: XSecComputer.rst @@ -83,8 +87,33 @@ This will compute 8 TeV and 13 TeV LO cross sections as above, but the cross sec Note that signal strength multipliers get applied only to LO cross sections. This means they are propagated to NLO and NLL level only if the LO cross sections are computed first and the NLO/NLL corrections added afterwards. In other words, if the xseccomputer is called with -n or -N argument but without -O (--LOfromSLHA), the --ssmultipliers argument will be ignored.
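+
+For illustration, applying a signal strength multiplier of 2.0 to gluino-pair production could look as follows (the PID pair and the multiplier value are made-up examples; the dictionary syntax is the one expected by the --ssmultipliers option): ::
+
+   smodelsTools.py xseccomputer -s 13 -8 -p -f my_spectrum.slha --ssmultipliers "{(1000021,1000021): 2.0}"
+
+As explained above, a subsequent run with -N -O is then needed if the multiplied LO cross sections are to be propagated to the NLO+NLL level.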
-* **The cross section calculation is implemented by the** `xsecComputer function `_ +* **The cross section calculation is implemented by the** `XSecComputer class `_ + +.. _xsecResummino: + +Resummino Cross Sections +^^^^^^^^^^^^^^^^^^^^^^^^ + +For electroweak-ino and/or slepton production cross sections, another tool based on `Resummino `_ is available. This can calculate EW cross sections at LO, NLO, and NLL+NLO orders. No K-factors are used; all orders are calculated independently. + +**The usage of the Resummino cross section calculator is:** + +.. include:: XSecResummino.rst + +A typical +usage example is: :: + + smodelsTools.py xsecresummino -s 13 -p -n -f test/testFiles/resummino/resummino_example.slha -part 1000023 1000024 + +This will compute the (1000023,-1000024), (1000023, 1000024) and (-1000024,1000024) cross sections for sqrt(s)=13 TeV at NLO for the spectrum given in resummino_example.slha and append them to that SLHA file. +Additional settings, like the PDF sets to use, are taken from the Resummino configuration file, smodels/etc/resummino.py. The default threshold for the minimum cross section is also set there. + +Instead of providing a list of particles via the --part argument, one can also directly specify in the resummino.py configuration file which production channels shall be considered. To this end one can either adapt the resummino.py file in the smodels/etc folder, or provide a custom configuration file via the --conf argument. +Note that options set directly on the command line always take precedence over the settings in the +configuration file. + +* **The resummino cross section calculation is implemented by the** `XSecResummino class `_ .. _fileChecks: diff --git a/docs/manual/source/recipes/plotCombinedLikelihood.py b/docs/manual/source/recipes/plotCombinedLikelihood.py new file mode 100644 index 0000000000..31d26ab042 --- /dev/null +++ b/docs/manual/source/recipes/plotCombinedLikelihood.py @@ -0,0 +1,113 @@ +#!/usr/bin/env python +# coding: utf-8 + +# In[1]: + + +import os,sys +# Set up the path to SModelS installation folder +import sys; sys.path.append("."); import smodels_paths +import numpy as np +import matplotlib.pyplot as plt + +from smodels.tools.modelTester import getCombiner + + +# ### Define input (SLHA) file and the parameters file + +# In[2]: + + +slhafile = os.path.expanduser("./inputFiles/slha/ewino_example.slha") +# Define parameters file with combineAnas = ATLAS-SUSY-2019-08,ATLAS-SUSY-2019-09: +parfile = os.path.expanduser("./parameters_comb.ini") + + +# ### Define some basic parameters for plotting the likelihoods + +# In[3]: + + +expected = False # whether to plot the observed or expected likelihood +normalize = True # whether to normalize the likelihoods +muvals = np.linspace(0.,5.,200) # Signal strength values for which to evaluate the likelihoods + + +# ### Run SModelS and get the analysis combination + +# In[4]: + + +combiner = getCombiner(slhafile, parfile) + + +# ### Use the combination to evaluate the likelihoods + +# In[5]: + + +llhdDict = combiner.getLlhds(muvals,expected,normalize) + + +# ### Compute L_SM, L_BSM, L_max and the UL on mu + +# In[6]: + + +muhat = combiner.muhat() +lmax = combiner.lmax() +lsm = combiner.lsm() +lbsm = combiner.likelihood(mu=1.0) +muULDict = {'combined' : combiner.getUpperLimitOnMu()} +for theoryPred in combiner.theoryPredictions: + anaID = theoryPred.analysisId() + muULDict[anaID] = theoryPred.getUpperLimitOnMu() + + +# ### Plot the results + +# In[7]: + + +fig = plt.figure(figsize=(8,4)) +ymin =
0. +ymax = 0. +for anaID,l in llhdDict.items(): + if anaID == 'combined': + zorder = 100 + linestyle = '--' + else: + zorder = None + linestyle = '-' + + p = plt.plot(muvals,l,label=anaID,linewidth=3,zorder=zorder,linestyle=linestyle) + ymin = min(ymin,min(l)) + ymax = max(ymax,max(l)) + + plt.vlines(muULDict[anaID],ymin=ymin,ymax=ymax,linestyle='--',linewidth=2, + label=r'$\mu_{UL}$ (%s)' %anaID,color=p[0].get_color(),alpha=0.7) + + +plt.vlines(muhat,ymin=ymin,ymax=ymax,linestyle='--',linewidth=2, + label=r'$\hat{\mu}$',color='black',alpha=0.7) + + +# plt.yscale('log') +# plt.ylim(1e-1,1e1) +plt.xlim(muvals.min(),muvals.max()) +plt.xlabel(r'$\mu$') +if normalize: + plt.ylabel('Normalized Likelihood') +else: + plt.ylabel('Likelihood') +plt.legend(framealpha=1) +plt.title(r'$\hat{\mu} = $ %1.2f, $L_{max} =$ %1.2e, $L_{SM} =$ %1.2e, $L_{BSM} =$ %1.2e' %(muhat,lmax,lsm,lbsm)) +plt.tight_layout() +plt.show() + + +# In[ ]: + + + + diff --git a/docs/manual/source/share.models.rst b/docs/manual/source/share.models.rst new file mode 100644 index 0000000000..c9a43d0efa --- /dev/null +++ b/docs/manual/source/share.models.rst @@ -0,0 +1,53 @@ +share.models package +==================== + +Submodules +---------- + +share.models.SMparticles module +------------------------------- + +.. automodule:: share.models.SMparticles + :members: + :undoc-members: + :show-inheritance: + +share.models.dgmssm module +-------------------------- + +.. automodule:: share.models.dgmssm + :members: + :undoc-members: + :show-inheritance: + +share.models.idm module +----------------------- + +.. automodule:: share.models.idm + :members: + :undoc-members: + :show-inheritance: + +share.models.mssm module +------------------------ + +.. automodule:: share.models.mssm + :members: + :undoc-members: + :show-inheritance: + +share.models.nmssm module +------------------------- + +.. automodule:: share.models.nmssm + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: share.models + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/manual/source/share.rst b/docs/manual/source/share.rst new file mode 100644 index 0000000000..c8d1d313ae --- /dev/null +++ b/docs/manual/source/share.rst @@ -0,0 +1,18 @@ +share package +============= + +Subpackages +----------- + +.. toctree:: + :maxdepth: 4 + + share.models + +Module contents +--------------- + +.. automodule:: share + :members: + :undoc-members: + :show-inheritance: diff --git a/setup.py b/setup.py index 76638ba7ef..88c2a29424 100755 --- a/setup.py +++ b/setup.py @@ -90,7 +90,7 @@ def dataFiles (): for directory in [ "smodels/share/", "smodels/share/models/", "smodels/etc/", "smodels/lib/nllfast/nllfast-1.2/", "smodels/lib/nllfast/nllfast-2.1/", "smodels/lib/nllfast/nllfast-3.1/", - "smodels/lib/pythia6/", "smodels/lib/pythia8/" ]: + "smodels/lib/pythia6/", "smodels/lib/pythia8/", "smodels/lib/resummino/" ]: ret.append ((directory, listDirectory (directory))) for directory in ["inputFiles/slha/", "inputFiles/lhe/" ]: ret.append (( "smodels/"+directory, listDirectory (directory))) diff --git a/smodels/etc/input_resummino.in b/smodels/etc/input_resummino.in new file mode 100644 index 0000000000..2db2a40858 --- /dev/null +++ b/smodels/etc/input_resummino.in @@ -0,0 +1,94 @@ +# Input card for Resummino. + +# This input card defines the hadron collider parameters and the process. The +# SUSY model is defined in a separate file in the SLHA file format. +# All energies and masses are in GeV. + +# Collider parameters. 
+collider_type = proton-proton # proton-proton or proton-antiproton +center_of_mass_energy = 13000 + +# Outgoing particles using the value of the PDG-scheme number. +# The particles are listed in the table below. A minus in front of the PDG +# means charge conjugation. +# ┌─────────────────────┬───────────────────┬───────────────────┐ +# │ e_L- = 11 │ nu_eL = 12 │ │ +# │ mu_L- = 13 │ nu_muL = 14 │ │ +# │ tau_1- = 15 │ nu_tau = 16 │ │ +# ├─────────────────────┼───────────────────┼───────────────────┤ +# │ ~e_L- = 1000011 │ ~e_R- = 2000011 │ ~nu_eL = 1000012 │ +# │ ~mu_L- = 1000013 │ ~mu_R- = 2000013 │ ~nu_muL = 1000014 │ +# │ ~tau_1- = 1000015 │ ~tau_2- = 2000015 │ ~nu_tau = 1000016 │ +# ├─────────────────────┼───────────────────┼───────────────────┤ +# │ ~chi_10 = 1000022 │ ~chi_20 = 1000023 │ ~chi_30 = 1000025 │ +# │ ~chi_40 = 1000035 │ ~chi_1+ = 1000024 │ ~chi_2+ = 1000037 │ +# ├─────────────────────┼───────────────────┼───────────────────┤ +# │ ~u_L = 1000002 │ ~u_R = 2000002 │ ~d_L = 1000001 │ +# │ ~d_R = 2000001 │ ~c_L = 1000004 │ ~c_R = 2000004 │ +# │ ~s_L = 1000003 │ ~s_R = 2000003 │ ~t_L = 1000006 │ +# │ ~t_R = 2000006 │ ~b_L = 1000005 │ ~b_R = 2000005 │ +# ├─────────────────────┼───────────────────┴───────────────────┤ +# │ ~g = 1000021 │ │ +# └─────────────────────┴───────────────────────────────────────┘ +# +particle1 = 1000023 +particle2 = 1000035 + +# Defines the computation to be performed. Three computations are supported: +# +# - result = total: Outputs the total cross section. +# +# - result = pt: Outputs the value for the transverse momentum at the +# value specified by the `pt` variable. +# +# - result = ptj: Outputs the value for the transverse momentum at the +# value specified by the `pt` variable using the joint +# resummation formalism. +# +# - result = m: Outputs the value for the invariant mass distribution at the +# value specified by the `M` variable. +# +result = total # total, pt, ptj or m. +M = auto # auto = sqrt((p1 + p2)^2) +pt = auto + +# SLHA input file that defines the SUSY benchmark point. +slha = outputM_12000M_20mu100.slha + +# Z'/W' model parameters. +# (For di-lepton production, input parameters are read from the zpwp file.) +#zpwp = ssm.in + +# PDF sets for LO and NLO. They should be present in the LHAPDF local setup. +pdf_format = lhgrid # lhgrid or lhpdf +#pdf_lo = cteq6l1 +pdf_lo = PDF4LHC21_40 +pdfset_lo = 0 +pdf_nlo = PDF4LHC21_40 +pdfset_nlo = 14 + +#scale factors +# (1.0 is central scale mu = (m1 + m2) / 2) +mu_f = 1.0 +mu_r = 1.0 + +# Integration parameters. +precision = 0.001 # desired precision +max_iters = 50 # maximum iterations +#ptcutmax = 0 #6491.18850451893 +#ptcutmin = 0.1 + +# Minimum invariant mass cut for integration for massless outgoing particles. Ignored otherwise. 
+# (Minv_min = auto sets Minv_min = (3/4)*Mz') +# (Minv_max = auto sets Minv_max = (5/4)*Mz') +Minv_min = auto +Minv_max = auto + +# optional PDF fit parameter +# weights +# (If you get weird fit results decrease the weight down to -2.0) +weight_valence = -1.6 +weight_sea = -1.6 +weight_gluon = -1.6 +# fit PDF from xmin to 1 (auto = mis/sh) +xmin = auto diff --git a/smodels/etc/resummino.py b/smodels/etc/resummino.py new file mode 100644 index 0000000000..f0fc0ab96e --- /dev/null +++ b/smodels/etc/resummino.py @@ -0,0 +1,22 @@ +{ + # channels: values (as python lists) are the pdg codes of the particles X,Y + # to be produced in the 2->2 processes p,p -> X,Y + # names of keys are used for debugging only + "channels" : {"1" : [1000023,1000024], "2" : [1000023, -1000024], + "3" : [1000024,-1000024] + }, + # ------------------------------------- + # The limit for the NLO calculation is determined by the 'xsec_limit' variable. + # If the LO cross section is below this value (in pb), no NLO (or NLO+NLL) cross-sections are + # calculated. + "xsec_limit" : 0.00001, + # pdfs (case-sensitive): our default is cteq66, which gives results close + # to the PDF4LHC21_40 set. Any pdf can be used, but be sure to check + # the name on the lhapdf website before putting any value here. + "pdfs" : { + "pdf_lo" : "cteq66", + "pdfset_lo" : 0, + "pdf_nlo" : "cteq66", + "pdfset_nlo" : 0 + } +} diff --git a/smodels/lib/Makefile b/smodels/lib/Makefile index 3f513c36fd..8a5f3dc9f7 100644 --- a/smodels/lib/Makefile +++ b/smodels/lib/Makefile @@ -1,5 +1,5 @@ -all: pythia nllfast -clean: clean_pythia clean_nllfast +all: pythia nllfast resummino +clean: clean_pythia clean_nllfast clean_resummino nllfast: nllfast-1.2 nllfast-2.1 nllfast-3.1 clean_nllfast: clean_nllfast-1.2 clean_nllfast-2.1 clean_nllfast-3.1 @@ -7,6 +7,9 @@ clean_nllfast: clean_nllfast-1.2 clean_nllfast-2.1 clean_nllfast-3.1 pythia: pythia8 pythia6 clean_pythia: clean_pythia8 clean_pythia6 ## we clean both +resummino: resummino-3.1.2 +clean_resummino: clean_resummino-3.1.2 + pythia6: .PHONY cd pythia6 && make clean_pythia6: @@ -27,5 +30,8 @@ nllfast-3.1: cd nllfast/nllfast-3.1 && make clean_nllfast-3.1: cd nllfast/nllfast-3.1 && make clean - +resummino-3.1.2: + cd resummino && make +clean_resummino-3.1.2: + cd resummino && make clean .PHONY: diff --git a/smodels/lib/resummino/Makefile b/smodels/lib/resummino/Makefile new file mode 100644 index 0000000000..5ad8949c4e --- /dev/null +++ b/smodels/lib/resummino/Makefile @@ -0,0 +1,8 @@ +all: install + +install: + ./clean.sh + ./install.sh + +clean: + ./clean.sh diff --git a/smodels/lib/resummino/clean.sh b/smodels/lib/resummino/clean.sh new file mode 100755 index 0000000000..abbb4a3ad2 --- /dev/null +++ b/smodels/lib/resummino/clean.sh @@ -0,0 +1,11 @@ +#!/bin/bash + + +install_dir=$PWD +cd $install_dir + +LINES=$(ls | grep -v '\.sh$' | grep -v Makefile | grep -v versions.txt ) +# echo "lines x${LINES}x" +[ -z "$LINES" ] || { + echo $LINES | xargs rm -r; +} diff --git a/smodels/lib/resummino/install.sh b/smodels/lib/resummino/install.sh new file mode 100755 index 0000000000..1c1efb3eea --- /dev/null +++ b/smodels/lib/resummino/install.sh @@ -0,0 +1,138 @@ +#!/bin/bash + +install_dir=$PWD +LHAPDF_VERSION="6.5.4" +RESUMMINO_VERSION="3.1.2" + +mkdir -p $install_dir +cd $install_dir + +get_cpu_cores() { + local num_cores=$(nproc) + if [ "$num_cores" -ge 3 ]; then + echo "$((num_cores / 2 - 1))" + else + echo "1" + fi +} + +cat << EOF > boost_check.cpp +#include <iostream> +#include <boost/version.hpp> + +int main() { + #ifdef
BOOST_VERSION + std::cout << "installed" << std::endl; + #else + std::cout << "not_installed" << std::endl; + #endif + return 0; +} +EOF + +g++ boost_check.cpp -o boost_check + +output=$(./boost_check) + +if [ "$output" = "installed" ]; then + echo "Boost is installed. Continuing script execution." +else + echo "Boost is not installed. Stopping script execution." + rm boost_check.cpp boost_check + exit 1 +fi + +# Clean temporary files if Boost is installed +rm boost_check.cpp boost_check + +if command -v gsl-config &>/dev/null; then + echo "GSL is installed." + GSL_VERSION=$(gsl-config --version) + echo "GSL version: $GSL_VERSION" +else + echo "GSL is not installed. Stopping script execution." + exit 1 +fi + +num_cores_to_use=$(get_cpu_cores) + +download_file() { + url="$1" + output="$2" + + if command -v wget > /dev/null; then + echo "Using wget to download $url" + wget "$url" -O "$output" + elif command -v curl > /dev/null; then + echo "Using curl to download $url" + curl -L "$url" -o "$output" + else + echo "Error: Neither curl nor wget is available for downloading files." + exit 1 + fi +} + +download_and_install_lhapdf() { + download_file "$1" "LHAPDF-$LHAPDF_VERSION.tar.gz" + tar xf "LHAPDF-$LHAPDF_VERSION.tar.gz" + cd "LHAPDF-$LHAPDF_VERSION" || { echo "Error: Unable to change directory to LHAPDF-$LHAPDF_VERSION"; return 1; } + ./configure --prefix=$install_dir/lhapdf --disable-python + make -j"$num_cores_to_use" + make install + cd .. + download_file "http://lhapdfsets.web.cern.ch/lhapdfsets/current/PDF4LHC21_40.tar.gz" "PDF4LHC21_40.tar.gz" + tar xz -C $install_dir/lhapdf/share/LHAPDF -f PDF4LHC21_40.tar.gz + cd $install_dir +} + +if [ ! -d "$install_dir/lhapdf" ]; then + if ! download_and_install_lhapdf "https://smodels.github.io/resummino/LHAPDF-$LHAPDF_VERSION.tar.gz"; then + echo "Failed to download from smodels.github.io, trying hepforge.org..." + download_and_install_lhapdf "https://lhapdf.hepforge.org/downloads/?f=LHAPDF-$LHAPDF_VERSION.tar.gz" + fi +fi + +# Checking for the existence of RESUMMINO +download_and_install_resummino() { + download_file "$1" "resummino-$RESUMMINO_VERSION.zip" + if [ -f "resummino-$RESUMMINO_VERSION.zip" ]; then + unzip "resummino-$RESUMMINO_VERSION.zip" + mkdir -p resummino_install + cd "resummino-$RESUMMINO_VERSION" || { echo "Error: Unable to change directory to resummino-$RESUMMINO_VERSION"; return 1; } + cmake . -DLHAPDF=$install_dir/lhapdf -DCMAKE_INSTALL_PREFIX=$install_dir/resummino_install + make -j"$num_cores_to_use" + make install + cd .. + return 0 + else + return 1 + fi +} + +if [ ! -d "$install_dir/resummino_install" ]; then + if ! download_and_install_resummino "https://smodels.github.io/resummino/resummino-$RESUMMINO_VERSION.zip"; then + echo "Failed to download from smodels.github.io, trying hepforge.org..."
+ download_and_install_resummino "https://resummino.hepforge.org/downloads/?f=resummino-$RESUMMINO_VERSION.zip" + fi +fi + +echo "# Note that this file gets overwritten when calling install.sh" > versions.txt +echo "# so do not define the versions here!" >> versions.txt +echo "# Instead, look at install.sh." >> versions.txt +echo "LHAPDF_version = $LHAPDF_VERSION" >> versions.txt +echo "resummino_version = $RESUMMINO_VERSION" >> versions.txt diff --git a/smodels/lib/resummino/versions.txt b/smodels/lib/resummino/versions.txt new file mode 100644 index 0000000000..a50385b351 --- /dev/null +++ b/smodels/lib/resummino/versions.txt @@ -0,0 +1,5 @@ +# Note that this file gets overwritten when calling install.sh +# so do not define the versions here! +# Instead, look at install.sh. +LHAPDF_version = 6.5.4 +resummino_version = 3.1.2 diff --git a/smodels/share/BANNER b/smodels/share/BANNER index c0a5682eae..d338b210fa 100644 --- a/smodels/share/BANNER +++ b/smodels/share/BANNER @@ -9,6 +9,6 @@ SModelS -- A tool for interpreting simplified-model results from the LHC, see https://smodels.github.io/. Copyright (C) 2012-2023 The SModelS collaboration, smodels-users@lists.oeaw.ac.at -Current members: Mohammad Mahdi Altakach, Sabine Kraml, Andre Lessa, Sahana Narashima, Timothee Pascal, Humberto Reyes-Gonzalez, Wolfgang Waltenberger +Current members: Mohammad Mahdi Altakach, Sabine Kraml, Andre Lessa, Sahana Narashima, Timothee Pascal, Theo Reymermier, Humberto Reyes-Gonzalez, Wolfgang Waltenberger Previously involved in SModelS: Gael Alguero, Federico Ambrogi, Juhi Dutta, Jan Heisig, Charanjit K. Khosa, Suchita Kulkarni, Ursula Laa, Veronika Magerl, Wolfgang Magerl, Philipp Neuhuber, Doris Proschofsky, Jory Sonneveld, Michael Traub, Matthias Wolf, Alicia Wongel diff --git a/smodels/tools/analysesCombinations.py b/smodels/tools/analysesCombinations.py index 87e2746edf..3143082d03 100644 --- a/smodels/tools/analysesCombinations.py +++ b/smodels/tools/analysesCombinations.py @@ -47,6 +47,7 @@ def likelihood( ) -> float: """ Compute the likelihood at a given mu + :param mu: signal strength :param expected: if True, compute expected likelihood, else observed :param return_nll: if True, return negative log likelihood, else likelihood @@ -81,6 +82,7 @@ def lmax( return_nll: bool = False, ) -> Union[Dict, None]: """find muhat and lmax. 
+ :param allowNegativeSignals: if true, then also allow for negative values :param expected: if true, compute expected prior (=lsm), if "posteriori" \ compute posteriori expected @@ -120,6 +122,10 @@ def lmax( toTry = [sum(weighted) / totweight] + #Add additional initialization points near the first one to make sure that the minimum is not lost due to small differences in the initialization values + fluc = max(1e-6, 1e-5*toTry[0]) #in case toTry[0] = 0.0, take 1e-06 + toTry += [toTry[0] - fluc, toTry[0] + fluc] + def fun(mu): # Make sure to always compute the correct llhd value (from theoryPrediction) # and not used the cached value (which is constant for mu~=1 an mu~=0) diff --git a/smodels/tools/basicStats.py b/smodels/tools/basicStats.py index 8d7d46e212..95866b1f7e 100755 --- a/smodels/tools/basicStats.py +++ b/smodels/tools/basicStats.py @@ -24,6 +24,7 @@ def CLsfromNLL( """ compute the CLs - alpha from the NLLs TODO: following needs explanation + :param nllA: :param nll0A: :param nll: @@ -60,6 +61,7 @@ def CLsfromNLL( def determineBrentBracket(mu_hat, sigma_mu, rootfinder, allowNegative = True ): """find a, b for brent bracketing + :param mu_hat: mu that maximizes likelihood :param sigm_mu: error on mu_hat (not too reliable) :param rootfinder: function that finds the root (usually root_func) diff --git a/smodels/tools/pyhfInterface.py b/smodels/tools/pyhfInterface.py index 267b25d312..b4cf40e211 100755 --- a/smodels/tools/pyhfInterface.py +++ b/smodels/tools/pyhfInterface.py @@ -62,6 +62,7 @@ pyhfinfo["backend"] = "pytorch" pyhfinfo["backendver"] = torch.__version__ + except pyhf.exceptions.ImportBackendError as e: print( "[SModelS:pyhfInterface] WARNING could not set pytorch as the pyhf backend, falling back to the default." ) @@ -592,6 +593,62 @@ def exponentiateNLL(self, twice_nll, doIt): return np.exp(-twice_nll / 2.0) return twice_nll / 2.0 + def compute_invhess(self, x, data, model, index, epsilon=1e-05): + ''' + if inv_hess is not given by the optimiser, calculate it numerically by evaluating the second-order partial derivatives with the 2-point central finite-difference method + :param x: parameter values given to pyhf.infer.mle.twice_nll taken from pyhf.infer.mle.fit - optimizer.x (best_fit parameter values) + :param data: workspace.data(model) passed to pyhf.infer.mle.fit + :param model: model passed to pyhf.infer.mle.fit + :param index: index of the POI + Note : If len(x) <=5, compute the entire hessian matrix and find its inverse.
Else, compute the hessian at the index of the POI and return its inverse (diagonal approximation) + returns the inverse hessian at the index of the poi + ''' + + n = len(x) + + if n<=5: + + hessian = np.zeros((n,n)) + for i in range(n): + for j in range(i+1): + eps_i = epsilon*np.eye(n)[:,i] #identity along ith column + eps_j = epsilon*np.eye(n)[:,j] + + #twice_nll is the objective function, hence need to find its hessian + par_11 = pyhf.infer.mle.twice_nll(x + eps_i + eps_j, data, model) + par_12 = pyhf.infer.mle.twice_nll(x - eps_i + eps_j, data, model) + par_21 = pyhf.infer.mle.twice_nll(x + eps_i - eps_j, data, model) + par_22 = pyhf.infer.mle.twice_nll(x - eps_i - eps_j, data, model) + + partial_xi_xj = (par_11 - par_12 - par_21 +par_22)/(4*epsilon**2) + hessian[i,j] = partial_xi_xj + if i!=j: hessian[j,i] = partial_xi_xj + + def is_positive_definite(matrix): + eigenvalues = np.linalg.eigvals(matrix) + return all(eig > 0 for eig in eigenvalues) + + if not is_positive_definite(hessian): + #raise ValueError("Hessian Matrix is not positive definite") + logger.warning("Hessian Matrix is not positive definite") + + inverse_hessian = np.linalg.inv(hessian) + + #return the inverse hessian at the poi + return inverse_hessian[index][index] + + #calculate only the hessian at the poi and return its inverse (an approximation!) + eps_i = epsilon*np.eye(n)[:,index] + par_11 = pyhf.infer.mle.twice_nll(x + 2*eps_i, data, model) + par_12 = pyhf.infer.mle.twice_nll(x, data, model) + par_22 = pyhf.infer.mle.twice_nll(x - 2*eps_i, data, model) + hessian = (par_11 - 2*par_12 + par_22)/(4*epsilon**2) + + #return the inverse hessian at the poi + return 1.0/hessian + + + def lmax( self, workspace_index=None, return_nll=False, expected=False, allowNegativeSignals=False): """ @@ -634,16 +691,15 @@ def lmax( self, workspace_index=None, return_nll=False, expected=False, bounds = model.config.suggested_bounds() if allowNegativeSignals: bounds[model.config.poi_index] = (-5., 10. ) - #import time - #t0 = time.time() + try: muhat, maxNllh, o = pyhf.infer.mle.fit(workspace.data(model), model, return_fitted_val=True, par_bounds = bounds, return_result_obj = True ) - sigma_mu = self.scale / float ( abs ( o.jac[model.config.poi_index] ) ) ## why did this not work? 
+ #removed jacobian way of computing sigma_mu + + except (pyhf.exceptions.FailedMinimization,ValueError) as e: pass - #t1 = time.time() - #print ( f"first fit {t1-t0:.2f}s sigma_mu {sigma_mu:.3f}" ) + if hasattr ( o, "hess_inv" ): # maybe the backend gets changed sigma_mu = float ( np.sqrt ( o.hess_inv[model.config.poi_index][model.config.poi_index] ) ) * self.scale else: @@ -652,6 +708,7 @@ def lmax( self, workspace_index=None, return_nll=False, expected=False, _1, _2, o = pyhf.infer.mle.fit(workspace.data(model), model, return_fitted_val=True, return_result_obj = True, init_pars = list(muhat), method="BFGS" ) sigma_mu_temp = float ( np.sqrt ( o.hess_inv[model.config.poi_index][model.config.poi_index] ) ) + except (pyhf.exceptions.FailedMinimization,ValueError) as e: pass if abs ( sigma_mu_temp - 1.0 ) > 1e-5: @@ -659,13 +716,22 @@ def lmax( self, workspace_index=None, return_nll=False, expected=False, else: _, _, o = pyhf.infer.mle.fit(workspace.data(model), model, return_fitted_val=True, return_result_obj = True, method="L-BFGS-B" ) + sigma_mu_temp = float ( np.sqrt ( o.hess_inv.todense()[model.config.poi_index][model.config.poi_index] ) ) if abs ( sigma_mu_temp - 1.0 ) > 1e-8: sigma_mu = sigma_mu_temp * self.scale # sigma_mu = float ( o.unc[model.config.poi_index] ) * self.scale except (pyhf.exceptions.FailedMinimization, ValueError) as e: - logger.error(f"pyhf mle.fit failed {e}") + if pyhfinfo["backend"] == "pytorch": + logger.warning(f"pyhf mle.fit failed {e} with {pyhfinfo['backend']} v{pyhfinfo['backendver']}. Calculating inv_hess numerically.") + if pyhfinfo["backend"] == "numpy": + logger.debug(f"pyhf mle.fit failed {e} with {pyhfinfo['backend']} v{pyhfinfo['backendver']}. Calculating inv_hess numerically.") + + #Calculate inv_hess numerically + inv_hess = self.compute_invhess(o.x, workspace.data(model), model, model.config.poi_index) + sigma_mu = float ( np.sqrt ( inv_hess)) * self.scale + muhat = float ( muhat[model.config.poi_index]*self.scale ) try: lmax = maxNllh.tolist() @@ -676,8 +742,7 @@ def lmax( self, workspace_index=None, return_nll=False, expected=False, except: lmax = float(lmax[0]) lmax = self.exponentiateNLL(lmax, not return_nll) - #t2 = time.time() - #print ( f"second fit {t2-t1:.2f}s sigma_mu {sigma_mu:.3f}" ) + ret = { "lmax": lmax, "muhat": muhat, "sigma_mu": sigma_mu } self.data.cached_lmaxes[workspace_index] = ret return ret diff --git a/smodels/tools/pythia8Wrapper.py b/smodels/tools/pythia8Wrapper.py index b488da050a..087e41f568 100755 --- a/smodels/tools/pythia8Wrapper.py +++ b/smodels/tools/pythia8Wrapper.py @@ -121,7 +121,7 @@ def unlink(self, unlinkdir=True): self.tempdir = None def checkInstallation ( self, compile : bool = True ): - # super().checkInstallation(compile) + super().checkInstallation(compile) exists = os.path.exists ( self.includeFile ) xmldoc = self.getXmldoc() sleep = 0. @@ -176,6 +176,10 @@ def run(self, slhaFile, lhefile=None, unlink=True): :returns: List of cross sections """ + r = self.checkInstallation() + if r == False: + logger.error ( "Installation check failed." ) + sys.exit() # Change pythia configuration file, if defined: if self.pythiacard: pythiacard_default = self.cfgfile diff --git a/smodels/tools/smodelsTools.py b/smodels/tools/smodelsTools.py index 269a2ac0af..0c99b867c3 100755 --- a/smodels/tools/smodelsTools.py +++ b/smodels/tools/smodelsTools.py @@ -16,7 +16,7 @@ def main(): parser = argparse.ArgumentParser(description="SModelS-tools command line tool.") parser.add_argument('-v','--verbose', help='verbosity level.
' - 'accepted values are: debug, info, warning, error.', + 'accepted values are: debug, info, warning, error. [info]', default = "info", type = str ) subparsers = parser.add_subparsers(dest='subparser_name') @@ -24,43 +24,84 @@ def main(): subparsers.add_parser('installation', description="Print installation setup and exit.") subparsers.add_parser('fixpermissions', description="Fix file permissions for xseccomputer.") xseccomputer = subparsers.add_parser('xseccomputer', description="Compute MSSM cross sections for a SLHA file.") + xseccomputer.add_argument('-f', '--filename', required=True, + help="SLHA file to compute cross sections for. " + "If a directory is given, cross sections for all files in the directory are computed." ) xseccomputer.add_argument('-s', '--sqrts', nargs='+', action='append', - help="sqrt(s) TeV. Can supply more than one value (as a space separated list). Default is both 8 and 13.", + help="LHC center-of-mass energy in TeV for computing the " + "cross sections. Can be more than one value, e.g., -s 8 13 for both " + "8 TeV and 13 TeV cross sections. [13]", type=float, default=[]) + xseccomputer.add_argument('-6', '--pythia6', action='store_true', + help="use pythia6 for LO cross sections") + xseccomputer.add_argument('-8', '--pythia8', action='store_true', + help="use pythia8 for LO cross sections (default)") xseccomputer.add_argument('-e', '--nevents', type=int, default=10000, help="number of events to be simulated [10000].") xseccomputer.add_argument('-v', '--verbosity', type=str, default="info", - help="Verbosity (debug, info, warning, error)") - xseccomputer.add_argument('-c', '--ncpus', type=int, default=-1, - help="number of cores to be used simultaneously. -1 means 'all'. ") + help="Verbosity (debug, info, warning, error) [info]") + xseccomputer.add_argument('-c', '--ncpus', type=int, default=1, + help="number of CPU cores to be used simultaneously. -1 " + "means 'all'. Used only when cross sections are computed for multiple " + "SLHA files.
[1]") xseccomputer.add_argument('-p', '--tofile', action='store_true', help="write cross sections to file (only highest order)") xseccomputer.add_argument('-P', '--alltofile', action='store_true', help="write all cross sections to file, including lower orders") - xseccomputer.add_argument('-q', '--query', action='store_true', - help="only query if there are cross sections in the file") - xseccomputer.add_argument('-C', '--colors', action='store_true', - help="colored terminal output" ) - xseccomputer.add_argument('--tempdir', type=str, default="/tmp/", - help="specify a temporary directory (default is /tmp/)" ) - xseccomputer.add_argument( '--noautocompile', action='store_true', - help="turn off automatic compilation" ) - xseccomputer.add_argument('-k', '--keep', action='store_true', - help="do not unlink temporary directory") - xseccomputer.add_argument('-6', '--pythia6', action='store_true', - help="use pythia6 for LO cross sections") - xseccomputer.add_argument('-8', '--pythia8', action='store_true', - help="use pythia8 for LO cross sections (default)") xseccomputer.add_argument('-n', '--NLO', action='store_true', help="compute at the NLO level (default is LO)") xseccomputer.add_argument('-N', '--NLL', help="compute at the NLO+NLL level (takes precedence over NLO, default is LO)", action='store_true') xseccomputer.add_argument('-O', '--LOfromSLHA', help="use LO cross sections from file to compute the NLO or NLL cross sections", action='store_true') xseccomputer.add_argument('-S', '--ssmultipliers', type=str, default=None, help="Signal strength multipliers, provided as dictionary of pids") - xseccomputer.add_argument('-f', '--filename', required=True, - help="SLHA file to compute cross sections for. " - "If a directory is given, compute cross sections for all files in directory." ) + xseccomputer.add_argument('-q', '--query', action='store_true', + help="only query if there are cross sections in the file") + xseccomputer.add_argument('-C', '--colors', action='store_true', + help="colored terminal output" ) + xseccomputer.add_argument('-k', '--keep', action='store_true', + help="do not unlink temporary directory") + xseccomputer.add_argument( '--noautocompile', action='store_true', + help="turn off automatic compilation" ) + xsecresummino = subparsers.add_parser('xsecresummino', description="Compute gaugino and slepton cross sections via resummino for a given SLHA file.") + xsecresummino.add_argument('-f', '--filename', required=True, + help="SLHA file to compute cross sections for. " + "If a directory is given, cross sections for all files in the directory are computed." ) + xsecresummino.add_argument('-s', '--sqrts', nargs='+', action='append', + help="LHC center-of-mass energy in TeV for computing the " + "cross sections. Can be more than one value, e.g., -s 8 13 for both " + "8 TeV and 13 TeV cross sections. [13]", + type=float, default=[]) + xsecresummino.add_argument('-part', '--particles', nargs='+', action='append', + help="list of daughter particles (given as PDG " + "codes) to compute cross sections for. All valid combinations from " + "the list will be considered. If no list of particles is given, the channels " + "info from the resummino.py configuration file is used instead.", + type=int, default=[]) + xsecresummino.add_argument('-v', '--verbosity', type=str, default="info", + help="verbosity (debug, info, warning, error). [info]") + xsecresummino.add_argument('-c', '--ncpus', type=int, default=1, + help="number of CPU cores to be used simultaneously. -1 means 'all'. 
Used only when cross sections are computed for multiple SLHA files. [1]") + xsecresummino.add_argument('-C', '--conf', type=str, default='default', + help="path to resummino.py configuration file. [smodels/etc/resummino.py]") + xsecresummino.add_argument('-x', '--xseclimit', type=float, default=None, + help="cross section limit in pb. If the LO cross section is " + "below this value, no higher orders will be calculated. The default " + "is 0.00001, set in the smodels/etc/resummino.py file." ) + xsecresummino.add_argument('-p', '--tofile', action='store_true', + help="write cross sections to file (only highest order)") + xsecresummino.add_argument('-P', '--alltofile', action='store_true', + help="write all cross sections to file, including lower orders") + xsecresummino.add_argument('-n', '--NLO', action='store_true', + help="compute at the NLO level (default is LO)") + xsecresummino.add_argument('-N', '--NLL', help="compute at the NLO+NLL level (takes precedence over NLO, default is LO)", action='store_true') + # xsecresummino.add_argument('-S', '--ssmultipliers', type=str, default=None, + # help="signal strength multipliers, provided as dictionary of pids") + xsecresummino.add_argument('-k', '--keep', action='store_true', + help="do not unlink temporary directory") + xsecresummino.add_argument( '--noautocompile', action='store_true', + help="turn off automatic compilation" ) + slhachecker = subparsers.add_parser('slhachecker', description="Perform several checks on a SLHA file.") slhachecker.add_argument('-xS', '--xsec', help='turn off the check for xsection blocks', action='store_false') slhachecker.add_argument('-illegal', '--illegal', help='turn on check for kinematically forbidden decays', action='store_true') @@ -91,7 +132,7 @@ def main(): help="How many (randomly selected) points will be included in the plot. If -1 all points will be read and included (default = -1).") iPlots.add_argument('-v', '--verbosity', type=str, default="info", - help="Verbosity (debug, info, warning, error)") + help="Verbosity (debug, info, warning, error) [info]") proxydb = subparsers.add_parser( 'proxydb', description= "create proxy databases for network use") @@ -153,6 +194,9 @@ def main(): if args.subparser_name == 'xseccomputer': from smodels.tools import xsecComputer xsecComputer.main(args) + if args.subparser_name == 'xsecresummino': + from smodels.tools import xsecResummino + xsecResummino.main(args) if args.subparser_name == 'slhachecker': from smodels.tools import slhaChecks slhaChecks.main(args) @@ -169,3 +213,4 @@ def main(): if __name__ == '__main__': main() + diff --git a/smodels/tools/xsecBase.py b/smodels/tools/xsecBase.py new file mode 100644 index 0000000000..92424b0019 --- /dev/null +++ b/smodels/tools/xsecBase.py @@ -0,0 +1,290 @@ +#!/usr/bin/env python3 + +""" +.. module:: xsecBase + :synopsis: Computation of reference ("theory") production cross sections. + +.. moduleauthor:: Suchita Kulkarni +.. moduleauthor:: Andre Lessa +.. moduleauthor:: Wolfgang Waltenberger +..
moduleauthor:: Théo Reymermier + +""" + +from __future__ import print_function +import sys +import os, copy +current = os.getcwd() +sys.path.append(current) + + +from smodels import installation +from smodels.tools import toolBox, runtime +from smodels.tools.physicsUnits import pb, TeV, GeV +from smodels.theory import crossSection +from smodels.theory.crossSection import LO, NLO, NLL +from smodels.tools.smodelsLogging import logger, setLogLevel +from smodels.theory.exceptions import SModelSTheoryError as SModelSError +import subprocess +from concurrent.futures import ProcessPoolExecutor + +import pyslha +import math +try: + import cStringIO as io +except ImportError as e: + import io + + + +class XSecBase: + """ base class for the cross section computers """ + def __init__ ( self, maxOrder,slha_folder_name, maycompile=True): + """ + :param maxOrder: maximum order to compute the cross section, given as an integer + if maxOrder == LO, compute only LO pythia xsecs + if maxOrder == NLO, apply NLO K-factors from NLLfast (if available) + if maxOrder == NLL, apply NLO+NLL K-factors from NLLfast (if available) + :param slha_folder_name: path to the SLHA file, or to a folder containing SLHA files + :param maycompile: if True, then tools can get compiled on-the-fly + """ + self.resummino_bin = "./smodels/lib/resummino/resummino-3.1.2/bin/resummino" + self.input_file_original = "smodels/etc/ff1a240db6c1719fe9f299b3390d49d32050c4f1003286d2428411eca45bd50c.in" + self.slha_folder_name = slha_folder_name + self.maxOrder = maxOrder + self.countNoXSecs = 0 + self.countNoNLOXSecs = 0 + self.maycompile = maycompile + + def addXSecToFile( self, xsecs, slhafile, comment=None, complain=True): + """ + Write cross sections to an SLHA file. + + :param xsecs: a XSectionList object containing the cross sections + :param slhafile: target file for writing the cross sections in SLHA format + :param comment: optional comment to be added to each cross section block + :param complain: complain if there are already cross sections in file + + """ + + if not os.path.isfile(slhafile): + line = f"SLHA file {slhafile} not found." + logger.error( line ) + raise SModelSError( line ) + if len(xsecs) == 0: + self.countNoXSecs+=1 + if self.countNoXSecs < 3: + logger.warning("No cross sections available.") + if self.countNoXSecs == 3: + logger.warning("No cross sections available (will quench such warnings in future).") + return False + # Check if file already contains cross section blocks + xSectionList = crossSection.getXsecFromSLHAFile(slhafile) + if xSectionList and complain: + logger.info("SLHA file already contains XSECTION blocks. Adding " + "only missing cross sections.") + + # Write cross sections to file, if they do not overlap with any cross + # section in the file + outfile = open(slhafile, 'r') + lastline = outfile.readlines()[-1] + lastline = lastline.strip() + outfile.close() + + outfile = open(slhafile, 'a') + if lastline != "": + outfile.write("\n" ) + nxsecs = 0 + for xsec in xsecs: + writeXsec = True + for oldxsec in xSectionList: + if oldxsec.info == xsec.info and set(oldxsec.pid) == set(xsec.pid): + writeXsec = False + break + if writeXsec: + nxsecs += 1 + outfile.write( self.xsecToBlock(xsec, (2212, 2212), comment) + "\n") + outfile.close() + + return nxsecs + + def xsecToBlock( self, xsec, inPDGs=(2212, 2212), comment=None, xsecUnit = pb): + """ + Generate a string for a XSECTION block in the SLHA format from a XSection + object.
+ + :param inPDGs: defines the PDGs of the incoming states + (default = 2212,2212) + + :param comment: is added at the end of the header as a comment + :param xsecUnit: unit of cross sections to be written (default is pb). + Must be a Unum unit. + + """ + if type(xsec) != type(crossSection.XSection()): + logger.error("Wrong input") + raise SModelSError() + # Sqrt(s) in GeV + header = "XSECTION " + str(xsec.info.sqrts / GeV) + for pdg in inPDGs: + # PDGs of incoming states + header += " " + str(pdg) + # Number of outgoing states + header += " " + str(len(xsec.pid)) + for pid in xsec.pid: + # PDGs of outgoing states + header += " " + str(pid) + if comment: + header += " # " + str(comment) # Comment + entry = " 0 " + str(xsec.info.order) + " 0 0 0 0 " + \ + str( "%16.8E" % (xsec.value / xsecUnit) ) + " SModelSv" + \ + installation.version() + + return "\n" + header + "\n" + entry + + + + +class ArgsStandardizer: + """ simple class to collect all argument manipulators """ + + def getSSMultipliers ( self, multipliers ): + if type ( multipliers ) == str: + if multipliers in [ "", "None", "none", "no", "{}" ]: + return None + if multipliers.count("{") != 1 or multipliers.count("}") != 1: + logger.error ( "need to pass signal strength multipliers as dictionary with tuple of pids as keys" ) + if multipliers.count("(") != multipliers.count(")"): + logger.error ( "need to pass signal strength multipliers as dictionary with tuple of pids as keys" ) + return eval(multipliers) + return multipliers + + def getInputFiles ( self, args ): + """ get the names of the slha files to run over """ + inputPath = args.filename.strip() + if not os.path.exists( inputPath ): + logger.error( "Path %s does not exist." % inputPath ) + sys.exit(1) + inputFiles = [] + if os.path.isfile ( inputPath ): + inputFiles = [ inputPath ] + else: + files = os.listdir ( inputPath ) + for f in files: + inputFiles.append ( os.path.join ( inputPath, f ) ) + import random + random.shuffle ( inputFiles ) + return inputFiles + + def checkAllowedSqrtses ( self, order, sqrtses ): + """ check if the sqrtses are 'allowed' """ + if order == 0: return + allowedsqrtses=[7, 8, 13, 13.6] + for sqrts in sqrtses: + if not sqrts in allowedsqrtses: + logger.error("Cannot compute NLO or NLL xsecs for sqrts = %d " + "TeV! Available are: %s TeV." % (sqrts, allowedsqrtses )) + sys.exit(-2) + + def getOrder ( self, args ): + """ retrieve the order in perturbation theory from argument list """ + if args.NLL: + return NLL + if args.NLO: + return NLO + return LO + + def getjson ( self, args ): + """ retrieve the path to the json file from argument list """ + json = args.conf + + if json == 'default': + return None + else: + if os.path.exists(json): + return json + else: + return logger.error("Path does not exist.") + + def queryCrossSections ( self, filename ): + if os.path.isdir ( filename ): + logger.error ( "Cannot query cross sections for a directory."
+
+    def queryCrossSections ( self, filename ):
+        if os.path.isdir ( filename ):
+            logger.error ( "Cannot query cross sections for a directory." )
+            sys.exit(-1)
+        xsecsInfile = crossSection.getXsecFromSLHAFile(filename)
+        if xsecsInfile:
+            print ( "1" )
+        else:
+            print ( "0" )
+
+    def getSqrtses ( self, args ):
+        """ extract the sqrtses from the argument list """
+        if args.sqrts is None or len(args.sqrts) == 0:
+            return {13}
+        sqrtses = [item for sublist in args.sqrts for item in sublist]
+        sqrtses.sort()
+        sqrtses = set(sqrtses)
+        return sqrtses
+
+    def getParticles ( self, args ):
+        """ extract the particles from the argument list; defaults to None, in
+        which case the channels are chosen via the json file """
+        if not hasattr ( args, "particles" ) or len(args.particles) == 0:
+            return None
+        particles = [int(item) for sublist in args.particles for item in sublist]
+        if len(particles) == 0:
+            particles = None
+            return particles
+        particles.sort()
+        particles = set(particles)
+        return particles
+
+    def checkNCPUs ( self, ncpus, inputFiles ):
+        if ncpus < -1 or ncpus == 0:
+            logger.error ( "Weird number of CPUs given: %d" % ncpus )
+            sys.exit()
+        if ncpus == -1:
+            ncpus = runtime.nCPUs()
+        ncpus = min ( len(inputFiles), ncpus )
+        if ncpus == 1:
+            logger.info ( "We run on a single cpu" )
+        else:
+            logger.info ( "We run on %d cpus" % ncpus )
+        return ncpus
+
+    def tempDir ( self, args ):
+        ret = "/tmp/"
+        if hasattr ( args, "tempdir" ):
+            ret = args.tempdir
+        return ret
+
+    def checkXsec_limit ( self, args ):
+        if args.xseclimit is None:
+            return None
+        if args.xseclimit < 0:
+            logger.error("xseclimit cannot be negative. Set it to 0 if you want no limitation.")
+            sys.exit()
+        if args.xseclimit > 1000:
+            logger.warning("Are you sure you want to use a limit that high? You might get errors.")
+        return args.xseclimit
+
+    def getPythiaVersion ( self, args ):
+        pythiaVersion = 8
+
+        if hasattr(args, 'pythia6' ) and args.pythia6 == True:
+            pythiaVersion = 6
+            if hasattr(args, 'pythia8') and args.pythia8 == True:
+                logger.error ( "cannot both use pythia6 and pythia8 for LO xsecs." )
+                sys.exit()
+        return pythiaVersion
+
+    def writeToFile ( self, args ):
+        toFile = None
+        if args.tofile:
+            toFile = "highest"
+        if args.alltofile:
+            if toFile == "highest":
+                logger.warning ( "Specified both --tofile and --alltofile. Will use "\
+                                 "--alltofile" )
+            toFile = "all"
+        return toFile
diff --git a/smodels/tools/xsecComputer.py b/smodels/tools/xsecComputer.py
index ed25ecb021..ffbe9686a6 100755
--- a/smodels/tools/xsecComputer.py
+++ b/smodels/tools/xsecComputer.py
@@ -17,6 +17,7 @@ from smodels.theory.crossSection import LO, NLO, NLL
 from smodels.tools.smodelsLogging import logger, setLogLevel
 from smodels.theory.exceptions import SModelSTheoryError as SModelSError
+from smodels.tools.xsecBase import XSecBase, ArgsStandardizer
 import os, copy
 import pyslha
 try:
@@ -25,7 +26,7 @@ import io
 import sys
 
-class XSecComputer:
+class XSecComputer(XSecBase):
     """ cross section computer class, what else? """
     def __init__ ( self, maxOrder, nevents, pythiaVersion, maycompile=True,
                    defaulttempdir : str = "/tmp/" ):
@@ -385,201 +386,6 @@ def addMultipliersToFile ( self, ssmultipliers, slhaFile ):
             outfile.write ( "\n" )
         outfile.close()
 
-    def addXSecToFile( self, xsecs, slhafile, comment=None, complain=True):
-        """
-        Write cross sections to an SLHA file.
- - :param xsecs: a XSectionList object containing the cross sections - :param slhafile: target file for writing the cross sections in SLHA format - :param comment: optional comment to be added to each cross section block - :param complain: complain if there are already cross sections in file - - """ - - if not os.path.isfile(slhafile): - line = f"SLHA file {slhafile} not found." - logger.error( line ) - raise SModelSError( line ) - if len(xsecs) == 0: - self.countNoXSecs+=1 - if self.countNoXSecs < 3: - logger.warning("No cross sections available.") - if self.countNoXSecs == 3: - logger.warning("No cross sections available (will quench such warnings in future).") - return False - # Check if file already contain cross section blocks - xSectionList = crossSection.getXsecFromSLHAFile(slhafile) - if xSectionList and complain: - logger.info("SLHA file already contains XSECTION blocks. Adding " - "only missing cross sections.") - - # Write cross sections to file, if they do not overlap with any cross - # section in the file - outfile = open(slhafile, 'r') - lastline = outfile.readlines()[-1] - lastline = lastline.strip() - outfile.close() - - outfile = open(slhafile, 'a') - if lastline != "": - outfile.write("\n" ) - nxsecs = 0 - for xsec in xsecs: - writeXsec = True - for oldxsec in xSectionList: - if oldxsec.info == xsec.info and set(oldxsec.pid) == set(xsec.pid): - writeXsec = False - break - if writeXsec: - nxsecs += 1 - outfile.write( self.xsecToBlock(xsec, (2212, 2212), comment) + "\n") - outfile.close() - - return nxsecs - - def xsecToBlock( self, xsec, inPDGs=(2212, 2212), comment=None, xsecUnit = pb): - """ - Generate a string for a XSECTION block in the SLHA format from a XSection - object. - - :param inPDGs: defines the PDGs of the incoming states - (default = 2212,2212) - - :param comment: is added at the end of the header as a comment - :param xsecUnit: unit of cross sections to be written (default is pb). - Must be a Unum unit. - - """ - if type(xsec) != type(crossSection.XSection()): - logger.error("Wrong input") - raise SModelSError() - # Sqrt(s) in GeV - header = "XSECTION " + str(xsec.info.sqrts / GeV) - for pdg in inPDGs: - # PDGs of incoming states - header += " " + str(pdg) - # Number of outgoing states - header += " " + str(len(xsec.pid)) - for pid in xsec.pid: - # PDGs of outgoing states - header += " " + str(pid) - if comment: - header += " # " + str(comment) # Comment - entry = " 0 " + str(xsec.info.order) + " 0 0 0 0 " + \ - str( "%16.8E" % (xsec.value / xsecUnit) ) + " SModelSv" + \ - installation.version() - - return "\n" + header + "\n" + entry - - -class ArgsStandardizer: - """ simple class to collect all argument manipulators """ - - def getSSMultipliers ( self, multipliers ): - if type ( multipliers ) == str: - if multipliers in [ "", "None", "none", "no", "{}" ]: - return None - if multipliers.count("{") != 1 or multipliers.count("}") != 1: - logger.error ( "need to pass signal strengh multipliers as dictionary with tuple of pids as keys" ) - if multipliers.count("(") != multipliers.count(")"): - logger.error ( "need to pass signal strengh multipliers as dictionary with tuple of pids as keys" ) - return eval(multipliers) - return multipliers - - def getInputFiles ( self, args ): - """ geth the names of the slha files to run over """ - inputPath = args.filename.strip() - if not os.path.exists( inputPath ): - logger.error( "Path %s does not exist." 
% inputPath ) - sys.exit(1) - inputFiles = [] - if os.path.isfile ( inputPath ): - inputFiles = [ inputPath ] - else: - files = os.listdir ( inputPath ) - for f in files: - inputFiles.append ( os.path.join ( inputPath, f ) ) - import random - random.shuffle ( inputFiles ) - return inputFiles - - def checkAllowedSqrtses ( self, order, sqrtses ): - """ check if the sqrtses are 'allowed' """ - if order == 0: return - allowedsqrtses=[7, 8, 13, 13.6] - for sqrts in sqrtses: - if not sqrts in allowedsqrtses: - logger.error("Cannot compute NLO or NLL xsecs for sqrts = %d " - "TeV! Available are: %s TeV." % (sqrts, allowedsqrtses )) - sys.exit(-2) - - def getOrder ( self, args ): - """ retrieve the order in perturbation theory from argument list """ - if args.NLL: - return NLL - if args.NLO: - return NLO - return LO - - def queryCrossSections ( self, filename ): - if os.path.isdir ( filename ): - logger.error ( "Cannot query cross sections for a directory." ) - sys.exit(-1) - xsecsInfile = crossSection.getXsecFromSLHAFile(filename) - if xsecsInfile: - print ( "1" ) - else: - print ( "0" ) - - def getSqrtses ( self, args ): - """ extract the sqrtses from argument list """ - sqrtses = [item for sublist in args.sqrts for item in sublist] - if len(sqrtses) == 0: - sqrtses = [8,13] - sqrtses.sort() - sqrtses = set(sqrtses) - return sqrtses - - def checkNCPUs ( self, ncpus, inputFiles ): - if ncpus < -1 or ncpus == 0: - logger.error ( "Weird number of CPUs given: %d" % ncpus ) - sys.exit() - if ncpus == -1: - ncpus = runtime.nCPUs() - ncpus = min ( len(inputFiles), ncpus ) - if ncpus == 1: - logger.info ( "We run on a single cpu" ) - else: - logger.info ( "We run on %d cpus" % ncpus ) - return ncpus - - def getPythiaVersion ( self, args ): - pythiaVersion = 8 - - if hasattr(args, 'pythia6' ) and args.pythia6 == True: - pythiaVersion = 6 - if hasattr(args, 'pythia8') and args.pythia8 == True: - logger.error ( "cannot both use pythia6 and pythia8 for LO xsecs." ) - sys.exit() - return pythiaVersion - - def writeToFile ( self, args ): - toFile = None - if args.tofile: - toFile="highest" - if args.alltofile: - if toFile=="highest": - logger.warning ( "Specified both --tofile and --alltofile. Will use "\ - "--alltofile" ) - toFile="all" - return toFile - - def tempDir ( self, args ): - ret = "/tmp/" - if hasattr ( args, "tempdir" ): - ret = args.tempdir - return ret - def main(args): canonizer = ArgsStandardizer() setLogLevel ( args.verbosity ) diff --git a/smodels/tools/xsecResummino.py b/smodels/tools/xsecResummino.py new file mode 100644 index 0000000000..2fc71d6145 --- /dev/null +++ b/smodels/tools/xsecResummino.py @@ -0,0 +1,804 @@ +#!/usr/bin/env python3 + +""" +.. module:: xsecResummino + :synopsis: Computation of reference ("theory") production cross sections. + +.. 
moduleauthor:: Théo Reymermier
+
+"""
+
+from __future__ import print_function
+import sys
+import os, copy
+current = os.getcwd()
+sys.path.append(current)
+
+
+from smodels import installation
+from smodels.tools.physicsUnits import pb, TeV, GeV
+from smodels.theory import crossSection
+from smodels.theory.crossSection import LO, NLO, NLL
+from smodels.tools.smodelsLogging import logger, setLogLevel
+from smodels.theory.exceptions import SModelSTheoryError as SModelSError
+import subprocess
+from concurrent.futures import ProcessPoolExecutor
+from smodels.tools.xsecBase import XSecBase, ArgsStandardizer
+import tempfile
+import argparse
+import pyslha
+import shutil
+import requests
+import tarfile
+from itertools import combinations
+
+class XSecResummino(XSecBase):
+    """ cross section computer class (for resummino), what else? """
+    def __init__ ( self, maxOrder, slha_folder_name, sqrt = 13, ncpu = 1,
+                   maycompile = True, type_writting = None, verbosity = '',
+                   json = None, particles = None, xsec_limit = None):
+        """
+        :param maxOrder: maximum order to compute the cross section, given as an integer
+                    if maxOrder == LO, compute only LO resummino xsecs
+                    if maxOrder == NLO, compute NLO resummino xsecs
+                    if maxOrder == NLL, compute NLO+NLL resummino xsecs
+        :param slha_folder_name: path to the SLHA file, or folder of SLHA files,
+                    to compute cross sections for
+        :param sqrt: center-of-mass energy (in TeV) to consider for the cross
+                    section calculation
+        :param ncpu: number of cpus used in parallel for the calculation
+        :param maycompile: if True, then tools can get compiled on-the-fly
+        :param type_writting: if 'all', write all orders into the slha file;
+                    if 'highest', write only the highest order
+        :param verbosity: verbosity of the information written to the log
+        :param json: path to the json/python file with all the relevant information
+                    concerning the resummino calculation
+        :param particles: daughter particles (PDG codes) to consider in the
+                    calculation of the cross sections
+        :param xsec_limit: value (in pb) below which, if mode == "check", the
+                    NLO (or NLO+NLL) cross sections are not calculated
+
+        Further attributes: self.resummino_bin is the path to the resummino
+        executable, self.input_file_original the path to the resummino template
+        input file.
+        """
+        self.pwd = installation.installDirectory()
+        self.tooldir = os.path.join(self.pwd,"smodels","lib","resummino")
+        self.getVersion()
+        self.resummino_bin = os.path.join(self.tooldir,f"resummino-{self.version}","bin","resummino")
+        self.input_file_original = os.path.join(self.pwd,"smodels","etc","input_resummino.in")
+        if json is None:
+            self.json_resummino = os.path.join(self.pwd,"smodels","etc","resummino.py")
+        else:
+            if os.path.isabs(json):
+                self.json_resummino = json
+            else:
+                self.json_resummino = os.path.join(self.pwd, json)
+        if particles is not None:
+            self.particles = list(particles)
+        else:
+            self.particles = None
+        self.slha_folder_name = slha_folder_name
+        self.maxOrder = maxOrder
+        self.countNoXSecs = 0
+        self.countNoNLOXSecs = 0
+        self.maycompile = maycompile
+        self.ncpu = ncpu
+        self.sqrts = sqrt
+        self.type_writting = type_writting
+        self.verbosity = verbosity
+        self.sqrt = sqrt
+
+        logger.debug('installation directory is '+self.pwd)
+
+        if xsec_limit is None:
+            try:
+                with open(self.json_resummino, "r") as f:
+                    data = eval(f.read())
+                self.xsec_limit = data["xsec_limit"]
+            except KeyError:
+                self.xsec_limit = 0.00001
+        else:
+            self.xsec_limit = xsec_limit
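A minimal instantiation sketch (not part of the patch; the SLHA file name is invented, and a compiled resummino plus the shipped smodels/etc configuration are assumed): computing NLO+NLL cross sections at 13 TeV on a single cpu, with all orders written to file:

    from smodels.tools.xsecResummino import XSecResummino
    from smodels.theory.crossSection import NLL

    computer = XSecResummino(maxOrder=NLL, slha_folder_name="model.slha",
                             sqrt=13, ncpu=1, type_writting="all")
    computer.checkInstallation()   # compiles resummino on the fly if needed
    computer.launch_all()          # runs the whole chain, defined further below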
+
+    def checkInstallation(self, compile : bool = True ) -> bool:
+        """ check if resummino is already compiled.
+        :param compile: if true, then attempt a compilation if not installed
+        :returns: true if we have a compiled executable
+        """
+        if os.path.exists ( self.resummino_bin ):
+            return True
+        if not compile:
+            return False
+        logger.info ( "Resummino not compiled; trying to build now (this might take a while)" )
+        cmd = f"cd {self.tooldir}; make"
+        o = subprocess.getoutput ( cmd )
+        ret = os.path.exists ( self.resummino_bin )
+        if not ret:
+            logger.error ( f"attempt at compiling resummino failed:\n{o}" )
+        return ret
+
+    def getVersion( self ):
+        """ retrieve the resummino version from version_path and set self.version;
+        if the file does not exist, fall back to "?.?.?" """
+        version_path = os.path.join(self.tooldir, 'versions.txt')
+        self.version = "?.?.?"
+        if os.path.exists(version_path):
+            with open(version_path, "r") as f:
+                lines = f.readlines()
+                for line in lines:
+                    if line.startswith("#"):
+                        continue
+                    if "resummino_version" in line:
+                        self.version = line.split(sep ="=")[1].strip()
+
+    def calculate_one_slha(self, particles, input_file, slha_file, output_file,
+                           num_try, order, log):
+        """
+        Manage the log file and launch the resummino command; also prepare the
+        cross section list used to write the cross sections into the slha file.
+        """
+        with open(log, 'a') as f:
+            f.write(f'{particles} cross-sections written in {slha_file}\n')
+        if particles is None:
+            with open(slha_file, 'a') as f:
+                f.write(' #no_cross-section\n')
+
+        # used to check the #no_cross-section entries
+        self.are_crosssection(slha_file, order)
+        if particles is None:
+            return
+        Xsections = crossSection.XSectionList()
+
+        for particle_pair in particles:
+            self.launch_resummino(input_file, slha_file, output_file, particle_pair[0],
+                                  particle_pair[1], num_try, order, Xsections, log)
+
+        resummino_version = f"Resumminov{self.version}"
+        nxsecs = self.addXSecToFile(Xsections, slha_file, comment = f"[pb], {resummino_version}")
+
+    def launch_command(self, resummino_bin, input_file, output_file, order):
+        """
+        run resummino at the order asked for by the user (order variable).
+        """
+        if order == 0:
+            command = f"{resummino_bin} {input_file} --lo"
+        if order == 1:
+            command = f"{resummino_bin} {input_file} --nlo"
+        if order == 2:
+            command = f"{resummino_bin} {input_file}"
+
+        with open(output_file, 'w') as f:
+            if self.verbosity == "debug":
+                subprocess.run(command, shell=True, stdout=f, stderr=os.sys.stderr, text=True)
+            else:
+                with open("/dev/null", "w") as errorhandle:
+                    subprocess.run(command, shell=True, stdout=f, stderr=errorhandle, text=True)
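getVersion() above expects smodels/lib/resummino/versions.txt to be a simple key = value file; a file along these lines (contents assumed here for illustration) would set self.version to 3.1.2:

    # versions.txt, written by the resummino install machinery
    resummino_version = 3.1.2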
+ """ + + #modify_slha_file(input_file, input_file, slha_file) + self.modify_outgoing_particles(input_file, input_file, particle_1, particle_2) + + already_written_channel = self.find_channels(slha_file) + + if self.sqrt > 10: + _ = str(self.sqrt*10**(-1))+'0E+04' + else: + _ = str(self.sqrt)+'.00E+03' + + already_written_channel_set = [({x,y},z,w) for (x,y), z,w in already_written_channel] + + logger.debug(f"channel, order and cross section {str(particle_1)} {str(particle_2)} {str(order)} {str(_)}" ) + logger.debug('the already written channels are '+ str(already_written_channel)) + if (((particle_1, particle_2), _, order)) in already_written_channel: + return + + if self.mode == "check": + self.launch_command(self.resummino_bin, input_file, output_file, 0) + infos = self.search_in_output(output_file) + infos = infos[0].split(" ")[2][1:] + + logger.debug(str((particle_1,particle_2))+" cross section is "+str(infos)+ " pb at LO order") + logger.debug("Is "+str((particle_1,particle_2))+ " cross section above the limit ? "+str( float(infos)>self.xsec_limit)) + logger.debug("cross section is "+str(infos)+ " pb at LO order") + logger.debug("Is cross section above the limit ? "+str( float(infos)>self.xsec_limit)) + if (float(infos))>(self.xsec_limit): + logger.debug('num try is '+ str(num_try)+ ' for the moment') + self.launch_command(self.resummino_bin, input_file, output_file, order) + if num_try == 0: + hist = self.write_in_slha(output_file, slha_file, order, particle_1, particle_2, self.type_writting, Xsections, log) + else: + hist = self.write_in_slha(output_file, slha_file, 0, particle_1, particle_2, self.type_writting, Xsections, log) + return + + if self.mode == "all": + self.launch_command(self.resummino_bin, input_file, output_file, order) + if num_try == 0: + hist = self.write_in_slha(output_file, slha_file, order, particle_1, particle_2, self.type_writting, Xsections, log) + return + + #:hist: variable to check if there is a problem in the cross section (strange value or no value) + hist = 0 + + if num_try == 0 and self.mode != "check": + self.launch_command(self.resummino_bin, input_file, output_file, order) + + #here we write in the slha file. + hist = self.write_in_slha(output_file, slha_file, order, particle_1, particle_2, self.type_writting, Xsections, log) + + #we check if we have written too much cross section + self.are_crosssection(slha_file, order) + + #if there is an error, we inform the user and launch again resummino + if hist == 1: + self.warning("error in resummino, trying again") + num_try = 0 + self.modify_outgoing_particles(input_file, input_file, particle_1, particle_2) + self.launch_resummino(input_file, slha_file, output_file, particle_1, particle_2, num_try, order, Xsections, log) + + + def search_in_output(self, output_file): + """ + Search in the .out files of resummino (in tempfiles) to get the cross section asked by the users, + then extract the LO,NLO and NLL+NLO. + If you want to get the incertainties given by resummino, you have everything here in LO, NLO and NLL. 
+ """ + Infos = [] + with open(output_file, 'r') as f: + data = f.readlines() + for i in range(len(data)): + if "Results:" in data[i]: + LO = data[i+1][:-1] #[:-1] to get rid of the \n + NLO = data[i+2][:-1] + NLL = data[i+3][:-1] + Infos.append((LO,NLO,NLL)) + if len(Infos) == 0: + raise RuntimeError("Please check your slha file and that you have resummino correctly installed with the install.sh script in the lib/resummino folder") + return Infos[0] + + def create_xsection(self, result, particle_1, particle_2, order, Xsections): + """ + Create cross section list filled with cross section objects, corresponding to all the channels calculated. + """ + if type(result) == list: + for i in range(order+1): + Xsection = crossSection.XSection() + + Xsection.value = float(result[i]) * pb + Xsection.info.order = i + Xsection.info.sqrts = float(self.sqrt) * TeV + Xsection.info.label = "WAOUW" + Xsection.pid = (particle_1, particle_2) + Xsections.add(Xsection) + return + + Xsection = crossSection.XSection() + + Xsection.value = float(result) * pb + Xsection.info.order = order + Xsection.info.sqrts = float(self.sqrt) * TeV + Xsection.info.label = "WAOUW" + Xsection.pid = (particle_1, particle_2) + Xsections.add(Xsection) + return + + def write_in_slha(self, output_file, slha_file, order, particle_1, particle_2, type_writing, Xsections, log): + """ + Organize here the way cross sections are written into the file (highest, + all) and then create cross_section object to let smodels take + care of the writing itself with the create_xsection method. + """ + results = self.search_in_output(output_file) + if type_writing == 'highest': + if order == 0: + result = results[0].split(" ")[2][1:] + elif order == 1: + result = results[1].split(" ")[2][1:] + elif order == 2: + result = results[2].split(" ")[2][1:] + logger.debug('the highest result is'+ str(result) +' pb') + elif type_writing == "all": + result = [results[0].split(" ")[2][1:], results[1].split(" ")[2][1:], results[2].split(" ")[2][1:]] + elif type_writing == None: + result = [results[0].split(" ")[2][1:], results[1].split(" ")[2][1:], results[2].split(" ")[2][1:]] + + _ = ['LO', 'NLO', 'NLO+NLL'] + for i in range(self.maxOrder+1): + logger.info(f"Cross section is {result[i]} pb for ({particle_1},{particle_2}) channel at {_[i]} in perturbation theory") + else : + result_list = [results[0].split(" ")[2][1:], results[1].split(" ")[2][1:], results[2].split(" ")[2][1:]] + result = 0 + for i in results: + _ = i.split(" ")[2][1:] + _ = float(_) + if _ != 0: + result = i.split(" ")[2][1:] + return result + + if order == 1: + if float(results[1].split(" ")[2][1:])>2*float(results[0].split(" ")[2][1:]) or float(results[0].split(" ")[2][1:])> 2* float(results[1].split(" ")[2][1:]): + with open(log, 'a') as f: + f.write(f"to much change between LO and NLO for {particle_1} and {particle_2} with {slha_file}") + return 1 + if order == 2: + if float(results[2].split(" ")[2][1:])>2*float(results[0].split(" ")[2][1:]) or float(results[0].split(" ")[2][1:])> 2* float(results[1].split(" ")[2][1:]): + with open(log, 'a') as f: + f.write(f"to much change between LO and NLL+NLO for {particle_1} and {particle_2} with {slha_file}") + return 1 + + if type_writing is not None: + self.create_xsection(result, particle_1, particle_2, order, Xsections) + + return 0 + + def extract_m1_m2_mu(self, file_path : os.PathLike ) -> dict: + """_summary_ + + function to extract the breaking term of the electrowikino part (SUSY) in + an slha file. 
+
+    def extract_m1_m2_mu(self, file_path : os.PathLike ) -> tuple:
+        """
+        Extract the breaking terms of the electroweakino sector (SUSY) from an
+        slha file.
+
+        :param file_path: path of the slha file
+        :returns: tuple of
+                  M1 and M2, the gaugino mass parameters, and
+                  mu, the higgsino mass parameter
+        """
+        data = pyslha.read(file_path)
+
+        m1 = data.blocks['EXTPAR'][1]
+        m2 = data.blocks['EXTPAR'][2]
+        mu = data.blocks['EXTPAR'][23]
+
+        return m1, m2, mu
+
+    def extract_N1_N2_C1(self, file_path):
+        """
+        Extract the electroweakino masses (SUSY) from an slha file.
+
+        :param file_path: path of the slha file
+        :returns: tuple of
+                  N1, the mass of neutralino 1,
+                  N2, the mass of neutralino 2,
+                  C1, the mass of chargino 1,
+                  C2, the mass of chargino 2
+        """
+        data = pyslha.read(file_path)
+
+        N1 = data.blocks['MASS'][1000022]
+        N2 = data.blocks['MASS'][1000023]
+        C1 = data.blocks['MASS'][1000024]
+        C2 = data.blocks['MASS'][1000037]
+        return N1, N2, C1, C2
+
+    def are_crosssection(self, slha_file, order):
+        """
+        Check if the cross sections are already written, and remove any cross
+        section that is written twice.
+        """
+        with open(slha_file, 'r') as f:
+            data = f.readlines()
+        test = True
+        if data[-1] == " #no_cross-section" or data[-1] == " #no_cross-section\n":
+            while test == True:
+                if data[-2] == " #no_cross-section" or data[-2] == " #no_cross-section\n" or data[-3] == " #no_cross-section\n":
+                    data.pop()
+                    logger.info(f'removed a #no_cross-section from {slha_file}')
+                else:
+                    test = False
+            with open(slha_file, 'w') as f:
+                for _ in data:
+                    f.write(_)
+            return
+        channels = {}
+        to_delete = []
+
+        for i in range(len(data)):
+            line = data[i]
+            if line.startswith("XSECTION"):
+                _ = [x for x in line.split(" ") if x != '']
+                channel = (int(_[5]), int(_[6]), _[1], data[i+1].split(" ")[4])
+
+                if channel in channels:
+                    start = channels[channel]
+                    end = start+1
+                    while not data[end].startswith('XSECTION'):
+                        end += 1
+                    # end = start+order+3
+                    to_delete.extend(range(start, end))
+                channels[channel] = i
+        lines = [line for i, line in enumerate(data) if i not in to_delete]
+
+        with open(slha_file, 'w') as f:
+            f.writelines(lines)
+
+    def find_channels(self, slha_file):
+        """ list the (channel, sqrts, order) triples of all XSECTION blocks
+        already present in slha_file """
+        with open(slha_file, 'r') as f:
+            data = f.readlines()
+
+        channels = []
+        for i in range(len(data)):
+            line = data[i]
+            if line.startswith("XSECTION"):
+                _ = [x for x in line.split(" ") if x != '']
+                sqrt = _[2]
+                channel = (int(_[5]), int(_[6]))
+                order = data[i+1].split(" ")[4]
+                order = int(order)
+                channels.append((channel, sqrt, order))
+        return channels
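A usage sketch for the two pyslha extractors above (file name invented); both simply read standard SLHA blocks:

    m1, m2, mu = computer.extract_m1_m2_mu("model.slha")      # EXTPAR entries 1, 2, 23
    n1, n2, c1, c2 = computer.extract_N1_N2_C1("model.slha")  # MASS block entries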
+ output_file = "output_file.txt" + + #List of the differents files on which resummino will be launch + Liste_slha = [] + Liste_resummino_in = [] + Liste_output_file = [] + Liste_particles = [] + Liste = [] + self.resummino_in = tempfile.mkdtemp() + self.resummino_out = tempfile.mkdtemp() + self.resummino_log = tempfile.mkdtemp() + resummino_in = '/tmp/resummino/resummino_in' + resummino_out = '/tmp/resummino/resummino_out' + resummino_log = '/tmp/resummino/resummino_log' + #Check if the folder already exists + if not os.path.exists(self.resummino_in): + os.mkdir(self.resummino_in) + if not os.path.exists(self.resummino_out): + os.mkdir(self.resummino_out) + if not os.path.exists(self.resummino_log): + os.mkdir(self.resummino_log) + + #always use absolute path + slha_folder = os.path.join( os.getcwd(), slha_folder_name) + # slha_folder = slha_folder_name + #Check if the input is a file or a directory + if not os.path.isfile(slha_folder): + liste_slha = os.listdir(slha_folder_name) + else: + name = os.path.normpath(slha_folder_name).split(os.path.sep)[-1] + liste_slha = [slha_folder_name] + + + + + + ''' + a = number of files + b = number of files with already a cross section + c = number of files ignored + ''' + a,b,c = 0,0,0 + + #Loop on all the slha file, in a random order (os.listdir) + for slha in liste_slha: + + if not os.path.isfile(slha_folder): + slha_path = os.path.join(slha_folder,slha) + else: + slha_path = os.path.join(current, slha) + #Variable used to avoid strange result (huge difference between LO and NLO result) + num_try = 0 + with open(slha_path, 'r') as f: + data = f.readlines() + a+=1 + + if "SModelSv" in data[-1]: + b+=1 + # We increase this variable by 1, so if it is > 0 we do not redo the calculation + #num_try+=1 + elif data[-1].endswith(" #no_cross-section\n"): + c+=1 + # We increase this variable by 1, so if it is > 0 we do not redo the calculation + num_try+=1 + + #remove the .slha + if not os.path.isfile(slha_folder): + slha_file_name = slha[6:-5] + else: + slha_file_name = name[6:-5] + + + resummino_in_file = os.path.join(self.resummino_in,f"resummino_{slha_file_name}.in") + resummino_out_file = os.path.join(self.resummino_out,f"resummino_{slha_file_name}.txt") + resummino_log_file = os.path.join(self.resummino_log,f"resummino_log_{slha_file_name}.txt") + + #On prend le fichier de référence, et on créer une copie dans resummino_in avec le bon fichier slha + self.modify_slha_file(self.input_file_original, resummino_in_file,slha_path) + + #On ajoute les noms à la liste (in, out et slha) + Liste_resummino_in.append(resummino_in_file) + Liste_output_file.append(resummino_out_file) + Liste_slha.append(slha_path) + + # Here we list the channels to use, if scenario excluded then return None + #particles = self.discrimination_particles(slha_path) + if self.particles == None: + self.mode, particles = self.extract_json() + else: + self.mode, particles = self.determine_channels() + + Liste_particles.append(particles) + + # We could optimize by removing variables that do not change from one iteration to another + # But it is not very important (negligible in terms of compilation time + # compared to Resummino) + Liste.append((particles, resummino_in_file, slha_path, resummino_out_file, num_try, order, resummino_log_file)) + logger.info(f"{a-b-c} files created") + return Liste + + + def modify_slha_file(self, file_before, file_after, slha_file): + """ _summary_ + + Change all the informations in the .in files before launching calculations + Args: + file_before (input file 
+
+    def modify_slha_file(self, file_before, file_after, slha_file):
+        """
+        Change all the information in the .in file before launching the calculation.
+
+        :param file_before: template input file for resummino
+        :param file_after: input file ready for resummino
+        :param slha_file: slha file the input file should point to
+        """
+        with open(file_before, 'r') as f:
+            lines = f.readlines()
+
+        try:
+            with open(self.json_resummino, "r") as fi:
+                data = eval(fi.read())
+
+            pdfs = data["pdfs"]
+
+            pdf_lo = pdfs['pdf_lo']
+            pdfset_lo = pdfs['pdfset_lo']
+            pdf_nlo = pdfs['pdf_nlo']
+            pdfset_nlo = pdfs['pdfset_nlo']
+
+            lhapdf_folder = os.path.join(self.pwd, 'smodels', 'lib', 'resummino', 'lhapdf', 'share', 'LHAPDF')
+
+            if not os.path.exists(os.path.join(lhapdf_folder, pdf_lo)):
+                url = f"http://lhapdfsets.web.cern.ch/lhapdfsets/current/{pdf_lo}.tar.gz"
+                try:
+                    response = requests.get(url, stream=True)
+                    response.raise_for_status()  # check whether the request succeeded
+
+                    # store the downloaded archive in a temporary file
+                    with open(f"{pdf_lo}.tar.gz", 'wb') as file:
+                        for chunk in response.iter_content(chunk_size=8192):
+                            file.write(chunk)
+
+                    # unpack the archive
+                    with tarfile.open(f"{pdf_lo}.tar.gz", "r:gz") as tar:
+                        tar.extractall(path=lhapdf_folder)
+
+                except requests.exceptions.HTTPError as e:
+                    logger.error("pdf name is wrong for LO, see http://lhapdfsets.web.cern.ch/lhapdfsets/current to check")
+                    raise RuntimeError(f"Failed during HTTP request for {pdf_lo}, see http://lhapdfsets.web.cern.ch/lhapdfsets/current to check. You may also check if resummino is installed.")
+                finally:
+                    # remove the temporary tar.gz file if necessary
+                    if os.path.exists(f"{pdf_lo}.tar.gz"):
+                        os.remove(f"{pdf_lo}.tar.gz")
+            if not os.path.exists(os.path.join(lhapdf_folder, pdf_nlo)):
+                url = f"http://lhapdfsets.web.cern.ch/lhapdfsets/current/{pdf_nlo}.tar.gz"
+                try:
+                    response = requests.get(url, stream=True)
+                    response.raise_for_status()  # check whether the request succeeded
+
+                    # store the downloaded archive in a temporary file
+                    with open(f"{pdf_nlo}.tar.gz", 'wb') as file:
+                        for chunk in response.iter_content(chunk_size=8192):
+                            file.write(chunk)
+
+                    # unpack the archive
+                    with tarfile.open(f"{pdf_nlo}.tar.gz", "r:gz") as tar:
+                        tar.extractall(path=lhapdf_folder)
+
+                except requests.exceptions.HTTPError as e:
+                    logger.error("pdf name is wrong for NLO, see http://lhapdfsets.web.cern.ch/lhapdfsets/current to check")
+                    raise RuntimeError(f"Failed during HTTP request for {pdf_nlo}, see http://lhapdfsets.web.cern.ch/lhapdfsets/current to check.")
+                finally:
+                    # remove the temporary tar.gz file if necessary
+                    if os.path.exists(f"{pdf_nlo}.tar.gz"):
+                        os.remove(f"{pdf_nlo}.tar.gz")
+            with open(file_after, 'w') as f:
+                for line in lines:
+                    if line.startswith("pdf_lo"):
+                        line = f"pdf_lo = {pdf_lo}\n"
+                    if line.startswith("pdfset_lo"):
+                        line = f"pdfset_lo = {pdfset_lo}\n"
+                    if line.startswith("pdf_nlo"):
+                        line = f"pdf_nlo = {pdf_nlo}\n"
+                    if line.startswith("pdfset_nlo"):
+                        line = f"pdfset_nlo = {pdfset_nlo}\n"
+                    f.write(line)
+
+        except KeyError:
+            logger.info("choosing PDF4LHC21_40 by default")
+
+        with open(file_after, 'w') as f:
+            for line in lines:
+                if line.startswith("slha ="):
+                    line = f"slha = {slha_file}\n"
+                if self.sqrt == 8.0:
+                    if line.startswith("center_of_mass_energy ="):
+                        line = f"center_of_mass_energy = 8000\n"
+                if self.sqrt == 13.6:
+                    if line.startswith("center_of_mass_energy ="):
+                        line = f"center_of_mass_energy = 13600\n"
+                f.write(line)
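modify_slha_file() above picks the PDF settings from an optional pdfs dictionary in the resummino.py configuration; when present, it is expected to look like the following (set names here are only examples, compare the test configuration later in this patch and check the names on the LHAPDF site before using them):

    "pdfs" : {
        "pdf_lo"     : "cteq66",
        "pdfset_lo"  : 0,
        "pdf_nlo"    : "cteq66",
        "pdfset_nlo" : 0,
    }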
mode = "check" + + + channels = data["channels"] + particles = [] + for key, values in channels.items(): + particles.append((values[0], values[1])) + + return mode, particles + + def determine_channels(self): + """_summary_ + + function to find channels using a set of particles + + Returns: tuple of: + string: Mode of writting for the slha cross section + list: list of the daugther particle to consider in the calculation + of the cross section + """ + if 1000024 in self.particles: + self.particles.append(-1000024) + if 1000037 in self.particles: + self.particles.append(-1000037) + channels = list(combinations(self.particles, 2)) + + mode = 'check' + + return mode, channels + + def modify_outgoing_particles( self, input_file, output_file, new_particle1, + new_particle2 ): + """ + modify the output particles (mother particles) in the resummino .in + file. First call the template (input_file), + then write into the resummino .in file (output_file). + Can also write directly onto the output_file. + """ + with open(input_file, 'r') as f: + lines = f.readlines() + + with open(output_file, 'w') as f: + for line in lines: + if line.startswith("particle1 ="): + line = f"particle1 = {new_particle1}\n" + elif line.startswith("particle2 ="): + line = f"particle2 = {new_particle2}\n" + f.write(line) + + + def launch_all(self): + """ + Launch all the calculations of the slha files in parallel (limited by + ncpu), with first the creation of every path needed for the calculations. + """ + # We create the list + tasks = self.create_routine_files(self.maxOrder, self.slha_folder_name) + + if not os.path.exists(os.path.join(self.pwd, 'smodels', 'lib', 'resummino', 'lhapdf', 'bin', 'lhapdf')): + logger.error("Warning, lhapdf is not installed, please make resummino, or use ./install.sh in the smodels/lib/resummino folder.") + return + + if not os.path.exists(os.path.join(self.pwd, 'smodels', 'lib', 'resummino', 'resummino_install', 'bin', 'resummino')): + logger.error("Warning, resummino is not installed, please make resummino, or use ./install.Sh in the smodels/lib/resummino folder.") + return + try: + if len(tasks)==0: + logger.error("Warning, check your inputfile (.slha), no calculation are available.") + return + except TypeError: + logger.error("Warning, check your inputfile (.slha), no calculation are available.") + return + + if self.ncpu == 1: + for task in tasks: + self.calculate_one_slha(*task) + else: + # We launch the program with maximum performance, change if necessary + with ProcessPoolExecutor(max_workers=self.ncpu) as executor: + futures = [executor.submit(self.calculate_one_slha, *task) for task in tasks] + for future in futures: + future.result() + + shutil.rmtree(self.resummino_in) + shutil.rmtree(self.resummino_out) + shutil.rmtree(self.resummino_log) + +def main ( args : argparse.Namespace ): + """ the central entry point """ + canonizer = ArgsStandardizer() + setLogLevel ( args.verbosity ) + if not hasattr ( args, "noautocompile" ): + args.noautocompile = False + + sqrtses = canonizer.getSqrtses ( args ) + order = canonizer.getOrder ( args ) + canonizer.checkAllowedSqrtses ( order, sqrtses ) + inputFiles = args.filename.strip() + ncpus = canonizer.checkNCPUs ( args.ncpus, inputFiles ) + type_writting = canonizer.writeToFile(args) + json = canonizer.getjson(args) + particles = canonizer.getParticles( args ) + xsec_limit = canonizer.checkXsec_limit(args) + #If we get no type_writing then we only print the result + if type_writting == None : + type_writting = None + + verbosity = 
+
+def main ( args : argparse.Namespace ):
+    """ the central entry point """
+    canonizer = ArgsStandardizer()
+    setLogLevel ( args.verbosity )
+    if not hasattr ( args, "noautocompile" ):
+        args.noautocompile = False
+
+    sqrtses = canonizer.getSqrtses ( args )
+    order = canonizer.getOrder ( args )
+    canonizer.checkAllowedSqrtses ( order, sqrtses )
+    inputFiles = args.filename.strip()
+    ncpus = canonizer.checkNCPUs ( args.ncpus, inputFiles )
+    type_writting = canonizer.writeToFile(args)
+    json = canonizer.getjson(args)
+    particles = canonizer.getParticles( args )
+    xsec_limit = canonizer.checkXsec_limit(args)
+    # if type_writting is None, the cross sections are only printed, not written to file
+
+    verbosity = args.verbosity
+
+    ssmultipliers = None
+    if hasattr ( args, "ssmultipliers" ):
+        ssmultipliers = canonizer.getSSMultipliers ( args.ssmultipliers )
+        if ssmultipliers is not None:
+            for pids, multiplier in ssmultipliers.items():
+                if type(pids) != tuple:
+                    logger.error ( "keys of ssmultipliers need to be supplied as tuples" )
+                    sys.exit()
+                if type(multiplier) not in [ int, float ]:
+                    logger.error ( "values of ssmultipliers need to be supplied as ints or floats" )
+                    sys.exit()
+
+    logger.debug('verbosity is '+ verbosity)
+    ssqrtses = ",".join(map(str,sqrtses))
+
+    logger.info(f"the calculation will be performed using {ssqrtses} TeV as center of mass energy")
+    orders_dic = {0:'LO', 1:'NLO', 2:'NLL+NLO'}
+
+    logger.info("the highest perturbation order considered is " + orders_dic[order])
+    scpu = "cpus"
+    if ncpus == 1:
+        scpu = "cpu"
+    logger.info(f"we are currently running on {ncpus} {scpu}" )
+    logger.debug(f"in this calculation, we write out the cross sections at {type_writting} orders of perturbation theory, up to {orders_dic[order]}")
+
+    for sqrt in sqrtses:
+        logger.debug('current energy considered is '+ str(sqrt)+ ' TeV')
+        test = XSecResummino(maxOrder=order, slha_folder_name=inputFiles, sqrt = sqrt,
+                             ncpu=ncpus, type_writting = type_writting, verbosity = verbosity,
+                             json = json, particles=particles, xsec_limit = xsec_limit)
+        test.checkInstallation ( compile = not args.noautocompile )
+        test.launch_all()
+    return
+
+    # test = XSecResummino(maxOrder=order, slha_folder_name=inputFiles, sqrt = sqrtses, ncpu=ncpus, type = type_writting)
+    # test.routine_resummino()
diff --git a/smodels/version b/smodels/version
index f90b1afc08..0bee604df7 100644
--- a/smodels/version
+++ b/smodels/version
@@ -1 +1 @@
-2.3.2
+2.3.3
diff --git a/test/database/version b/test/database/version
index 44f2dda97c..7e918b3d7b 100644
--- a/test/database/version
+++ b/test/database/version
@@ -1 +1 @@
-unittest231
+unittest233
diff --git a/test/database_extra/version b/test/database_extra/version
index 3219f11371..6a077a5e41 100644
--- a/test/database_extra/version
+++ b/test/database_extra/version
@@ -1 +1 @@
-unittestextra231
+unittestextra233
diff --git a/test/database_simple/version b/test/database_simple/version
index 47bfa6e84f..25bc3ccceb 100644
--- a/test/database_simple/version
+++ b/test/database_simple/version
@@ -1 +1 @@
-unittest231-beta
+unittest233-beta
diff --git a/test/testFiles/resummino/resummino.py b/test/testFiles/resummino/resummino.py
new file mode 100644
index 0000000000..e214da0432
--- /dev/null
+++ b/test/testFiles/resummino/resummino.py
@@ -0,0 +1,21 @@
+{
+    # channels: values (as python lists) are the pdg codes of the particles X,Y
+    # to be produced in the 2->2 processes p,p -> X,Y
+    # names of keys are used for debugging only
+    "channels" : {"1" : [1000024,-1000024]
+    },
+    # -------------------------------------
+    # The limit for the NLO calculation is determined by the 'xsec_limit' variable.
+    # If the LO cross section is below this value (in pb), no NLO (or NLO+NLL)
+    # cross sections are calculated.
+    "xsec_limit" : 0.00001,
+    # pdfs (case-sensitive): our default is cteq66, which gives results close
+    # to the PDF4LHC21_40 set.
Any pdf can be used, but be sure to check + # the name on the lhapdf website before putting any value here. + #"pdfs" : { + # "pdf_lo" : "cteq61", + # "pdfset_lo" : 0, + # "pdf_nlo" : "cteq61", + # "pdfset_nlo" : 0 + #} +} diff --git a/test/testFiles/resummino/resummino_example.slha b/test/testFiles/resummino/resummino_example.slha new file mode 100644 index 0000000000..20f9fbfb1b --- /dev/null +++ b/test/testFiles/resummino/resummino_example.slha @@ -0,0 +1,489 @@ +# WARNING: Can only set number of loops for higgs masses and REWSB to be 1, 2 or 3 in BLOCK SOFTSUSY parameter 7, not 0. Ignoring. +# SOFTSUSY4.1.13 SLHA compliant output +# B.C. Allanach, Comput. Phys. Commun. 143 (2002) 305-331, hep-ph/0104145 +Block SPINFO # Program information + 1 SOFTSUSY # spectrum calculator + 2 4.1.13 # version number +Block MODSEL # Select model + 1 0 # nonUniversal +Block SMINPUTS # Standard Model inputs + 1 1.27934000e+02 # alpha_em^(-1)(MZ) SM MSbar + 2 1.16637000e-05 # G_Fermi + 3 1.17200000e-01 # alpha_s(MZ)MSbar + 4 9.11876000e+01 # MZ(pole) + 5 4.25000000e+00 # mb(mb) + 6 1.72500000e+02 # Mtop(pole) + 7 1.77700000e+00 # Mtau(pole) +Block MINPAR # SUSY breaking input parameters + 3 1.00000000e+01 # tanb, DRbar, Feynman gauge +Block EXTPAR # non-universal SUSY breaking parameters + 0 -1.00000000e+00 # Set MX=MSUSY + 1 2.00000000e+03 # M_1(MX) + 2 3.50000000e+02 # M_2(MX) + 3 3.00000000e+03 # M_3(MX) + 11 -3.80000000e+03 # At(MX) + 12 -3.80000000e+03 # Ab(MX) + 13 0.00000000e+00 # Atau(MX) + 23 8.50000000e+02 # mu(MX) + 26 2.00000000e+03 # mA(pole) + 25 1.00000000e+01 # tan beta(MX) + 31 5.00000000e+03 # meL(MX) + 32 5.00000000e+03 # mmuL(MX) + 33 5.00000000e+03 # mtauL(MX) + 34 5.00000000e+03 # meR(MX) + 35 5.00000000e+03 # mmuR(MX) + 36 5.00000000e+03 # mtauR(MX) + 41 5.00000000e+03 # mqL1(MX) + 42 5.00000000e+03 # mqL2(MX) + 43 2.00000000e+03 # mqL3(MX) + 44 5.00000000e+03 # muR(MX) + 45 5.00000000e+03 # mcR(MX) + 46 3.00000000e+03 # mtR(MX) + 47 5.00000000e+03 # mdR(MX) + 48 5.00000000e+03 # msR(MX) + 49 5.00000000e+03 # mbR(MX) +# SOFTSUSY-specific non SLHA information: +# mixing=0 Desired accuracy=1.00000000e-04 Achieved accuracy=5.83824444e-06 +# 3-loop RGE corrections are on. 
2-loop Yukawa/g3 thresholds are off +# 2-loop SUSY QCD computation of squark/gluino pole masses are off +# Matching scale=1.72500000e+02 +Block MASS # Mass spectrum +# PDG code mass particle + 24 8.03532070e+01 # MW + 25 1.25077949e+02 # h0 + 35 2.00006354e+03 # H0 + 36 2.00000002e+03 # A0 + 37 2.00180830e+03 # H+ + 1000021 3.24461732e+03 # ~g + 1000022 3.71722937e+02 # ~neutralino(1) + 1000023 -8.63766357e+02 # ~neutralino(2) + 1000024 3.71914006e+02 # ~chargino(1) + 1000025 8.67912838e+02 # ~neutralino(3) + 1000035 2.00925910e+03 # ~neutralino(4) + 1000037 8.71507915e+02 # ~chargino(2) + 1000001 5.05186046e+03 # ~d_L + 1000002 5.05137659e+03 # ~u_L + 1000003 5.05186046e+03 # ~s_L + 1000004 5.05137659e+03 # ~c_L + 1000005 2.07585981e+03 # ~b_1 + 1000006 2.06370359e+03 # ~t_1 + 1000011 5.00920314e+03 # ~e_L + 1000012 5.00824805e+03 # ~nue_L + 1000013 5.00920314e+03 # ~mu_L + 1000014 5.00824805e+03 # ~numu_L + 1000015 5.00510952e+03 # ~stau_1 + 1000016 5.00818020e+03 # ~nu_tau_L + 2000001 5.04358896e+03 # ~d_R + 2000002 5.04290959e+03 # ~u_R + 2000003 5.04358896e+03 # ~s_R + 2000004 5.04290959e+03 # ~c_R + 2000005 5.04243628e+03 # ~b_2 + 2000006 3.01963151e+03 # ~t_2 + 2000011 5.00581696e+03 # ~e_R + 2000013 5.00581696e+03 # ~mu_R + 2000015 5.00972947e+03 # ~stau_2 +Block alpha # Effective Higgs mixing parameter + -1.00263825e-01 # alpha - evaluated at p^2=0 +Block nmix # neutralino mixing matrix Q=2.44830526e+03 + 1 1 -1.73662718e-03 # N_{1,1} + 1 2 9.92216927e-01 # N_{1,2} + 1 3 -1.11982086e-01 # N_{1,3} + 1 4 5.44294586e-02 # N_{1,4} + 2 1 -9.78271903e-03 # N_{2,1} + 2 2 4.07686551e-02 # N_{2,2} + 2 3 7.05332270e-01 # N_{2,3} + 2 4 7.07635927e-01 # N_{2,4} + 3 1 2.96301287e-02 # N_{3,1} + 3 2 1.17650430e-01 # N_{3,2} + 3 3 6.99835232e-01 # N_{3,3} + 3 4 -7.03925479e-01 # N_{3,4} + 4 1 9.99511550e-01 # N_{4,1} + 4 2 -1.36472481e-03 # N_{4,2} + 4 3 -1.40374683e-02 # N_{4,3} + 4 4 2.78881517e-02 # N_{4,4} +Block Umix # chargino U mixing matrix at Q=2.44830526e+03 + 1 1 9.87515362e-01 # U_{1,1} + 1 2 -1.57522729e-01 # U_{1,2} + 2 1 1.57522729e-01 # U_{2,1} + 2 2 9.87515362e-01 # U_{2,2} +Block Vmix # chargino V mixing matrix at Q=2.44830526e+03 + 1 1 9.97055324e-01 # V_{1,1} + 1 2 -7.66855997e-02 # V_{1,2} + 2 1 7.66855997e-02 # V_{2,1} + 2 2 9.97055324e-01 # V_{2,2} +Block stopmix # stop mixing matrix at Q=2.44830526e+03 + 1 1 9.94233187e-01 # F_{11} + 1 2 1.07239777e-01 # F_{12} + 2 1 -1.07239777e-01 # F_{21} + 2 2 9.94233187e-01 # F_{22} +Block sbotmix # sbottom mixing matrix at Q=2.44830526e+03 + 1 1 9.99999067e-01 # F_{11} + 1 2 1.36597755e-03 # F_{12} + 2 1 -1.36597755e-03 # F_{21} + 2 2 9.99999067e-01 # F_{22} +Block staumix # stau mixing matrix at Q=2.44830526e+03 + 1 1 7.06143542e-01 # F_{11} + 1 2 7.08068710e-01 # F_{12} + 2 1 7.08068710e-01 # F_{21} + 2 2 -7.06143542e-01 # F_{22} +Block gauge Q= 2.44830526e+03 # SM gauge couplings + 1 3.63675025e-01 # g'(Q)MSSM DRbar + 2 6.38839571e-01 # g(Q)MSSM DRbar + 3 1.00495011e+00 # g3(Q)MSSM DRbar +Block yu Q= 2.44830526e+03 + 3 3 8.24904204e-01 # Yt(Q)MSSM DRbar +Block yd Q= 2.44830526e+03 + 3 3 1.36918620e-01 # Yb(Q)MSSM DRbar +Block ye Q= 2.44830526e+03 + 3 3 1.03251197e-01 # Ytau(Q)MSSM DRbar +Block hmix Q= 2.44830526e+03 # Higgs mixing parameters + 1 8.50000000e+02 # mu(Q)MSSM DRbar + 2 1.00000000e+01 # tan beta(Q)MSSM DRbar Feynman gauge + 3 2.42057244e+02 # higgs vev(Q)MSSM DRbar Feynman gauge + 4 4.09897382e+06 # mA^2(Q)MSSM DRbar +Block msoft Q= 2.44830526e+03 # MSSM DRbar SUSY breaking parameters + 1 2.00000000e+03 # M_1(Q) + 2 
3.50000000e+02 # M_2(Q) + 3 3.00000000e+03 # M_3(Q) + 21 3.21945923e+06 # mH1^2(Q) + 22 -5.93961802e+05 # mH2^2(Q) + 31 5.00000000e+03 # meL(Q) + 32 5.00000000e+03 # mmuL(Q) + 33 5.00000000e+03 # mtauL(Q) + 34 5.00000000e+03 # meR(Q) + 35 5.00000000e+03 # mmuR(Q) + 36 5.00000000e+03 # mtauR(Q) + 41 5.00000000e+03 # mqL1(Q) + 42 5.00000000e+03 # mqL2(Q) + 43 1.99999998e+03 # mqL3(Q) + 44 5.00000000e+03 # muR(Q) + 45 5.00000000e+03 # mcR(Q) + 46 2.99999998e+03 # mtR(Q) + 47 5.00000000e+03 # mdR(Q) + 48 5.00000000e+03 # msR(Q) + 49 5.00000000e+03 # mbR(Q) +Block au Q= 2.44830526e+03 + 1 1 5.81774051e-06 # Au(Q)MSSM DRbar + 2 2 5.81784233e-06 # Ac(Q)MSSM DRbar + 3 3 -3.79999999e+03 # At(Q)MSSM DRbar +Block ad Q= 2.44830526e+03 + 1 1 1.63597770e-06 # Ad(Q)MSSM DRbar + 2 2 1.63606185e-06 # As(Q)MSSM DRbar + 3 3 -3.80000000e+03 # Ab(Q)MSSM DRbar +Block ae Q= 2.44830526e+03 + 1 1 0.00000000e+00 # Ae(Q)MSSM DRbar + 2 2 1.79727943e-07 # Amu(Q)MSSM DRbar + 3 3 1.80415102e-07 # Atau(Q)MSSM DRbar +# PDG Width +DECAY 1000021 4.57521547e+01 # Gluino decays +# BR NDA PDG1 PDG2 Comments + 2.48558445e-01 2 5 -1000005 # ~g -> b ~b_1* + 2.48558445e-01 2 -5 1000005 # ~g -> bb ~b_1 + 2.41436164e-01 2 6 -1000006 # ~g -> t ~t_1* + 2.41436164e-01 2 -6 1000006 # ~g -> tb ~t_1 + 9.75483782e-03 2 6 -2000006 # ~g -> t ~t_2* + 9.75483782e-03 2 -6 2000006 # ~g -> tb ~t_2 +# BR NDA PDG1 PDG2 PDG3 Comments +# +# PDG Width +DECAY 1000001 1.54736202e+02 # SdownL decays +# BR NDA PDG1 PDG2 Comments + 6.03757564e-01 2 1 1000021 # ~d_L -> d ~g + 2.55156609e-01 2 2 -1000024 # ~d_L -> u ~chi_1- + 6.71441694e-03 2 2 -1000037 # ~d_L -> u ~chi_2- + 1.28959128e-01 2 1 1000022 # ~d_L -> d ~chi_10 + 2.27579746e-04 2 1 1000023 # ~d_L -> d ~chi_20 + 1.75582626e-03 2 1 1000025 # ~d_L -> d ~chi_30 + 3.42887579e-03 2 1 1000035 # ~d_L -> d ~chi_40 +# +# PDG Width +DECAY 2000001 9.49286865e+01 # SdownR decays +# BR NDA PDG1 PDG2 Comments + 9.78005033e-01 2 1 1000021 # ~d_R -> d ~g + 2.19663180e-02 2 1 1000035 # ~d_R -> d ~chi_40 +# +# PDG Width +DECAY 1000002 1.54739007e+02 # SupL decays +# BR NDA PDG1 PDG2 Comments + 6.03526386e-01 2 2 1000021 # ~u_L -> u ~g + 2.60371081e-01 2 1 1000024 # ~u_L -> d ~chi_1+ + 1.72309150e-03 2 1 1000037 # ~u_L -> d ~chi_2+ + 1.28756241e-01 2 2 1000022 # ~u_L -> u ~chi_10 + 1.89943990e-04 2 2 1000023 # ~u_L -> u ~chi_20 + 2.10496898e-03 2 2 1000025 # ~u_L -> u ~chi_30 + 3.32828755e-03 2 2 1000035 # ~u_L -> u ~chi_40 +# +# PDG Width +DECAY 2000002 1.01142750e+02 # SupR decays +# BR NDA PDG1 PDG2 Comments + 9.17444993e-01 2 2 1000021 # ~u_R -> u ~g + 8.24474668e-02 2 2 1000035 # ~u_R -> u ~chi_40 +# +# PDG Width +DECAY 1000003 1.54736201e+02 # SstrangeL decays +# BR NDA PDG1 PDG2 Comments + 6.03757569e-01 2 3 1000021 # ~s_L -> s ~g + 2.55156604e-01 2 4 -1000024 # ~s_L -> c ~chi_1- + 6.71441679e-03 2 4 -1000037 # ~s_L -> c ~chi_2- + 1.28959129e-01 2 3 1000022 # ~s_L -> s ~chi_10 + 2.27579748e-04 2 3 1000023 # ~s_L -> s ~chi_20 + 1.75582627e-03 2 3 1000025 # ~s_L -> s ~chi_30 + 3.42887582e-03 2 3 1000035 # ~s_L -> s ~chi_40 +# +# PDG Width +DECAY 2000003 9.49286864e+01 # SstrangeR decays +# BR NDA PDG1 PDG2 Comments + 9.78005033e-01 2 3 1000021 # ~s_R -> s ~g + 2.19663180e-02 2 3 1000035 # ~s_R -> s ~chi_40 +# +# PDG Width +DECAY 1000004 1.54738999e+02 # ScharmL decays +# BR NDA PDG1 PDG2 Comments + 6.03526369e-01 2 4 1000021 # ~c_L -> c ~g + 2.60371095e-01 2 3 1000024 # ~c_L -> s ~chi_1+ + 1.72309159e-03 2 3 1000037 # ~c_L -> s ~chi_2+ + 1.28756244e-01 2 4 1000022 # ~c_L -> c ~chi_10 + 1.89943995e-04 2 4 1000023 
# ~c_L -> c ~chi_20 + 2.10496903e-03 2 4 1000025 # ~c_L -> c ~chi_30 + 3.32828760e-03 2 4 1000035 # ~c_L -> c ~chi_40 +# +# PDG Width +DECAY 2000004 1.01142741e+02 # ScharmR decays +# BR NDA PDG1 PDG2 Comments + 9.17444990e-01 2 4 1000021 # ~c_R -> c ~g + 8.24474701e-02 2 4 1000035 # ~c_R -> c ~chi_40 +# +# PDG Width +DECAY 1000005 4.67144012e+01 # Sbottom1 decays +# BR NDA PDG1 PDG2 Comments + 3.26311846e-01 2 6 -1000024 # ~b_1 -> t ~chi_1- + 4.91112655e-01 2 6 -1000037 # ~b_1 -> t ~chi_2- + 1.66515313e-01 2 5 1000022 # ~b_1 -> b ~chi_10 + 7.33220540e-03 2 5 1000023 # ~b_1 -> b ~chi_20 + 8.70170545e-03 2 5 1000025 # ~b_1 -> b ~chi_30 +# +# PDG Width +DECAY 2000005 1.00998854e+02 # Sbottom2 decays +# BR NDA PDG1 PDG2 Comments + 9.18411490e-01 2 5 1000021 # ~b_2 -> b ~g + 5.82525387e-04 2 6 -1000024 # ~b_2 -> t ~chi_1- + 2.15270204e-02 2 6 -1000037 # ~b_2 -> t ~chi_2- + 8.48284632e-04 2 -24 1000006 # ~b_2 -> W- ~t_1 + 7.32430597e-03 2 -37 1000006 # ~b_2 -> H- ~t_1 + 5.76194480e-04 2 25 1000005 # ~b_2 -> h ~b_1 + 3.71838486e-03 2 35 1000005 # ~b_2 -> H ~b_1 + 3.71955685e-03 2 36 1000005 # ~b_2 -> A ~b_1 + 4.37839493e-04 2 1000005 23 # ~b_2 -> Z ~b_1 + 2.95518573e-04 2 5 1000022 # ~b_2 -> b ~chi_10 + 1.10279023e-02 2 5 1000023 # ~b_2 -> b ~chi_20 + 1.08591662e-02 2 5 1000025 # ~b_2 -> b ~chi_30 + 2.06426646e-02 2 5 1000035 # ~b_2 -> b ~chi_40 +# +# PDG Width +DECAY 1000006 4.66061316e+01 # Stop1 decays +# BR NDA PDG1 PDG2 Comments + 3.39313739e-01 2 5 1000024 # ~t_1 -> b ~chi_1+ + 1.46564830e-02 2 5 1000037 # ~t_1 -> b ~chi_2+ + 1.66178687e-01 2 6 1000022 # ~t_1 -> t ~chi_10 + 2.46737847e-01 2 6 1000023 # ~t_1 -> t ~chi_20 + 2.33113244e-01 2 6 1000025 # ~t_1 -> t ~chi_30 +# +# PDG Width +DECAY 2000006 1.48130238e+02 # Stop2 decays +# BR NDA PDG1 PDG2 Comments + 2.75993674e-01 2 5 1000037 # ~t_2 -> b ~chi_2+ + 1.94957681e-01 2 24 1000005 # ~t_2 -> W+ ~b_1 + 1.36305754e-01 2 25 1000006 # ~t_2 -> h ~t_1 + 1.01914037e-01 2 1000006 23 # ~t_2 -> Z ~t_1 + 1.36385838e-01 2 6 1000023 # ~t_2 -> t ~chi_20 + 1.39648352e-01 2 6 1000025 # ~t_2 -> t ~chi_30 + 1.47563340e-02 2 6 1000035 # ~t_2 -> t ~chi_40 +# +# PDG Width +DECAY 1000011 6.49129402e+01 # SelectronL decays +# BR NDA PDG1 PDG2 Comments + 6.02981255e-01 2 12 -1000024 # ~e_L -> nu_e ~chi_1- + 1.58536929e-02 2 12 -1000037 # ~e_L -> nu_e ~chi_2- + 3.03865826e-01 2 11 1000022 # ~e_L -> e- ~chi_10 + 3.67756837e-04 2 11 1000023 # ~e_L -> e- ~chi_20 + 5.87061906e-03 2 11 1000025 # ~e_L -> e- ~chi_30 + 7.10608508e-02 2 11 1000035 # ~e_L -> e- ~chi_40 +# +# PDG Width +DECAY 2000011 1.85445223e+01 # SelectronR decays +# BR NDA PDG1 PDG2 Comments + 1.26170472e-04 2 11 1000023 # ~e_R -> e- ~chi_20 + 1.17753227e-03 2 11 1000025 # ~e_R -> e- ~chi_30 + 9.98691199e-01 2 11 1000035 # ~e_R -> e- ~chi_40 +# +# PDG Width +DECAY 1000013 6.49129402e+01 # SmuonL decays +# BR NDA PDG1 PDG2 Comments + 6.02981255e-01 2 14 -1000024 # ~mu_L -> nu_mu ~chi_1- + 1.58536929e-02 2 14 -1000037 # ~mu_L -> nu_mu ~chi_2- + 3.03865825e-01 2 13 1000022 # ~mu_L -> mu- ~chi_10 + 3.67756836e-04 2 13 1000023 # ~mu_L -> mu- ~chi_20 + 5.87061906e-03 2 13 1000025 # ~mu_L -> mu- ~chi_30 + 7.10608508e-02 2 13 1000035 # ~mu_L -> mu- ~chi_40 +# +# PDG Width +DECAY 2000013 1.85445223e+01 # SmuonR decays +# BR NDA PDG1 PDG2 Comments + 1.26170472e-04 2 13 1000023 # ~mu_R -> mu- ~chi_20 + 1.17753227e-03 2 13 1000025 # ~mu_R -> mu- ~chi_30 + 9.98691199e-01 2 13 1000035 # ~mu_R -> mu- ~chi_40 +# +# PDG Width +DECAY 1000012 6.49280182e+01 # Selectron sneutrino decays +# BR NDA PDG1 PDG2 Comments + 
6.15112564e-01 2 11 1000024 # ~nu_eL -> e- ~chi_1+ + 4.06715787e-03 2 11 1000037 # ~nu_eL -> e- ~chi_2+ + 3.05067964e-01 2 12 1000022 # ~nu_eL -> nu_e ~chi_10 + 6.33900257e-04 2 12 1000023 # ~nu_eL -> nu_e ~chi_20 + 3.39408187e-03 2 12 1000025 # ~nu_eL -> nu_e ~chi_30 + 7.17243322e-02 2 12 1000035 # ~nu_eL -> nu_e ~chi_40 +# +# PDG Width +DECAY 1000014 6.49280181e+01 # Smuon sneutrino decays +# BR NDA PDG1 PDG2 Comments + 6.15112564e-01 2 13 1000024 # ~nu_muL -> mu- ~chi_1+ + 4.06715787e-03 2 13 1000037 # ~nu_muL -> mu- ~chi_2+ + 3.05067964e-01 2 14 1000022 # ~nu_muL -> nu_mu ~chi_10 + 6.33900257e-04 2 14 1000023 # ~nu_muL -> nu_mu ~chi_20 + 3.39408188e-03 2 14 1000025 # ~nu_muL -> nu_mu ~chi_30 + 7.17243322e-02 2 14 1000035 # ~nu_muL -> nu_mu ~chi_40 +# +# PDG Width +DECAY 1000015 4.32128881e+01 # Stau1 decays +# BR NDA PDG1 PDG2 Comments + 4.76393849e-01 2 16 -1000024 # ~tau_1- -> nu_tau ~chi_1- + 2.16559458e-01 2 15 1000022 # ~tau_1- -> tau- ~chi_10 + 1.53908705e-02 2 15 1000023 # ~tau_1- -> tau- ~chi_20 + 2.38687438e-02 2 15 1000025 # ~tau_1- -> tau- ~chi_30 + 2.67786446e-01 2 15 1000035 # ~tau_1- -> tau- ~chi_40 +# +# PDG Width +DECAY 2000015 4.33163823e+01 # Stau2 decays +# BR NDA PDG1 PDG2 Comments + 4.28684179e-01 2 16 -1000024 # ~tau_2- -> nu_tau ~chi_1- + 4.70480885e-02 2 16 -1000037 # ~tau_2- -> nu_tau ~chi_2- + 2.39845883e-01 2 15 1000022 # ~tau_2- -> tau- ~chi_10 + 8.69154520e-03 2 15 1000023 # ~tau_2- -> tau- ~chi_20 + 8.50270995e-03 2 15 1000025 # ~tau_2- -> tau- ~chi_30 + 2.67227594e-01 2 15 1000035 # ~tau_2- -> tau- ~chi_40 +# +# PDG Width +DECAY 1000016 6.59484429e+01 # Stau sneutrino decays +# BR NDA PDG1 PDG2 Comments + 3.00343467e-01 2 16 1000022 # ~nu_tauL -> nu_tau ~chi_10 + 6.24082383e-04 2 16 1000023 # ~nu_tauL -> nu_tau ~chi_20 + 3.34151414e-03 2 16 1000025 # ~nu_tauL -> nu_tau ~chi_30 + 7.06128460e-02 2 16 1000035 # ~nu_tauL -> nu_tau ~chi_40 + 6.06025763e-01 2 15 1000024 # ~nu_tauL -> tau- ~chi_1+ + 1.90523269e-02 2 15 1000037 # ~nu_tauL -> tau- ~chi_2+ +# +# PDG Width +DECAY 1000024 7.05738238e-15 # Chargino 1+ (lightest) decays +# BR NDA PDG1 PDG2 Comments + 9.76611731e-01 2 211 1000022 # ~chi_1+ -> pi+ ~chi_10 +# BR NDA PDG1 PDG2 PDG3 Comments + 1.90219825e-02 3 1000022 12 -11 # ~chi_1+ -> chi_10 nu_e e+ + 4.36628670e-03 3 1000022 14 -13 # ~chi_1+ -> chi_10 nu_mu mu+ +# +# PDG Width +DECAY 1000037 5.45402142e+00 # Chargino 2+ (heaviest) decays +# BR NDA PDG1 PDG2 Comments + 3.36191832e-01 2 24 1000022 # ~chi_2+ -> W+ ~chi_10 + 3.42463958e-01 2 1000024 23 # ~chi_2+ -> Z ~chi_1+ + 3.21344206e-01 2 1000024 25 # ~chi_2+ -> h ~chi_1+ +# +# PDG Width +DECAY 1000022 0.00000000e+00 # Neutralino1 decays +# +# PDG Width +DECAY 1000023 5.44614732e+00 # Neutralino2 decays +# BR NDA PDG1 PDG2 Comments + 3.32347203e-01 2 24 -1000024 # ~chi_20 -> W+ ~chi_1- + 3.32347203e-01 2 -24 1000024 # ~chi_20 -> W- ~chi_1+ + 3.04685436e-01 2 23 1000022 # ~chi_20 -> Z ~chi_10 + 3.06201572e-02 2 25 1000022 # ~chi_20 -> h ~chi_10 +# +# PDG Width +DECAY 1000025 5.50130437e+00 # Neutralino3 decays +# BR NDA PDG1 PDG2 Comments + 3.37096298e-01 2 24 -1000024 # ~chi_30 -> W+ ~chi_1- + 3.37096298e-01 2 -24 1000024 # ~chi_30 -> W- ~chi_1+ + 3.48981377e-02 2 23 1000022 # ~chi_30 -> Z ~chi_10 + 2.90909265e-01 2 25 1000022 # ~chi_30 -> h ~chi_10 +# +# PDG Width +DECAY 1000035 5.50929004e+00 # Neutralino4 decays +# BR NDA PDG1 PDG2 Comments + 1.02246683e-04 2 24 -1000024 # ~chi_40 -> W+ ~chi_1- + 2.42670079e-01 2 24 -1000037 # ~chi_40 -> W+ ~chi_2- + 1.02246683e-04 2 -24 1000024 # ~chi_40 -> W- 
~chi_1+ + 2.42670079e-01 2 -24 1000037 # ~chi_40 -> W- ~chi_2+ + 2.25566631e-01 2 23 1000023 # ~chi_40 -> Z ~chi_20 + 2.44730740e-02 2 23 1000025 # ~chi_40 -> Z ~chi_30 + 1.25467161e-04 2 25 1000022 # ~chi_40 -> h ~chi_10 + 2.49589084e-02 2 25 1000023 # ~chi_40 -> h ~chi_20 + 2.39331265e-01 2 25 1000025 # ~chi_40 -> h ~chi_30 +# +# PDG Width +DECAY 25 5.60733006e-03 # light higgs decays +# BR NDA PDG1 PDG2 Comments + 4.01941189e-02 2 4 -4 # h -> c cb + 2.91121478e-04 2 3 -3 # h -> s sb + 7.16819352e-01 2 5 -5 # h -> b bb + 1.52920877e-04 2 13 -13 # h -> mu- mu+ + 4.72626878e-02 2 15 -15 # h-> tau- tau+ + 1.89931210e-03 2 22 22 # h -> gamma gamma + 4.87801858e-02 2 21 21 # h -> gluon gluon + 1.23681895e-03 2 23 22 # h -> Z gamma + 1.28909878e-01 2 24 -24 # h -> WW* -> W f f'bar + 1.44536008e-02 2 23 23 # h -> ZZ* -> Z f f'bar +# +# PDG Width +DECAY 35 2.09418962e+01 # heavy higgs decays +# BR NDA PDG1 PDG2 Comments + 2.16413355e-01 2 5 -5 # H -> b bb + 4.51568961e-02 2 6 -6 # H -> t tb + 2.01078732e-02 2 15 -15 # H -> tau- tau+ + 7.42157609e-03 2 1000022 1000022 # H -> ~chi_10 ~chi_10 + 1.61008070e-01 2 1000022 1000023 # H -> ~chi_10 ~chi_20 + 6.72886237e-02 2 1000022 1000025 # H -> ~chi_10 ~chi_30 + 1.27323134e-04 2 1000023 1000023 # H -> ~chi_20 ~chi_20 + 2.42948146e-03 2 1000023 1000025 # H -> ~chi_20 ~chi_30 + 4.28282073e-04 2 1000025 1000025 # H -> ~chi_30 ~chi_30 + 1.47985117e-02 2 1000024 -1000024 # H -> ~chi_1+ ~chi_1- + 3.89321686e-04 2 1000037 -1000037 # H -> ~chi_2+ ~chi_2- + 2.32075126e-01 2 1000024 -1000037 # H -> ~chi_1+ ~chi_2- + 2.32075126e-01 2 1000037 -1000024 # H -> ~chi_2+ ~chi_1- +# +# PDG Width +DECAY 36 2.09428997e+01 # pseudoscalar higgs decays +# BR NDA PDG1 PDG2 Comments + 2.16412077e-01 2 5 -5 # A -> b bb + 4.58141820e-02 2 6 -6 # A -> t tb + 2.01078485e-02 2 15 -15 # A -> tau- tau+ + 1.05492262e-02 2 1000022 1000022 # A -> ~chi_10 ~chi_10 + 7.04603756e-02 2 1000022 1000023 # A -> ~chi_10 ~chi_20 + 1.54259036e-01 2 1000022 1000025 # A -> ~chi_10 ~chi_30 + 3.34893042e-04 2 1000023 1000023 # A -> ~chi_20 ~chi_20 + 5.17818384e-04 2 1000023 1000025 # A -> ~chi_20 ~chi_30 + 2.60106775e-03 2 1000025 1000025 # A -> ~chi_30 ~chi_30 + 2.10004044e-02 2 1000024 -1000024 # A -> ~chi_1+ ~chi_1- + 3.63947515e-03 2 1000037 -1000037 # A -> ~chi_2+ ~chi_2- + 2.27035112e-01 2 1000024 -1000037 # A -> ~chi_1+ ~chi_2- + 2.27035112e-01 2 1000037 -1000024 # A -> ~chi_2+ ~chi_1- +# +# PDG Width +DECAY 37 2.62743284e+01 # Charged higgs+ decays +# BR NDA PDG1 PDG2 Comments + 4.13899140e-01 2 6 -5 # H+ -> t bb + 1.57897957e-02 2 16 -15 # H+ -> tau+ nu_tau + 1.91068454e-01 2 1000022 1000037 # H+ -> ~chi_10 ~chi_2+ + 1.87434817e-01 2 1000023 1000024 # H+ -> ~chi_20 ~chi_1+ + 1.76880721e-03 2 1000023 1000037 # H+ -> ~chi_20 ~chi_2+ + 1.89890443e-01 2 1000025 1000024 # H+ -> ~chi_30 ~chi_1+ +# diff --git a/test/testResummino.py b/test/testResummino.py new file mode 100755 index 0000000000..9c5da6ac9f --- /dev/null +++ b/test/testResummino.py @@ -0,0 +1,174 @@ +#!/usr/bin/env python3 + +""" +.. module:: testXSecResummino + :synopsis: Compares the output of XSecResummino with a given value. + +.. 
diff --git a/test/testResummino.py b/test/testResummino.py
new file mode 100755
index 0000000000..9c5da6ac9f
--- /dev/null
+++ b/test/testResummino.py
@@ -0,0 +1,174 @@
+#!/usr/bin/env python3
+
+"""
+.. module:: testResummino
+   :synopsis: Compares the output of XSecResummino with a given value.
+
+.. moduleauthor:: Théo Reymermier
+
+"""
+
+import sys,os
+sys.path.insert(0,"../")
+from smodels.tools import xsecComputer, toolBox, xsecResummino
+from smodels.tools.xsecComputer import LO, NLL
+from smodels.tools.physicsUnits import TeV, fb
+from smodels.tools.smodelsLogging import logger
+from smodels.theory import crossSection
+import tempfile
+import unittest
+import argparse
+import logging.config
+
+
+class XSecTest(unittest.TestCase):
+    # use a different logging config for the unit tests.
+    logging.config.fileConfig( "./logging.conf" )
+    from smodels.tools.smodelsLogging import setLogLevel
+
+    setLogLevel ( "warn" )
+
+    toolBox.ToolBox().compile() ## make sure the tools are compiled
+
+    def testWithConfigFile(self):
+        """ test the main routine for the computation of LO cross sections
+        at 13 TeV, using a configuration file """
+
+        # read the example SLHA file, stripping any existing XSECTION blocks
+        slhafile = "./testFiles/resummino/resummino_example.slha"
+        with open(slhafile, 'r') as f:
+            fdata = f.read()
+        fdata = fdata[:fdata.find('XSECTION')]
+
+        conffile = "./testFiles/resummino/resummino.py"
+        with open(conffile, 'r') as f_conf:
+            fdata_conf = f_conf.read()
+
+        # write the SLHA content and the configuration to temporary files
+        fd, tmpfile = tempfile.mkstemp()
+        os.close(fd)
+        with open(tmpfile, 'w') as fnew:
+            fnew.write(fdata)
+
+        fd_conf, tmpfile_conf = tempfile.mkstemp()
+        os.close(fd_conf)
+        with open(tmpfile_conf, 'w') as fnew_conf:
+            fnew_conf.write(fdata_conf)
+
+        logger.info ("test LO xsecs @ 13 TeV" )
+        # options for the cross section calculation:
+        xargs = argparse.Namespace()
+        xargs.sqrts = [[13]]
+        from smodels.tools.runtime import nCPUs
+        xargs.ncpus = nCPUs()-1
+        xargs.noautocompile = False
+        xargs.query = False
+        xargs.conf = tmpfile_conf
+        xargs.NLL = False
+        xargs.NLO = False
+        xargs.keep = False
+        xargs.tofile = False
+        xargs.alltofile = True
+        xargs.filename = tmpfile
+        xargs.xsec = None
+        xargs.xseclimit = None
+        xargs.particles = []
+        xargs.verbosity = "warning"
+        # compute the LO cross sections:
+        xsecResummino.main(xargs)
+        # read the xsecs back from the SLHA file:
+        xsecsInfile = crossSection.getXsecFromSLHAFile(tmpfile)
+
+        # check 8 TeV xsecs (currently disabled):
+        # lo = xsecsInfile.getXsecsFor('8 TeV (LO)')[0].value.asNumber(fb)
+        # nlo = xsecsInfile.getXsecsFor('8 TeV (NLO)')[0].value.asNumber(fb)
+        # self.assertAlmostEqual(lo/0.3186, 1., 2)
+        # self.assertAlmostEqual(nlo/0.3691, 1., 2)
+        # check the 13 TeV LO xsec:
+        lo = xsecsInfile.getXsecsFor('13 TeV (LO)')[0].value.asNumber(fb)
+        # nlo = xsecsInfile.getXsecsFor('13 TeV (NLO)')[0].value.asNumber(fb)
+        try:
+            self.assertAlmostEqual(lo / 66.478153, 1., 1 )
+        except Exception as e:
+            logger.error ( f"check {tmpfile}" )
+            raise e
+        os.remove(tmpfile)
+        os.remove(tmpfile_conf)
+
+    def MestXSecMain(self):
+        """ test the main routine for the computation of the LO cross section
+        at 13 TeV, restricted to given particles """
+        # note: the name does not start with 'test', so unittest does not
+        # pick this check up automatically; rename it to run it.
+
+        slhafile = "./testFiles/resummino/resummino_example.slha"
+        with open(slhafile, 'r') as f:
+            fdata = f.read()
+        fdata = fdata[:fdata.find('XSECTION')]
+        fd, tmpfile = tempfile.mkstemp()
+        os.close(fd)
+        with open(tmpfile, 'w') as fnew:
+            fnew.write(fdata)
+        logger.info ("test LO xsecs @ 13 TeV" )
+        # options for the cross section calculation:
+        xargs = argparse.Namespace()
+        xargs.sqrts = [[13]]
+        from smodels.tools.runtime import nCPUs
+        xargs.ncpus = nCPUs()-1
+        xargs.noautocompile = True
+        xargs.query = False
+        xargs.conf = 'default'
+        xargs.NLL = False
+        xargs.NLO = False
+        xargs.keep = False
+        xargs.tofile = False
+        xargs.alltofile = True
+        xargs.filename = tmpfile
+        xargs.xsec = 0.00001
+        xargs.xseclimit = None
+        xargs.particles = [ [1000024] ]
+        xargs.verbosity = "warning"
+        # compute the LO cross sections:
+        xsecResummino.main(xargs)
+        # read the xsecs back from the SLHA file:
+        xsecsInfile = crossSection.getXsecFromSLHAFile(tmpfile)
+
+        # check 8 TeV xsecs (currently disabled):
+        # lo = xsecsInfile.getXsecsFor('8 TeV (LO)')[0].value.asNumber(fb)
+        # nlo = xsecsInfile.getXsecsFor('8 TeV (NLO)')[0].value.asNumber(fb)
+        # self.assertAlmostEqual(lo/0.3186, 1., 2)
+        # self.assertAlmostEqual(nlo/0.3691, 1., 2)
+        # check the 13 TeV LO xsec:
+        lo = xsecsInfile.getXsecsFor('13 TeV (LO)')[0].value.asNumber(fb)
+        # nlo = xsecsInfile.getXsecsFor('13 TeV (NLO)')[0].value.asNumber(fb)
+        try:
+            self.assertAlmostEqual(lo / 66.478153, 1., 1 )
+        except Exception as e:
+            logger.error ( f"check {tmpfile}" )
+            raise e
+        os.remove(tmpfile)
+
+
+if __name__ == "__main__":
+    unittest.main()
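For reference, a minimal sketch of how the new cross section computer can be driven as a library call, mirroring the argparse.Namespace fields set in the test above. The SLHA path is illustrative; all field names and values are taken from the test:

#!/usr/bin/env python3
# Sketch only: drive xsecResummino.main() outside the test suite.
import argparse
from smodels.tools import xsecResummino
from smodels.tools.runtime import nCPUs

xargs = argparse.Namespace()
xargs.filename = "my_spectrum.slha"   # illustrative SLHA file, updated in place
xargs.sqrts = [[13]]                  # sqrt(s) values in TeV
xargs.ncpus = nCPUs() - 1
xargs.noautocompile = False
xargs.query = False
xargs.conf = 'default'                # or a path to a resummino config file
xargs.NLO = False                     # LO only, as in the test
xargs.NLL = False
xargs.keep = False
xargs.tofile = False
xargs.alltofile = True                # write all computed xsecs to the file
xargs.xsec = None
xargs.xseclimit = None
xargs.particles = []                  # e.g. [[1000024]] to restrict channels
xargs.verbosity = "warning"
xsecResummino.main(xargs)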