diff --git a/Example.ipynb b/Example.ipynb
deleted file mode 100644
index 1025a8dc2..000000000
--- a/Example.ipynb
+++ /dev/null
@@ -1,896 +0,0 @@
-{
- "cells": [
- {
- "cell_type": "code",
- "execution_count": 2,
- "id": "e68930e3",
- "metadata": {},
- "outputs": [
- {
- "ename": "ImportError",
- "evalue": "cannot import name 'runtime' from 'smodels.base' (unknown location)",
- "output_type": "error",
- "traceback": [
- "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
- "\u001b[0;31mImportError\u001b[0m Traceback (most recent call last)",
- "Cell \u001b[0;32mIn[2], line 4\u001b[0m\n\u001b[1;32m 1\u001b[0m \u001b[38;5;66;03m#!/usr/bin/env python3\u001b[39;00m\n\u001b[1;32m 3\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01m__future__\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m print_function\n\u001b[0;32m----> 4\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01msmodels\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mbase\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m runtime\n\u001b[1;32m 5\u001b[0m \u001b[38;5;66;03m# Define your model (list of BSM particles)\u001b[39;00m\n\u001b[1;32m 6\u001b[0m runtime\u001b[38;5;241m.\u001b[39mmodelFile \u001b[38;5;241m=\u001b[39m \u001b[38;5;124m'\u001b[39m\u001b[38;5;124msmodels.share.models.mssm\u001b[39m\u001b[38;5;124m'\u001b[39m\n",
- "\u001b[0;31mImportError\u001b[0m: cannot import name 'runtime' from 'smodels.base' (unknown location)"
- ]
- }
- ],
- "source": [
- "#!/usr/bin/env python3\n",
- "\n",
- "from __future__ import print_function\n",
- "from smodels.base import runtime\n",
- "# Define your model (list of BSM particles)\n",
- "runtime.modelFile = 'smodels.share.models.mssm'\n",
- "# runtime.modelFile = 'mssmQNumbers.slha'\n",
- "\n",
- "from smodels.decomposition import decomposer\n",
- "from smodels.base.physicsUnits import fb, GeV, TeV\n",
- "from smodels.matching.theoryPrediction import theoryPredictionsFor,TheoryPredictionsCombiner\n",
- "from smodels.experiment.databaseObj import Database\n",
- "from smodels.tools import coverage\n",
- "from smodels.base.smodelsLogging import setLogLevel\n",
- "from smodels.share.models.mssm import BSMList\n",
- "from smodels.share.models.SMparticles import SMList\n",
- "from smodels.base.model import Model\n",
- "import time\n",
- "import os\n",
- "setLogLevel(\"info\")"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 2,
- "id": "ed387104",
- "metadata": {},
- "outputs": [
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "INFO in databaseObj.loadTextDatabase() in 457: Parsing text database at /Users/sahananarasimha/smodels-database/\n",
- "INFO in databaseObj.createBinaryFile() in 608: /Users/sahananarasimha/smodels-database/db3.pcl created.\n",
- "INFO in metaObj.printFastlimBanner() in 162: FastLim v1.1 efficiencies loaded. Please cite: arXiv:1402.0492, EPJC74 (2014) 11\n"
- ]
- }
- ],
- "source": [
- "# Set the path to the database\n",
- "database = Database('official')"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 1,
- "id": "c38e24dd",
- "metadata": {},
- "outputs": [
- {
- "ename": "NameError",
- "evalue": "name 'Model' is not defined",
- "output_type": "error",
- "traceback": [
- "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
- "\u001b[0;31mNameError\u001b[0m Traceback (most recent call last)",
- "Cell \u001b[0;32mIn[1], line 1\u001b[0m\n\u001b[0;32m----> 1\u001b[0m model \u001b[38;5;241m=\u001b[39m \u001b[43mModel\u001b[49m(BSMparticles\u001b[38;5;241m=\u001b[39mBSMList, SMparticles\u001b[38;5;241m=\u001b[39mSMList)\n\u001b[1;32m 2\u001b[0m slhafile \u001b[38;5;241m=\u001b[39m \u001b[38;5;124m'\u001b[39m\u001b[38;5;124minputFiles/slha/lightEWinos.slha\u001b[39m\u001b[38;5;124m'\u001b[39m\n\u001b[1;32m 3\u001b[0m model\u001b[38;5;241m.\u001b[39mupdateParticles(inputFile\u001b[38;5;241m=\u001b[39mslhafile)\n",
- "\u001b[0;31mNameError\u001b[0m: name 'Model' is not defined"
- ]
- }
- ],
- "source": [
- "model = Model(BSMparticles=BSMList, SMparticles=SMList)\n",
- "slhafile = 'inputFiles/slha/lightEWinos.slha'\n",
- "model.updateParticles(inputFile=slhafile)\n",
- "\n",
- "# Set main options for decomposition\n",
- "sigmacut = 0.5*fb\n",
- "mingap = 5.*GeV\n",
- "\n",
- "t0 = time.time()\n",
- "# Decompose model\n",
- "topDict = decomposer.decompose(model, sigmacut,\n",
- " massCompress=True, invisibleCompress=True,\n",
- " minmassgap=mingap)\n",
- "\n",
- "# Access basic information from decomposition, using the topology list and topology objects:\n",
- "print(\"\\n Decomposition done in %1.2fm\" %((time.time()-t0)/60.))\n",
- "print(\"\\n Decomposition Results: \")\n",
- "print(\"\\t Total number of topologies: %i \" % len(topDict))\n",
- "nel = len(topDict.getSMSList())\n",
- "print(\"\\t Total number of elements = %i \" % nel)\n",
- "# Print information about the m-th topology:\n",
- "m = 2\n",
- "if len(topDict) > m:\n",
- " cName = sorted(topDict.keys())[m]\n",
- " elementList = topDict[cName]\n",
- " print(\"\\t\\t %i topology = \" % cName)\n",
- " # Print information about the n-th element in the m-th topology:\n",
- " n = 0\n",
- " el = elementList[n]\n",
- " print(\"\\t\\t %i-th element = \" % (n), el, end=\"\")\n",
- " print(\"\\n\\t\\t\\twith final states =\", el.getFinalStates(), \"\\n\\t\\t\\twith cross section =\", el.weightList, \"\\n\\t\\t\\tand masses = \", el.mass)\n",
- " "
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 120,
- "id": "6b0a8565",
- "metadata": {},
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "(PV > C1+/C1-(1),N2(2)), (C1+/C1-(1) > N1/N1~,q,q), (N2(2) > N1,mu-,mu+)\n"
- ]
- },
- {
- "data": {
- "image/svg+xml": [
- "\n",
- "\n",
- "\n",
- "\n",
- "\n"
- ],
- "text/plain": [
- ""
- ]
- },
- "metadata": {},
- "output_type": "display_data"
- }
- ],
- "source": [
- "sms = topDict.getSMSList()[3]\n",
- "print(sms)\n",
- "sms.draw()"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 86,
- "id": "25b20426",
- "metadata": {},
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "\n",
- " Loaded Database with 1 UL results and 0 EM results \n",
- "\n",
- " Theory Predictions and Constraints:\n",
- "\n",
- " ATLAS-SUSY-2019-08-grp \n",
- "------------------------\n",
- "Dataset = None\n",
- "TxNames = ['TChiWH']\n",
- "Theory Prediction = 5.38E-04 [pb]\n",
- "Condition Violation = None\n",
- "UL for theory prediction = 3.36E+01 [fb]\n",
- "r = 1.599E-02\n",
- "\n",
- "The largest r-value (theory/upper limit ratio) is 1.599E-02\n",
- "(The input model is not excluded by the simplified model results)\n",
- "\n",
- " Theory Predictions done in 0.00m\n"
- ]
- }
- ],
- "source": [
- "# Load the experimental results to be used.\n",
- "# In this case, all results are employed.\n",
- "listOfExpRes = database.getExpResults()\n",
- "\n",
- "t0 = time.time()\n",
- "# Print basic information about the results loaded.\n",
- "# Count the number of loaded UL and EM experimental results:\n",
- "nUL, nEM = 0, 0\n",
- "for exp in listOfExpRes:\n",
- " expType = exp.datasets[0].dataInfo.dataType\n",
- " if expType == 'upperLimit':\n",
- " nUL += 1\n",
- " elif expType == 'efficiencyMap':\n",
- " nEM += 1\n",
- "print(\"\\n Loaded Database with %i UL results and %i EM results \" % (nUL, nEM))\n",
- "\n",
- "# Compute the theory predictions for each experimental result and print them:\n",
- "print(\"\\n Theory Predictions and Constraints:\")\n",
- "rmax = 0.\n",
- "bestResult = None\n",
- "allPredictions = theoryPredictionsFor(database, topDict, combinedResults=False)\n",
- "for theoryPrediction in allPredictions:\n",
- " print('\\n %s ' % theoryPrediction.analysisId())\n",
- " dataset = theoryPrediction.dataset\n",
- " datasetID = theoryPrediction.dataId()\n",
- " txnames = sorted([str(txname) for txname in theoryPrediction.txnames])\n",
- " print(\"------------------------\")\n",
- " print(\"Dataset = \", datasetID) # Analysis name\n",
- " print(\"TxNames = \", txnames)\n",
- " print(\"Theory Prediction = \", theoryPrediction.xsection) # Signal cross section\n",
- " print(\"Condition Violation = \", theoryPrediction.conditions) # Condition violation values\n",
- "\n",
- " # Get the corresponding upper limit:\n",
- " print(\"UL for theory prediction = \", theoryPrediction.upperLimit)\n",
- "\n",
- " # Compute the r-value\n",
- " r = theoryPrediction.getRValue()\n",
- " print(\"r = %1.3E\" % r)\n",
- " # Compute likelihoods for EM-type results:\n",
- " if dataset.getType() == 'efficiencyMap':\n",
- " theoryPrediction.computeStatistics()\n",
- " print('L_BSM, L_SM, L_max = %1.3E, %1.3E, %1.3E' % (theoryPrediction.likelihood(),\n",
- " theoryPrediction.lsm(), theoryPrediction.lmax()))\n",
- " if r > rmax:\n",
- " rmax = r\n",
- " bestResult = theoryPrediction.analysisId()\n",
- "\n",
- " # Print the most constraining experimental result\n",
- " print(\"\\nThe largest r-value (theory/upper limit ratio) is %1.3E\" % rmax)\n",
- " if rmax > 1.:\n",
- " print(\"(The input model is likely excluded by %s)\" % bestResult)\n",
- " else:\n",
- " print(\"(The input model is not excluded by the simplified model results)\")\n",
- "\n",
- " print(\"\\n Theory Predictions done in %1.2fm\" %((time.time()-t0)/60.))"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 122,
- "id": "6ec0ae0d",
- "metadata": {},
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "[(PV > C2+(1),N4(2)), (C2+(1) > N1,W+), (N4(2) > N1,higgs)]\n"
- ]
- }
- ],
- "source": [
- "tp = allPredictions[0]\n",
- "smsList = tp.smsList\n",
- "print(smsList)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 88,
- "id": "05b73b27",
- "metadata": {},
- "outputs": [
- {
- "data": {
- "image/svg+xml": [
- "\n",
- "\n",
- "\n",
- "\n",
- "\n"
- ],
- "text/plain": [
- ""
- ]
- },
- "metadata": {},
- "output_type": "display_data"
- }
- ],
- "source": [
- "list(tp.expResult.getTxNames()[0].smsMap.keys())[0].draw()"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 89,
- "id": "c4ca4d6c",
- "metadata": {},
- "outputs": [
- {
- "data": {
- "image/svg+xml": [
- "\n",
- "\n",
- "\n",
- "\n",
- "\n"
- ],
- "text/plain": [
- ""
- ]
- },
- "metadata": {},
- "output_type": "display_data"
- },
- {
- "data": {
- "image/svg+xml": [
- "\n",
- "\n",
- "\n",
- "\n",
- "\n"
- ],
- "text/plain": [
- ""
- ]
- },
- "metadata": {},
- "output_type": "display_data"
- }
- ],
- "source": [
- "sms1 = smsList[0]\n",
- "sms1.draw()\n",
- "sms1.draw(labelAttr='mass',attrUnit=GeV)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 5,
- "id": "571be06f",
- "metadata": {},
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "\n",
- "\n",
- "Combined analyses: ATLAS-SUSY-2013-11,CMS-SUS-13-013\n",
- "Combined r value: 2.183E-02\n",
- "Combined r value (expected): 2.183E-02\n",
- "Likelihoods: L, L_max, L_SM = 1.385E-02, 1.401E-02, 1.394E-02\n",
- "\n",
- "\n",
- " Combination of analyses done in 0.00m\n"
- ]
- }
- ],
- "source": [
- "t0 = time.time()\n",
- "# Select a few results results for combination:\n",
- "combineAnas = ['ATLAS-SUSY-2013-11', 'CMS-SUS-13-013']\n",
- "selectedTheoryPreds = []\n",
- "for tp in allPredictions:\n",
- " expID = tp.analysisId()\n",
- " if expID not in combineAnas:\n",
- " continue\n",
- " if tp.likelihood() is None:\n",
- " continue\n",
- " selectedTheoryPreds.append(tp)\n",
- "# Make sure each analysis appears only once:\n",
- "expIDs = [tp.analysisId() for tp in selectedTheoryPreds]\n",
- "if len(expIDs) != len(set(expIDs)):\n",
- " print(\"\\nDuplicated results when trying to combine analyses. Combination will be skipped.\")\n",
- "# Only compute combination if at least two results were selected\n",
- "elif len(selectedTheoryPreds) > 1:\n",
- " combiner = TheoryPredictionsCombiner(selectedTheoryPreds)\n",
- " combiner.computeStatistics()\n",
- " llhd = combiner.likelihood()\n",
- " lmax = combiner.lmax()\n",
- " lsm = combiner.lsm()\n",
- " print(\"\\n\\nCombined analyses:\", combiner.analysisId())\n",
- " print(\"Combined r value: %1.3E\" % combiner.getRValue())\n",
- " print(\"Combined r value (expected): %1.3E\" % combiner.getRValue(expected=True))\n",
- " print(\"Likelihoods: L, L_max, L_SM = %10.3E, %10.3E, %10.3E\\n\" % (llhd, lmax, lsm))\n",
- "\n",
- "print(\"\\n Combination of analyses done in %1.2fm\" %((time.time()-t0)/60.))"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 6,
- "id": "127fa69d",
- "metadata": {},
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "\n",
- " Coverage done in 0.77m\n"
- ]
- }
- ],
- "source": [
- "t0 = time.time()\n",
- "# Find out missing topologies for sqrts=13*TeV:\n",
- "uncovered = coverage.Uncovered(topDict, sqrts=13.*TeV)\n",
- "print(\"\\n Coverage done in %1.2fm\" %((time.time()-t0)/60.))"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 7,
- "id": "4e4a9e3a",
- "metadata": {},
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "\n",
- "Total cross-section for missing topologies (fb): 1.120E+04\n",
- "\n",
- "\n",
- "Total cross-section for missing topologies with displaced decays (fb): 0.000E+00\n",
- "\n",
- "\n",
- "Total cross-section for missing topologies with prompt decays (fb): 1.399E+04\n",
- "\n",
- "\n",
- "Total cross-section for topologies outside the grid (fb): 3.746E+03\n",
- "\n"
- ]
- }
- ],
- "source": [
- "# First sort coverage groups by label\n",
- "groups = sorted(uncovered.groups[:], key=lambda g: g.label)\n",
- "# Print uncovered cross-sections:\n",
- "for group in groups:\n",
- " print(\"\\nTotal cross-section for %s (fb): %10.3E\\n\" % (group.description, group.getTotalXSec()))"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 8,
- "id": "6fa1c9d3",
- "metadata": {},
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Missing topologies (up to 3):\n",
- "Element: (PV > MET,MET,jet,jet,l,nu)\n",
- "\tcross-section (fb): 1203.8783553949456\n",
- "Element: (PV > MET,MET,jet,jet,ta,nu)\n",
- "\tcross-section (fb): 600.4536496545026\n",
- "Element: (PV > MET,MET,jet,jet,jet,jet,t,b,b,b)\n",
- "\tcross-section (fb): 515.0638147985978\n"
- ]
- }
- ],
- "source": [
- "missingTopos = uncovered.getGroup('missing (prompt)')\n",
- "# Print some of the missing topologies:\n",
- "if missingTopos.finalStateSMS:\n",
- " print('Missing topologies (up to 3):')\n",
- " for genEl in missingTopos.finalStateSMS[:3]:\n",
- " print('Element:', genEl)\n",
- " print('\\tcross-section (fb):', genEl.missingX)\n",
- "else:\n",
- " print(\"No missing topologies found\\n\")"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 9,
- "id": "5f564ea6",
- "metadata": {},
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "\n",
- "No displaced decays\n"
- ]
- }
- ],
- "source": [
- "missingDisplaced = uncovered.getGroup('missing (displaced)')\n",
- "# Print elements with displaced decays:\n",
- "if missingDisplaced.finalStateSMS:\n",
- " print('\\nElements with displaced vertices (up to 2):')\n",
- " for genEl in missingDisplaced.finalStateSMS[:2]:\n",
- " print('Element:', genEl)\n",
- " print('\\tcross-section (fb):', genEl.missingX)\n",
- "else:\n",
- " print(\"\\nNo displaced decays\")\n"
- ]
- }
- ],
- "metadata": {
- "kernelspec": {
- "display_name": "Python 3 (ipykernel)",
- "language": "python",
- "name": "python3"
- },
- "language_info": {
- "codemirror_mode": {
- "name": "ipython",
- "version": 3
- },
- "file_extension": ".py",
- "mimetype": "text/x-python",
- "name": "python",
- "nbconvert_exporter": "python",
- "pygments_lexer": "ipython3",
- "version": "3.11.1"
- }
- },
- "nbformat": 4,
- "nbformat_minor": 5
-}
diff --git a/parameters.ini b/parameters.ini
index 5273e7bab..805348119 100644
--- a/parameters.ini
+++ b/parameters.ini
@@ -25,23 +25,23 @@ ncpus = 1 ;Give number of cores used when running in parallel (integer, -1 means
#Select database analyses
[database]
-path = ../smodels-database ; URL to the database pickle file (it will be downloaded)
+path = official ; URL to the database pickle file (it will be downloaded)
#can be extended by: +fastlim (adds fastlim results), +superseded (adds superseded results), +nonaggregated (adds results with non-aggregated SRs in addition to the aggregated results), +full_llhds (replaces simplified HistFactory likelihoods by full ones).
# examples: path = official+nonaggregated, path = official+full_llhds
-analyses = ATLAS-SUSY-2019-08-grp ;Set all to use all analyses included in the database
+analyses = all ;Set all to use all analyses included in the database
#to use only specific analyses, give a list of the names separated by comma
# analyses = CMS-PAS-SUS-13-008, CMS-SUS-13-013,ATLAS-CONF-2013-024,ATLAS-SUSY-2013-04
# Wildcards are understood as in shell-expansion of file names: * ? []
# Filter centre-of-mass energy with suffix beginning with a colon, in unum-style, like :13*TeV
# Note that the asterisk in the suffix is not a wildcard.
-txnames= TChiWH ;Set all to use all constraints included in the database
+txnames= all ;Set all to use all constraints included in the database
#to use only specific constraints, give a list of the names separated by comma
#txnames = T2,T1,TChiWZ
# Wildcards are understood as in shell-expansion of file names: * ? []
-dataselector= upperLimit ; Set all to use all upper limit and efficiency maps results in the database. Set to upperLimit (efficiencyMap) to use only UL (EM) results:
+dataselector= all ; Set all to use all upper limit and efficiency maps results in the database. Set to upperLimit (efficiencyMap) to use only UL (EM) results:
#dataselector = efficiencyMap
#It can also be used to select specific datasets (signal regions) from efficiency map results. For the latter provide a list of the desired dataset ids
#dataselector = SRA mCT150,SRA mCT200
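
Note on the parameters.ini change: setting path = official makes SModelS download and cache the official database pickle instead of parsing a local ../smodels-database checkout. Below is a minimal Python sketch of the equivalent call, mirroring the deleted notebook above (the Database import and the 'official' label appear there; printing databaseVersion to check which release was fetched is an assumption about the API, not something shown in this diff):

    # Load the same 'official' database that the updated parameters.ini points to.
    # The pickle file is downloaded on first use and cached locally.
    from smodels.experiment.databaseObj import Database

    database = Database('official')
    # Assumption: databaseVersion reports the release tag of the fetched database.
    print(database.databaseVersion)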