~madteam/mg5amcnlo/series2.0

« back to all changes in this revision

Viewing changes to madgraph/interface/amcatnlo_run_interface.py

  • Committer: Olivier Mattelaer
  • Date: 2016-05-12 11:00:18 UTC
  • mfrom: (262.1.150 2.3.4)
  • Revision ID: olivier.mattelaer@uclouvain.be-20160512110018-sevb79f0wm4g8mpp
pass to 2.4.0

Show diffs side-by-side

added added

removed removed

Lines of Context:
36
36
import copy
37
37
import datetime
38
38
import tarfile
 
39
import traceback
 
40
import StringIO
39
41
 
40
42
try:
41
43
    import readline
184
186
    }
185
187
    
186
188
    debug_output = 'ME5_debug'
187
 
    error_debug = 'Please report this bug on https://bugs.launchpad.net/madgraph5\n'
 
189
    error_debug = 'Please report this bug on https://bugs.launchpad.net/mg5amcnlo\n'
188
190
    error_debug += 'More information is found in \'%(debug)s\'.\n' 
189
191
    error_debug += 'Please attach this file to your report.'
190
192
 
191
 
    config_debug = 'If you need help with this issue please contact us on https://answers.launchpad.net/madgraph5\n'
 
193
    config_debug = 'If you need help with this issue please contact us on https://answers.launchpad.net/mg5amcnlo\n'
192
194
 
193
195
 
194
196
    keyboard_stop_msg = """stopping all operation
915
917
    cluster_mode = 0
916
918
    queue  = 'madgraph'
917
919
    nb_core = None
 
920
    make_opts_var = {}
918
921
    
919
922
    next_possibility = {
920
923
        'start': ['generate_events [OPTIONS]', 'calculate_crossx [OPTIONS]', 'launch [OPTIONS]',
937
940
        self.nb_core = 0
938
941
        self.prompt = "%s>"%os.path.basename(pjoin(self.me_dir))
939
942
 
940
 
        # load the current status of the directory
941
 
        if os.path.exists(pjoin(self.me_dir,'HTML','results.pkl')):
942
 
            self.results = save_load_object.load_from_file(pjoin(self.me_dir,'HTML','results.pkl'))
943
 
            self.results.resetall(self.me_dir)
944
 
            self.last_mode = self.results[self.results.lastrun][-1]['run_mode']
945
 
        else:
946
 
            model = self.find_model_name()
947
 
            process = self.process # define in find_model_name
948
 
            self.results = gen_crossxhtml.AllResultsNLO(model, process, self.me_dir)
949
 
            self.last_mode = ''
 
943
 
 
944
        self.load_results_db()
950
945
        self.results.def_web_mode(self.web)
951
946
        # check that compiler is gfortran 4.6 or later if virtuals have been exported
952
947
        proc_card = open(pjoin(self.me_dir, 'Cards', 'proc_card_mg5.dat')).read()
1229
1224
 
1230
1225
 
1231
1226
        self.update_status('', level='all', update_results=True)
1232
 
        if self.run_card['ickkw'] == 3 and mode in ['noshower', 'aMC@NLO']:
 
1227
        if self.run_card['ickkw'] == 3 and \
 
1228
           (mode in ['noshower'] or \
 
1229
            (('PYTHIA8' not in self.run_card['parton_shower'].upper()) and (mode in ['aMC@NLO']))):
1233
1230
            logger.warning("""You are running with FxFx merging enabled.
1234
1231
To be able to merge samples of various multiplicities without double counting,
1235
1232
you have to remove some events after showering 'by hand'.
1236
1233
Please read http://amcatnlo.cern.ch/FxFx_merging.htm for more details.""")
1237
1234
 
1238
 
 
 
1235
        self.store_result()
1239
1236
        #check if the param_card defines a scan.
1240
1237
        if self.param_card_iterator:
1241
1238
            param_card_iterator = self.param_card_iterator
1614
1611
            run_type="Fixed order integration step %s" % integration_step
1615
1612
        else:
1616
1613
            run_type="MINT step %s" % integration_step
 
1614
        self.njobs=len(jobs_to_run)            
1617
1615
        for job in jobs_to_run:
1618
1616
            executable='ajob1'
1619
1617
            if fixed_order:
1627
1625
 
1628
1626
        if self.cluster_mode == 2:
1629
1627
            time.sleep(1) # security to allow all jobs to be launched
1630
 
        self.njobs=len(jobs_to_run)
1631
1628
        self.wait_for_complete(run_type)
1632
1629
 
1633
1630
 
1661
1658
            return jobs_to_run_new,jobs_to_collect
1662
1659
        elif jobs_to_run_new:
1663
1660
            # print intermediate summary of results
1664
 
            scale_pdf_info={}
 
1661
            scale_pdf_info=[]
1665
1662
            self.print_summary(options,integration_step,mode,scale_pdf_info,done=False)
1666
1663
        else:
1667
1664
            # When we are done for (N)LO+PS runs, do not print
1668
1665
            # anything yet. This will be done after the reweighting
1669
1666
            # and collection of the events
1670
 
            scale_pdf_info={}
 
1667
            scale_pdf_info=[]
1671
1668
# Prepare for the next integration/MINT step
1672
1669
        if (not fixed_order) and integration_step+1 == 2 :
1673
1670
            # next step is event generation (mint_step 2)
1674
1671
            jobs_to_run_new,jobs_to_collect_new= \
1675
1672
                    self.check_the_need_to_split(jobs_to_run_new,jobs_to_collect)
1676
1673
            self.prepare_directories(jobs_to_run_new,mode,fixed_order)
1677
 
            self.write_nevents_unweighted_file(jobs_to_collect_new)
 
1674
            self.write_nevents_unweighted_file(jobs_to_collect_new,jobs_to_collect)
1678
1675
            self.write_nevts_files(jobs_to_run_new)
1679
1676
        else:
1680
1677
            self.prepare_directories(jobs_to_run_new,mode,fixed_order)
1682
1679
        return jobs_to_run_new,jobs_to_collect_new
1683
1680
 
1684
1681
 
1685
 
    def write_nevents_unweighted_file(self,jobs):
1686
 
        """writes the nevents_unweighted file in the SubProcesses directory"""
 
1682
    def write_nevents_unweighted_file(self,jobs,jobs0events):
 
1683
        """writes the nevents_unweighted file in the SubProcesses directory.
 
1684
           We also need to write the jobs that will generate 0 events,
 
1685
           because that makes sure that the cross section from those channels
 
1686
           is taken into account in the event weights (by collect_events.f).
 
1687
        """
1687
1688
        content=[]
1688
1689
        for job in jobs:
1689
1690
            path=pjoin(job['dirname'].split('/')[-2],job['dirname'].split('/')[-1])
1690
1691
            lhefile=pjoin(path,'events.lhe')
1691
1692
            content.append(' %s     %d     %9e     %9e' % \
1692
1693
                (lhefile.ljust(40),job['nevents'],job['resultABS']*job['wgt_frac'],job['wgt_frac']))
 
1694
        for job in jobs0events:
 
1695
            if job['nevents']==0:
 
1696
                path=pjoin(job['dirname'].split('/')[-2],job['dirname'].split('/')[-1])
 
1697
                lhefile=pjoin(path,'events.lhe')
 
1698
                content.append(' %s     %d     %9e     %9e' % \
 
1699
                               (lhefile.ljust(40),job['nevents'],job['resultABS'],1.))
1693
1700
        with open(pjoin(self.me_dir,'SubProcesses',"nevents_unweighted"),'w') as f:
1694
1701
            f.write('\n'.join(content)+'\n')
1695
1702
 
1768
1775
                elif step+1 > 2:
1769
1776
                    raise aMCatNLOError('Cannot determine number of iterations and PS points '+
1770
1777
                                        'for integration step %i' % step )
1771
 
            elif ( req_acc > 0 and err/tot > req_acc*1.2 ) or step == 0:
 
1778
            elif ( req_acc > 0 and err/tot > req_acc*1.2 ) or step <= 0:
1772
1779
                req_accABS=req_acc*abs(tot)/totABS # overal relative required accuracy on ABS Xsec.
1773
1780
                for job in jobs:
1774
1781
                    job['mint_mode']=-1
1775
1782
                    # Determine relative required accuracy on the ABS for this job
1776
1783
                    job['accuracy']=req_accABS*math.sqrt(totABS/job['resultABS'])
1777
 
                    # If already accurate enough, skip running
1778
 
                    if job['accuracy'] > job['errorABS']/job['resultABS'] and step != 0:
1779
 
                        continue
 
1784
                    # If already accurate enough, skip the job (except when doing the first
 
1785
                    # step for the iappl=2 run: we need to fill all the applgrid grids!)
 
1786
                    if (job['accuracy'] > job['errorABS']/job['resultABS'] and step != 0) \
 
1787
                       and not (step==-1 and self.run_card['iappl'] == 2):
 
1788
                            continue
1780
1789
                    # Update the number of PS points based on errorABS, ncall and accuracy
1781
1790
                    itmax_fl=job['niters_done']*math.pow(job['errorABS']/
1782
1791
                                                         (job['accuracy']*job['resultABS']),2)
1879
1888
        """writes the res.txt files in the SubProcess dir"""
1880
1889
        jobs.sort(key = lambda job: -job['errorABS'])
1881
1890
        content=[]
1882
 
        content.append('\n\nCross-section per integration channel:')
 
1891
        content.append('\n\nCross section per integration channel:')
1883
1892
        for job in jobs:
1884
1893
            content.append('%(p_dir)20s  %(channel)15s   %(result)10.8e    %(error)6.4e       %(err_perc)6.4f%%  ' %  job)
1885
 
        content.append('\n\nABS cross-section per integration channel:')
 
1894
        content.append('\n\nABS cross section per integration channel:')
1886
1895
        for job in jobs:
1887
1896
            content.append('%(p_dir)20s  %(channel)15s   %(resultABS)10.8e    %(errorABS)6.4e       %(err_percABS)6.4f%%  ' %  job)
1888
1897
        totABS=0
1890
1899
        tot=0
1891
1900
        err=0
1892
1901
        for job in jobs:
1893
 
            totABS+= job['resultABS']
1894
 
            errABS+= math.pow(job['errorABS'],2)
1895
 
            tot+= job['result']
1896
 
            err+= math.pow(job['error'],2)
 
1902
            totABS+= job['resultABS']*job['wgt_frac']
 
1903
            errABS+= math.pow(job['errorABS'],2)*job['wgt_frac']
 
1904
            tot+= job['result']*job['wgt_frac']
 
1905
            err+= math.pow(job['error'],2)*job['wgt_frac']
1897
1906
        if jobs:
1898
1907
            content.append('\nTotal ABS and \nTotal: \n                      %10.8e +- %6.4e  (%6.4e%%)\n                      %10.8e +- %6.4e  (%6.4e%%) \n' %\
1899
1908
                           (totABS, math.sqrt(errABS), math.sqrt(errABS)/totABS *100.,\
1907
1916
 
1908
1917
    def collect_scale_pdf_info(self,options,jobs):
1909
1918
        """read the scale_pdf_dependence.dat files and collects there results"""
1910
 
        scale_pdf_info={}
1911
 
        if self.run_card['reweight_scale'] or self.run_card['reweight_PDF']:
 
1919
        scale_pdf_info=[]
 
1920
        if any(self.run_card['reweight_scale']) or any(self.run_card['reweight_PDF']) or \
 
1921
           len(self.run_card['dynamical_scale_choice']) > 1 or len(self.run_card['lhaid']) > 1:
1912
1922
            data_files=[]
1913
1923
            for job in jobs:
1914
1924
                data_files.append(pjoin(job['dirname'],'scale_pdf_dependence.dat'))
1928
1938
            logger.info('The results of this run and the TopDrawer file with the plots' + \
1929
1939
                        ' have been saved in %s' % pjoin(self.me_dir, 'Events', self.run_name))
1930
1940
        elif self.analyse_card['fo_analysis_format'].lower() == 'hwu':
1931
 
            self.combine_plots_HwU(jobs)
1932
 
            files.cp(pjoin(self.me_dir, 'SubProcesses', 'MADatNLO.HwU'),
1933
 
                     pjoin(self.me_dir, 'Events', self.run_name))
1934
 
            files.cp(pjoin(self.me_dir, 'SubProcesses', 'MADatNLO.gnuplot'),
1935
 
                     pjoin(self.me_dir, 'Events', self.run_name))
 
1941
            out=pjoin(self.me_dir,'Events',self.run_name,'MADatNLO')
 
1942
            self.combine_plots_HwU(jobs,out)
1936
1943
            try:
1937
1944
                misc.call(['gnuplot','MADatNLO.gnuplot'],\
1938
1945
                          stdout=devnull,stderr=devnull,\
1954
1961
                        ' have been saved in %s' % pjoin(self.me_dir, 'Events', self.run_name))
1955
1962
 
1956
1963
 
1957
 
    def combine_plots_HwU(self,jobs):
 
1964
    def combine_plots_HwU(self,jobs,out,normalisation=None):
1958
1965
        """Sums all the plots in the HwU format."""
1959
1966
        logger.debug('Combining HwU plots.')
1960
 
        all_histo_paths=[]
 
1967
 
 
1968
        command =  []
 
1969
        command.append(pjoin(self.me_dir, 'bin', 'internal','histograms.py'))
1961
1970
        for job in jobs:
1962
 
            all_histo_paths.append(pjoin(job['dirname'],"MADatNLO.HwU"))
1963
 
        histogram_list = histograms.HwUList(all_histo_paths[0])
1964
 
        for histo_path in all_histo_paths[1:]:
1965
 
            for i, histo in enumerate(histograms.HwUList(histo_path)):
1966
 
                # First make sure the plots have the same weight labels and such
1967
 
                histo.test_plot_compability(histogram_list[i])
1968
 
                # Now let the histogram module do the magic and add them.
1969
 
                histogram_list[i] += histo
1970
 
        
1971
 
        # And now output the finalized list
1972
 
        histogram_list.output(pjoin(self.me_dir,'SubProcesses',"MADatNLO"),
1973
 
                                                             format = 'gnuplot')
1974
 
 
 
1971
            if job['dirname'].endswith('.HwU'):
 
1972
                command.append(job['dirname'])
 
1973
            else:
 
1974
                command.append(pjoin(job['dirname'],'MADatNLO.HwU'))
 
1975
        command.append("--out="+out)
 
1976
        command.append("--gnuplot")
 
1977
        command.append("--band=[]")
 
1978
        command.append("--lhapdf-config="+self.options['lhapdf'])
 
1979
        if normalisation:
 
1980
            command.append("--multiply="+(','.join([str(n) for n in normalisation])))
 
1981
        command.append("--sum")
 
1982
        command.append("--no_open")
 
1983
 
 
1984
        p = misc.Popen(command, stdout = subprocess.PIPE, stderr = subprocess.STDOUT, cwd=self.me_dir)
 
1985
 
 
1986
        while p.poll() is None:
 
1987
            line = p.stdout.readline()
 
1988
            if any(t in line for t in ['INFO:','WARNING:','CRITICAL:','ERROR:','KEEP:']):
 
1989
                print line[:-1]
 
1990
            elif __debug__ and line:
 
1991
                logger.debug(line[:-1])
 
1992
 
 
1993
            
1975
1994
    def applgrid_combine(self,cross,error,jobs):
1976
1995
        """Combines the APPLgrids in all the SubProcess/P*/all_G*/ directories"""
1977
1996
        logger.debug('Combining APPLgrids \n')
2155
2174
        return
2156
2175
 
2157
2176
 
2158
 
    def print_summary(self, options, step, mode, scale_pdf_info={}, done=True):
 
2177
    def print_summary(self, options, step, mode, scale_pdf_info=[], done=True):
2159
2178
        """print a summary of the results contained in self.cross_sect_dict.
2160
2179
        step corresponds to the mintMC step, if =2 (i.e. after event generation)
2161
2180
        some additional infos are printed"""
2179
2198
            self.cross_sect_dict['axsec_string']='(Partial) abs(decay width)'
2180
2199
        else:
2181
2200
            self.cross_sect_dict['unit']='pb'
2182
 
            self.cross_sect_dict['xsec_string']='Total cross-section'
2183
 
            self.cross_sect_dict['axsec_string']='Total abs(cross-section)'
2184
 
        # Gather some basic statistics for the run and extracted from the log files.
2185
 
        if mode in ['aMC@NLO', 'aMC@LO', 'noshower', 'noshowerLO']: 
2186
 
            log_GV_files =  glob.glob(pjoin(self.me_dir, \
2187
 
                                    'SubProcesses', 'P*','G*','log_MINT*.txt'))
2188
 
            all_log_files = log_GV_files
2189
 
        elif mode == 'NLO':
2190
 
            log_GV_files =  glob.glob(pjoin(self.me_dir, \
2191
 
                                    'SubProcesses', 'P*','all_G*','log_MINT*.txt'))
2192
 
            all_log_files = log_GV_files
 
2201
            self.cross_sect_dict['xsec_string']='Total cross section'
 
2202
            self.cross_sect_dict['axsec_string']='Total abs(cross section)'
2193
2203
 
2194
 
        elif mode == 'LO':
2195
 
            log_GV_files = ''
2196
 
            all_log_files = glob.glob(pjoin(self.me_dir, \
2197
 
                                    'SubProcesses', 'P*','born_G*','log_MINT*.txt'))
2198
 
        else:
2199
 
            raise aMCatNLOError, 'Running mode %s not supported.'%mode
2200
 
            
2201
 
        
2202
2204
        if mode in ['aMC@NLO', 'aMC@LO', 'noshower', 'noshowerLO']:
2203
2205
            status = ['Determining the number of unweighted events per channel',
2204
2206
                      'Updating the number of unweighted events per channel',
2205
2207
                      'Summary:']
2206
 
            if step != 2:
2207
 
                message = status[step] + '\n\n      Intermediate results:' + \
2208
 
                    ('\n      Random seed: %(randinit)d' + \
2209
 
                     '\n      %(xsec_string)s:      %(xsect)8.3e +- %(errt)6.1e %(unit)s' + \
2210
 
                     '\n      %(axsec_string)s: %(xseca)8.3e +- %(erra)6.1e %(unit)s \n') \
2211
 
                     % self.cross_sect_dict
2212
 
            else:
2213
 
        
2214
 
                message = '\n      ' + status[step] + proc_info + \
2215
 
                          '\n      %(xsec_string)s: %(xsect)8.3e +- %(errt)6.1e %(unit)s' % \
2216
 
                        self.cross_sect_dict
2217
 
 
2218
 
                if self.run_card['nevents']>=10000 and self.run_card['reweight_scale']:
2219
 
                   message = message + \
2220
 
                       ('\n      Ren. and fac. scale uncertainty: +%0.1f%% -%0.1f%%') % \
2221
 
                       (scale_pdf_info['scale_upp'], scale_pdf_info['scale_low'])
2222
 
                if self.run_card['nevents']>=10000 and self.run_card['reweight_PDF']:
2223
 
                   message = message + \
2224
 
                       ('\n      PDF uncertainty: +%0.1f%% -%0.1f%%') % \
2225
 
                       (scale_pdf_info['pdf_upp'], scale_pdf_info['pdf_low'])
2226
 
 
2227
 
                neg_frac = (self.cross_sect_dict['xseca'] - self.cross_sect_dict['xsect'])/\
2228
 
                       (2. * self.cross_sect_dict['xseca'])
2229
 
                message = message + \
2230
 
                    ('\n      Number of events generated: %s' + \
2231
 
                     '\n      Parton shower to be used: %s' + \
2232
 
                     '\n      Fraction of negative weights: %4.2f' + \
2233
 
                     '\n      Total running time : %s') % \
2234
 
                        (self.run_card['nevents'],
2235
 
                         self.run_card['parton_shower'].upper(),
2236
 
                         neg_frac, 
2237
 
                         misc.format_timer(time.time()-self.start_time))
2238
 
 
 
2208
            computed='(computed from LHE events)'
2239
2209
        elif mode in ['NLO', 'LO']:
2240
2210
            status = ['Results after grid setup:','Current results:',
2241
2211
                      'Final results and run summary:']
2242
 
            if (not done) and (step == 0):
 
2212
            computed='(computed from histogram information)'
 
2213
 
 
2214
        if step != 2 and mode in ['aMC@NLO', 'aMC@LO', 'noshower', 'noshowerLO']:
 
2215
            message = status[step] + '\n\n      Intermediate results:' + \
 
2216
                      ('\n      Random seed: %(randinit)d' + \
 
2217
                       '\n      %(xsec_string)s:      %(xsect)8.3e +- %(errt)6.1e %(unit)s' + \
 
2218
                       '\n      %(axsec_string)s: %(xseca)8.3e +- %(erra)6.1e %(unit)s \n') \
 
2219
                      % self.cross_sect_dict
 
2220
        elif mode in ['NLO','LO'] and not done:
 
2221
            if step == 0:
2243
2222
                message = '\n      ' + status[0] + \
2244
 
                     '\n      %(xsec_string)s:      %(xsect)8.3e +- %(errt)6.1e %(unit)s' % \
2245
 
                             self.cross_sect_dict
2246
 
            elif not done:
 
2223
                          '\n      %(xsec_string)s:      %(xsect)8.3e +- %(errt)6.1e %(unit)s' % \
 
2224
                          self.cross_sect_dict
 
2225
            else:
2247
2226
                message = '\n      ' + status[1] + \
2248
 
                     '\n      %(xsec_string)s:      %(xsect)8.3e +- %(errt)6.1e %(unit)s' % \
2249
 
                             self.cross_sect_dict
2250
 
            elif done:
2251
 
                message = '\n      ' + status[2] + proc_info + \
2252
 
                     '\n      %(xsec_string)s:      %(xsect)8.3e +- %(errt)6.1e %(unit)s' % \
2253
 
                             self.cross_sect_dict
2254
 
                if self.run_card['reweight_scale']:
2255
 
                    if self.run_card['ickkw'] != -1:
2256
 
                        message = message + \
2257
 
                            ('\n      Ren. and fac. scale uncertainty: +%0.1f%% -%0.1f%%') % \
2258
 
                            (scale_pdf_info['scale_upp'], scale_pdf_info['scale_low'])
2259
 
                    else:
2260
 
                        message = message + \
2261
 
                            ('\n      Soft and hard scale dependence (added in quadrature): +%0.1f%% -%0.1f%%') % \
2262
 
                            (scale_pdf_info['scale_upp_quad'], scale_pdf_info['scale_low_quad'])
2263
 
                if self.run_card['reweight_PDF']:
2264
 
                    message = message + \
2265
 
                        ('\n      PDF uncertainty: +%0.1f%% -%0.1f%%') % \
2266
 
                        (scale_pdf_info['pdf_upp'], scale_pdf_info['pdf_low'])
 
2227
                          '\n      %(xsec_string)s:      %(xsect)8.3e +- %(errt)6.1e %(unit)s' % \
 
2228
                          self.cross_sect_dict
 
2229
                
 
2230
        else:
 
2231
            message = '\n   --------------------------------------------------------------'
 
2232
            message = message + \
 
2233
                      '\n      ' + status[2] + proc_info + \
 
2234
                      '\n      Number of events generated: %s' % self.run_card['nevents'] +\
 
2235
                      '\n      %(xsec_string)s: %(xsect)8.3e +- %(errt)6.1e %(unit)s' % \
 
2236
                      self.cross_sect_dict
 
2237
            message = message + \
 
2238
                      '\n   --------------------------------------------------------------'
 
2239
            if scale_pdf_info and (self.run_card['nevents']>=10000 or mode in ['NLO', 'LO']):
 
2240
                if scale_pdf_info[0]:
 
2241
                        # scale uncertainties
 
2242
                    message = message + '\n      Scale variation %s:' % computed
 
2243
                    for s in scale_pdf_info[0]:
 
2244
                        if s['unc']:
 
2245
                            if self.run_card['ickkw'] != -1:
 
2246
                                message = message + \
 
2247
                                          ('\n          Dynamical_scale_choice %(label)i (envelope of %(size)s values): '\
 
2248
                                           '\n              %(cen)8.3e pb  +%(max)0.1f%% -%(min)0.1f%%') % s
 
2249
                            else:
 
2250
                                message = message + \
 
2251
                                          ('\n          Soft and hard scale dependence (added in quadrature): '\
 
2252
                                           '\n              %(cen)8.3e pb  +%(max_q)0.1f%% -%(min_q)0.1f%%') % s
 
2253
                                    
 
2254
                        else:
 
2255
                            message = message + \
 
2256
                                          ('\n          Dynamical_scale_choice %(label)i: '\
 
2257
                                           '\n              %(cen)8.3e pb') % s
 
2258
                                
 
2259
                if scale_pdf_info[1]:
 
2260
                    message = message + '\n      PDF variation %s:' % computed
 
2261
                    for p in scale_pdf_info[1]:
 
2262
                        if p['unc']=='none':
 
2263
                            message = message + \
 
2264
                                          ('\n          %(name)s (central value only): '\
 
2265
                                           '\n              %(cen)8.3e pb') % p
 
2266
                            
 
2267
                        elif p['unc']=='unknown':
 
2268
                            message = message + \
 
2269
                                          ('\n          %(name)s (%(size)s members; combination method unknown): '\
 
2270
                                           '\n              %(cen)8.3e pb') % p
 
2271
                        else:
 
2272
                            message = message + \
 
2273
                                          ('\n          %(name)s (%(size)s members; using %(unc)s method): '\
 
2274
                                           '\n              %(cen)8.3e pb  +%(max)0.1f%% -%(min)0.1f%%') % p
 
2275
                        # pdf uncertainties
 
2276
                message = message + \
 
2277
                          '\n   --------------------------------------------------------------'
 
2278
 
2267
2279
        
2268
2280
        if (mode in ['NLO', 'LO'] and not done) or \
2269
2281
           (mode in ['aMC@NLO', 'aMC@LO', 'noshower', 'noshowerLO'] and step!=2):
2273
2285
        # Some advanced general statistics are shown in the debug message at the
2274
2286
        # end of the run
2275
2287
        # Make sure it never stops a run
 
2288
        # Gather some basic statistics for the run and extracted from the log files.
 
2289
        if mode in ['aMC@NLO', 'aMC@LO', 'noshower', 'noshowerLO']: 
 
2290
            log_GV_files =  glob.glob(pjoin(self.me_dir, \
 
2291
                                    'SubProcesses', 'P*','G*','log_MINT*.txt'))
 
2292
            all_log_files = log_GV_files
 
2293
        elif mode == 'NLO':
 
2294
            log_GV_files =  glob.glob(pjoin(self.me_dir, \
 
2295
                                    'SubProcesses', 'P*','all_G*','log_MINT*.txt'))
 
2296
            all_log_files = log_GV_files
 
2297
 
 
2298
        elif mode == 'LO':
 
2299
            log_GV_files = ''
 
2300
            all_log_files = glob.glob(pjoin(self.me_dir, \
 
2301
                                    'SubProcesses', 'P*','born_G*','log_MINT*.txt'))
 
2302
        else:
 
2303
            raise aMCatNLOError, 'Running mode %s not supported.'%mode
 
2304
 
2276
2305
        try:
2277
2306
            message, debug_msg = \
2278
2307
               self.compile_advanced_stats(log_GV_files, all_log_files, message)
2279
2308
        except Exception as e:
2280
 
            debug_msg = 'Advanced statistics collection failed with error "%s"'%str(e)
 
2309
            debug_msg = 'Advanced statistics collection failed with error "%s"\n'%str(e)
 
2310
            err_string = StringIO.StringIO()
 
2311
            traceback.print_exc(limit=4, file=err_string)
 
2312
            debug_msg += 'Please report this backtrace to a MadGraph developer:\n%s'\
 
2313
                                                          %err_string.getvalue()
2281
2314
 
2282
2315
        logger.debug(debug_msg+'\n')
2283
2316
        logger.info(message+'\n')
2319
2352
        optimization and detection of potential error messages into a nice
2320
2353
        debug message to printed at the end of the run """
2321
2354
        
 
2355
        def safe_float(str_float):
 
2356
            try:
 
2357
                return float(str_float)
 
2358
            except ValueError:
 
2359
                logger.debug('Could not convert the following float during'+
 
2360
                             ' advanced statistics printout: %s'%str(str_float))
 
2361
                return -1.0
 
2362
        
 
2363
        
2322
2364
        # > UPS is a dictionary of tuples with this format {channel:[nPS,nUPS]}
2323
2365
        # > Errors is a list of tuples with this format (log_file,nErrors)
2324
2366
        stats = {'UPS':{}, 'Errors':[], 'virt_stats':{}, 'timings':{}}
2348
2390
                              2 : 'PJFry++',
2349
2391
                              3 : 'IREGI',
2350
2392
                              4 : 'Golem95',
 
2393
                              5 : 'Samurai',
 
2394
                              6 : 'Ninja (double precision)',
 
2395
                              8 : 'Ninja (quadruple precision)',
2351
2396
                              9 : 'CutTools (quadruple precision)'}
2352
2397
        RetUnit_finder =re.compile(
2353
2398
                           r"#Unit\s*(?P<unit>\d+)\s*=\s*(?P<n_occurences>\d+)")
2396
2441
            nTot1  = [sum([chan[10][i] for chan in stats['UPS'].values()],0) \
2397
2442
                                                             for i in range(10)]
2398
2443
            UPSfracs = [(chan[0] , 0.0 if chan[1][0]==0 else \
2399
 
                 float(chan[1][4]*100)/chan[1][0]) for chan in stats['UPS'].items()]
 
2444
              safe_float(chan[1][4]*100)/chan[1][0]) for chan in stats['UPS'].items()]
2400
2445
            maxUPS = max(UPSfracs, key = lambda w: w[1])
2401
2446
 
2402
2447
            tmpStr = ""
2426
2471
            if maxUPS[1]>0.001:
2427
2472
                message += tmpStr
2428
2473
                message += '\n  Total number of unstable PS point detected:'+\
2429
 
                                 ' %d (%4.2f%%)'%(nToteps,float(100*nToteps)/nTotPS)
 
2474
                        ' %d (%4.2f%%)'%(nToteps,safe_float(100*nToteps)/nTotPS)
2430
2475
                message += '\n    Maximum fraction of UPS points in '+\
2431
2476
                          'channel %s (%4.2f%%)'%maxUPS
2432
2477
                message += '\n    Please report this to the authors while '+\
2462
2507
            for vf_stats in re.finditer(virt_frac_finder, log):
2463
2508
                pass
2464
2509
            if not vf_stats is None:
2465
 
                v_frac = float(vf_stats.group('v_frac'))
2466
 
                v_average = float(vf_stats.group('v_average'))
 
2510
                v_frac = safe_float(vf_stats.group('v_frac'))
 
2511
                v_average = safe_float(vf_stats.group('v_average'))
2467
2512
                try:
2468
2513
                    if v_frac < stats['virt_stats']['v_frac_min'][0]:
2469
2514
                        stats['virt_stats']['v_frac_min']=(v_frac,channel_name)
2481
2526
            for ccontr_stats in re.finditer(channel_contr_finder, log):
2482
2527
                pass
2483
2528
            if not ccontr_stats is None:
2484
 
                contrib = float(ccontr_stats.group('v_contr'))
 
2529
                contrib = safe_float(ccontr_stats.group('v_contr'))
2485
2530
                try:
2486
2531
                    if contrib>channel_contr_list[channel_name]:
2487
2532
                        channel_contr_list[channel_name]=contrib
2523
2568
                pass
2524
2569
            if not vt_stats is None:
2525
2570
                vt_stats_group = vt_stats.groupdict()
2526
 
                v_ratio = float(vt_stats.group('v_ratio'))
2527
 
                v_ratio_err = float(vt_stats.group('v_ratio_err'))
2528
 
                v_contr = float(vt_stats.group('v_abs_contr'))
2529
 
                v_contr_err = float(vt_stats.group('v_abs_contr_err'))
 
2571
                v_ratio = safe_float(vt_stats.group('v_ratio'))
 
2572
                v_ratio_err = safe_float(vt_stats.group('v_ratio_err'))
 
2573
                v_contr = safe_float(vt_stats.group('v_abs_contr'))
 
2574
                v_contr_err = safe_float(vt_stats.group('v_abs_contr_err'))
2530
2575
                try:
2531
2576
                    if v_ratio < stats['virt_stats']['v_ratio_min'][0]:
2532
2577
                        stats['virt_stats']['v_ratio_min']=(v_ratio,channel_name)
2558
2603
            for vf_stats in re.finditer(virt_frac_finder, log):
2559
2604
                pass
2560
2605
            if not vf_stats is None:
2561
 
                v_frac = float(vf_stats.group('v_frac'))
2562
 
                v_average = float(vf_stats.group('v_average'))
 
2606
                v_frac = safe_float(vf_stats.group('v_frac'))
 
2607
                v_average = safe_float(vf_stats.group('v_average'))
2563
2608
                try:
2564
2609
                    if v_average < stats['virt_stats']['v_average_min'][0]:
2565
2610
                        stats['virt_stats']['v_average_min']=(v_average,channel_name)
2580
2625
            debug_msg += '\n    Minimum virt fraction computed         %.3f (%s)'\
2581
2626
                                       %tuple(stats['virt_stats']['v_frac_min'])
2582
2627
            debug_msg += '\n    Average virt fraction computed         %.3f'\
2583
 
              %float(stats['virt_stats']['v_frac_avg'][0]/float(stats['virt_stats']['v_frac_avg'][1]))
 
2628
              %safe_float(stats['virt_stats']['v_frac_avg'][0]/safe_float(stats['virt_stats']['v_frac_avg'][1]))
2584
2629
            debug_msg += '\n  Stats below exclude negligible channels (%d excluded out of %d)'%\
2585
2630
                 (len(excluded_channels),len(all_channels))
2586
2631
            debug_msg += '\n    Maximum virt ratio used                %.2f (%s)'\
2627
2672
            for time_stats in re.finditer(timing_stat_finder, log):
2628
2673
                try:
2629
2674
                    stats['timings'][time_stats.group('name')][channel_name]+=\
2630
 
                                                 float(time_stats.group('time'))
 
2675
                                                 safe_float(time_stats.group('time'))
2631
2676
                except KeyError:
2632
2677
                    if time_stats.group('name') not in stats['timings'].keys():
2633
2678
                        stats['timings'][time_stats.group('name')] = {}
2634
2679
                    stats['timings'][time_stats.group('name')][channel_name]=\
2635
 
                                                 float(time_stats.group('time'))
 
2680
                                                 safe_float(time_stats.group('time'))
2636
2681
        
2637
2682
        # useful inline function
2638
2683
        Tstr = lambda secs: str(datetime.timedelta(seconds=int(secs)))
2672
2717
            debug_msg += '\n  Timing profile for <%s> :'%name
2673
2718
            try:
2674
2719
                debug_msg += '\n    Overall fraction of time         %.3f %%'%\
2675
 
                       float((100.0*(sum(stats['timings'][name].values())/
 
2720
                       safe_float((100.0*(sum(stats['timings'][name].values())/
2676
2721
                                      sum(stats['timings']['Total'].values()))))
2677
2722
            except KeyError, ZeroDivisionError:
2678
2723
                debug_msg += '\n    Overall fraction of time unavailable.'
2723
2768
        """this function calls the reweighting routines and creates the event file in the 
2724
2769
        Event dir. Return the name of the event file created
2725
2770
        """
2726
 
        scale_pdf_info={}
2727
 
        if self.run_card['reweight_scale'] or self.run_card['reweight_PDF'] :
 
2771
        scale_pdf_info=[]
 
2772
        if any(self.run_card['reweight_scale']) or any(self.run_card['reweight_PDF']) or \
 
2773
           len(self.run_card['dynamical_scale_choice']) > 1 or len(self.run_card['lhaid']) > 1:
2728
2774
            scale_pdf_info = self.run_reweight(options['reweightonly'])
2729
2775
        self.update_status('Collecting events', level='parton', update_results=True)
2730
2776
        misc.compile(['collect_events'], 
3081
3127
                                         '%s%d.top' % (filename, i))
3082
3128
                        files.mv(pjoin(rundir, file), plotfile) 
3083
3129
                    elif out_id=='HWU':
3084
 
                        histogram_list=histograms.HwUList(pjoin(rundir,file))
3085
 
                        histogram_list.output(pjoin(self.me_dir,'Events',self.run_name,
3086
 
                                                    '%s%d'% (filename,i)),format = 'gnuplot')
 
3130
                        out=pjoin(self.me_dir,'Events',
 
3131
                                  self.run_name,'%s%d'% (filename,i))
 
3132
                        histos=[{'dirname':pjoin(rundir,file)}]
 
3133
                        self.combine_plots_HwU(histos,out)
3087
3134
                        try:
3088
3135
                            misc.call(['gnuplot','%s%d.gnuplot' % (filename,i)],\
3089
3136
                                      stdout=os.open(os.devnull, os.O_RDWR),\
3143
3190
                            files.mv(pjoin(self.me_dir, 'Events', self.run_name, 'sum.top'),
3144
3191
                                     pjoin(self.me_dir, 'Events', self.run_name, '%s%d.top' % (filename, i)))
3145
3192
                        elif out_id=='HWU':
3146
 
                            histogram_list=histograms.HwUList(plotfiles[0])
3147
 
                            for ii, histo in enumerate(histogram_list):
3148
 
                                histogram_list[ii] = histo*norm
3149
 
                            for histo_path in plotfiles[1:]:
3150
 
                                for ii, histo in enumerate(histograms.HwUList(histo_path)):
3151
 
                                    # First make sure the plots have the same weight labels and such
3152
 
                                    histo.test_plot_compability(histogram_list[ii])
3153
 
                                    # Now let the histogram module do the magic and add them.
3154
 
                                    histogram_list[ii] += histo*norm
3155
 
                            # And now output the finalized list
3156
 
                            histogram_list.output(pjoin(self.me_dir,'Events',self.run_name,'%s%d'% (filename, i)),
3157
 
                                                  format = 'gnuplot')
 
3193
                            out=pjoin(self.me_dir,'Events',
 
3194
                                      self.run_name,'%s%d'% (filename,i))
 
3195
                            histos=[]
 
3196
                            norms=[]
 
3197
                            for plotfile in plotfiles:
 
3198
                                histos.append({'dirname':plotfile})
 
3199
                                norms.append(norm)
 
3200
                            self.combine_plots_HwU(histos,out,normalisation=norms)
3158
3201
                            try:
3159
3202
                                misc.call(['gnuplot','%s%d.gnuplot' % (filename, i)],\
3160
3203
                                          stdout=os.open(os.devnull, os.O_RDWR),\
3161
3204
                                          stderr=os.open(os.devnull, os.O_RDWR),\
3162
 
                                          cwd=pjoin(self.me_dir, 'Events', self.run_name))
 
3205
                                          cwd=pjoin(self.me_dir, 'Events',self.run_name))
3163
3206
                            except Exception:
3164
3207
                                pass
3165
3208
 
3318
3361
 
3319
3362
        if not self.to_store:
3320
3363
            return 
 
3364
 
 
3365
        if 'event' in self.to_store:
 
3366
            if os.path.exists(pjoin(self.me_dir,'Events', self.run_name, 'events.lhe')):
 
3367
                if not  os.path.exists(pjoin(self.me_dir,'Events', self.run_name, 'events.lhe.gz')):
 
3368
                    self.update_status('gzipping output file: events.lhe', level='parton', error=True)
 
3369
                    misc.gzip(pjoin(self.me_dir,'Events', self.run_name, 'events.lhe'))
 
3370
                else:
 
3371
                    os.remove(pjoin(self.me_dir,'Events', self.run_name, 'events.lhe'))
 
3372
            if os.path.exists(pjoin(self.me_dir,'Events','reweight.lhe')):
 
3373
                os.remove(pjoin(self.me_dir,'Events', 'reweight.lhe'))
 
3374
                
3321
3375
        
3322
3376
        tag = self.run_card['run_tag']
3323
3377
        
3555
3609
        and returns it in percents.  The expected format of the file
3556
3610
        is: n_scales xsec_scale_central xsec_scale1 ...  n_pdf
3557
3611
        xsec_pdf0 xsec_pdf1 ...."""
3558
 
        scale_pdf_info={}
3559
3612
        scales=[]
3560
3613
        pdfs=[]
3561
 
        numofpdf = 0
3562
 
        numofscales = 0
3563
3614
        for evt_file in evt_files:
3564
3615
            path, evt=os.path.split(evt_file)
3565
 
            data_file=open(pjoin(self.me_dir, 'SubProcesses', path, 'scale_pdf_dependence.dat')).read()
3566
 
            lines = data_file.replace("D", "E").split("\n")
3567
 
            if not numofscales:
3568
 
                numofscales = int(lines[0])
3569
 
            if not numofpdf:
3570
 
                numofpdf = int(lines[2])
3571
 
            scales_this = [float(val) for val in lines[1].split()]
3572
 
            pdfs_this = [float(val) for val in lines[3].split()]
3573
 
 
3574
 
            if numofscales != len(scales_this) or numofpdf !=len(pdfs_this):
3575
 
                # the +1 takes the 0th (central) set into account
3576
 
                logger.info(data_file)
3577
 
                logger.info((' Expected # of scales: %d\n'+
3578
 
                             ' Found # of scales: %d\n'+
3579
 
                             ' Expected # of pdfs: %d\n'+
3580
 
                             ' Found # of pdfs: %d\n') %
3581
 
                        (numofscales, len(scales_this), numofpdf, len(pdfs_this)))
3582
 
                raise aMCatNLOError('inconsistent scale_pdf_dependence.dat')
3583
 
            if not scales:
3584
 
                scales = [0.] * numofscales
3585
 
            if not pdfs:
3586
 
                pdfs = [0.] * numofpdf
3587
 
 
3588
 
            scales = [a + b for a, b in zip(scales, scales_this)]
3589
 
            pdfs = [a + b for a, b in zip(pdfs, pdfs_this)]
3590
 
 
3591
 
        # get the central value
3592
 
        if numofscales>0 and numofpdf==0:
3593
 
            cntrl_val=scales[0]
3594
 
        elif numofpdf>0 and numofscales==0:
3595
 
            cntrl_val=pdfs[0]
3596
 
        elif numofpdf>0 and numofscales>0:
3597
 
            if abs(1-scales[0]/pdfs[0])>0.0001:
3598
 
                raise aMCatNLOError('Central values for scale and PDF variation not identical')
3599
 
            else:
3600
 
                cntrl_val=scales[0]
 
3616
            with open(pjoin(self.me_dir, 'SubProcesses', path, 'scale_pdf_dependence.dat'),'r') as f:
 
3617
                data_line=f.readline()
 
3618
                if "scale variations:" in data_line:
 
3619
                    for i,scale in enumerate(self.run_card['dynamical_scale_choice']):
 
3620
                        data_line = f.readline().split()
 
3621
                        scales_this = [float(val) for val in f.readline().replace("D", "E").split()]
 
3622
                        try:
 
3623
                            scales[i] = [a + b for a, b in zip(scales[i], scales_this)]
 
3624
                        except IndexError:
 
3625
                            scales+=[scales_this]
 
3626
                    data_line=f.readline()
 
3627
                if "pdf variations:" in data_line:
 
3628
                    for i,pdf in enumerate(self.run_card['lhaid']):
 
3629
                        data_line = f.readline().split()
 
3630
                        pdfs_this = [float(val) for val in f.readline().replace("D", "E").split()]
 
3631
                        try:
 
3632
                            pdfs[i] = [a + b for a, b in zip(pdfs[i], pdfs_this)]
 
3633
                        except IndexError:
 
3634
                            pdfs+=[pdfs_this]
3601
3635
 
3602
3636
        # get the scale uncertainty in percent
3603
 
        if numofscales>0:
3604
 
            if cntrl_val != 0.0:
3605
 
            # max and min of the full envelope
3606
 
                scale_pdf_info['scale_upp'] = (max(scales)/cntrl_val-1)*100
3607
 
                scale_pdf_info['scale_low'] = (1-min(scales)/cntrl_val)*100
3608
 
            # ren and fac scale dependence added in quadrature
3609
 
                scale_pdf_info['scale_upp_quad'] = ((cntrl_val+math.sqrt(math.pow(max(scales[0]-cntrl_val,scales[1]-cntrl_val,scales[2]-cntrl_val),2)+math.pow(max(scales[0]-cntrl_val,scales[3]-cntrl_val,scales[6]-cntrl_val),2)))/cntrl_val-1)*100
3610
 
                scale_pdf_info['scale_low_quad'] = (1-(cntrl_val-math.sqrt(math.pow(min(scales[0]-cntrl_val,scales[1]-cntrl_val,scales[2]-cntrl_val),2)+math.pow(min(scales[0]-cntrl_val,scales[3]-cntrl_val,scales[6]-cntrl_val),2)))/cntrl_val)*100
 
3637
        scale_info=[]
 
3638
        for j,scale in enumerate(scales):
 
3639
            s_cen=scale[0]
 
3640
            if s_cen != 0.0 and self.run_card['reweight_scale'][j]:
 
3641
                # max and min of the full envelope
 
3642
                s_max=(max(scale)/s_cen-1)*100
 
3643
                s_min=(1-min(scale)/s_cen)*100
 
3644
                # ren and fac scale dependence added in quadrature
 
3645
                ren_var=[]
 
3646
                fac_var=[]
 
3647
                for i in range(len(self.run_card['rw_rscale'])):
 
3648
                    ren_var.append(scale[i]-s_cen) # central fac scale
 
3649
                for i in range(len(self.run_card['rw_fscale'])):
 
3650
                    fac_var.append(scale[i*len(self.run_card['rw_rscale'])]-s_cen) # central ren scale
 
3651
                s_max_q=((s_cen+math.sqrt(math.pow(max(ren_var),2)+math.pow(max(fac_var),2)))/s_cen-1)*100
 
3652
                s_min_q=(1-(s_cen-math.sqrt(math.pow(min(ren_var),2)+math.pow(min(fac_var),2)))/s_cen)*100
 
3653
                s_size=len(scale)
3611
3654
            else:
3612
 
                scale_pdf_info['scale_upp'] = 0.0
3613
 
                scale_pdf_info['scale_low'] = 0.0
3614
 
 
3615
 
        # get the pdf uncertainty in percent (according to the Hessian method)
3616
 
        lhaid=self.run_card['lhaid']
3617
 
        pdf_upp=0.0
3618
 
        pdf_low=0.0
3619
 
        if lhaid <= 90000:
3620
 
            # use Hessian method (CTEQ & MSTW)
3621
 
            if numofpdf>1:
3622
 
                for i in range(int(numofpdf/2)):
3623
 
                    pdf_upp=pdf_upp+math.pow(max(0.0,pdfs[2*i+1]-cntrl_val,pdfs[2*i+2]-cntrl_val),2)
3624
 
                    pdf_low=pdf_low+math.pow(max(0.0,cntrl_val-pdfs[2*i+1],cntrl_val-pdfs[2*i+2]),2)
3625
 
                if cntrl_val != 0.0:
3626
 
                    scale_pdf_info['pdf_upp'] = math.sqrt(pdf_upp)/cntrl_val*100
3627
 
                    scale_pdf_info['pdf_low'] = math.sqrt(pdf_low)/cntrl_val*100
 
3655
                s_max=0.0
 
3656
                s_min=0.0
 
3657
                s_max_q=0.0
 
3658
                s_min_q=0.0
 
3659
                s_size=len(scale)
 
3660
            scale_info.append({'cen':s_cen, 'min':s_min, 'max':s_max, \
 
3661
                               'min_q':s_min_q, 'max_q':s_max_q, 'size':s_size, \
 
3662
                               'label':self.run_card['dynamical_scale_choice'][j], \
 
3663
                               'unc':self.run_card['reweight_scale'][j]})
 
3664
 
 
3665
        # check if we can use LHAPDF to compute the PDF uncertainty
 
3666
        if any(self.run_card['reweight_pdf']):
 
3667
            use_lhapdf=False
 
3668
            lhapdf_libdir=subprocess.Popen([self.options['lhapdf'],'--libdir'],\
 
3669
                                           stdout=subprocess.PIPE).stdout.read().strip() 
 
3670
 
 
3671
            try:
 
3672
                candidates=[dirname for dirname in os.listdir(lhapdf_libdir) \
 
3673
                            if os.path.isdir(pjoin(lhapdf_libdir,dirname))]
 
3674
            except OSError:
 
3675
                candidates=[]
 
3676
            for candidate in candidates:
 
3677
                if os.path.isfile(pjoin(lhapdf_libdir,candidate,'site-packages','lhapdf.so')):
 
3678
                    sys.path.insert(0,pjoin(lhapdf_libdir,candidate,'site-packages'))
 
3679
                    try:
 
3680
                        import lhapdf
 
3681
                        use_lhapdf=True
 
3682
                        break
 
3683
                    except ImportError:
 
3684
                        sys.path.pop(0)
 
3685
                        continue
 
3686
                
 
3687
            if not use_lhapdf:
 
3688
                try:
 
3689
                    candidates=[dirname for dirname in os.listdir(lhapdf_libdir+'64') \
 
3690
                                if os.path.isdir(pjoin(lhapdf_libdir+'64',dirname))]
 
3691
                except OSError:
 
3692
                    candidates=[]
 
3693
                for candidate in candidates:
 
3694
                    if os.path.isfile(pjoin(lhapdf_libdir+'64',candidate,'site-packages','lhapdf.so')):
 
3695
                        sys.path.insert(0,pjoin(lhapdf_libdir+'64',candidate,'site-packages'))
 
3696
                        try:
 
3697
                            import lhapdf
 
3698
                            use_lhapdf=True
 
3699
                            break
 
3700
                        except ImportError:
 
3701
                            sys.path.pop(0)
 
3702
                            continue
 
3703
                
 
3704
            if not use_lhapdf:
 
3705
                try:
 
3706
                    import lhapdf
 
3707
                    use_lhapdf=True
 
3708
                except ImportError:
 
3709
                    logger.warning("Failed to access python version of LHAPDF: "\
 
3710
                                   "cannot compute PDF uncertainty from the "\
 
3711
                                   "weights in the events. The weights in the LHE " \
 
3712
                                   "event files will still cover all PDF set members, "\
 
3713
                                   "but there will be no PDF uncertainty printed in the run summary. \n "\
 
3714
                                   "If the python interface to LHAPDF is available on your system, try "\
 
3715
                                   "adding its location to the PYTHONPATH environment variable and the"\
 
3716
                                   "LHAPDF library location to LD_LIBRARY_PATH (linux) or DYLD_LIBRARY_PATH (mac os x).")
 
3717
                    use_lhapdf=False
 
3718
 
 
3719
        # turn off lhapdf printing any messages
 
3720
        if any(self.run_card['reweight_pdf']) and use_lhapdf: lhapdf.setVerbosity(0)
 
3721
 
 
3722
        pdf_info=[]
 
3723
        for j,pdfset in enumerate(pdfs):
 
3724
            p_cen=pdfset[0]
 
3725
            if p_cen != 0.0 and self.run_card['reweight_pdf'][j]:
 
3726
                if use_lhapdf:
 
3727
                    pdfsetname=self.run_card['lhapdfsetname'][j]
 
3728
                    try:
 
3729
                        p=lhapdf.getPDFSet(pdfsetname)
 
3730
                        ep=p.uncertainty(pdfset,-1)
 
3731
                        p_cen=ep.central
 
3732
                        p_min=abs(ep.errminus/p_cen)*100
 
3733
                        p_max=abs(ep.errplus/p_cen)*100
 
3734
                        p_type=p.errorType
 
3735
                        p_size=p.size
 
3736
                        p_conf=p.errorConfLevel
 
3737
                    except:
 
3738
                        logger.warning("Could not access LHAPDF to compute uncertainties for %s" % pdfsetname)
 
3739
                        p_min=0.0
 
3740
                        p_max=0.0
 
3741
                        p_type='unknown'
 
3742
                        p_conf='unknown'
 
3743
                        p_size=len(pdfset)
3628
3744
                else:
3629
 
                    scale_pdf_info['pdf_upp'] = 0.0
3630
 
                    scale_pdf_info['pdf_low'] = 0.0
3631
 
        else:
3632
 
            # use Gaussian method (NNPDF)
3633
 
            pdf_stdev=0.0
3634
 
            for i in range(int(numofpdf-1)):
3635
 
                pdf_stdev = pdf_stdev + pow(pdfs[i+1] - cntrl_val,2)
3636
 
            pdf_stdev = math.sqrt(pdf_stdev/int(numofpdf-2))
3637
 
            if cntrl_val != 0.0:
3638
 
                scale_pdf_info['pdf_upp'] = pdf_stdev/cntrl_val*100
 
3745
                    p_min=0.0
 
3746
                    p_max=0.0
 
3747
                    p_type='unknown'
 
3748
                    p_conf='unknown'
 
3749
                    p_size=len(pdfset)
 
3750
                    pdfsetname=self.run_card['lhaid'][j]
3639
3751
            else:
3640
 
                scale_pdf_info['pdf_upp'] = 0.0
3641
 
            scale_pdf_info['pdf_low'] = scale_pdf_info['pdf_upp']
 
3752
                p_min=0.0
 
3753
                p_max=0.0
 
3754
                p_type='none'
 
3755
                p_conf='unknown'
 
3756
                p_size=len(pdfset)
 
3757
                pdfsetname=self.run_card['lhaid'][j]
 
3758
            pdf_info.append({'cen':p_cen, 'min':p_min, 'max':p_max, \
 
3759
                             'unc':p_type, 'name':pdfsetname, 'size':p_size, \
 
3760
                             'label':self.run_card['lhaid'][j], 'conf':p_conf})
 
3761
 
 
3762
        scale_pdf_info=[scale_info,pdf_info]
3642
3763
        return scale_pdf_info
3643
3764
 
3644
3765
 
3945
4066
        reweight_log = pjoin(self.me_dir, 'compile_reweight.log')
3946
4067
        test_log = pjoin(self.me_dir, 'test.log')
3947
4068
 
 
4069
        # environmental variables to be included in make_opts
 
4070
        self.make_opts_var = {}
 
4071
        if self.proc_characteristics['has_loops'] and \
 
4072
                          not os.path.exists(pjoin(self.me_dir,'OLP_virtuals')):
 
4073
            self.make_opts_var['madloop'] = 'true'
 
4074
 
3948
4075
        self.update_status('Compiling the code', level=None, update_results=True)
3949
4076
 
3950
 
 
3951
4077
        libdir = pjoin(self.me_dir, 'lib')
3952
4078
        sourcedir = pjoin(self.me_dir, 'Source')
3953
4079
 
3988
4114
 
3989
4115
            self.link_lhapdf(libdir, [pjoin('SubProcesses', p) for p in p_dirs])
3990
4116
            pdfsetsdir = self.get_lhapdf_pdfsetsdir()
3991
 
            lhaid_list = [self.run_card['lhaid']]
3992
 
            if self.run_card['reweight_PDF']:
3993
 
                lhaid_list.append(self.run_card['PDF_set_min'])
3994
 
                lhaid_list.append(self.run_card['PDF_set_max'])
 
4117
            lhaid_list = self.run_card['lhaid']
3995
4118
            self.copy_lhapdf_set(lhaid_list, pdfsetsdir)
3996
4119
 
3997
4120
        else:
3999
4122
                logger.info('Using built-in libraries for PDFs')
4000
4123
            if self.run_card['lpp1'] == 0 == self.run_card['lpp2']:
4001
4124
                logger.info('Lepton-Lepton collision: Ignoring \'pdlabel\' and \'lhaid\' in the run_card.')
4002
 
            try:
4003
 
                del os.environ['lhapdf']
4004
 
            except KeyError:
4005
 
                pass
4006
4125
 
4007
4126
        # read the run_card to find if applgrid is used or not
4008
4127
        if self.run_card['iappl'] != 0:
4009
 
            os.environ['applgrid'] = 'True'
 
4128
            self.make_opts_var['applgrid'] = 'True'
4010
4129
            # check versions of applgrid and amcfast
4011
4130
            for code in ['applgrid','amcfast']:
4012
4131
                try:
4035
4154
                    line=appllibs
4036
4155
                text_out.append(line)
4037
4156
            open(pjoin(self.me_dir,'Source','make_opts'),'w').writelines(text_out)
4038
 
        else:
4039
 
            try:
4040
 
                del os.environ['applgrid']
4041
 
            except KeyError:
4042
 
                pass
4043
4157
 
4044
 
        try: 
4045
 
            os.environ['fastjet_config'] = self.options['fastjet']
4046
 
        except (TypeError, KeyError):
4047
 
            if 'fastjet_config' in os.environ:
4048
 
                del os.environ['fastjet_config']
4049
 
            os.unsetenv('fastjet_config')
 
4158
        if 'fastjet' in self.options.keys() and self.options['fastjet']:
 
4159
            self.make_opts_var['fastjet_config'] = self.options['fastjet']
 
4160
        
 
4161
        # add the make_opts_var to make_opts
 
4162
        self.update_make_opts()
4050
4163
        
4051
4164
        # make Source
4052
4165
        self.update_status('Compiling source...', level=None)
4139
4252
        # check if MadLoop virtuals have been generated
4140
4253
        if self.proc_characteristics['has_loops'] and \
4141
4254
                          not os.path.exists(pjoin(self.me_dir,'OLP_virtuals')):
4142
 
            os.environ['madloop'] = 'true'
4143
4255
            if mode in ['NLO', 'aMC@NLO', 'noshower']:
4144
4256
                tests.append('check_poles')
4145
 
        else:
4146
 
            os.unsetenv('madloop')
4147
4257
 
4148
4258
        # make and run tests (if asked for), gensym and make madevent in each dir
4149
4259
        self.update_status('Compiling directories...', level=None)
4317
4427
                                'madspin': default_switch,
4318
4428
                                'reweight': default_switch}
4319
4429
 
4320
 
            
4321
 
            
4322
 
        
4323
 
        
4324
4430
        description = {'order':  'Perturbative order of the calculation:',
4325
4431
                       'fixed_order': 'Fixed order (no event generation and no MC@[N]LO matching):',
4326
4432
                       'shower': 'Shower the generated events:',
4523
4629
        if mode =='onlyshower':
4524
4630
            cards = ['shower_card.dat']
4525
4631
        
 
4632
        
 
4633
        # automatically switch to keep_wgt option
 
4634
        first_cmd = [] # force to change some switch
 
4635
        
4526
4636
        if not options['force'] and not self.force:
4527
 
            self.ask_edit_cards(cards, plot=False)
 
4637
            self.ask_edit_cards(cards, plot=False, first_cmd=first_cmd)
4528
4638
 
 
4639
        
4529
4640
        self.banner = banner_mod.Banner()
4530
4641
 
4531
4642
        # store the cards in the banner
4574
4685
            analyse_card_path = pjoin(self.me_dir, 'Cards','FO_analyse_card.dat')
4575
4686
            self.analyse_card = self.banner.charge_card('FO_analyse_card')
4576
4687
 
4577
 
        
4578
4688
        return mode
4579
4689
 
4580
4690
 
4597
4707
                "-- execute aMC@NLO \n" + \
4598
4708
                "   MODE can be either LO, NLO, aMC@NLO or aMC@LO (if omitted, it is asked in a separate question)\n" + \
4599
4709
                "     If mode is set to LO/NLO, no event generation will be performed, but only the \n" + \
4600
 
                "     computation of the total cross-section and the filling of parton-level histograms \n" + \
 
4710
                "     computation of the total cross section and the filling of parton-level histograms \n" + \
4601
4711
                "     specified in the DIRPATH/SubProcesses/madfks_plot.f file.\n" + \
4602
4712
                "     If mode is set to aMC@LO/aMC@NLO, after the cross-section computation, a .lhe \n" + \
4603
4713
                "     event file is generated which will be showered with the MonteCarlo specified \n" + \
4626
4736
_launch_parser.add_option("-a", "--appl_start_grid", default=False, dest='appl_start_grid',
4627
4737
                            help="For use with APPLgrid only: start from existing grids")
4628
4738
_launch_parser.add_option("-R", "--reweight", default=False, dest='do_reweight', action='store_true',
4629
 
                            help="Run the reweight module (reweighting by different model parameter")
 
4739
                            help="Run the reweight module (reweighting by different model parameters)")
4630
4740
_launch_parser.add_option("-M", "--madspin", default=False, dest='do_madspin', action='store_true',
4631
4741
                            help="Run the madspin package")
4632
4742
 
4636
4746
                "-- execute aMC@NLO \n" + \
4637
4747
                "   MODE can be either LO, NLO, aMC@NLO or aMC@LO (if omitted, it is asked in a separate question)\n" + \
4638
4748
                "     If mode is set to LO/NLO, no event generation will be performed, but only the \n" + \
4639
 
                "     computation of the total cross-section and the filling of parton-level histograms \n" + \
 
4749
                "     computation of the total cross section and the filling of parton-level histograms \n" + \
4640
4750
                "     specified in the DIRPATH/SubProcesses/madfks_plot.f file.\n" + \
4641
4751
                "     If mode is set to aMC@LO/aMC@NLO, after the cross-section computation, a .lhe \n" + \
4642
4752
                "     event file is generated which will be showered with the MonteCarlo specified \n" + \
4666
4776
 
4667
4777
 
4668
4778
_calculate_xsect_usage = "calculate_xsect [ORDER] [options]\n" + \
4669
 
                "-- calculate cross-section up to ORDER.\n" + \
 
4779
                "-- calculate cross section up to ORDER.\n" + \
4670
4780
                "   ORDER can be either LO or NLO (if omitted, it is set to NLO). \n"
4671
4781
 
4672
4782
_calculate_xsect_parser = misc.OptionParser(usage=_calculate_xsect_usage)
4694
4804
_shower_parser.add_option("-f", "--force", default=False, action='store_true',
4695
4805
                                help="Use the shower_card present in the directory for the launch, without editing")
4696
4806
 
 
4807
if '__main__' == __name__:
 
4808
    # Launch the interface without any check if one code is already running.
 
4809
    # This can ONLY run a single command !!
 
4810
    import sys
 
4811
    if not sys.version_info[0] == 2 or sys.version_info[1] < 6:
 
4812
        sys.exit('MadGraph/MadEvent 5 works only with python 2.6 or later (but not python 3.X).\n'+\
 
4813
               'Please upgrate your version of python.')
 
4814
 
 
4815
    import os
 
4816
    import optparse
 
4817
    # Get the directory of the script real path (bin)                                                                                                                                                           
 
4818
    # and add it to the current PYTHONPATH                                                                                                                                                                      
 
4819
    root_path = os.path.dirname(os.path.dirname(os.path.realpath( __file__ )))
 
4820
    sys.path.insert(0, root_path)
 
4821
 
 
4822
    class MyOptParser(optparse.OptionParser):    
 
4823
        class InvalidOption(Exception): pass
 
4824
        def error(self, msg=''):
 
4825
            raise MyOptParser.InvalidOption(msg)
 
4826
    # Write out nice usage message if called with -h or --help                                                                                                                                                  
 
4827
    usage = "usage: %prog [options] [FILE] "
 
4828
    parser = MyOptParser(usage=usage)
 
4829
    parser.add_option("-l", "--logging", default='INFO',
 
4830
                      help="logging level (DEBUG|INFO|WARNING|ERROR|CRITICAL) [%default]")
 
4831
    parser.add_option("","--web", action="store_true", default=False, dest='web', \
 
4832
                     help='force toce to be in secure mode')
 
4833
    parser.add_option("","--debug", action="store_true", default=False, dest='debug', \
 
4834
                     help='force to launch debug mode')
 
4835
    parser_error = ''
 
4836
    done = False
 
4837
    
 
4838
    for i in range(len(sys.argv)-1):
 
4839
        try:
 
4840
            (options, args) = parser.parse_args(sys.argv[1:len(sys.argv)-i])
 
4841
            done = True
 
4842
        except MyOptParser.InvalidOption, error:
 
4843
            pass
 
4844
        else:
 
4845
            args += sys.argv[len(sys.argv)-i:]
 
4846
    if not done:
 
4847
        # raise correct error:                                                                                                                                                                                  
 
4848
        try:
 
4849
            (options, args) = parser.parse_args()
 
4850
        except MyOptParser.InvalidOption, error:
 
4851
            print error
 
4852
            sys.exit(2)
 
4853
 
 
4854
    if len(args) == 0:
 
4855
        args = ''
 
4856
 
 
4857
    import subprocess
 
4858
    import logging
 
4859
    import logging.config
 
4860
    # Set logging level according to the logging level given by options                                                                                                                                         
 
4861
    #logging.basicConfig(level=vars(logging)[options.logging])                                                                                                                                                  
 
4862
    import internal.coloring_logging
 
4863
    try:
 
4864
        if __debug__ and options.logging == 'INFO':
 
4865
            options.logging = 'DEBUG'
 
4866
        if options.logging.isdigit():
 
4867
            level = int(options.logging)
 
4868
        else:
 
4869
            level = eval('logging.' + options.logging)
 
4870
        print os.path.join(root_path, 'internal', 'me5_logging.conf')
 
4871
        logging.config.fileConfig(os.path.join(root_path, 'internal', 'me5_logging.conf'))
 
4872
        logging.root.setLevel(level)
 
4873
        logging.getLogger('madgraph').setLevel(level)
 
4874
    except:
 
4875
        raise
 
4876
        pass
 
4877
 
 
4878
    # Call the cmd interface main loop                                                                                                                                                                          
 
4879
    try:
 
4880
        if args:
 
4881
            # a single command is provided   
 
4882
            if '--web' in args:
 
4883
                i = args.index('--web') 
 
4884
                args.pop(i)                                                                                                                                                                     
 
4885
                cmd_line =  aMCatNLOCmd(force_run=True)
 
4886
            else:
 
4887
                cmd_line =  aMCatNLOCmdShell(force_run=True)
 
4888
 
 
4889
            if not hasattr(cmd_line, 'do_%s' % args[0]):
 
4890
                if parser_error:
 
4891
                    print parser_error
 
4892
                    print 'and %s  can not be interpreted as a valid command.' % args[0]
 
4893
                else:
 
4894
                    print 'ERROR: %s  not a valid command. Please retry' % args[0]
 
4895
            else:
 
4896
                cmd_line.use_rawinput = False
 
4897
                cmd_line.run_cmd(' '.join(args))
 
4898
                cmd_line.run_cmd('quit')
 
4899
 
 
4900
    except KeyboardInterrupt:
 
4901
        print 'quit on KeyboardInterrupt'
 
4902
        pass
4697
4903