Merge lp:~maddevelopers/mg5amcnlo/2.8.1.py3 into lp:mg5amcnlo/lts
- 2.8.1.py3
- Merge into series2.0
Proposed by
Olivier Mattelaer
Status: | Merged |
---|---|
Merged at revision: | 290 |
Proposed branch: | lp:~maddevelopers/mg5amcnlo/2.8.1.py3 |
Merge into: | lp:mg5amcnlo/lts |
Diff against target: |
1112 lines (+365/-142) 23 files modified
MadSpin/interface_madspin.py (+0/-12) Template/LO/Source/dsample.f (+4/-0) UpdateNotes.txt (+7/-0) VERSION (+2/-2) bin/mg5_aMC (+14/-3) madgraph/interface/loop_interface.py (+2/-0) madgraph/interface/madevent_interface.py (+3/-0) madgraph/interface/madgraph_interface.py (+59/-47) madgraph/iolibs/export_fks.py (+4/-5) madgraph/iolibs/export_v4.py (+11/-8) madgraph/iolibs/group_subprocs.py (+2/-2) madgraph/madevent/gen_crossxhtml.py (+7/-18) madgraph/madevent/gen_ximprove.py (+1/-1) madgraph/madevent/sum_html.py (+23/-19) madgraph/various/banner.py (+28/-0) madgraph/various/lhe_parser.py (+2/-0) madgraph/various/misc.py (+30/-0) mg5decay/decay_objects.py (+101/-6) models/__init__.py (+9/-8) models/check_param_card.py (+1/-1) models/import_ufo.py (+47/-7) models/model_reader.py (+5/-1) models/write_param_card.py (+3/-2) |
To merge this branch: | bzr merge lp:~maddevelopers/mg5amcnlo/2.8.1.py3 |
Related bugs: |
Reviewer | Review Type | Date Requested | Status |
---|---|---|---|
MadTeam | Pending | ||
Review via email: mp+391101@code.launchpad.net |
Commit message
pass to 2.8.1
Description of the change
This is a pure bug fixing release.
Mainly fixing issues with python3 (comparison operators, model loading, model conversion, ...)
It also has a couple of important bug fixes, such as the one for the auto-width computation.
I would also like to include this "feature" branch (containing better support for FxFx):
lp:~maddevelopers/mg5amcnlo/2.8.1_fxfx
But I'm still waiting approval on that one.
Olivier
To post a comment you must log in.
Preview Diff
[H/L] Next/Prev Comment, [J/K] Next/Prev File, [N/P] Next/Prev Hunk
1 | === modified file 'MadSpin/interface_madspin.py' | |||
2 | --- MadSpin/interface_madspin.py 2020-08-20 15:33:24 +0000 | |||
3 | +++ MadSpin/interface_madspin.py 2020-09-22 07:07:51 +0000 | |||
4 | @@ -451,14 +451,6 @@ | |||
5 | 451 | except ValueError: | 451 | except ValueError: |
6 | 452 | raise self.InvalidCmd('second argument should be a real number.') | 452 | raise self.InvalidCmd('second argument should be a real number.') |
7 | 453 | 453 | ||
8 | 454 | elif args[0] == 'BW_effect': | ||
9 | 455 | if args[1] in [0, False,'.false.', 'F', 'f', 'False', 'no']: | ||
10 | 456 | args[1] = 0 | ||
11 | 457 | elif args[1] in [1, True,'.true.', 'T', 't', 'True', 'yes']: | ||
12 | 458 | args[1] = 1 | ||
13 | 459 | else: | ||
14 | 460 | raise self.InvalidCmd('second argument should be either T or F.') | ||
15 | 461 | |||
16 | 462 | elif args[0] == 'curr_dir': | 454 | elif args[0] == 'curr_dir': |
17 | 463 | if not os.path.isdir(args[1]): | 455 | if not os.path.isdir(args[1]): |
18 | 464 | raise self.InvalidCmd('second argument should be a path to a existing directory') | 456 | raise self.InvalidCmd('second argument should be a path to a existing directory') |
19 | @@ -499,8 +491,6 @@ | |||
20 | 499 | opts = list(self.options.keys()) + ['seed', "spinmode"] | 491 | opts = list(self.options.keys()) + ['seed', "spinmode"] |
21 | 500 | return self.list_completion(text, opts) | 492 | return self.list_completion(text, opts) |
22 | 501 | elif len(args) == 2: | 493 | elif len(args) == 2: |
23 | 502 | if args[1] == 'BW_effect': | ||
24 | 503 | return self.list_completion(text, ['True', 'False']) | ||
25 | 504 | if args[1] == 'ms_dir': | 494 | if args[1] == 'ms_dir': |
26 | 505 | return self.path_completion(text, '.', only_dirs = True) | 495 | return self.path_completion(text, '.', only_dirs = True) |
27 | 506 | elif args[1] == 'ms_dir': | 496 | elif args[1] == 'ms_dir': |
28 | @@ -517,8 +507,6 @@ | |||
29 | 517 | print('') | 507 | print('') |
30 | 518 | print('-- assign to a given option a given value') | 508 | print('-- assign to a given option a given value') |
31 | 519 | print(' - set max_weight VALUE: pre-define the maximum_weight for the reweighting') | 509 | print(' - set max_weight VALUE: pre-define the maximum_weight for the reweighting') |
32 | 520 | print(' - set BW_effect True|False: [default:True] reshuffle the momenta to describe') | ||
33 | 521 | print(' corrrectly the Breit-Wigner of the decayed particle') | ||
34 | 522 | print(' - set seed VALUE: fix the value of the seed to a given value.') | 510 | print(' - set seed VALUE: fix the value of the seed to a given value.') |
35 | 523 | print(' by default use the current time to set the seed. random number are') | 511 | print(' by default use the current time to set the seed. random number are') |
36 | 524 | print(' generated by the python module random using the Mersenne Twister generator.') | 512 | print(' generated by the python module random using the Mersenne Twister generator.') |
37 | 525 | 513 | ||
38 | === modified file 'Template/LO/Source/dsample.f' | |||
39 | --- Template/LO/Source/dsample.f 2020-05-13 09:45:34 +0000 | |||
40 | +++ Template/LO/Source/dsample.f 2020-09-22 07:07:51 +0000 | |||
41 | @@ -453,6 +453,10 @@ | |||
42 | 453 | CUMULATED_TIMING = t_after - CUMULATED_TIMING | 453 | CUMULATED_TIMING = t_after - CUMULATED_TIMING |
43 | 454 | 454 | ||
44 | 455 | if (N_EVALS.eq.0) then | 455 | if (N_EVALS.eq.0) then |
45 | 456 | write(outUnit,*) '<lo_statistics> ' | ||
46 | 457 | write(outUnit,*) '<cumulated_time>'//trim(toStr_real(CUMULATED_TIMING)) | ||
47 | 458 | & //'</cumulated_time>' | ||
48 | 459 | write(outUnit,*) '</lo_statistics>' | ||
49 | 456 | return | 460 | return |
50 | 457 | endif | 461 | endif |
51 | 458 | 462 | ||
52 | 459 | 463 | ||
53 | === modified file 'UpdateNotes.txt' | |||
54 | --- UpdateNotes.txt 2020-08-21 08:59:01 +0000 | |||
55 | +++ UpdateNotes.txt 2020-09-22 07:07:51 +0000 | |||
56 | @@ -1,6 +1,13 @@ | |||
57 | 1 | Update notes for MadGraph5_aMC@NLO (in reverse time order) | 1 | Update notes for MadGraph5_aMC@NLO (in reverse time order) |
58 | 2 | 2 | ||
59 | 3 | 3 | ||
60 | 4 | 2.8.1(22/09/22): | ||
61 | 5 | OM: Fix for the auto width for three body decay in presence of identical particles. | ||
62 | 6 | OM: add support for __header__ in UFO model | ||
63 | 7 | OM: allow restriction card to have auto-width | ||
64 | 8 | OM: fixing some html link (removed ajax link forbidden by major web browser) | ||
65 | 9 | OM: Various fix related to the python3 support | ||
66 | 10 | - including more efficient model conversion method | ||
67 | 4 | 2.8.0 (21/08/20): | 11 | 2.8.0 (21/08/20): |
68 | 5 | OM: pass to python3 by default | 12 | OM: pass to python3 by default |
69 | 6 | OM: For LO process, you can now set lpp1 and lpp2 to "4" for process with initial photon in order to get the | 13 | OM: For LO process, you can now set lpp1 and lpp2 to "4" for process with initial photon in order to get the |
70 | 7 | 14 | ||
71 | === modified file 'VERSION' | |||
72 | --- VERSION 2020-08-21 08:59:01 +0000 | |||
73 | +++ VERSION 2020-09-22 07:07:51 +0000 | |||
74 | @@ -1,5 +1,5 @@ | |||
77 | 1 | version = 2.8.0 | 1 | version = 2.8.1 |
78 | 2 | date = 2020-08-21 | 2 | date = 2020-09-22 |
79 | 3 | 3 | ||
80 | 4 | 4 | ||
81 | 5 | 5 | ||
82 | 6 | 6 | ||
83 | === modified file 'bin/mg5_aMC' | |||
84 | --- bin/mg5_aMC 2020-06-21 18:48:13 +0000 | |||
85 | +++ bin/mg5_aMC 2020-09-22 07:07:51 +0000 | |||
86 | @@ -23,8 +23,15 @@ | |||
87 | 23 | 23 | ||
88 | 24 | import sys | 24 | import sys |
89 | 25 | if sys.version_info[1] < 7: | 25 | if sys.version_info[1] < 7: |
92 | 26 | sys.exit('MadGraph5_aMC@NLO works only with python 2.7 or python 3.7 (and later).\n\ | 26 | if sys.version_info[0] ==2: |
93 | 27 | Please upgrate your version of python.') | 27 | sys.exit("MadGraph5_aMC@NLO works only with python 2.7 or python 3.7 (and later).\n"+\ |
94 | 28 | " You are currently using Python2.%s. Please use a more recent version of Python." % sys.version_info[1]) | ||
95 | 29 | if sys.version_info[1] ==3: | ||
96 | 30 | sys.exit("MadGraph5_aMC@NLO works only with python 2.7 or python 3.7 (and later).\n"+\ | ||
97 | 31 | " You are currently using Python 3.%i. So please upgrade your version of Python." % sys.version_info[1] +\ | ||
98 | 32 | " If you have python2.7 installed you need to run the code as\n"+\ | ||
99 | 33 | " python27 ./bin/mg5_aMC \n") | ||
100 | 34 | |||
101 | 28 | try: | 35 | try: |
102 | 29 | import six | 36 | import six |
103 | 30 | except ImportError: | 37 | except ImportError: |
104 | @@ -75,8 +82,12 @@ | |||
105 | 75 | import logging.config | 82 | import logging.config |
106 | 76 | import madgraph.interface.coloring_logging | 83 | import madgraph.interface.coloring_logging |
107 | 77 | 84 | ||
108 | 85 | if sys.version_info[0] ==2: | ||
109 | 86 | logging.warning("\033[91mpython2 support will be removed in last quarter 2021. If you use python2 due to issue with Python3, please report them on https://bugs.launchpad.net/mg5amcnlo\033[0m") | ||
110 | 87 | |||
111 | 88 | |||
112 | 78 | if ' ' in os.getcwd(): | 89 | if ' ' in os.getcwd(): |
114 | 79 | logging.warning("Path does contains spaces. We advise that you change your current path to avoid to have space in the path.") | 90 | logging.warning("\033[91mPath does contains spaces. We advise that you change your current path to avoid to have space in the path.\033[0m") |
115 | 80 | 91 | ||
116 | 81 | try: | 92 | try: |
117 | 82 | import readline | 93 | import readline |
118 | 83 | 94 | ||
119 | === modified file 'madgraph/interface/loop_interface.py' | |||
120 | --- madgraph/interface/loop_interface.py 2020-03-25 14:57:21 +0000 | |||
121 | +++ madgraph/interface/loop_interface.py 2020-09-22 07:07:51 +0000 | |||
122 | @@ -803,6 +803,7 @@ | |||
123 | 803 | """Generate an amplitude for a given process and add to | 803 | """Generate an amplitude for a given process and add to |
124 | 804 | existing amplitudes | 804 | existing amplitudes |
125 | 805 | """ | 805 | """ |
126 | 806 | |||
127 | 806 | args = self.split_arg(line) | 807 | args = self.split_arg(line) |
128 | 807 | # Check the validity of the arguments | 808 | # Check the validity of the arguments |
129 | 808 | self.check_add(args) | 809 | self.check_add(args) |
130 | @@ -907,6 +908,7 @@ | |||
131 | 907 | amp in myproc.get('amplitudes')]) | 908 | amp in myproc.get('amplitudes')]) |
132 | 908 | logger.info("Process generated in %0.3f s" % \ | 909 | logger.info("Process generated in %0.3f s" % \ |
133 | 909 | (cpu_time2 - cpu_time1)) | 910 | (cpu_time2 - cpu_time1)) |
134 | 911 | |||
135 | 910 | 912 | ||
136 | 911 | class LoopInterfaceWeb(mg_interface.CheckValidForCmdWeb, LoopInterface): | 913 | class LoopInterfaceWeb(mg_interface.CheckValidForCmdWeb, LoopInterface): |
137 | 912 | pass | 914 | pass |
138 | 913 | 915 | ||
139 | === modified file 'madgraph/interface/madevent_interface.py' | |||
140 | --- madgraph/interface/madevent_interface.py 2020-06-21 18:48:13 +0000 | |||
141 | +++ madgraph/interface/madevent_interface.py 2020-09-22 07:07:51 +0000 | |||
142 | @@ -3422,6 +3422,9 @@ | |||
143 | 3422 | self.nb_refine += 1 | 3422 | self.nb_refine += 1 |
144 | 3423 | args = self.split_arg(line) | 3423 | args = self.split_arg(line) |
145 | 3424 | treshold=None | 3424 | treshold=None |
146 | 3425 | |||
147 | 3426 | |||
148 | 3427 | |||
149 | 3425 | for a in args: | 3428 | for a in args: |
150 | 3426 | if a.startswith('--treshold='): | 3429 | if a.startswith('--treshold='): |
151 | 3427 | treshold = float(a.split('=',1)[1]) | 3430 | treshold = float(a.split('=',1)[1]) |
152 | 3428 | 3431 | ||
153 | === modified file 'madgraph/interface/madgraph_interface.py' | |||
154 | --- madgraph/interface/madgraph_interface.py 2020-08-20 15:33:24 +0000 | |||
155 | +++ madgraph/interface/madgraph_interface.py 2020-09-22 07:07:51 +0000 | |||
156 | @@ -3082,7 +3082,7 @@ | |||
157 | 3082 | existing amplitudes | 3082 | existing amplitudes |
158 | 3083 | or merge two model | 3083 | or merge two model |
159 | 3084 | """ | 3084 | """ |
161 | 3085 | 3085 | ||
162 | 3086 | args = self.split_arg(line) | 3086 | args = self.split_arg(line) |
163 | 3087 | 3087 | ||
164 | 3088 | 3088 | ||
165 | @@ -3183,47 +3183,51 @@ | |||
166 | 3183 | 3183 | ||
167 | 3184 | 3184 | ||
168 | 3185 | self._curr_proc_defs.append(myprocdef) | 3185 | self._curr_proc_defs.append(myprocdef) |
210 | 3186 | 3186 | ||
211 | 3187 | # Negative coupling order contraints can be given on at most one | 3187 | try: |
212 | 3188 | # coupling order (and either in squared orders or orders, not both) | 3188 | # Negative coupling order contraints can be given on at most one |
213 | 3189 | if len([1 for val in list(myprocdef.get('orders').values())+\ | 3189 | # coupling order (and either in squared orders or orders, not both) |
214 | 3190 | list(myprocdef.get('squared_orders').values()) if val<0])>1: | 3190 | if len([1 for val in list(myprocdef.get('orders').values())+\ |
215 | 3191 | raise MadGraph5Error("Negative coupling order constraints"+\ | 3191 | list(myprocdef.get('squared_orders').values()) if val<0])>1: |
216 | 3192 | " can only be given on one type of coupling and either on"+\ | 3192 | raise MadGraph5Error("Negative coupling order constraints"+\ |
217 | 3193 | " squared orders or amplitude orders, not both.") | 3193 | " can only be given on one type of coupling and either on"+\ |
218 | 3194 | 3194 | " squared orders or amplitude orders, not both.") | |
219 | 3195 | if myprocdef.get_ninitial() ==1 and myprocdef.get('squared_orders'): | 3195 | |
220 | 3196 | logger.warning('''Computation of interference term with decay is not 100% validated. | 3196 | if myprocdef.get_ninitial() ==1 and myprocdef.get('squared_orders'): |
221 | 3197 | Please check carefully your result. | 3197 | logger.warning('''Computation of interference term with decay is not 100% validated. |
222 | 3198 | One suggestion is also to compare the generation of your process with and without | 3198 | Please check carefully your result. |
223 | 3199 | set group_subprocesses True | 3199 | One suggestion is also to compare the generation of your process with and without |
224 | 3200 | (to write Before the generate command) | 3200 | set group_subprocesses True |
225 | 3201 | ''') | 3201 | (to write Before the generate command) |
226 | 3202 | 3202 | ''') | |
227 | 3203 | cpu_time1 = time.time() | 3203 | |
228 | 3204 | 3204 | cpu_time1 = time.time() | |
229 | 3205 | # Generate processes | 3205 | |
230 | 3206 | if self.options['group_subprocesses'] == 'Auto': | 3206 | # Generate processes |
231 | 3207 | collect_mirror_procs = True | 3207 | if self.options['group_subprocesses'] == 'Auto': |
232 | 3208 | else: | 3208 | collect_mirror_procs = True |
233 | 3209 | collect_mirror_procs = self.options['group_subprocesses'] | 3209 | else: |
234 | 3210 | ignore_six_quark_processes = \ | 3210 | collect_mirror_procs = self.options['group_subprocesses'] |
235 | 3211 | self.options['ignore_six_quark_processes'] if \ | 3211 | ignore_six_quark_processes = \ |
236 | 3212 | "ignore_six_quark_processes" in self.options \ | 3212 | self.options['ignore_six_quark_processes'] if \ |
237 | 3213 | else [] | 3213 | "ignore_six_quark_processes" in self.options \ |
238 | 3214 | 3214 | else [] | |
239 | 3215 | myproc = diagram_generation.MultiProcess(myprocdef, | 3215 | |
240 | 3216 | collect_mirror_procs = collect_mirror_procs, | 3216 | myproc = diagram_generation.MultiProcess(myprocdef, |
241 | 3217 | ignore_six_quark_processes = ignore_six_quark_processes, | 3217 | collect_mirror_procs = collect_mirror_procs, |
242 | 3218 | optimize=optimize, diagram_filter=diagram_filter) | 3218 | ignore_six_quark_processes = ignore_six_quark_processes, |
243 | 3219 | 3219 | optimize=optimize, diagram_filter=diagram_filter) | |
244 | 3220 | 3220 | ||
245 | 3221 | for amp in myproc.get('amplitudes'): | 3221 | |
246 | 3222 | if amp not in self._curr_amps: | 3222 | for amp in myproc.get('amplitudes'): |
247 | 3223 | self._curr_amps.append(amp) | 3223 | if amp not in self._curr_amps: |
248 | 3224 | elif warning_duplicate: | 3224 | self._curr_amps.append(amp) |
249 | 3225 | raise self.InvalidCmd("Duplicate process %s found. Please check your processes." % \ | 3225 | elif warning_duplicate: |
250 | 3226 | amp.nice_string_processes()) | 3226 | raise self.InvalidCmd( "Duplicate process %s found. Please check your processes." % \ |
251 | 3227 | amp.nice_string_processes()) | ||
252 | 3228 | except Exception: | ||
253 | 3229 | self._curr_proc_defs.pop(-1) | ||
254 | 3230 | raise | ||
255 | 3227 | 3231 | ||
256 | 3228 | # Reset _done_export, since we have new process | 3232 | # Reset _done_export, since we have new process |
257 | 3229 | self._done_export = False | 3233 | self._done_export = False |
258 | @@ -3329,9 +3333,13 @@ | |||
259 | 3329 | if answer != 'y': | 3333 | if answer != 'y': |
260 | 3330 | return | 3334 | return |
261 | 3331 | 3335 | ||
263 | 3332 | #Object_library (.iteritems() -> .items()) | 3336 | #Object_library |
264 | 3333 | text = open(pjoin(model_dir, 'object_library.py')).read() | 3337 | text = open(pjoin(model_dir, 'object_library.py')).read() |
265 | 3338 | #(.iteritems() -> .items()) | ||
266 | 3334 | text = text.replace('.iteritems()', '.items()') | 3339 | text = text.replace('.iteritems()', '.items()') |
267 | 3340 | # raise UFOError, "" -> raise UFOError() | ||
268 | 3341 | text = re.sub('raise (\w+)\s*,\s*["\']([^"]+)["\']', | ||
269 | 3342 | 'raise \g<1>("\g<2>")', text) | ||
270 | 3335 | text = open(pjoin(model_dir, 'object_library.py'),'w').write(text) | 3343 | text = open(pjoin(model_dir, 'object_library.py'),'w').write(text) |
271 | 3336 | 3344 | ||
272 | 3337 | # write_param_card.dat -> copy the one of the sm model | 3345 | # write_param_card.dat -> copy the one of the sm model |
273 | @@ -5166,6 +5174,7 @@ | |||
274 | 5166 | 5174 | ||
275 | 5167 | # Reset _done_export, since we have new process | 5175 | # Reset _done_export, since we have new process |
276 | 5168 | self._done_export = False | 5176 | self._done_export = False |
277 | 5177 | self._curr_proc_defs.append(myprocdef) | ||
278 | 5169 | 5178 | ||
279 | 5170 | cpu_time2 = time.time() | 5179 | cpu_time2 = time.time() |
280 | 5171 | 5180 | ||
281 | @@ -5372,20 +5381,20 @@ | |||
282 | 5372 | string. Returns a ProcessDefinition.""" | 5381 | string. Returns a ProcessDefinition.""" |
283 | 5373 | 5382 | ||
284 | 5374 | # Start with process number (identified by "@") and overall orders | 5383 | # Start with process number (identified by "@") and overall orders |
286 | 5375 | proc_number_pattern = re.compile("^(.+)@\s*(\d+)\s*((\w+\s*=\s*\d+\s*)*)$") | 5384 | proc_number_pattern = re.compile("^(.+)@\s*(\d+)\s*((\w+\s*\<?=\s*\d+\s*)*)$") |
287 | 5376 | proc_number_re = proc_number_pattern.match(line) | 5385 | proc_number_re = proc_number_pattern.match(line) |
288 | 5377 | overall_orders = {} | 5386 | overall_orders = {} |
289 | 5378 | if proc_number_re: | 5387 | if proc_number_re: |
290 | 5379 | proc_number = int(proc_number_re.group(2)) | 5388 | proc_number = int(proc_number_re.group(2)) |
291 | 5380 | line = proc_number_re.group(1) | 5389 | line = proc_number_re.group(1) |
292 | 5381 | if proc_number_re.group(3): | 5390 | if proc_number_re.group(3): |
294 | 5382 | order_pattern = re.compile("^(.*?)\s*(\w+)\s*=\s*(\d+)\s*$") | 5391 | order_pattern = re.compile("^(.*?)\s*(\w+)\s*\<?=\s*(\d+)\s*$") |
295 | 5383 | order_line = proc_number_re.group(3) | 5392 | order_line = proc_number_re.group(3) |
296 | 5384 | order_re = order_pattern.match(order_line) | 5393 | order_re = order_pattern.match(order_line) |
297 | 5385 | while order_re: | 5394 | while order_re: |
298 | 5386 | overall_orders[order_re.group(2)] = int(order_re.group(3)) | 5395 | overall_orders[order_re.group(2)] = int(order_re.group(3)) |
299 | 5387 | order_line = order_re.group(1) | 5396 | order_line = order_re.group(1) |
301 | 5388 | order_re = order_pattern.match(order_line) | 5397 | order_re = order_pattern.match(order_line) |
302 | 5389 | logger.info(line) | 5398 | logger.info(line) |
303 | 5390 | 5399 | ||
304 | 5391 | 5400 | ||
305 | @@ -6003,9 +6012,12 @@ | |||
306 | 6003 | if six.PY3: | 6012 | if six.PY3: |
307 | 6004 | self.options['lhapdf_py3'] = pjoin(prefix,'lhapdf6_py3','bin', 'lhapdf-config') | 6013 | self.options['lhapdf_py3'] = pjoin(prefix,'lhapdf6_py3','bin', 'lhapdf-config') |
308 | 6005 | self.exec_cmd('save options %s lhapdf_py3' % config_file) | 6014 | self.exec_cmd('save options %s lhapdf_py3' % config_file) |
309 | 6015 | self.options['lhapdf'] = self.options['lhapdf_py3'] | ||
310 | 6006 | else: | 6016 | else: |
311 | 6007 | self.options['lhapdf_py2'] = pjoin(prefix,'lhapdf6','bin', 'lhapdf-config') | 6017 | self.options['lhapdf_py2'] = pjoin(prefix,'lhapdf6','bin', 'lhapdf-config') |
312 | 6008 | self.exec_cmd('save options %s lhapdf_py2' % config_file) | 6018 | self.exec_cmd('save options %s lhapdf_py2' % config_file) |
313 | 6019 | self.options['lhapdf'] = self.options['lhapdf_py2'] | ||
314 | 6020 | |||
315 | 6009 | elif tool == 'lhapdf5': | 6021 | elif tool == 'lhapdf5': |
316 | 6010 | self.options['lhapdf'] = pjoin(prefix,'lhapdf5','bin', 'lhapdf-config') | 6022 | self.options['lhapdf'] = pjoin(prefix,'lhapdf5','bin', 'lhapdf-config') |
317 | 6011 | self.exec_cmd('save options %s lhapdf' % config_file, printcmd=False, log=False) | 6023 | self.exec_cmd('save options %s lhapdf' % config_file, printcmd=False, log=False) |
318 | @@ -6801,7 +6813,7 @@ | |||
319 | 6801 | data['last_check'] = time.time() | 6813 | data['last_check'] = time.time() |
320 | 6802 | 6814 | ||
321 | 6803 | #check if we need to update. | 6815 | #check if we need to update. |
323 | 6804 | if time.time() - data['last_check'] < update_delay: | 6816 | if time.time() - float(data['last_check']) < float(update_delay): |
324 | 6805 | return | 6817 | return |
325 | 6806 | 6818 | ||
326 | 6807 | logger.info('Checking if MG5 is up-to-date... (takes up to %ss)' % timeout) | 6819 | logger.info('Checking if MG5 is up-to-date... (takes up to %ss)' % timeout) |
327 | 6808 | 6820 | ||
328 | === modified file 'madgraph/iolibs/export_fks.py' | |||
329 | --- madgraph/iolibs/export_fks.py 2020-08-20 15:33:24 +0000 | |||
330 | +++ madgraph/iolibs/export_fks.py 2020-09-22 07:07:51 +0000 | |||
331 | @@ -16,7 +16,6 @@ | |||
332 | 16 | 16 | ||
333 | 17 | from __future__ import absolute_import | 17 | from __future__ import absolute_import |
334 | 18 | from __future__ import print_function | 18 | from __future__ import print_function |
335 | 19 | from distutils import dir_util | ||
336 | 20 | import glob | 19 | import glob |
337 | 21 | import logging | 20 | import logging |
338 | 22 | import os | 21 | import os |
339 | @@ -99,8 +98,8 @@ | |||
340 | 99 | logger.info('initialize a new directory: %s' % \ | 98 | logger.info('initialize a new directory: %s' % \ |
341 | 100 | os.path.basename(dir_path)) | 99 | os.path.basename(dir_path)) |
342 | 101 | shutil.copytree(os.path.join(mgme_dir, 'Template', 'NLO'), dir_path, True) | 100 | shutil.copytree(os.path.join(mgme_dir, 'Template', 'NLO'), dir_path, True) |
345 | 102 | # distutils.dir_util.copy_tree since dir_path already exists | 101 | # misc.copytree since dir_path already exists |
346 | 103 | dir_util.copy_tree(pjoin(self.mgme_dir, 'Template', 'Common'),dir_path) | 102 | misc.copytree(pjoin(self.mgme_dir, 'Template', 'Common'),dir_path) |
347 | 104 | # Copy plot_card | 103 | # Copy plot_card |
348 | 105 | for card in ['plot_card']: | 104 | for card in ['plot_card']: |
349 | 106 | if os.path.isfile(pjoin(self.dir_path, 'Cards',card + '.dat')): | 105 | if os.path.isfile(pjoin(self.dir_path, 'Cards',card + '.dat')): |
350 | @@ -3396,8 +3395,8 @@ | |||
351 | 3396 | logger.info('initialize a new directory: %s' % \ | 3395 | logger.info('initialize a new directory: %s' % \ |
352 | 3397 | os.path.basename(dir_path)) | 3396 | os.path.basename(dir_path)) |
353 | 3398 | shutil.copytree(os.path.join(mgme_dir, 'Template', 'NLO'), dir_path, True) | 3397 | shutil.copytree(os.path.join(mgme_dir, 'Template', 'NLO'), dir_path, True) |
356 | 3399 | # distutils.dir_util.copy_tree since dir_path already exists | 3398 | # misc.copytree since dir_path already exists |
357 | 3400 | dir_util.copy_tree(pjoin(self.mgme_dir, 'Template', 'Common'), | 3399 | misc.copytree(pjoin(self.mgme_dir, 'Template', 'Common'), |
358 | 3401 | dir_path) | 3400 | dir_path) |
359 | 3402 | # Copy plot_card | 3401 | # Copy plot_card |
360 | 3403 | for card in ['plot_card']: | 3402 | for card in ['plot_card']: |
361 | 3404 | 3403 | ||
362 | === modified file 'madgraph/iolibs/export_v4.py' | |||
363 | --- madgraph/iolibs/export_v4.py 2020-08-20 15:33:24 +0000 | |||
364 | +++ madgraph/iolibs/export_v4.py 2020-09-22 07:07:51 +0000 | |||
365 | @@ -20,7 +20,6 @@ | |||
366 | 20 | 20 | ||
367 | 21 | import copy | 21 | import copy |
368 | 22 | from six import StringIO | 22 | from six import StringIO |
369 | 23 | from distutils import dir_util | ||
370 | 24 | import itertools | 23 | import itertools |
371 | 25 | import fractions | 24 | import fractions |
372 | 26 | import glob | 25 | import glob |
373 | @@ -255,8 +254,8 @@ | |||
374 | 255 | os.path.basename(self.dir_path)) | 254 | os.path.basename(self.dir_path)) |
375 | 256 | shutil.copytree(pjoin(self.mgme_dir, 'Template/LO'), | 255 | shutil.copytree(pjoin(self.mgme_dir, 'Template/LO'), |
376 | 257 | self.dir_path, True) | 256 | self.dir_path, True) |
379 | 258 | # distutils.dir_util.copy_tree since dir_path already exists | 257 | # misc.copytree since dir_path already exists |
380 | 259 | dir_util.copy_tree(pjoin(self.mgme_dir, 'Template/Common'), | 258 | misc.copytree(pjoin(self.mgme_dir, 'Template/Common'), |
381 | 260 | self.dir_path) | 259 | self.dir_path) |
382 | 261 | # copy plot_card | 260 | # copy plot_card |
383 | 262 | for card in ['plot_card']: | 261 | for card in ['plot_card']: |
384 | @@ -269,8 +268,8 @@ | |||
385 | 269 | elif os.getcwd() == os.path.realpath(self.dir_path): | 268 | elif os.getcwd() == os.path.realpath(self.dir_path): |
386 | 270 | logger.info('working in local directory: %s' % \ | 269 | logger.info('working in local directory: %s' % \ |
387 | 271 | os.path.realpath(self.dir_path)) | 270 | os.path.realpath(self.dir_path)) |
390 | 272 | # distutils.dir_util.copy_tree since dir_path already exists | 271 | # misc.copytree since dir_path already exists |
391 | 273 | dir_util.copy_tree(pjoin(self.mgme_dir, 'Template/LO'), | 272 | misc.copytree(pjoin(self.mgme_dir, 'Template/LO'), |
392 | 274 | self.dir_path) | 273 | self.dir_path) |
393 | 275 | # for name in misc.glob('Template/LO/*', self.mgme_dir): | 274 | # for name in misc.glob('Template/LO/*', self.mgme_dir): |
394 | 276 | # name = os.path.basename(name) | 275 | # name = os.path.basename(name) |
395 | @@ -279,8 +278,8 @@ | |||
396 | 279 | # files.cp(filename, pjoin(self.dir_path,name)) | 278 | # files.cp(filename, pjoin(self.dir_path,name)) |
397 | 280 | # elif os.path.isdir(filename): | 279 | # elif os.path.isdir(filename): |
398 | 281 | # shutil.copytree(filename, pjoin(self.dir_path,name), True) | 280 | # shutil.copytree(filename, pjoin(self.dir_path,name), True) |
401 | 282 | # distutils.dir_util.copy_tree since dir_path already exists | 281 | # misc.copytree since dir_path already exists |
402 | 283 | dir_util.copy_tree(pjoin(self.mgme_dir, 'Template/Common'), | 282 | misc.copytree(pjoin(self.mgme_dir, 'Template/Common'), |
403 | 284 | self.dir_path) | 283 | self.dir_path) |
404 | 285 | # Copy plot_card | 284 | # Copy plot_card |
405 | 286 | for card in ['plot_card']: | 285 | for card in ['plot_card']: |
406 | @@ -903,7 +902,11 @@ | |||
407 | 903 | if hasattr(self, 'aloha_model'): | 902 | if hasattr(self, 'aloha_model'): |
408 | 904 | aloha_model = self.aloha_model | 903 | aloha_model = self.aloha_model |
409 | 905 | else: | 904 | else: |
411 | 906 | aloha_model = create_aloha.AbstractALOHAModel(os.path.basename(model.get('modelpath'))) | 905 | try: |
412 | 906 | with misc.MuteLogger(['madgraph.models'], [60]): | ||
413 | 907 | aloha_model = create_aloha.AbstractALOHAModel(os.path.basename(model.get('modelpath'))) | ||
414 | 908 | except ImportError: | ||
415 | 909 | aloha_model = create_aloha.AbstractALOHAModel(model.get('modelpath')) | ||
416 | 907 | aloha_model.add_Lorentz_object(model.get('lorentz')) | 910 | aloha_model.add_Lorentz_object(model.get('lorentz')) |
417 | 908 | 911 | ||
418 | 909 | # Compute the subroutines | 912 | # Compute the subroutines |
419 | 910 | 913 | ||
420 | === modified file 'madgraph/iolibs/group_subprocs.py' | |||
421 | --- madgraph/iolibs/group_subprocs.py 2020-08-20 15:33:24 +0000 | |||
422 | +++ madgraph/iolibs/group_subprocs.py 2020-09-22 07:07:51 +0000 | |||
423 | @@ -317,7 +317,7 @@ | |||
424 | 317 | "Need matrix elements to run find_mapping_diagrams" | 317 | "Need matrix elements to run find_mapping_diagrams" |
425 | 318 | 318 | ||
426 | 319 | if max_tpropa == 0: | 319 | if max_tpropa == 0: |
428 | 320 | max_tpropa = base_objects.Vertex.max_tpropa | 320 | max_tpropa = int(base_objects.Vertex.max_tpropa) |
429 | 321 | 321 | ||
430 | 322 | matrix_elements = self.get('matrix_elements') | 322 | matrix_elements = self.get('matrix_elements') |
431 | 323 | model = matrix_elements[0].get('processes')[0].get('model') | 323 | model = matrix_elements[0].get('processes')[0].get('model') |
432 | @@ -363,7 +363,7 @@ | |||
433 | 363 | max(diagram.get_vertex_leg_numbers()) > minvert: | 363 | max(diagram.get_vertex_leg_numbers()) > minvert: |
434 | 364 | diagram_maps[ime].append(0) | 364 | diagram_maps[ime].append(0) |
435 | 365 | continue | 365 | continue |
437 | 366 | if diagram.get_nb_t_channel() > max_tpropa: | 366 | if diagram.get_nb_t_channel() > int(max_tpropa): |
438 | 367 | diagram_maps[ime].append(0) | 367 | diagram_maps[ime].append(0) |
439 | 368 | continue | 368 | continue |
440 | 369 | # Create the equivalent diagram, in the format | 369 | # Create the equivalent diagram, in the format |
441 | 370 | 370 | ||
442 | === modified file 'madgraph/madevent/gen_crossxhtml.py' | |||
443 | --- madgraph/madevent/gen_crossxhtml.py 2020-06-21 18:48:13 +0000 | |||
444 | +++ madgraph/madevent/gen_crossxhtml.py 2020-09-22 07:07:51 +0000 | |||
445 | @@ -68,19 +68,6 @@ | |||
446 | 68 | } | 68 | } |
447 | 69 | return http.status!=404; | 69 | return http.status!=404; |
448 | 70 | } | 70 | } |
449 | 71 | function check_link(url,alt, id){ | ||
450 | 72 | var obj = document.getElementById(id); | ||
451 | 73 | if ( ! UrlExists(url)){ | ||
452 | 74 | if ( ! UrlExists(alt)){ | ||
453 | 75 | obj.href = url; | ||
454 | 76 | return 1==1; | ||
455 | 77 | } | ||
456 | 78 | obj.href = alt; | ||
457 | 79 | return 1 == 2; | ||
458 | 80 | } | ||
459 | 81 | obj.href = url; | ||
460 | 82 | return 1==1; | ||
461 | 83 | } | ||
462 | 84 | </script> | 71 | </script> |
463 | 85 | <H2 align=center> Results in the %(model)s for %(process)s </H2> | 72 | <H2 align=center> Results in the %(model)s for %(process)s </H2> |
464 | 86 | <HR> | 73 | <HR> |
465 | @@ -1014,14 +1001,16 @@ | |||
466 | 1014 | def special_link(self, link, level, name): | 1001 | def special_link(self, link, level, name): |
467 | 1015 | 1002 | ||
468 | 1016 | id = '%s_%s_%s_%s' % (self['run_name'],self['tag'], level, name) | 1003 | id = '%s_%s_%s_%s' % (self['run_name'],self['tag'], level, name) |
471 | 1017 | 1004 | return " <a id='%(id)s' href='%(link)s.gz'>%(name)s</a>" \ | |
470 | 1018 | return " <a id='%(id)s' href='%(link)s.gz' onClick=\"check_link('%(link)s.gz','%(link)s','%(id)s')\">%(name)s</a>" \ | ||
472 | 1019 | % {'link': link, 'id': id, 'name':name} | 1005 | % {'link': link, 'id': id, 'name':name} |
473 | 1006 | #return " <a id='%(id)s' href='%(link)s.gz' onClick=\"check_link('%(link)s.gz','%(link)s','%(id)s')\">%(name)s</a>" \ | ||
474 | 1007 | # % {'link': link, 'id': id, 'name':name} | ||
475 | 1020 | 1008 | ||
476 | 1021 | def double_link(self, link1, link2, name, id): | 1009 | def double_link(self, link1, link2, name, id): |
480 | 1022 | 1010 | return " <a id='%(id)s' href='%(link2)s'>%(name)s</a>" \ | |
481 | 1023 | return " <a id='%(id)s' href='%(link1)s' onClick=\"check_link('%(link1)s','%(link2)s','%(id)s')\">%(name)s</a>" \ | 1011 | % {'link1': link1, 'link2':link2, 'id': id, 'name':name} |
482 | 1024 | % {'link1': link1, 'link2':link2, 'id': id, 'name':name} | 1012 | #return " <a id='%(id)s' href='%(link2)s' onClick=\"check_link('%(link1)s','%(link2)s','%(id)s')\">%(name)s</a>" \ |
483 | 1013 | # % {'link1': link1, 'link2':link2, 'id': id, 'name':name} | ||
484 | 1025 | 1014 | ||
485 | 1026 | def get_links(self, level): | 1015 | def get_links(self, level): |
486 | 1027 | """ Get the links for a given level""" | 1016 | """ Get the links for a given level""" |
487 | 1028 | 1017 | ||
488 | === modified file 'madgraph/madevent/gen_ximprove.py' | |||
489 | --- madgraph/madevent/gen_ximprove.py 2020-06-21 18:48:13 +0000 | |||
490 | +++ madgraph/madevent/gen_ximprove.py 2020-09-22 07:07:51 +0000 | |||
491 | @@ -993,7 +993,7 @@ | |||
492 | 993 | f.close() | 993 | f.close() |
493 | 994 | 994 | ||
494 | 995 | def increase_precision(self, rate=3): | 995 | def increase_precision(self, rate=3): |
496 | 996 | misc.sprint(rate) | 996 | #misc.sprint(rate) |
497 | 997 | if rate < 3: | 997 | if rate < 3: |
498 | 998 | self.max_event_in_iter = 20000 | 998 | self.max_event_in_iter = 20000 |
499 | 999 | self.min_events = 7500 | 999 | self.min_events = 7500 |
500 | 1000 | 1000 | ||
501 | === modified file 'madgraph/madevent/sum_html.py' | |||
502 | --- madgraph/madevent/sum_html.py 2019-06-27 12:17:38 +0000 | |||
503 | +++ madgraph/madevent/sum_html.py 2020-09-22 07:07:51 +0000 | |||
504 | @@ -270,7 +270,7 @@ | |||
505 | 270 | # this can happen if we force maxweight | 270 | # this can happen if we force maxweight |
506 | 271 | self.th_nunwgt = 0 # associated number of event with th_maxwgt | 271 | self.th_nunwgt = 0 # associated number of event with th_maxwgt |
507 | 272 | #(this is theoretical do not correspond to a number of written event) | 272 | #(this is theoretical do not correspond to a number of written event) |
509 | 273 | 273 | self.timing = 0 | |
510 | 274 | return | 274 | return |
511 | 275 | 275 | ||
512 | 276 | #@cluster.multiple_try(nb_try=5,sleep=20) | 276 | #@cluster.multiple_try(nb_try=5,sleep=20) |
513 | @@ -286,7 +286,7 @@ | |||
514 | 286 | 286 | ||
515 | 287 | i=0 | 287 | i=0 |
516 | 288 | found_xsec_line = False | 288 | found_xsec_line = False |
518 | 289 | for line in finput: | 289 | for line in finput: |
519 | 290 | # Exit as soon as we hit the xml part. Not elegant, but the part | 290 | # Exit as soon as we hit the xml part. Not elegant, but the part |
520 | 291 | # below should eventually be xml anyway. | 291 | # below should eventually be xml anyway. |
521 | 292 | if '<' in line: | 292 | if '<' in line: |
522 | @@ -347,7 +347,7 @@ | |||
523 | 347 | xml.append(line) | 347 | xml.append(line) |
524 | 348 | 348 | ||
525 | 349 | if xml: | 349 | if xml: |
527 | 350 | self.parse_xml_results('\n'.join(xml)) | 350 | self.parse_xml_results('\n'.join(xml)) |
528 | 351 | 351 | ||
529 | 352 | # this is for amcatnlo: the number of events has to be read from another file | 352 | # this is for amcatnlo: the number of events has to be read from another file |
530 | 353 | if self.nevents == 0 and self.nunwgt == 0 and isinstance(filepath, str) and \ | 353 | if self.nevents == 0 and self.nunwgt == 0 and isinstance(filepath, str) and \ |
531 | @@ -368,6 +368,12 @@ | |||
532 | 368 | self.run_statistics.load_statistics(statistics_node[0]) | 368 | self.run_statistics.load_statistics(statistics_node[0]) |
533 | 369 | except ValueError as IndexError: | 369 | except ValueError as IndexError: |
534 | 370 | logger.warning('Fail to read run statistics from results.dat') | 370 | logger.warning('Fail to read run statistics from results.dat') |
535 | 371 | else: | ||
536 | 372 | lo_statistics_node = dom.getElementsByTagName("lo_statistics")[0] | ||
537 | 373 | timing = lo_statistics_node.getElementsByTagName('cumulated_time')[0] | ||
538 | 374 | timing= timing.firstChild.nodeValue | ||
539 | 375 | self.timing = 0.3 + float(timing) #0.3 is the typical latency of bash script/... | ||
540 | 376 | |||
541 | 371 | 377 | ||
542 | 372 | def set_mfactor(self, value): | 378 | def set_mfactor(self, value): |
543 | 373 | self.mfactor = int(value) | 379 | self.mfactor = int(value) |
544 | @@ -448,6 +454,7 @@ | |||
545 | 448 | self.nunwgt = sum([one.nunwgt for one in self]) | 454 | self.nunwgt = sum([one.nunwgt for one in self]) |
546 | 449 | self.wgt = 0 | 455 | self.wgt = 0 |
547 | 450 | self.luminosity = min([0]+[one.luminosity for one in self]) | 456 | self.luminosity = min([0]+[one.luminosity for one in self]) |
548 | 457 | self.timing = sum([one.timing for one in self]) | ||
549 | 451 | if update_statistics: | 458 | if update_statistics: |
550 | 452 | self.run_statistics.aggregate_statistics([_.run_statistics for _ in self]) | 459 | self.run_statistics.aggregate_statistics([_.run_statistics for _ in self]) |
551 | 453 | 460 | ||
552 | @@ -463,6 +470,7 @@ | |||
553 | 463 | self.xsec = sum([one.xsec for one in self]) /nbjobs | 470 | self.xsec = sum([one.xsec for one in self]) /nbjobs |
554 | 464 | self.xerrc = sum([one.xerrc for one in self]) /nbjobs | 471 | self.xerrc = sum([one.xerrc for one in self]) /nbjobs |
555 | 465 | self.xerru = math.sqrt(sum([one.xerru**2 for one in self])) /nbjobs | 472 | self.xerru = math.sqrt(sum([one.xerru**2 for one in self])) /nbjobs |
556 | 473 | self.timing = sum([one.timing for one in self]) #no average here | ||
557 | 466 | if error: | 474 | if error: |
558 | 467 | self.xerrc = error | 475 | self.xerrc = error |
559 | 468 | self.xerru = error | 476 | self.xerru = error |
560 | @@ -547,7 +555,7 @@ | |||
561 | 547 | table_line_template = \ | 555 | table_line_template = \ |
562 | 548 | """ | 556 | """ |
563 | 549 | <tr><td align=right>%(P_title)s</td> | 557 | <tr><td align=right>%(P_title)s</td> |
565 | 550 | <td align=right><a id="%(P_link)s" href=%(P_link)s onClick="check_link('%(P_link)s','%(mod_P_link)s','%(P_link)s')"> %(cross)s </a> </td> | 558 | <td align=right><a id="%(P_link)s" href=%(P_link)s > %(cross)s </a> </td> |
566 | 551 | <td align=right> %(error)s</td> | 559 | <td align=right> %(error)s</td> |
567 | 552 | <td align=right> %(events)s</td> | 560 | <td align=right> %(events)s</td> |
568 | 553 | <td align=right> %(unweighted)s</td> | 561 | <td align=right> %(unweighted)s</td> |
569 | @@ -672,6 +680,10 @@ | |||
570 | 672 | line = '%s %s %s %s %s %s\n' % (i+1, self.ysec_iter[i], self.yerr_iter[i], | 680 | line = '%s %s %s %s %s %s\n' % (i+1, self.ysec_iter[i], self.yerr_iter[i], |
571 | 673 | self.eff_iter[i], self.maxwgt_iter[i], self.yasec_iter[i]) | 681 | self.eff_iter[i], self.maxwgt_iter[i], self.yasec_iter[i]) |
572 | 674 | fsock.writelines(line) | 682 | fsock.writelines(line) |
573 | 683 | |||
574 | 684 | if self.timing: | ||
575 | 685 | text = """<lo_statistics>\n<cumulated_time> %s </cumulated_time>\n</lo_statistics>""" | ||
576 | 686 | fsock.writelines(text % self.timing) | ||
577 | 675 | 687 | ||
578 | 676 | 688 | ||
579 | 677 | 689 | ||
580 | @@ -694,19 +706,6 @@ | |||
581 | 694 | } | 706 | } |
582 | 695 | return http.status!=404; | 707 | return http.status!=404; |
583 | 696 | } | 708 | } |
584 | 697 | function check_link(url,alt, id){ | ||
585 | 698 | var obj = document.getElementById(id); | ||
586 | 699 | if ( ! UrlExists(url)){ | ||
587 | 700 | if ( ! UrlExists(alt)){ | ||
588 | 701 | obj.href = alt; | ||
589 | 702 | return true; | ||
590 | 703 | } | ||
591 | 704 | obj.href = alt; | ||
592 | 705 | return false; | ||
593 | 706 | } | ||
594 | 707 | obj.href = url; | ||
595 | 708 | return 1==1; | ||
596 | 709 | } | ||
597 | 710 | </script> | 709 | </script> |
598 | 711 | """ | 710 | """ |
599 | 712 | 711 | ||
600 | @@ -716,7 +715,6 @@ | |||
601 | 716 | run = cmd.results.current['run_name'] | 715 | run = cmd.results.current['run_name'] |
602 | 717 | all = Combine_results(run) | 716 | all = Combine_results(run) |
603 | 718 | 717 | ||
604 | 719 | |||
605 | 720 | for Pdir in cmd.get_Pdir(): | 718 | for Pdir in cmd.get_Pdir(): |
606 | 721 | P_comb = Combine_results(Pdir) | 719 | P_comb = Combine_results(Pdir) |
607 | 722 | 720 | ||
608 | @@ -759,7 +757,13 @@ | |||
609 | 759 | all.append(P_comb) | 757 | all.append(P_comb) |
610 | 760 | all.compute_values() | 758 | all.compute_values() |
611 | 761 | 759 | ||
613 | 762 | 760 | try: | |
614 | 761 | all_channels = sum([list(P) for P in all],[]) | ||
615 | 762 | timings = sum(x.timing for x in all_channels) | ||
616 | 763 | logger.info('sum of cpu time of last step: %s', misc.format_time(timings)) | ||
617 | 764 | except Exception as error: | ||
618 | 765 | logger.debug(str(error)) | ||
619 | 766 | pass | ||
620 | 763 | 767 | ||
621 | 764 | return all | 768 | return all |
622 | 765 | 769 | ||
623 | 766 | 770 | ||
624 | === modified file 'madgraph/various/banner.py' | |||
625 | --- madgraph/various/banner.py 2020-08-21 08:59:01 +0000 | |||
626 | +++ madgraph/various/banner.py 2020-09-22 07:07:51 +0000 | |||
627 | @@ -3259,6 +3259,21 @@ | |||
628 | 3259 | if abs(self['lpp1']) in [2, 3,4] and abs(self['lpp2']) in [2, 3,4] and not self['fixed_fac_scale']: | 3259 | if abs(self['lpp1']) in [2, 3,4] and abs(self['lpp2']) in [2, 3,4] and not self['fixed_fac_scale']: |
629 | 3260 | raise InvalidRunCard("Having both beam in elastic photon mode requires fixec_fac_scale to be on True [since this is use as cutoff]") | 3260 | raise InvalidRunCard("Having both beam in elastic photon mode requires fixec_fac_scale to be on True [since this is use as cutoff]") |
630 | 3261 | 3261 | ||
631 | 3262 | # check that ebeam is bigger than the associated mass. | ||
632 | 3263 | for i in [1,2]: | ||
633 | 3264 | if self['lpp%s' % i ] not in [1,2]: | ||
634 | 3265 | continue | ||
635 | 3266 | if self['mass_ion%i' % i] == -1: | ||
636 | 3267 | if self['ebeam%i' % i] < 0.938: | ||
637 | 3268 | if self['ebeam%i' %i] == 0: | ||
638 | 3269 | logger.warning("At rest proton mode set: Energy beam set to 0.938") | ||
639 | 3270 | self.set('ebeam%i' %i, 0.938) | ||
640 | 3271 | else: | ||
641 | 3272 | raise InvalidRunCard("Energy for beam %i lower than proton mass. Please fix this") | ||
642 | 3273 | elif self['ebeam%i' % i] < self['mass_ion%i' % i]: | ||
643 | 3274 | if self['ebeam%i' %i] == 0: | ||
644 | 3275 | logger.warning("At rest ion mode set: Energy beam set to %s" % self['mass_ion%i' % i]) | ||
645 | 3276 | self.set('ebeam%i' %i, self['mass_ion%i' % i]) | ||
646 | 3262 | 3277 | ||
647 | 3263 | def update_system_parameter_for_include(self): | 3278 | def update_system_parameter_for_include(self): |
648 | 3264 | 3279 | ||
649 | @@ -4241,6 +4256,19 @@ | |||
650 | 4241 | raise InvalidRunCard("'rw_fscale' has two or more identical entries. They have to be all different for the code to work correctly.") | 4256 | raise InvalidRunCard("'rw_fscale' has two or more identical entries. They have to be all different for the code to work correctly.") |
651 | 4242 | 4257 | ||
652 | 4243 | 4258 | ||
653 | 4259 | # check that ebeam is bigger than the proton mass. | ||
654 | 4260 | for i in [1,2]: | ||
655 | 4261 | if self['lpp%s' % i ] not in [1,2]: | ||
656 | 4262 | continue | ||
657 | 4263 | |||
658 | 4264 | if self['ebeam%i' % i] < 0.938: | ||
659 | 4265 | if self['ebeam%i' %i] == 0: | ||
660 | 4266 | logger.warning("At rest proton mode set: Energy beam set to 0.938") | ||
661 | 4267 | self.set('ebeam%i' %i, 0.938) | ||
662 | 4268 | else: | ||
663 | 4269 | raise InvalidRunCard("Energy for beam %i lower than proton mass. Please fix this") | ||
664 | 4270 | |||
665 | 4271 | |||
666 | 4244 | def update_system_parameter_for_include(self): | 4272 | def update_system_parameter_for_include(self): |
667 | 4245 | 4273 | ||
668 | 4246 | # set the pdg_for_cut fortran parameter | 4274 | # set the pdg_for_cut fortran parameter |
669 | 4247 | 4275 | ||
670 | === modified file 'madgraph/various/lhe_parser.py' | |||
671 | --- madgraph/various/lhe_parser.py 2020-06-21 18:48:13 +0000 | |||
672 | +++ madgraph/various/lhe_parser.py 2020-09-22 07:07:51 +0000 | |||
673 | @@ -2227,6 +2227,8 @@ | |||
674 | 2227 | 2227 | ||
675 | 2228 | if other is None: | 2228 | if other is None: |
676 | 2229 | return False | 2229 | return False |
677 | 2230 | if len(self) != len(other): | ||
678 | 2231 | return False | ||
679 | 2230 | 2232 | ||
680 | 2231 | for i,p in enumerate(self): | 2233 | for i,p in enumerate(self): |
681 | 2232 | if p.E != other[i].E: | 2234 | if p.E != other[i].E: |
682 | 2233 | 2235 | ||
683 | === modified file 'madgraph/various/misc.py' | |||
684 | --- madgraph/various/misc.py 2020-06-21 18:48:13 +0000 | |||
685 | +++ madgraph/various/misc.py 2020-09-22 07:07:51 +0000 | |||
686 | @@ -27,6 +27,7 @@ | |||
687 | 27 | import optparse | 27 | import optparse |
688 | 28 | import time | 28 | import time |
689 | 29 | import shutil | 29 | import shutil |
690 | 30 | import stat | ||
691 | 30 | import traceback | 31 | import traceback |
692 | 31 | import gzip as ziplib | 32 | import gzip as ziplib |
693 | 32 | from distutils.version import LooseVersion, StrictVersion | 33 | from distutils.version import LooseVersion, StrictVersion |
694 | @@ -939,6 +940,35 @@ | |||
695 | 939 | 940 | ||
696 | 940 | str_out = out.stdout.read().decode().strip() | 941 | str_out = out.stdout.read().decode().strip() |
697 | 941 | return str_out | 942 | return str_out |
698 | 943 | |||
699 | 944 | |||
700 | 945 | |||
701 | 946 | def copytree(src, dst, symlinks = False, ignore = None): | ||
702 | 947 | if not os.path.exists(dst): | ||
703 | 948 | os.makedirs(dst) | ||
704 | 949 | shutil.copystat(src, dst) | ||
705 | 950 | lst = os.listdir(src) | ||
706 | 951 | if ignore: | ||
707 | 952 | excl = ignore(src, lst) | ||
708 | 953 | lst = [x for x in lst if x not in excl] | ||
709 | 954 | for item in lst: | ||
710 | 955 | s = os.path.join(src, item) | ||
711 | 956 | d = os.path.join(dst, item) | ||
712 | 957 | if symlinks and os.path.islink(s): | ||
713 | 958 | if os.path.lexists(d): | ||
714 | 959 | os.remove(d) | ||
715 | 960 | os.symlink(os.readlink(s), d) | ||
716 | 961 | try: | ||
717 | 962 | st = os.lstat(s) | ||
718 | 963 | mode = stat.S_IMODE(st.st_mode) | ||
719 | 964 | os.lchmod(d, mode) | ||
720 | 965 | except: | ||
721 | 966 | pass # lchmod not available | ||
722 | 967 | elif os.path.isdir(s): | ||
723 | 968 | copytree(s, d, symlinks, ignore) | ||
724 | 969 | else: | ||
725 | 970 | shutil.copy2(s, d) | ||
726 | 971 | |||
727 | 942 | 972 | ||
728 | 943 | 973 | ||
729 | 944 | @multiple_try() | 974 | @multiple_try() |
730 | 945 | 975 | ||
731 | === modified file 'mg5decay/decay_objects.py' | |||
732 | --- mg5decay/decay_objects.py 2020-08-20 15:33:24 +0000 | |||
733 | +++ mg5decay/decay_objects.py 2020-09-22 07:07:51 +0000 | |||
734 | @@ -43,6 +43,7 @@ | |||
735 | 43 | from __future__ import print_function | 43 | from __future__ import print_function |
736 | 44 | import array | 44 | import array |
737 | 45 | import cmath | 45 | import cmath |
738 | 46 | import collections | ||
739 | 46 | import copy | 47 | import copy |
740 | 47 | import itertools | 48 | import itertools |
741 | 48 | import logging | 49 | import logging |
742 | @@ -1034,6 +1035,7 @@ | |||
743 | 1034 | 1035 | ||
744 | 1035 | # Group channels into amplitudes | 1036 | # Group channels into amplitudes |
745 | 1036 | self.group_channels_2_amplitudes(clevel, model, min_br) | 1037 | self.group_channels_2_amplitudes(clevel, model, min_br) |
746 | 1038 | |||
747 | 1037 | 1039 | ||
748 | 1038 | 1040 | ||
749 | 1039 | def connect_channel_vertex(self, sub_channel, index, vertex, model): | 1041 | def connect_channel_vertex(self, sub_channel, index, vertex, model): |
750 | @@ -1170,7 +1172,11 @@ | |||
751 | 1170 | # Do not include the first leg (initial id) | 1172 | # Do not include the first leg (initial id) |
752 | 1171 | if sorted([l.get('id') for l in amplt['process']['legs'][1:]])\ | 1173 | if sorted([l.get('id') for l in amplt['process']['legs'][1:]])\ |
753 | 1172 | == final_pid: | 1174 | == final_pid: |
755 | 1173 | amplt.add_std_diagram(channel) | 1175 | |
756 | 1176 | for symchan in channel.get_symmetric_channel(): | ||
757 | 1177 | amplt.add_std_diagram(symchan) | ||
758 | 1178 | |||
759 | 1179 | |||
760 | 1174 | found = True | 1180 | found = True |
761 | 1175 | break | 1181 | break |
762 | 1176 | 1182 | ||
763 | @@ -1766,6 +1772,7 @@ | |||
764 | 1766 | interaction = self.get('interaction_dict')[vertex['id']] | 1772 | interaction = self.get('interaction_dict')[vertex['id']] |
765 | 1767 | decay_parts = [p for p in interaction['particles']] | 1773 | decay_parts = [p for p in interaction['particles']] |
766 | 1768 | 1774 | ||
767 | 1775 | # avoid self decay | ||
768 | 1769 | if len([1 for p in decay_parts if abs(p['pdg_code'])==abs(initpart['pdg_code'])]) >1: | 1776 | if len([1 for p in decay_parts if abs(p['pdg_code'])==abs(initpart['pdg_code'])]) >1: |
769 | 1770 | self['invalid_Npoint'].append(vertex['id']) | 1777 | self['invalid_Npoint'].append(vertex['id']) |
770 | 1771 | return False | 1778 | return False |
771 | @@ -1808,15 +1815,23 @@ | |||
772 | 1808 | 1815 | ||
773 | 1809 | #check that all substructure are valid | 1816 | #check that all substructure are valid |
774 | 1810 | #remove if any radiation and two times the same particle in a vertex | 1817 | #remove if any radiation and two times the same particle in a vertex |
775 | 1818 | # 2020: relaxed to avoid only twice initial particle in the vertex | ||
776 | 1811 | for v in proc['vertices']: | 1819 | for v in proc['vertices']: |
777 | 1812 | if any([get_mass(l)==0 for l in v.get('legs')]): | 1820 | if any([get_mass(l)==0 for l in v.get('legs')]): |
778 | 1813 | self['invalid_Npoint'].append(vertex['id']) | 1821 | self['invalid_Npoint'].append(vertex['id']) |
779 | 1814 | return False | 1822 | return False |
783 | 1815 | 1823 | init_pdg = [l['id'] for l in v.get('legs') if l['number'] ==1][0] | |
784 | 1816 | ids = set(abs(l['id']) for l in v.get('legs')) | 1824 | nb_part = [1 for l in v.get('legs') if abs(l['id']) in [abs(init_pdg), abs(initpart.get('pdg_code'))]] |
785 | 1817 | if len(ids) != len(vertex.get('legs')): | 1825 | if len(nb_part) > 1: |
786 | 1818 | self['invalid_Npoint'].append(vertex['id']) | 1826 | self['invalid_Npoint'].append(vertex['id']) |
787 | 1819 | return False | 1827 | return False |
788 | 1828 | |||
789 | 1829 | # before relaxation it was | ||
790 | 1830 | # seems to me to be always False | ||
791 | 1831 | #ids = set(abs(l['id']) for l in v.get('legs')) | ||
792 | 1832 | #if len(ids) != len(vertex.get('legs')): | ||
793 | 1833 | # self['invalid_Npoint'].append(vertex['id']) | ||
794 | 1834 | # return False | ||
795 | 1820 | 1835 | ||
796 | 1821 | # check onshell/offshell status | 1836 | # check onshell/offshell status |
797 | 1822 | prev_mass = 0 | 1837 | prev_mass = 0 |
798 | @@ -3376,6 +3391,85 @@ | |||
799 | 3376 | self['fermionfactor'] = 1 | 3391 | self['fermionfactor'] = 1 |
800 | 3377 | 3392 | ||
801 | 3378 | 3393 | ||
802 | 3394 | def get_symmetric_channel(self, ignore=[]): | ||
803 | 3395 | |||
804 | 3396 | if self['s_factor'] == 1: | ||
805 | 3397 | return [self] | ||
806 | 3398 | elif len(self['vertices']) == 1: | ||
807 | 3399 | return [self] | ||
808 | 3400 | elif len(self['final_legs']) == len(set(l['id'] for l in self['final_legs'])): | ||
809 | 3401 | return [self] | ||
810 | 3402 | |||
811 | 3403 | # check if all symetry are already handle: | ||
812 | 3404 | if len(set(l['id'] for l in self['final_legs'] if l['id'] not in ignore)) ==\ | ||
813 | 3405 | len([ l['id'] for l in self['final_legs'] if l['id'] not in ignore]): | ||
814 | 3406 | return [self] | ||
815 | 3407 | |||
816 | 3408 | nb_id = collections.defaultdict(int) | ||
817 | 3409 | for l in self['final_legs']: | ||
818 | 3410 | nb_id[l['id']] += 1 | ||
819 | 3411 | |||
820 | 3412 | id_to_handle = [id for id in nb_id if nb_id[id] > 1 and id not in ignore] | ||
821 | 3413 | |||
822 | 3414 | handling = id_to_handle[0] | ||
823 | 3415 | remain_id = id_to_handle[1:] | ||
824 | 3416 | out = [] | ||
825 | 3417 | |||
826 | 3418 | numbers = [l.get('number') for l in self['final_legs'] if l.get('id') == handling] | ||
827 | 3419 | |||
828 | 3420 | for new_numbers in itertools.permutations(numbers): | ||
829 | 3421 | mapping_id = dict([(o,n) for o,n in zip(numbers, new_numbers) if o!=n]) | ||
830 | 3422 | if not mapping_id: | ||
831 | 3423 | out.append(self) | ||
832 | 3424 | continue | ||
833 | 3425 | channel = copy.copy(self) | ||
834 | 3426 | channel['vertices'] = base_objects.VertexList() | ||
835 | 3427 | # (real) DiagramTag | ||
836 | 3428 | channel['tag'] = [] | ||
837 | 3429 | # IdentifyHelasTag | ||
838 | 3430 | channel['helastag'] = [] | ||
839 | 3431 | # the number of the corresponding helas calls | ||
840 | 3432 | channel['helas_number'] = None | ||
841 | 3433 | # diagram written by IdentifyHelasTag | ||
842 | 3434 | channel['std_diagram'] = None | ||
843 | 3435 | for l,vertex in enumerate(self['vertices']): | ||
844 | 3436 | new_vertex = copy.copy(vertex) | ||
845 | 3437 | new_vertex['legs'] = base_objects.LegList() | ||
846 | 3438 | min_id = 99 | ||
847 | 3439 | for leg in vertex['legs']: | ||
848 | 3440 | if leg['number'] in mapping_id: | ||
849 | 3441 | new_leg = copy.copy(leg) | ||
850 | 3442 | new_leg.set('number', mapping_id[leg['number']]) | ||
851 | 3443 | new_vertex['legs'].append(new_leg) | ||
852 | 3444 | else: | ||
853 | 3445 | new_vertex['legs'].append(leg) | ||
854 | 3446 | min_id = min(min_id, leg['number']) | ||
855 | 3447 | |||
856 | 3448 | if min_id != new_vertex['legs'][-1]['number']: | ||
857 | 3449 | if l != len(self['vertices']) -1: | ||
858 | 3450 | mapping_id[new_vertex['legs'][-1]['number']] = min_id | ||
859 | 3451 | new_vertex['legs'][-1]['number'] = min_id | ||
860 | 3452 | channel['vertices'].append(new_vertex) | ||
861 | 3453 | out.append(channel) | ||
862 | 3454 | |||
863 | 3455 | |||
864 | 3456 | # do the recursion | ||
865 | 3457 | if len(remain_id) > 1: | ||
866 | 3458 | all_out = [] | ||
867 | 3459 | for d in out: | ||
868 | 3460 | all_out += d.get_symmetric_channel(ignore=ignore) | ||
869 | 3461 | return all_out | ||
870 | 3462 | else: | ||
871 | 3463 | return out | ||
872 | 3464 | |||
873 | 3465 | |||
874 | 3466 | |||
875 | 3467 | |||
876 | 3468 | |||
877 | 3469 | |||
878 | 3470 | |||
879 | 3471 | |||
880 | 3472 | |||
881 | 3379 | def filter(self, name, value): | 3473 | def filter(self, name, value): |
882 | 3380 | """Filter for valid diagram property values.""" | 3474 | """Filter for valid diagram property values.""" |
883 | 3381 | 3475 | ||
884 | @@ -3494,6 +3588,7 @@ | |||
885 | 3494 | tmp.sort() | 3588 | tmp.sort() |
886 | 3495 | if base == tmp: | 3589 | if base == tmp: |
887 | 3496 | return False | 3590 | return False |
888 | 3591 | |||
889 | 3497 | return True | 3592 | return True |
890 | 3498 | 3593 | ||
891 | 3499 | 3594 | ||
892 | @@ -4373,6 +4468,7 @@ | |||
893 | 4373 | # of list. | 4468 | # of list. |
894 | 4374 | if count != 1: | 4469 | if count != 1: |
895 | 4375 | self['s_factor'] = self['s_factor'] * math.factorial(count) | 4470 | self['s_factor'] = self['s_factor'] * math.factorial(count) |
896 | 4471 | |||
897 | 4376 | return math.sqrt((M ** 2+mass_list[0] ** 2-mass_list[1] ** 2) ** 2-\ | 4472 | return math.sqrt((M ** 2+mass_list[0] ** 2-mass_list[1] ** 2) ** 2-\ |
898 | 4377 | (2* M *mass_list[0]) ** 2)* \ | 4473 | (2* M *mass_list[0]) ** 2)* \ |
899 | 4378 | 1./(8*math.pi*(M ** 2)*self['s_factor']) | 4474 | 1./(8*math.pi*(M ** 2)*self['s_factor']) |
900 | @@ -4664,7 +4760,6 @@ | |||
901 | 4664 | non_std_numbers = [(l.get('id'),l.get('number')) \ | 4760 | non_std_numbers = [(l.get('id'),l.get('number')) \ |
902 | 4665 | for l in new_dia.get_final_legs()] | 4761 | for l in new_dia.get_final_legs()] |
903 | 4666 | 4762 | ||
904 | 4667 | |||
905 | 4668 | # initial leg | 4763 | # initial leg |
906 | 4669 | non_std_numbers.append((new_dia.get_initial_id(model), 1)) | 4764 | non_std_numbers.append((new_dia.get_initial_id(model), 1)) |
907 | 4670 | import operator | 4765 | import operator |
908 | @@ -4680,7 +4775,6 @@ | |||
909 | 4680 | if non_std_numbers == std_numbers: | 4775 | if non_std_numbers == std_numbers: |
910 | 4681 | self['diagrams'].append(new_dia) | 4776 | self['diagrams'].append(new_dia) |
911 | 4682 | return | 4777 | return |
912 | 4683 | |||
913 | 4684 | # Conversion from non_std_number to std_number | 4778 | # Conversion from non_std_number to std_number |
914 | 4685 | converted_dict = dict([(num[1], std_numbers[i][1])\ | 4779 | converted_dict = dict([(num[1], std_numbers[i][1])\ |
915 | 4686 | for i, num in enumerate(non_std_numbers)]) | 4780 | for i, num in enumerate(non_std_numbers)]) |
916 | @@ -4728,6 +4822,7 @@ | |||
917 | 4728 | 4822 | ||
918 | 4729 | # Add this standard diagram into diagrams | 4823 | # Add this standard diagram into diagrams |
919 | 4730 | self['diagrams'].append(new_dia) | 4824 | self['diagrams'].append(new_dia) |
920 | 4825 | |||
921 | 4731 | 4826 | ||
922 | 4732 | 4827 | ||
923 | 4733 | def reset_width_br(self): | 4828 | def reset_width_br(self): |
924 | 4734 | 4829 | ||
925 | === modified file 'models/__init__.py' | |||
926 | --- models/__init__.py 2020-02-11 10:57:44 +0000 | |||
927 | +++ models/__init__.py 2020-09-22 07:07:51 +0000 | |||
928 | @@ -43,14 +43,15 @@ | |||
929 | 43 | return sys.modules[model_pos] | 43 | return sys.modules[model_pos] |
930 | 44 | except Exception as error: | 44 | except Exception as error: |
931 | 45 | pass | 45 | pass |
940 | 46 | for p in os.environ['PYTHONPATH'].split(':'): | 46 | if 'PYTHONPATH' in os.environ: |
941 | 47 | new_name = os.path.join(p, name) | 47 | for p in os.environ['PYTHONPATH'].split(':'): |
942 | 48 | try: | 48 | new_name = os.path.join(p, name) |
943 | 49 | return load_model(new_name, decay) | 49 | try: |
944 | 50 | except Exception: | 50 | return load_model(new_name, decay) |
945 | 51 | pass | 51 | except Exception: |
946 | 52 | except ImportError: | 52 | pass |
947 | 53 | pass | 53 | except ImportError: |
948 | 54 | pass | ||
949 | 54 | elif path_split[-1] in sys.modules: | 55 | elif path_split[-1] in sys.modules: |
950 | 55 | model_path = os.path.realpath(os.sep.join(path_split)) | 56 | model_path = os.path.realpath(os.sep.join(path_split)) |
951 | 56 | sys_path = os.path.realpath(os.path.dirname(sys.modules[path_split[-1]].__file__)) | 57 | sys_path = os.path.realpath(os.path.dirname(sys.modules[path_split[-1]].__file__)) |
952 | 57 | 58 | ||
953 | === modified file 'models/check_param_card.py' | |||
954 | --- models/check_param_card.py 2019-04-17 18:52:07 +0000 | |||
955 | +++ models/check_param_card.py 2020-09-22 07:07:51 +0000 | |||
956 | @@ -1323,7 +1323,7 @@ | |||
957 | 1323 | logger.log(log,'For model consistency, update %s with id %s to value %s', | 1323 | logger.log(log,'For model consistency, update %s with id %s to value %s', |
958 | 1324 | (block, id, 1.0), '$MG:BOLD') | 1324 | (block, id, 1.0), '$MG:BOLD') |
959 | 1325 | elif log: | 1325 | elif log: |
961 | 1326 | logger.log(log,'For model consistency, update %s with id %s to value %s', | 1326 | logger.log(log,'For model consistency, update %s with id %s to value %s' % |
962 | 1327 | (block, id, 1.0)) | 1327 | (block, id, 1.0)) |
963 | 1328 | 1328 | ||
964 | 1329 | 1329 | ||
965 | 1330 | 1330 | ||
966 | === modified file 'models/import_ufo.py' | |||
967 | --- models/import_ufo.py 2020-02-27 13:38:00 +0000 | |||
968 | +++ models/import_ufo.py 2020-09-22 07:07:51 +0000 | |||
969 | @@ -18,6 +18,7 @@ | |||
970 | 18 | import collections | 18 | import collections |
971 | 19 | import fractions | 19 | import fractions |
972 | 20 | import logging | 20 | import logging |
973 | 21 | import math | ||
974 | 21 | import os | 22 | import os |
975 | 22 | import re | 23 | import re |
976 | 23 | import sys | 24 | import sys |
977 | @@ -445,10 +446,27 @@ | |||
978 | 445 | 446 | ||
979 | 446 | def __init__(self, model, auto=False): | 447 | def __init__(self, model, auto=False): |
980 | 447 | """ initialize empty list for particles/interactions """ | 448 | """ initialize empty list for particles/interactions """ |
985 | 448 | 449 | ||
986 | 449 | if hasattr(model, '__arxiv__'): | 450 | if hasattr(model, '__header__'): |
987 | 450 | logger.info('Please cite %s when using this model', model.__arxiv__, '$MG:color:BLACK') | 451 | header = model.__header__ |
988 | 451 | 452 | if len(header) > 500 or header.count('\n') > 5: | |
989 | 453 | logger.debug("Too long header") | ||
990 | 454 | else: | ||
991 | 455 | logger.info("\n"+header) | ||
992 | 456 | else: | ||
993 | 457 | f =collections.defaultdict(lambda : 'n/a') | ||
994 | 458 | for key in ['author', 'version', 'email', 'arxiv']: | ||
995 | 459 | if hasattr(model, '__%s__' % key): | ||
996 | 460 | val = getattr(model, '__%s__' % key) | ||
997 | 461 | if 'Duhr' in val: | ||
998 | 462 | continue | ||
999 | 463 | f[key] = getattr(model, '__%s__' % key) | ||
1000 | 464 | |||
1001 | 465 | if len(f)>2: | ||
1002 | 466 | logger.info("This model [version %(version)s] is provided by %(author)s (email: %(email)s). Please cite %(arxiv)s" % f, '$MG:color:BLACK') | ||
1003 | 467 | elif hasattr(model, '__arxiv__'): | ||
1004 | 468 | logger.info('Please cite %s when using this model', model.__arxiv__, '$MG:color:BLACK') | ||
1005 | 469 | |||
1006 | 452 | self.particles = base_objects.ParticleList() | 470 | self.particles = base_objects.ParticleList() |
1007 | 453 | self.interactions = base_objects.InteractionList() | 471 | self.interactions = base_objects.InteractionList() |
1008 | 454 | self.non_qcd_gluon_emission = 0 # vertex where a gluon is emitted withou QCD interaction | 472 | self.non_qcd_gluon_emission = 0 # vertex where a gluon is emitted withou QCD interaction |
1009 | @@ -1860,6 +1878,11 @@ | |||
1010 | 1860 | self.rule_card = check_param_card.ParamCardRule() | 1878 | self.rule_card = check_param_card.ParamCardRule() |
1011 | 1861 | self.restrict_card = None | 1879 | self.restrict_card = None |
1012 | 1862 | self.coupling_order_dict ={} | 1880 | self.coupling_order_dict ={} |
1013 | 1881 | self.autowidth = [] | ||
1014 | 1882 | |||
1015 | 1883 | def modify_autowidth(self, cards, id): | ||
1016 | 1884 | self.autowidth.append([int(id[0])]) | ||
1017 | 1885 | return math.log10(2*len(self.autowidth)) | ||
1018 | 1863 | 1886 | ||
1019 | 1864 | def restrict_model(self, param_card, rm_parameter=True, keep_external=False, | 1887 | def restrict_model(self, param_card, rm_parameter=True, keep_external=False, |
1020 | 1865 | complex_mass_scheme=None): | 1888 | complex_mass_scheme=None): |
1021 | @@ -1879,7 +1902,8 @@ | |||
1022 | 1879 | # compute the value of all parameters | 1902 | # compute the value of all parameters |
1023 | 1880 | # Get the list of definition of model functions, parameter values. | 1903 | # Get the list of definition of model functions, parameter values. |
1024 | 1881 | model_definitions = self.set_parameters_and_couplings(param_card, | 1904 | model_definitions = self.set_parameters_and_couplings(param_card, |
1026 | 1882 | complex_mass_scheme=complex_mass_scheme) | 1905 | complex_mass_scheme=complex_mass_scheme, |
1027 | 1906 | auto_width=self.modify_autowidth) | ||
1028 | 1883 | 1907 | ||
1029 | 1884 | # Simplify conditional statements | 1908 | # Simplify conditional statements |
1030 | 1885 | logger.log(self.log_level, 'Simplifying conditional expressions') | 1909 | logger.log(self.log_level, 'Simplifying conditional expressions') |
1031 | @@ -1932,8 +1956,23 @@ | |||
1032 | 1932 | self['parameter_dict'][name] = 1 | 1956 | self['parameter_dict'][name] = 1 |
1033 | 1933 | elif value == 0.000001e-99: | 1957 | elif value == 0.000001e-99: |
1034 | 1934 | self['parameter_dict'][name] = 0 | 1958 | self['parameter_dict'][name] = 0 |
1037 | 1935 | 1959 | ||
1038 | 1936 | 1960 | # | |
1039 | 1961 | # restore auto-width value | ||
1040 | 1962 | # | ||
1041 | 1963 | #for lhacode in self.autowidth: | ||
1042 | 1964 | for parameter in self['parameters'][('external',)]: | ||
1043 | 1965 | if parameter.lhablock.lower() == 'decay' and parameter.lhacode in self.autowidth: | ||
1044 | 1966 | parameter.value = 'auto' | ||
1045 | 1967 | if parameter.name in self['parameter_dict']: | ||
1046 | 1968 | self['parameter_dict'][parameter.name] = 'auto' | ||
1047 | 1969 | elif parameter.name.startswith('mdl_'): | ||
1048 | 1970 | self['parameter_dict'][parameter.name[4:]] = 'auto' | ||
1049 | 1971 | else: | ||
1050 | 1972 | raise Exception | ||
1051 | 1973 | |||
1052 | 1974 | |||
1053 | 1975 | |||
1054 | 1937 | def locate_coupling(self): | 1976 | def locate_coupling(self): |
1055 | 1938 | """ create a dict couplings_name -> vertex or (particle, counterterm_key) """ | 1977 | """ create a dict couplings_name -> vertex or (particle, counterterm_key) """ |
1056 | 1939 | 1978 | ||
1057 | @@ -2480,6 +2519,7 @@ | |||
1058 | 2480 | logger_mod.log(self.log_level,'remove parameters: %s' % (param)) | 2519 | logger_mod.log(self.log_level,'remove parameters: %s' % (param)) |
1059 | 2481 | data = self['parameters'][param_info[param]['dep']] | 2520 | data = self['parameters'][param_info[param]['dep']] |
1060 | 2482 | data.remove(param_info[param]['obj']) | 2521 | data.remove(param_info[param]['obj']) |
1061 | 2522 | |||
1062 | 2483 | 2523 | ||
1063 | 2484 | def optimise_interaction(self, interaction): | 2524 | def optimise_interaction(self, interaction): |
1064 | 2485 | 2525 | ||
1065 | 2486 | 2526 | ||
1066 | === modified file 'models/model_reader.py' | |||
1067 | --- models/model_reader.py 2020-06-21 18:48:13 +0000 | |||
1068 | +++ models/model_reader.py 2020-09-22 07:07:51 +0000 | |||
1069 | @@ -58,7 +58,8 @@ | |||
1070 | 58 | super(ModelReader, self).default_setup() | 58 | super(ModelReader, self).default_setup() |
1071 | 59 | 59 | ||
1072 | 60 | def set_parameters_and_couplings(self, param_card = None, scale=None, | 60 | def set_parameters_and_couplings(self, param_card = None, scale=None, |
1074 | 61 | complex_mass_scheme=None): | 61 | complex_mass_scheme=None, |
1075 | 62 | auto_width=None): | ||
1076 | 62 | """Read a param_card and calculate all parameters and | 63 | """Read a param_card and calculate all parameters and |
1077 | 63 | couplings. Set values directly in the parameters and | 64 | couplings. Set values directly in the parameters and |
1078 | 64 | couplings, plus add new dictionary coupling_dict from | 65 | couplings, plus add new dictionary coupling_dict from |
1079 | @@ -84,6 +85,9 @@ | |||
1080 | 84 | raise MadGraph5Error("No such file %s" % param_card) | 85 | raise MadGraph5Error("No such file %s" % param_card) |
1081 | 85 | param_card_text = param_card | 86 | param_card_text = param_card |
1082 | 86 | param_card = card_reader.ParamCard(param_card) | 87 | param_card = card_reader.ParamCard(param_card) |
1083 | 88 | for param in param_card.get('decay'): | ||
1084 | 89 | if str(param.value).lower() == 'auto': | ||
1085 | 90 | param.value = auto_width(param_card, param.lhacode) | ||
1086 | 87 | #misc.sprint(type(param_card), card_reader.ParamCard, isinstance(param_card, card_reader.ParamCard)) | 91 | #misc.sprint(type(param_card), card_reader.ParamCard, isinstance(param_card, card_reader.ParamCard)) |
1087 | 88 | #assert isinstance(param_card, card_reader.ParamCard),'%s is not a ParamCard: %s' % (type(param_card), isinstance(param_card, card_reader.ParamCard)) | 92 | #assert isinstance(param_card, card_reader.ParamCard),'%s is not a ParamCard: %s' % (type(param_card), isinstance(param_card, card_reader.ParamCard)) |
1088 | 89 | 93 | ||
1089 | 90 | 94 | ||
1090 | === modified file 'models/write_param_card.py' | |||
1091 | --- models/write_param_card.py 2019-06-27 12:21:53 +0000 | |||
1092 | +++ models/write_param_card.py 2020-09-22 07:07:51 +0000 | |||
1093 | @@ -243,9 +243,8 @@ | |||
1094 | 243 | if info.startswith('mdl_'): | 243 | if info.startswith('mdl_'): |
1095 | 244 | info = info[4:] | 244 | info = info[4:] |
1096 | 245 | 245 | ||
1098 | 246 | if param.value.imag != 0: | 246 | if param.value != 'auto' and param.value.imag != 0: |
1099 | 247 | raise ParamCardWriterError('All External Parameter should be real (not the case for %s)'%param.name) | 247 | raise ParamCardWriterError('All External Parameter should be real (not the case for %s)'%param.name) |
1100 | 248 | |||
1101 | 249 | 248 | ||
1102 | 250 | # avoid to keep special value used to avoid restriction | 249 | # avoid to keep special value used to avoid restriction |
1103 | 251 | if param.value == 9.999999e-1: | 250 | if param.value == 9.999999e-1: |
1104 | @@ -257,6 +256,8 @@ | |||
1105 | 257 | lhacode=' '.join(['%3s' % key for key in param.lhacode]) | 256 | lhacode=' '.join(['%3s' % key for key in param.lhacode]) |
1106 | 258 | if lhablock != 'DECAY': | 257 | if lhablock != 'DECAY': |
1107 | 259 | text = """ %s %e # %s \n""" % (lhacode, param.value.real, info) | 258 | text = """ %s %e # %s \n""" % (lhacode, param.value.real, info) |
1108 | 259 | elif param.value == 'auto': | ||
1109 | 260 | text = '''DECAY %s auto # %s \n''' % (lhacode, info) | ||
1110 | 260 | else: | 261 | else: |
1111 | 261 | text = '''DECAY %s %e # %s \n''' % (lhacode, param.value.real, info) | 262 | text = '''DECAY %s %e # %s \n''' % (lhacode, param.value.real, info) |
1112 | 262 | self.fsock.write(text) | 263 | self.fsock.write(text) |