Skip to content

Commit 687ed9a

Browse files
sevyharris authored and rwest committed
uncertainty: change x**2 pattern to x*x for speedup
Richard noted that for slow f(x) it might be that f(x)*f(x) is slower than f(x)**2, so I undid a few of those cases when rebasing. But when x is just a number, yes, x*x is faster than x**2. Co-Authored-By: Richard West <r.west@northeastern.edu>
1 parent 8286ad7 commit 687ed9a

1 file changed

Lines changed: 16 additions & 16 deletions

File tree

rmgpy/tools/uncertainty.py

Lines changed: 16 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -64,18 +64,18 @@ def get_uncertainty_value(self, source):
6464
"""
6565
varG = 0.0
6666
if 'Library' in source:
67-
varG += self.dG_library ** 2
67+
varG += self.dG_library * self.dG_library
6868
if 'Surface_Library' in source:
69-
varG += self.dG_surf_lib ** 2
69+
varG += self.dG_surf_lib * self.dG_surf_lib
7070
if 'QM' in source:
71-
varG += self.dG_QM ** 2
71+
varG += self.dG_QM * self.dG_QM
7272
if 'GAV' in source:
73-
varG += self.dG_GAV ** 2 # Add a fixed uncertainty for the GAV method
73+
varG += self.dG_GAV * self.dG_GAV # Add a fixed uncertainty for the GAV method
7474
for group_type, group_entries in source['GAV'].items():
7575
group_weights = [groupTuple[-1] for groupTuple in group_entries]
76-
varG += np.sum([weight ** 2 * self.dG_group ** 2 for weight in group_weights])
76+
varG += np.sum([weight * weight * self.dG_group * self.dG_group for weight in group_weights])
7777
if 'ADS' in source:
78-
varG += self.dG_ADS_correction ** 2 # Add adsorption correction uncertainty
78+
varG += self.dG_ADS_correction * self.dG_ADS_correction # Add adsorption correction uncertainty
7979

8080
return np.sqrt(varG)
8181

@@ -173,29 +173,29 @@ def get_uncertainty_value(self, source):
173173
varlnk = 0.0
174174
if 'Library' in source:
175175
# Should be a single library reaction source
176-
varlnk += self.dlnk_library ** 2
176+
varlnk += self.dlnk_library * self.dlnk_library
177177
elif 'Surface_Library' in source:
178178
# Should be a single library reaction source
179-
varlnk += self.dlnk_surf_library ** 2
179+
varlnk += self.dlnk_surf_library * self.dlnk_surf_library
180180
elif 'PDep' in source:
181181
# Should be a single pdep reaction source
182-
varlnk += self.dlnk_pdep ** 2
182+
varlnk += self.dlnk_pdep * self.dlnk_pdep
183183
elif 'Training' in source:
184184
# Should be a single training reaction
185185
# Although some training entries may be used in reverse,
186186
# We still consider the kinetics to be directly dependent
187187
if 'surface' in source['Training'][0].lower():
188-
varlnk += self.dlnk_surf_training ** 2
188+
varlnk += self.dlnk_surf_training * self.dlnk_surf_training
189189
else:
190-
varlnk += self.dlnk_training ** 2
190+
varlnk += self.dlnk_training * self.dlnk_training
191191
elif 'Rate Rules' in source:
192192
family_label = source['Rate Rules'][0]
193193
source_dict = source['Rate Rules'][1]
194194
exact = source_dict['exact']
195195
rule_weights = [ruleTuple[-1] for ruleTuple in source_dict['rules']]
196196
training_weights = [trainingTuple[-1] for trainingTuple in source_dict['training']]
197197

198-
varlnk += self.dlnk_family ** 2
198+
varlnk += self.dlnk_family * self.dlnk_family
199199

200200
N = len(rule_weights) + len(training_weights)
201201
if 'node_std_dev' in source_dict:
@@ -219,18 +219,18 @@ def get_uncertainty_value(self, source):
219219
varlnk += (np.log10(N + 1) * self.dlnk_nonexact) ** 2
220220

221221
if 'surface' in family_label.lower():
222-
varlnk += np.sum([weight ** 2 * self.dlnk_surf_rule ** 2 for weight in rule_weights])
223-
varlnk += np.sum([weight ** 2 * self.dlnk_surf_training ** 2 for weight in training_weights])
222+
varlnk += np.sum([weight * weight * self.dlnk_surf_rule * self.dlnk_surf_rule for weight in rule_weights])
223+
varlnk += np.sum([weight * weight * self.dlnk_surf_training * self.dlnk_surf_training for weight in training_weights])
224224
else:
225225
# Add the contributions from rules
226-
varlnk += np.sum([weight ** 2 * self.dlnk_rule ** 2 for weight in rule_weights])
226+
varlnk += np.sum([weight * weight * self.dlnk_rule * self.dlnk_rule for weight in rule_weights])
227227
# Add the contributions from training
228228
# Even though these source from training reactions, we actually
229229
# use the uncertainty for rate rules, since these are now approximations
230230
# of the original reaction. We consider these to be independent of original the training
231231
# parameters because the rate rules may be reversing the training reactions,
232232
# which leads to more complicated dependence
233-
varlnk += np.sum([weight ** 2 * self.dlnk_rule ** 2 for weight in training_weights])
233+
varlnk += np.sum([weight * weight * self.dlnk_rule * self.dlnk_rule for weight in training_weights])
234234

235235
return np.sqrt(varlnk)
236236

0 commit comments

Comments (0)