Skip to content

Commit a8e1d16

Browse files
committed
uncertainty: change x**2 pattern to x*x for speedup
1 parent e56a97b commit a8e1d16

1 file changed

Lines changed: 20 additions & 20 deletions

File tree

rmgpy/tools/uncertainty.py

Lines changed: 20 additions & 20 deletions
Original file line numberDiff line numberDiff line change
@@ -64,18 +64,18 @@ def get_uncertainty_value(self, source):
6464
"""
6565
varG = 0.0
6666
if 'Library' in source:
67-
varG += self.dG_library ** 2
67+
varG += self.dG_library * self.dG_library
6868
if 'Surface_Library' in source:
69-
varG += self.dG_surf_lib ** 2
69+
varG += self.dG_surf_lib * self.dG_surf_lib
7070
if 'QM' in source:
71-
varG += self.dG_QM ** 2
71+
varG += self.dG_QM * self.dG_QM
7272
if 'GAV' in source:
73-
varG += self.dG_GAV ** 2 # Add a fixed uncertainty for the GAV method
73+
varG += self.dG_GAV * self.dG_GAV # Add a fixed uncertainty for the GAV method
7474
for group_type, group_entries in source['GAV'].items():
7575
group_weights = [groupTuple[-1] for groupTuple in group_entries]
76-
varG += np.sum([weight ** 2 * self.dG_group ** 2 for weight in group_weights])
76+
varG += np.sum([weight * weight * self.dG_group * self.dG_group for weight in group_weights])
7777
if 'ADS' in source:
78-
varG += self.dG_ADS_correction ** 2 # Add adsorption correction uncertainty
78+
varG += self.dG_ADS_correction * self.dG_ADS_correction # Add adsorption correction uncertainty
7979

8080
return np.sqrt(varG)
8181

@@ -173,29 +173,29 @@ def get_uncertainty_value(self, source):
173173
varlnk = 0.0
174174
if 'Library' in source:
175175
# Should be a single library reaction source
176-
varlnk += self.dlnk_library ** 2
176+
varlnk += self.dlnk_library * self.dlnk_library
177177
elif 'Surface_Library' in source:
178178
# Should be a single library reaction source
179-
varlnk += self.dlnk_surf_library ** 2
179+
varlnk += self.dlnk_surf_library * self.dlnk_surf_library
180180
elif 'PDep' in source:
181181
# Should be a single pdep reaction source
182-
varlnk += self.dlnk_pdep ** 2
182+
varlnk += self.dlnk_pdep * self.dlnk_pdep
183183
elif 'Training' in source:
184184
# Should be a single training reaction
185185
# Although some training entries may be used in reverse,
186186
# We still consider the kinetics to be directly dependent
187187
if 'surface' in source['Training'][0].lower():
188-
varlnk += self.dlnk_surf_training ** 2
188+
varlnk += self.dlnk_surf_training * self.dlnk_surf_training
189189
else:
190-
varlnk += self.dlnk_training ** 2
190+
varlnk += self.dlnk_training * self.dlnk_training
191191
elif 'Rate Rules' in source:
192192
family_label = source['Rate Rules'][0]
193193
source_dict = source['Rate Rules'][1]
194194
exact = source_dict['exact']
195195
rule_weights = [ruleTuple[-1] for ruleTuple in source_dict['rules']]
196196
training_weights = [trainingTuple[-1] for trainingTuple in source_dict['training']]
197197

198-
varlnk += self.dlnk_family ** 2
198+
varlnk += self.dlnk_family * self.dlnk_family
199199

200200
N = len(rule_weights) + len(training_weights)
201201
if 'node_std_dev' in source_dict:
@@ -211,26 +211,26 @@ def get_uncertainty_value(self, source):
211211
# every node template has its own fitted rate rule by definition, but here we use the
212212
# number of training reactions as an approximation of the node's specificity/generality
213213
# and add a penalty for being too general (large # of training reactions)
214-
varlnk += (np.log10(N + 1) * self.dlnk_nonexact) ** 2
214+
varlnk += (np.log10(N + 1) * self.dlnk_nonexact) * (np.log10(N + 1) * self.dlnk_nonexact)
215215
else:
216216
# Handle hand-made trees
217217
if not exact:
218218
# nonexactness contribution increases as N increases
219-
varlnk += (np.log10(N + 1) * self.dlnk_nonexact) ** 2
219+
varlnk += (np.log10(N + 1) * self.dlnk_nonexact) * (np.log10(N + 1) * self.dlnk_nonexact)
220220

221221
if 'surface' in family_label.lower():
222-
varlnk += np.sum([weight ** 2 * self.dlnk_surf_rule ** 2 for weight in rule_weights])
223-
varlnk += np.sum([weight ** 2 * self.dlnk_surf_training ** 2 for weight in training_weights])
222+
varlnk += np.sum([weight * weight * self.dlnk_surf_rule * self.dlnk_surf_rule for weight in rule_weights])
223+
varlnk += np.sum([weight * weight * self.dlnk_surf_training * self.dlnk_surf_training for weight in training_weights])
224224
else:
225225
# Add the contributions from rules
226-
varlnk += np.sum([weight ** 2 * self.dlnk_rule ** 2 for weight in rule_weights])
226+
varlnk += np.sum([weight * weight * self.dlnk_rule * self.dlnk_rule for weight in rule_weights])
227227
# Add the contributions from training
228228
# Even though these source from training reactions, we actually
229229
# use the uncertainty for rate rules, since these are now approximations
230230
# of the original reaction. We consider these to be independent of the original training
231231
# parameters because the rate rules may be reversing the training reactions,
232232
# which leads to more complicated dependence
233-
varlnk += np.sum([weight ** 2 * self.dlnk_rule ** 2 for weight in training_weights])
233+
varlnk += np.sum([weight * weight * self.dlnk_rule * self.dlnk_rule for weight in training_weights])
234234

235235
return np.sqrt(varlnk)
236236

@@ -896,9 +896,9 @@ def local_analysis(self, sensitive_species, reaction_system_index=0, correlated=
896896
# Compute total variance
897897
total_variance = 0.0
898898
for data in thermo_data_list:
899-
total_variance += (data.data[-1] * data.uncertainty) ** 2
899+
total_variance += (data.data[-1] * data.uncertainty) * (data.data[-1] * data.uncertainty)
900900
for data in reaction_data_list:
901-
total_variance += (data.data[-1] * data.uncertainty) ** 2
901+
total_variance += (data.data[-1] * data.uncertainty) * (data.data[-1] * data.uncertainty)
902902

903903
if not correlated:
904904
# Add the reaction index to the data label of the reaction uncertainties

0 commit comments

Comments
 (0)