Commit fb2027b

Author: Han Wang
fix: find_* defaults 1.0→0.0 to match PT, fix ruff RUF059 unused vars
- Change find_* label defaults from 1.0 to 0.0 in dpmodel losses (ener_spin, dos, tensor) to match PT behavior: missing labels should zero out the contribution, not assume they exist.
- Prefix unused unpacked variables with _ to satisfy ruff RUF059.
1 parent ca8418b · commit fb2027b

9 files changed: 24 additions & 24 deletions
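
Why the default matters: the training data pipeline sets a find_* flag to 1.0 when a label is actually present and ships a zero-filled placeholder (with the flag at 0.0 or absent) when it is not; each loss term multiplies its prefactor by that flag. A minimal sketch of the pattern with illustrative names (the real terms live in the losses' call methods):

    import numpy as np

    def energy_term(pref_e, model_dict, label_dict):
        # Defaulting the lookup to 0.0 masks the term out whenever the
        # flag is missing; the old 1.0 default would have trained against
        # the zero-filled placeholder label instead.
        find_energy = label_dict.get("find_energy", 0.0)
        diff = model_dict["energy"] - label_dict["energy"]
        return pref_e * find_energy * np.mean(diff**2)

    # A frame whose data set never supplied an energy label now
    # contributes exactly 0.0 to the loss:
    energy_term(1.0, {"energy": np.array([1.2])}, {"energy": np.zeros(1)})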

deepmd/dpmodel/loss/dos.py

Lines changed: 4 additions & 4 deletions
@@ -124,7 +124,7 @@ def call(
         more_loss = {}

         if self.has_ados and "atom_dos" in model_dict and "atom_dos" in label_dict:
-            find_local = label_dict.get("find_atom_dos", 1.0)
+            find_local = label_dict.get("find_atom_dos", 0.0)
             pref_ados = pref_ados * find_local
             local_pred = xp.reshape(model_dict["atom_dos"], (-1, natoms, self.numb_dos))
             local_label = xp.reshape(
@@ -145,7 +145,7 @@ def call(
         )

         if self.has_acdf and "atom_dos" in model_dict and "atom_dos" in label_dict:
-            find_local = label_dict.get("find_atom_dos", 1.0)
+            find_local = label_dict.get("find_atom_dos", 0.0)
             pref_acdf = pref_acdf * find_local
             local_pred_cdf = xp.cumulative_sum(
                 xp.reshape(model_dict["atom_dos"], (-1, natoms, self.numb_dos)),
@@ -170,7 +170,7 @@ def call(
         )

         if self.has_dos and "dos" in model_dict and "dos" in label_dict:
-            find_global = label_dict.get("find_dos", 1.0)
+            find_global = label_dict.get("find_dos", 0.0)
             pref_dos = pref_dos * find_global
             global_pred = xp.reshape(model_dict["dos"], (-1, self.numb_dos))
             global_label = xp.reshape(label_dict["dos"], (-1, self.numb_dos))
@@ -190,7 +190,7 @@ def call(
         )

         if self.has_cdf and "dos" in model_dict and "dos" in label_dict:
-            find_global = label_dict.get("find_dos", 1.0)
+            find_global = label_dict.get("find_dos", 0.0)
             pref_cdf = pref_cdf * find_global
             global_pred_cdf = xp.cumulative_sum(
                 xp.reshape(model_dict["dos"], (-1, self.numb_dos)), axis=-1

deepmd/dpmodel/loss/ener_spin.py

Lines changed: 5 additions & 5 deletions
@@ -121,7 +121,7 @@ def call(
         if self.has_e:
             energy_pred = model_dict["energy"]
             energy_label = label_dict["energy"]
-            find_energy = label_dict.get("find_energy", 1.0)
+            find_energy = label_dict.get("find_energy", 0.0)
             pref_e = pref_e * find_energy
             if self.enable_atom_ener_coeff and "atom_energy" in model_dict:
                 atom_ener_pred = model_dict["atom_energy"]
@@ -148,7 +148,7 @@ def call(
             more_loss["mae_e_all"] = self.display_if_exist(mae_e_all, find_energy)

         if self.has_fr:
-            find_force = label_dict.get("find_force", 1.0)
+            find_force = label_dict.get("find_force", 0.0)
             pref_fr = pref_fr * find_force
             force_pred = model_dict["force"]
             force_label = label_dict["force"]
@@ -173,7 +173,7 @@ def call(
             )

         if self.has_fm:
-            find_force_mag = label_dict.get("find_force_mag", 1.0)
+            find_force_mag = label_dict.get("find_force_mag", 0.0)
             pref_fm = pref_fm * find_force_mag
             force_mag_pred = model_dict["force_mag"]
             force_mag_label = label_dict["force_mag"]
@@ -207,7 +207,7 @@ def call(
             )

         if self.has_ae:
-            find_atom_ener = label_dict.get("find_atom_ener", 1.0)
+            find_atom_ener = label_dict.get("find_atom_ener", 0.0)
             pref_ae = pref_ae * find_atom_ener
             atom_ener = model_dict["atom_energy"]
             atom_ener_label = label_dict["atom_ener"]
@@ -231,7 +231,7 @@ def call(
             )

         if self.has_v:
-            find_virial = label_dict.get("find_virial", 1.0)
+            find_virial = label_dict.get("find_virial", 0.0)
             pref_v = pref_v * find_virial
             virial_pred = xp.reshape(model_dict["virial"], (-1, 9))
             virial_label = label_dict["virial"]
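
Note the display_if_exist call in the first hunk's context: the same find_energy flag also gates the logged metric. A plausible minimal implementation of that helper, stated as an assumption (the helper itself is not part of this diff):

    def display_if_exist(self, loss, find):
        # Report the metric only when the label was actually present
        # (find nonzero); otherwise return NaN so displays can skip it.
        return loss if find != 0.0 else float("nan")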

deepmd/dpmodel/loss/tensor.py

Lines changed: 2 additions & 2 deletions
@@ -92,7 +92,7 @@ def call(
             and self.tensor_name in model_dict
             and "atom_" + self.label_name in label_dict
         ):
-            find_local = label_dict.get("find_atom_" + self.label_name, 1.0)
+            find_local = label_dict.get("find_atom_" + self.label_name, 0.0)
             local_weight = self.local_weight * find_local
             local_pred = xp.reshape(
                 model_dict[self.tensor_name], (-1, natoms, self.tensor_size)
@@ -120,7 +120,7 @@ def call(
             and "global_" + self.tensor_name in model_dict
             and self.label_name in label_dict
         ):
-            find_global = label_dict.get("find_" + self.label_name, 1.0)
+            find_global = label_dict.get("find_" + self.label_name, 0.0)
             global_weight = self.global_weight * find_global
             global_pred = xp.reshape(
                 model_dict["global_" + self.tensor_name], (-1, self.tensor_size)

source/tests/consistent/loss/test_ener.py

Lines changed: 5 additions & 5 deletions
@@ -91,13 +91,13 @@ def data(self) -> dict:

     @property
     def skip_tf(self) -> bool:
-        (use_huber, enable_atom_ener_coeff, loss_func, f_use_norm, _mae) = self.param
+        (_use_huber, _enable_atom_ener_coeff, loss_func, f_use_norm, _mae) = self.param
         # Skip TF for MAE loss tests (not implemented in TF backend)
         return CommonTest.skip_tf or loss_func == "mae" or f_use_norm

     @property
     def skip_pd(self) -> bool:
-        (use_huber, enable_atom_ener_coeff, loss_func, f_use_norm, _mae) = self.param
+        (_use_huber, _enable_atom_ener_coeff, loss_func, f_use_norm, _mae) = self.param
         # Skip Paddle for MAE loss tests (not implemented in Paddle backend)
         return not INSTALLED_PD or loss_func == "mae" or f_use_norm

@@ -116,7 +116,7 @@ def skip_pd(self) -> bool:
     args = loss_ener()

     def setUp(self) -> None:
-        (use_huber, enable_atom_ener_coeff, loss_func, f_use_norm, mae) = self.param
+        (use_huber, _enable_atom_ener_coeff, loss_func, f_use_norm, mae) = self.param
         # Skip invalid combinations
         if f_use_norm and not (use_huber or loss_func == "mae"):
             self.skipTest("f_use_norm requires either use_huber or loss_func='mae'")
@@ -189,7 +189,7 @@ def build_tf(self, obj: Any, suffix: str) -> tuple[list, dict]:
             for kk, vv in self.label.items()
         }

-        loss, more_loss = obj.build(
+        loss, _more_loss = obj.build(
             self.learning_rate,
             [self.natoms],
             predict,
@@ -424,7 +424,7 @@ def build_tf(self, obj: Any, suffix: str) -> tuple[list, dict]:
             for kk, vv in self.label.items()
         }

-        loss, more_loss = obj.build(
+        loss, _more_loss = obj.build(
             self.learning_rate,
             [self.natoms],
             predict,
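
The test-side changes address ruff's RUF059, which flags variables bound by tuple unpacking but never read. The idiomatic fix, applied throughout these tests, is an underscore prefix marking the binding as deliberate:

    # Flagged: use_huber and enable_atom_ener_coeff are unpacked but unused.
    (use_huber, enable_atom_ener_coeff, loss_func, f_use_norm, _mae) = self.param
    # Fixed: the leading underscore silences RUF059 without changing behavior.
    (_use_huber, _enable_atom_ener_coeff, loss_func, f_use_norm, _mae) = self.param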

source/tests/pt_expt/loss/test_dos.py

Lines changed: 2 additions & 2 deletions
@@ -122,7 +122,7 @@ def test_consistency(self, prec, has_dos, has_ados) -> None:
             k: v.detach().cpu().numpy() if isinstance(v, torch.Tensor) else v
             for k, v in label.items()
         }
-        l_dp, more_dp = dp_loss(learning_rate, natoms, model_pred_np, label_np)
+        l_dp, _more_dp = dp_loss(learning_rate, natoms, model_pred_np, label_np)

         np.testing.assert_allclose(
             l0.detach().cpu().numpy(),
@@ -158,7 +158,7 @@ def test_cdf_terms(self, prec) -> None:
             rng, nframes, natoms, numb_dos, dtype, self.device
         )

-        l0, more0 = loss0(learning_rate, natoms, model_pred, label)
+        l0, _more0 = loss0(learning_rate, natoms, model_pred, label)
         assert l0.shape == ()

         # Compare with dpmodel

source/tests/pt_expt/loss/test_ener.py

Lines changed: 1 addition & 1 deletion
@@ -133,7 +133,7 @@ def test_consistency(self, prec, use_huber) -> None:
             k: v.detach().cpu().numpy() if isinstance(v, torch.Tensor) else v
             for k, v in label.items()
         }
-        l_dp, more_dp = dp_loss(learning_rate, natoms, model_pred_np, label_np)
+        l_dp, _more_dp = dp_loss(learning_rate, natoms, model_pred_np, label_np)

         np.testing.assert_allclose(
             l0.detach().cpu().numpy(),

source/tests/pt_expt/loss/test_ener_spin.py

Lines changed: 2 additions & 2 deletions
@@ -141,7 +141,7 @@ def test_consistency(self, prec, loss_func) -> None:
             k: v.detach().cpu().numpy() if isinstance(v, torch.Tensor) else v
             for k, v in label.items()
         }
-        l_dp, more_dp = dp_loss(learning_rate, natoms, model_pred_np, label_np)
+        l_dp, _more_dp = dp_loss(learning_rate, natoms, model_pred_np, label_np)

         np.testing.assert_allclose(
             l0.detach().cpu().numpy(),
@@ -229,7 +229,7 @@ def test_all_masked(self, prec) -> None:
             rng, nframes, natoms, n_magnetic, dtype, self.device
         )

-        l0, more0 = loss0(learning_rate, natoms, model_pred, label)
+        l0, _more0 = loss0(learning_rate, natoms, model_pred, label)
         assert l0.shape == ()

         # Compare with dpmodel

source/tests/pt_expt/loss/test_property.py

Lines changed: 1 addition & 1 deletion
@@ -110,7 +110,7 @@ def test_consistency(self, prec, loss_func) -> None:
         dp_loss = PropertyLossDP.deserialize(loss0.serialize())
         model_pred_np = {k: v.detach().cpu().numpy() for k, v in model_pred.items()}
         label_np = {k: v.detach().cpu().numpy() for k, v in label.items()}
-        l_dp, more_dp = dp_loss(learning_rate, natoms, model_pred_np, label_np)
+        l_dp, _more_dp = dp_loss(learning_rate, natoms, model_pred_np, label_np)

         # Use relative tolerance: extreme out_std values (e.g. 0.001) can produce
         # large loss values where torch/numpy accumulation order differs at machine epsilon.

source/tests/pt_expt/loss/test_tensor.py

Lines changed: 2 additions & 2 deletions
@@ -129,7 +129,7 @@ def test_consistency(self, prec, has_local, has_global) -> None:
             k: v.detach().cpu().numpy() if isinstance(v, torch.Tensor) else v
             for k, v in label.items()
         }
-        l_dp, more_dp = dp_loss(learning_rate, natoms, model_pred_np, label_np)
+        l_dp, _more_dp = dp_loss(learning_rate, natoms, model_pred_np, label_np)

         np.testing.assert_allclose(
             l0.detach().cpu().numpy(),
@@ -174,7 +174,7 @@ def test_with_atomic_weight(self, prec) -> None:
             rng.random((nframes, natoms)), dtype=dtype, device=self.device
         )

-        l0, more0 = loss0(learning_rate, natoms, model_pred, label)
+        l0, _more0 = loss0(learning_rate, natoms, model_pred, label)
         assert l0.shape == ()

         # Compare with dpmodel
