Skip to content

Commit 2c7fee9

Browse files
Fix bug: erroneous intensive property output when using virtual atoms
1 parent 98fb397 commit 2c7fee9

3 files changed

Lines changed: 25 additions & 1 deletion

File tree

deepmd/pt/model/model/make_model.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -299,6 +299,7 @@ def forward_common_lower(
299299
cc_ext,
300300
do_atomic_virial=do_atomic_virial,
301301
create_graph=self.training,
302+
mask=atomic_ret["mask"] if "mask" in atomic_ret else None,
302303
)
303304
model_predict = self.output_type_cast(model_predict, input_prec)
304305
return model_predict

deepmd/pt/model/model/transform_output.py

Lines changed: 6 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -158,6 +158,7 @@ def fit_output_to_model_output(
158158
coord_ext: torch.Tensor,
159159
do_atomic_virial: bool = False,
160160
create_graph: bool = True,
161+
mask: Optional[torch.Tensor] = None,
161162
) -> dict[str, torch.Tensor]:
162163
"""Transform the output of the fitting network to
163164
the model output.
@@ -172,7 +173,11 @@ def fit_output_to_model_output(
172173
if vdef.reducible:
173174
kk_redu = get_reduce_name(kk)
174175
if vdef.intensive:
175-
model_ret[kk_redu] = torch.mean(vv.to(redu_prec), dim=atom_axis)
176+
if (mask is not None) and (mask == 0.0).any(): # containing padding atoms
177+
mask = mask.to(dtype=torch.bool, device=vv.device) # [nbz, nreal+npadding]
178+
model_ret[kk_redu] = torch.stack([torch.mean(vv[ii].to(redu_prec)[mask[ii]], dim=atom_axis) for ii in range(mask.size(0))])
179+
else:
180+
model_ret[kk_redu] = torch.mean(vv.to(redu_prec), dim=atom_axis)
176181
else:
177182
model_ret[kk_redu] = torch.sum(vv.to(redu_prec), dim=atom_axis)
178183
if vdef.r_differentiable:

source/tests/pt/test_dp_test.py

Lines changed: 18 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -13,6 +13,7 @@
1313

1414
import numpy as np
1515
import torch
16+
import torch.nn.functional as F
1617

1718
from deepmd.entrypoints.test import test as dp_test
1819
from deepmd.pt.entrypoints.main import (
@@ -186,6 +187,23 @@ def test_dp_test_1_frame(self) -> None:
186187
to_numpy_array(result[model.get_var_name()])[0],
187188
)
188189

190+
def test_dp_test_padding_atoms(self) -> None:
191+
trainer = get_trainer(deepcopy(self.config))
192+
with torch.device("cpu"):
193+
input_dict, label_dict, _ = trainer.get_data(is_train=False)
194+
input_dict.pop("spin", None)
195+
result = trainer.model(**input_dict)
196+
padding_atoms_list = [1, 5, 10]
197+
for padding_atoms in padding_atoms_list:
198+
input_dict_padding = deepcopy(input_dict)
199+
input_dict_padding["atype"] = F.pad(input_dict_padding["atype"], (0, padding_atoms), value=-1)
200+
input_dict_padding["coord"] = F.pad(input_dict_padding["coord"], (0, 0, 0, padding_atoms, 0, 0), value=0)
201+
result_padding = trainer.model(**input_dict_padding)
202+
np.testing.assert_equal(
203+
to_numpy_array(result[trainer.model.get_var_name()])[0],
204+
to_numpy_array(result_padding[trainer.model.get_var_name()])[0],
205+
)
206+
189207
def tearDown(self) -> None:
190208
for f in os.listdir("."):
191209
if f.startswith("model") and f.endswith(".pt"):

0 commit comments

Comments (0)