Merged
74 commits
ec2e031
implement pytorch-exportable for se_e2_a descriptor
Feb 5, 2026
b8a48ff
better type for xp.zeros
Feb 5, 2026
1cc001f
implement env, base_descriptor and exclude_mask, remove the dependenc…
Feb 6, 2026
f2fbe88
mv to_torch_tensor to common
Feb 6, 2026
e2afbe9
simplify __init__ of the NaiveLayer
Feb 6, 2026
4ba511a
fix bug
Feb 6, 2026
fb9598a
fix bug
Feb 6, 2026
fa03351
simplify init method of se_e2_a descriptor. fix bug in consistent UT
Feb 6, 2026
09b33f1
restructure the test folders. add test_common.
Feb 6, 2026
67f2e54
add test_exclusion_mask.py
Feb 6, 2026
f7d83dd
fix potential import issue in test.
Feb 6, 2026
0c96bb6
correct __call__(). fix bug
Feb 6, 2026
9dca912
fix registration issue
Feb 6, 2026
17f0a5d
fix pt-expt file extension
Feb 6, 2026
8ce93ba
fix(pt): expansion of get_default_nthreads()
Feb 6, 2026
3091988
fix bug of intra-inter
Feb 6, 2026
85f0583
fix bug of default dp inter value
Feb 6, 2026
d33324d
fix cicd
Feb 6, 2026
4de9a56
feat: add support for se_r
Feb 6, 2026
f4dc0af
fix device of xp array
Feb 6, 2026
2384835
fix device of xp array
Feb 6, 2026
9646d71
revert extend_coord_with_ghosts
Feb 6, 2026
f270069
raise error for non-implemented methods
Feb 6, 2026
57433d3
restore import torch
Feb 6, 2026
eedcbaf
fix(pt,pt-expt): guard thread setters
Feb 6, 2026
d8b2cf4
make exclusion mask modules
Feb 6, 2026
aeef15a
fix(pt-expt): clear params on None
Feb 6, 2026
8bdb1f8
fix bug
Feb 7, 2026
d3b01da
utility to handle dpmodel -> pt_expt conversion
Feb 8, 2026
3452a2a
fix to_numpy_array device
Feb 8, 2026
ba8e7ab
chore(dpmodel,pt_expt): refactor the implementation of embedding net
Feb 8, 2026
621c7cc
feat: se_t and se_t_tebd descriptors for the pytorch exportable backend.
Feb 8, 2026
faa4026
fix bug
Feb 8, 2026
e263270
refact: fitting net
Feb 8, 2026
ea61141
fix bug
Feb 8, 2026
de8f156
merge master
Feb 8, 2026
ad83d98
Merge branch 'refact-auto-setattr' into refact-fitting-net
Feb 8, 2026
9311ed5
feat(pt_expt): add fitting
Feb 9, 2026
9472af7
merge master
Feb 10, 2026
6ef9cd8
merge with master
Feb 11, 2026
165d1df
fix the API consistency issue in descriptors
Feb 11, 2026
e76b702
feat: add stat for dpmodel's atomic model. implement atomic model for…
Feb 11, 2026
03974dd
merge master
Feb 12, 2026
4ae2726
feat: full energy model (but not exportable)
Feb 12, 2026
fb08ffc
add missing file
Feb 12, 2026
ed460a5
add missing file
Feb 12, 2026
f5171f2
merge master
Feb 12, 2026
a59c18d
fix test
Feb 12, 2026
d057ca1
fix test
Feb 12, 2026
b3d22da
fix test
Feb 12, 2026
d2e4faa
Merge branch 'feat-fitting' into feat-atomic-model
Feb 12, 2026
d094d21
merge with updated feat-atomic-model
Feb 12, 2026
a920ef6
use torch_module to simplify the def of modules.
Feb 12, 2026
56cbe2d
simplify three autograd to one by vmap, which was made impossible by jit
Feb 12, 2026
bcb4008
export forward_lower, but not successful
Feb 12, 2026
4437146
Merge remote-tracking branch 'upstream/master' into feat-atomic-model
Feb 13, 2026
33a9db3
Merge branch 'feat-atomic-model' into feat-full-model
Feb 13, 2026
d0e22d2
make forward_lower exportable
Feb 13, 2026
1f7bb6c
implement all EnergyModel APIs in pt but not in dpmodel. add a compre…
Feb 13, 2026
4be000c
Merge branch 'master' into feat-atomic-model
wanghan-iapcm Feb 13, 2026
2ea6b74
simplify the code
Feb 14, 2026
459afa2
Merge remote-tracking branch 'origin/feat-atomic-model' into feat-ato…
Feb 14, 2026
21077bc
fix bug
Feb 14, 2026
bdd015c
fix issues
Feb 14, 2026
1c1ea90
Merge branch 'feat-atomic-model' into feat-full-model
Feb 14, 2026
77609c1
more careful check on the compute_or_load_stat
Feb 14, 2026
1fa1eb2
merge master
Feb 14, 2026
b67accc
add guard for eval_descriptor and eval_fitting_last_layer
Feb 15, 2026
19df985
fix issues
Feb 15, 2026
fc0be62
remove eval_ hooks
Feb 15, 2026
c15212d
rm eval_return_middle_output
Feb 15, 2026
7e51e9d
change forward_lower to forward_lower_exportable
Feb 16, 2026
7add238
fix the squeeze issue for atomic virial, pt_expt backend only
Feb 16, 2026
3600076
fix squeeze bug
Feb 16, 2026
4 changes: 3 additions & 1 deletion deepmd/dpmodel/fitting/general_fitting.py
@@ -584,6 +584,7 @@ def _call_common(
)

# calculate the prediction
results: dict[str, Array] = {}
if not self.mixed_types:
outs = xp.zeros(
[nf, nloc, net_dim_out],
@@ -622,4 +623,5 @@ def _call_common(
exclude_mask = xp.astype(exclude_mask, xp.bool)
# nf x nloc x nod
outs = xp.where(exclude_mask[:, :, None], outs, xp.zeros_like(outs))
return {self.var_name: outs}
results[self.var_name] = outs
return results
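A minimal sketch of the masking step in this hunk, assuming NumPy as the array-API namespace and toy shapes; the `xp.where` call zeroes the outputs of excluded atoms:

```python
import numpy as np

# toy stand-ins: nf=1 frame, nloc=3 atoms, net_dim_out=2
outs = np.ones((1, 3, 2))
exclude_mask = np.array([[True, False, True]])  # atom 1 is excluded

# nf x nloc x nod: broadcast the mask over the output dimension
outs = np.where(exclude_mask[:, :, None], outs, np.zeros_like(outs))
print(outs[0, 1])  # [0. 0.] -- the excluded atom contributes nothing
```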
4 changes: 4 additions & 0 deletions deepmd/dpmodel/model/dp_model.py
@@ -48,3 +48,7 @@ def update_sel(
def get_fitting_net(self) -> BaseFitting:
"""Get the fitting network."""
return self.atomic_model.fitting

def get_descriptor(self) -> BaseDescriptor:
"""Get the descriptor."""
return self.atomic_model.descriptor
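A hypothetical usage sketch of the new accessor; `model` is an assumed, already-constructed dpmodel instance, not something defined in this diff:

```python
# Hypothetical: inspect the submodules of a built dpmodel energy model.
descriptor = model.get_descriptor()   # e.g. a se_e2_a descriptor
fitting = model.get_fitting_net()     # the fitting network
print(type(descriptor).__name__, type(fitting).__name__)
```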
25 changes: 25 additions & 0 deletions deepmd/dpmodel/model/ener_model.py
@@ -47,3 +47,28 @@ def atomic_output_def(self) -> FittingOutputDef:
if self._enable_hessian:
return self.hess_fitting_def
return super().atomic_output_def()

def translated_output_def(self) -> dict[str, Any]:
"""Get the translated output definition.

Maps internal output names to user-facing names, e.g.
``energy_redu`` -> ``energy``, ``energy_derv_r`` -> ``force``.
"""
out_def_data = self.model_output_def().get_data()
output_def = {
"atom_energy": out_def_data["energy"],
"energy": out_def_data["energy_redu"],
}
if self.do_grad_r("energy"):
output_def["force"] = out_def_data["energy_derv_r"]
output_def["force"].squeeze(-2)
if self.do_grad_c("energy"):
output_def["virial"] = out_def_data["energy_derv_c_redu"]
output_def["virial"].squeeze(-2)
output_def["atom_virial"] = out_def_data["energy_derv_c"]
output_def["atom_virial"].squeeze(-2)
if "mask" in out_def_data:
output_def["mask"] = out_def_data["mask"]
if self._enable_hessian:
output_def["hessian"] = out_def_data["energy_derv_r_derv_r"]
return output_def
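Read as a rename table, this maps raw output keys (`energy`, `energy_redu`, `energy_derv_r`, ...) to user-facing keys (`atom_energy`, `energy`, `force`, ...). A hedged sketch of how a caller might apply it; `model_ret` is an assumed raw output dict from `call()`, and the `.name` attribute on the returned definitions is an assumption:

```python
def translate_outputs(model, model_ret):
    """Rename raw model outputs (e.g. energy_redu -> energy) for end users."""
    translated = {}
    for user_key, var_def in model.translated_output_def().items():
        if var_def.name in model_ret:
            translated[user_key] = model_ret[var_def.name]
    return translated
```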
101 changes: 75 additions & 26 deletions deepmd/dpmodel/model/make_model.py
@@ -21,6 +21,7 @@
PRECISION_DICT,
RESERVED_PRECISION_DICT,
NativeOP,
get_xp_precision,
)
from deepmd.dpmodel.model.base_model import (
BaseModel,
@@ -103,7 +104,8 @@ def model_call_from_call_lower(
bb.reshape(nframes, 3, 3),
)
else:
coord_normalized = cc.copy()
xp = array_api_compat.array_namespace(cc)
coord_normalized = xp.reshape(cc, (nframes, nloc, 3))
extended_coord, extended_atype, mapping = extend_coord_with_ghosts(
coord_normalized, atype, bb, rcut
)
@@ -255,7 +257,7 @@ def call(
The keys are defined by the `ModelOutputDef`.

"""
cc, bb, fp, ap, input_prec = self.input_type_cast(
cc, bb, fp, ap, input_prec = self._input_type_cast(
coord, box=box, fparam=fparam, aparam=aparam
)
del coord, box, fparam, aparam
@@ -272,7 +274,7 @@ def call(
aparam=ap,
do_atomic_virial=do_atomic_virial,
)
model_predict = self.output_type_cast(model_predict, input_prec)
model_predict = self._output_type_cast(model_predict, input_prec)
return model_predict

def call_lower(
@@ -321,7 +323,7 @@ def call_lower(
nlist,
extra_nlist_sort=self.need_sorted_nlist_for_lower(),
)
cc_ext, _, fp, ap, input_prec = self.input_type_cast(
cc_ext, _, fp, ap, input_prec = self._input_type_cast(
extended_coord, fparam=fparam, aparam=aparam
)
del extended_coord, fparam, aparam
@@ -334,7 +336,7 @@ def call_lower(
aparam=ap,
do_atomic_virial=do_atomic_virial,
)
model_predict = self.output_type_cast(model_predict, input_prec)
model_predict = self._output_type_cast(model_predict, input_prec)
return model_predict

def forward_common_atomic(
Expand Down Expand Up @@ -364,60 +366,107 @@ def forward_common_atomic(
)

forward_lower = call_lower
forward_common = call
forward_common_lower = call_lower

def input_type_cast(
def get_out_bias(self) -> Array:
"""Get the output bias."""
return self.atomic_model.out_bias

def set_out_bias(self, out_bias: Array) -> None:
"""Set the output bias."""
self.atomic_model.out_bias = out_bias

def change_out_bias(
self,
merged: Any,
bias_adjust_mode: str = "change-by-statistic",
) -> None:
"""Change the output bias according to the input data and the pretrained model.

Parameters
----------
merged
The merged data samples.
bias_adjust_mode : str
The mode for changing output bias:
'change-by-statistic' or 'set-by-statistic'.
"""
self.atomic_model.change_out_bias(merged, bias_adjust_mode=bias_adjust_mode)

def _input_type_cast(
self,
coord: Array,
box: Array | None = None,
fparam: Array | None = None,
aparam: Array | None = None,
) -> tuple[Array, Array, np.ndarray | None, np.ndarray | None, str]:
) -> tuple[Array, Array | None, Array | None, Array | None, Any]:
"""Cast the input data to global float type."""
input_prec = RESERVED_PRECISION_DICT[self.precision_dict[coord.dtype.name]]
xp = array_api_compat.array_namespace(coord)
input_dtype = coord.dtype
global_dtype = get_xp_precision(
xp, RESERVED_PRECISION_DICT[self.global_np_float_precision]
)
###
### type checking would not pass jit, convert to coord prec anyway
###
_lst: list[np.ndarray | None] = [
vv.astype(coord.dtype) if vv is not None else None
_lst: list[Array | None] = [
xp.astype(vv, input_dtype) if vv is not None else None
for vv in [box, fparam, aparam]
]
box, fparam, aparam = _lst
if input_prec == RESERVED_PRECISION_DICT[self.global_np_float_precision]:
return coord, box, fparam, aparam, input_prec
if input_dtype == global_dtype:
return coord, box, fparam, aparam, input_dtype
else:
pp = self.global_np_float_precision
return (
coord.astype(pp),
box.astype(pp) if box is not None else None,
fparam.astype(pp) if fparam is not None else None,
aparam.astype(pp) if aparam is not None else None,
input_prec,
xp.astype(coord, global_dtype),
xp.astype(box, global_dtype) if box is not None else None,
xp.astype(fparam, global_dtype) if fparam is not None else None,
xp.astype(aparam, global_dtype) if aparam is not None else None,
input_dtype,
)

def output_type_cast(
def _output_type_cast(
self,
model_ret: dict[str, Array],
input_prec: str,
input_prec: Any,
) -> dict[str, Array]:
"""Convert the model output to the input prec."""
do_cast = (
input_prec != RESERVED_PRECISION_DICT[self.global_np_float_precision]
"""Convert the model output to the input prec.

Parameters
----------
model_ret
The model output.
input_prec
The input dtype returned by ``_input_type_cast``.
"""
model_ret_not_none = [vv for vv in model_ret.values() if vv is not None]
if not model_ret_not_none:
return model_ret
xp = array_api_compat.array_namespace(model_ret_not_none[0])
global_dtype = get_xp_precision(
xp, RESERVED_PRECISION_DICT[self.global_np_float_precision]
)
ener_dtype = get_xp_precision(
xp, RESERVED_PRECISION_DICT[self.global_ener_float_precision]
)
pp = self.precision_dict[input_prec]
do_cast = input_prec != global_dtype
odef = self.model_output_def()
for kk in odef.keys():
if kk not in model_ret.keys():
# do not return energy_derv_c if not do_atomic_virial
continue
if check_operation_applied(odef[kk], OutputVariableOperation.REDU):
model_ret[kk] = (
model_ret[kk].astype(self.global_ener_float_precision)
xp.astype(model_ret[kk], ener_dtype)
if model_ret[kk] is not None
else None
)
elif do_cast:
model_ret[kk] = (
model_ret[kk].astype(pp) if model_ret[kk] is not None else None
xp.astype(model_ret[kk], input_prec)
if model_ret[kk] is not None
else None
)
return model_ret

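The casting refactor above replaces string-keyed precision bookkeeping (`input_prec` as a name in `RESERVED_PRECISION_DICT`) with actual array-API dtypes obtained from `get_xp_precision`. A minimal self-contained sketch of the same round-trip pattern, with NumPy as the namespace and a plain attribute lookup standing in for `get_xp_precision`:

```python
import array_api_compat
import numpy as np

GLOBAL_PREC = "float64"  # stand-in for the global precision setting

def input_type_cast(coord):
    """Cast input up to the global dtype, remembering the caller's dtype."""
    xp = array_api_compat.array_namespace(coord)
    input_dtype = coord.dtype
    global_dtype = getattr(xp, GLOBAL_PREC)  # stand-in for get_xp_precision
    if input_dtype == global_dtype:
        return coord, input_dtype
    return xp.astype(coord, global_dtype), input_dtype

def output_type_cast(out, input_dtype):
    """Cast output back to the caller's dtype."""
    xp = array_api_compat.array_namespace(out)
    return xp.astype(out, input_dtype)

coord32 = np.zeros((1, 4, 3), dtype=np.float32)
cc, prec = input_type_cast(coord32)       # computation happens in float64
out = output_type_cast(cc * 2.0, prec)    # result handed back in float32
print(out.dtype)  # float32
```

Comparing dtype objects directly, rather than dtype-name strings, is what lets the same code path survive tracing, consistent with the "type checking would not pass jit" comment in the hunk above.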
14 changes: 11 additions & 3 deletions deepmd/dpmodel/model/transform_output.py
@@ -98,6 +98,7 @@ def communicate_extended_output(

"""
xp = array_api_compat.get_namespace(mapping)
device = array_api_compat.device(mapping)
mapping_ = mapping
new_ret = {}
for kk in model_output_def.keys_outp():
@@ -117,7 +118,9 @@
mapping, tuple(mldims + [1] * len(derv_r_ext_dims))
)
mapping = xp.tile(mapping, [1] * len(mldims) + derv_r_ext_dims)
force = xp.zeros(vldims + derv_r_ext_dims, dtype=vv.dtype)
force = xp.zeros(
vldims + derv_r_ext_dims, dtype=vv.dtype, device=device
)
force = xp_scatter_sum(
force,
1,
@@ -149,7 +152,9 @@
nall = hess_1.shape[1]
# (1) -> [nf, nloc1, nall2, *def, 3(1), 3(2)]
hessian1 = xp.zeros(
[*vldims, nall, *vdef.shape, 3, 3], dtype=vv.dtype
[*vldims, nall, *vdef.shape, 3, 3],
dtype=vv.dtype,
device=device,
)
mapping_hess = xp.reshape(
mapping_, (mldims + [1] * (len(vdef.shape) + 3))
@@ -172,7 +177,9 @@
nloc = hessian1.shape[2]
# (2) -> [nf, nloc2, nloc1, *def, 3(1), 3(2)]
hessian = xp.zeros(
[*vldims, nloc, *vdef.shape, 3, 3], dtype=vv.dtype
[*vldims, nloc, *vdef.shape, 3, 3],
dtype=vv.dtype,
device=device,
)
mapping_hess = xp.reshape(
mapping_, (mldims + [1] * (len(vdef.shape) + 3))
@@ -218,6 +225,7 @@ def communicate_extended_output(
virial = xp.zeros(
vldims + derv_c_ext_dims,
dtype=vv.dtype,
device=device,
)
virial = xp_scatter_sum(
virial,
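Each `xp.zeros` in this file now inherits its device from the `mapping` input via `array_api_compat.device`; a minimal sketch of the pattern (NumPy reports `"cpu"`, while torch/JAX namespaces would return their native device objects):

```python
import array_api_compat
import numpy as np

mapping = np.zeros((2, 5), dtype=np.int64)
xp = array_api_compat.array_namespace(mapping)
device = array_api_compat.device(mapping)

# Allocate the accumulation buffer on the same device as the inputs, so GPU
# backends do not silently place it on the default device instead.
force = xp.zeros((2, 5, 3), dtype=xp.float64, device=device)
print(force.shape, array_api_compat.device(force))
```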
6 changes: 3 additions & 3 deletions deepmd/dpmodel/utils/network.py
Original file line number Diff line number Diff line change
@@ -280,11 +280,11 @@ def call(self, x): # noqa: ANN001, ANN201
y = xp.astype(y, x.dtype)
y = fn(y)
if self.idt is not None:
y *= self.idt
y = y * self.idt
if self.resnet and self.w.shape[1] == self.w.shape[0]:
y += x
y = y + x
elif self.resnet and self.w.shape[1] == 2 * self.w.shape[0]:
y += xp.concat([x, x], axis=-1)
y = y + xp.concat([x, x], axis=-1)
return y


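Swapping the augmented assignments (`y *= ...`, `y += ...`) for out-of-place forms keeps `call` purely functional; a plausible motivation, given the pt-expt work in this PR, is that in-place mutation is hostile to `torch.export`-style tracing and to immutable array backends. A tiny sketch of the behavioral difference under torch (an assumption; this file itself is backend-agnostic):

```python
import torch

x = torch.ones(4)
idt = torch.full((4,), 0.5)

y = x.clone()
y *= idt      # in-place: mutates existing storage
z = x * idt   # out-of-place: allocates a new tensor, leaves x untouched

assert torch.equal(y, z) and torch.equal(x, torch.ones(4))
```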
4 changes: 2 additions & 2 deletions deepmd/pd/model/model/ener_model.py
@@ -60,7 +60,7 @@ def translated_output_def(self) -> dict:
output_def["virial"] = out_def_data["energy_derv_c_redu"]
output_def["virial"].squeeze(-2)
output_def["atom_virial"] = out_def_data["energy_derv_c"]
output_def["atom_virial"].squeeze(-3)
output_def["atom_virial"].squeeze(-2)
if "mask" in out_def_data:
output_def["mask"] = out_def_data["mask"]
return output_def
@@ -140,7 +140,7 @@ def forward_lower(
if do_atomic_virial:
model_predict["extended_virial"] = model_ret[
"energy_derv_c"
].squeeze(-3)
].squeeze(-2)
else:
model_predict["extended_virial"] = paddle.zeros(
[model_predict["energy"].shape[0], 1, 9], dtype=paddle.float64
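The axis fix matters because the atomic-virial output carries a singleton output-definition axis just before the trailing virial components, so `-2` is the axis to drop. A hedged shape sketch; the `[nf, nall, 1, 9]` layout is an assumption consistent with the `squeeze(-2)` calls elsewhere in this diff:

```python
import numpy as np

# assumed layout: [nf, nall, 1, 9] -- frames, atoms, singleton def axis, 9 virial comps
atom_virial = np.zeros((2, 5, 1, 9))

print(np.squeeze(atom_virial, axis=-2).shape)  # (2, 5, 9): drops the right axis
# np.squeeze(atom_virial, axis=-3) would raise here, since the atom axis is not 1
```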
12 changes: 6 additions & 6 deletions deepmd/pd/model/model/make_model.py
@@ -162,7 +162,7 @@ def forward_common(
The keys are defined by the `ModelOutputDef`.

"""
cc, bb, fp, ap, input_prec = self.input_type_cast(
cc, bb, fp, ap, input_prec = self._input_type_cast(
coord, box=box, fparam=fparam, aparam=aparam
)
del coord, box, fparam, aparam
@@ -196,7 +196,7 @@ def forward_common(
mapping,
do_atomic_virial=do_atomic_virial,
)
model_predict = self.output_type_cast(model_predict, input_prec)
model_predict = self._output_type_cast(model_predict, input_prec)
return model_predict

def get_out_bias(self) -> paddle.Tensor:
Expand Down Expand Up @@ -283,7 +283,7 @@ def forward_common_lower(
nlist = self.format_nlist(
extended_coord, extended_atype, nlist, extra_nlist_sort=extra_nlist_sort
)
cc_ext, _, fp, ap, input_prec = self.input_type_cast(
cc_ext, _, fp, ap, input_prec = self._input_type_cast(
extended_coord, fparam=fparam, aparam=aparam
)
del extended_coord, fparam, aparam
@@ -303,10 +303,10 @@
do_atomic_virial=do_atomic_virial,
create_graph=self.training,
)
model_predict = self.output_type_cast(model_predict, input_prec)
model_predict = self._output_type_cast(model_predict, input_prec)
return model_predict

def input_type_cast(
def _input_type_cast(
self,
coord: paddle.Tensor,
box: paddle.Tensor | None = None,
@@ -351,7 +351,7 @@ def input_type_cast(
input_prec,
)

def output_type_cast(
def _output_type_cast(
self,
model_ret: dict[str, paddle.Tensor],
input_prec: str,
10 changes: 10 additions & 0 deletions deepmd/pt/model/atomic_model/dp_atomic_model.py
@@ -83,6 +83,11 @@ def set_eval_descriptor_hook(self, enable: bool) -> None:

def eval_descriptor(self) -> torch.Tensor:
"""Evaluate the descriptor."""
if not self.eval_descriptor_list:
raise RuntimeError(
"eval_descriptor_list is empty. "
"Call set_eval_descriptor_hook(True) and perform a forward pass first."
)
return torch.concat(self.eval_descriptor_list)

def set_eval_fitting_last_layer_hook(self, enable: bool) -> None:
@@ -94,6 +99,11 @@ def set_eval_fitting_last_layer_hook(self, enable: bool) -> None:

def eval_fitting_last_layer(self) -> torch.Tensor:
"""Evaluate the fitting last layer output."""
if not self.eval_fitting_last_layer_list:
raise RuntimeError(
"eval_fitting_last_layer_list is empty. "
"Call set_eval_fitting_last_layer_hook(True) and perform a forward pass first."
)
return torch.concat(self.eval_fitting_last_layer_list)

@torch.jit.export
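A hypothetical usage sketch of the guarded hook protocol; the model construction and the input tensors are assumptions, only the hook methods come from the code above:

```python
# Hypothetical: `model` is a pt DPAtomicModel; coord_ext, atype_ext, nlist
# are prepared extended coordinates, atom types, and neighbor list.
model.set_eval_descriptor_hook(True)
model.forward_common_atomic(coord_ext, atype_ext, nlist)
descriptors = model.eval_descriptor()  # concatenation of recorded descriptors
model.set_eval_descriptor_hook(False)

# Calling eval_descriptor() before any hooked forward pass now raises a clear
# RuntimeError instead of failing inside torch.concat on an empty list.
```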