Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
42 commits
Select commit Hold shift + click to select a range
ec2e031
implement pytorch-exportable for se_e2_a descriptor
Feb 5, 2026
b8a48ff
better type for xp.zeros
Feb 5, 2026
1cc001f
implement env, base_descriptor and exclude_mask, remove the dependenc…
Feb 6, 2026
f2fbe88
mv to_torch_tensor to common
Feb 6, 2026
e2afbe9
simplify __init__ of the NaiveLayer
Feb 6, 2026
4ba511a
fix bug
Feb 6, 2026
fb9598a
fix bug
Feb 6, 2026
fa03351
simplify init method of se_e2_a descriptor. fix bug in consistent UT
Feb 6, 2026
09b33f1
restructure the test folders. add test_common.
Feb 6, 2026
67f2e54
add test_exclusion_mask.py
Feb 6, 2026
f7d83dd
fix potential import issue in test.
Feb 6, 2026
0c96bb6
correct __call__(). fix bug
Feb 6, 2026
9dca912
fix registration issue
Feb 6, 2026
17f0a5d
fix pt-expt file extension
Feb 6, 2026
8ce93ba
fix(pt): expansion of get_default_nthreads()
Feb 6, 2026
3091988
fix bug of intra-inter
Feb 6, 2026
85f0583
fix bug of default dp inter value
Feb 6, 2026
d33324d
fix cicd
Feb 6, 2026
4de9a56
feat: add support for se_r
Feb 6, 2026
f4dc0af
fix device of xp array
Feb 6, 2026
2384835
fix device of xp array
Feb 6, 2026
9646d71
revert extend_coord_with_ghosts
Feb 6, 2026
f270069
raise error for non-implemented methods
Feb 6, 2026
57433d3
restore import torch
Feb 6, 2026
eedcbaf
fix(pt,pt-expt): guard thread setters
Feb 6, 2026
d8b2cf4
make exclusion mask modules
Feb 6, 2026
aeef15a
fix(pt-expt): clear params on None
Feb 6, 2026
8bdb1f8
fix bug
Feb 7, 2026
d3b01da
utility to handle dpmodel -> pt_expt conversion
Feb 8, 2026
3452a2a
fix to_numpy_array device
Feb 8, 2026
ba8e7ab
chore(dpmodel,pt_expt): refactorize the implementation of embedding net
Feb 8, 2026
621c7cc
feat: se_t and se_t_tebd descriptors for the pytorch exportable backend.
Feb 8, 2026
faa4026
fix bug
Feb 8, 2026
e263270
refact: fitting net
Feb 8, 2026
ea61141
fix bug
Feb 8, 2026
de8f156
merge master
Feb 8, 2026
ad83d98
Merge branch 'refact-auto-setattr' into refact-fitting-net
Feb 8, 2026
7cf88d2
Merge branch 'master' into refact-fitting-net
njzjz Feb 9, 2026
d02aa6a
Merge branch 'master' into refact-fitting-net
njzjz Feb 10, 2026
271dc3b
[pre-commit.ci] auto fixes from pre-commit.com hooks
pre-commit-ci[bot] Feb 10, 2026
356214c
Revert "feat: se_t and se_t_tebd descriptors for the pytroch exportab…
Feb 11, 2026
53d2768
Merge remote-tracking branch 'origin/refact-fitting-net' into refact-…
Feb 11, 2026
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
113 changes: 112 additions & 1 deletion deepmd/dpmodel/utils/network.py
Original file line number Diff line number Diff line change
Expand Up @@ -1009,7 +1009,118 @@ def deserialize(cls, data: dict) -> "FittingNet":
return FN


FittingNet = make_fitting_network(EmbeddingNet, NativeNet, NativeLayer)
class FittingNet(EmbeddingNet):
    """The fitting network. It may be implemented as an embedding
    net connected with a linear output layer.

    Parameters
    ----------
    in_dim
        Input dimension.
    out_dim
        Output dimension.
    neuron
        The number of neurons in each hidden layer.
        Defaults to [24, 48, 96] when not given.
    activation_function
        The activation function.
    resnet_dt
        Use time step at the resnet architecture.
    precision
        Floating point precision for the model parameters.
    bias_out
        The last linear layer has bias.
    seed : int, optional
        Random seed.
    trainable : bool or list[bool], optional
        Whether the network is trainable. A list must provide one flag
        per hidden layer plus one for the output layer.
    """

    def __init__(
        self,
        in_dim: int,
        out_dim: int,
        neuron: list[int] | None = None,
        activation_function: str = "tanh",
        resnet_dt: bool = False,
        precision: str = DEFAULT_PRECISION,
        bias_out: bool = True,
        seed: int | list[int] | None = None,
        trainable: bool | list[bool] = True,
    ) -> None:
        # Avoid the shared-mutable-default pitfall: materialize the default
        # neuron list per instance.
        if neuron is None:
            neuron = [24, 48, 96]
        # Normalize `trainable` to one flag per layer:
        # len(neuron) hidden layers + 1 output layer.
        if trainable is None:
            trainable = [True] * (len(neuron) + 1)
        elif isinstance(trainable, bool):
            trainable = [trainable] * (len(neuron) + 1)
        elif len(trainable) != len(neuron) + 1:
            raise ValueError(
                f"trainable should have {len(neuron) + 1} entries "
                f"(hidden layers + output layer), got {len(trainable)}"
            )
        # The embedding part receives the flags for the hidden layers only.
        super().__init__(
            in_dim,
            neuron=neuron,
            activation_function=activation_function,
            resnet_dt=resnet_dt,
            precision=precision,
            seed=seed,
            trainable=trainable[:-1],
        )
        # Output layer input dim: last hidden width, or in_dim if no hidden layers.
        i_in = neuron[-1] if len(neuron) > 0 else in_dim
        i_ot = out_dim
        # Plain linear output layer: no activation, no resnet, no timestep.
        self.layers.append(
            NativeLayer(
                i_in,
                i_ot,
                bias=bias_out,
                use_timestep=False,
                activation_function=None,
                resnet=False,
                precision=precision,
                seed=child_seed(seed, len(neuron)),
                trainable=trainable[-1],
            )
        )
        self.out_dim = out_dim
        self.bias_out = bias_out

    def serialize(self) -> dict:
        """Serialize the network to a dict.

        Returns
        -------
        dict
            The serialized network.
        """
        return {
            "@class": "FittingNetwork",
            "@version": 1,
            "in_dim": self.in_dim,
            "out_dim": self.out_dim,
            "neuron": self.neuron.copy(),
            "activation_function": self.activation_function,
            "resnet_dt": self.resnet_dt,
            "precision": self.precision,
            "bias_out": self.bias_out,
            "layers": [layer.serialize() for layer in self.layers],
        }

    @classmethod
    def deserialize(cls, data: dict) -> "FittingNet":
        """Deserialize the network from a dict.

        Parameters
        ----------
        data : dict
            The dict to deserialize from.
        """
        data = data.copy()
        check_version_compatibility(data.pop("@version", 1), 1, 1)
        data.pop("@class", None)
        layers = data.pop("layers")
        obj = cls(**data)
        # Use type(obj.layers[0]) to respect subclass layer types
        layer_type = type(obj.layers[0])
        obj.layers = type(obj.layers)(
            [layer_type.deserialize(layer) for layer in layers]
        )
        return obj


class NetworkCollection:
Expand Down
24 changes: 21 additions & 3 deletions deepmd/pt_expt/utils/network.py
Original file line number Diff line number Diff line change
Expand Up @@ -11,11 +11,11 @@
NativeOP,
)
from deepmd.dpmodel.utils.network import EmbeddingNet as EmbeddingNetDP
from deepmd.dpmodel.utils.network import FittingNet as FittingNetDP
from deepmd.dpmodel.utils.network import LayerNorm as LayerNormDP
from deepmd.dpmodel.utils.network import NativeLayer as NativeLayerDP
from deepmd.dpmodel.utils.network import NetworkCollection as NetworkCollectionDP
from deepmd.dpmodel.utils.network import (
make_fitting_network,
make_multilayer_network,
)
from deepmd.pt_expt.common import (
Expand Down Expand Up @@ -114,8 +114,26 @@ def forward(self, x: torch.Tensor) -> torch.Tensor:
)


class FittingNet(make_fitting_network(EmbeddingNet, NativeNet, NativeLayer)):
pass
class FittingNet(FittingNetDP, torch.nn.Module):
    """pt_expt wrapper of the dpmodel FittingNet.

    Initializes the dpmodel network, then swaps its layers for pt_expt
    NativeLayer modules so parameters are registered with torch.
    """

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        torch.nn.Module.__init__(self)
        FittingNetDP.__init__(self, *args, **kwargs)
        # Round-trip each dpmodel layer through (de)serialization to obtain
        # the torch-module flavor of NativeLayer.
        converted = [NativeLayer.deserialize(ly.serialize()) for ly in self.layers]
        self.layers = torch.nn.ModuleList(converted)

    def __call__(self, *args: Any, **kwargs: Any) -> Any:
        # Route calls through torch's dispatch (hooks, etc.), not NativeOP.
        return torch.nn.Module.__call__(self, *args, **kwargs)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # Delegate to the dpmodel computation.
        return self.call(x)


def _fitting_net_from_dp(value: FittingNetDP) -> "FittingNet":
    """Convert a dpmodel FittingNet into its pt_expt counterpart."""
    return FittingNet.deserialize(value.serialize())


register_dpmodel_mapping(FittingNetDP, _fitting_net_from_dp)


class NetworkCollection(NetworkCollectionDP, torch.nn.Module):
Expand Down
98 changes: 98 additions & 0 deletions source/tests/common/dpmodel/test_network.py
Original file line number Diff line number Diff line change
Expand Up @@ -313,6 +313,104 @@ def test_fitting_net(self) -> None:
en1.call(inp)
np.testing.assert_allclose(en0.call(inp), en1.call(inp))

def test_is_concrete_class(self) -> None:
    """Verify FittingNet is a concrete class, not factory-generated."""
    input_dim, output_dim = 4, 1
    hidden = [8, 16]
    net = FittingNet(
        in_dim=input_dim,
        out_dim=output_dim,
        neuron=hidden,
        activation_function="tanh",
        resnet_dt=True,
        precision="float64",
        bias_out=True,
    )
    # The class must be the real one, not a factory-generated dynamic class.
    self.assertEqual(net.__class__.__name__, "FittingNet")
    self.assertEqual(net.__class__.__module__, "deepmd.dpmodel.utils.network")
    # Constructor arguments must be reflected as attributes.
    self.assertEqual(net.in_dim, input_dim)
    self.assertEqual(net.out_dim, output_dim)
    self.assertEqual(net.neuron, hidden)
    self.assertEqual(net.activation_function, "tanh")
    self.assertTrue(net.resnet_dt)
    self.assertTrue(net.bias_out)
    # Layer count: one per hidden width plus the final output layer.
    self.assertEqual(len(net.layers), len(hidden) + 1)

def test_forward_pass(self) -> None:
    """Test FittingNet forward pass produces correct output shape."""
    in_dim = 4
    out_dim = 3
    neuron = [8, 16, 32]
    net = FittingNet(
        in_dim=in_dim,
        out_dim=out_dim,
        neuron=neuron,
        activation_function="tanh",
        resnet_dt=True,
        precision="float64",
    )
    # Seed the RNG so the test input (and any future value-level checks)
    # is reproducible across runs.
    rng = np.random.default_rng(2026)
    # Single sample
    x = rng.standard_normal(in_dim)
    out = net.call(x)
    self.assertEqual(out.shape, (out_dim,))

    # Batch of samples
    batch_size = 5
    x_batch = rng.standard_normal((batch_size, in_dim))
    out_batch = net.call(x_batch)
    self.assertEqual(out_batch.shape, (batch_size, out_dim))

def test_trainable_parameter_variants(self) -> None:
    """Test FittingNet with different trainable configurations."""
    dims = {"in_dim": 4, "out_dim": 2}
    hidden = [8, 16]

    # Case 1: every layer trainable (the default behavior).
    all_on = FittingNet(neuron=hidden, trainable=True, **dims)
    self.assertTrue(all(ly.trainable for ly in all_on.layers))

    # Case 2: every layer frozen.
    all_off = FittingNet(neuron=hidden, trainable=False, **dims)
    self.assertTrue(all(not ly.trainable for ly in all_off.layers))

    # Case 3: per-layer flags — frozen embedding layers, trainable output.
    flags = [False, False, True]  # 2 embedding layers + 1 output layer
    mixed = FittingNet(neuron=hidden, trainable=flags, **dims)
    self.assertFalse(mixed.layers[0].trainable)  # first embedding layer
    self.assertFalse(mixed.layers[1].trainable)  # second embedding layer
    self.assertTrue(mixed.layers[2].trainable)  # output layer

    # Case 4: trainable flags survive a serialize/deserialize round trip.
    restored = FittingNet.deserialize(mixed.serialize())
    for before, after in zip(mixed.layers, restored.layers, strict=True):
        self.assertEqual(before.trainable, after.trainable)


class TestNetworkCollection(unittest.TestCase):
def setUp(self) -> None:
Expand Down
121 changes: 121 additions & 0 deletions source/tests/pt_expt/utils/test_network.py
Original file line number Diff line number Diff line change
Expand Up @@ -281,3 +281,124 @@ def test_trainable_parameter_handling(self) -> None:
for layer in net_frozen.layers:
if layer.w is not None:
self.assertFalse(layer.w.requires_grad)


class TestFittingNetRefactor(unittest.TestCase):
    """Tests for the refactored FittingNet pt_expt wrapper."""

    def setUp(self) -> None:
        self.in_dim = 4
        self.out_dim = 1
        self.neuron = [8, 16]
        self.activation = "tanh"
        self.resnet_dt = True
        self.precision = "float64"

    def _build_net(self):
        """Construct a pt_expt FittingNet from the shared hyperparameters."""
        from deepmd.pt_expt.utils.network import (
            FittingNet,
        )

        return FittingNet(
            in_dim=self.in_dim,
            out_dim=self.out_dim,
            neuron=self.neuron,
            activation_function=self.activation,
            resnet_dt=self.resnet_dt,
            precision=self.precision,
            seed=GLOBAL_SEED,
        )

    def _rand_input(self) -> torch.Tensor:
        """A small random batch matching the network's input width."""
        return torch.randn(5, self.in_dim, dtype=torch.float64, device=env.DEVICE)

    def test_pt_expt_fitting_net_wraps_dpmodel(self) -> None:
        """Verify pt_expt FittingNet correctly wraps dpmodel."""
        net = self._build_net()
        # The wrapper must be a genuine torch module...
        self.assertIsInstance(net, torch.nn.Module)
        # ...whose layers have been converted to torch submodules.
        self.assertIsInstance(net.layers, torch.nn.ModuleList)
        for sub in net.layers:
            self.assertIsInstance(sub, torch.nn.Module)

    def test_pt_expt_fitting_net_forward(self) -> None:
        """Test pt_expt FittingNet forward pass returns torch.Tensor."""
        net = self._build_net()
        batch = self._rand_input()
        result = net(batch)
        self.assertIsInstance(result, torch.Tensor)
        self.assertEqual(result.shape, (5, self.out_dim))
        self.assertEqual(result.dtype, torch.float64)

    def test_serialization_round_trip_pt_expt(self) -> None:
        """Test pt_expt FittingNet serialization/deserialization."""
        from deepmd.pt_expt.utils.network import (
            FittingNet,
        )

        net = self._build_net()
        batch = self._rand_input()
        before = net(batch)

        # Round-trip through the serialized representation.
        restored = FittingNet.deserialize(net.serialize())

        # The restored network must keep torch-module layers.
        self.assertIsInstance(restored.layers, torch.nn.ModuleList)
        for sub in restored.layers:
            self.assertIsInstance(sub, torch.nn.Module)

        after = restored(batch)
        np.testing.assert_allclose(
            before.detach().cpu().numpy(),
            after.detach().cpu().numpy(),
        )

    def test_registry_converts_dpmodel_to_pt_expt(self) -> None:
        """Test that dpmodel FittingNet can be converted to pt_expt via registry."""
        from deepmd.dpmodel.utils.network import FittingNet as DPFittingNet
        from deepmd.pt_expt.common import (
            try_convert_module,
        )
        from deepmd.pt_expt.utils.network import (
            FittingNet,
        )

        # Start from a pure dpmodel network.
        source = DPFittingNet(
            in_dim=self.in_dim,
            out_dim=self.out_dim,
            neuron=self.neuron,
            activation_function=self.activation,
            resnet_dt=self.resnet_dt,
            precision=self.precision,
            seed=GLOBAL_SEED,
        )

        # The registry should hand back the pt_expt flavor.
        converted = try_convert_module(source)
        self.assertIsNotNone(converted)
        self.assertIsInstance(converted, torch.nn.Module)
        self.assertIsInstance(converted, FittingNet)

        # Its layers must be torch submodules as well.
        for sub in converted.layers:
            self.assertIsInstance(sub, torch.nn.Module)