Skip to content

Commit 206ace0

Browse files
wanghan-iapcm (Han Wang) and co-authors authored
fix(c++,pt-expt): substitute default_fparam in .pt2 compute (#5343)
## Summary - Store `default_fparam` values in `.pt2` metadata so C++ can read them at init time - In `DeepPotPTExpt::compute()`, substitute stored default values when caller passes empty fparam on a model with `has_default_fparam=true` - Add C++ tests for empty-fparam, explicit-fparam, and LAMMPS-nlist paths using `fparam_aparam_default.pt2` ## Test plan - [x] `runUnitTests_cc --gtest_filter="*DefaultFParam*PtExpt*"` — 10 tests pass (double + float, attrs/empty/explicit/lmp_nlist) - [x] `runUnitTests_cc --gtest_filter="*PtExpt*"` — all 100 PtExpt tests pass, no regressions - [x] Verified `fparam_aparam_default.pt2` metadata contains `default_fparam: [0.25852028]` - [ ] CI: regenerate model files via `gen_fparam_aparam.py` before C++ tests <!-- This is an auto-generated comment: release notes by coderabbit.ai --> ## Summary by CodeRabbit * **New Features** * Models now include and export a default feature-parameter vector so inference will use pre-configured defaults when explicit parameters are omitted. * **Bug Fixes** * Stricter validation: missing or size-mismatched default parameters now emit warnings or errors instead of proceeding silently. * **Tests** * Added end-to-end inference tests validating empty vs. explicit feature-parameter behavior and verifying energies, forces, and virials. * **Chores** * Updated test-data generation to produce and commit reference values for the default-parameter model. <!-- end of auto-generated comment: release notes by coderabbit.ai --> --------- Co-authored-by: Han Wang <wang_han@iapcm.ac.cn>
1 parent a7e9fed commit 206ace0

6 files changed

Lines changed: 304 additions & 25 deletions

File tree

deepmd/pt_expt/utils/serialization.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -195,6 +195,7 @@ def _collect_metadata(model: torch.nn.Module) -> dict:
195195
"mixed_types": model.mixed_types(),
196196
"sel_type": model.get_sel_type(),
197197
"has_default_fparam": model.has_default_fparam(),
198+
"default_fparam": model.get_default_fparam(),
198199
"fitting_output_defs": fitting_output_defs,
199200
}
200201

source/api_cc/include/DeepPotPTExpt.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -202,6 +202,7 @@ class DeepPotPTExpt : public DeepPotBackend {
202202
int daparam;
203203
bool aparam_nall;
204204
bool has_default_fparam_;
205+
std::vector<double> default_fparam_;
205206
double rcut;
206207
int gpu_id;
207208
bool gpu_enabled;

source/api_cc/src/DeepPotPTExpt.cc

Lines changed: 41 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -636,6 +636,25 @@ void DeepPotPTExpt::init(const std::string& model,
636636
} else {
637637
has_default_fparam_ = false;
638638
}
639+
if (has_default_fparam_) {
640+
if (metadata.obj_val.count("default_fparam")) {
641+
default_fparam_.clear();
642+
for (const auto& v : metadata["default_fparam"].as_array()) {
643+
default_fparam_.push_back(v.as_double());
644+
}
645+
if (static_cast<int>(default_fparam_.size()) != dfparam) {
646+
throw deepmd::deepmd_exception(
647+
"default_fparam length (" + std::to_string(default_fparam_.size()) +
648+
") does not match dim_fparam (" + std::to_string(dfparam) + ").");
649+
}
650+
} else {
651+
std::cerr << "WARNING: Model has has_default_fparam=true but "
652+
"default_fparam values are missing from metadata. "
653+
"Empty fparam will not be substituted. Please regenerate "
654+
"the .pt2 model with an updated version of deepmd-kit."
655+
<< std::endl;
656+
}
657+
}
639658

640659
type_map.clear();
641660
for (const auto& v : metadata["type_map"].as_array()) {
@@ -818,6 +837,17 @@ void DeepPotPTExpt::compute(ENERGYVTYPE& ener,
818837
valuetype_options)
819838
.to(torch::kFloat64)
820839
.to(device);
840+
} else if (has_default_fparam_ && !default_fparam_.empty()) {
841+
fparam_tensor =
842+
torch::from_blob(const_cast<double*>(default_fparam_.data()),
843+
{1, static_cast<std::int64_t>(default_fparam_.size())},
844+
options)
845+
.clone()
846+
.to(device);
847+
} else if (has_default_fparam_) {
848+
throw deepmd::deepmd_exception(
849+
"fparam is empty and default_fparam values are missing from the .pt2 "
850+
"metadata. Please regenerate the model or provide fparam explicitly.");
821851
} else {
822852
fparam_tensor = torch::zeros({0}, options).to(device);
823853
}
@@ -1052,6 +1082,17 @@ void DeepPotPTExpt::compute(ENERGYVTYPE& ener,
10521082
valuetype_options)
10531083
.to(torch::kFloat64)
10541084
.to(device);
1085+
} else if (has_default_fparam_ && !default_fparam_.empty()) {
1086+
fparam_tensor =
1087+
torch::from_blob(const_cast<double*>(default_fparam_.data()),
1088+
{1, static_cast<std::int64_t>(default_fparam_.size())},
1089+
options)
1090+
.clone()
1091+
.to(device);
1092+
} else if (has_default_fparam_) {
1093+
throw deepmd::deepmd_exception(
1094+
"fparam is empty and default_fparam values are missing from the .pt2 "
1095+
"metadata. Please regenerate the model or provide fparam explicitly.");
10551096
} else {
10561097
fparam_tensor = torch::zeros({0}, options).to(device);
10571098
}

source/api_cc/tests/test_deeppot_a_fparam_aparam_ptexpt.cc

Lines changed: 0 additions & 25 deletions
Original file line numberDiff line numberDiff line change
@@ -345,28 +345,3 @@ TYPED_TEST(TestInferDeepPotNoDefaultFParamPtExpt, no_default_fparam) {
345345
EXPECT_EQ(dp.dim_fparam(), 1);
346346
EXPECT_FALSE(dp.has_default_fparam());
347347
}
348-
349-
// Test that a .pt2 model with default_fparam reports true
350-
template <class VALUETYPE>
351-
class TestInferDeepPotDefaultFParamPtExpt : public ::testing::Test {
352-
protected:
353-
deepmd::DeepPot dp;
354-
355-
void SetUp() override {
356-
#ifndef BUILD_PYTORCH
357-
GTEST_SKIP() << "Skip because PyTorch support is not enabled.";
358-
#endif
359-
dp.init("../../tests/infer/fparam_aparam_default.pt2");
360-
};
361-
362-
void TearDown() override {};
363-
};
364-
365-
TYPED_TEST_SUITE(TestInferDeepPotDefaultFParamPtExpt, ValueTypes);
366-
367-
TYPED_TEST(TestInferDeepPotDefaultFParamPtExpt, has_default_fparam) {
368-
using VALUETYPE = TypeParam;
369-
deepmd::DeepPot& dp = this->dp;
370-
EXPECT_EQ(dp.dim_fparam(), 1);
371-
EXPECT_TRUE(dp.has_default_fparam());
372-
}
Lines changed: 220 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,220 @@
1+
// SPDX-License-Identifier: LGPL-3.0-or-later
2+
// Test C++ inference for pt_expt (.pt2) backend with default_fparam.
3+
// Uses fparam_aparam_default.pt2: has_default_fparam=true,
4+
// default=[0.25852028].
5+
#include <gtest/gtest.h>
6+
7+
#include <algorithm>
8+
#include <cmath>
9+
#include <fstream>
10+
#include <vector>
11+
12+
#include "DeepPot.h"
13+
#include "neighbor_list.h"
14+
#include "test_utils.h"
15+
16+
// 1e-10 cannot pass; unclear bug or not
17+
#undef EPSILON
18+
#define EPSILON (std::is_same<VALUETYPE, double>::value ? 1e-7 : 1e-4)
19+
20+
template <class VALUETYPE>
21+
class TestInferDeepPotDefaultFParamPtExpt : public ::testing::Test {
22+
protected:
23+
std::vector<VALUETYPE> coord = {12.83, 2.56, 2.18, 12.09, 2.87, 2.74,
24+
00.25, 3.32, 1.68, 3.36, 3.00, 1.81,
25+
3.51, 2.51, 2.60, 4.27, 3.22, 1.56};
26+
std::vector<int> atype = {0, 0, 0, 0, 0, 0};
27+
std::vector<VALUETYPE> box = {13., 0., 0., 0., 13., 0., 0., 0., 13.};
28+
// aparam is still provided explicitly
29+
std::vector<VALUETYPE> aparam = {0.25852028, 0.25852028, 0.25852028,
30+
0.25852028, 0.25852028, 0.25852028};
31+
// explicit fparam for backward compat test
32+
std::vector<VALUETYPE> fparam = {0.25852028};
33+
// expected values computed with default fparam
34+
std::vector<VALUETYPE> expected_e = {
35+
1.596836265688982293e-01, 1.596933624175455035e-01,
36+
1.596859462832928844e-01, 1.596779837732069107e-01,
37+
1.596776702807257142e-01, 1.596869048501883825e-01};
38+
std::vector<VALUETYPE> expected_f = {
39+
1.112134318320098417e-05, 1.085230789880272913e-04,
40+
9.298442641358670786e-07, 1.491517597320257801e-04,
41+
-1.250419527572718750e-05, -9.768265174690742383e-05,
42+
-5.021052645725076108e-05, -9.741678916887853762e-05,
43+
9.375317764392499637e-05, 8.664103999852429459e-05,
44+
-4.538513400016661465e-05, 8.561605116672728300e-05,
45+
-2.454811055475983474e-05, 1.079491454988312375e-04,
46+
-1.656974003674590982e-04, -1.721555059017404292e-04,
47+
-6.116610604208619416e-05, 8.308097903957838235e-05};
48+
std::vector<VALUETYPE> expected_v = {
49+
-1.264062189119718516e-04, -1.636544077308298682e-05,
50+
4.453224130911559301e-05, -7.947403699518174416e-06,
51+
-4.603504987332694676e-05, 9.491045850088937973e-06,
52+
4.131028921467351392e-05, 9.691472468201808941e-06,
53+
-3.323572704427467454e-05, -1.024556912293136224e-04,
54+
5.530809120954762511e-06, 5.211030391191995666e-05,
55+
-3.851138686809712045e-06, 2.101414374152978974e-07,
56+
3.247573516972806439e-06, 4.561253716254927361e-05,
57+
-3.865680092083368681e-06, -3.262252150841838630e-05,
58+
-1.166788692566848309e-04, -1.814499890570951256e-05,
59+
2.155064011880963138e-05, -1.629918981392338952e-05,
60+
-3.245631268444034052e-05, 2.968538417601219450e-05,
61+
2.463149007223181425e-05, 3.660689861518750502e-05,
62+
-3.586518711234942813e-05, -1.424206401855391917e-04,
63+
-1.017840928263479808e-05, 1.421307534994556974e-05,
64+
-8.618294024757196269e-06, -2.192409332705388475e-05,
65+
3.461715847634955364e-05, 1.277625693457723244e-05,
66+
3.486479415793142304e-05, -5.604161168847289151e-05,
67+
-8.612844407008964597e-05, 2.508361660152530129e-06,
68+
-1.633895954533155651e-07, 1.903591783622965016e-06,
69+
-3.028341203071198989e-05, 4.685511271783774690e-05,
70+
2.876824509984395433e-06, 4.576515617130287920e-05,
71+
-7.108738780331674085e-05, -1.062354815105980244e-04,
72+
-2.954644717832236758e-05, 4.075640001084843372e-05,
73+
-3.138369091725702186e-05, -1.316088004849702041e-05,
74+
1.786389692843502177e-05, 4.579187321116953935e-05,
75+
1.869753034515599593e-05, -2.550749273395904029e-05};
76+
int natoms;
77+
double expected_tot_e;
78+
std::vector<VALUETYPE> expected_tot_v;
79+
80+
deepmd::DeepPot dp;
81+
82+
void SetUp() override {
83+
#ifndef BUILD_PYTORCH
84+
GTEST_SKIP() << "Skip because PyTorch support is not enabled.";
85+
#endif
86+
dp.init("../../tests/infer/fparam_aparam_default.pt2");
87+
88+
natoms = expected_e.size();
89+
EXPECT_EQ(natoms * 3, expected_f.size());
90+
EXPECT_EQ(natoms * 9, expected_v.size());
91+
expected_tot_e = 0.;
92+
expected_tot_v.resize(9);
93+
std::fill(expected_tot_v.begin(), expected_tot_v.end(), 0.);
94+
for (int ii = 0; ii < natoms; ++ii) {
95+
expected_tot_e += expected_e[ii];
96+
}
97+
for (int ii = 0; ii < natoms; ++ii) {
98+
for (int dd = 0; dd < 9; ++dd) {
99+
expected_tot_v[dd] += expected_v[ii * 9 + dd];
100+
}
101+
}
102+
};
103+
104+
void TearDown() override {};
105+
};
106+
107+
TYPED_TEST_SUITE(TestInferDeepPotDefaultFParamPtExpt, ValueTypes);
108+
109+
TYPED_TEST(TestInferDeepPotDefaultFParamPtExpt, attrs) {
110+
using VALUETYPE = TypeParam;
111+
deepmd::DeepPot& dp = this->dp;
112+
EXPECT_EQ(dp.dim_fparam(), 1);
113+
EXPECT_EQ(dp.dim_aparam(), 1);
114+
EXPECT_TRUE(dp.has_default_fparam());
115+
}
116+
117+
TYPED_TEST(TestInferDeepPotDefaultFParamPtExpt, cpu_build_nlist_empty_fparam) {
118+
using VALUETYPE = TypeParam;
119+
std::vector<VALUETYPE>& coord = this->coord;
120+
std::vector<int>& atype = this->atype;
121+
std::vector<VALUETYPE>& box = this->box;
122+
std::vector<VALUETYPE>& aparam = this->aparam;
123+
std::vector<VALUETYPE>& expected_e = this->expected_e;
124+
std::vector<VALUETYPE>& expected_f = this->expected_f;
125+
int& natoms = this->natoms;
126+
double& expected_tot_e = this->expected_tot_e;
127+
std::vector<VALUETYPE>& expected_tot_v = this->expected_tot_v;
128+
deepmd::DeepPot& dp = this->dp;
129+
double ener;
130+
std::vector<VALUETYPE> force, virial;
131+
// Empty fparam — model should use default
132+
std::vector<VALUETYPE> empty_fparam;
133+
dp.compute(ener, force, virial, coord, atype, box, empty_fparam, aparam);
134+
135+
EXPECT_EQ(force.size(), natoms * 3);
136+
EXPECT_EQ(virial.size(), 9);
137+
138+
EXPECT_LT(fabs(ener - expected_tot_e), EPSILON);
139+
for (int ii = 0; ii < natoms * 3; ++ii) {
140+
EXPECT_LT(fabs(force[ii] - expected_f[ii]), EPSILON);
141+
}
142+
for (int ii = 0; ii < 3 * 3; ++ii) {
143+
EXPECT_LT(fabs(virial[ii] - expected_tot_v[ii]), EPSILON);
144+
}
145+
}
146+
147+
TYPED_TEST(TestInferDeepPotDefaultFParamPtExpt,
148+
cpu_build_nlist_explicit_fparam) {
149+
using VALUETYPE = TypeParam;
150+
std::vector<VALUETYPE>& coord = this->coord;
151+
std::vector<int>& atype = this->atype;
152+
std::vector<VALUETYPE>& box = this->box;
153+
std::vector<VALUETYPE>& fparam = this->fparam;
154+
std::vector<VALUETYPE>& aparam = this->aparam;
155+
std::vector<VALUETYPE>& expected_e = this->expected_e;
156+
std::vector<VALUETYPE>& expected_f = this->expected_f;
157+
int& natoms = this->natoms;
158+
double& expected_tot_e = this->expected_tot_e;
159+
std::vector<VALUETYPE>& expected_tot_v = this->expected_tot_v;
160+
deepmd::DeepPot& dp = this->dp;
161+
double ener;
162+
std::vector<VALUETYPE> force, virial;
163+
// Explicit fparam — backward compat
164+
dp.compute(ener, force, virial, coord, atype, box, fparam, aparam);
165+
166+
EXPECT_EQ(force.size(), natoms * 3);
167+
EXPECT_EQ(virial.size(), 9);
168+
169+
EXPECT_LT(fabs(ener - expected_tot_e), EPSILON);
170+
for (int ii = 0; ii < natoms * 3; ++ii) {
171+
EXPECT_LT(fabs(force[ii] - expected_f[ii]), EPSILON);
172+
}
173+
for (int ii = 0; ii < 3 * 3; ++ii) {
174+
EXPECT_LT(fabs(virial[ii] - expected_tot_v[ii]), EPSILON);
175+
}
176+
}
177+
178+
TYPED_TEST(TestInferDeepPotDefaultFParamPtExpt, cpu_lmp_nlist_empty_fparam) {
179+
using VALUETYPE = TypeParam;
180+
std::vector<VALUETYPE>& coord = this->coord;
181+
std::vector<int>& atype = this->atype;
182+
std::vector<VALUETYPE>& box = this->box;
183+
std::vector<VALUETYPE>& aparam = this->aparam;
184+
std::vector<VALUETYPE>& expected_f = this->expected_f;
185+
int& natoms = this->natoms;
186+
double& expected_tot_e = this->expected_tot_e;
187+
std::vector<VALUETYPE>& expected_tot_v = this->expected_tot_v;
188+
deepmd::DeepPot& dp = this->dp;
189+
float rc = dp.cutoff();
190+
int nloc = coord.size() / 3;
191+
std::vector<VALUETYPE> coord_cpy;
192+
std::vector<int> atype_cpy, mapping;
193+
std::vector<std::vector<int> > nlist_data;
194+
_build_nlist<VALUETYPE>(nlist_data, coord_cpy, atype_cpy, mapping, coord,
195+
atype, box, rc);
196+
int nall = coord_cpy.size() / 3;
197+
std::vector<int> ilist(nloc), numneigh(nloc);
198+
std::vector<int*> firstneigh(nloc);
199+
deepmd::InputNlist inlist(nloc, &ilist[0], &numneigh[0], &firstneigh[0]);
200+
convert_nlist(inlist, nlist_data);
201+
202+
double ener;
203+
std::vector<VALUETYPE> force_, virial;
204+
std::vector<VALUETYPE> empty_fparam;
205+
dp.compute(ener, force_, virial, coord_cpy, atype_cpy, box, nall - nloc,
206+
inlist, 0, empty_fparam, aparam);
207+
std::vector<VALUETYPE> force;
208+
_fold_back<VALUETYPE>(force, force_, mapping, nloc, nall, 3);
209+
210+
EXPECT_EQ(force.size(), natoms * 3);
211+
EXPECT_EQ(virial.size(), 9);
212+
213+
EXPECT_LT(fabs(ener - expected_tot_e), EPSILON);
214+
for (int ii = 0; ii < natoms * 3; ++ii) {
215+
EXPECT_LT(fabs(force[ii] - expected_f[ii]), EPSILON);
216+
}
217+
for (int ii = 0; ii < 3 * 3; ++ii) {
218+
EXPECT_LT(fabs(virial[ii] - expected_tot_v[ii]), EPSILON);
219+
}
220+
}

source/tests/infer/gen_fparam_aparam.py

Lines changed: 41 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -121,6 +121,10 @@ def main():
121121
print(f"Exporting to {pt2_default_path} ...") # noqa: T201
122122
pt_expt_deserialize_to_file(pt2_default_path, copy.deepcopy(data_default))
123123

124+
# NOTE: fparam_aparam_default.pth is committed to git (generated by PR #5311),
125+
# NOT regenerated here — it has its own expected values in
126+
# test_deeppot_default_fparam_pt.cc.
127+
124128
# ---- 4. Run inference via DeepPot to get reference values ----
125129
from deepmd.infer import (
126130
DeepPot,
@@ -193,6 +197,43 @@ def main():
193197
print(f" {vv:.18e}{comma}") # noqa: T201
194198
print(" };") # noqa: T201
195199

200+
# ---- 4b. Reference values for default_fparam model (.pt2) ----
201+
dp_default = DeepPot(pt2_default_path)
202+
e_d, f_d, _v_d, ae_d, av_d = dp_default.eval(
203+
coord,
204+
box,
205+
atype,
206+
fparam=fparam_val,
207+
aparam=aparam_val,
208+
atomic=True,
209+
)
210+
atom_energy_d = ae_d[0, :, 0]
211+
force_d = f_d[0]
212+
atom_virial_d = av_d[0]
213+
214+
print("\n// ---- Reference values for C++ default_fparam .pt2 test ----") # noqa: T201
215+
print(f"// Total energy: {e_d[0, 0]:.18e}") # noqa: T201
216+
print() # noqa: T201
217+
print(" std::vector<VALUETYPE> expected_e = {") # noqa: T201
218+
for ii, ev in enumerate(atom_energy_d):
219+
comma = "," if ii < len(atom_energy_d) - 1 else ""
220+
print(f" {ev:.18e}{comma}") # noqa: T201
221+
print(" };") # noqa: T201
222+
223+
print(" std::vector<VALUETYPE> expected_f = {") # noqa: T201
224+
force_flat_d = force_d.flatten()
225+
for ii, fv in enumerate(force_flat_d):
226+
comma = "," if ii < len(force_flat_d) - 1 else ""
227+
print(f" {fv:.18e}{comma}") # noqa: T201
228+
print(" };") # noqa: T201
229+
230+
print(" std::vector<VALUETYPE> expected_v = {") # noqa: T201
231+
virial_flat_d = atom_virial_d.flatten()
232+
for ii, vv in enumerate(virial_flat_d):
233+
comma = "," if ii < len(virial_flat_d) - 1 else ""
234+
print(f" {vv:.18e}{comma}") # noqa: T201
235+
print(" };") # noqa: T201
236+
196237
# ---- 5. Verify .pth gives same results ----
197238
if pth_exported:
198239
dp_pth = DeepPot(pth_path)

0 commit comments

Comments (0)