Skip to content

Commit 4b7d82d

Browse files
Copilot (authored) and njzjz
committed
fix(finetune): avoid unnecessary warnings for default parameter values during config comparison
Co-authored-by: njzjz <9496702+njzjz@users.noreply.github.com>
1 parent 54fa343 commit 4b7d82d

4 files changed

Lines changed: 156 additions & 0 deletions

File tree

deepmd/pd/train/training.py

Lines changed: 39 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -72,6 +72,9 @@
7272
nvprof_context,
7373
to_numpy_array,
7474
)
75+
from deepmd.utils.argcheck import (
76+
normalize,
77+
)
7578
from deepmd.utils.data import (
7679
DataRequirementItem,
7780
)
@@ -104,6 +107,42 @@ def _warn_configuration_mismatch_during_finetune(
104107
model_branch : str
105108
Model branch name for logging context
106109
"""
110+
# Normalize both configurations to ensure consistent comparison
111+
# This avoids warnings for parameters that only differ due to default values
112+
try:
113+
# Create minimal configs for normalization with required fields
114+
base_config = {
115+
"model": {
116+
"fitting_net": {"neuron": [240, 240, 240]},
117+
"type_map": ["H", "O"],
118+
},
119+
"training": {"training_data": {"systems": ["fake"]}, "numb_steps": 100},
120+
}
121+
122+
input_config = base_config.copy()
123+
input_config["model"]["descriptor"] = input_descriptor.copy()
124+
125+
pretrained_config = base_config.copy()
126+
pretrained_config["model"]["descriptor"] = pretrained_descriptor.copy()
127+
128+
# Normalize both configurations
129+
normalized_input = normalize(input_config, multi_task=False)["model"][
130+
"descriptor"
131+
]
132+
normalized_pretrained = normalize(pretrained_config, multi_task=False)["model"][
133+
"descriptor"
134+
]
135+
136+
if normalized_input == normalized_pretrained:
137+
return
138+
139+
# Use normalized configs for comparison to show only meaningful differences
140+
input_descriptor = normalized_input
141+
pretrained_descriptor = normalized_pretrained
142+
except Exception:
143+
# If normalization fails, fall back to original comparison
144+
pass
145+
107146
if input_descriptor == pretrained_descriptor:
108147
return
109148

deepmd/pd/utils/finetune.py

Lines changed: 39 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -6,6 +6,9 @@
66

77
import paddle
88

9+
from deepmd.utils.argcheck import (
10+
normalize,
11+
)
912
from deepmd.utils.finetune import (
1013
FinetuneRuleItem,
1114
)
@@ -30,6 +33,42 @@ def _warn_descriptor_config_differences(
3033
model_branch : str
3134
Model branch name for logging context
3235
"""
36+
# Normalize both configurations to ensure consistent comparison
37+
# This avoids warnings for parameters that only differ due to default values
38+
try:
39+
# Create minimal configs for normalization with required fields
40+
base_config = {
41+
"model": {
42+
"fitting_net": {"neuron": [240, 240, 240]},
43+
"type_map": ["H", "O"],
44+
},
45+
"training": {"training_data": {"systems": ["fake"]}, "numb_steps": 100},
46+
}
47+
48+
input_config = base_config.copy()
49+
input_config["model"]["descriptor"] = input_descriptor.copy()
50+
51+
pretrained_config = base_config.copy()
52+
pretrained_config["model"]["descriptor"] = pretrained_descriptor.copy()
53+
54+
# Normalize both configurations
55+
normalized_input = normalize(input_config, multi_task=False)["model"][
56+
"descriptor"
57+
]
58+
normalized_pretrained = normalize(pretrained_config, multi_task=False)["model"][
59+
"descriptor"
60+
]
61+
62+
if normalized_input == normalized_pretrained:
63+
return
64+
65+
# Use normalized configs for comparison to show only meaningful differences
66+
input_descriptor = normalized_input
67+
pretrained_descriptor = normalized_pretrained
68+
except Exception:
69+
# If normalization fails, fall back to original comparison
70+
pass
71+
3372
if input_descriptor == pretrained_descriptor:
3473
return
3574

deepmd/pt/train/training.py

Lines changed: 39 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -81,6 +81,9 @@
8181
DataLoader,
8282
)
8383

84+
from deepmd.utils.argcheck import (
85+
normalize,
86+
)
8487
from deepmd.utils.path import (
8588
DPH5Path,
8689
)
@@ -110,6 +113,42 @@ def _warn_configuration_mismatch_during_finetune(
110113
model_branch : str
111114
Model branch name for logging context
112115
"""
116+
# Normalize both configurations to ensure consistent comparison
117+
# This avoids warnings for parameters that only differ due to default values
118+
try:
119+
# Create minimal configs for normalization with required fields
120+
base_config = {
121+
"model": {
122+
"fitting_net": {"neuron": [240, 240, 240]},
123+
"type_map": ["H", "O"],
124+
},
125+
"training": {"training_data": {"systems": ["fake"]}, "numb_steps": 100},
126+
}
127+
128+
input_config = base_config.copy()
129+
input_config["model"]["descriptor"] = input_descriptor.copy()
130+
131+
pretrained_config = base_config.copy()
132+
pretrained_config["model"]["descriptor"] = pretrained_descriptor.copy()
133+
134+
# Normalize both configurations
135+
normalized_input = normalize(input_config, multi_task=False)["model"][
136+
"descriptor"
137+
]
138+
normalized_pretrained = normalize(pretrained_config, multi_task=False)["model"][
139+
"descriptor"
140+
]
141+
142+
if normalized_input == normalized_pretrained:
143+
return
144+
145+
# Use normalized configs for comparison to show only meaningful differences
146+
input_descriptor = normalized_input
147+
pretrained_descriptor = normalized_pretrained
148+
except Exception:
149+
# If normalization fails, fall back to original comparison
150+
pass
151+
113152
if input_descriptor == pretrained_descriptor:
114153
return
115154

deepmd/pt/utils/finetune.py

Lines changed: 39 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -9,6 +9,9 @@
99
from deepmd.pt.utils import (
1010
env,
1111
)
12+
from deepmd.utils.argcheck import (
13+
normalize,
14+
)
1215
from deepmd.utils.finetune import (
1316
FinetuneRuleItem,
1417
)
@@ -33,6 +36,42 @@ def _warn_descriptor_config_differences(
3336
model_branch : str
3437
Model branch name for logging context
3538
"""
39+
# Normalize both configurations to ensure consistent comparison
40+
# This avoids warnings for parameters that only differ due to default values
41+
try:
42+
# Create minimal configs for normalization with required fields
43+
base_config = {
44+
"model": {
45+
"fitting_net": {"neuron": [240, 240, 240]},
46+
"type_map": ["H", "O"],
47+
},
48+
"training": {"training_data": {"systems": ["fake"]}, "numb_steps": 100},
49+
}
50+
51+
input_config = base_config.copy()
52+
input_config["model"]["descriptor"] = input_descriptor.copy()
53+
54+
pretrained_config = base_config.copy()
55+
pretrained_config["model"]["descriptor"] = pretrained_descriptor.copy()
56+
57+
# Normalize both configurations
58+
normalized_input = normalize(input_config, multi_task=False)["model"][
59+
"descriptor"
60+
]
61+
normalized_pretrained = normalize(pretrained_config, multi_task=False)["model"][
62+
"descriptor"
63+
]
64+
65+
if normalized_input == normalized_pretrained:
66+
return
67+
68+
# Use normalized configs for comparison to show only meaningful differences
69+
input_descriptor = normalized_input
70+
pretrained_descriptor = normalized_pretrained
71+
except Exception:
72+
# If normalization fails, fall back to original comparison
73+
pass
74+
3675
if input_descriptor == pretrained_descriptor:
3776
return
3877

0 commit comments

Comments (0)