diff --git a/deepmd/dpmodel/loss/ener.py b/deepmd/dpmodel/loss/ener.py index a237138dee..7ea35d5596 100644 --- a/deepmd/dpmodel/loss/ener.py +++ b/deepmd/dpmodel/loss/ener.py @@ -90,6 +90,14 @@ class EnergyLoss(Loss): If true, use L2 norm of force vectors for loss calculation when loss_func='mae' or use_huber is True. Instead of computing loss on force components, computes loss on ||F_pred - F_label||_2. This treats the force vector as a whole rather than three independent components. + intensive_ener_virial : bool + If true, the non-Huber MSE energy and virial losses use intensive normalization, + i.e. a 1/N^2 factor instead of the legacy 1/N scaling. This matches per-atom + RMSE-style normalization for those terms. MAE and Huber modes use different + scaling and are not affected in the same way by this flag. + If false (default), the legacy normalization is used for the affected terms. + The default is false for backward compatibility with models trained using + deepmd-kit <= 3.1.3. **kwargs Other keyword arguments. """ @@ -116,6 +124,7 @@ def __init__( huber_delta: float | list[float] = 0.01, loss_func: str = "mse", f_use_norm: bool = False, + intensive_ener_virial: bool = False, **kwargs: Any, ) -> None: # Validate loss_func @@ -155,6 +164,7 @@ def __init__( self.use_huber = use_huber self.huber_delta = huber_delta self.f_use_norm = f_use_norm + self.intensive_ener_virial = intensive_ener_virial if self.f_use_norm and not (self.use_huber or self.loss_func == "mae"): raise RuntimeError( "f_use_norm can only be True when use_huber or loss_func='mae'." @@ -256,11 +266,15 @@ def call( loss = 0 more_loss = {} + # Normalization exponent controls loss scaling with system size: + # - norm_exp=2 (intensive_ener_virial=True): loss uses 1/N² scaling, making it independent of system size + # - norm_exp=1 (intensive_ener_virial=False, legacy): loss uses 1/N scaling, which varies with system size + norm_exp = 2 if self.intensive_ener_virial else 1 if self.has_e: if self.loss_func == "mse": l2_ener_loss = xp.mean(xp.square(energy - energy_hat)) if not self.use_huber: - loss += atom_norm_ener * (pref_e * l2_ener_loss) + loss += atom_norm_ener**norm_exp * (pref_e * l2_ener_loss) else: l_huber_loss = custom_huber_loss( atom_norm_ener * energy, @@ -335,7 +349,7 @@ def call( xp.square(virial_hat_reshape - virial_reshape), ) if not self.use_huber: - loss += atom_norm * (pref_v * l2_virial_loss) + loss += atom_norm**norm_exp * (pref_v * l2_virial_loss) else: l_huber_loss = custom_huber_loss( atom_norm * virial_reshape, @@ -525,7 +539,7 @@ def serialize(self) -> dict: """ return { "@class": "EnergyLoss", - "@version": 2, + "@version": 3, "starter_learning_rate": self.starter_learning_rate, "start_pref_e": self.start_pref_e, "limit_pref_e": self.limit_pref_e, @@ -546,6 +560,7 @@ def serialize(self) -> dict: "huber_delta": self.huber_delta, "loss_func": self.loss_func, "f_use_norm": self.f_use_norm, + "intensive_ener_virial": self.intensive_ener_virial, } @classmethod @@ -563,6 +578,10 @@ def deserialize(cls, data: dict) -> "Loss": The deserialized loss module """ data = data.copy() - check_version_compatibility(data.pop("@version"), 2, 1) + version = data.pop("@version") + check_version_compatibility(version, 3, 1) data.pop("@class") + # Backward compatibility: version 1-2 used legacy normalization + if version < 3: + data.setdefault("intensive_ener_virial", False) return cls(**data) diff --git a/deepmd/dpmodel/loss/ener_spin.py b/deepmd/dpmodel/loss/ener_spin.py index a13d626764..6262015c84 100644 --- 
a/deepmd/dpmodel/loss/ener_spin.py +++ b/deepmd/dpmodel/loss/ener_spin.py @@ -50,6 +50,14 @@ class EnergySpinLoss(Loss): if true, the energy will be computed as \sum_i c_i E_i loss_func : str Loss function type: 'mse' or 'mae'. + intensive_ener_virial : bool + If true, the MSE energy and virial terms use intensive normalization, + i.e. an additional normalization by the square of the number of atoms + (1/N^2) instead of the legacy (1/N) behavior. This keeps those MSE loss + terms consistent with per-atom RMSE reporting and less dependent on + system size. This option does not change the MAE formulation, which is + handled separately. The default is false for backward compatibility with + models trained using deepmd-kit <= 3.1.3. **kwargs Other keyword arguments. """ @@ -69,6 +77,7 @@ def __init__( limit_pref_ae: float = 0.0, enable_atom_ener_coeff: bool = False, loss_func: str = "mse", + intensive_ener_virial: bool = False, **kwargs: Any, ) -> None: valid_loss_funcs = ["mse", "mae"] @@ -89,6 +98,7 @@ def __init__( self.start_pref_ae = start_pref_ae self.limit_pref_ae = limit_pref_ae self.enable_atom_ener_coeff = enable_atom_ener_coeff + self.intensive_ener_virial = intensive_ener_virial self.has_e = self.start_pref_e != 0.0 or self.limit_pref_e != 0.0 self.has_fr = self.start_pref_fr != 0.0 or self.limit_pref_fr != 0.0 self.has_fm = self.start_pref_fm != 0.0 or self.limit_pref_fm != 0.0 @@ -117,6 +127,10 @@ def call( loss = 0 more_loss = {} atom_norm = 1.0 / natoms + # Normalization exponent controls loss scaling with system size: + # - norm_exp=2 (intensive_ener_virial=True): loss uses 1/N² scaling, making it independent of system size + # - norm_exp=1 (intensive_ener_virial=False, legacy): loss uses 1/N scaling, which varies with system size + norm_exp = 2 if self.intensive_ener_virial else 1 if self.has_e: energy_pred = model_dict["energy"] @@ -130,7 +144,7 @@ def call( energy_pred = xp.sum(atom_ener_coeff * atom_ener_pred, axis=1) if self.loss_func == "mse": l2_ener_loss = xp.mean(xp.square(energy_pred - energy_label)) - loss += atom_norm * (pref_e * l2_ener_loss) + loss += atom_norm**norm_exp * (pref_e * l2_ener_loss) more_loss["rmse_e"] = self.display_if_exist( xp.sqrt(l2_ener_loss) * atom_norm, find_energy ) @@ -238,7 +252,7 @@ def call( diff_v = virial_label - virial_pred if self.loss_func == "mse": l2_virial_loss = xp.mean(xp.square(diff_v)) - loss += atom_norm * (pref_v * l2_virial_loss) + loss += atom_norm**norm_exp * (pref_v * l2_virial_loss) more_loss["rmse_v"] = self.display_if_exist( xp.sqrt(l2_virial_loss) * atom_norm, find_virial ) @@ -326,7 +340,7 @@ def serialize(self) -> dict: """Serialize the loss module.""" return { "@class": "EnergySpinLoss", - "@version": 1, + "@version": 2, "starter_learning_rate": self.starter_learning_rate, "start_pref_e": self.start_pref_e, "limit_pref_e": self.limit_pref_e, @@ -340,12 +354,17 @@ def serialize(self) -> dict: "limit_pref_ae": self.limit_pref_ae, "enable_atom_ener_coeff": self.enable_atom_ener_coeff, "loss_func": self.loss_func, + "intensive_ener_virial": self.intensive_ener_virial, } @classmethod def deserialize(cls, data: dict) -> "EnergySpinLoss": """Deserialize the loss module.""" data = data.copy() - check_version_compatibility(data.pop("@version"), 1, 1) + version = data.pop("@version") + check_version_compatibility(version, 2, 1) data.pop("@class") + # Backward compatibility: version 1 used legacy normalization + if version < 2: + data.setdefault("intensive_ener_virial", False) return cls(**data) diff --git 
a/deepmd/pd/loss/ener.py b/deepmd/pd/loss/ener.py index 71ca164b7d..d4ea5ce587 100644 --- a/deepmd/pd/loss/ener.py +++ b/deepmd/pd/loss/ener.py @@ -61,6 +61,7 @@ def __init__( use_huber: bool = False, huber_delta: float | list[float] = 0.01, f_use_norm: bool = False, + intensive_ener_virial: bool = False, **kwargs: Any, ) -> None: r"""Construct a layer to compute loss on energy, force and virial. @@ -119,6 +120,13 @@ def __init__( f_use_norm : bool If True, use L2 norm of force vectors for loss calculation. Not implemented in PD backend, only for serialization compatibility. + intensive_ener_virial : bool + Controls size normalization for energy and virial loss terms. For the non-Huber + MSE path, setting this to true applies 1/N^2 scaling, while false uses the legacy + 1/N scaling. For MAE, the normalization remains 1/N. For Huber loss, residuals are + first normalized by 1/N before applying the Huber formula, so this option does not + provide a pure 1/N versus 1/N^2 toggle in that path. The default is false for + backward compatibility with models trained using deepmd-kit <= 3.1.3. **kwargs Other keyword arguments. """ @@ -161,6 +169,7 @@ def __init__( self.inference = inference self.use_huber = use_huber self.huber_delta = huber_delta + self.intensive_ener_virial = intensive_ener_virial ( self._huber_delta_energy, self._huber_delta_force, @@ -218,6 +227,10 @@ def forward( # more_loss['log_keys'] = [] # showed when validation on the fly # more_loss['test_keys'] = [] # showed when doing dp test atom_norm = 1.0 / natoms + # Normalization exponent controls loss scaling with system size: + # - norm_exp=2 (intensive_ener_virial=True): loss uses 1/N² scaling, making it independent of system size + # - norm_exp=1 (intensive_ener_virial=False, legacy): loss uses 1/N scaling, which varies with system size + norm_exp = 2 if self.intensive_ener_virial else 1 if self.has_e and "energy" in model_pred and "energy" in label: energy_pred = model_pred["energy"] energy_label = label["energy"] @@ -243,7 +256,7 @@ def forward( l2_ener_loss.detach(), find_energy ) if not self.use_huber: - loss += atom_norm * (pref_e * l2_ener_loss) + loss += atom_norm**norm_exp * (pref_e * l2_ener_loss) else: l_huber_loss = custom_huber_loss( atom_norm * energy_pred, @@ -414,7 +427,7 @@ def forward( l2_virial_loss.detach(), find_virial ) if not self.use_huber: - loss += atom_norm * (pref_v * l2_virial_loss) + loss += atom_norm**norm_exp * (pref_v * l2_virial_loss) else: l_huber_loss = custom_huber_loss( atom_norm * model_pred["virial"].reshape([-1]), @@ -564,7 +577,7 @@ def serialize(self) -> dict: """ return { "@class": "EnergyLoss", - "@version": 2, + "@version": 3, "starter_learning_rate": self.starter_learning_rate, "start_pref_e": self.start_pref_e, "limit_pref_e": self.limit_pref_e, @@ -585,6 +598,7 @@ def serialize(self) -> dict: "huber_delta": self.huber_delta, "loss_func": self.loss_func, "f_use_norm": self.f_use_norm, + "intensive_ener_virial": self.intensive_ener_virial, } @classmethod @@ -602,8 +616,12 @@ def deserialize(cls, data: dict) -> "TaskLoss": The deserialized loss module """ data = data.copy() - check_version_compatibility(data.pop("@version"), 2, 1) + version = data.pop("@version") + check_version_compatibility(version, 3, 1) data.pop("@class") + # Handle backward compatibility for older versions without intensive_ener_virial + if version < 3: + data.setdefault("intensive_ener_virial", False) return cls(**data) diff --git a/deepmd/pt/loss/ener.py b/deepmd/pt/loss/ener.py index f793cefc96..f1dffee218 
100644 --- a/deepmd/pt/loss/ener.py +++ b/deepmd/pt/loss/ener.py @@ -61,6 +61,7 @@ def __init__( use_huber: bool = False, f_use_norm: bool = False, huber_delta: float | list[float] = 0.01, + intensive_ener_virial: bool = False, **kwargs: Any, ) -> None: r"""Construct a layer to compute loss on energy, force and virial. @@ -120,6 +121,13 @@ def __init__( The threshold delta (D) used for Huber loss, controlling transition between L2 and L1 loss. It can be either one float shared by all terms or a list of three values ordered as [energy, force, virial]. + intensive_ener_virial : bool + Controls size normalization for energy and virial loss terms. For the non-Huber + MSE path, setting this to true applies 1/N^2 scaling, while false uses the legacy + 1/N scaling. For MAE, the normalization remains 1/N. For Huber loss, residuals are + first normalized by 1/N before applying the Huber formula, so this option does not + provide a pure 1/N versus 1/N^2 toggle in that path. The default is false for + backward compatibility with models trained using deepmd-kit <= 3.1.3. **kwargs Other keyword arguments. """ @@ -163,6 +171,7 @@ def __init__( self.inference = inference self.use_huber = use_huber self.f_use_norm = f_use_norm + self.intensive_ener_virial = intensive_ener_virial if self.f_use_norm and not (self.use_huber or self.loss_func == "mae"): raise RuntimeError( "f_use_norm can only be True when use_huber or loss_func='mae'." @@ -225,6 +234,10 @@ def forward( # more_loss['log_keys'] = [] # showed when validation on the fly # more_loss['test_keys'] = [] # showed when doing dp test atom_norm = 1.0 / natoms + # Normalization exponent controls loss scaling with system size: + # - norm_exp=2 (intensive_ener_virial=True): loss uses 1/N² scaling, making it independent of system size + # - norm_exp=1 (intensive_ener_virial=False, legacy): loss uses 1/N scaling, which varies with system size + norm_exp = 2 if self.intensive_ener_virial else 1 if self.has_e and "energy" in model_pred and "energy" in label: energy_pred = model_pred["energy"] energy_label = label["energy"] @@ -250,7 +263,7 @@ def forward( l2_ener_loss.detach(), find_energy ) if not self.use_huber: - loss += atom_norm * (pref_e * l2_ener_loss) + loss += atom_norm**norm_exp * (pref_e * l2_ener_loss) else: l_huber_loss = custom_huber_loss( atom_norm * energy_pred, @@ -432,7 +445,7 @@ def forward( l2_virial_loss.detach(), find_virial ) if not self.use_huber: - loss += atom_norm * (pref_v * l2_virial_loss) + loss += atom_norm**norm_exp * (pref_v * l2_virial_loss) else: l_huber_loss = custom_huber_loss( atom_norm * model_pred["virial"].reshape(-1), @@ -599,7 +612,7 @@ def serialize(self) -> dict: """ return { "@class": "EnergyLoss", - "@version": 2, + "@version": 3, "starter_learning_rate": self.starter_learning_rate, "start_pref_e": self.start_pref_e, "limit_pref_e": self.limit_pref_e, @@ -620,6 +633,7 @@ def serialize(self) -> dict: "huber_delta": self.huber_delta, "loss_func": self.loss_func, "f_use_norm": self.f_use_norm, + "intensive_ener_virial": self.intensive_ener_virial, } @classmethod @@ -637,8 +651,12 @@ def deserialize(cls, data: dict) -> "TaskLoss": The deserialized loss module """ data = data.copy() - check_version_compatibility(data.pop("@version"), 2, 1) + version = data.pop("@version") + check_version_compatibility(version, 3, 1) data.pop("@class") + # Handle backward compatibility for older versions without intensive_ener_virial + if version < 3: + data.setdefault("intensive_ener_virial", False) return cls(**data) diff --git 
a/deepmd/pt/loss/ener_spin.py b/deepmd/pt/loss/ener_spin.py index df9885109d..59ad04a169 100644 --- a/deepmd/pt/loss/ener_spin.py +++ b/deepmd/pt/loss/ener_spin.py @@ -40,6 +40,7 @@ def __init__( enable_atom_ener_coeff: bool = False, loss_func: str = "mse", inference: bool = False, + intensive_ener_virial: bool = False, **kwargs: Any, ) -> None: r"""Construct a layer to compute loss on energy, real force, magnetic force and virial. @@ -76,6 +77,14 @@ def __init__( MAE loss is less sensitive to outliers compared to MSE loss. inference : bool If true, it will output all losses found in output, ignoring the pre-factors. + intensive_ener_virial : bool + Controls the normalization exponent used for the MSE energy and virial loss terms. + If true, those MSE terms use intensive normalization by the square of the number of + atoms (1/N^2), which is consistent with per-atom RMSE reporting. If false (default), + the legacy normalization (1/N) is used for those MSE terms. Note that this 1/N^2 + behavior does not apply to the MAE code paths: MAE energy/virial losses do not use + the `intensive_ener_virial` exponent in the same way. The default is false for backward + compatibility with models trained using deepmd-kit <= 3.1.3. **kwargs Other keyword arguments. """ @@ -101,6 +110,7 @@ def __init__( self.limit_pref_ae = limit_pref_ae self.enable_atom_ener_coeff = enable_atom_ener_coeff self.inference = inference + self.intensive_ener_virial = intensive_ener_virial def forward( self, @@ -145,6 +155,10 @@ def forward( # more_loss['log_keys'] = [] # showed when validation on the fly # more_loss['test_keys'] = [] # showed when doing dp test atom_norm = 1.0 / natoms + # Normalization exponent controls loss scaling with system size: + # - norm_exp=2 (intensive_ener_virial=True): loss uses 1/N² scaling, making it independent of system size + # - norm_exp=1 (intensive_ener_virial=False, legacy): loss uses 1/N scaling, which varies with system size + norm_exp = 2 if self.intensive_ener_virial else 1 if self.has_e and "energy" in model_pred and "energy" in label: energy_pred = model_pred["energy"] energy_label = label["energy"] @@ -169,7 +183,7 @@ def forward( more_loss["l2_ener_loss"] = self.display_if_exist( l2_ener_loss.detach(), find_energy ) - loss += atom_norm * (pref_e * l2_ener_loss) + loss += atom_norm**norm_exp * (pref_e * l2_ener_loss) rmse_e = l2_ener_loss.sqrt() * atom_norm more_loss["rmse_e"] = self.display_if_exist( rmse_e.detach(), find_energy @@ -324,7 +338,7 @@ def forward( more_loss["l2_virial_loss"] = self.display_if_exist( l2_virial_loss.detach(), find_virial ) - loss += atom_norm * (pref_v * l2_virial_loss) + loss += atom_norm**norm_exp * (pref_v * l2_virial_loss) rmse_v = l2_virial_loss.sqrt() * atom_norm more_loss["rmse_v"] = self.display_if_exist( rmse_v.detach(), find_virial @@ -413,7 +427,7 @@ def serialize(self) -> dict: """Serialize the loss module.""" return { "@class": "EnergySpinLoss", - "@version": 1, + "@version": 2, "starter_learning_rate": self.starter_learning_rate, "start_pref_e": self.start_pref_e, "limit_pref_e": self.limit_pref_e, @@ -427,12 +441,17 @@ def serialize(self) -> dict: "limit_pref_ae": self.limit_pref_ae, "enable_atom_ener_coeff": self.enable_atom_ener_coeff, "loss_func": self.loss_func, + "intensive_ener_virial": self.intensive_ener_virial, } @classmethod def deserialize(cls, data: dict) -> "EnergySpinLoss": """Deserialize the loss module.""" data = data.copy() - check_version_compatibility(data.pop("@version"), 1, 1) + version = data.pop("@version") + 
check_version_compatibility(version, 2, 1) data.pop("@class") + # Handle backward compatibility for older versions without intensive_ener_virial + if version < 2: + data.setdefault("intensive_ener_virial", False) return cls(**data) diff --git a/deepmd/tf/loss/ener.py b/deepmd/tf/loss/ener.py index b477a1005f..da7e5d4462 100644 --- a/deepmd/tf/loss/ener.py +++ b/deepmd/tf/loss/ener.py @@ -100,6 +100,14 @@ class EnerStdLoss(Loss): f_use_norm : bool If True, use L2 norm of force vectors for loss calculation. Not implemented in TF backend, only for serialization compatibility. + intensive_ener_virial : bool + Controls the normalization used for energy and virial terms in the non-Huber + MSE branch of this TF loss. If true, that branch uses intensive normalization + by the square of the number of atoms (1/N^2); if false (default), it uses the + legacy normalization (1/N). When ``use_huber=True``, the residual is still + normalized by 1/N before applying the Huber loss, so ``intensive_ener_virial`` may not + change behavior in that path. The default is false for backward compatibility + with models trained using deepmd-kit <= 3.1.3. **kwargs Other keyword arguments. """ @@ -126,6 +134,7 @@ def __init__( huber_delta: float | list[float] = 0.01, loss_func: str = "mse", f_use_norm: bool = False, + intensive_ener_virial: bool = False, **kwargs: Any, ) -> None: if loss_func != "mse": @@ -167,6 +176,7 @@ def __init__( ) self.use_huber = use_huber self.huber_delta = huber_delta + self.intensive_ener_virial = intensive_ener_virial ( self._huber_delta_energy, self._huber_delta_force, @@ -354,9 +364,13 @@ def build( loss = 0 more_loss = {} + # Normalization exponent controls loss scaling with system size: + # - norm_exp=2 (intensive_ener_virial=True): loss uses 1/N² scaling, making it independent of system size + # - norm_exp=1 (intensive_ener_virial=False, legacy): loss uses 1/N scaling, which varies with system size + norm_exp = 2 if self.intensive_ener_virial else 1 if self.has_e: if not self.use_huber: - loss += atom_norm_ener * (pref_e * l2_ener_loss) + loss += atom_norm_ener**norm_exp * (pref_e * l2_ener_loss) else: l_huber_loss = custom_huber_loss( atom_norm_ener * energy, @@ -380,7 +394,9 @@ def build( ) if self.has_v: if not self.use_huber: - loss += global_cvt_2_ener_float(atom_norm * (pref_v * l2_virial_loss)) + loss += global_cvt_2_ener_float( + atom_norm**norm_exp * (pref_v * l2_virial_loss) + ) else: l_huber_loss = custom_huber_loss( atom_norm * tf.reshape(virial, [-1]), @@ -541,7 +557,7 @@ def serialize(self, suffix: str = "") -> dict: """ return { "@class": "EnergyLoss", - "@version": 2, + "@version": 3, "starter_learning_rate": self.starter_learning_rate, "start_pref_e": self.start_pref_e, "limit_pref_e": self.limit_pref_e, @@ -562,6 +578,7 @@ def serialize(self, suffix: str = "") -> dict: "huber_delta": self.huber_delta, "loss_func": self.loss_func, "f_use_norm": self.f_use_norm, + "intensive_ener_virial": self.intensive_ener_virial, } @classmethod @@ -581,8 +598,12 @@ def deserialize(cls, data: dict, suffix: str = "") -> "Loss": The deserialized loss module """ data = data.copy() - check_version_compatibility(data.pop("@version"), 2, 1) + version = data.pop("@version") + check_version_compatibility(version, 3, 1) data.pop("@class") + # Handle backward compatibility for older versions without intensive_ener_virial + if version < 3: + data.setdefault("intensive_ener_virial", False) return cls(**data) @@ -606,6 +627,7 @@ def __init__( enable_atom_ener_coeff: bool = False, use_spin: list | 
None = None, loss_func: str = "mse", + intensive_ener_virial: bool = False, ) -> None: if loss_func != "mse": raise NotImplementedError( @@ -628,6 +650,7 @@ def __init__( self.relative_f = relative_f self.enable_atom_ener_coeff = enable_atom_ener_coeff self.use_spin = use_spin + self.intensive_ener_virial = intensive_ener_virial self.has_e = self.start_pref_e != 0.0 or self.limit_pref_e != 0.0 self.has_fr = self.start_pref_fr != 0.0 or self.limit_pref_fr != 0.0 self.has_fm = self.start_pref_fm != 0.0 or self.limit_pref_fm != 0.0 @@ -713,6 +736,10 @@ def build( atom_norm = 1.0 / global_cvt_2_tf_float(natoms[0]) atom_norm_ener = 1.0 / global_cvt_2_ener_float(natoms[0]) + # loss normalization exponent: + # - norm_exp=2 (intensive_ener_virial=True): loss uses 1/N² scaling, making it independent of system size + # - norm_exp=1 (intensive_ener_virial=False, legacy): loss uses 1/N scaling, which varies with system size + norm_exp = 2 if self.intensive_ener_virial else 1 pref_e = global_cvt_2_ener_float( find_energy * ( @@ -762,7 +789,7 @@ def build( l2_loss = 0 more_loss = {} if self.has_e: - l2_loss += atom_norm_ener * (pref_e * l2_ener_loss) + l2_loss += atom_norm_ener**norm_exp * (pref_e * l2_ener_loss) more_loss["l2_ener_loss"] = self.display_if_exist(l2_ener_loss, find_energy) if self.has_fr: l2_loss += global_cvt_2_ener_float(pref_fr * l2_force_r_loss) @@ -775,7 +802,9 @@ def build( l2_force_m_loss, find_force ) if self.has_v: - l2_loss += global_cvt_2_ener_float(atom_norm * (pref_v * l2_virial_loss)) + l2_loss += global_cvt_2_ener_float( + atom_norm**norm_exp * (pref_v * l2_virial_loss) + ) more_loss["l2_virial_loss"] = self.display_if_exist(l2_virial_loss, find_virial) if self.has_ae: l2_loss += global_cvt_2_ener_float(pref_ae * l2_atom_ener_loss) @@ -963,6 +992,67 @@ def label_requirement(self) -> list[DataRequirementItem]: ) return data_requirements + def serialize(self, suffix: str = "") -> dict: + """Serialize the loss module. + + Parameters + ---------- + suffix : str + The suffix of the loss module + + Returns + ------- + dict + The serialized loss module + """ + return { + "@class": "EnergySpinLoss", + "@version": 2, + "starter_learning_rate": self.starter_learning_rate, + "start_pref_e": self.start_pref_e, + "limit_pref_e": self.limit_pref_e, + "start_pref_fr": self.start_pref_fr, + "limit_pref_fr": self.limit_pref_fr, + "start_pref_fm": self.start_pref_fm, + "limit_pref_fm": self.limit_pref_fm, + "start_pref_v": self.start_pref_v, + "limit_pref_v": self.limit_pref_v, + "start_pref_ae": self.start_pref_ae, + "limit_pref_ae": self.limit_pref_ae, + "start_pref_pf": self.start_pref_pf, + "limit_pref_pf": self.limit_pref_pf, + "relative_f": self.relative_f, + "enable_atom_ener_coeff": self.enable_atom_ener_coeff, + "use_spin": self.use_spin, + "loss_func": self.loss_func, + "intensive_ener_virial": self.intensive_ener_virial, + } + + @classmethod + def deserialize(cls, data: dict, suffix: str = "") -> "EnerSpinLoss": + """Deserialize the loss module. 
+ + Parameters + ---------- + data : dict + The serialized loss module + suffix : str + The suffix of the loss module + + Returns + ------- + EnerSpinLoss + The deserialized loss module + """ + data = data.copy() + version = data.pop("@version") + check_version_compatibility(version, 2, 1) + data.pop("@class") + # Handle backward compatibility for older versions without intensive_ener_virial + if version < 2: + data.setdefault("intensive_ener_virial", False) + return cls(**data) + class EnerDipoleLoss(Loss): def __init__( diff --git a/deepmd/utils/argcheck.py b/deepmd/utils/argcheck.py index 4ac49cf4d8..9e62de544e 100644 --- a/deepmd/utils/argcheck.py +++ b/deepmd/utils/argcheck.py @@ -3234,6 +3234,14 @@ def loss_ener() -> list[Argument]: "This treats the force vector as a whole rather than three independent components. " "Only effective when loss_func='mae' or use_huber=True." ) + doc_intensive_ener_virial = ( + "Controls intensive normalization for energy and virial loss terms in the current implementation. " + "For non-Huber MSE energy/virial terms, setting this to true uses 1/N^2 normalization instead of the legacy 1/N scaling. " + "This matches per-atom-style reporting more closely for those terms. " + "For MAE, the normalization remains 1/N. When `use_huber=True`, the residual is already scaled by 1/N before applying the Huber loss, " + "so this flag may have limited or no effect for those terms. " + "The default is false for backward compatibility with models trained using deepmd-kit <= 3.1.3." + ) return [ Argument( "start_pref_e", @@ -3376,6 +3384,13 @@ def loss_ener() -> list[Argument]: default=0.01, doc=doc_huber_delta, ), + Argument( + "intensive_ener_virial", + bool, + optional=True, + default=False, + doc=doc_intensive_ener_virial, + ), ] @@ -3401,6 +3416,14 @@ def loss_ener_spin() -> list[Argument]: "MAE loss is less sensitive to outliers compared to MSE loss. " "Future extensions may support additional loss types." ) + doc_intensive_ener_virial = ( + "Controls normalization of the energy and virial loss terms. " + "For `loss_func='mse'`, if true, energy and virial losses are computed as intensive quantities, " + "normalized by the square of the number of atoms (1/N^2); if false (default), the legacy normalization " + "(1/N) is used. " + "For `loss_func='mae'`, this option does not change the existing MAE formulations;" + "The default is false for backward compatibility with models trained using deepmd-kit <= 3.1.3." + ) return [ Argument( "start_pref_e", @@ -3501,6 +3524,13 @@ def loss_ener_spin() -> list[Argument]: default="mse", doc=doc_loss_func, ), + Argument( + "intensive_ener_virial", + bool, + optional=True, + default=False, + doc=doc_intensive_ener_virial, + ), ] diff --git a/doc/model/train-energy-spin.md b/doc/model/train-energy-spin.md index 41b57c2f29..eebc33fe33 100644 --- a/doc/model/train-energy-spin.md +++ b/doc/model/train-energy-spin.md @@ -86,7 +86,21 @@ The spin loss function $L$ for training energy is given by $$L = p_e L_e + p_{fr} L_{fr} + p_{fm} L_{fm} + p_v L_v$$ -where $L_e$, $L_{fr}$, $L_{fm}$ and $L_v$ denote the loss in energy, atomic force, magnatic force and virial, respectively. $p_e$, $p_{fr}$, $p_{fm}$ and $p_v$ give the prefactors of the energy, atomic force, magnatic force and virial losses. +where $L_e$, $L_{fr}$, $L_{fm}$ and $L_v$ denote the loss in energy, atomic force, magnetic force and virial, respectively. $p_e$, $p_{fr}$, $p_{fm}$ and $p_v$ give the prefactors of the energy, atomic force, magnetic force and virial losses. 
+ +By default, the energy and virial losses are normalized by the number of atoms $N$. When **intensive loss normalization** is enabled, these terms are instead normalized by $N^2$. For the energy loss, this converts it to the square of the per-atom energy error: + +```math + L_E^{\text{intensive}}(\boldsymbol{x};\boldsymbol{\theta})=\left(\frac{E(\boldsymbol{x};\boldsymbol{\theta})-E^*}{N}\right)^2 = \frac{1}{N^2}(E(\boldsymbol{x};\boldsymbol{\theta})-E^*)^2, +``` + +and similarly for the virial loss: + +```math + L_\Xi^{\text{intensive}}(\boldsymbol{x};\boldsymbol{\theta})=\frac{1}{9N^2}\sum_{\alpha,\beta=1}^{3}(\Xi_{\alpha\beta}(\boldsymbol{x};\boldsymbol{\theta})-\Xi_{\alpha\beta}^*)^2. +``` + +Intensive normalization ensures the loss scale remains consistent across systems with different numbers of atoms $N$, which is highly recommended for multi-task learning. The prefectors may not be a constant, rather it changes linearly with the learning rate. Taking the atomic force prefactor for example, at training step $t$, it is given by @@ -111,8 +125,10 @@ The {ref}`loss ` section in the `input.json` is "limit_pref_fm": 10.0, "start_pref_v": 0, "limit_pref_v": 0, - "loss_func": "mse" + "loss_func": "mse", + "intensive_ener_virial": false }, + ``` The options {ref}`start_pref_e `, {ref}`limit_pref_e `, {ref}`start_pref_fr `, {ref}`limit_pref_fm `, {ref}`start_pref_v ` and {ref}`limit_pref_v ` determine the start and limit prefactors of energy, atomic force, magnatic force and virial, respectively. @@ -124,6 +140,8 @@ The {ref}`loss_func ` option specifies the type of lo When using `loss_func="mse"`, the training will output `rmse_e`, `rmse_fr`, `rmse_fm`, `rmse_v` metrics (root mean square errors). When using `loss_func="mae"`, the training will output `mae_e`, `mae_fr`, `mae_fm`, `mae_v` metrics (mean absolute errors). +The {ref}`intensive_ener_virial ` option (default is `false`) controls the normalization of the energy and virial loss terms when `loss_func="mse"`. If set to `true`, these terms are normalized by $1/N^2$ (intensive), ensuring the loss scale is independent of the system size $N$. If `false`, the legacy $1/N$ normalization is used. + If one does not want to train with virial, then he/she may set the virial prefactors {ref}`start_pref_v ` and {ref}`limit_pref_v ` to 0. ## Data format diff --git a/doc/model/train-energy.md b/doc/model/train-energy.md index ecc6f234b4..d936ce9e16 100644 --- a/doc/model/train-energy.md +++ b/doc/model/train-energy.md @@ -57,6 +57,20 @@ The properties $\eta$ of the energy loss function could be energy $E$, force $\b where $F_{k,\alpha}$ is the $\alpha$-th component of the force on atom $k$, and the superscript $\ast$ indicates the label of the property that should be provided in advance. Using $N$ ensures that each loss of fitting property is averaged over atomic contributions before they contribute to the total loss by weight. +By default, the energy and virial losses are normalized by the number of atoms $N$ as shown above. When **intensive loss normalization** is enabled, these terms are instead normalized by $N^2$. 
For the energy loss, this converts it to the square of the per-atom energy error: + +```math + L_E^{\text{intensive}}(\boldsymbol{x};\boldsymbol{\theta})=\left(\frac{E(\boldsymbol{x};\boldsymbol{\theta})-E^*}{N}\right)^2 = \frac{1}{N^2}(E(\boldsymbol{x};\boldsymbol{\theta})-E^*)^2, +``` + +and similarly for the virial loss: + +```math + L_\Xi^{\text{intensive}}(\boldsymbol{x};\boldsymbol{\theta})=\frac{1}{9N^2}\sum_{\alpha,\beta=1}^{3}(\Xi_{\alpha\beta}(\boldsymbol{x};\boldsymbol{\theta})-\Xi_{\alpha\beta}^*)^2. +``` + +Intensive normalization makes the loss magnitudes independent of the system size $N$ (assuming per-atom errors are consistent), which is crucial for multi-task training involving datasets with varying system sizes to prevent larger systems from dominating the training process. + If part of atoms is more important than others, for example, certain atoms play an essential role when calculating free energy profiles or kinetic isotope effects, the MSE of atomic forces with prefactors $q_{k}$ can also be used as the loss function: ```math @@ -117,8 +131,10 @@ The {ref}`loss ` section in the `input.json` is "limit_pref_f": 1, "start_pref_v": 0, "limit_pref_v": 0, - "loss_func": "mse" + "loss_func": "mse", + "intensive_ener_virial": false } + ``` The options {ref}`start_pref_e `, {ref}`limit_pref_e `, {ref}`start_pref_f `, {ref}`limit_pref_f `, {ref}`start_pref_v ` and {ref}`limit_pref_v ` determine the start and limit prefactors of energy, force and virial, respectively. @@ -130,6 +146,8 @@ The {ref}`loss_func ` option specifies the type of loss fu When using `loss_func="mse"`, the training will output `rmse_e`, `rmse_f`, `rmse_v` metrics (root mean square errors). When using `loss_func="mae"`, the training will output `mae_e`, `mae_f`, `mae_v` metrics (mean absolute errors). +The {ref}`intensive_ener_virial ` option (default is `false`) controls the normalization of the energy and virial loss terms when `loss_func="mse"`. If set to `true`, these terms are normalized by $1/N^2$ (making them "intensive"), ensuring the loss scale remains consistent across different system sizes $N$. If `false`, the legacy $1/N$ normalization is used. This option is highly recommended for multi-task learning. + If one does not want to train with virial, then he/she may set the virial prefactors {ref}`start_pref_v ` and {ref}`limit_pref_v ` to 0. [^1]: This section is built upon Jinzhe Zeng, Duo Zhang, Denghui Lu, Pinghui Mo, Zeyu Li, Yixiao Chen, Marián Rynik, Li'ang Huang, Ziyao Li, Shaochen Shi, Yingze Wang, Haotian Ye, Ping Tuo, Jiabin Yang, Ye Ding, Yifan Li, Davide Tisi, Qiyu Zeng, Han Bao, Yu Xia, Jiameng Huang, Koki Muraoka, Yibo Wang, Junhan Chang, Fengbo Yuan, Sigbjørn Løland Bore, Chun Cai, Yinnian Lin, Bo Wang, Jiayan Xu, Jia-Xin Zhu, Chenxing Luo, Yuzhi Zhang, Rhys E. A. Goodall, Wenshuo Liang, Anurag Kumar Singh, Sikai Yao, Jingchao Zhang, Renata Wentzcovitch, Jiequn Han, Jie Liu, Weile Jia, Darrin M. York, Weinan E, Roberto Car, Linfeng Zhang, Han Wang, [J. Chem. Phys. 159, 054801 (2023)](https://doi.org/10.1063/5.0155600) licensed under a [Creative Commons Attribution (CC BY) license](http://creativecommons.org/licenses/by/4.0/). 
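To make the normalization toggle documented above concrete, here is a minimal NumPy sketch of the MSE energy term with the `norm_exp` factor used in the backends; the function name and the numbers are illustrative only, not part of the deepmd-kit API.

```python
# Minimal sketch (plain NumPy, not the backend code) of the MSE energy term with the
# intensive_ener_virial toggle: True applies 1/N^2 scaling, False the legacy 1/N scaling.
import numpy as np

def energy_term(e_pred, e_label, natoms, pref_e, intensive_ener_virial):
    atom_norm = 1.0 / natoms
    norm_exp = 2 if intensive_ener_virial else 1
    l2_ener_loss = np.mean((e_pred - e_label) ** 2)  # batch MSE on total energies
    return atom_norm**norm_exp * pref_e * l2_ener_loss

e_pred, e_label = np.array([1.0]), np.array([2.0])   # same residual for both systems
small = energy_term(e_pred, e_label, natoms=4, pref_e=1.0, intensive_ener_virial=True)
large = energy_term(e_pred, e_label, natoms=8, pref_e=1.0, intensive_ener_virial=True)
print(large / small)  # 0.25 = (4/8)**2: the intensive term scales with 1/N^2
```

With `intensive_ener_virial=False` the same ratio would be 0.5, matching the legacy 1/N behavior that the regression tests below exercise for both the standard and spin losses.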
diff --git a/source/tests/consistent/loss/test_ener.py b/source/tests/consistent/loss/test_ener.py index dcb3988173..08229606e2 100644 --- a/source/tests/consistent/loss/test_ener.py +++ b/source/tests/consistent/loss/test_ener.py @@ -67,11 +67,19 @@ ("mse", "mae"), # loss_func (False, True), # f_use_norm (False, True), # mae (dp test extra MAE metrics) + (False, True), # intensive_ener_virial ) class TestEner(CommonTest, LossTest, unittest.TestCase): @property def data(self) -> dict: - (use_huber, enable_atom_ener_coeff, loss_func, f_use_norm, _mae) = self.param + ( + use_huber, + enable_atom_ener_coeff, + loss_func, + f_use_norm, + _mae, + intensive_ener_virial, + ) = self.param return { "start_pref_e": 0.02, "limit_pref_e": 1.0, @@ -87,17 +95,32 @@ def data(self) -> dict: "enable_atom_ener_coeff": enable_atom_ener_coeff, "loss_func": loss_func, "f_use_norm": f_use_norm, + "intensive_ener_virial": intensive_ener_virial, } @property def skip_tf(self) -> bool: - (_use_huber, _enable_atom_ener_coeff, loss_func, f_use_norm, _mae) = self.param + ( + _use_huber, + _enable_atom_ener_coeff, + loss_func, + f_use_norm, + _mae, + intensive_ener_virial, + ) = self.param # Skip TF for MAE loss tests (not implemented in TF backend) return CommonTest.skip_tf or loss_func == "mae" or f_use_norm @property def skip_pd(self) -> bool: - (_use_huber, _enable_atom_ener_coeff, loss_func, f_use_norm, _mae) = self.param + ( + _use_huber, + _enable_atom_ener_coeff, + loss_func, + f_use_norm, + _mae, + intensive_ener_virial, + ) = self.param # Skip Paddle for MAE loss tests (not implemented in Paddle backend) return not INSTALLED_PD or loss_func == "mae" or f_use_norm @@ -116,7 +139,14 @@ def skip_pd(self) -> bool: args = loss_ener() def setUp(self) -> None: - (use_huber, _enable_atom_ener_coeff, loss_func, f_use_norm, mae) = self.param + ( + use_huber, + _enable_atom_ener_coeff, + loss_func, + f_use_norm, + mae, + intensive_ener_virial, + ) = self.param # Skip invalid combinations if f_use_norm and not (use_huber or loss_func == "mae"): self.skipTest("f_use_norm requires either use_huber or loss_func='mae'") @@ -557,3 +587,228 @@ def rtol(self) -> float: @property def atol(self) -> float: return 1e-10 + + +class TestIntensiveNatomsScaling(unittest.TestCase): + """Regression test for natoms-scaling behavior with intensive normalization. + + This test verifies that MSE energy/virial loss contributions scale with 1/N² when + intensive=True, ensuring the loss is independent of system size. This guards against + future refactors accidentally reverting to 1/N scaling. + """ + + def test_intensive_total_loss_scaling(self) -> None: + """Test that total loss scales correctly with 1/N² for intensive_ener_virial=True. + + This test uses controlled energy/virial residuals to verify that the + total loss contribution scales with 1/N² (intensive) vs 1/N (legacy). + We use identical per-atom residuals across different system sizes to + ensure the raw MSE is the same, then verify the total loss scales as + expected based on the normalization factor. 
+ """ + if not INSTALLED_PT: + self.skipTest("PyTorch not installed") + + nframes = 1 + + # Test with two different system sizes + natoms_small = 4 + natoms_large = 8 # 2x the small system + + # Use fixed energy residual so MSE is predictable + # Energy residual = 1.0, so l2_ener_loss = 1.0 + fixed_energy_diff = 1.0 + + def create_data_with_fixed_residual(natoms: int, energy_diff: float): + """Create predict/label with a fixed energy difference.""" + predict = { + "energy": numpy_to_torch(np.array([1.0])), + "force": numpy_to_torch(np.zeros((nframes, natoms, 3))), + "virial": numpy_to_torch(np.array([[1.0] * 9])), # Virial residual = 1 + "atom_energy": numpy_to_torch(np.ones((nframes, natoms)) / natoms), + } + label = { + "energy": numpy_to_torch(np.array([1.0 + energy_diff])), + "force": numpy_to_torch(np.zeros((nframes, natoms, 3))), + "virial": numpy_to_torch(np.array([[2.0] * 9])), # Virial residual = 1 + "atom_ener": numpy_to_torch( + np.ones((nframes, natoms)) * (1.0 + energy_diff) / natoms + ), + "find_energy": 1.0, + "find_force": 0.0, # Disable force to focus on energy/virial + "find_virial": 1.0, + "find_atom_ener": 0.0, + } + return predict, label + + # Create loss functions + loss_intensive = EnerLossPT( + starter_learning_rate=1e-3, + start_pref_e=1.0, + limit_pref_e=1.0, + start_pref_f=0.0, + limit_pref_f=0.0, + start_pref_v=1.0, + limit_pref_v=1.0, + intensive_ener_virial=True, + ) + loss_legacy = EnerLossPT( + starter_learning_rate=1e-3, + start_pref_e=1.0, + limit_pref_e=1.0, + start_pref_f=0.0, + limit_pref_f=0.0, + start_pref_v=1.0, + limit_pref_v=1.0, + intensive_ener_virial=False, + ) + + # Compute losses for small system + predict_small, label_small = create_data_with_fixed_residual( + natoms_small, fixed_energy_diff + ) + _, loss_intensive_small, _ = loss_intensive( + {}, + lambda p=predict_small: p, + label_small, + natoms_small, + 1e-3, + ) + _, loss_legacy_small, _ = loss_legacy( + {}, + lambda p=predict_small: p, + label_small, + natoms_small, + 1e-3, + ) + + # Compute losses for large system + predict_large, label_large = create_data_with_fixed_residual( + natoms_large, fixed_energy_diff + ) + _, loss_intensive_large, _ = loss_intensive( + {}, + lambda p=predict_large: p, + label_large, + natoms_large, + 1e-3, + ) + _, loss_legacy_large, _ = loss_legacy( + {}, + lambda p=predict_large: p, + label_large, + natoms_large, + 1e-3, + ) + + loss_int_small = float(torch_to_numpy(loss_intensive_small)) + loss_int_large = float(torch_to_numpy(loss_intensive_large)) + loss_leg_small = float(torch_to_numpy(loss_legacy_small)) + loss_leg_large = float(torch_to_numpy(loss_legacy_large)) + + # With same residuals but different natoms: + # - intensive (1/N²): loss should scale as (N_small/N_large)² = (4/8)² = 0.25 + # - legacy (1/N): loss should scale as (N_small/N_large) = 4/8 = 0.5 + + # Verify intensive scaling: loss_large / loss_small should be ~0.25 + natoms_ratio = natoms_small / natoms_large # 0.5 + expected_intensive_ratio = natoms_ratio**2 # 0.25 + expected_legacy_ratio = natoms_ratio # 0.5 + + actual_intensive_ratio = loss_int_large / loss_int_small + actual_legacy_ratio = loss_leg_large / loss_leg_small + + self.assertAlmostEqual( + actual_intensive_ratio, + expected_intensive_ratio, + places=5, + msg=f"Intensive loss scaling: expected {expected_intensive_ratio:.4f}, " + f"got {actual_intensive_ratio:.4f}", + ) + self.assertAlmostEqual( + actual_legacy_ratio, + expected_legacy_ratio, + places=5, + msg=f"Legacy loss scaling: expected {expected_legacy_ratio:.4f}, " + 
f"got {actual_legacy_ratio:.4f}", + ) + + def test_intensive_vs_legacy_scaling_difference(self) -> None: + """Test that intensive_ener_virial=True produces different loss than intensive_ener_virial=False for energy/virial.""" + if not INSTALLED_PT: + self.skipTest("PyTorch not installed") + + rng = np.random.default_rng(20250419) + nframes = 1 + natoms = 8 + + predict = { + "energy": numpy_to_torch(rng.random((nframes,))), + "force": numpy_to_torch(rng.random((nframes, natoms, 3))), + "virial": numpy_to_torch(rng.random((nframes, 9))), + "atom_energy": numpy_to_torch(rng.random((nframes, natoms))), + } + label = { + "energy": numpy_to_torch(rng.random((nframes,))), + "force": numpy_to_torch(rng.random((nframes, natoms, 3))), + "virial": numpy_to_torch(rng.random((nframes, 9))), + "atom_ener": numpy_to_torch(rng.random((nframes, natoms))), + "find_energy": 1.0, + "find_force": 1.0, + "find_virial": 1.0, + "find_atom_ener": 0.0, + } + + # Create loss functions with intensive_ener_virial=True and intensive_ener_virial=False + loss_intensive = EnerLossPT( + starter_learning_rate=1e-3, + start_pref_e=1.0, + limit_pref_e=1.0, + start_pref_f=0.0, + limit_pref_f=0.0, + start_pref_v=1.0, + limit_pref_v=1.0, + intensive_ener_virial=True, + ) + loss_legacy = EnerLossPT( + starter_learning_rate=1e-3, + start_pref_e=1.0, + limit_pref_e=1.0, + start_pref_f=0.0, + limit_pref_f=0.0, + start_pref_v=1.0, + limit_pref_v=1.0, + intensive_ener_virial=False, + ) + + _, loss_val_intensive, _ = loss_intensive( + {}, + lambda: predict, + label, + natoms, + 1e-3, + ) + _, loss_val_legacy, _ = loss_legacy( + {}, + lambda: predict, + label, + natoms, + 1e-3, + ) + + loss_intensive_val = float(torch_to_numpy(loss_val_intensive)) + loss_legacy_val = float(torch_to_numpy(loss_val_legacy)) + + # The losses should be different when intensive differs + # (unless by chance the values are the same, which is unlikely) + # The intensive version should have an extra 1/N factor + expected_ratio = 1.0 / natoms + actual_ratio = loss_intensive_val / loss_legacy_val + + # Allow some tolerance due to floating point + self.assertAlmostEqual( + actual_ratio, + expected_ratio, + places=5, + msg=f"Expected intensive/legacy ratio ~{expected_ratio:.6f}, got {actual_ratio:.6f}", + ) diff --git a/source/tests/consistent/loss/test_ener_spin.py b/source/tests/consistent/loss/test_ener_spin.py index 2e8734c109..bd6561bb78 100644 --- a/source/tests/consistent/loss/test_ener_spin.py +++ b/source/tests/consistent/loss/test_ener_spin.py @@ -47,11 +47,12 @@ @parameterized( ("mse", "mae"), # loss_func (False, True), # mae (dp test extra MAE metrics) + (False, True), # intensive_ener_virial ) class TestEnerSpin(CommonTest, LossTest, unittest.TestCase): @property def data(self) -> dict: - (loss_func, _mae) = self.param + (loss_func, _mae, intensive_ener_virial) = self.param return { "start_pref_e": 0.02, "limit_pref_e": 1.0, @@ -64,6 +65,7 @@ def data(self) -> dict: "start_pref_ae": 1.0, "limit_pref_ae": 1.0, "loss_func": loss_func, + "intensive_ener_virial": intensive_ener_virial, } skip_tf = True @@ -81,11 +83,12 @@ def data(self) -> dict: args = loss_ener_spin() def setUp(self) -> None: - (loss_func, mae) = self.param + (loss_func, mae, intensive_ener_virial) = self.param if loss_func == "mae" and mae: self.skipTest("mae=True with loss_func='mae' is redundant") CommonTest.setUp(self) self.mae = mae + self.intensive_ener_virial = intensive_ener_virial self.learning_rate = 1e-3 rng = np.random.default_rng(20250326) self.nframes = 2 @@ -208,3 +211,248 
@@ def rtol(self) -> float: @property def atol(self) -> float: return 1e-10 + + +class TestEnerSpinIntensiveScaling(unittest.TestCase): + """Regression test for natoms-scaling behavior with intensive normalization. + + This test verifies that MSE energy/virial loss contributions scale with 1/N² when + intensive_ener_virial=True, ensuring the loss is independent of system size. This guards against + future refactors accidentally reverting to 1/N scaling. + """ + + def test_intensive_total_loss_scaling(self) -> None: + """Test that total loss scales correctly with 1/N² for intensive_ener_virial=True. + + This test uses controlled energy/virial residuals to verify that the + total loss contribution scales with 1/N² (intensive) vs 1/N (legacy). + """ + if not INSTALLED_PT: + self.skipTest("PyTorch not installed") + + nframes = 1 + + # Test with two different system sizes + natoms_small = 4 + natoms_large = 8 # 2x the small system + # For spin systems, we have real atoms and virtual (magnetic) atoms + n_magnetic = 2 # Half of atoms have magnetic spins + + # Use fixed energy residual so MSE is predictable + fixed_energy_diff = 1.0 + + def create_data_with_fixed_residual( + natoms: int, n_mag: int, energy_diff: float + ): + """Create predict/label with a fixed energy difference.""" + mask_mag = np.zeros((nframes, natoms, 1), dtype=bool) + mask_mag[:, :n_mag, :] = True + + predict = { + "energy": numpy_to_torch(np.array([1.0])), + "force": numpy_to_torch(np.zeros((nframes, natoms, 3))), + "force_mag": numpy_to_torch(np.zeros((nframes, natoms, 3))), + "mask_mag": mask_mag, + "virial": numpy_to_torch(np.array([[1.0] * 9])), + "atom_energy": numpy_to_torch(np.ones((nframes, natoms)) / natoms), + } + label = { + "energy": numpy_to_torch(np.array([1.0 + energy_diff])), + "force": numpy_to_torch(np.zeros((nframes, natoms, 3))), + "force_mag": numpy_to_torch(np.zeros((nframes, natoms, 3))), + "virial": numpy_to_torch(np.array([[2.0] * 9])), + "atom_ener": numpy_to_torch( + np.ones((nframes, natoms)) * (1.0 + energy_diff) / natoms + ), + "find_energy": 1.0, + "find_force": 0.0, # Disable force to focus on energy/virial + "find_force_mag": 0.0, + "find_virial": 1.0, + "find_atom_ener": 0.0, + } + return predict, label + + # Create loss functions + loss_intensive = EnerSpinLossPT( + starter_learning_rate=1e-3, + start_pref_e=1.0, + limit_pref_e=1.0, + start_pref_fr=0.0, + limit_pref_fr=0.0, + start_pref_fm=0.0, + limit_pref_fm=0.0, + start_pref_v=1.0, + limit_pref_v=1.0, + intensive_ener_virial=True, + ) + loss_legacy = EnerSpinLossPT( + starter_learning_rate=1e-3, + start_pref_e=1.0, + limit_pref_e=1.0, + start_pref_fr=0.0, + limit_pref_fr=0.0, + start_pref_fm=0.0, + limit_pref_fm=0.0, + start_pref_v=1.0, + limit_pref_v=1.0, + intensive_ener_virial=False, + ) + + # Compute losses for small system + predict_small, label_small = create_data_with_fixed_residual( + natoms_small, n_magnetic, fixed_energy_diff + ) + _, loss_intensive_small, _ = loss_intensive( + {}, + lambda p=predict_small: p, + label_small, + natoms_small, + 1e-3, + ) + _, loss_legacy_small, _ = loss_legacy( + {}, + lambda p=predict_small: p, + label_small, + natoms_small, + 1e-3, + ) + + # Compute losses for large system (proportionally scale magnetic atoms) + predict_large, label_large = create_data_with_fixed_residual( + natoms_large, n_magnetic * 2, fixed_energy_diff + ) + _, loss_intensive_large, _ = loss_intensive( + {}, + lambda p=predict_large: p, + label_large, + natoms_large, + 1e-3, + ) + _, loss_legacy_large, _ = loss_legacy( + {}, 
+ lambda p=predict_large: p, + label_large, + natoms_large, + 1e-3, + ) + + loss_int_small = float(torch_to_numpy(loss_intensive_small)) + loss_int_large = float(torch_to_numpy(loss_intensive_large)) + loss_leg_small = float(torch_to_numpy(loss_legacy_small)) + loss_leg_large = float(torch_to_numpy(loss_legacy_large)) + + # With same residuals but different natoms: + # - intensive (1/N²): loss should scale as (N_small/N_large)² = (4/8)² = 0.25 + # - legacy (1/N): loss should scale as (N_small/N_large) = 4/8 = 0.5 + + natoms_ratio = natoms_small / natoms_large # 0.5 + expected_intensive_ratio = natoms_ratio**2 # 0.25 + expected_legacy_ratio = natoms_ratio # 0.5 + + actual_intensive_ratio = loss_int_large / loss_int_small + actual_legacy_ratio = loss_leg_large / loss_leg_small + + self.assertAlmostEqual( + actual_intensive_ratio, + expected_intensive_ratio, + places=5, + msg=f"Intensive loss scaling: expected {expected_intensive_ratio:.4f}, " + f"got {actual_intensive_ratio:.4f}", + ) + self.assertAlmostEqual( + actual_legacy_ratio, + expected_legacy_ratio, + places=5, + msg=f"Legacy loss scaling: expected {expected_legacy_ratio:.4f}, " + f"got {actual_legacy_ratio:.4f}", + ) + + def test_intensive_vs_legacy_scaling_difference(self) -> None: + """Test that intensive_ener_virial=True produces different loss than intensive_ener_virial=False.""" + if not INSTALLED_PT: + self.skipTest("PyTorch not installed") + + rng = np.random.default_rng(20250419) + nframes = 1 + natoms = 8 + n_magnetic = 4 + + mask_mag = np.zeros((nframes, natoms, 1), dtype=bool) + mask_mag[:, :n_magnetic, :] = True + + predict = { + "energy": numpy_to_torch(rng.random((nframes,))), + "force": numpy_to_torch(rng.random((nframes, natoms, 3))), + "force_mag": numpy_to_torch(rng.random((nframes, natoms, 3))), + "mask_mag": mask_mag, + "virial": numpy_to_torch(rng.random((nframes, 9))), + "atom_energy": numpy_to_torch(rng.random((nframes, natoms))), + } + label = { + "energy": numpy_to_torch(rng.random((nframes,))), + "force": numpy_to_torch(rng.random((nframes, natoms, 3))), + "force_mag": numpy_to_torch(rng.random((nframes, natoms, 3))), + "virial": numpy_to_torch(rng.random((nframes, 9))), + "atom_ener": numpy_to_torch(rng.random((nframes, natoms))), + "find_energy": 1.0, + "find_force": 1.0, + "find_force_mag": 1.0, + "find_virial": 1.0, + "find_atom_ener": 0.0, + } + + # Create loss functions with intensive_ener_virial=True and intensive_ener_virial=False + loss_intensive = EnerSpinLossPT( + starter_learning_rate=1e-3, + start_pref_e=1.0, + limit_pref_e=1.0, + start_pref_fr=0.0, + limit_pref_fr=0.0, + start_pref_fm=0.0, + limit_pref_fm=0.0, + start_pref_v=1.0, + limit_pref_v=1.0, + intensive_ener_virial=True, + ) + loss_legacy = EnerSpinLossPT( + starter_learning_rate=1e-3, + start_pref_e=1.0, + limit_pref_e=1.0, + start_pref_fr=0.0, + limit_pref_fr=0.0, + start_pref_fm=0.0, + limit_pref_fm=0.0, + start_pref_v=1.0, + limit_pref_v=1.0, + intensive_ener_virial=False, + ) + + _, loss_val_intensive, _ = loss_intensive( + {}, + lambda: predict, + label, + natoms, + 1e-3, + ) + _, loss_val_legacy, _ = loss_legacy( + {}, + lambda: predict, + label, + natoms, + 1e-3, + ) + + loss_intensive_val = float(torch_to_numpy(loss_val_intensive)) + loss_legacy_val = float(torch_to_numpy(loss_val_legacy)) + + # The losses should be different when intensive differs + # The intensive version should have an extra 1/N factor + expected_ratio = 1.0 / natoms + actual_ratio = loss_intensive_val / loss_legacy_val + + self.assertAlmostEqual( + 
actual_ratio, + expected_ratio, + places=5, + msg=f"Expected intensive/legacy ratio ~{expected_ratio:.6f}, got {actual_ratio:.6f}", + )
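As a usage note, the `@version` bump together with the `setdefault` fallback in `deserialize` means payloads written before this change load with the legacy behavior. Below is a hedged round-trip sketch of that path for the dpmodel `EnergyLoss`, assuming this patch is installed and that the constructor accepts the keyword arguments emitted by `serialize`; it is not part of the official test suite.

```python
# Sketch of the backward-compatibility path added in deserialize(): a payload that
# predates "@version": 3 carries no "intensive_ener_virial" key and falls back to False.
from deepmd.dpmodel.loss.ener import EnergyLoss

loss = EnergyLoss(starter_learning_rate=1e-3)   # other prefactors keep their defaults
data = loss.serialize()                         # "@version": 3, includes the new key
old_data = {k: v for k, v in data.items() if k != "intensive_ener_virial"}
old_data["@version"] = 2                        # emulate a pre-upgrade payload
restored = EnergyLoss.deserialize(old_data)
assert restored.intensive_ener_virial is False  # legacy 1/N normalization retained
```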