Skip to content

Commit 2a9667e

Browse files
OutisLi and Copilot authored
feat: capitalise some info display (#5145)
To make display tidier. <!-- This is an auto-generated comment: release notes by coderabbit.ai --> ## Summary by CodeRabbit * **Style** * Standardized capitalization and label formatting across backend info displays (PT/TF/PD and custom OP status). * Training progress messages now show "Batch" (capital B) for consistent display. * Capitalized DataSystem summary headings. * Title-cased build and runtime summary labels; normalized value formatting (float precision, build variant, computation device, thread counts, visible GPU count). <sub>✏️ Tip: You can customize this high-level summary in your review settings.</sub> <!-- end of auto-generated comment: release notes by coderabbit.ai --> --------- Signed-off-by: OutisLi <137472077+OutisLi@users.noreply.github.com> Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
1 parent e5baf69 commit 2a9667e

6 files changed

Lines changed: 29 additions & 29 deletions

File tree

deepmd/loggers/training.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -12,7 +12,7 @@ def format_training_message(
1212
eta: int | None = None,
1313
) -> str:
1414
"""Format a training message."""
15-
msg = f"batch {batch:7d}: total wall time = {wall_time:.2f} s"
15+
msg = f"Batch {batch:7d}: total wall time = {wall_time:.2f} s"
1616
if isinstance(eta, int):
1717
msg += f", eta = {datetime.timedelta(seconds=int(eta))!s}"
1818
return msg
@@ -49,7 +49,7 @@ def format_training_message_per_task(
4949
# sort rmse
5050
rmse = dict(sorted(rmse.items()))
5151
msg = (
52-
f"batch {batch:7d}: {task_name}"
52+
f"Batch {batch:7d}: {task_name}"
5353
f"{', '.join([f'{kk} = {vv:8.2e}' for kk, vv in rmse.items()])}"
5454
f"{lr}"
5555
)

deepmd/pd/entrypoints/main.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -219,8 +219,8 @@ def get_backend_info(self) -> dict:
219219
op_info = {}
220220
return {
221221
"Backend": "Paddle",
222-
"PD ver": f"v{paddle.__version__}-g{paddle.version.commit[:11]}",
223-
"Enable custom OP": False,
222+
"PD Ver": f"v{paddle.__version__}-g{paddle.version.commit[:11]}",
223+
"Custom OP Enabled": False,
224224
**op_info,
225225
}
226226

deepmd/pt/entrypoints/main.py

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -239,16 +239,16 @@ def get_backend_info(self) -> dict:
239239
"""Get backend information."""
240240
if ENABLE_CUSTOMIZED_OP:
241241
op_info = {
242-
"build with PT ver": GLOBAL_CONFIG["pt_version"],
243-
"build with PT inc": GLOBAL_CONFIG["pt_include_dir"].replace(";", "\n"),
244-
"build with PT lib": GLOBAL_CONFIG["pt_libs"].replace(";", "\n"),
242+
"Built with PT Ver": GLOBAL_CONFIG["pt_version"],
243+
"Built with PT Inc": GLOBAL_CONFIG["pt_include_dir"].replace(";", "\n"),
244+
"Built with PT Lib": GLOBAL_CONFIG["pt_libs"].replace(";", "\n"),
245245
}
246246
else:
247247
op_info = {}
248248
return {
249249
"Backend": "PyTorch",
250-
"PT ver": f"v{torch.__version__}-g{torch.version.git_version[:11]}",
251-
"Enable custom OP": ENABLE_CUSTOMIZED_OP,
250+
"PT Ver": f"v{torch.__version__}-g{torch.version.git_version[:11]}",
251+
"Custom OP Enabled": ENABLE_CUSTOMIZED_OP,
252252
**op_info,
253253
}
254254

deepmd/tf/train/run_options.py

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -67,10 +67,10 @@ def get_backend_info(self) -> dict:
6767
"""Get backend information."""
6868
return {
6969
"Backend": "TensorFlow",
70-
"TF ver": tf.version.GIT_VERSION,
71-
"build with TF ver": TF_VERSION,
72-
"build with TF inc": GLOBAL_CONFIG["tf_include_dir"].replace(";", "\n"),
73-
"build with TF lib": GLOBAL_CONFIG["tf_libs"].replace(";", "\n"),
70+
"TF Ver": tf.version.GIT_VERSION,
71+
"Built with TF Ver": TF_VERSION,
72+
"Built with TF Inc": GLOBAL_CONFIG["tf_include_dir"].replace(";", "\n"),
73+
"Built with TF Lib": GLOBAL_CONFIG["tf_libs"].replace(";", "\n"),
7474
}
7575

7676
def get_device_name(self) -> str | None:

deepmd/utils/data_system.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -715,9 +715,9 @@ def print_summary(
715715
# width 65
716716
sys_width = 42
717717
log.info(
718-
f"---Summary of DataSystem: {name:13s}-----------------------------------------------"
718+
f"---Summary of DataSystem: {name.capitalize():13s}-----------------------------------------------"
719719
)
720-
log.info("found %d system(s):", nsystems)
720+
log.info("Found %d System(s):", nsystems)
721721
log.info(
722722
"%s %6s %6s %6s %9s %3s",
723723
_format_name_length("system", sys_width),

deepmd/utils/summary.py

Lines changed: 14 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -47,13 +47,13 @@ class SummaryPrinter(ABC):
4747
)
4848

4949
BUILD: ClassVar = {
50-
"installed to": "\n".join(deepmd.__path__),
51-
"source": GLOBAL_CONFIG["git_summ"],
52-
"source branch": GLOBAL_CONFIG["git_branch"],
53-
"source commit": GLOBAL_CONFIG["git_hash"],
54-
"source commit at": GLOBAL_CONFIG["git_date"],
55-
"use float prec": global_float_prec,
56-
"build variant": GLOBAL_CONFIG["dp_variant"],
50+
"Installed to": "\n".join(deepmd.__path__),
51+
"Source": GLOBAL_CONFIG["git_summ"],
52+
"Source Branch": GLOBAL_CONFIG["git_branch"],
53+
"Source Commit": GLOBAL_CONFIG["git_hash"],
54+
"Source Commit at": GLOBAL_CONFIG["git_date"],
55+
"Float Precision": global_float_prec.capitalize(),
56+
"Build Variant": GLOBAL_CONFIG["dp_variant"].upper(),
5757
}
5858

5959
def __call__(self) -> None:
@@ -64,14 +64,14 @@ def __call__(self) -> None:
6464
if len(nodelist) > 1:
6565
build_info.update(
6666
{
67-
"world size": str(len(nodelist)),
68-
"node list": ", ".join(set(nodelist)),
67+
"World Size": str(len(nodelist)),
68+
"Node List": ", ".join(set(nodelist)),
6969
}
7070
)
7171
build_info.update(
7272
{
73-
"running on": nodename,
74-
"computing device": self.get_compute_device(),
73+
"Running on": nodename,
74+
"Computing Device": self.get_compute_device().upper(),
7575
}
7676
)
7777
device_name = self.get_device_name()
@@ -84,13 +84,13 @@ def __call__(self) -> None:
8484
env_value = os.environ.get("HIP_VISIBLE_DEVICES", "unset")
8585
build_info["HIP_VISIBLE_DEVICES"] = env_value
8686
if self.is_built_with_cuda() or self.is_built_with_rocm():
87-
build_info["Count of visible GPUs"] = str(self.get_ngpus())
87+
build_info["Visible GPU Count"] = str(self.get_ngpus())
8888

8989
intra, inter = get_default_nthreads()
9090
build_info.update(
9191
{
92-
"num_intra_threads": str(intra),
93-
"num_inter_threads": str(inter),
92+
"Num Intra Threads": str(intra),
93+
"Num Inter Threads": str(inter),
9494
}
9595
)
9696
# count the maximum characters in the keys and values

0 commit comments

Comments (0)