Skip to content

Commit a8d54af

Browse files
committed
feat(log): save the evaluation log and add inlineTee for output file.
* for easy sharing of scores afterward, etc. * also update docs about upcoming work.
1 parent df23d2a commit a8d54af

File tree

5 files changed

+93
-19
lines changed

5 files changed

+93
-19
lines changed

README.md

Lines changed: 12 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -11,6 +11,11 @@
1111
OpenSceneFlow is a codebase for point cloud scene flow estimation.
1212
It is also an official implementation of the following papers (sorted by the time of publication):
1313

14+
<!-- - **TeFlow: An Efficient Multi-frame Scene Flow Estimation Method**
15+
*Qingwen Zhang, Chenhan Jiang, Xiaomeng Zhu, Yunqi Miao, Yushan Zhang, Olov Andersson, Patric Jensfelt*
16+
Under Review
17+
[ Strategy ] [ Self-Supervised ] - [ [OpenReview](https://openreview.net/forum?id=h70FLgnIAw) ] [ [Project](https://github.com/Kin-Zhang/TeFlow) ]&rarr; [here](#teflow) -->
18+
1419
- **DeltaFlow: An Efficient Multi-frame Scene Flow Estimation Method**
1520
*Qingwen Zhang, Xiaomeng Zhu, Yushan Zhang, Yixi Cai, Olov Andersson, Patric Jensfelt*
1621
Conference on Neural Information Processing Systems (**NeurIPS**) 2025 - Spotlight
@@ -338,6 +343,7 @@ https://github.com/user-attachments/assets/07e8d430-a867-42b7-900a-11755949de21
338343
## Cite Us
339344

340345
[*OpenSceneFlow*](https://github.com/KTH-RPL/OpenSceneFlow) is originally designed by [Qingwen Zhang](https://kin-zhang.github.io/) from DeFlow and SeFlow.
346+
It is actively maintained and developed by the community (see the works referenced below).
341347
If you find it useful, please cite our works:
342348

343349
```bibtex
@@ -375,6 +381,12 @@ If you find it useful, please cite our works:
375381
year={2025},
376382
url={https://openreview.net/forum?id=T9qNDtvAJX}
377383
}
384+
@misc{zhang2025teflow,
385+
title={{TeFlow}: Enabling Multi-frame Supervision for Feed-forward Scene Flow Estimation},
386+
author={Zhang, Qingwen and Jiang, Chenhan and Zhu, Xiaomeng and Miao, Yunqi and Zhang, Yushan and Andersson, Olov and Jensfelt, Patric},
387+
year={2025},
388+
url={https://openreview.net/forum?id=h70FLgnIAw}
389+
}
378390
```
379391

380392
Works from our excellent collaborators have also contributed to this codebase:

conf/eval.yaml

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11

22
dataset_path: /home/kin/data/av2/h5py/sensor
3-
checkpoint: /home/kin/model_zoo/deflow.ckpt
3+
checkpoint: /home/kin/data/model_zoo/deltaflow_public/deltaflow-av2.ckpt
44
data_mode: val # [val, test]
55
save_res: False # [True, False]
66

@@ -15,7 +15,7 @@ output: ${model.name}-${slurm_id}
1515
gpus: 1
1616
seed: 42069
1717
eval_only: True
18-
wandb_mode: offline # [offline, disabled, online]
18+
wandb_mode: disabled # [offline, disabled, online]
1919
defaults:
2020
- hydra: default
2121
- model: deflow

eval.py

Lines changed: 24 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -13,12 +13,13 @@
1313
import torch
1414
from torch.utils.data import DataLoader
1515
import lightning.pytorch as pl
16-
from lightning.pytorch.loggers import WandbLogger
16+
from lightning.pytorch.loggers import TensorBoardLogger, WandbLogger
1717
from omegaconf import DictConfig
1818
import hydra, wandb, os, sys
1919
from hydra.core.hydra_config import HydraConfig
2020
from src.dataset import HDF5Dataset
2121
from src.trainer import ModelWrapper
22+
from src.utils import InlineTee
2223

2324
def precheck_cfg_valid(cfg):
2425
if os.path.exists(cfg.dataset_path + f"/{cfg.data_mode}") is False:
@@ -36,8 +37,8 @@ def main(cfg):
3637

3738
if 'iter_only' in cfg.model and cfg.model.iter_only:
3839
from src.runner import launch_runner
39-
print(f"---LOG[eval]: Run optmization-based method: {cfg.model.name}")
40-
launch_runner(cfg, cfg.data_mode)
40+
launch_runner(cfg, cfg.data_mode, output_dir)
41+
print(f"---LOG[eval]: Finished optimization-based evaluation. Logging saved to {output_dir}/output.log")
4142
return
4243

4344
if not os.path.exists(cfg.checkpoint):
@@ -47,27 +48,39 @@ def main(cfg):
4748
torch_load_ckpt = torch.load(cfg.checkpoint)
4849
checkpoint_params = DictConfig(torch_load_ckpt["hyper_parameters"])
4950
cfg.output = checkpoint_params.cfg.output + f"-e{torch_load_ckpt['epoch']}-{cfg.data_mode}-v{cfg.leaderboard_version}"
51+
# replace output_dir ${old_output_dir} with ${output_dir}
52+
output_dir = output_dir.replace(HydraConfig.get().runtime.output_dir.split('/')[-2], checkpoint_params.cfg.output.split('/')[-1])
5053
cfg.model.update(checkpoint_params.cfg.model)
5154
cfg.num_frames = cfg.model.target.get('num_frames', checkpoint_params.cfg.get('num_frames', cfg.get('num_frames', 2)))
5255

5356
mymodel = ModelWrapper.load_from_checkpoint(cfg.checkpoint, cfg=cfg, eval=True)
54-
print(f"\n---LOG[eval]: Loaded model from {cfg.checkpoint}. The backbone network is {checkpoint_params.cfg.model.name}.\n")
57+
os.makedirs(output_dir, exist_ok=True)
58+
sys.stdout = InlineTee(f"{output_dir}/output.log")
59+
print(f"---LOG[eval]: Loaded model from {cfg.checkpoint}. The backbone network is {checkpoint_params.cfg.model.name}.")
60+
print(f"---LOG[eval]: Evaluation data: {cfg.dataset_path}/{cfg.data_mode} set.\n")
5561

56-
wandb_logger = WandbLogger(save_dir=output_dir,
57-
entity="kth-rpl",
58-
project=f"deflow-eval",
59-
name=f"{cfg.output}",
60-
offline=(cfg.wandb_mode == "offline"))
62+
if cfg.wandb_mode != "disabled":
63+
logger = WandbLogger(save_dir=output_dir,
64+
entity="kth-rpl",
65+
project=f"opensf-eval",
66+
name=f"{cfg.output}",
67+
offline=(cfg.wandb_mode == "offline"))
68+
logger.watch(mymodel, log_graph=False)
69+
else:
70+
# check local tensorboard logging: tensorboard --logdir logs/jobs/{log folder}
71+
logger = TensorBoardLogger(save_dir=output_dir, name="logs")
6172

62-
trainer = pl.Trainer(logger=wandb_logger, devices=1)
73+
trainer = pl.Trainer(logger=logger, devices=1)
6374
# NOTE(Qingwen): search & check: def eval_only_step_(self, batch, res_dict)
6475
trainer.validate(model = mymodel, \
6576
dataloaders = DataLoader( \
6677
HDF5Dataset(cfg.dataset_path + f"/{cfg.data_mode}", \
6778
n_frames=cfg.num_frames, \
6879
eval=True, leaderboard_version=cfg.leaderboard_version), \
6980
batch_size=1, shuffle=False))
70-
wandb.finish()
81+
if cfg.wandb_mode != "disabled":
82+
wandb.finish()
83+
print(f"---LOG[eval]: Finished feed-forward evaluation. Logging saved to {output_dir}/output.log")
7184

7285
if __name__ == "__main__":
7386
main()

src/runner.py

Lines changed: 10 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -15,7 +15,7 @@
1515
#
1616
"""
1717

18-
import os
18+
import os, sys
1919
import torch
2020
import torch.distributed as dist
2121
import torch.multiprocessing as mp
@@ -34,7 +34,7 @@
3434
from .utils.eval_metric import OfficialMetrics, evaluate_leaderboard, evaluate_leaderboard_v2, evaluate_ssf
3535
from .utils.av2_eval import write_output_file
3636
from .utils.mics import zip_res
37-
37+
from .utils import InlineTee
3838
class SceneDistributedSampler(Sampler):
3939
"""
4040
A DistributedSampler that distributes data based on scene IDs, not individual indices.
@@ -300,15 +300,20 @@ def _run_process(cfg, mode):
300300

301301
runner.cleanup()
302302

303-
def _spawn_wrapper(rank, world_size, cfg, mode):
303+
def _spawn_wrapper(rank, world_size, cfg, mode, output_dir):
304+
log_filepath = f"{output_dir}/output.log" if output_dir else None
305+
if log_filepath and rank==0:
306+
sys.stdout = InlineTee(log_filepath, append=True)
307+
if rank == 0:
308+
print(f"---LOG[eval]: Run optimization-based method: {cfg.model.name} on {cfg.dataset_path}/{cfg.data_mode} set.\n")
304309
torch.cuda.set_device(rank)
305310
os.environ['RANK'] = str(rank)
306311
os.environ['WORLD_SIZE'] = str(world_size)
307312
os.environ['MASTER_ADDR'] = 'localhost'
308313
os.environ['MASTER_PORT'] = str(cfg.get('master_port', 12355))
309314
_run_process(cfg, mode)
310315

311-
def launch_runner(cfg, mode):
316+
def launch_runner(cfg, mode, output_dir):
312317
is_slurm_job = 'SLURM_PROCID' in os.environ
313318

314319
if not is_slurm_job and not dist.is_initialized():
@@ -320,7 +325,7 @@ def launch_runner(cfg, mode):
320325
cfg.save_res_path = Path(cfg.dataset_path).parent / "results" / cfg.output
321326

322327
mp.spawn(_spawn_wrapper,
323-
args=(world_size, cfg, mode),
328+
args=(world_size, cfg, mode, output_dir),
324329
nprocs=world_size,
325330
join=True)
326331

src/utils/__init__.py

Lines changed: 45 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -29,4 +29,48 @@ def npcal_pose0to1(pose0, pose1):
2929
pose1_inv[:3,:3] = pose1[:3,:3].T
3030
pose1_inv[:3,3] = (pose1[:3,:3].T * -pose1[:3,3]).sum(axis=1)
3131
pose_0to1 = pose1_inv @ pose0.astype(np.float64)
32-
return pose_0to1.astype(np.float32)
32+
return pose_0to1.astype(np.float32)
33+
34+
35+
# a quick inline tee class to log stdout to file
import sys
import re
from datetime import datetime


class InlineTee:
    """Tee everything written to stdout into a log file as well.

    Install with ``sys.stdout = InlineTee(path)``; every subsequent ``print``
    goes to the real console unchanged and, with ANSI color escape codes
    stripped, to *filepath*.

    Args:
        filepath: Destination log file path.
        append: Open the log file in append mode instead of truncating.
        timestamp_per_line: If True, prefix every logged line with a
            ``[YYYY-mm-dd HH:MM:SS]`` timestamp; otherwise write a single
            header timestamp at the top of the file.
    """

    def __init__(self, filepath, append=False, timestamp_per_line=False):
        mode = "a" if append else "w"
        self.file = open(filepath, mode)
        # Keep a handle on the stream we are wrapping so output still
        # reaches the console (NOTE: captures whatever sys.stdout is NOW).
        self.stdout = sys.stdout
        # True when the next character written starts a fresh line
        # (used to decide where per-line timestamps go).
        self.newline = True
        self.timestamp_per_line = timestamp_per_line
        # Matches SGR color/style escape sequences, e.g. "\x1b[31m".
        self.ansi_pattern = re.compile(r'\x1b\[[0-9;]*m')

        # One header timestamp when not timestamping every line.
        if not self.timestamp_per_line:
            self.file.write(f"=== Log started at {datetime.now().strftime('%Y-%m-%d %H:%M:%S')} ===\n\n")

    def write(self, data):
        """Mirror *data* to stdout and write an ANSI-free copy to the file."""
        self.stdout.write(data)

        clean_data = self.ansi_pattern.sub('', data)

        if not self.timestamp_per_line:
            self.file.write(clean_data)
            return

        if not clean_data:
            return
        lines = clean_data.split('\n')
        for i, line in enumerate(lines):
            if line and self.newline:
                timestamp = datetime.now().strftime("[%Y-%m-%d %H:%M:%S] ")
                self.file.write(timestamp + line)
            else:
                self.file.write(line)
            if i < len(lines) - 1:
                self.file.write('\n')
                self.newline = True
            else:
                # No trailing '\n' in this chunk: we are at a line start only
                # if the final split segment is empty. Tracking this here (for
                # whitespace-only writes too) fixes stale line-start state.
                self.newline = line == ''

    def flush(self):
        """Flush both sinks so ``print(..., flush=True)`` behaves normally."""
        self.file.flush()
        self.stdout.flush()

    def close(self):
        """Close the log file handle (the wrapped stdout is left open)."""
        self.file.close()

0 commit comments

Comments
 (0)