|
# Command-line options for the evaluation run (the ArgumentParser `parser`
# is created above, outside this excerpt).
parser.add_argument("--batch_size", type=int, default=128)  # frames per forward pass when chunking a video
parser.add_argument("--subset", type=str, choices=["train", "val", "test", "testA", "testB"])  # dataset split to evaluate
parser.add_argument("--gpus", type=int, default=1)  # number of GPUs to use
parser.add_argument("--resume", type=str, default=None)  # path to an earlier results file; videos already listed there are skipped
parser.add_argument("--take_num", type=int, default=None)  # presumably limits how many items are processed — TODO confirm against elided code
|
19 | 20 | if __name__ == '__main__': |
|
32 | 33 |
|
33 | 34 | save_path = f"output/{args.model}_{args.subset}.txt" |
34 | 35 | Path(save_path).parent.mkdir(parents=True, exist_ok=True) |
35 | | - with open(save_path, "w") as f: |
| 36 | + |
| 37 | + processed_files = set() |
| 38 | + if args.resume is not None: |
| 39 | + with open(args.resume, "r") as f: |
| 40 | + for line in f: |
| 41 | + processed_files.add(line.split(";")[0]) |
| 42 | + |
| 43 | + with open(save_path, "a") as f: |
36 | 44 | with torch.inference_mode(): |
37 | | - for i, (video, _, _) in enumerate(tqdm(test_dataset)): |
| 45 | + for i in tqdm(range(len(test_dataset))): |
| 46 | + file_name = test_dataset.metadata[i].file |
| 47 | + if file_name in processed_files: |
| 48 | + continue |
| 49 | + |
| 50 | + video, _, _ = test_dataset[i] |
38 | 51 | # batch video as frames use batch_size |
39 | 52 | preds_video = [] |
40 | 53 | for j in range(0, len(video), args.batch_size): |
|
45 | 58 | # choose the max prediction |
46 | 59 | pred = preds_video.max().item() |
47 | 60 |
|
48 | | - file_name = test_dataset.metadata[i].file |
49 | 61 | f.write(f"{file_name};{pred}\n") |
50 | 62 | f.flush() |
0 commit comments