|
| 1 | +import argparse |
| 2 | + |
| 3 | +from torch.utils.data import DataLoader |
| 4 | +from pytorch_lightning import Trainer |
| 5 | +from pytorch_lightning.callbacks import ModelCheckpoint |
| 6 | +from avdeepfake1m.loader import AVDeepfake1mPlusPlusImages |
| 7 | + |
| 8 | +from xception import Xception |
| 9 | +from utils import LrLogger, EarlyStoppingLR |
| 10 | + |
| 11 | + |
# Command-line interface for the training script.
# Each entry is (flag, kwargs-for-add_argument); registering them from a
# table keeps the option list easy to scan and extend.
parser = argparse.ArgumentParser(description="Classification model training")
_CLI_OPTIONS = (
    ("--data_root", {"type": str}),
    ("--batch_size", {"type": int, "default": 128}),
    ("--model", {"type": str, "choices": ["xception", "meso4", "meso_inception4"]}),
    ("--gpus", {"type": int, "default": 1}),
    # No `type=` here on purpose: the default stays an int, while a
    # user-supplied value arrives as a string and is parsed in __main__.
    ("--precision", {"default": 32}),
    ("--num_train", {"type": int, "default": None}),
    ("--num_val", {"type": int, "default": 2000}),
    ("--max_epochs", {"type": int, "default": 500}),
    ("--resume", {"type": str, "default": None}),
)
for _flag, _kwargs in _CLI_OPTIONS:
    parser.add_argument(_flag, **_kwargs)
args = parser.parse_args()
| 23 | + |
| 24 | + |
if __name__ == "__main__":

    # You can fix the random seed if you want reproducible subsets each epoch:
    # torch.manual_seed(42)
    # random.seed(42)

    # Linear LR scaling: the base LR (1e-4) is tuned for an effective batch
    # size of 4, so scale it by (per-GPU batch size * number of GPUs) / 4.
    learning_rate = 1e-4
    gpus = args.gpus
    total_batch_size = args.batch_size * gpus
    learning_rate = learning_rate * total_batch_size / 4

    # Setup model. NOTE(review): the CLI also advertises "meso4" and
    # "meso_inception4", but only "xception" is wired up here.
    if args.model == "xception":
        model = Xception(learning_rate, distributed=gpus > 1)
    else:
        raise ValueError(f"Unknown model: {args.model}")

    train_dataset = AVDeepfake1mPlusPlusImages(
        subset="train",
        data_root=args.data_root,
        take_num=args.num_train,
        use_video_label=True  # For video-level label access, set True
    )

    # For validation, you can still do the normal dataset
    val_dataset = AVDeepfake1mPlusPlusImages(
        subset="val",
        data_root=args.data_root,
        take_num=args.num_val,
        use_video_label=True
    )

    # Parse precision properly: "--precision 16" arrives as the string "16"
    # (the flag has no type=), while Lightning also accepts string values
    # such as "bf16-mixed" — keep those as-is.
    try:
        precision = int(args.precision)
    except ValueError:
        precision = args.precision

    # Checkpoint selection criterion (minimized).
    monitor = "val_loss"

    trainer = Trainer(
        log_every_n_steps=50,
        precision=precision,
        max_epochs=args.max_epochs,
        callbacks=[
            ModelCheckpoint(
                dirpath=f"./ckpt1/{args.model}",
                save_last=True,
                filename=args.model + "-{epoch}-{val_loss:.3f}",
                monitor=monitor,
                mode="min"
            ),
            LrLogger(),
            EarlyStoppingLR(lr_threshold=1e-7)
        ],
        enable_checkpointing=True,
        benchmark=True,
        accelerator="gpu",
        devices=args.gpus,
        strategy="ddp" if args.gpus > 1 else "auto",
    )

    # Resuming is handled by `ckpt_path` in `fit`, not in the Trainer ctor.
    trainer.fit(
        model,
        # BUGFIX: shuffle the training set each epoch; without shuffle=True the
        # model sees samples in a fixed order. Under DDP, Lightning's injected
        # DistributedSampler inherits this shuffle setting.
        train_dataloaders=DataLoader(
            train_dataset, batch_size=args.batch_size, num_workers=0, shuffle=True
        ),
        val_dataloaders=DataLoader(val_dataset, batch_size=args.batch_size, num_workers=0),
        ckpt_path=args.resume,
    )
0 commit comments