import argparse
import datetime
import math
import os
import time

import numpy as np
import torch
from compressai.zoo.pretrained import load_pretrained
from torch import distributed as dist
from torch import nn
from torch.backends import cudnn
from torch.nn import DataParallel
from torch.nn.parallel import DistributedDataParallel
from torch.utils.data._utils.collate import default_collate
from torchdistill.common import file_util, module_util, yaml_util
from torchdistill.common.constant import def_logger
from torchdistill.common.main_util import is_main_process, init_distributed_mode, load_ckpt, save_ckpt
from torchdistill.core.distillation import get_distillation_box
from torchdistill.core.training import get_training_box
from torchdistill.datasets import util
from torchdistill.eval.coco import SegEvaluator
from torchdistill.misc.log import setup_log_file, SmoothedValue, MetricLogger
from torchdistill.models.custom.bottleneck.base import BottleneckBase
from torchdistill.models.official import get_semantic_segmentation_model
from torchdistill.models.registry import get_model

from compression.registry import get_compression_model
from custom.segmenter import InputCompressionSegmenter, get_custom_model
from custom.util import check_if_module_exits, load_bottleneck_model_ckpt, extract_entropy_bottleneck_module

logger = def_logger.getChild(__name__)
torch.multiprocessing.set_sharing_strategy('file_system')
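
# Command-line options: training/evaluation config, device, logging, and distributed-training settings.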
def get_argparser():
    parser = argparse.ArgumentParser(description='Knowledge distillation for semantic segmentation models')
    parser.add_argument('--config', required=True, help='yaml file path')
    parser.add_argument('--device', default='cuda', help='device')
    parser.add_argument('--log', help='log file path')
    parser.add_argument('--start_epoch', default=0, type=int, metavar='N', help='start epoch')
    parser.add_argument('--num_classes', default=21, type=int, metavar='N', help='number of classes for evaluation')
    parser.add_argument('-test_only', action='store_true', help='only test the models')
    parser.add_argument('-student_only', action='store_true', help='test the student model only')
    # distributed training parameters
    parser.add_argument('--world_size', default=1, type=int, help='number of distributed processes')
    parser.add_argument('--dist_url', default='env://', help='url used to set up distributed training')
    parser.add_argument('-adjust_lr', action='store_true',
                        help='multiply learning rate by number of distributed processes (world_size)')
    return parser
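
# Inject the number of iterations per epoch, ceil(dataset_size / batch_size / world_size),
# into every 'custom_lambda_lr' scheduler config, covering both single-stage and multi-stage training.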
def customize_config(config, dataset_dict, world_size):
    if 'train' not in config:
        return

    train_config = config['train']
    if 'stage1' not in train_config:
        train_data_loader_config = train_config['train_data_loader']
        num_iterations = math.ceil(len(dataset_dict[train_data_loader_config['dataset_id']]) /
                                   train_data_loader_config['batch_size'] / world_size)
        if 'scheduler' in train_config and train_config['scheduler']['type'] == 'custom_lambda_lr':
            train_config['scheduler']['params']['num_iterations'] = num_iterations
    else:
        for i in range(1, 1000000):
            stage_name = 'stage{}'.format(i)
            if stage_name not in train_config:
                break

            stage_train_config = train_config[stage_name]
            if 'train_data_loader' not in stage_train_config:
                continue

            train_data_loader_config = stage_train_config['train_data_loader']
            num_iterations = math.ceil(len(dataset_dict[train_data_loader_config['dataset_id']]) /
                                       train_data_loader_config['batch_size'] / world_size)
            if 'scheduler' in stage_train_config and stage_train_config['scheduler']['type'] == 'custom_lambda_lr':
                stage_train_config['scheduler']['params']['num_iterations'] = num_iterations
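
# Build either a plain semantic segmentation model or, when the config defines a 'compressor',
# a custom model that chains a (pretrained) compression model with a segmenter.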
def load_model(model_config, device):
    if 'compressor' not in model_config:
        model = get_semantic_segmentation_model(model_config)
        if model is None:
            repo_or_dir = model_config.get('repo_or_dir', None)
            model = get_model(model_config['name'], repo_or_dir, **model_config['params'])

        model_ckpt_file_path = os.path.expanduser(model_config['ckpt'])
        if load_bottleneck_model_ckpt(model, model_ckpt_file_path):
            return model.to(device)

        load_ckpt(model_ckpt_file_path, model=model, strict=True)
        return model.to(device)

    # Define compressor
    compressor_config = model_config['compressor']
    compressor = get_compression_model(compressor_config['name'], **compressor_config['params'])
    compressor_ckpt_file_path = os.path.expanduser(compressor_config['ckpt'])
    if os.path.isfile(compressor_ckpt_file_path):
        logger.info('Loading compressor parameters')
        state_dict = torch.load(compressor_ckpt_file_path)
        # Old parameter keys do not work with recent versions of compressai
        state_dict = load_pretrained(state_dict)
        compressor.load_state_dict(state_dict)

    logger.info('Updating compression model')
    compressor.update()
    # Define segmenter
    segmenter_config = model_config['segmenter']
    segmenter = get_semantic_segmentation_model(segmenter_config)
    if segmenter is None:
        repo_or_dir = segmenter_config.get('repo_or_dir', None)
        segmenter = get_model(segmenter_config['name'], repo_or_dir, **segmenter_config['params'])

    segmenter_ckpt_file_path = os.path.expanduser(segmenter_config['ckpt'])
    load_ckpt(segmenter_ckpt_file_path, model=segmenter, strict=True)
    custom_model = get_custom_model(model_config['name'], compressor, segmenter, **model_config['params'])
    return custom_model.to(device)
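
# One training epoch; while the entropy bottleneck has not been updated yet, its auxiliary loss
# is backpropagated alongside the main (distillation) loss.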
def train_one_epoch(training_box, bottleneck_updated, device, epoch, log_freq):
    model = training_box.student_model if hasattr(training_box, 'student_model') else training_box.model
    entropy_bottleneck_module = extract_entropy_bottleneck_module(model)
    metric_logger = MetricLogger(delimiter='  ')
    metric_logger.add_meter('lr', SmoothedValue(window_size=1, fmt='{value}'))
    metric_logger.add_meter('img/s', SmoothedValue(window_size=10, fmt='{value}'))
    header = 'Epoch: [{}]'.format(epoch)
    for sample_batch, targets, supp_dict in \
            metric_logger.log_every(training_box.train_data_loader, log_freq, header):
        start_time = time.time()
        sample_batch, targets = sample_batch.to(device), targets.to(device)
        supp_dict = default_collate(supp_dict)
        loss = training_box(sample_batch, targets, supp_dict)
        aux_loss = None
        if isinstance(entropy_bottleneck_module, nn.Module) and not bottleneck_updated:
            aux_loss = entropy_bottleneck_module.aux_loss()
            aux_loss.backward()

        training_box.update_params(loss)
        batch_size = len(sample_batch)
        if aux_loss is None:
            metric_logger.update(loss=loss.item(), lr=training_box.optimizer.param_groups[0]['lr'])
        else:
            metric_logger.update(loss=loss.item(), aux_loss=aux_loss.item(),
                                 lr=training_box.optimizer.param_groups[0]['lr'])
        metric_logger.meters['img/s'].update(batch_size / (time.time() - start_time))
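
# Evaluation loop: models with an entropy bottleneck are evaluated unwrapped (updating the
# bottleneck first if needed); others are wrapped in DistributedDataParallel / DataParallel.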
@torch.no_grad()
def evaluate(model, data_loader, device, device_ids, distributed, num_classes, bottleneck_updated=False,
             log_freq=1000, title=None, header='Test:'):
    model.to(device)
    entropy_bottleneck_module = extract_entropy_bottleneck_module(model)
    if entropy_bottleneck_module is not None:
        entropy_bottleneck_module.file_size_list.clear()
        if not bottleneck_updated:
            logger.info('Updating entropy bottleneck')
            entropy_bottleneck_module.update()
    else:
        if distributed:
            model = DistributedDataParallel(model, device_ids=device_ids)
        elif device.type.startswith('cuda'):
            model = DataParallel(model, device_ids=device_ids)

    if title is not None:
        logger.info(title)

    model.eval()
    metric_logger = MetricLogger(delimiter='  ')
    seg_evaluator = SegEvaluator(num_classes)
    for sample_batch, targets in metric_logger.log_every(data_loader, log_freq, header):
        sample_batch, targets = sample_batch.to(device), targets.to(device)
        torch.cuda.synchronize()
        model_time = time.time()
        outputs = model(sample_batch)
        model_time = time.time() - model_time
        outputs = outputs['out']
        evaluator_time = time.time()
        seg_evaluator.update(targets.flatten(), outputs.argmax(1).flatten())
        evaluator_time = time.time() - evaluator_time
        metric_logger.update(model_time=model_time, evaluator_time=evaluator_time)

    # gather the stats from all processes
    seg_evaluator.reduce_from_all_processes()
    logger.info(seg_evaluator)
    return seg_evaluator
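
# Full training loop: builds a training or distillation box, optionally updates the entropy
# bottleneck at a configured epoch, skips validation until the bottleneck is usable, and keeps
# the checkpoint with the best validation mIoU.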
def train(teacher_model, student_model, dataset_dict, ckpt_file_path, device, device_ids, distributed, config, args):
    logger.info('Start training')
    train_config = config['train']
    lr_factor = args.world_size if distributed and args.adjust_lr else 1
    training_box = get_training_box(student_model, dataset_dict, train_config,
                                    device, device_ids, distributed, lr_factor) if teacher_model is None \
        else get_distillation_box(teacher_model, student_model, dataset_dict, train_config,
                                  device, device_ids, distributed, lr_factor)
    best_val_miou = 0.0
    optimizer, lr_scheduler = training_box.optimizer, training_box.lr_scheduler
    if file_util.check_if_exists(ckpt_file_path):
        best_val_miou, _, _ = load_ckpt(ckpt_file_path, optimizer=optimizer, lr_scheduler=lr_scheduler)

    log_freq = train_config['log_freq']
    student_model_without_ddp = student_model.module if module_util.check_if_wrapped(student_model) else student_model
    entropy_bottleneck_module = extract_entropy_bottleneck_module(student_model_without_ddp)
    epoch_to_update = train_config.get('epoch_to_update', None)
    bottleneck_updated = False
    start_time = time.time()
    for epoch in range(args.start_epoch, training_box.num_epochs):
        training_box.pre_process(epoch=epoch)
        if epoch_to_update is not None and epoch_to_update <= epoch and not bottleneck_updated:
            logger.info('Updating entropy bottleneck')
            student_model_without_ddp.backbone.bottleneck_layer.update()
            bottleneck_updated = True

        train_one_epoch(training_box, bottleneck_updated, device, epoch, log_freq)
        if entropy_bottleneck_module is None or bottleneck_updated:
            val_seg_evaluator = \
                evaluate(student_model, training_box.val_data_loader, device, device_ids, distributed,
                         num_classes=args.num_classes, bottleneck_updated=bottleneck_updated,
                         log_freq=log_freq, header='Validation:')
            val_acc_global, val_acc, val_iou = val_seg_evaluator.compute()
            val_miou = val_iou.mean().item()
            if val_miou > best_val_miou and is_main_process():
                logger.info('Updating ckpt (Best mIoU: {:.4f} -> {:.4f})'.format(best_val_miou, val_miou))
                best_val_miou = val_miou
                save_ckpt(student_model_without_ddp, optimizer, lr_scheduler,
                          best_val_miou, config, args, ckpt_file_path)

        training_box.post_process()

    if distributed:
        dist.barrier()

    total_time = time.time() - start_time
    total_time_str = str(datetime.timedelta(seconds=int(total_time)))
    logger.info('Training time {}'.format(total_time_str))
    if entropy_bottleneck_module is not None:
        save_ckpt(student_model_without_ddp, optimizer, lr_scheduler,
                  best_val_miou, config, args, ckpt_file_path)

    training_box.clean_modules()
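
# Report mean/std of the compressed file sizes collected at inference time, looking up the
# bottleneck (or input-compression) module wherever the model variant keeps it.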
def analyze_bottleneck_size(model):
    file_size_list = list()
    if hasattr(model, 'bottleneck') and isinstance(model.bottleneck, BottleneckBase):
        file_size_list = model.bottleneck.compressor.file_size_list
    elif isinstance(model, InputCompressionSegmenter):
        file_size_list = model.file_size_list
    elif check_if_module_exits(model, 'backbone.layer1.compressor') and model.backbone.layer1.compressor is not None:
        file_size_list = model.backbone.layer1.compressor.file_size_list
    elif check_if_module_exits(model, 'backbone.bottleneck_layer'):
        file_size_list = model.backbone.bottleneck_layer.file_size_list

    if len(file_size_list) == 0:
        return

    file_sizes = np.array(file_size_list)
    logger.info('Bottleneck size [KB]: mean {} std {} for {} samples'.format(file_sizes.mean(), file_sizes.std(),
                                                                             len(file_sizes)))
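
# Entry point: sets up the (distributed) environment, loads datasets and models, trains unless
# -test_only is given, then evaluates on the test split and reports bottleneck sizes.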
def main(args):
    log_file_path = args.log
    if is_main_process() and log_file_path is not None:
        setup_log_file(os.path.expanduser(log_file_path))

    world_size = args.world_size
    distributed, device_ids = init_distributed_mode(world_size, args.dist_url)
    logger.info(args)
    cudnn.benchmark = True
    config = yaml_util.load_yaml_file(os.path.expanduser(args.config))
    device = torch.device(args.device)
    dataset_dict = util.get_all_datasets(config['datasets'])
    # Update the config with per-epoch iteration counts derived from the dataset sizes
    customize_config(config, dataset_dict, world_size)

    models_config = config['models']
    teacher_model_config = models_config.get('teacher_model', None)
    teacher_model = load_model(teacher_model_config, device) if teacher_model_config is not None else None
    student_model_config = \
        models_config['student_model'] if 'student_model' in models_config else models_config['model']
    student_model = load_model(student_model_config, device)
    if distributed:
        if teacher_model is not None:
            teacher_model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(teacher_model)
        student_model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(student_model)

    if not args.test_only:
        ckpt_file_path = student_model_config['ckpt']
        train(teacher_model, student_model, dataset_dict, ckpt_file_path, device, device_ids, distributed,
              config, args)
        student_model_without_ddp = \
            student_model.module if module_util.check_if_wrapped(student_model) else student_model
        load_ckpt(student_model_config['ckpt'], model=student_model_without_ddp, strict=True)

    test_config = config['test']
    test_data_loader_config = test_config['test_data_loader']
    test_data_loader = util.build_data_loader(dataset_dict[test_data_loader_config['dataset_id']],
                                              test_data_loader_config, distributed)
    num_classes = args.num_classes
    if not args.student_only and teacher_model is not None:
        evaluate(teacher_model, test_data_loader, device, device_ids, distributed, bottleneck_updated=False,
                 num_classes=num_classes, title='[Teacher: {}]'.format(teacher_model_config['name']))

    evaluate(student_model, test_data_loader, device, device_ids, distributed, bottleneck_updated=False,
             num_classes=num_classes, title='[Student: {}]'.format(student_model_config['name']))
    analyze_bottleneck_size(student_model)
if __name__ == '__main__':
    argparser = get_argparser()
    main(argparser.parse_args())