-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathutils.py
More file actions
169 lines (123 loc) · 4.34 KB
/
utils.py
File metadata and controls
169 lines (123 loc) · 4.34 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Apr 26 18:45:09 2020
@author: rebeccaadaimi
"""
import os
import torch
torch.backends.cudnn.benchmark=True
torch.manual_seed(0)
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.utils.data
from torch.utils.data import TensorDataset
import datetime
import _pickle as cPickle
import librosa
import numpy as np
def create_folder(fd):
    """Create directory *fd* (including parents) if it does not exist.

    Args:
        fd: Path of the directory to create.
    """
    # exist_ok avoids the check-then-create race of exists() + makedirs():
    # another process creating the dir between the two calls raised OSError.
    os.makedirs(fd, exist_ok=True)
def get_filename(path):
    """Return the base name of *path* without its extension.

    Args:
        path: File path (relative or absolute; need not exist).

    Returns:
        The final path component with its extension stripped.
    """
    path = os.path.realpath(path)
    # os.path.basename is portable; splitting on '/' by hand breaks on
    # platforms whose separator is not '/'.
    na_ext = os.path.basename(path)
    na = os.path.splitext(na_ext)[0]
    return na
def get_sub_filepaths(folder):
    """Recursively collect the full paths of all files under *folder*.

    Args:
        folder: Root directory to walk.

    Returns:
        List of file paths, in os.walk order.
    """
    collected = []
    for root, _dirs, filenames in os.walk(folder):
        collected.extend(os.path.join(root, fname) for fname in filenames)
    return collected
def get_sub_folders(folder):
    """Recursively collect the full paths of all sub-directories under *folder*.

    Args:
        folder: Root directory to walk.

    Returns:
        List of directory paths, in os.walk order.
    """
    return [
        os.path.join(root, dirname)
        for root, dirnames, _files in os.walk(folder)
        for dirname in dirnames
    ]
def get_sub_dirs(folder):
    """Return the names of the immediate entries inside *folder*.

    Args:
        folder: Directory to list.

    Returns:
        List of entry names (files and directories), as os.listdir gives them.
    """
    entries = os.listdir(folder)
    return entries
def concatenate(a1, a2):
    """Append *a2* to *a1* along axis 0, seeding with *a2* when *a1* is None.

    Args:
        a1: Accumulated array so far, or None on the first call.
        a2: New array to append.

    Returns:
        *a2* itself if *a1* is None, otherwise np.concatenate((a1, a2)).
    """
    if a1 is None:
        return a2
    return np.concatenate((a1, a2))
### segment interaction audio into 300ms clips
def segmentation(data, label, win_length):
    """Cut each audio clip into fixed-size, non-overlapping windows.

    Clips longer than *win_length* samples are split into consecutive
    windows of exactly win_length samples via librosa.util.frame (the
    trailing remainder shorter than one window is dropped); every window
    inherits the clip's label.  Clips not longer than win_length are kept
    whole.

    Args:
        data: Sequence of 1-D audio arrays.
        label: Sequence of labels, parallel to *data*.
        win_length: Window size in samples.

    Returns:
        (segmented_data, new_label): parallel lists of windows and labels.
    """
    segmented_data = []
    new_label = []
    hop = int(win_length)
    for idx, audio in enumerate(data):
        lbl = label[idx]
        if len(audio) > win_length:
            frames = librosa.util.frame(
                audio, frame_length=hop, hop_length=hop, axis=0)
            segmented_data.extend(frames)
            new_label.extend([lbl] * len(frames))
        else:
            segmented_data.append(audio)
            new_label.append(lbl)
    return segmented_data, new_label
### segment interaction audio into 300ms clips
def segmentation_half(data, label, threshold):
    """Split every clip longer than *threshold* into its two halves.

    Clips longer than *threshold* samples are framed with window and hop
    both equal to len(audio)//2 via librosa.util.frame (so an odd final
    sample is dropped); each half inherits the clip's label.  Shorter
    clips pass through unchanged.

    Args:
        data: Sequence of 1-D audio arrays.
        label: Sequence of labels, parallel to *data*.
        threshold: Length in samples above which a clip is halved.

    Returns:
        (segmented_data, new_label): parallel lists of segments and labels.
    """
    segmented_data = []
    new_label = []
    for idx, audio in enumerate(data):
        lbl = label[idx]
        if len(audio) > threshold:
            half = int(len(audio) / 2)
            frames = librosa.util.frame(
                audio, frame_length=half, hop_length=half, axis=0)
            segmented_data.extend(frames)
            new_label.extend([lbl] * len(frames))
        else:
            segmented_data.append(audio)
            new_label.append(lbl)
    return segmented_data, new_label
def average_confidence(y, device):
    """Average per-segment model outputs into one clip-level prediction.

    Args:
        y: Dict holding a 'clipwise_output' tensor and an 'embedding'
           tensor, each with one row per segment.
        device: torch device the averaged tensors are moved to.

    Returns:
        Dict with the segment-averaged 'clipwise_output' (shape (1, C))
        and 'embedding' (1-D) float tensors on *device*.
    """
    clip_np = y['clipwise_output'].data.cpu().numpy()
    emb_np = y['embedding'].data.cpu().numpy()
    # Mean over the segment axis; clipwise output is reshaped to a single
    # (1, classes) row so it looks like a one-clip batch.
    mean_clip = np.mean(clip_np, axis=0).reshape(1, -1)
    mean_emb = np.mean(emb_np, axis=0)
    av_clipwise_output = torch.from_numpy(np.array(mean_clip)).float().to(device)
    av_embedding = torch.from_numpy(np.array(mean_emb)).float().to(device)
    return {'clipwise_output': av_clipwise_output, 'embedding': av_embedding}
class Evaluator(object):
    """Runs a model over a data generator and computes AP / AUC metrics.

    NOTE(review): `forward` and `metrics` are not imported anywhere in this
    file — presumably `forward` is a project helper that runs inference over
    the generator, and `metrics` is sklearn.metrics; confirm and add the
    imports, otherwise `evaluate()` raises NameError.
    """

    def __init__(self, model, generator):
        # model: the network to evaluate; generator: yields evaluation batches.
        self.model = model
        self.generator = generator

    def evaluate(self):
        """Run inference over the generator and score the predictions.

        Returns:
            Dict with per-class 'average_precision' and 'auc' arrays
            (average=None keeps one score per class).
        """
        # Forward
        output_dict = forward(
            model=self.model,
            generator=self.generator,
            return_target=True)
        clipwise_output = output_dict['clipwise_output'] # (audios_num, classes_num)
        target = output_dict['target'] # (audios_num, classes_num)
        average_precision = metrics.average_precision_score(
            target, clipwise_output, average=None)
        auc = metrics.roc_auc_score(target, clipwise_output, average=None)
        statistics = {'average_precision': average_precision, 'auc': auc}
        return statistics
class StatisticsContainer(object):
    """Accumulates per-iteration training statistics and persists them with
    pickle, writing both a primary file and a timestamped backup."""

    def __init__(self, statistics_path):
        """
        Args:
            statistics_path: Path of the primary pickle file to write.
        """
        self.statistics_path = statistics_path
        # Timestamped sibling file so a crash mid-write never loses both copies.
        self.backup_statistics_path = '{}_{}.pickle'.format(
            os.path.splitext(self.statistics_path)[0],
            datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S'))
        self.statistics_dict = {'Trainloss': [], 'Testloss': [], 'test_f1': []}

    def append(self, iteration, statistics, data_type):
        """Record *statistics* (tagged in-place with *iteration*) under
        the *data_type* key ('Trainloss', 'Testloss' or 'test_f1')."""
        print(iteration)
        statistics['iteration'] = iteration
        self.statistics_dict[data_type].append(statistics)

    def dump(self):
        """Write the statistics dict to the primary and backup pickle files."""
        # Context managers close the handles; the original open(...) calls
        # leaked file descriptors (no close on the files passed to dump).
        with open(self.statistics_path, 'wb') as f:
            cPickle.dump(self.statistics_dict, f)
        with open(self.backup_statistics_path, 'wb') as f:
            cPickle.dump(self.statistics_dict, f)

    def load_state_dict(self, resume_iteration):
        """Reload statistics from disk, keeping only entries whose
        'iteration' is <= *resume_iteration* (for training resume)."""
        with open(self.statistics_path, 'rb') as f:
            self.statistics_dict = cPickle.load(f)
        resume_statistics_dict = {'Trainloss': [], 'Testloss': [], 'test_f1': []}
        for key in self.statistics_dict.keys():
            for statistics in self.statistics_dict[key]:
                if statistics['iteration'] <= resume_iteration:
                    resume_statistics_dict[key].append(statistics)
        self.statistics_dict = resume_statistics_dict