-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathgenerate_audio.py
More file actions
125 lines (101 loc) · 5.66 KB
/
generate_audio.py
File metadata and controls
125 lines (101 loc) · 5.66 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 21 15:02:54 2022
@author: user
"""
import numpy as np
import librosa
import os
import torch.utils.data
import torch
from scipy.io.wavfile import write
from transcoder.transcoders import ThirdOctaveToMelTranscoderPinv, ThirdOctaveToMelTranscoder
from utils.util import sort_labels_by_score
import argparse
def apply_fade(audio, sr, duration=0.1):
    """Return a copy of ``audio`` with a linear fade-in at the start and a
    linear fade-out at the end.

    Parameters
    ----------
    audio : np.ndarray
        1-D waveform samples.
    sr : int
        Sample rate in Hz.
    duration : float, optional
        Fade length in seconds (default 0.1).

    Returns
    -------
    np.ndarray
        Faded copy of the input; the input array itself is left unmodified.
    """
    # Fix: the original aliased the input (`out = audio`) and therefore
    # mutated the caller's array in place. Work on a copy instead.
    out = np.copy(audio)
    # Clamp the fade length so very short clips do not raise / misbehave.
    length = min(int(duration * sr), out.shape[0])
    if length > 0:
        fadeout_curve = np.linspace(1.0, 0.0, length)
        fadein_curve = np.linspace(0.0, 1.0, length)
        out[-length:] = out[-length:] * fadeout_curve
        out[:length] = out[:length] * fadein_curve
    return out
def _print_top_scores(logits, labels_str, top_k=10):
    """Print the ``top_k`` highest-scoring (label, score) pairs.

    ``logits`` is averaged over its time axis before ranking so a single
    score is reported per class.
    """
    mean_logits = np.mean(logits, axis=0)
    # Fix: the original called sort_labels_by_score twice (once for labels,
    # once for scores); one call provides both.
    result = sort_labels_by_score(mean_logits, labels_str)
    scores, labels = result[0][:top_k], result[1][:top_k]
    for label, score in zip(labels, scores):
        print(f'{label} : {round(float(score), 2)}')


def main(config):
    """Transcode an audio file to Mel spectrograms through several models,
    print the top-10 PANN class predictions for each, and resynthesize
    waveforms from the (transcoded) Mel spectrograms.

    Parameters
    ----------
    config : argparse.Namespace
        Must expose ``audio_file``, the name of a file inside the
        ``audio`` folder.

    Side effects
    ------------
    Creates ``./audio_generated/<name>/`` and writes four WAV files into it.
    """
    MODEL_PATH = "./reference_models"
    filename = config.audio_file
    # Checkpoint identifier of the CNN transcoder trained on PANN logits
    # (the '+key=value' segments encode the training hyper-parameters).
    cnn_logits_name = 'classifier=PANN+dataset=full+dilation=1+epoch=200+kernel_size=5+learning_rate=-3+nb_channels=64+nb_layers=5+prop_logit=100+step=train+transcoder=cnn_pinv+ts=1_model'
    transcoder = 'cnn_pinv'
    fs = 32000
    full_filename = os.path.join("audio", filename)

    # Device selection (flip force_cpu to True to debug without a GPU).
    force_cpu = False
    use_cuda = torch.cuda.is_available() and not force_cpu
    if use_cuda:
        print('Using CUDA.')
        device = torch.device("cuda:0")
    else:
        print('No CUDA available.')
        device = torch.device("cpu")

    transcoder_deep_bce = ThirdOctaveToMelTranscoder(transcoder, cnn_logits_name, MODEL_PATH, device=device)
    transcoder_pinv = ThirdOctaveToMelTranscoderPinv(MODEL_PATH, cnn_logits_name, device, classifier="PANN")

    x_32k = librosa.load(full_filename, sr=fs)[0]
    # Fix: the sample rate was hard-coded as 32000 here; use fs consistently.
    x_32k = apply_fade(x_32k, fs)

    # Ground-truth Mel spectrogram and its PANN logits.
    x_mels_gt, x_logit_gt = transcoder_deep_bce.wave_to_mels_to_logit(x_32k)
    # Pseudo-inverse (PINV) transcoding baseline.
    x_mels_pinv = transcoder_pinv.wave_to_thirdo_to_mels(x_32k)
    # CNN transcoder (PANN-1/3oct) path: third-octave -> Mel -> logits.
    x_mels_deep_bce = transcoder_deep_bce.wave_to_thirdo_to_mels(x_32k)
    x_logit_deep_bce = transcoder_deep_bce.mels_to_logit(x_mels_deep_bce, mean=True).T

    print('\n XXXXXXXXX PANN-Mels (mel input) XXXXXXXXXXX')
    _print_top_scores(x_logit_gt, transcoder_deep_bce.classif_inference.labels_str)
    print('\n XXXXXXXXXXXX PANN-1/3oct (fast third-octave input) XXXXXXXXXXXX')
    _print_top_scores(x_logit_deep_bce, transcoder_deep_bce.classif_inference.labels_str)

    # Invert each Mel spectrogram back to a waveform; the analysis parameters
    # are shared, so define them once.
    mel_to_audio_kwargs = dict(sr=fs, n_fft=1024, hop_length=320, win_length=1024, fmin=50, fmax=16000)
    audio_cnn_logits = librosa.feature.inverse.mel_to_audio(x_mels_deep_bce, **mel_to_audio_kwargs)
    audio_pinv = librosa.feature.inverse.mel_to_audio(x_mels_pinv, **mel_to_audio_kwargs)
    audio_gt = librosa.feature.inverse.mel_to_audio(x_mels_gt, **mel_to_audio_kwargs)

    # Fix: filename[:-4] silently mangled names whose extension is not
    # exactly 3 characters; splitext strips any extension robustly.
    base = os.path.splitext(filename)[0]
    save_path = os.path.join("./audio_generated", base)
    if not os.path.exists(save_path):
        os.makedirs(save_path)
        print(f"Directory '{save_path}' created.")
    else:
        print(f"Directory '{save_path}' already exists.")

    write(os.path.join(save_path, base + "_generated_from_cnn_logits.wav"), fs, audio_cnn_logits)
    write(os.path.join(save_path, base + "_generated_from_pinv.wav"), fs, audio_pinv)
    write(os.path.join(save_path, base + "_generated_from_groundtruth_mel.wav"), fs, audio_gt)
    write(os.path.join(save_path, base + "_original.wav"), fs, x_32k)
if __name__ == "__main__":
    # Command-line entry point: the single positional argument names the
    # input audio file (expected inside the "audio" folder).
    arg_parser = argparse.ArgumentParser(
        description='Transform audio into different spectral representations, transcoded or not')
    arg_parser.add_argument(
        'audio_file', type=str,
        help='Name of the original audio file that should be located in the "audio" folder')
    main(arg_parser.parse_args())