Upload BirdAST_Seq.py
BirdAST_Seq.py +185 -0
BirdAST_Seq.py
ADDED
@@ -0,0 +1,185 @@
import torch
import torch.nn as nn
import torch.nn.functional as F

import transformers
from transformers import ASTConfig, ASTFeatureExtractor, ASTModel

BirdAST_FEATURE_EXTRACTOR = ASTFeatureExtractor()
DEFAULT_SR = 16_000
DEFAULT_BACKBONE = "MIT/ast-finetuned-audioset-10-10-0.4593"
DEFAULT_N_CLASSES = 728
DEFAULT_ACTIVATION = "silu"
DEFAULT_N_MLP_LAYERS = 1

def birdast_seq_preprocess(audio_array, sr=DEFAULT_SR):
    """
    Preprocess an audio array for the BirdAST model.

    audio_array: np.array, audio array of the recording, shape (n_samples,)
    sr: int, sampling rate of the audio array (default: 16_000)

    Note:
        1. The audio array should be normalized to [-1, 1].
        2. The audio length should be 10 seconds (or 10.24 seconds). Longer audio will be truncated.
    """
    # Extract features (the extractor pads/truncates to a fixed number of frames)
    features = BirdAST_FEATURE_EXTRACTOR(audio_array, sampling_rate=sr, padding="max_length", return_tensors="pt")

    # With return_tensors="pt", 'input_values' is already a PyTorch tensor;
    # just drop the leading batch dimension
    spectrogram = features['input_values'].squeeze(0)

    return spectrogram
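
# A quick shape check for the preprocessing step (an illustrative sketch;
# the sizes assume the default AST feature extractor, which pads/truncates
# to 1024 frames of 128 mel bins):
#
#   wav = np.random.uniform(-1.0, 1.0, DEFAULT_SR * 10)  # 10 s of noise in [-1, 1]
#   spec = birdast_seq_preprocess(wav)                    # -> torch.Size([1024, 128])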

def birdast_seq_inference(
    model_weights,
    spectrogram,
    device='cpu',
    backbone_name=DEFAULT_BACKBONE,
    n_classes=DEFAULT_N_CLASSES,
    activation=DEFAULT_ACTIVATION,
    n_mlp_layers=DEFAULT_N_MLP_LAYERS
):
    """
    Perform inference with an ensemble of BirdAST model weights.

    model_weights: list, paths to saved model weight files (one per model)
    spectrogram: torch.Tensor, spectrogram tensor, shape (batch_size, n_frames, n_mels)
    device: str, device to run inference on (default: 'cpu')
    backbone_name: str, name of the backbone model (default: 'MIT/ast-finetuned-audioset-10-10-0.4593')
    n_classes: int, number of classes (default: 728)
    activation: str, activation function (default: 'silu')
    n_mlp_layers: int, number of MLP layers (default: 1)

    Returns:
        predictions: np.array, class probabilities, shape (n_models, batch_size, n_classes)
    """
    model = BirdAST(
        backbone_name=backbone_name,
        n_classes=n_classes,
        n_mlp_layers=n_mlp_layers,
        activation=activation
    )

    predict_collects = []

    # Load each set of weights into the same model instance and collect
    # the softmax probabilities
    for _weight in model_weights:
        model.load_state_dict(torch.load(_weight, map_location=device))
        model.to(device)
        model.eval()

        with torch.no_grad():
            spectrogram = spectrogram.to(device)
            output = model(spectrogram)
            logits = output['logits']
            predictions = F.softmax(logits, dim=1)
            predict_collects.append(predictions)

    # Move results to CPU (a no-op on CPU, and covers any CUDA device string,
    # e.g. 'cuda:0') before stacking into a NumPy array
    predict_collects = torch.stack(predict_collects).cpu().numpy()

    return predict_collects
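
# How to aggregate the per-model probabilities is left to the caller; a
# simple mean over the model axis is one option (an illustrative sketch,
# not part of the API above):
#
#   probs = birdast_seq_inference(weights, spec.unsqueeze(0))  # (n_models, 1, n_classes)
#   ensemble = probs.mean(axis=0)                              # (1, n_classes)
#   top_class = ensemble.argmax(axis=1)                        # predicted class index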

class SelfAttentionPooling(nn.Module):
    """
    Implementation of SelfAttentionPooling
    Original Paper: Self-Attention Encoding and Pooling for Speaker Recognition
    https://arxiv.org/pdf/2008.01077v1.pdf
    """
    def __init__(self, input_dim):
        super(SelfAttentionPooling, self).__init__()
        self.W = nn.Linear(input_dim, 1)
        self.softmax = nn.Softmax(dim=1)

    def forward(self, batch_rep):
        """
        input:
            batch_rep : size (N, T, H), N: batch size, T: sequence length, H: hidden dimension
        attention_weight:
            att_w : size (N, T, 1)
        return:
            utter_rep: size (N, H)
        """
        # Score each time step, softmax over the sequence, then compute the
        # attention-weighted sum of the hidden states
        att_w = self.softmax(self.W(batch_rep).squeeze(-1)).unsqueeze(-1)
        utter_rep = torch.sum(batch_rep * att_w, dim=1)

        return utter_rep
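
# Shape sketch for the pooling layer (illustrative values; any (N, T, H)
# batch works):
#
#   pool = SelfAttentionPooling(input_dim=768)
#   x = torch.randn(4, 50, 768)   # (N, T, H)
#   pooled = pool(x)              # -> torch.Size([4, 768])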

class BirdAST(nn.Module):

    def __init__(self, backbone_name, n_classes, n_mlp_layers=1, activation='silu'):
        super(BirdAST, self).__init__()

        # pre-trained backbone
        backbone_config = ASTConfig.from_pretrained(backbone_name)
        self.ast = ASTModel.from_pretrained(backbone_name, config=backbone_config)
        self.hidden_size = backbone_config.hidden_size

        # set activation function
        if activation == 'relu':
            self.activation = nn.ReLU()
        elif activation == 'silu':
            self.activation = nn.SiLU()
        elif activation == 'gelu':
            self.activation = nn.GELU()
        else:
            raise ValueError("Unsupported activation function. Choose 'relu', 'silu' or 'gelu'")

        # define self-attention pooling layer
        self.sa_pool = SelfAttentionPooling(self.hidden_size)

        # define MLP head: n_mlp_layers hidden layers with activation,
        # followed by the final classification layer
        layers = []
        for _ in range(n_mlp_layers):
            layers.append(nn.Linear(self.hidden_size, self.hidden_size))
            layers.append(self.activation)
        layers.append(nn.Linear(self.hidden_size, n_classes))
        self.mlp = nn.Sequential(*layers)

    def forward(self, spectrogram):
        # spectrogram: (batch_size, n_frames, n_mels), as produced by birdast_seq_preprocess
        # output: (batch_size, n_classes)

        ast_output = self.ast(spectrogram, output_hidden_states=False)
        hidden_state = ast_output.last_hidden_state
        pool_output = self.sa_pool(hidden_state)
        logits = self.mlp(pool_output)

        return {'logits': logits}
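
# End-to-end shape sketch (sizes assume the default backbone, whose hidden
# size is 768, and the default 1024 x 128 spectrogram):
#
#   model = BirdAST(DEFAULT_BACKBONE, DEFAULT_N_CLASSES)
#   out = model(torch.randn(2, 1024, 128))
#   out['logits'].shape           # -> torch.Size([2, 728])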

if __name__ == '__main__':

    import numpy as np
    import matplotlib.pyplot as plt

    # example usage of BirdAST_Seq
    # create a random 10-second audio array at the default sampling rate,
    # normalized to [-1, 1] as the preprocessing docstring requires
    audio_array = np.random.randn(DEFAULT_SR * 10)
    audio_array = audio_array / np.abs(audio_array).max()

    # Preprocess audio array
    spectrogram = birdast_seq_preprocess(audio_array)

    model_weights_dir = '/workspace/voice_of_jungle/training_logs'

    # Load model weights
    model_weights = [f'{model_weights_dir}/BirdAST_SeqPool_GroupKFold_fold_{i}.pth' for i in range(5)]

    # Perform inference (unsqueeze adds the batch dimension the model expects)
    predictions = birdast_seq_inference(model_weights, spectrogram.unsqueeze(0))

    # Plot each model's class probabilities for the single example
    fig, ax = plt.subplots()
    for i, pred in enumerate(predictions):
        ax.plot(pred[0], label=f'model_{i}')
    ax.legend()
    fig.savefig('test_BirdAST_Seq.png')

    print("Inference completed successfully!")