KhaldiAbderrhmane committed
Update modeling_emotion_classifier.py
modeling_emotion_classifier.py CHANGED (+29 -29)
@@ -1,29 +1,29 @@
-from transformers import PreTrainedModel, HubertModel
-import torch.nn as nn
-import torch
-from .configuration_emotion_classifier import EmotionClassifierConfig
-
-
-class EmotionClassifierHuBERT(PreTrainedModel):
-    config_class = EmotionClassifierConfig
-
-    def __init__(self, config):
-        super().__init__(config)
-        self.hubert = HubertModel.from_pretrained("facebook/hubert-large-ls960-ft")
-        self.conv1 = nn.Conv1d(in_channels=1024, out_channels=512, kernel_size=3, padding=1)
-        self.conv2 = nn.Conv1d(in_channels=512, out_channels=256, kernel_size=3, padding=1)
-        self.transformer_encoder = nn.TransformerEncoderLayer(d_model=256, nhead=8)
-        self.bilstm = nn.LSTM(input_size=256, hidden_size=config.
-        self.fc = nn.Linear(config.
-
-    def forward(self, x):
-        with torch.no_grad():
-            features = self.hubert(x).last_hidden_state
-        features = features.transpose(1, 2)
-        x = torch.relu(self.conv1(features))
-        x = torch.relu(self.conv2(x))
-        x = x.transpose(1, 2)
-        x = self.transformer_encoder(x)
-        x, _ = self.bilstm(x)
-        x = self.fc(x[:, -1, :])
-        return x
+from transformers import PreTrainedModel, HubertModel
+import torch.nn as nn
+import torch
+from .configuration_emotion_classifier import EmotionClassifierConfig
+
+
+class EmotionClassifierHuBERT(PreTrainedModel):
+    config_class = EmotionClassifierConfig
+
+    def __init__(self, config):
+        super().__init__(config)
+        self.hubert = HubertModel.from_pretrained("facebook/hubert-large-ls960-ft")
+        self.conv1 = nn.Conv1d(in_channels=1024, out_channels=512, kernel_size=3, padding=1)
+        self.conv2 = nn.Conv1d(in_channels=512, out_channels=256, kernel_size=3, padding=1)
+        self.transformer_encoder = nn.TransformerEncoderLayer(d_model=256, nhead=8)
+        self.bilstm = nn.LSTM(input_size=256, hidden_size=config.hidden_size_lstm, num_layers=2, batch_first=True, bidirectional=True)
+        self.fc = nn.Linear(config.hidden_size_lstm * 2, config.num_classes)  # * 2 for bidirectional
+
+    def forward(self, x):
+        with torch.no_grad():
+            features = self.hubert(x).last_hidden_state
+        features = features.transpose(1, 2)
+        x = torch.relu(self.conv1(features))
+        x = torch.relu(self.conv2(x))
+        x = x.transpose(1, 2)
+        x = self.transformer_encoder(x)
+        x, _ = self.bilstm(x)
+        x = self.fc(x[:, -1, :])
+        return x
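
A minimal usage sketch for the updated class. It assumes the file layout implied by the diff (the config module sits next to the model module) and that EmotionClassifierConfig accepts hidden_size_lstm and num_classes as keyword arguments; neither detail is confirmed by the diff itself, and on the Hub the model would normally be loaded via AutoModel.from_pretrained(..., trust_remote_code=True) instead. The HuBERT backbone (facebook/hubert-large-ls960-ft) consumes raw 16 kHz waveforms, so the dummy input below is one second of audio.

import torch

# Assumed import paths: the model file uses a relative import, so in
# practice these modules must live inside an importable package or be
# loaded through trust_remote_code on the Hub.
from modeling_emotion_classifier import EmotionClassifierHuBERT
from configuration_emotion_classifier import EmotionClassifierConfig

# hidden_size_lstm and num_classes are the two fields the new code reads
# from the config; the values here are illustrative only.
config = EmotionClassifierConfig(hidden_size_lstm=128, num_classes=8)
model = EmotionClassifierHuBERT(config)  # downloads the HuBERT backbone
model.eval()

# One second of dummy 16 kHz audio, shape (batch, samples).
waveform = torch.randn(1, 16000)
with torch.no_grad():
    logits = model(waveform)
print(logits.shape)  # torch.Size([1, 8]) -> (batch, num_classes)

Note that the classifier head slices the last timestep of the BiLSTM output, x[:, -1, :]; with bidirectional=True that slice concatenates the forward and backward hidden states at the final frame, which is why the linear layer's input width is config.hidden_size_lstm * 2.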