import torch
import torch.nn as nn
from transformers import PretrainedConfig, PreTrainedModel


class CustomConfig(PretrainedConfig):
    """Configuration for CustomModel; stores the hyperparameters needed to build it."""

    model_type = "custom_model"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, num_labels=2, **kwargs):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_labels = num_labels


class CustomModel(PreTrainedModel):
    config_class = CustomConfig

    def __init__(self, config):
        super().__init__(config)
        self.embedding = nn.Embedding(config.vocab_size, config.hidden_size)
        # batch_first=True makes each encoder layer accept (batch, seq_len, hidden_size)
        # tensors, matching the output shape of the embedding layer.
        self.layers = nn.ModuleList([
            nn.TransformerEncoderLayer(d_model=config.hidden_size, nhead=config.num_attention_heads, batch_first=True)
            for _ in range(config.num_hidden_layers)
        ])
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)

        # Initialize weights and apply any final processing.
        self.post_init()

    def forward(self, input_ids):
        x = self.embedding(input_ids)            # (batch, seq_len, hidden_size)
        for layer in self.layers:
            x = layer(x)
        # Mean-pool over the sequence dimension, then classify.
        logits = self.classifier(x.mean(dim=1))  # (batch, num_labels)
        return logits
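

# --- Usage sketch (illustrative) ---------------------------------------------
# A minimal smoke test, assuming the classes above live in the current module:
# register the custom config/model with the Auto* classes, build a small model,
# and run a dummy batch through the forward pass. The tiny hyperparameters, the
# dummy batch shape, and the "./custom-model" path are placeholders chosen for
# this example, not values required by the model itself.
if __name__ == "__main__":
    from transformers import AutoConfig, AutoModel

    # Make AutoConfig/AutoModel aware of the new model_type.
    AutoConfig.register("custom_model", CustomConfig)
    AutoModel.register(CustomConfig, CustomModel)

    config = CustomConfig(num_hidden_layers=2)  # small config for a quick test
    model = CustomModel(config)

    dummy_input_ids = torch.randint(0, config.vocab_size, (4, 16))  # (batch, seq_len)
    logits = model(dummy_input_ids)
    print(logits.shape)  # torch.Size([4, 2])

    # The standard save/load API now works for the custom classes:
    model.save_pretrained("./custom-model")
    reloaded = AutoModel.from_pretrained("./custom-model")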