from transformers import AutoModel
from huggingface_hub import hf_hub_download
from safetensors.torch import load_file
import torch.nn as nn
import torch
# Number of classification labels (update if your dataset differs)
NUM_LABELS = 4


# DistilBERT encoder (optionally frozen) with a linear classification head.
class DistilBertClassificationModel(nn.Module):
    def __init__(
        self,
        model_path="distilbert/distilbert-base-uncased",
        freeze_weights=True,
    ):
        super().__init__()
        if model_path == "distilbert/distilbert-base-uncased":
            self.base_model = AutoModel.from_pretrained(model_path)
        else:
            # Load a fine-tuned checkpoint from the Hub, dropping its old
            # classifier head so only the encoder weights are reused.
            safetensors_path = hf_hub_download(
                repo_id=model_path,
                repo_type="model",
                filename="model.safetensors",
            )
            state_dict = load_file(safetensors_path)
            filtered_state_dict = {
                k.replace("base_model.", ""): v
                for k, v in state_dict.items()
                if not k.startswith("classifier.")
            }
            self.base_model = AutoModel.from_pretrained(
                "distilbert/distilbert-base-uncased",
                state_dict=filtered_state_dict,
            )
        # Expose the encoder config so push_to_hub works.
        self.config = self.base_model.config
        # Optionally freeze the encoder so only the classification head trains.
        self.freeze_weights = freeze_weights
        if freeze_weights:
            for param in self.base_model.parameters():
                param.requires_grad = False
        # Classification head on top of the pooled encoder output.
        self.classifier = nn.Linear(self.base_model.config.hidden_size, NUM_LABELS)

    def forward(self, input_ids, attention_mask, labels=None):
        if self.freeze_weights:
            # Encoder is frozen: skip gradient tracking to save memory.
            with torch.no_grad():
                outputs = self.base_model(input_ids=input_ids, attention_mask=attention_mask)
        else:
            # Encoder is trainable: gradients must flow through it.
            outputs = self.base_model(input_ids=input_ids, attention_mask=attention_mask)
        # Pool by summing hidden states over the sequence dimension.
        summed_representation = outputs.last_hidden_state.sum(dim=1)
        logits = self.classifier(summed_representation)
        loss = None
        if labels is not None:
            # BCEWithLogitsLoss treats this as multi-label classification;
            # labels should be float multi-hot vectors of shape (batch, NUM_LABELS).
            loss_fn = nn.BCEWithLogitsLoss()
            loss = loss_fn(logits, labels.float())
        return {"loss": loss, "logits": logits}
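

# A minimal smoke test, assuming the matching distilbert-base-uncased
# tokenizer and illustrative multi-hot labels; adapt the texts and label
# vectors to your own data.
if __name__ == "__main__":
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("distilbert/distilbert-base-uncased")
    model = DistilBertClassificationModel()

    batch = tokenizer(
        ["an example sentence", "another example"],
        padding=True,
        truncation=True,
        return_tensors="pt",
    )
    labels = torch.tensor([[1.0, 0.0, 0.0, 1.0], [0.0, 1.0, 0.0, 0.0]])

    output = model(batch["input_ids"], batch["attention_mask"], labels=labels)
    print(output["logits"].shape)  # torch.Size([2, NUM_LABELS])
    print(output["loss"])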