import torch
import torch.nn as nn
from transformers import AutoModel

NUM_LABELS = 4


# Classification model on top of a (by default frozen) LLaMA backbone.
class LlamaClassificationModel(nn.Module):
    def __init__(self, model_path="meta-llama/Llama-3.2-1B", freeze_weights=True):
        super().__init__()
        self.base_model = AutoModel.from_pretrained(model_path)
        # Expose the base config so the model can be pushed to the Hub.
        self.config = self.base_model.config
        self.freeze_weights = freeze_weights

        # Freeze the base model's weights so only the classification head is trained.
        if freeze_weights:
            for param in self.base_model.parameters():
                param.requires_grad = False

        # Classification head mapping the pooled hidden state to NUM_LABELS logits.
        self.classifier = nn.Linear(self.base_model.config.hidden_size, NUM_LABELS)

    def forward(self, input_ids, attention_mask, labels=None):
        if self.freeze_weights:
            # No gradients are needed for the frozen backbone.
            with torch.no_grad():
                outputs = self.base_model(input_ids=input_ids, attention_mask=attention_mask)
        else:
            outputs = self.base_model(input_ids=input_ids, attention_mask=attention_mask)

        # Pool by summing hidden states over the sequence dimension.
        summed_representation = outputs.last_hidden_state.sum(dim=1)
        logits = self.classifier(summed_representation)

        loss = None
        if labels is not None:
            # BCEWithLogitsLoss expects float multi-hot labels of shape (batch, NUM_LABELS).
            loss_fn = nn.BCEWithLogitsLoss()
            loss = loss_fn(logits, labels.float())

        return {"loss": loss, "logits": logits}