import torch
import torch.nn as nn

from transformers import AutoModel

NUM_LABELS = 4

# Model with frozen LLaMA weights
class LlamaClassificationModel(nn.Module):
    def __init__(self, model_path="meta-llama/Llama-3.2-1B", freeze_weights=True):
        super().__init__()
        self.base_model = AutoModel.from_pretrained(model_path)
        self.freeze_weights = freeze_weights

        # Keep the base config around so the model can be pushed to the Hub.
        self.config = self.base_model.config

        # Freeze the base model's weights so only the classification head trains
        if freeze_weights:
            for param in self.base_model.parameters():
                param.requires_grad = False
        
        # Add a classification head
        self.classifier = nn.Linear(self.base_model.config.hidden_size, NUM_LABELS)
    
    def forward(self, input_ids, attention_mask, labels=None):
        if self.freeze_weights:
            # No gradients for the frozen base model
            with torch.no_grad():
                outputs = self.base_model(input_ids=input_ids, attention_mask=attention_mask)
        else:
            outputs = self.base_model(input_ids=input_ids, attention_mask=attention_mask)

        # Pool by summing hidden states over the sequence dimension,
        # masking out padding positions so they do not contribute
        mask = attention_mask.unsqueeze(-1).to(outputs.last_hidden_state.dtype)
        summed_representation = (outputs.last_hidden_state * mask).sum(dim=1)
        
        logits = self.classifier(summed_representation)  # Pass the summed representation to the classifier
        loss = None
        if labels is not None:
            # BCEWithLogitsLoss expects multi-hot float targets of shape (batch_size, NUM_LABELS)
            loss_fn = nn.BCEWithLogitsLoss()
            loss = loss_fn(logits, labels.float())
        return {"loss": loss, "logits": logits}
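

# A minimal usage sketch, not part of the original file: it is an assumption that
# the model is paired with the tokenizer of the same checkpoint and fed multi-hot
# labels. Access to "meta-llama/Llama-3.2-1B" is gated on the Hugging Face Hub.
if __name__ == "__main__":
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-3.2-1B")
    tokenizer.pad_token = tokenizer.eos_token  # LLaMA tokenizers define no pad token by default

    model = LlamaClassificationModel()
    model.eval()

    batch = tokenizer(["an example input"], return_tensors="pt", padding=True)
    labels = torch.zeros((1, NUM_LABELS))  # multi-hot targets for BCEWithLogitsLoss

    out = model(input_ids=batch["input_ids"], attention_mask=batch["attention_mask"], labels=labels)
    print(out["logits"].shape)  # torch.Size([1, 4])
    print(out["loss"])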