from transformers import AutoModel
from huggingface_hub import hf_hub_download
from safetensors.torch import load_file
import torch.nn as nn
import torch

# Number of labels (update if different)
NUM_LABELS = 4

class SciBertClassificationModel(nn.Module):
    def __init__(self, model_path="allenai/scibert_scivocab_uncased", freeze_weights=True):
        super(SciBertClassificationModel, self).__init__()
        if model_path == "allenai/scibert_scivocab_uncased":
            # Load the pretrained SciBERT encoder directly
            self.base_model = AutoModel.from_pretrained(model_path)
        else:
            # Load a fine-tuned checkpoint from the Hub: download its safetensors
            # weights, drop the classification head, and strip the "base_model."
            # prefix so the remaining keys match the plain encoder's state dict
            safetensors_path = hf_hub_download(
                repo_id=model_path,
                repo_type="model",
                filename="model.safetensors"
            )
            state_dict = load_file(safetensors_path)
            filtered_state_dict = {
                k.replace("base_model.", ""): v
                for k, v in state_dict.items()
                if not k.startswith("classifier.")
            }

            self.base_model = AutoModel.from_pretrained(
                "allenai/scibert_scivocab_uncased", state_dict=filtered_state_dict
            )

        # Expose the encoder config so the model can be pushed to the Hub
        self.config = self.base_model.config
        
        # Optionally freeze the base model's weights so only the classifier trains
        self.freeze_weights = freeze_weights
        if freeze_weights:
            for param in self.base_model.parameters():
                param.requires_grad = False
        
        # Add a classification head
        self.classifier = nn.Linear(self.base_model.config.hidden_size, NUM_LABELS)
    
    def forward(self, input_ids, attention_mask, labels=None):
        if self.freeze_weights:
            # Base model is frozen, so skip gradient tracking for its forward pass
            with torch.no_grad():
                outputs = self.base_model(input_ids=input_ids, attention_mask=attention_mask)
        else:
            outputs = self.base_model(input_ids=input_ids, attention_mask=attention_mask)
        
        # Alternative pooling: use the contiguous [CLS] token representation
        # cls_token_representation = outputs.last_hidden_state[:, 0, :].contiguous()
        # logits = self.classifier(cls_token_representation)

        # Pool by summing token representations over the sequence length (dim=1)
        summed_representation = outputs.last_hidden_state.sum(dim=1)

        logits = self.classifier(summed_representation)
        
        loss = None
        if labels is not None:
            # Multi-label objective: labels are expected as multi-hot float vectors
            # with the same shape as the logits, i.e. (batch_size, NUM_LABELS)
            loss_fn = nn.BCEWithLogitsLoss()
            loss = loss_fn(logits, labels.float())
        return {"loss": loss, "logits": logits}

    def state_dict(self, *args, **kwargs):
        # Get the state dictionary
        state_dict = super().state_dict(*args, **kwargs)
        # Ensure all tensors are contiguous
        for key, tensor in state_dict.items():
            if isinstance(tensor, torch.Tensor) and not tensor.is_contiguous():
                state_dict[key] = tensor.contiguous()
        return state_dict
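

# Minimal usage sketch, not part of the original model definition. It assumes
# the standard SciBERT tokenizer and dummy multi-hot labels of shape
# (batch_size, NUM_LABELS); adapt the repo id and labels to your own setup.
if __name__ == "__main__":
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("allenai/scibert_scivocab_uncased")
    model = SciBertClassificationModel()

    batch = tokenizer(
        ["Graphene exhibits remarkable electronic properties."],
        padding=True,
        truncation=True,
        return_tensors="pt",
    )
    labels = torch.zeros((1, NUM_LABELS))  # dummy multi-hot label vector

    outputs = model(
        input_ids=batch["input_ids"],
        attention_mask=batch["attention_mask"],
        labels=labels,
    )
    print(outputs["logits"].shape)  # torch.Size([1, NUM_LABELS])
    print(outputs["loss"])          # BCEWithLogitsLoss over the dummy labels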