Matt committed
Commit 9a303c2 · 1 parent: ceaf371

Update modeling code and weights

Moves DaViT's weight initialization into Florence2LanguagePreTrainedModel._init_weights, updates the timm import to the new timm.layers path, and adds the model.safetensors weights as a Git LFS object.

Files changed (2)
  1. model.safetensors +3 -0
  2. modeling_florence2.py +12 -20
model.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:03075d2d2d2bbd3e180b9ba0afae4aa8563226e2d32911656966e05b2f2ee060
+size 463221266
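The pointer records the blob's SHA-256 digest and byte size, which is enough to check a downloaded copy of the weights locally. A minimal verification sketch (not part of this commit); the chunked read just avoids holding the ~463 MB file in memory at once:

```python
import hashlib

# Values copied from the Git LFS pointer above.
EXPECTED_OID = "03075d2d2d2bbd3e180b9ba0afae4aa8563226e2d32911656966e05b2f2ee060"
EXPECTED_SIZE = 463221266  # bytes

def verify_lfs_blob(path: str) -> bool:
    """Return True if the file at `path` matches the pointer's oid and size."""
    digest = hashlib.sha256()
    size = 0
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):  # 1 MiB chunks
            digest.update(chunk)
            size += len(chunk)
    return size == EXPECTED_SIZE and digest.hexdigest() == EXPECTED_OID

# verify_lfs_blob("model.safetensors")
```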
modeling_florence2.py CHANGED
@@ -26,7 +26,7 @@ import torch.utils.checkpoint as checkpoint
 from torch.nn import CrossEntropyLoss
 from collections import OrderedDict
 from einops import rearrange
-from timm.models.layers import DropPath, trunc_normal_
+from timm.layers import DropPath, trunc_normal_
 
 from transformers.modeling_utils import PreTrainedModel
 from transformers.generation.utils import GenerationMixin
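The import move tracks timm's package reorganization: recent timm releases expose these layers under the top-level timm.layers module and keep timm.models.layers only as a deprecated alias. If compatibility with older timm versions mattered, a hedged fallback (not part of this commit) could look like:

```python
try:
    from timm.layers import DropPath, trunc_normal_  # newer timm releases
except ImportError:
    # Older timm versions only shipped the timm.models.layers path.
    from timm.models.layers import DropPath, trunc_normal_
```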
@@ -610,29 +610,10 @@ class DaViT(nn.Module):
         self.avgpool = nn.AdaptiveAvgPool1d(1)
         self.head = nn.Linear(self.embed_dims[-1], num_classes) if num_classes > 0 else nn.Identity()
 
-        self.apply(self._init_weights)
-
     @property
     def dim_out(self):
         return self.embed_dims[-1]
 
-    def _init_weights(self, m):
-        if isinstance(m, nn.Linear):
-            trunc_normal_(m.weight, std=0.02)
-            if m.bias is not None:
-                nn.init.constant_(m.bias, 0)
-        elif isinstance(m, nn.Conv2d):
-            nn.init.normal_(m.weight, std=0.02)
-            for name, _ in m.named_parameters():
-                if name in ['bias']:
-                    nn.init.constant_(m.bias, 0)
-        elif isinstance(m, nn.LayerNorm):
-            nn.init.constant_(m.weight, 1.0)
-            nn.init.constant_(m.bias, 0)
-        elif isinstance(m, nn.BatchNorm2d):
-            nn.init.constant_(m.weight, 1.0)
-            nn.init.constant_(m.bias, 0)
-
     def forward_features_unpool(self, x):
         """
         forward until avg pooling
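This removes DaViT's backbone-local initialization; the same branches reappear below in Florence2LanguagePreTrainedModel._init_weights, the hook that Transformers applies to every submodule after construction. A minimal sketch of that pattern (the TinyModel class is hypothetical, not the repo's code), showing why one top-level hook can replace the per-backbone self.apply call:

```python
import torch.nn as nn

class TinyModel(nn.Module):
    """Hypothetical stand-in for a PreTrainedModel subclass."""

    def __init__(self):
        super().__init__()
        self.backbone = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8))
        self.head = nn.Linear(8, 2)
        # Transformers runs the equivalent of this inside post_init():
        self.apply(self._init_weights)  # visits every submodule recursively

    def _init_weights(self, module):
        if isinstance(module, nn.Linear):
            nn.init.trunc_normal_(module.weight, std=0.02)
            if module.bias is not None:
                nn.init.constant_(module.bias, 0)
        elif isinstance(module, (nn.LayerNorm, nn.BatchNorm2d)):
            nn.init.constant_(module.weight, 1.0)
            nn.init.constant_(module.bias, 0)

model = TinyModel()  # backbone and head are both initialized by the one hook
```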
@@ -1451,6 +1432,17 @@ class Florence2LanguagePreTrainedModel(PreTrainedModel):
             module.weight.data.normal_(mean=0.0, std=std)
             if module.padding_idx is not None:
                 module.weight.data[module.padding_idx].zero_()
+        elif isinstance(module, nn.Conv2d):
+            nn.init.normal_(module.weight, std=0.02)
+            for name, _ in module.named_parameters():
+                if name == "bias":
+                    nn.init.constant_(module.bias, 0)
+        elif isinstance(module, nn.LayerNorm):
+            nn.init.constant_(module.weight, 1.0)
+            nn.init.constant_(module.bias, 0)
+        elif isinstance(module, nn.BatchNorm2d):
+            nn.init.constant_(module.weight, 1.0)
+            nn.init.constant_(module.bias, 0)
 
     @property
     def dummy_inputs(self):
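A hedged usage sketch for loading the updated checkpoint. The repo id below is an assumption (the commit page does not name it), and trust_remote_code=True is required because modeling_florence2.py ships with the repository rather than with the Transformers library:

```python
from transformers import AutoModelForCausalLM, AutoProcessor

repo = "microsoft/Florence-2-base"  # assumed repo id; adjust to the actual repo
model = AutoModelForCausalLM.from_pretrained(repo, trust_remote_code=True)
processor = AutoProcessor.from_pretrained(repo, trust_remote_code=True)
```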
 