VictorSanh committed
Commit: c8028be
Parent(s): 3d51561

clean vision

Files changed (1)
  1. vision.py +2 -711
vision.py CHANGED
@@ -24,27 +24,16 @@ import torch.utils.checkpoint
24
  from torch import nn
25
  from transformers.activations import ACT2FN
26
  from transformers.modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling
27
- from transformers.modeling_utils import PreTrainedModel
28
  from transformers.utils import (
29
  ModelOutput,
30
- add_start_docstrings,
31
- add_start_docstrings_to_model_forward,
32
  is_flash_attn_2_available,
33
- logging,
34
- replace_return_docstrings,
35
- )
+ logging,)
36
 
37
  from .configuration_img2html import Img2HTMLVisionConfig
38
 
39
 
40
  logger = logging.get_logger(__name__)
41
 
42
- # _CHECKPOINT_FOR_DOC = "google/siglip-base-patch16-224"
43
-
44
- # SIGLIP_PRETRAINED_MODEL_ARCHIVE_LIST = [
45
- # "google/siglip-base-patch16-224",
46
- # # See all SigLIP models at https://huggingface.co/models?filter=siglip
47
- # ]
48
 
49
  if is_flash_attn_2_available():
50
  from flash_attn import flash_attn_func, flash_attn_varlen_func
@@ -64,34 +53,6 @@ def _get_unpad_data(attention_mask):
64
  )
65
 
66
 
67
- # # Copied from transformers.models.bart.modeling_bart._expand_mask
68
- # def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None):
69
- # """
70
- # Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
71
- # """
72
- # bsz, src_len = mask.size()
73
- # tgt_len = tgt_len if tgt_len is not None else src_len
74
-
75
- # expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype)
76
-
77
- # inverted_mask = 1.0 - expanded_mask
78
-
79
- # return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min)
80
-
81
-
82
- # # contrastive loss function, adapted from
83
- # # https://sachinruk.github.io/blog/2021-03-07-siglip.html
84
- # def contrastive_loss(logits: torch.Tensor) -> torch.Tensor:
85
- # return nn.functional.cross_entropy(logits, torch.arange(len(logits), device=logits.device))
86
-
87
-
88
- # # Copied from transformers.models.clip.modeling_clip.clip_loss with clip->siglip
89
- # def siglip_loss(similarity: torch.Tensor) -> torch.Tensor:
90
- # caption_loss = contrastive_loss(similarity)
91
- # image_loss = contrastive_loss(similarity.t())
92
- # return (caption_loss + image_loss) / 2.0
93
-
94
-
95
  @dataclass
96
  # Copied from transformers.models.clip.modeling_clip.CLIPVisionModelOutput with CLIP->Siglip
97
  class SiglipVisionModelOutput(ModelOutput):
@@ -122,75 +83,6 @@ class SiglipVisionModelOutput(ModelOutput):
122
  attentions: Optional[Tuple[torch.FloatTensor]] = None
123
 
124
 
125
- # @dataclass
126
- # # Copied from transformers.models.clip.modeling_clip.CLIPTextModelOutput with CLIP->Siglip
127
- # class SiglipTextModelOutput(ModelOutput):
128
- # """
129
- # Base class for text model's outputs that also contains a pooling of the last hidden states.
130
-
131
- # Args:
132
- # text_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)` *optional* returned when model is initialized with `with_projection=True`):
133
- # The text embeddings obtained by applying the projection layer to the pooler_output.
134
- # last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
135
- # Sequence of hidden-states at the output of the last layer of the model.
136
- # hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
137
- # Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
138
- # one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
139
-
140
- # Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
141
- # attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
142
- # Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
143
- # sequence_length)`.
144
-
145
- # Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
146
- # heads.
147
- # """
148
-
149
- # text_embeds: Optional[torch.FloatTensor] = None
150
- # last_hidden_state: torch.FloatTensor = None
151
- # hidden_states: Optional[Tuple[torch.FloatTensor]] = None
152
- # attentions: Optional[Tuple[torch.FloatTensor]] = None
153
-
154
-
155
- # @dataclass
156
- # # Copied from transformers.models.clip.modeling_clip.CLIPOutput with CLIP->Siglip
157
- # class SiglipOutput(ModelOutput):
158
- # """
159
- # Args:
160
- # loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `return_loss` is `True`):
161
- # Contrastive loss for image-text similarity.
162
- # logits_per_image:(`torch.FloatTensor` of shape `(image_batch_size, text_batch_size)`):
163
- # The scaled dot product scores between `image_embeds` and `text_embeds`. This represents the image-text
164
- # similarity scores.
165
- # logits_per_text:(`torch.FloatTensor` of shape `(text_batch_size, image_batch_size)`):
166
- # The scaled dot product scores between `text_embeds` and `image_embeds`. This represents the text-image
167
- # similarity scores.
168
- # text_embeds(`torch.FloatTensor` of shape `(batch_size, output_dim`):
169
- # The text embeddings obtained by applying the projection layer to the pooled output of [`SiglipTextModel`].
170
- # image_embeds(`torch.FloatTensor` of shape `(batch_size, output_dim`):
171
- # The image embeddings obtained by applying the projection layer to the pooled output of
172
- # [`SiglipVisionModel`].
173
- # text_model_output(`BaseModelOutputWithPooling`):
174
- # The output of the [`SiglipTextModel`].
175
- # vision_model_output(`BaseModelOutputWithPooling`):
176
- # The output of the [`SiglipVisionModel`].
177
- # """
178
-
179
- # loss: Optional[torch.FloatTensor] = None
180
- # logits_per_image: torch.FloatTensor = None
181
- # logits_per_text: torch.FloatTensor = None
182
- # text_embeds: torch.FloatTensor = None
183
- # image_embeds: torch.FloatTensor = None
184
- # text_model_output: BaseModelOutputWithPooling = None
185
- # vision_model_output: BaseModelOutputWithPooling = None
186
-
187
- # def to_tuple(self) -> Tuple[Any]:
188
- # return tuple(
189
- # self[k] if k not in ["text_model_output", "vision_model_output"] else getattr(self, k).to_tuple()
190
- # for k in self.keys()
191
- # )
192
-
193
-
194
  class SiglipVisionEmbeddings(nn.Module):
195
  def __init__(self, config: Img2HTMLVisionConfig):
196
  super().__init__()
@@ -220,40 +112,6 @@ class SiglipVisionEmbeddings(nn.Module):
220
  return embeddings
221
 
222
 
223
- # # Copied from transformers.models.clip.modeling_clip.CLIPTextEmbeddings with CLIP->Siglip
224
- # class SiglipTextEmbeddings(nn.Module):
225
- # def __init__(self, config: SiglipTextConfig):
226
- # super().__init__()
227
- # embed_dim = config.hidden_size
228
-
229
- # self.token_embedding = nn.Embedding(config.vocab_size, embed_dim)
230
- # self.position_embedding = nn.Embedding(config.max_position_embeddings, embed_dim)
231
-
232
- # # position_ids (1, len position emb) is contiguous in memory and exported when serialized
233
- # self.register_buffer(
234
- # "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
235
- # )
236
-
237
- # def forward(
238
- # self,
239
- # input_ids: Optional[torch.LongTensor] = None,
240
- # position_ids: Optional[torch.LongTensor] = None,
241
- # inputs_embeds: Optional[torch.FloatTensor] = None,
242
- # ) -> torch.Tensor:
243
- # seq_length = input_ids.shape[-1] if input_ids is not None else inputs_embeds.shape[-2]
244
-
245
- # if position_ids is None:
246
- # position_ids = self.position_ids[:, :seq_length]
247
-
248
- # if inputs_embeds is None:
249
- # inputs_embeds = self.token_embedding(input_ids)
250
-
251
- # position_embeddings = self.position_embedding(position_ids)
252
- # embeddings = inputs_embeds + position_embeddings
253
-
254
- # return embeddings
255
-
256
-
257
  # Copied from transformers.models.clip.modeling_clip.CLIPAttention with CLIP->Siglip
258
  class SiglipAttention(nn.Module):
259
  """Multi-headed attention from 'Attention Is All You Need' paper"""
@@ -618,150 +476,6 @@ class SiglipEncoderLayer(nn.Module):
618
  return outputs
619
 
620
 
621
- # class SiglipPreTrainedModel(PreTrainedModel):
622
- # """
623
- # An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
624
- # models.
625
- # """
626
-
627
- # config_class = SiglipConfig
628
- # base_model_prefix = "siglip"
629
- # supports_gradient_checkpointing = True
630
-
631
- # def _init_weights(self, module):
632
- # """Initialize the weights"""
633
- # factor = self.config.initializer_factor
634
- # if isinstance(module, SiglipVisionEmbeddings):
635
- # factor = self.config.initializer_factor
636
- # nn.init.normal_(module.patch_embedding.weight, std=module.config.initializer_range * factor)
637
- # nn.init.normal_(module.position_embedding.weight, std=module.config.initializer_range * factor)
638
- # elif isinstance(module, SiglipAttention):
639
- # factor = self.config.initializer_factor
640
- # in_proj_std = (module.embed_dim**-0.5) * ((2 * module.config.num_hidden_layers) ** -0.5) * factor
641
- # out_proj_std = (module.embed_dim**-0.5) * factor
642
- # nn.init.normal_(module.q_proj.weight, std=in_proj_std)
643
- # nn.init.normal_(module.k_proj.weight, std=in_proj_std)
644
- # nn.init.normal_(module.v_proj.weight, std=in_proj_std)
645
- # nn.init.normal_(module.out_proj.weight, std=out_proj_std)
646
- # elif isinstance(module, SiglipMLP):
647
- # factor = self.config.initializer_factor
648
- # in_proj_std = (
649
- # (module.config.hidden_size**-0.5) * ((2 * module.config.num_hidden_layers) ** -0.5) * factor
650
- # )
651
- # fc_std = (2 * module.config.hidden_size) ** -0.5 * factor
652
- # nn.init.normal_(module.fc1.weight, std=fc_std)
653
- # nn.init.normal_(module.fc2.weight, std=in_proj_std)
654
- # if isinstance(module, nn.LayerNorm):
655
- # module.bias.data.zero_()
656
- # module.weight.data.fill_(1.0)
657
- # if isinstance(module, nn.Linear) and module.bias is not None:
658
- # module.bias.data.zero_()
659
-
660
- # def _set_gradient_checkpointing(self, module, value=False):
661
- # if isinstance(module, SiglipEncoder):
662
- # module.gradient_checkpointing = value
663
-
664
-
665
- # SIGLIP_START_DOCSTRING = r"""
666
- # This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
667
- # library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
668
- # etc.)
669
-
670
- # This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
671
- # Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
672
- # and behavior.
673
-
674
- # Parameters:
675
- # config ([`SiglipConfig`]): Model configuration class with all the parameters of the model.
676
- # Initializing with a config file does not load the weights associated with the model, only the
677
- # configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
678
- # """
679
-
680
- # SIGLIP_TEXT_INPUTS_DOCSTRING = r"""
681
- # Args:
682
- # input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
683
- # Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
684
- # it.
685
-
686
- # Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
687
- # [`PreTrainedTokenizer.__call__`] for details.
688
-
689
- # [What are input IDs?](../glossary#input-ids)
690
- # attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
691
- # Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
692
-
693
- # - 1 for tokens that are **not masked**,
694
- # - 0 for tokens that are **masked**.
695
-
696
- # [What are attention masks?](../glossary#attention-mask)
697
- # position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
698
- # Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
699
- # config.max_position_embeddings - 1]`.
700
-
701
- # [What are position IDs?](../glossary#position-ids)
702
- # output_attentions (`bool`, *optional*):
703
- # Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
704
- # tensors for more detail.
705
- # output_hidden_states (`bool`, *optional*):
706
- # Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
707
- # more detail.
708
- # return_dict (`bool`, *optional*):
709
- # Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
710
- # """
711
-
712
- # SIGLIP_VISION_INPUTS_DOCSTRING = r"""
713
- # Args:
714
- # pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
715
- # Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using
716
- # [`AutoImageProcessor`]. See [`CLIPImageProcessor.__call__`] for details.
717
- # output_attentions (`bool`, *optional*):
718
- # Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
719
- # tensors for more detail.
720
- # output_hidden_states (`bool`, *optional*):
721
- # Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
722
- # more detail.
723
- # return_dict (`bool`, *optional*):
724
- # Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
725
- # """
726
-
727
- # SIGLIP_INPUTS_DOCSTRING = r"""
728
- # Args:
729
- # input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
730
- # Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
731
- # it.
732
-
733
- # Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
734
- # [`PreTrainedTokenizer.__call__`] for details.
735
-
736
- # [What are input IDs?](../glossary#input-ids)
737
- # attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
738
- # Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
739
-
740
- # - 1 for tokens that are **not masked**,
741
- # - 0 for tokens that are **masked**.
742
-
743
- # [What are attention masks?](../glossary#attention-mask)
744
- # position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
745
- # Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
746
- # config.max_position_embeddings - 1]`.
747
-
748
- # [What are position IDs?](../glossary#position-ids)
749
- # pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
750
- # Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using
751
- # [`AutoImageProcessor`]. See [`CLIPImageProcessor.__call__`] for details.
752
- # return_loss (`bool`, *optional*):
753
- # Whether or not to return the contrastive loss.
754
- # output_attentions (`bool`, *optional*):
755
- # Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
756
- # tensors for more detail.
757
- # output_hidden_states (`bool`, *optional*):
758
- # Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
759
- # more detail.
760
- # return_dict (`bool`, *optional*):
761
- # Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
762
- # """
763
-
764
-
765
  # Copied from transformers.models.clip.modeling_clip.CLIPEncoder with CLIP->Siglip
766
  class SiglipEncoder(nn.Module):
767
  """
@@ -787,35 +501,6 @@ class SiglipEncoder(nn.Module):
787
  output_hidden_states: Optional[bool] = None,
788
  return_dict: Optional[bool] = None,
789
  ) -> Union[Tuple, BaseModelOutput]:
790
- r"""
791
- Args:
792
- inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
793
- Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
794
- This is useful if you want more control over how to convert `input_ids` indices into associated vectors
795
- than the model's internal embedding lookup matrix.
796
- attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
797
- Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
798
-
799
- - 1 for tokens that are **not masked**,
800
- - 0 for tokens that are **masked**.
801
-
802
- [What are attention masks?](../glossary#attention-mask)
803
- causal_attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
804
- Causal mask for the text model. Mask values selected in `[0, 1]`:
805
-
806
- - 1 for tokens that are **not masked**,
807
- - 0 for tokens that are **masked**.
808
-
809
- [What are attention masks?](../glossary#attention-mask)
810
- output_attentions (`bool`, *optional*):
811
- Whether or not to return the attentions tensors of all attention layers. See `attentions` under
812
- returned tensors for more detail.
813
- output_hidden_states (`bool`, *optional*):
814
- Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
815
- for more detail.
816
- return_dict (`bool`, *optional*):
817
- Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
818
- """
819
  output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
820
  output_hidden_states = (
821
  output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
@@ -866,140 +551,6 @@ class SiglipEncoder(nn.Module):
866
  )
867
 
868
 
869
- # class SiglipTextTransformer(nn.Module):
870
- # def __init__(self, config: SiglipTextConfig):
871
- # super().__init__()
872
- # self.config = config
873
- # embed_dim = config.hidden_size
874
- # self.embeddings = SiglipTextEmbeddings(config)
875
- # self.encoder = SiglipEncoder(config)
876
- # self.final_layer_norm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)
877
-
878
- # self.head = nn.Linear(embed_dim, embed_dim)
879
-
880
- # @add_start_docstrings_to_model_forward(SIGLIP_TEXT_INPUTS_DOCSTRING)
881
- # @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=SiglipTextConfig)
882
- # def forward(
883
- # self,
884
- # input_ids: Optional[torch.Tensor] = None,
885
- # attention_mask: Optional[torch.Tensor] = None,
886
- # position_ids: Optional[torch.Tensor] = None,
887
- # output_attentions: Optional[bool] = None,
888
- # output_hidden_states: Optional[bool] = None,
889
- # return_dict: Optional[bool] = None,
890
- # ) -> Union[Tuple, BaseModelOutputWithPooling]:
891
- # r"""
892
- # Returns:
893
-
894
- # """
895
- # output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
896
- # output_hidden_states = (
897
- # output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
898
- # )
899
- # return_dict = return_dict if return_dict is not None else self.config.use_return_dict
900
-
901
- # if input_ids is None:
902
- # raise ValueError("You have to specify input_ids")
903
-
904
- # input_shape = input_ids.size()
905
- # input_ids = input_ids.view(-1, input_shape[-1])
906
-
907
- # hidden_states = self.embeddings(input_ids=input_ids, position_ids=position_ids)
908
-
909
- # # note: SigLIP's text model does not use q causal mask, unlike the original CLIP model.
910
- # # expand attention_mask
911
- # if attention_mask is not None:
912
- # # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
913
- # attention_mask = _expand_mask(attention_mask, hidden_states.dtype)
914
-
915
- # encoder_outputs = self.encoder(
916
- # inputs_embeds=hidden_states,
917
- # attention_mask=None,
918
- # causal_attention_mask=None,
919
- # output_attentions=output_attentions,
920
- # output_hidden_states=output_hidden_states,
921
- # return_dict=return_dict,
922
- # )
923
-
924
- # last_hidden_state = encoder_outputs[0]
925
- # last_hidden_state = self.final_layer_norm(last_hidden_state)
926
-
927
- # # Assuming "sticky" EOS tokenization, last token is always EOS.
928
- # pooled_output = last_hidden_state[:, -1, :]
929
- # pooled_output = self.head(pooled_output)
930
-
931
- # if not return_dict:
932
- # return (last_hidden_state, pooled_output) + encoder_outputs[1:]
933
-
934
- # return BaseModelOutputWithPooling(
935
- # last_hidden_state=last_hidden_state,
936
- # pooler_output=pooled_output,
937
- # hidden_states=encoder_outputs.hidden_states,
938
- # attentions=encoder_outputs.attentions,
939
- # )
940
-
941
-
942
- # @add_start_docstrings(
943
- # """The text model from SigLIP without any head or projection on top.""",
944
- # SIGLIP_START_DOCSTRING,
945
- # )
946
- # class SiglipTextModel(SiglipPreTrainedModel):
947
- # config_class = SiglipTextConfig
948
-
949
- # _no_split_modules = ["SiglipTextEmbeddings", "SiglipEncoderLayer"]
950
-
951
- # def __init__(self, config: SiglipTextConfig):
952
- # super().__init__(config)
953
- # self.text_model = SiglipTextTransformer(config)
954
- # # Initialize weights and apply final processing
955
- # self.post_init()
956
-
957
- # def get_input_embeddings(self) -> nn.Module:
958
- # return self.text_model.embeddings.token_embedding
959
-
960
- # def set_input_embeddings(self, value):
961
- # self.text_model.embeddings.token_embedding = value
962
-
963
- # @add_start_docstrings_to_model_forward(SIGLIP_TEXT_INPUTS_DOCSTRING)
964
- # @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=SiglipTextConfig)
965
- # def forward(
966
- # self,
967
- # input_ids: Optional[torch.Tensor] = None,
968
- # attention_mask: Optional[torch.Tensor] = None,
969
- # position_ids: Optional[torch.Tensor] = None,
970
- # output_attentions: Optional[bool] = None,
971
- # output_hidden_states: Optional[bool] = None,
972
- # return_dict: Optional[bool] = None,
973
- # ) -> Union[Tuple, BaseModelOutputWithPooling]:
974
- # r"""
975
- # Returns:
976
-
977
- # Examples:
978
-
979
- # ```python
980
- # >>> from transformers import AutoTokenizer, SiglipTextModel
981
-
982
- # >>> model = SiglipTextModel.from_pretrained("google/siglip-base-patch16-224")
983
- # >>> tokenizer = AutoTokenizer.from_pretrained("google/siglip-base-patch16-224")
984
-
985
- # >>> inputs = tokenizer(["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="pt")
986
-
987
- # >>> outputs = model(**inputs)
988
- # >>> last_hidden_state = outputs.last_hidden_state
989
- # >>> pooled_output = outputs.pooler_output # pooled (EOS token) states
990
- # ```"""
991
- # return_dict = return_dict if return_dict is not None else self.config.use_return_dict
992
-
993
- # return self.text_model(
994
- # input_ids=input_ids,
995
- # attention_mask=attention_mask,
996
- # position_ids=position_ids,
997
- # output_attentions=output_attentions,
998
- # output_hidden_states=output_hidden_states,
999
- # return_dict=return_dict,
1000
- # )
1001
-
1002
-
1003
  class SiglipVisionTransformer(nn.Module):
1004
  def __init__(self, config: Img2HTMLVisionConfig):
1005
  super().__init__()
@@ -1011,8 +562,6 @@ class SiglipVisionTransformer(nn.Module):
1011
  self.post_layernorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)
1012
  self.head = SiglipMultiheadAttentionPoolingHead(config)
1013
 
1014
- # @add_start_docstrings_to_model_forward(SIGLIP_VISION_INPUTS_DOCSTRING)
1015
- # @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=Img2HTMLVisionConfig)
1016
  def forward(
1017
  self,
1018
  pixel_values,
@@ -1079,24 +628,13 @@ class SiglipMultiheadAttentionPoolingHead(nn.Module):
1079
  return hidden_state[:, 0]
1080
 
1081
 
1082
- # @add_start_docstrings(
1083
- # """The vision model from SigLIP without any head or projection on top.""",
1084
- # SIGLIP_START_DOCSTRING,
1085
- # )
1086
  class SiglipVisionModel(nn.Module):
1087
  def __init__(self, config: Img2HTMLVisionConfig):
1088
  super().__init__()
1089
 
 
+ self.config = config
1090
  self.vision_model = SiglipVisionTransformer(config)
1091
 
1092
- # # Initialize weights and apply final processing
1093
- # self.post_init()
1094
-
1095
- # def get_input_embeddings(self) -> nn.Module:
1096
- # return self.vision_model.embeddings.patch_embedding
1097
-
1098
- # @add_start_docstrings_to_model_forward(SIGLIP_VISION_INPUTS_DOCSTRING)
1099
- # @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=Img2HTMLVisionConfig)
1100
  def forward(
1101
  self,
1102
  pixel_values,
@@ -1104,28 +642,6 @@ class SiglipVisionModel(nn.Module):
1104
  output_hidden_states: Optional[bool] = None,
1105
  return_dict: Optional[bool] = None,
1106
  ) -> Union[Tuple, BaseModelOutputWithPooling]:
1107
- # r"""
1108
- # Returns:
1109
-
1110
- # Examples:
1111
-
1112
- # ```python
1113
- # >>> from PIL import Image
1114
- # >>> import requests
1115
- # >>> from transformers import AutoProcessor, SiglipVisionModel
1116
-
1117
- # >>> model = SiglipVisionModel.from_pretrained("google/siglip-base-patch16-224")
1118
- # >>> processor = AutoProcessor.from_pretrained("google/siglip-base-patch16-224")
1119
-
1120
- # >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
1121
- # >>> image = Image.open(requests.get(url, stream=True).raw)
1122
-
1123
- # >>> inputs = processor(images=image, return_tensors="pt")
1124
-
1125
- # >>> outputs = model(**inputs)
1126
- # >>> last_hidden_state = outputs.last_hidden_state
1127
- # >>> pooled_output = outputs.pooler_output # pooled CLS states
1128
- # ```"""
1129
  return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1130
 
1131
  return self.vision_model(
@@ -1134,228 +650,3 @@ class SiglipVisionModel(nn.Module):
1134
  output_hidden_states=output_hidden_states,
1135
  return_dict=return_dict,
1136
  )
1137
-
1138
-
1139
- # @add_start_docstrings(SIGLIP_START_DOCSTRING)
1140
- # class SiglipModel(SiglipPreTrainedModel):
1141
- # config_class = SiglipConfig
1142
-
1143
- # def __init__(self, config: SiglipConfig):
1144
- # super().__init__(config)
1145
-
1146
- # if not isinstance(config.text_config, SiglipTextConfig):
1147
- # raise ValueError(
1148
- # "config.text_config is expected to be of type SiglipTextConfig but is of type"
1149
- # f" {type(config.text_config)}."
1150
- # )
1151
-
1152
- # if not isinstance(config.vision_config, SiglipVisionConfig):
1153
- # raise ValueError(
1154
- # "config.vision_config is expected to be of type SiglipVisionConfig but is of type"
1155
- # f" {type(config.vision_config)}."
1156
- # )
1157
-
1158
- # text_config = config.text_config
1159
- # vision_config = config.vision_config
1160
-
1161
- # self.text_model = SiglipTextModel(text_config)
1162
- # self.vision_model = SiglipVisionModel(vision_config)
1163
-
1164
- # self.temperature = nn.Parameter(
1165
- # torch.randn(
1166
- # 1,
1167
- # )
1168
- # )
1169
- # self.bias = nn.Parameter(
1170
- # torch.randn(
1171
- # 1,
1172
- # )
1173
- # )
1174
-
1175
- # # Initialize weights and apply final processing
1176
- # self.post_init()
1177
-
1178
- # @add_start_docstrings_to_model_forward(SIGLIP_TEXT_INPUTS_DOCSTRING)
1179
- # def get_text_features(
1180
- # self,
1181
- # input_ids: Optional[torch.Tensor] = None,
1182
- # attention_mask: Optional[torch.Tensor] = None,
1183
- # position_ids: Optional[torch.Tensor] = None,
1184
- # output_attentions: Optional[bool] = None,
1185
- # output_hidden_states: Optional[bool] = None,
1186
- # return_dict: Optional[bool] = None,
1187
- # ) -> torch.FloatTensor:
1188
- # r"""
1189
- # Returns:
1190
- # text_features (`torch.FloatTensor` of shape `(batch_size, output_dim`): The text embeddings obtained by
1191
- # applying the projection layer to the pooled output of [`SiglipTextModel`].
1192
-
1193
- # Examples:
1194
-
1195
- # ```python
1196
- # >>> from transformers import AutoTokenizer, SiglipModel
1197
-
1198
- # >>> model = SiglipModel.from_pretrained("google/siglip-base-patch16-224")
1199
- # >>> tokenizer = AutoTokenizer.from_pretrained("google/siglip-base-patch16-224")
1200
-
1201
- # >>> inputs = tokenizer(["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="pt")
1202
- # >>> text_features = model.get_text_features(**inputs)
1203
- # ```"""
1204
- # # Use SigLIP model's config for some fields (if specified) instead of those of vision & text components.
1205
- # output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1206
- # output_hidden_states = (
1207
- # output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1208
- # )
1209
- # return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1210
-
1211
- # text_outputs = self.text_model(
1212
- # input_ids=input_ids,
1213
- # attention_mask=attention_mask,
1214
- # position_ids=position_ids,
1215
- # output_attentions=output_attentions,
1216
- # output_hidden_states=output_hidden_states,
1217
- # return_dict=return_dict,
1218
- # )
1219
-
1220
- # pooled_output = text_outputs[1]
1221
-
1222
- # return pooled_output
1223
-
1224
- # @add_start_docstrings_to_model_forward(SIGLIP_VISION_INPUTS_DOCSTRING)
1225
- # def get_image_features(
1226
- # self,
1227
- # pixel_values: Optional[torch.FloatTensor] = None,
1228
- # output_attentions: Optional[bool] = None,
1229
- # output_hidden_states: Optional[bool] = None,
1230
- # return_dict: Optional[bool] = None,
1231
- # ) -> torch.FloatTensor:
1232
- # r"""
1233
- # Returns:
1234
- # image_features (`torch.FloatTensor` of shape `(batch_size, output_dim`): The image embeddings obtained by
1235
- # applying the projection layer to the pooled output of [`SiglipVisionModel`].
1236
-
1237
- # Examples:
1238
-
1239
- # ```python
1240
- # >>> from PIL import Image
1241
- # >>> import requests
1242
- # >>> from transformers import AutoProcessor, SiglipModel
1243
-
1244
- # >>> model = SiglipModel.from_pretrained("google/siglip-base-patch16-224")
1245
- # >>> processor = AutoProcessor.from_pretrained("google/siglip-base-patch16-224")
1246
-
1247
- # >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
1248
- # >>> image = Image.open(requests.get(url, stream=True).raw)
1249
-
1250
- # >>> inputs = processor(images=image, return_tensors="pt")
1251
-
1252
- # >>> image_features = model.get_image_features(**inputs)
1253
- # ```"""
1254
- # # Use SiglipModel's config for some fields (if specified) instead of those of vision & text components.
1255
- # output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1256
- # output_hidden_states = (
1257
- # output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1258
- # )
1259
- # return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1260
-
1261
- # vision_outputs = self.vision_model(
1262
- # pixel_values=pixel_values,
1263
- # output_attentions=output_attentions,
1264
- # output_hidden_states=output_hidden_states,
1265
- # return_dict=return_dict,
1266
- # )
1267
-
1268
- # pooled_output = vision_outputs[1]
1269
-
1270
- # return pooled_output
1271
-
1272
- # @add_start_docstrings_to_model_forward(SIGLIP_INPUTS_DOCSTRING)
1273
- # @replace_return_docstrings(output_type=SiglipOutput, config_class=SiglipConfig)
1274
- # def forward(
1275
- # self,
1276
- # input_ids: Optional[torch.LongTensor] = None,
1277
- # pixel_values: Optional[torch.FloatTensor] = None,
1278
- # attention_mask: Optional[torch.Tensor] = None,
1279
- # position_ids: Optional[torch.LongTensor] = None,
1280
- # return_loss: Optional[bool] = None,
1281
- # output_attentions: Optional[bool] = None,
1282
- # output_hidden_states: Optional[bool] = None,
1283
- # return_dict: Optional[bool] = None,
1284
- # ) -> Union[Tuple, SiglipOutput]:
1285
- # r"""
1286
- # Returns:
1287
-
1288
- # Examples:
1289
-
1290
- # ```python
1291
- # >>> from PIL import Image
1292
- # >>> import requests
1293
- # >>> from transformers import AutoProcessor, SiglipModel
1294
-
1295
- # >>> model = SiglipModel.from_pretrained("google/siglip-base-patch16-224")
1296
- # >>> processor = AutoProcessor.from_pretrained("google/siglip-base-patch16-224")
1297
-
1298
- # >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
1299
- # >>> image = Image.open(requests.get(url, stream=True).raw)
1300
-
1301
- # >>> inputs = processor(
1302
- # ... text=["a photo of a cat", "a photo of a dog"], images=image, return_tensors="pt", padding=True
1303
- # ... )
1304
-
1305
- # >>> outputs = model(**inputs)
1306
- # >>> logits_per_image = outputs.logits_per_image # this is the image-text similarity score
1307
- # >>> probs = logits_per_image.softmax(dim=1) # we can take the softmax to get the label probabilities
1308
- # ```"""
1309
- # # Use SigLIP model's config for some fields (if specified) instead of those of vision & text components.
1310
- # output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1311
- # output_hidden_states = (
1312
- # output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1313
- # )
1314
- # return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1315
-
1316
- # vision_outputs = self.vision_model(
1317
- # pixel_values=pixel_values,
1318
- # output_attentions=output_attentions,
1319
- # output_hidden_states=output_hidden_states,
1320
- # return_dict=return_dict,
1321
- # )
1322
-
1323
- # text_outputs = self.text_model(
1324
- # input_ids=input_ids,
1325
- # attention_mask=attention_mask,
1326
- # position_ids=position_ids,
1327
- # output_attentions=output_attentions,
1328
- # output_hidden_states=output_hidden_states,
1329
- # return_dict=return_dict,
1330
- # )
1331
-
1332
- # image_embeds = vision_outputs[1]
1333
- # text_embeds = text_outputs[1]
1334
-
1335
- # # normalized features
1336
- # image_embeds = image_embeds / image_embeds.norm(p=2, dim=-1, keepdim=True)
1337
- # text_embeds = text_embeds / text_embeds.norm(p=2, dim=-1, keepdim=True)
1338
-
1339
- # # cosine similarity as logits
1340
- # logits_per_text = torch.matmul(text_embeds, image_embeds.t()) * self.temperature.exp() + self.bias
1341
- # logits_per_image = logits_per_text.t()
1342
-
1343
- # z = torch.matmul(image_embeds, text_embeds.t()) * self.temperature.exp()
1344
-
1345
- # loss = None
1346
- # if return_loss:
1347
- # raise NotImplementedError("SigLIP loss to be implemented")
1348
-
1349
- # if not return_dict:
1350
- # output = (logits_per_image, logits_per_text, text_embeds, image_embeds, text_outputs, vision_outputs)
1351
- # return ((loss,) + output) if loss is not None else output
1352
-
1353
- # return SiglipOutput(
1354
- # loss=loss,
1355
- # logits_per_image=logits_per_image,
1356
- # logits_per_text=logits_per_text,
1357
- # text_embeds=text_embeds,
1358
- # image_embeds=image_embeds,
1359
- # text_model_output=text_outputs,
1360
- # vision_model_output=vision_outputs,
1361
- # )
 