sgugger committed
Commit 5932849
1 Parent(s): 86aa09d

Update with commit 0a057201a96565df29984d716f660fd8d634329a


See: https://github.com/huggingface/transformers/commit/0a057201a96565df29984d716f660fd8d634329a

Files changed (2):
  1. frameworks.json +1 -0
  2. pipeline_tags.json +2 -0
frameworks.json CHANGED
@@ -77,6 +77,7 @@
  {"model_type":"trocr","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoProcessor"}
  {"model_type":"unispeech","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoTokenizer"}
  {"model_type":"unispeech-sat","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoTokenizer"}
+ {"model_type":"van","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoFeatureExtractor"}
  {"model_type":"vilt","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoTokenizer"}
  {"model_type":"vision-encoder-decoder","pytorch":true,"tensorflow":true,"flax":true,"processor":"AutoTokenizer"}
  {"model_type":"vision-text-dual-encoder","pytorch":true,"tensorflow":false,"flax":true,"processor":"AutoProcessor"}
pipeline_tags.json CHANGED
@@ -589,6 +589,8 @@
  {"model_class":"UniSpeechSatForSequenceClassification","pipeline_tag":"audio-classification","auto_class":"AutoModelForAudioClassification"}
  {"model_class":"UniSpeechSatForXVector","pipeline_tag":"audio-xvector","auto_class":"AutoModelForAudioXVector"}
  {"model_class":"UniSpeechSatModel","pipeline_tag":"feature-extraction","auto_class":"AutoModel"}
+ {"model_class":"VanForImageClassification","pipeline_tag":"image-classification","auto_class":"AutoModelForImageClassification"}
+ {"model_class":"VanModel","pipeline_tag":"feature-extraction","auto_class":"AutoModel"}
  {"model_class":"ViTForImageClassification","pipeline_tag":"image-classification","auto_class":"AutoModelForImageClassification"}
  {"model_class":"ViTMAEForPreTraining","pipeline_tag":"pretraining","auto_class":"AutoModelForPreTraining"}
  {"model_class":"ViTMAEModel","pipeline_tag":"feature-extraction","auto_class":"AutoModel"}