Update with commit 5b40a37bc4da9dc6cd33876ce9bb3f7f48450a03
See: https://github.com/huggingface/transformers/commit/5b40a37bc4da9dc6cd33876ce9bb3f7f48450a03
- frameworks.json +1 -1
- pipeline_tags.json +2 -0
frameworks.json CHANGED
@@ -86,7 +86,7 @@
 {"model_type":"vision-text-dual-encoder","pytorch":true,"tensorflow":false,"flax":true,"processor":"AutoProcessor"}
 {"model_type":"visual_bert","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoTokenizer"}
 {"model_type":"vit","pytorch":true,"tensorflow":true,"flax":true,"processor":"AutoFeatureExtractor"}
-{"model_type":"vit_mae","pytorch":true,"tensorflow":
+{"model_type":"vit_mae","pytorch":true,"tensorflow":true,"flax":false,"processor":"AutoFeatureExtractor"}
 {"model_type":"wav2vec2","pytorch":true,"tensorflow":true,"flax":true,"processor":"AutoProcessor"}
 {"model_type":"wavlm","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoTokenizer"}
 {"model_type":"xglm","pytorch":true,"tensorflow":false,"flax":true,"processor":"AutoTokenizer"}
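As the diff shows, frameworks.json stores one JSON object per line, keyed by model_type. A minimal sketch of reading the file and inspecting the updated vit_mae entry (the local file path and the lookup itself are illustrative, not part of this commit):

    import json

    # frameworks.json holds one JSON object per line (JSON Lines), one per model_type.
    with open("frameworks.json") as f:          # path assumed: a local copy of the file
        frameworks = [json.loads(line) for line in f if line.strip()]

    # Entry touched by this commit.
    vit_mae = next(e for e in frameworks if e["model_type"] == "vit_mae")
    print(vit_mae["tensorflow"])   # True after this commit
    print(vit_mae["processor"])    # AutoFeatureExtractor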
pipeline_tags.json CHANGED
@@ -559,6 +559,8 @@
 {"model_class":"TFTransfoXLLMHeadModel","pipeline_tag":"text-generation","auto_class":"TF_AutoModelForCausalLM"}
 {"model_class":"TFTransfoXLModel","pipeline_tag":"feature-extraction","auto_class":"TF_AutoModel"}
 {"model_class":"TFViTForImageClassification","pipeline_tag":"image-classification","auto_class":"TF_AutoModelForImageClassification"}
+{"model_class":"TFViTMAEForPreTraining","pipeline_tag":"pretraining","auto_class":"TF_AutoModelForPreTraining"}
+{"model_class":"TFViTMAEModel","pipeline_tag":"feature-extraction","auto_class":"TF_AutoModel"}
 {"model_class":"TFViTModel","pipeline_tag":"feature-extraction","auto_class":"TF_AutoModel"}
 {"model_class":"TFWav2Vec2Model","pipeline_tag":"feature-extraction","auto_class":"TF_AutoModel"}
 {"model_class":"TFXLMForMultipleChoice","pipeline_tag":"multiple-choice","auto_class":"TF_AutoModelForMultipleChoice"}
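The same kind of lookup works for pipeline_tags.json, which maps each model class to its pipeline tag and auto class. A minimal sketch checking the two entries added here (file path assumed to be a local copy):

    import json

    # pipeline_tags.json uses the same one-object-per-line layout as frameworks.json.
    tags = {}
    with open("pipeline_tags.json") as f:       # path assumed: a local copy of the file
        for line in f:
            if line.strip():
                entry = json.loads(line)
                tags[entry["model_class"]] = entry

    # The two entries added by this commit.
    print(tags["TFViTMAEModel"]["pipeline_tag"])          # feature-extraction
    print(tags["TFViTMAEForPreTraining"]["auto_class"])   # TF_AutoModelForPreTraining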