Update with commit 5a9957358cebd616e58b2d1ab3b887c2f2793b45
See: https://github.com/huggingface/transformers/commit/5a9957358cebd616e58b2d1ab3b887c2f2793b45
- frameworks.json +1 -0
- pipeline_tags.json +6 -0
frameworks.json
CHANGED
@@ -92,6 +92,7 @@
 {"model_type":"vit","pytorch":true,"tensorflow":true,"flax":true,"processor":"AutoFeatureExtractor"}
 {"model_type":"vit_mae","pytorch":true,"tensorflow":true,"flax":false,"processor":"AutoFeatureExtractor"}
 {"model_type":"wav2vec2","pytorch":true,"tensorflow":true,"flax":true,"processor":"AutoProcessor"}
+{"model_type":"wav2vec2-conformer","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoProcessor"}
 {"model_type":"wavlm","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoProcessor"}
 {"model_type":"xglm","pytorch":true,"tensorflow":false,"flax":true,"processor":"AutoTokenizer"}
 {"model_type":"xlm","pytorch":true,"tensorflow":true,"flax":false,"processor":"AutoTokenizer"}
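The new frameworks.json row records that the wav2vec2-conformer model type ships a PyTorch implementation only (no TensorFlow or Flax) and is preprocessed with AutoProcessor. A minimal sketch of what that mapping implies on the consumer side; the checkpoint name is an assumption for illustration, not part of this commit:

```python
# Sketch only: the checkpoint below is assumed, not referenced in the diff.
from transformers import AutoProcessor, AutoModel

ckpt = "facebook/wav2vec2-conformer-rope-large-960h-ft"

# frameworks.json declares processor == "AutoProcessor", so this resolves to the
# combined feature extractor + tokenizer for the wav2vec2-conformer model type.
processor = AutoProcessor.from_pretrained(ckpt)

# pytorch: true, tensorflow/flax: false -> only the PyTorch model class is available.
model = AutoModel.from_pretrained(ckpt)

print(type(processor).__name__, type(model).__name__)
```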
pipeline_tags.json
CHANGED
@@ -626,6 +626,12 @@
 {"model_class":"VisionTextDualEncoderModel","pipeline_tag":"feature-extraction","auto_class":"AutoModel"}
 {"model_class":"VisualBertForPreTraining","pipeline_tag":"pretraining","auto_class":"AutoModelForPreTraining"}
 {"model_class":"VisualBertModel","pipeline_tag":"feature-extraction","auto_class":"AutoModel"}
+{"model_class":"Wav2Vec2ConformerForAudioFrameClassification","pipeline_tag":"audio-frame-classification","auto_class":"AutoModelForAudioFrameClassification"}
+{"model_class":"Wav2Vec2ConformerForCTC","pipeline_tag":"automatic-speech-recognition","auto_class":"AutoModelForCTC"}
+{"model_class":"Wav2Vec2ConformerForPreTraining","pipeline_tag":"pretraining","auto_class":"AutoModelForPreTraining"}
+{"model_class":"Wav2Vec2ConformerForSequenceClassification","pipeline_tag":"audio-classification","auto_class":"AutoModelForAudioClassification"}
+{"model_class":"Wav2Vec2ConformerForXVector","pipeline_tag":"audio-xvector","auto_class":"AutoModelForAudioXVector"}
+{"model_class":"Wav2Vec2ConformerModel","pipeline_tag":"feature-extraction","auto_class":"AutoModel"}
 {"model_class":"Wav2Vec2ForAudioFrameClassification","pipeline_tag":"audio-frame-classification","auto_class":"AutoModelForAudioFrameClassification"}
 {"model_class":"Wav2Vec2ForCTC","pipeline_tag":"automatic-speech-recognition","auto_class":"AutoModelForCTC"}
 {"model_class":"Wav2Vec2ForMaskedLM","pipeline_tag":"fill-mask","auto_class":"AutoModelForMaskedLM"}
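The six new pipeline_tags.json rows map each Wav2Vec2Conformer head class to its Hub pipeline tag and the Auto class that loads it. A hedged sketch of the CTC row in use; the checkpoint name and audio file path are assumptions, not taken from the diff:

```python
# Sketch: exercises the Wav2Vec2ConformerForCTC -> AutoModelForCTC /
# "automatic-speech-recognition" mapping added above. Checkpoint is assumed.
from transformers import AutoModelForCTC, AutoProcessor, pipeline

ckpt = "facebook/wav2vec2-conformer-rope-large-960h-ft"

# AutoModelForCTC should resolve to Wav2Vec2ConformerForCTC for this config.
model = AutoModelForCTC.from_pretrained(ckpt)
processor = AutoProcessor.from_pretrained(ckpt)

# The pipeline tag is what the Hub widget and pipeline() use for this head.
asr = pipeline(
    "automatic-speech-recognition",
    model=model,
    tokenizer=processor.tokenizer,
    feature_extractor=processor.feature_extractor,
)

print(asr("sample.wav"))  # path to a local 16 kHz audio file
```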