Update with commit e3f028f3af9e2fd42bb8cf52ec1bcf720b6fbaf1
Browse files. See: https://github.com/huggingface/transformers/commit/e3f028f3af9e2fd42bb8cf52ec1bcf720b6fbaf1
- frameworks.json +1 -1
- pipeline_tags.json +2 -0
frameworks.json
CHANGED
@@ -120,7 +120,7 @@
|
|
120 |
{"model_type":"wav2vec2","pytorch":true,"tensorflow":true,"flax":true,"processor":"AutoProcessor"}
|
121 |
{"model_type":"wav2vec2-conformer","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoProcessor"}
|
122 |
{"model_type":"wavlm","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoProcessor"}
|
123 |
-
{"model_type":"whisper","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoProcessor"}
|
124 |
{"model_type":"xclip","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoProcessor"}
|
125 |
{"model_type":"xglm","pytorch":true,"tensorflow":true,"flax":true,"processor":"AutoTokenizer"}
|
126 |
{"model_type":"xlm","pytorch":true,"tensorflow":true,"flax":false,"processor":"AutoTokenizer"}
|
|
|
120 |
{"model_type":"wav2vec2","pytorch":true,"tensorflow":true,"flax":true,"processor":"AutoProcessor"}
|
121 |
{"model_type":"wav2vec2-conformer","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoProcessor"}
|
122 |
{"model_type":"wavlm","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoProcessor"}
|
123 |
+
{"model_type":"whisper","pytorch":true,"tensorflow":true,"flax":false,"processor":"AutoProcessor"}
|
124 |
{"model_type":"xclip","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoProcessor"}
|
125 |
{"model_type":"xglm","pytorch":true,"tensorflow":true,"flax":true,"processor":"AutoTokenizer"}
|
126 |
{"model_type":"xlm","pytorch":true,"tensorflow":true,"flax":false,"processor":"AutoTokenizer"}
|
pipeline_tags.json
CHANGED
@@ -679,6 +679,8 @@
|
|
679 |
{"model_class":"TFViTMAEModel","pipeline_tag":"feature-extraction","auto_class":"TF_AutoModel"}
|
680 |
{"model_class":"TFViTModel","pipeline_tag":"feature-extraction","auto_class":"TF_AutoModel"}
|
681 |
{"model_class":"TFWav2Vec2Model","pipeline_tag":"feature-extraction","auto_class":"TF_AutoModel"}
|
|
|
|
|
682 |
{"model_class":"TFXGLMForCausalLM","pipeline_tag":"text-generation","auto_class":"TF_AutoModelForCausalLM"}
|
683 |
{"model_class":"TFXGLMModel","pipeline_tag":"feature-extraction","auto_class":"TF_AutoModel"}
|
684 |
{"model_class":"TFXLMForMultipleChoice","pipeline_tag":"multiple-choice","auto_class":"TF_AutoModelForMultipleChoice"}
|
|
|
679 |
{"model_class":"TFViTMAEModel","pipeline_tag":"feature-extraction","auto_class":"TF_AutoModel"}
|
680 |
{"model_class":"TFViTModel","pipeline_tag":"feature-extraction","auto_class":"TF_AutoModel"}
|
681 |
{"model_class":"TFWav2Vec2Model","pipeline_tag":"feature-extraction","auto_class":"TF_AutoModel"}
|
682 |
+
{"model_class":"TFWhisperForConditionalGeneration","pipeline_tag":"automatic-speech-recognition","auto_class":"TF_AutoModelForSpeechSeq2Seq"}
|
683 |
+
{"model_class":"TFWhisperModel","pipeline_tag":"feature-extraction","auto_class":"TF_AutoModel"}
|
684 |
{"model_class":"TFXGLMForCausalLM","pipeline_tag":"text-generation","auto_class":"TF_AutoModelForCausalLM"}
|
685 |
{"model_class":"TFXGLMModel","pipeline_tag":"feature-extraction","auto_class":"TF_AutoModel"}
|
686 |
{"model_class":"TFXLMForMultipleChoice","pipeline_tag":"multiple-choice","auto_class":"TF_AutoModelForMultipleChoice"}
|