Update with commit bb6f6d53386bf2340eead6a8f9320ce61add3e96
Browse files. See: https://github.com/huggingface/transformers/commit/bb6f6d53386bf2340eead6a8f9320ce61add3e96
- frameworks.json +1 -0
- pipeline_tags.json +1 -0
frameworks.json
CHANGED
@@ -112,6 +112,7 @@
|
|
112 |
{"model_type":"wav2vec2","pytorch":true,"tensorflow":true,"flax":true,"processor":"AutoProcessor"}
|
113 |
{"model_type":"wav2vec2-conformer","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoProcessor"}
|
114 |
{"model_type":"wavlm","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoProcessor"}
|
|
|
115 |
{"model_type":"xglm","pytorch":true,"tensorflow":true,"flax":true,"processor":"AutoTokenizer"}
|
116 |
{"model_type":"xlm","pytorch":true,"tensorflow":true,"flax":false,"processor":"AutoTokenizer"}
|
117 |
{"model_type":"xlm-prophetnet","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoTokenizer"}
|
|
|
112 |
{"model_type":"wav2vec2","pytorch":true,"tensorflow":true,"flax":true,"processor":"AutoProcessor"}
|
113 |
{"model_type":"wav2vec2-conformer","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoProcessor"}
|
114 |
{"model_type":"wavlm","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoProcessor"}
|
115 |
+
{"model_type":"xclip","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoProcessor"}
|
116 |
{"model_type":"xglm","pytorch":true,"tensorflow":true,"flax":true,"processor":"AutoTokenizer"}
|
117 |
{"model_type":"xlm","pytorch":true,"tensorflow":true,"flax":false,"processor":"AutoTokenizer"}
|
118 |
{"model_type":"xlm-prophetnet","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoTokenizer"}
|
pipeline_tags.json
CHANGED
@@ -720,6 +720,7 @@
|
|
720 |
{"model_class":"WavLMForSequenceClassification","pipeline_tag":"audio-classification","auto_class":"AutoModelForAudioClassification"}
|
721 |
{"model_class":"WavLMForXVector","pipeline_tag":"audio-xvector","auto_class":"AutoModelForAudioXVector"}
|
722 |
{"model_class":"WavLMModel","pipeline_tag":"feature-extraction","auto_class":"AutoModel"}
|
|
|
723 |
{"model_class":"XGLMForCausalLM","pipeline_tag":"text-generation","auto_class":"AutoModelForCausalLM"}
|
724 |
{"model_class":"XGLMModel","pipeline_tag":"feature-extraction","auto_class":"AutoModel"}
|
725 |
{"model_class":"XLMForMultipleChoice","pipeline_tag":"multiple-choice","auto_class":"AutoModelForMultipleChoice"}
|
|
|
720 |
{"model_class":"WavLMForSequenceClassification","pipeline_tag":"audio-classification","auto_class":"AutoModelForAudioClassification"}
|
721 |
{"model_class":"WavLMForXVector","pipeline_tag":"audio-xvector","auto_class":"AutoModelForAudioXVector"}
|
722 |
{"model_class":"WavLMModel","pipeline_tag":"feature-extraction","auto_class":"AutoModel"}
|
723 |
+
{"model_class":"XCLIPModel","pipeline_tag":"feature-extraction","auto_class":"AutoModel"}
|
724 |
{"model_class":"XGLMForCausalLM","pipeline_tag":"text-generation","auto_class":"AutoModelForCausalLM"}
|
725 |
{"model_class":"XGLMModel","pipeline_tag":"feature-extraction","auto_class":"AutoModel"}
|
726 |
{"model_class":"XLMForMultipleChoice","pipeline_tag":"multiple-choice","auto_class":"AutoModelForMultipleChoice"}
|