Update with commit dcb183f4bdcd9491efb68b3c28d51614a11e59dc
See: https://github.com/huggingface/transformers/commit/dcb183f4bdcd9491efb68b3c28d51614a11e59dc
- frameworks.json +1 -0
- pipeline_tags.json +5 -0
frameworks.json CHANGED
@@ -107,6 +107,7 @@
 {"model_type":"mobilevit","pytorch":true,"tensorflow":true,"flax":false,"processor":"AutoFeatureExtractor"}
 {"model_type":"mobilevitv2","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoTokenizer"}
 {"model_type":"mpnet","pytorch":true,"tensorflow":true,"flax":false,"processor":"AutoTokenizer"}
+{"model_type":"mpt","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoTokenizer"}
 {"model_type":"mra","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoTokenizer"}
 {"model_type":"mt5","pytorch":true,"tensorflow":true,"flax":true,"processor":"AutoTokenizer"}
 {"model_type":"musicgen","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoTokenizer"}
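The file appears to be JSON Lines: one JSON object per line, keyed by model_type. A minimal sketch of how the new mpt entry could be looked up after downloading the file locally; the helper name load_json_lines is illustrative and not part of the repository:

import json

def load_json_lines(path):
    """Parse a JSON Lines file: one JSON object per line."""
    with open(path, encoding="utf-8") as f:
        return [json.loads(line) for line in f if line.strip()]

frameworks = load_json_lines("frameworks.json")
mpt = next(entry for entry in frameworks if entry["model_type"] == "mpt")
# With the line added in this commit, the entry reports PyTorch support only
# and AutoTokenizer as the processor class.
print(mpt["pytorch"], mpt["tensorflow"], mpt["flax"], mpt["processor"])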
pipeline_tags.json CHANGED
@@ -482,6 +482,11 @@
 {"model_class":"MobileViTModel","pipeline_tag":"feature-extraction","auto_class":"AutoModel"}
 {"model_class":"MobileViTV2ForImageClassification","pipeline_tag":"image-classification","auto_class":"AutoModelForImageClassification"}
 {"model_class":"MobileViTV2Model","pipeline_tag":"feature-extraction","auto_class":"AutoModel"}
+{"model_class":"MptForCausalLM","pipeline_tag":"text-generation","auto_class":"AutoModelForCausalLM"}
+{"model_class":"MptForQuestionAnswering","pipeline_tag":"question-answering","auto_class":"AutoModelForQuestionAnswering"}
+{"model_class":"MptForSequenceClassification","pipeline_tag":"text-classification","auto_class":"AutoModelForSequenceClassification"}
+{"model_class":"MptForTokenClassification","pipeline_tag":"token-classification","auto_class":"AutoModelForTokenClassification"}
+{"model_class":"MptModel","pipeline_tag":"feature-extraction","auto_class":"AutoModel"}
 {"model_class":"MraForMaskedLM","pipeline_tag":"fill-mask","auto_class":"AutoModelForMaskedLM"}
 {"model_class":"MraForMultipleChoice","pipeline_tag":"multiple-choice","auto_class":"AutoModelForMultipleChoice"}
 {"model_class":"MraForQuestionAnswering","pipeline_tag":"question-answering","auto_class":"AutoModelForQuestionAnswering"}
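The same JSON Lines approach applies to pipeline_tags.json, which maps each model class to a pipeline tag and an auto class. A hedged sketch of building such a lookup locally; the dictionary construction here is purely for illustration and is not how the library itself consumes the file:

import json

# Build a model_class -> (pipeline_tag, auto_class) lookup from pipeline_tags.json.
with open("pipeline_tags.json", encoding="utf-8") as f:
    pipeline_tags = {
        entry["model_class"]: (entry["pipeline_tag"], entry["auto_class"])
        for entry in (json.loads(line) for line in f if line.strip())
    }

print(pipeline_tags["MptForCausalLM"])
# -> ('text-generation', 'AutoModelForCausalLM')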