Update with commit 523ca4e01674cee314cea128c1a9c0fd4bb0fcd2
See: https://github.com/huggingface/transformers/commit/523ca4e01674cee314cea128c1a9c0fd4bb0fcd2
- frameworks.json +1 -0
- pipeline_tags.json +2 -0
frameworks.json
CHANGED
@@ -27,6 +27,7 @@
 {"model_type":"convbert","pytorch":true,"tensorflow":true,"flax":false,"processor":"AutoTokenizer"}
 {"model_type":"convnext","pytorch":true,"tensorflow":true,"flax":false,"processor":"AutoFeatureExtractor"}
 {"model_type":"convnextv2","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoTokenizer"}
+{"model_type":"cpmant","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoTokenizer"}
 {"model_type":"ctrl","pytorch":true,"tensorflow":true,"flax":false,"processor":"AutoTokenizer"}
 {"model_type":"cvt","pytorch":true,"tensorflow":true,"flax":false,"processor":"AutoFeatureExtractor"}
 {"model_type":"data2vec-audio","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoFeatureExtractor"}
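For context, frameworks.json is a newline-delimited JSON file with one record per model type, listing framework support and the default processor class. Below is a minimal sketch of how such a file could be read and indexed; the local file path is an illustrative assumption, not something defined by this commit.

import json

frameworks = {}
with open("frameworks.json") as f:          # path is an illustrative assumption
    for line in f:
        if line.strip():
            entry = json.loads(line)        # each line is a standalone JSON object
            frameworks[entry["model_type"]] = entry

# After this commit, "cpmant" resolves to a PyTorch-only entry using AutoTokenizer.
print(frameworks.get("cpmant"))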
pipeline_tags.json
CHANGED
@@ -92,6 +92,8 @@
 {"model_class":"ConvNextModel","pipeline_tag":"feature-extraction","auto_class":"AutoModel"}
 {"model_class":"ConvNextV2ForImageClassification","pipeline_tag":"image-classification","auto_class":"AutoModelForImageClassification"}
 {"model_class":"ConvNextV2Model","pipeline_tag":"feature-extraction","auto_class":"AutoModel"}
+{"model_class":"CpmAntForCausalLM","pipeline_tag":"text-generation","auto_class":"AutoModelForCausalLM"}
+{"model_class":"CpmAntModel","pipeline_tag":"feature-extraction","auto_class":"AutoModel"}
 {"model_class":"CvtForImageClassification","pipeline_tag":"image-classification","auto_class":"AutoModelForImageClassification"}
 {"model_class":"CvtModel","pipeline_tag":"feature-extraction","auto_class":"AutoModel"}
 {"model_class":"DPRQuestionEncoder","pipeline_tag":"feature-extraction","auto_class":"AutoModel"}
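Similarly, pipeline_tags.json maps each model class to a default pipeline tag and auto class, also one JSON object per line. A minimal sketch of looking up the entries added here for CPM-Ant follows; again, the local file path is an illustrative assumption.

import json

pipeline_tags = {}
with open("pipeline_tags.json") as f:       # path is an illustrative assumption
    for line in f:
        if line.strip():
            entry = json.loads(line)        # one JSON object per line
            pipeline_tags[entry["model_class"]] = entry

# With this commit, CpmAntForCausalLM maps to text-generation via AutoModelForCausalLM,
# and CpmAntModel maps to feature-extraction via AutoModel.
for cls in ("CpmAntForCausalLM", "CpmAntModel"):
    print(cls, pipeline_tags[cls]["pipeline_tag"], pipeline_tags[cls]["auto_class"])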