Update with commit 0cf60f13ab1c857c17fc3fb127129048c93bf06c
Browse files. See: https://github.com/huggingface/transformers/commit/0cf60f13ab1c857c17fc3fb127129048c93bf06c
- frameworks.json +1 -0
- pipeline_tags.json +4 -0
frameworks.json
CHANGED
@@ -73,6 +73,7 @@
|
|
73 |
{"model_type":"funnel","pytorch":true,"tensorflow":true,"flax":false,"processor":"AutoTokenizer"}
|
74 |
{"model_type":"fuyu","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoProcessor"}
|
75 |
{"model_type":"gemma","pytorch":true,"tensorflow":false,"flax":true,"processor":"AutoTokenizer"}
|
|
|
76 |
{"model_type":"git","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoProcessor"}
|
77 |
{"model_type":"glpn","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoImageProcessor"}
|
78 |
{"model_type":"gpt2","pytorch":true,"tensorflow":true,"flax":true,"processor":"AutoTokenizer"}
|
|
|
73 |
{"model_type":"funnel","pytorch":true,"tensorflow":true,"flax":false,"processor":"AutoTokenizer"}
|
74 |
{"model_type":"fuyu","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoProcessor"}
|
75 |
{"model_type":"gemma","pytorch":true,"tensorflow":false,"flax":true,"processor":"AutoTokenizer"}
|
76 |
+
{"model_type":"gemma2","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoTokenizer"}
|
77 |
{"model_type":"git","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoProcessor"}
|
78 |
{"model_type":"glpn","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoImageProcessor"}
|
79 |
{"model_type":"gpt2","pytorch":true,"tensorflow":true,"flax":true,"processor":"AutoTokenizer"}
|
pipeline_tags.json
CHANGED
@@ -383,6 +383,10 @@
|
|
383 |
{"model_class":"GPTNeoXJapaneseModel","pipeline_tag":"feature-extraction","auto_class":"AutoModel"}
|
384 |
{"model_class":"GPTNeoXModel","pipeline_tag":"feature-extraction","auto_class":"AutoModel"}
|
385 |
{"model_class":"GPTSanJapaneseForConditionalGeneration","pipeline_tag":"text2text-generation","auto_class":"AutoModelForSeq2SeqLM"}
|
|
|
|
|
|
|
|
|
386 |
{"model_class":"GemmaForCausalLM","pipeline_tag":"text-generation","auto_class":"AutoModelForCausalLM"}
|
387 |
{"model_class":"GemmaForSequenceClassification","pipeline_tag":"text-classification","auto_class":"AutoModelForSequenceClassification"}
|
388 |
{"model_class":"GemmaForTokenClassification","pipeline_tag":"token-classification","auto_class":"AutoModelForTokenClassification"}
|
|
|
383 |
{"model_class":"GPTNeoXJapaneseModel","pipeline_tag":"feature-extraction","auto_class":"AutoModel"}
|
384 |
{"model_class":"GPTNeoXModel","pipeline_tag":"feature-extraction","auto_class":"AutoModel"}
|
385 |
{"model_class":"GPTSanJapaneseForConditionalGeneration","pipeline_tag":"text2text-generation","auto_class":"AutoModelForSeq2SeqLM"}
|
386 |
+
{"model_class":"Gemma2ForCausalLM","pipeline_tag":"text-generation","auto_class":"AutoModelForCausalLM"}
|
387 |
+
{"model_class":"Gemma2ForSequenceClassification","pipeline_tag":"text-classification","auto_class":"AutoModelForSequenceClassification"}
|
388 |
+
{"model_class":"Gemma2ForTokenClassification","pipeline_tag":"token-classification","auto_class":"AutoModelForTokenClassification"}
|
389 |
+
{"model_class":"Gemma2Model","pipeline_tag":"feature-extraction","auto_class":"AutoModel"}
|
390 |
{"model_class":"GemmaForCausalLM","pipeline_tag":"text-generation","auto_class":"AutoModelForCausalLM"}
|
391 |
{"model_class":"GemmaForSequenceClassification","pipeline_tag":"text-classification","auto_class":"AutoModelForSequenceClassification"}
|
392 |
{"model_class":"GemmaForTokenClassification","pipeline_tag":"token-classification","auto_class":"AutoModelForTokenClassification"}
|