Update with commit c43b380e70136c65e4f2c0644fa50e1fb21b4fee
Browse files. See: https://github.com/huggingface/transformers/commit/c43b380e70136c65e4f2c0644fa50e1fb21b4fee
- frameworks.json +1 -0
- pipeline_tags.json +2 -0
frameworks.json
CHANGED
@@ -126,6 +126,7 @@
|
|
126 |
{"model_type":"mra","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoTokenizer"}
|
127 |
{"model_type":"mt5","pytorch":true,"tensorflow":true,"flax":true,"processor":"AutoTokenizer"}
|
128 |
{"model_type":"musicgen","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoTokenizer"}
|
|
|
129 |
{"model_type":"mvp","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoTokenizer"}
|
130 |
{"model_type":"nat","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoImageProcessor"}
|
131 |
{"model_type":"nezha","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoTokenizer"}
|
|
|
126 |
{"model_type":"mra","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoTokenizer"}
|
127 |
{"model_type":"mt5","pytorch":true,"tensorflow":true,"flax":true,"processor":"AutoTokenizer"}
|
128 |
{"model_type":"musicgen","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoTokenizer"}
|
129 |
+ {"model_type":"musicgen_melody","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoTokenizer"}
|
130 |
{"model_type":"mvp","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoTokenizer"}
|
131 |
{"model_type":"nat","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoImageProcessor"}
|
132 |
{"model_type":"nezha","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoTokenizer"}
|
pipeline_tags.json
CHANGED
@@ -535,6 +535,8 @@
|
|
535 |
{"model_class":"MraModel","pipeline_tag":"feature-extraction","auto_class":"AutoModel"}
|
536 |
{"model_class":"MusicgenForCausalLM","pipeline_tag":"text-generation","auto_class":"AutoModelForCausalLM"}
|
537 |
{"model_class":"MusicgenForConditionalGeneration","pipeline_tag":"text-to-audio","auto_class":"AutoModelForTextToWaveform"}
|
|
|
|
|
538 |
{"model_class":"MvpForCausalLM","pipeline_tag":"text-generation","auto_class":"AutoModelForCausalLM"}
|
539 |
{"model_class":"MvpForConditionalGeneration","pipeline_tag":"text2text-generation","auto_class":"AutoModelForSeq2SeqLM"}
|
540 |
{"model_class":"MvpForQuestionAnswering","pipeline_tag":"question-answering","auto_class":"AutoModelForQuestionAnswering"}
|
|
|
535 |
{"model_class":"MraModel","pipeline_tag":"feature-extraction","auto_class":"AutoModel"}
|
536 |
{"model_class":"MusicgenForCausalLM","pipeline_tag":"text-generation","auto_class":"AutoModelForCausalLM"}
|
537 |
{"model_class":"MusicgenForConditionalGeneration","pipeline_tag":"text-to-audio","auto_class":"AutoModelForTextToWaveform"}
|
538 |
+ {"model_class":"MusicgenMelodyForCausalLM","pipeline_tag":"text-generation","auto_class":"AutoModelForCausalLM"}
|
539 |
+ {"model_class":"MusicgenMelodyForConditionalGeneration","pipeline_tag":"text-to-audio","auto_class":"AutoModelForTextToWaveform"}
|
540 |
{"model_class":"MvpForCausalLM","pipeline_tag":"text-generation","auto_class":"AutoModelForCausalLM"}
|
541 |
{"model_class":"MvpForConditionalGeneration","pipeline_tag":"text2text-generation","auto_class":"AutoModelForSeq2SeqLM"}
|
542 |
{"model_class":"MvpForQuestionAnswering","pipeline_tag":"question-answering","auto_class":"AutoModelForQuestionAnswering"}
|