Commit 58b45fb committed by lysandre
1 Parent(s): 0aa0ece

Update with commit 8260cb311efe9b2855f6ecb6d1d7724ea2d0dcc3

See: https://github.com/huggingface/transformers/commit/8260cb311efe9b2855f6ecb6d1d7724ea2d0dcc3

Files changed (2):
  1. frameworks.json +1 -0
  2. pipeline_tags.json +1 -0
frameworks.json CHANGED
@@ -38,6 +38,7 @@
  {"model_type":"cpmant","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoTokenizer"}
  {"model_type":"ctrl","pytorch":true,"tensorflow":true,"flax":false,"processor":"AutoTokenizer"}
  {"model_type":"cvt","pytorch":true,"tensorflow":true,"flax":false,"processor":"AutoImageProcessor"}
+ {"model_type":"dac","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoFeatureExtractor"}
  {"model_type":"data2vec-audio","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoTokenizer"}
  {"model_type":"data2vec-text","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoTokenizer"}
  {"model_type":"data2vec-vision","pytorch":true,"tensorflow":true,"flax":false,"processor":"AutoImageProcessor"}
pipeline_tags.json CHANGED
@@ -114,6 +114,7 @@
  {"model_class":"DPRQuestionEncoder","pipeline_tag":"feature-extraction","auto_class":"AutoModel"}
  {"model_class":"DPTForDepthEstimation","pipeline_tag":"depth-estimation","auto_class":"AutoModelForDepthEstimation"}
  {"model_class":"DPTModel","pipeline_tag":"image-feature-extraction","auto_class":"AutoModel"}
+ {"model_class":"DacModel","pipeline_tag":"feature-extraction","auto_class":"AutoModel"}
  {"model_class":"Data2VecAudioForAudioFrameClassification","pipeline_tag":"audio-frame-classification","auto_class":"AutoModelForAudioFrameClassification"}
  {"model_class":"Data2VecAudioForCTC","pipeline_tag":"automatic-speech-recognition","auto_class":"AutoModelForCTC"}
  {"model_class":"Data2VecAudioForSequenceClassification","pipeline_tag":"audio-classification","auto_class":"AutoModelForAudioClassification"}