amiriparian Filip-Packan committed on
Commit 847a2cd
1 Parent(s): 8481dff

Added example (#1)


- Added example (0741be329067ad79bec2dc8361203c1f979898e4)


Co-authored-by: Filip <[email protected]>

Files changed (1)
  1. README.md +41 -16
README.md CHANGED
@@ -51,25 +51,50 @@ Further details are available in the corresponding [**paper**](https://huggingfa
  ### Usage

  ```python
- import torch
- import torch.nn as nn
- from transformers import AutoModelForAudioClassification, Wav2Vec2FeatureExtractor
-
-
-
- # CONFIG and MODEL SETUP
- model_name = 'amiriparian/ExHuBERT'
- feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("facebook/hubert-base-ls960")
- model = AutoModelForAudioClassification.from_pretrained(model_name, trust_remote_code=True,revision="b158d45ed8578432468f3ab8d46cbe5974380812")
-
- # Freezing half of the encoder for further transfer learning
- model.freeze_og_encoder()
-
- sampling_rate=16000
- device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
- model = model.to(device)
-
-
+ import torch
+ import torch.nn as nn
+ from transformers import AutoModelForAudioClassification, Wav2Vec2FeatureExtractor
+
+
+ # CONFIG and MODEL SETUP
+ model_name = 'amiriparian/ExHuBERT'
+ feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("facebook/hubert-base-ls960")
+ model = AutoModelForAudioClassification.from_pretrained(model_name, trust_remote_code=True,
+                                                         revision="b158d45ed8578432468f3ab8d46cbe5974380812")
+
+ # Freezing half of the encoder for further transfer learning
+ model.freeze_og_encoder()
+
+ sampling_rate = 16000
+ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+ model = model.to(device)
+
+
+ # Example application with a local audio file
+ import numpy as np
+ import librosa
+ import torch.nn.functional as F
+
+ # Sample taken from the Toronto emotional speech set (TESS): https://tspace.library.utoronto.ca/handle/1807/24487
+ # Load and resample to 16 kHz so the audio matches the feature extractor's sampling rate
+ waveform, sr_wav = librosa.load("YAF_date_angry.wav", sr=sampling_rate)
+ # Pad to a maximum of 3 seconds at 16 kHz (48000 samples) for the best results
+ waveform = feature_extractor(waveform, sampling_rate=sampling_rate, padding='max_length', max_length=48000)
+ waveform = waveform['input_values'][0]
+ waveform = waveform.reshape(1, -1)
+ waveform = torch.from_numpy(waveform).to(device)
+ with torch.no_grad():
+     output = model(waveform)
+     output = F.softmax(output.logits, dim=1)
+     output = output.detach().cpu().numpy().round(2)
+ print(output)
+
+ # [[0. 0. 0. 1. 0. 0.]]
+ #        Low         |        High            Arousal
+ # Neg.  Neut.  Pos.  |  Neg.  Neut.  Pos.     Valence
+ # Disgust, Neutral, Kind | Anger, Surprise, Joy   Example emotions
  ```

  ### Citation Info
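
The closing comments in the example map the six softmax scores onto a 2×3 arousal/valence grid. A minimal decoding sketch of that mapping, assuming the index order those comments imply; the `EMOTION_GRID` name and the example emotions are illustrative, not part of the model's API:

```python
import numpy as np

# Assumed index order, inferred from the comment block in the example above:
# low arousal (neg./neut./pos. valence), then high arousal (neg./neut./pos. valence).
EMOTION_GRID = [
    ("low arousal", "negative valence"),   # e.g. disgust
    ("low arousal", "neutral valence"),    # e.g. neutral
    ("low arousal", "positive valence"),   # e.g. kind
    ("high arousal", "negative valence"),  # e.g. anger
    ("high arousal", "neutral valence"),   # e.g. surprise
    ("high arousal", "positive valence"),  # e.g. joy
]

output = np.array([[0., 0., 0., 1., 0., 0.]])  # the printout from the example
arousal, valence = EMOTION_GRID[int(output.argmax(axis=1)[0])]
print(arousal, valence)  # high arousal, negative valence (anger for this TESS sample)
```

Likewise, since `model.freeze_og_encoder()` freezes half of the encoder for transfer learning, a fine-tuning loop would optimize only the parameters that remain trainable. A minimal sketch, continuing from the `model` set up in the example above; the optimizer choice and learning rate are placeholders:

```python
import torch

# Collect only the parameters left trainable after model.freeze_og_encoder()
trainable_params = [p for p in model.parameters() if p.requires_grad]
optimizer = torch.optim.AdamW(trainable_params, lr=1e-5)  # placeholder learning rate
```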