abdulmatinomotoso committed on
Commit
51bb781
1 Parent(s): a886a00

Create app.py

Files changed (1)
  1. app.py +54 -0
app.py ADDED
@@ -0,0 +1,54 @@
+ import gradio as gr
+ import numpy as np
+ from transformers import AutoTokenizer, AutoModelForSequenceClassification
+ import torch
+
+
+ # The fifteen emotion labels the model was fine-tuned on, in label-index order
+ target_list = ["Cute", "Infuriating", "Sentimental", "Empathetic",
+                "Cynical", "Depressing", "Awe-inspiring", "Patriotic", "Educational",
+                "Encouraging", "Voyeuristic", "Funny", "Sarcastic", "Dismissive", "Disparaging"]
+
+ device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
+
+ model_name = 'abdulmatinomotoso/finetuned-distilbert-multi-label-emotion_5'
+ model = AutoModelForSequenceClassification.from_pretrained(model_name).to(device)
+ tokenizer = AutoTokenizer.from_pretrained(model_name)
+
+
+ def get_probs(logits):
+     # Sigmoid maps each logit to an independent per-label probability
+     return 1 / (1 + np.exp(-logits))
+
+
+ def multi_label_emotions(text):
+     inputs = tokenizer(text, return_tensors="pt", truncation=True).to(device)
+     with torch.no_grad():
+         logits = model(**inputs).logits
+
+     probs = get_probs(logits.cpu().numpy()[0])
+
+     # Pair each probability with its label and sort from most to least likely
+     final_output = sorted(zip(probs, target_list), reverse=True)
+
+     return {label: float(prob) for prob, label in final_output}
+
+
+ demo = gr.Interface(multi_label_emotions,
+                     inputs=gr.Textbox(),
+                     outputs=gr.Label(num_top_classes=4),
+                     title='Multi-label-emotion-classification')
+
+ if __name__ == '__main__':
+     demo.launch(debug=True)
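
For reference, a minimal usage sketch of the function defined above, run outside the Gradio UI. The input sentence and the probability values in the comment are hypothetical; only the dict structure and label names come from app.py.

# Hypothetical usage sketch: call the app's classifier function directly.
# Assumes app.py has been imported so that `multi_label_emotions` is defined.
result = multi_label_emotions("The rescue dog finally found a loving home.")

# `result` maps every label in target_list to its sigmoid probability,
# sorted from most to least likely, e.g. (illustrative values only):
# {"Sentimental": 0.91, "Cute": 0.85, "Encouraging": 0.40, ...}
print(result)

# In the Space itself, gr.Label(num_top_classes=4) displays only the
# four highest-scoring labels from this dictionary.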