# NOTE(review): removed web-scrape residue (Space status banner, file size,
# git blame hashes, and a line-number gutter) that was not part of the program.
import gradio as gr
from transformers import AutoTokenizer, AutoModelForSequenceClassification
from transformers import T5Tokenizer, T5ForConditionalGeneration
from transformers import pipeline
import torch
device = torch.device("mps" if torch.backends.mps.is_available() else "cpu")
te_tokenizer = AutoTokenizer.from_pretrained('MoritzLaurer/DeBERTa-v3-base-mnli-fever-anli')
te_model = AutoModelForSequenceClassification.from_pretrained('MoritzLaurer/DeBERTa-v3-base-mnli-fever-anli').to(device)
qa_pipeline = pipeline("question-answering", model='distilbert/distilbert-base-cased-distilled-squad')
qa_tokenizer = T5Tokenizer.from_pretrained("google/flan-t5-base")
qa_model = T5ForConditionalGeneration.from_pretrained("google/flan-t5-base", device_map="auto")
def predict(context, intent, multi_class):
print(context, intent)
input_text = "What is the opposite of " + intent + "?"
input_ids = qa_tokenizer(input_text, return_tensors="pt").input_ids.to(device)
opposite_output = qa_tokenizer.decode(qa_model.generate(input_ids, max_length=2)[0], skip_special_tokens=True)
input_text = "What object/thing is being described in the entire sentence?"
object_output = qa_pipeline(question=input_text, context=context, max_answer_len=2)['answer']
batch = ['The ' + object_output + ' is ' + intent, 'The ' + object_output + ' is ' + opposite_output, 'The ' + object_output + ' is neither ' + intent + ' nor ' + opposite_output]
outputs = []
normal = 0
print(batch)
for i, hypothesis in enumerate(batch):
input_ids = te_tokenizer.encode(context, hypothesis, return_tensors='pt').to(device)
# -> [contradiction, neutral, entailment]
logits = te_model(input_ids)[0][0]
if (i == 0):
normal = logits
if (i >= 2):
# -> [contradiction, entailment]
probs = logits[[0,2]].softmax(dim=0)
else:
probs = torch.exp(logits)
outputs.append(probs)
# calculate the stochastic vector for it being neither the positive or negative class
perfect_prob = outputs[2]
# -> [entailment, contradiction] for perfect
# -> [entailment, neutral, contradiction] for positive
outputs[1] = outputs[1].flip(dims=[0])
print(outputs)
print(perfect_prob)
# combine the negative and positive class by summing by the opposite of the negative class
aggregated = (outputs[0]+outputs[1])/2
print(aggregated)
# multiplying vectors
aggregated[1] = aggregated[1] + perfect_prob[0]
aggregated[0] = aggregated[0] * perfect_prob[1]
aggregated[2] = aggregated[2] * perfect_prob[1]
# multiple true classes
if (multi_class):
aggregated = torch.sigmoid(aggregated)
normal = torch.sigmoid(normal)
# only one true class
else:
aggregated = aggregated.softmax(dim=0)
normal = normal.softmax(dim=0)
return {"agree": aggregated[0], "neutral": aggregated[1], "disagree": aggregated[2]}, {"agree": normal[0], "neutral": normal[1], "disagree": normal[2]}
examples = [["These are so warm and comfortable. I’m 5’7”, 140 lbs, size 6-8 and Medium is a great fit. They wash and dry nicely too. The jogger style is the only style I can wear in this brand - the others are way too long so I had to return.", "long"], ["I feel strongly about politics in the US", "long"], ["The pants are long", "long"], ["The pants are slightly long", "long"]]
gradio_app = gr.Interface(
predict,
examples=examples,
inputs=[gr.Text(label="Statement"), gr.Text(label="Class"), gr.Checkbox(label="Allow multiple true classes")],
outputs=[gr.Label(num_top_classes=3, label="With Postprocessing"), gr.Label(num_top_classes=3, label="Without Postprocessing")],
title="Intent Analysis",
description="This model predicts whether or not the **_class_** describes the **_object described in the sentence_**. <br /> The two outputs shows what TE would predict with and without the postprocessing. An example edge case for normal TE is shown below. <br /> **_It is recommended that you clone the repository to speed up processing time_**. <br /> Additionally, note the difference between the strength of the probability when going between the last two examples, the former representing a strong opinion and the latter a weaker opinion",
cache_examples=True
)
gradio_app.launch() |