import gradio as gr
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

# French BARThez tokenizer paired with the Anvil-ML sequence-classification head ("IA" vs. "Humain")
barthez_tokenizer = AutoTokenizer.from_pretrained("moussaKam/barthez")
model = AutoModelForSequenceClassification.from_pretrained("Anvil-ML/detecteur-ia")
def interpret_pred(pred):
    """Map the raw logit for the "IA" class onto a 0-100% scale."""
    low_bound = -6.748472   # lower bound used to normalise the "IA" logit
    high_bound = 6.7176056  # upper bound used to normalise the "IA" logit
    result = "IA" if pred.argmax(dim=-1).item() == 1 else "Humain"
    pred_value = pred[0][1].item()
    # Min-max normalisation of the logit, then conversion to a rounded percentage
    interpreted_pred = (pred_value - low_bound) / (high_bound - low_bound)
    is_ai_percent = round(100 * interpreted_pred)
    return result, is_ai_percent
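# Worked example of the normalisation above (the logit value is assumed, for illustration only):
# with an "IA" logit of 3.0, interpreted_pred = (3.0 - (-6.748472)) / (6.7176056 - (-6.748472)) ≈ 0.72,
# so is_ai_percent would be reported as 72.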
def interpret_pred_with_sensibility(pred):
    """Translate the normalised "IA" score into a qualitative probability label (in French)."""
    low_bound = -6.748472
    high_bound = 6.7176056
    pred_value = pred[0][1].item()
    interpreted_pred = (pred_value - low_bound) / (high_bound - low_bound)
    if interpreted_pred < 0.5:
        proba = "très faible"   # very low
    elif interpreted_pred < 0.6:
        proba = "faible"        # low
    elif interpreted_pred < 0.8:
        proba = "modérée"       # moderate
    elif interpreted_pred < 0.95:
        proba = "élevée"        # high
    else:
        proba = "très élevée"   # very high
    return proba
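# Example of the banding above (score assumed for illustration): a normalised score of 0.85
# falls in the [0.8, 0.95) band, so the label returned is "élevée" (high).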
def main(Texte):
    # Tokenise the input text (truncated to the model's maximum length) and run the classifier
    input_ids = torch.tensor(
        [barthez_tokenizer.encode(Texte, truncation=True, padding=True, add_special_tokens=True)]
    )
    predict = model(input_ids)[0]
    #result = (
    #    "Résultat : {}.\nCe texte a {}% de chances d'avoir été généré par de l'IA"
    #    .format(interpret_pred(predict)[0], interpret_pred(predict)[1])
    #)
    proba = interpret_pred_with_sensibility(predict)
    # "The probability that this text was generated by AI is {}"
    Resultat = (
        "La probabilité que ce texte ait été généré par de l'IA est {}"
        .format(proba)
    )
    return Resultat
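# A minimal local sanity check, sketched here as an extra helper and not part of the original Space:
# the name "_quick_check" and the sample sentence are assumptions added for illustration. It can be
# called from a Python shell once the checkpoints have downloaded.
def _quick_check(sample="Bonjour, ceci est un petit texte de test."):
    # Runs the same path as main() and prints the label, the percentage and the qualitative band.
    ids = torch.tensor(
        [barthez_tokenizer.encode(sample, truncation=True, add_special_tokens=True)]
    )
    logits = model(ids)[0]
    label, percent = interpret_pred(logits)
    print(label, percent, interpret_pred_with_sensibility(logits))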
# Simple Gradio text-in / text-out interface around main()
iface = gr.Interface(fn=main, inputs="text", outputs="text")
iface.launch()