import numpy as np
import gradio as gr
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

# Class labels in the order used by the fine-tuned model's output head.
labels = ['Not_Adult', 'Adult']

# Run on GPU when available, otherwise fall back to CPU.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# Load the fine-tuned DistilBERT classifier and its tokenizer from the Hugging Face Hub.
model_name = 'valurank/finetuned-distilbert-adult-content-detection'
model = AutoModelForSequenceClassification.from_pretrained(model_name).to(device)
model.eval()
tokenizer = AutoTokenizer.from_pretrained(model_name)


def get_adult_content(text):
    """Classify the input text as 'Adult' or 'Not_Adult'."""
    # Tokenize the text, truncating to the model's maximum sequence length.
    input_ids = tokenizer.encode(text, return_tensors='pt', truncation=True).to(device)

    # Run the classifier without tracking gradients (inference only).
    with torch.no_grad():
        logits = model(input_ids).logits

    # Convert logits to probabilities and pick the most likely class.
    probs = torch.softmax(logits, dim=1)[0].cpu().numpy()
    max_index = np.argmax(probs)
    return labels[max_index]


# Gradio UI: a single text input mapped to a single text output.
demo = gr.Interface(
    fn=get_adult_content,
    inputs=gr.Textbox(label="Input your text here"),
    outputs=gr.Textbox(label="Category"),
)

if __name__ == "__main__":
    demo.launch(debug=True)
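
# Minimal usage sketch (assumption: this script is saved as app.py; the sample
# sentence is illustrative and the predicted label depends entirely on the model):
#
#   >>> from app import get_adult_content
#   >>> get_adult_content("Some text to check")   # returns 'Not_Adult' or 'Adult'
#
# Running the file directly (python app.py) launches the Gradio web UI instead.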