|
import gradio as gr |
|
from transformers import AutoTokenizer |
|
import torch, json |
|
|
|
from fastai.learner import load_learner |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Load the mapping of question labels (keys are the class names the model
# predicts over). Explicit UTF-8 so the read does not depend on the platform's
# default locale encoding.
with open('question_labels.json', 'r', encoding='utf-8') as f:
    question_dictionary = json.load(f)

# Class names in insertion order; assumed to match the order of the model's
# output probabilities — TODO confirm against how the learner was exported.
que_classes = list(question_dictionary.keys())

# Deserialize the exported fastai/blurr learner.
# NOTE(review): load_learner unpickles the file — only load trusted artifacts.
blurr_model = load_learner('healifyLLM-stage4.pkl')
|
|
|
def detect_question(text):
    """Classify *text* and return a {class_name: probability} mapping.

    Runs the exported blurr learner on the input string and pairs each
    class name with its predicted probability (cast to plain float so the
    result is JSON-serializable for Gradio).
    """
    prediction = blurr_model.blurr_predict(text)[0]
    return {name: float(p) for name, p in zip(que_classes, prediction['probs'])}
|
|
|
# Output component showing the top-5 predicted classes.
# Fix: `gr.outputs.Label` is the deprecated Gradio 2.x namespace and was
# removed in Gradio 3.x+; `gr.Label` is the current spelling.
label = gr.Label(num_top_classes=5)

# Wire the classifier into a simple text-in / label-out demo.
iface = gr.Interface(fn=detect_question, inputs="text", outputs=label)

# inline=False: open in a separate tab/window rather than embedding inline
# (relevant when running inside a notebook).
iface.launch(inline=False)
|
|