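"""Gradio demo for instruction-style zero-shot text classification.

The model scores a "content [SEP] instruction" query against each candidate
label, and a softmax over the labels turns the scores into probabilities.
"""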
import json

import gradio as gr
import torch
from transformers import pipeline

device = "cuda:0" if torch.cuda.is_available() else "cpu"

model = pipeline("text-classification", "iknow-lab/azou", device=device)
# Truncate over-long inputs from the left, so the end of the text (the side
# nearest the appended instruction) is kept.
model.tokenizer.truncation_side = "left"

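# The pipeline's model is called directly: every (query, label) pair goes
# through the encoder as one batch, each pair yields a single logit (hence the
# squeeze), and softmax(-1) normalizes across the candidate labels.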
def inference(content, instruction, labels):
    query = f"{content} [SEP] {instruction}"
    # One (query, label) pair per candidate, batched together.
    inputs = model.tokenizer(
        [query] * len(labels), labels,
        truncation=True, padding=True, return_tensors="pt",
    ).to(device)

    with torch.no_grad():
        scores = model.model(**inputs).logits.squeeze(1).softmax(-1).tolist()
    output = dict(zip(labels, scores))

    print(query)
    print(output)
    return output, json.dumps(output, ensure_ascii=False)


def greet(content, instruction, labels):
    # The labels arrive from the UI as one comma-separated string.
    labels = [label.strip() for label in labels.split(",")]
    return inference(content, instruction, labels)
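
# Input widgets; the UI strings are Korean (English glosses in the comments).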
content = gr.TextArea(label="입력 내용")  # "Input text"
instruction = gr.Textbox(label="지시문")  # "Instruction"
labels = gr.Textbox(label="라벨(쉼표로 구분)")  # "Labels (comma-separated)"
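
# Built-in examples: comment-topic classification, a yes/no question about a
# news paragraph, and profanity detection.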
examples = [
    # "I used to go to the theater every weekend, but these days I hardly go."
    # / "Classify the topic of the comment." / movie, drama, game, novel
    ["예전에는 주말마다 극장에 놀러갔는데 요새는 좀 안가는 편이에요", "댓글 주제를 분류하세요", "영화,드라마,게임,소설"],
    # News: the Songdo transfer center tied to the Incheon-departure KTX is
    # effectively scrapped, setting back Incheon's plan to revitalize the
    # nearby economy through it. / "Is this positive news for the economy?"
    # / yes, no
    ["인천발 KTX와 관련한 송도의 복합환승센터가 사실상 무산, 단순 철도·버스 위주 환승시설로 만들어진다. 이 때문에 인천시의 인천발 KTX 기점의 앵커시설인 복합환승센터를 통한 인근 지역 경제 활성화를 이뤄낸다는 계획에 차질이 불가피하다.", "경제에 긍정적인 뉴스인가요?", "예,아니요"],
    # "I hope to see a K-pop concert at the end and come away with good
    # memories." / "Does it contain profanity?" / contains profanity, none
    ["마지막에는 k팝 공연보고 좋은 추억 남았으면 좋겠네요", "욕설이 포함되어있나요?", "욕설이 있습니다,욕설이 없습니다"],
]
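
# gr.Label renders the score dict as ranked confidences; the Text field shows
# the same result as raw JSON.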
gr.Interface(
    fn=greet,
    inputs=[content, instruction, labels],
    outputs=[gr.Label(), gr.Text(label="json")],
    examples=examples,
).launch(server_name="0.0.0.0", server_port=7860)