from transformers import BertTokenizerFast, TFBertForSequenceClassification, TextClassificationPipeline
import numpy as np
import tensorflow as tf
import gradio as gr
import openai
import os

# Sentiment analysis pre-trained model
model_path = "leadingbridge/sentiment-analysis"
tokenizer = BertTokenizerFast.from_pretrained(model_path)
model = TFBertForSequenceClassification.from_pretrained(model_path, id2label={0: 'negative', 1: 'positive'})


def sentiment_analysis(text):
    # Wrap the fine-tuned model in a text-classification pipeline and classify the input sentence
    pipe = TextClassificationPipeline(model=model, tokenizer=tokenizer)
    result = pipe(text)
    return result
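
# Illustrative usage (a hedged sketch; the score value below is hypothetical):
#   sentiment_analysis("这部电影非常好看")
#   -> [{'label': 'positive', 'score': 0.99}]
# The pipeline returns a list with one dict per input, holding the predicted label
# (mapped through id2label above) and its confidence score.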

# OpenAI chatbot model
# Read the API key from the environment instead of hard-coding it (assumes OPENAI_API_KEY is set)
openai.api_key = os.environ.get("OPENAI_API_KEY")
start_sequence = "\nAI:"
restart_sequence = "\nHuman: "
prompt = "You can discuss any topic with the Chatbot assistant in Chinese by typing it in here"


def openai_create(prompt):
    # Send the accumulated conversation to the OpenAI completion endpoint and return the reply text
    response = openai.Completion.create(
        model="text-davinci-003",
        prompt=prompt,
        temperature=0.9,
        max_tokens=150,
        top_p=1,
        frequency_penalty=0,
        presence_penalty=0.6,
        stop=[" Human:", " AI:"]
    )
    return response.choices[0].text


def chatgpt_clone(input, history):
    # Flatten the (human, ai) history pairs into a single prompt string, append the new
    # user input, request a completion, and record the new exchange in the history
    history = history or []
    s = list(sum(history, ()))
    s.append(input)
    inp = ' '.join(s)
    output = openai_create(inp)
    history.append((input, output))
    return history, history
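
# Sketch of how the conversation state evolves (the replies shown are hypothetical):
#   history = []
#   history, _ = chatgpt_clone("你好", history)   # history == [("你好", "<model reply>")]
#   history, _ = chatgpt_clone("今天天气怎么样?", history)
# Each turn the whole history is flattened into one prompt string, so the model sees
# the earlier exchanges as plain concatenated text.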


# Gradio interface: one tab per model
with gr.Blocks() as demo:
    gr.Markdown("Choose the Chinese NLP model you want to use from the tabs")
    with gr.Tab("Sentiment Analysis"):
        inputs = gr.Textbox(placeholder="Enter a Chinese positive or negative sentence here")
        outputs = gr.Textbox(label="Sentiment Analysis")
        proceed_button = gr.Button("Proceed")
        proceed_button.click(fn=sentiment_analysis, inputs=inputs, outputs=outputs)
    with gr.Tab("OpenAI Chatbot"):
        chatbot = gr.Chatbot()
        message = gr.Textbox(placeholder=prompt)
        state = gr.State()
        submit = gr.Button("SEND")
        submit.click(chatgpt_clone, inputs=[message, state], outputs=[chatbot, state])

demo.launch(inline=False)