from transformers import BertTokenizerFast, TFBertForSequenceClassification, TextClassificationPipeline
import numpy as np
import tensorflow as tf
import gradio as gr
import openai
import os
# Sentiment Analysis Pre-Trained Model
model_path = "leadingbridge/sentiment-analysis"
tokenizer = BertTokenizerFast.from_pretrained(model_path)
model = TFBertForSequenceClassification.from_pretrained(model_path, id2label={0: 'negative', 1: 'positive'})
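# Note: the classification pipeline below returns a list of dicts of the form
# [{'label': ..., 'score': ...}]; Gradio renders that list as plain text in the output box.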
def sentiment_analysis(text):
    pipe = TextClassificationPipeline(model=model, tokenizer=tokenizer)
    result = pipe(text)
    return result
# OpenAI Chatbot Model
openai.api_key = os.environ.get("OPENAI_API_KEY")  # read the key from the environment rather than hard-coding it in the source
start_sequence = "\nAI:"
restart_sequence = "\nHuman: "
prompt = "You can discuss any topic with the Chatbot assistant in Chinese by typing it in here"
def openai_create(prompt):
    response = openai.Completion.create(
        model="text-davinci-003",
        prompt=prompt,
        temperature=0.9,
        max_tokens=1024,
        top_p=1,
        frequency_penalty=0,
        presence_penalty=0.6,
        stop=[" Human:", " AI:"]
    )
    return response.choices[0].text
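# chatgpt_clone keeps the running conversation as a list of (user, bot) tuples held in gr.State,
# flattens it into a single prompt string for each completion call, and returns the updated
# history twice: once for the Chatbot display and once for the State component.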
def chatgpt_clone(input, history):
    history = history or []
    s = list(sum(history, ()))
    s.append(input)
    inp = ' '.join(s)
    output = openai_create(inp)
    history.append((input, output))
    return history, history
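# The two translation helpers below share the same Completion settings; only the target language
# in the prompt differs. The trailing "1." appears intended to prime the model to emit the
# translation as the first numbered item of its answer.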
# OpenAI Chinese Translation Model
def translate_to_chinese(text_to_translate):
    response = openai.Completion.create(
        model="text-davinci-003",
        prompt=f"Translate this short sentence into Chinese:\n\n{text_to_translate}\n\n1.",
        temperature=0.3,
        max_tokens=1024,
        top_p=1.0,
        frequency_penalty=0.0,
        presence_penalty=0.0
    )
    return response.choices[0].text.strip()
# OpenAI English Translation Model
def translate_to_english(text_to_translate):
    response = openai.Completion.create(
        model="text-davinci-003",
        prompt=f"Translate this short sentence into English:\n\n{text_to_translate}\n\n1.",
        temperature=0.3,
        max_tokens=1024,
        top_p=1.0,
        frequency_penalty=0.0,
        presence_penalty=0.0
    )
    return response.choices[0].text.strip()
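# The Blocks UI below exposes each helper on its own tab. The chatbot tab wires the Textbox,
# Chatbot display, and State component together so the conversation history persists between turns.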
# Gradio User Interface
with gr.Blocks() as demo:
    gr.Markdown("Choose the Chinese NLP model you want to use from the tabs")
    with gr.Tab("OpenAI Chatbot"):
        chatbot = gr.Chatbot()
        message = gr.Textbox(placeholder=prompt)
        state = gr.State()
        submit = gr.Button("SEND")
        submit.click(chatgpt_clone, inputs=[message, state], outputs=[chatbot, state])
    with gr.Tab("Sentiment Analysis"):
        inputs = gr.Textbox(placeholder="Enter a Chinese positive or negative sentence here")
        outputs = gr.Textbox(label="Sentiment Analysis")
        proceed_button = gr.Button("Proceed")
        proceed_button.click(fn=sentiment_analysis, inputs=inputs, outputs=outputs)
    with gr.Tab("Translation to Chinese"):
        inputs = gr.Textbox(placeholder="Enter a short English sentence to translate to Chinese here.")
        outputs = gr.Textbox(label="Translation Result")
        proceed_button = gr.Button("Translate")
        proceed_button.click(fn=translate_to_chinese, inputs=inputs, outputs=outputs)
    with gr.Tab("Translation to English"):
        inputs = gr.Textbox(placeholder="Enter a short sentence to translate to English here.")
        outputs = gr.Textbox(label="Translation Result")
        proceed_button = gr.Button("Translate")
        proceed_button.click(fn=translate_to_english, inputs=inputs, outputs=outputs)

demo.launch(inline=False)
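# On Hugging Face Spaces the app is served automatically once launch() is called; when running
# locally this starts a local server (passing share=True would additionally create a temporary public link).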