import gradio as gr
import os
import time
import torch
from transformers import pipeline

pipe = pipeline(
    "text-generation",
    model="TinyLlama/TinyLlama-1.1B-Chat-v1.0",
    torch_dtype=torch.bfloat16,
    device_map="auto",
)
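# Note on the pipeline above: device_map="auto" requires the `accelerate`
# package in the Space's environment; on CPU-only hardware the 1.1B model still
# loads, but bfloat16 generation will be noticeably slow. (Deployment note added
# here for context, not part of the original code.)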
# Running chat history sent to the model; it starts with the system prompt and
# grows as users send messages.
messages = [
    {
        "role": "system",
        "content": "You are a friendly chatbot who always responds in the style of a pirate",
    },
    # {"role": "user", "content": "How many helicopters can a human eat in one sitting?"},
]
# Leftover hello-world handler from the earlier Interface version of this Space;
# the Blocks demo below does not use it.
def greet(name):
    return "Hello " + name + "!!"

# iface = gr.Interface(fn=greet, inputs="text", outputs="text")
# iface.launch()
# Chatbot demo with multimodal input (text, markdown, LaTeX, code blocks, image,
# audio, & video). Plus shows support for streaming text.

def print_like_dislike(x: gr.LikeData):
    # Log which chatbot message was liked/disliked.
    print(x.index, x.value, x.liked)
def add_ia_text(text):
    """Generate the model's reply to `text` with the TinyLlama pipeline and return it."""
    messages.append({"role": "user", "content": text})
    prompt = pipe.tokenizer.apply_chat_template(
        messages, tokenize=False, add_generation_prompt=True
    )
    outputs = pipe(
        prompt, max_new_tokens=256, do_sample=True, temperature=0.7, top_k=50, top_p=0.95
    )
    # The pipeline returns the prompt followed by the completion, so strip the
    # prompt to keep only the newly generated text.
    response = outputs[0]["generated_text"][len(prompt):].strip()  # type: ignore
    messages.append({"role": "assistant", "content": response})
    return response
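
# For reference, apply_chat_template renders `messages` into TinyLlama's
# Zephyr-style chat format, roughly:
#
#   <|system|>
#   You are a friendly chatbot who always responds in the style of a pirate</s>
#   <|user|>
#   How many helicopters can a human eat in one sitting?</s>
#   <|assistant|>
#
# (Illustrative sketch of the prompt layout; the exact string comes from the
# model's bundled chat template.)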
def add_text(history, text):
    # Generate the reply up front; `bot` then streams it into the chat window.
    response = add_ia_text(text)
    history = history + [(text, response)]
    return history, gr.Textbox(value="", interactive=False)
def add_file(history, file):
    # Show the uploaded file in the chat; `bot` replies with a canned message.
    history = history + [((file.name,), None)]
    return history
def bot(history):
    # Stream the pending reply character by character; file uploads (which have
    # no generated reply) fall back to the original canned response.
    response = history[-1][1] or "**That's cool!**"
    history[-1][1] = ""
    for character in response:
        history[-1][1] += character
        time.sleep(0.05)
        yield history
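
# Optional sketch (not wired into the UI below): stream real model tokens as
# they are generated instead of replaying a pre-generated string. The helper
# name and the direct use of pipe.model / pipe.tokenizer here are assumptions
# for illustration, not part of the original demo.
def stream_model_reply(history):
    from threading import Thread
    from transformers import TextIteratorStreamer

    prompt = pipe.tokenizer.apply_chat_template(
        messages, tokenize=False, add_generation_prompt=True
    )
    inputs = pipe.tokenizer(prompt, return_tensors="pt").to(pipe.model.device)
    streamer = TextIteratorStreamer(
        pipe.tokenizer, skip_prompt=True, skip_special_tokens=True
    )
    # Run generation in a background thread; the streamer yields decoded text
    # pieces as soon as they are available.
    Thread(
        target=pipe.model.generate,
        kwargs=dict(**inputs, max_new_tokens=256, streamer=streamer),
    ).start()
    history[-1][1] = ""
    for piece in streamer:
        history[-1][1] += piece
        yield history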
with gr.Blocks() as demo:
    chatbot = gr.Chatbot(
        [],
        elem_id="chatbot",
        bubble_full_width=False,
        avatar_images=(None, os.path.join(os.path.dirname(__file__), "avatar.png")),
    )
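    # The bot avatar is read from avatar.png next to app.py; the file is assumed
    # to be present in the Space repository.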
    with gr.Row():
        txt = gr.Textbox(
            scale=4,
            show_label=False,
            placeholder="Enter text and press enter, or upload an image",
            container=False,
        )
        btn = gr.UploadButton("📁", file_types=["image", "video", "audio"])
    txt_msg = txt.submit(add_text, [chatbot, txt], [chatbot, txt], queue=False).then(
        bot, chatbot, chatbot, api_name="bot_response"
    )
    txt_msg.then(lambda: gr.Textbox(interactive=True), None, [txt], queue=False)
    file_msg = btn.upload(add_file, [chatbot, btn], [chatbot], queue=False).then(
        bot, chatbot, chatbot
    )
    chatbot.like(print_like_dislike, None, None)
demo.queue()

if __name__ == "__main__":
    demo.launch()
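
# Assumed Space dependencies (requirements.txt), inferred from the imports and
# from device_map="auto" (which needs accelerate); not part of the original file:
#   gradio
#   torch
#   transformers
#   accelerate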