import gradio as gr
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM
title = """# 🙋🏻‍♂️ Welcome to Tonic's Minitron-8B-Base"""
# Load the tokenizer and model
model_path = "nvidia/Minitron-8B-Base"
tokenizer = AutoTokenizer.from_pretrained(model_path)
device = "cuda"
dtype = torch.bfloat16
model = AutoModelForCausalLM.from_pretrained(model_path, torch_dtype=dtype, device_map=device)
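
# Assumption: bfloat16 halves memory versus float32 but needs a CUDA GPU
# with bf16 support (Ampere or newer); on older cards, torch.float16 is a
# common fallback.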

# Define the prompt format
def create_prompt(instruction):
    PROMPT = '''You are TronTonic, an AI created by Tonic-AI. Below is an instruction that describes a task.\n\nWrite a response that appropriately completes the request.\n\n### Instruction:\n{instruction}\n\n### Response:'''
    return PROMPT.format(instruction=instruction)
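
# For example, create_prompt("What is 2 + 2?") fills the {instruction} slot
# and ends with "### Response:", so the model continues with the answer.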

def respond(message, history, max_tokens, temperature, top_p):
    prompt = create_prompt(message)
    input_ids = tokenizer.encode(prompt, return_tensors="pt").to(model.device)
    output_ids = model.generate(input_ids, max_new_tokens=int(max_tokens),
                                do_sample=True, temperature=temperature, top_p=top_p)
    # Decode only the newly generated tokens, skipping the echoed prompt
    return tokenizer.decode(output_ids[0][input_ids.shape[-1]:], skip_special_tokens=True)
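
# Quick sanity check outside the UI (hypothetical values; note that `history`
# is unused because each turn is formatted as a standalone instruction):
#   respond("Name three prime numbers.", [], 64, 0.7, 0.95)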

demo = gr.ChatInterface(
    respond,
    description=title,
    additional_inputs=[
        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
        gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)"),
    ],
)
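
# Optional: on shared hardware, demo.queue() serializes requests so concurrent
# users don't contend for the GPU; a sketch would be demo.queue().launch().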

if __name__ == "__main__":
    demo.launch()