Mohaddz committed on
Commit 2dfc7b0
1 Parent(s): e5ff2a5

Update app.py

Files changed (1):
  app.py +30 -1
app.py CHANGED
@@ -1,3 +1,32 @@
 import gradio as gr
-
-gr.load("models/mattshumer/Reflection-Llama-3.1-70B").launch()
+from transformers import AutoTokenizer, AutoModelForCausalLM
+import torch
+
+# Load the model and tokenizer
+model_name = "mattshumer/Reflection-Llama-3.1-70B"
+tokenizer = AutoTokenizer.from_pretrained(model_name)
+model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch.float16, device_map="auto")
+
+def generate_response(message, history):
+    # Combine history and new message
+    prompt = "\n".join([f"Human: {h[0]}\nAI: {h[1]}" for h in history])
+    prompt += f"\nHuman: {message}\nAI:"
+
+    # Tokenize and generate
+    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
+    outputs = model.generate(**inputs, max_new_tokens=500, temperature=0.7)
+    response = tokenizer.decode(outputs[0], skip_special_tokens=True)
+
+    # Extract only the AI's response
+    ai_response = response.split("AI:")[-1].strip()
+    return ai_response
+
+# Create the Gradio interface
+iface = gr.ChatInterface(
+    fn=generate_response,
+    title="Chat with Reflection-Llama-3.1-70B",
+    description="Ask me anything!",
+)
+
+# Launch the interface
+iface.launch()
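
A note for anyone adapting this commit: depending on the checkpoint's generation_config, model.generate may ignore temperature=0.7 and decode greedily unless sampling is enabled, in which case transformers emits a warning. A minimal sketch of a more explicit call, assuming the same tokenizer and model objects and the prompt string built inside generate_response above:

    # Sketch only: make sampling explicit so temperature actually applies.
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    outputs = model.generate(
        **inputs,
        max_new_tokens=500,
        do_sample=True,   # without sampling enabled, temperature is ignored
        temperature=0.7,
        pad_token_id=tokenizer.eos_token_id,  # Llama checkpoints ship no pad token
    )

Separately, loading a 70B model in float16 needs on the order of 140 GB of GPU memory, well beyond typical Space hardware. A hedged sketch of 4-bit loading via bitsandbytes (an assumption; it requires the bitsandbytes package, which this commit does not add):

    # Sketch only: 4-bit quantized loading to cut memory roughly 4x vs float16.
    from transformers import AutoModelForCausalLM, BitsAndBytesConfig
    import torch

    quant_config = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_compute_dtype=torch.float16)
    model = AutoModelForCausalLM.from_pretrained(
        "mattshumer/Reflection-Llama-3.1-70B",
        quantization_config=quant_config,
        device_map="auto",
    )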