lucas-w committed on
Commit
fdeb983
1 Parent(s): cf8456d

Update app.py

Files changed (1)
  1. app.py +29 -4
app.py CHANGED
@@ -1,7 +1,32 @@
-from transformers import pipeline
 import gradio as gr
+from transformers import pipeline
+import torch
+
+DEFAULT_SYSTEM_PROMPT = """\
+You are Menthelp, a mental health chatbot. Please help the user with their concerns.\
+"""
+MAX_MAX_NEW_TOKENS = 2048
+DEFAULT_MAX_NEW_TOKENS = 1024
+
+if not torch.cuda.is_available():
+    DEFAULT_SYSTEM_PROMPT += '\n<p>Running on CPU 🥶 This demo does not work on CPU.</p>'
+
+# Create a pipeline using the Hugging Face Llama-2-7b-chat model
+pipe = pipeline("text-generation", model="huggingface-projects/llama-2-7b-chat", token="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")
+
+# Define a function to interact with the pipeline using Gradio
+def llama_2_7b_chatbot(message):
+    return pipe(f"{DEFAULT_SYSTEM_PROMPT}\n{message}", max_new_tokens=DEFAULT_MAX_NEW_TOKENS)[0]["generated_text"]
 
-pipe = pipeline("translation", model="huggingface-projects/llama-2-7b-chat", token="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")
+# Define the Gradio interface
+iface = gr.Interface(
+    fn=llama_2_7b_chatbot,
+    inputs=gr.Textbox(label="Input your message:", placeholder="Type a message..."),
+    outputs=gr.Textbox(label="Chatbot response:"),
+    live=True,
+    title="Llama-2 7B Chat",
+    description="This Space demonstrates model [Llama-2-7b-chat](https://huggingface.co/meta-llama/Llama-2-7b-chat) by Meta, a Llama 2 model with 7B parameters fine-tuned for chat instructions. Feel free to play with it!",
+)
 
-demo = gr.Interface.from_pipeline(pipe)
-demo.launch()
+# Launch the Gradio interface
+iface.launch()
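As a quick sanity check, the new handler can also be exercised directly, without going through the UI. This is a hypothetical sketch, not part of the commit; it assumes the pipeline above loads successfully on the available hardware, and the message is only illustrative:

    # Hypothetical smoke test: call the handler directly (not in the committed app.py)
    print(llama_2_7b_chatbot("I've been feeling anxious lately."))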