Create app.py
app.py
ADDED
import gradio as gr
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, TextStreamer, GenerationConfig

# Load the fine-tuned counselling model and run it on CPU.
tokenizer = AutoTokenizer.from_pretrained("ShieldX/manovyadh-1.1B-v1")
model = AutoModelForCausalLM.from_pretrained("ShieldX/manovyadh-1.1B-v1")
model = model.to("cpu")

title = "🌱 ManoVyadh 🌱"
description = "Mental Health Counselling Chatbot"
examples = [["I am feeling sad for my friend's divorce"]]

def predict(message, history):
    # `history` is supplied by gr.ChatInterface but is not used here: each
    # turn is answered independently from a single-turn ChatML prompt.
    def formatted_prompt(question) -> str:
        sysp = "You are an AI assistant that helps people cope with stress and improve their mental health. The user will tell you about their feelings and challenges. Your task is to listen empathetically and offer helpful suggestions. While responding, think about the user's needs and goals and show compassion and support."
        # ChatML layout: a system turn, a user turn, then an open assistant
        # turn for the model to complete.
        return f"<|im_start|>system\n{sysp}<|im_end|>\n<|im_start|>user\n{question}<|im_end|>\n<|im_start|>assistant\n"

    messages = formatted_prompt(message)
    inputs = tokenizer([messages], return_tensors="pt").to("cpu")

    # TextStreamer prints tokens to stdout as they are generated (handy in
    # the Space logs); it does not stream into the Gradio UI.
    streamer = TextStreamer(tokenizer)

    generation_config = GenerationConfig(
        penalty_alpha=0.6,  # only honoured by contrastive search (do_sample=False); ignored while sampling
        do_sample=True,
        top_k=5,
        temperature=0.5,
        repetition_penalty=1.2,
        max_new_tokens=256,
        eos_token_id=tokenizer.eos_token_id,
        pad_token_id=tokenizer.eos_token_id,
    )

    # The streamer is a runtime argument to generate(), not a
    # GenerationConfig field.
    outputs = model.generate(**inputs, generation_config=generation_config, streamer=streamer)

    # Decode only the newly generated tokens so the prompt is not echoed
    # back into the chat window.
    prompt_len = inputs["input_ids"].shape[1]
    return tokenizer.decode(outputs[0][prompt_len:], skip_special_tokens=True)

gr.ChatInterface(
    predict,
    title=title,
    description=description,
    examples=examples,
    theme="finlaymacklon/boxy_violet",
).launch(debug=True)
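
The draft also sketches, in commented-out code, a loop that yields partial messages, which suggests token-by-token streaming into the chat window was the eventual goal. A minimal sketch of that idea, assuming the installed transformers version provides TextIteratorStreamer and relying on gr.ChatInterface accepting generator callbacks; predict_streaming and its shortened system line are illustrative, not part of this commit:

from threading import Thread
from transformers import GenerationConfig, TextIteratorStreamer

def predict_streaming(message, history):  # hypothetical drop-in for predict()
    # Same ChatML shape as formatted_prompt() above, trimmed for brevity.
    prompt = (
        "<|im_start|>system\nYou are an empathetic mental-health assistant.<|im_end|>\n"
        f"<|im_start|>user\n{message}<|im_end|>\n<|im_start|>assistant\n"
    )
    inputs = tokenizer([prompt], return_tensors="pt").to("cpu")

    # skip_prompt=True yields only newly generated text, never the prompt echo.
    streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)

    # generate() blocks, so run it on a worker thread and drain the streamer here.
    Thread(
        target=model.generate,
        kwargs=dict(
            **inputs,
            generation_config=GenerationConfig(
                do_sample=True,
                top_k=5,
                temperature=0.5,
                repetition_penalty=1.2,
                max_new_tokens=256,
                pad_token_id=tokenizer.eos_token_id,
            ),
            streamer=streamer,
        ),
    ).start()

    partial_message = ""
    for new_text in streamer:
        partial_message += new_text
        yield partial_message  # gr.ChatInterface renders each yielded string

Swapping predict for predict_streaming in the gr.ChatInterface(...) call would be the only wiring needed, since ChatInterface treats a generator callback as a streaming handler.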
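
For the Space to build, the imports above imply at least these entries in a requirements.txt beside app.py (package list inferred from the imports; the file itself is not part of this commit):

gradio
torch
transformers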