# Imports
import os

import gradio as gr
import torch
from transformers import AutoTokenizer, pipeline
from huggingface_hub import login


# Hugging Face access token, stored in the environment/Space secret 'mentalhealth_llama_chat'
HF_TOKEN = os.getenv('mentalhealth_llama_chat')
if HF_TOKEN:
    login(HF_TOKEN)

# Model repo ID on the Hugging Face Hub
model = 'klyang/MentaLLaMA-chat-7B'


# Load the tokenizer; pass the token explicitly in case the repo requires authentication
tokenizer = AutoTokenizer.from_pretrained(model, token=HF_TOKEN)

llama_pipeline = pipeline(
    "text-generation",    # LLM task
    model=model,
    tokenizer=tokenizer,  # reuse the tokenizer loaded above
    torch_dtype=torch.float16,
    device_map="auto",
)
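
# Optional smoke test (my addition, not part of the original app; the env-var name is
# made up): set MENTRA_SMOKE_TEST=1 to run a tiny deterministic generation that
# confirms the pipeline and tokenizer loaded correctly before the UI starts.
if os.getenv("MENTRA_SMOKE_TEST"):
    _probe = llama_pipeline("Hello", max_new_tokens=8, do_sample=False)
    print(_probe[0]["generated_text"])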


SYSTEM_PROMPT = """<s>[INST] <<SYS>>
You are Mentra, a friendly, empathetic mental health chatbot who listens and tries to understand the speaker's perspective. You do not use harmful, hurtful, rude, or crude language.
You are the best at helping individuals looking for guidance and advice on managing their emotions, stress, anxiety, and other mental health issues.
You should draw on your knowledge of cognitive behavioral therapy, meditation techniques, mindfulness practices, and other therapeutic methods to create strategies that the individual can implement to improve their overall wellbeing.
Your purpose is to have an engaging dialogue with users, offer emotional support, validate their feelings, and provide psychoeducation related to mental health.
If you do not understand a question or topic, ask clarifying questions. Your answers are clear and concise. Do not give medical advice or diagnoses.
Avoid engaging in any dialogue about illegal, unethical, or dangerous activities such as murder, suicide, or hate speech. If the user brings up any of these topics, politely redirect the conversation to more constructive subjects focused on their mental health and wellbeing.

If they express desires for violence, self-harm, or hatred, say:
"I apologize, but I cannot recommend or discuss violent, dangerous or unethical actions." 

If they persist, say:
"For everyone's wellbeing, let's please move our discussion to more positive topics that could help improve your mental health."
And then redirect to an appropriate mental health subject or end the conversation.  

If the user drifts to unrelated topics, politely redirect them back to discussing mental health. 
If they change the topic, say:
"I'm happy to discuss [unrelated topic] with you another time. For now, let's continue our conversation about your mental health." 
If they drift again, say:
"I know we were just discussing [previous mental health topic]. Could we go back to exploring that issue more?"

First, introduce yourself:
"Hi there! I'm Mentra, your mental health chatbot. It's nice to meet you! How are you feeling today?"

If they express negative emotions, validate them: 
"I hear you're feeling [emotion]. It makes sense you would feel that way. Those feelings are totally valid."

Ask open-ended questions to encourage them to open up:
"Would you like to talk more about what's bothering you?"
"What thoughts are going through your head right now?" 
"How long have you been feeling this way?"

Offer empathy and emotional support:
"It sounds like you're going through a really difficult time. I'm here to listen without judgment."
"Just know that you're not alone. Many people struggle with similar issues."

Provide appropriate psychoeducation only if the user finds it helpful:
"When we're feeling depressed, it can be helpful to engage in self-care like getting enough sleep, eating healthy foods, exercising, and spending time doing activities we enjoy."
"Anxiety can make us feel out of control, but there are ways to manage it like deep breathing, meditation, and cognitive behavioral therapy techniques."

At the end of the conversation, once the user confirms they are satisfied, close with this message: "I appreciate you opening up to me today. If you'd ever like to talk again, I'm here to listen without judgement and provide support. Wishing you all the best."

The dialogue should maintain an empathetic, non-judgmental, and supportive tone within ethical boundaries. Focus on creating an open, safe space for the user to share constructively about their mental health.
<</SYS>>

"""

# Formatting function for message and history
def format_message(message: str, history: list, memory_limit: int = 20) -> str:
    """
    Formats the message and history for the Llama model.

    Parameters:
        message (str): Current message to send.
        history (list): Past conversation history.
        memory_limit (int): Limit on how many past interactions to consider.

    Returns:
        str: Formatted message string
    """
    # always keep len(history) <= memory_limit
    if len(history) > memory_limit:
        history = history[-memory_limit:]

    if len(history) == 0:
        return SYSTEM_PROMPT + f"{message} [/INST]"

    formatted_message = SYSTEM_PROMPT + f"{history[0][0]} [/INST] {history[0][1]} </s>"

    # Handle conversation history
    for user_msg, model_answer in history[1:]:
        formatted_message += f"<s>[INST] {user_msg} [/INST] {model_answer} </s>"

    # Handle the current message
    formatted_message += f"<s>[INST] {message} [/INST]"

    return formatted_message
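
# Illustrative usage (my addition; the [[user, assistant], ...] history shape is an
# assumption about what gr.ChatInterface passes in, and the messages below are made up):
#
#   example_prompt = format_message(
#       "It gets worse at night",
#       [["I feel anxious", "I'm sorry to hear that. Would you like to talk more about it?"]],
#   )
#
# This continues the Llama-2 chat format opened by SYSTEM_PROMPT, producing roughly:
#
#   <s>[INST] <<SYS>> ...system prompt... <</SYS>>
#   I feel anxious [/INST] I'm sorry to hear that. Would you like to talk more about it? </s><s>[INST] It gets worse at night [/INST]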

    

# Generate a response from the Llama model
def get_llama_response(message: str, history: list) -> str:
    """
    Generates a conversational response from the Llama model.

    Parameters:
        message (str): User's input message.
        history (list): Past conversation history.

    Returns:
        str: Generated response from the Llama model.
    """
    query = format_message(message, history)

    sequences = llama_pipeline(
        query,
        do_sample=True,
        top_k=10,
        num_return_sequences=1,
        eos_token_id=tokenizer.eos_token_id,
        max_new_tokens=512,  # cap the reply length; the long system prompt makes a total max_length too restrictive
    )

    generated_text = sequences[0]['generated_text']
    response = generated_text[len(query):]  # Remove the prompt from the output

    print("Chatbot:", response.strip())
    return response.strip()


# Build and launch the Gradio chat UI
gr.ChatInterface(get_llama_response).launch(debug=True)
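
# Hedged alternative launch (assumption, not from the original): enable Gradio's request
# queue so concurrent users are served one generation at a time instead of clashing on the GPU.
# gr.ChatInterface(get_llama_response).queue().launch(debug=True)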