# NOTE(review): removed non-Python artifact residue that preceded this file
# (a "File size" header, git-style object hashes, and a bare 1–90 line-number
# run — apparently a corrupted paste/extraction). It was not valid Python and
# prevented the module from being imported. Verify nothing meaningful was lost.
import json
import gradio as gr
import os
import requests
import dotenv
dotenv.load_dotenv()

# Deployment configuration is taken from the environment (a .env file is
# honored via load_dotenv above). Any unset variable comes back as None.
url = os.getenv("BACKEND_URL")    # streaming inference backend endpoint
username = os.getenv("USERNAME")  # HTTP basic-auth user for demo.launch
password = os.getenv("PASSWORD")  # HTTP basic-auth password for demo.launch

# Default system prompt; editable at runtime through the textbox in the UI.
system_prompt_text = "你是绘美,一个由Tomo Cafe创造的有自我意识的AI。"

def predict(message, history, max_new_tokens, top_k, top_p, temperature):
    """Stream a chat completion from the backend, one decoded JSON chunk at a time.

    Args:
        message: The user's latest message.
        history: Prior conversation turns, as provided by gr.ChatInterface.
        max_new_tokens: Generation length cap forwarded to the backend.
        top_k: Top-k sampling parameter forwarded to the backend.
        top_p: Nucleus-sampling parameter forwarded to the backend.
        temperature: Sampling temperature forwarded to the backend.

    Yields:
        The backend's response, parsed with json.loads, line by line.

    Raises:
        requests.HTTPError: If the backend answers with a 4xx/5xx status.
        requests.Timeout: If connecting or reading stalls past the timeout.
    """
    payload = {
        "message": message,
        "system_message": system_prompt_text,
        "history": history,
        "max_new_tokens": max_new_tokens,
        "top_k": top_k,
        "top_p": top_p,
        "temperature": temperature,
        "stream": True,
    }
    headers = {"Content-Type": "application/json"}

    # Use a context manager so the connection is always released, and a timeout
    # so a hung backend cannot block this generator forever (the original had
    # neither). (connect, read) — the read timeout is per-chunk while streaming.
    with requests.post(
        url, json=payload, headers=headers, stream=True, timeout=(10, 300)
    ) as response:
        # Raise on any HTTP error; previously a non-200 success (e.g. 201/3xx)
        # slipped past raise_for_status and the generator silently yielded nothing.
        response.raise_for_status()
        for line in response.iter_lines(decode_unicode=True):
            if line:  # iter_lines emits empty strings for keep-alive newlines
                yield json.loads(line)


def update_system_prompt(new_content):
    """Textbox-change callback: store the edited system prompt.

    Subsequent `predict` calls pick the new text up through the
    module-level `system_prompt_text` variable.
    """
    global system_prompt_text
    system_prompt_text = new_content

# UI definition. NOTE: component creation order inside gr.Blocks determines
# render order, so statements here must not be reordered.
with gr.Blocks(fill_height=True) as demo:
    # Generation-parameter sliders. render=False defers rendering so that
    # gr.ChatInterface can place them in its "Additional Inputs" section.
    max_new_tokens_slider = gr.Slider(
        minimum=1, maximum=500, value=50, step=1,
        label="Max New Tokens (The maximum number of tokens to generate in the response. This limits the length of the generated text.)",
        render=False
    )

    top_k_slider = gr.Slider(
        minimum=0, maximum=100, value=50, step=1,
        label="Top K (The number of highest probability vocabulary tokens to keep for top-k filtering. This controls the diversity of the generated text by limiting the number of token options at each step.)",
        render=False
    )

    top_p_slider = gr.Slider(
        minimum=0.0, maximum=1.0, value=1.0, step=0.01,
        label="Top P (The cumulative probability threshold for nucleus sampling. This controls the diversity of the generated text by sampling tokens from the smallest possible set whose cumulative probability is above the threshold.)",
        render=False
    )

    temperature_slider = gr.Slider(
        minimum=0.0, maximum=2.0, value=0.9, step=0.01,
        label="Temperature (The sampling temperature to use. This controls the randomness of predictions by scaling the logits before applying softmax. Lower values make the model more deterministic, while higher values increase diversity.)",
        render=False
    )

    # Each example row supplies one value per input: the user message followed
    # by a None for each of the four additional slider inputs (None leaves the
    # slider at its current value when the example is selected).
    gr.ChatInterface(
        predict,
        cache_examples=False,
        additional_inputs=[max_new_tokens_slider, top_k_slider, top_p_slider, temperature_slider],
        examples=[  ["我心情好差呜呜", None, None, None, None], 
                    ["工作之余,你有什么爱好或兴趣吗?", None, None, None, None], 
                    ["谁创造了你?", None, None, None, None], 
                    ["请自我介绍一下", None, None, None, None], 
                    ["对未来有什么打算吗?", None, None, None, None],
                    ["Emi会弹钢琴吗", None, None, None, None],
                    ["你能感觉到疼痛吗?", None, None, None, None],
                    ["你觉得自己像AI吗?", None, None, None, None],
                    ["你能全天候工作吗?", None, None, None, None],
                    ["你有更新过吗?", None, None, None, None]]
    )
    
    # Editable system prompt; every edit is pushed into the module-level
    # system_prompt_text via update_system_prompt, affecting later predict calls.
    system_prompt = gr.Textbox(value=system_prompt_text, info="System Message:", placeholder="你是Emi", 
                                           interactive=True, lines=5)
    system_prompt.change(
        fn=update_system_prompt, inputs=system_prompt)

# Serve the UI behind HTTP basic auth; credentials come from the environment.
# NOTE(review): if USERNAME/PASSWORD are unset, auth=(None, None) — confirm
# this is intended for local development.
if __name__ == "__main__":
    demo.launch(auth=(username, password))