magarpr committed on
Commit ee7cd33 • 1 Parent(s): a8e7096

add user interface

Files changed (1)
  1. app.py +147 -1
app.py CHANGED
@@ -1,3 +1,149 @@
  import gradio as gr

- gr.load("models/Fugaku-LLM/Fugaku-LLM-13B").launch()
+ #import gradio as gr
+ #gr.load("models/Fugaku-LLM/Fugaku-LLM-13B").launch()
+ import os
+ from threading import Thread
+ from typing import Iterator
+
  import gradio as gr
+ import spaces
+ import torch
+ from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
+
+ MAX_MAX_NEW_TOKENS = 2048
+ DEFAULT_MAX_NEW_TOKENS = 1024
+ MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "4096"))
+
+ DESCRIPTION = """\
+ # Llama-2 13B Chat
+
+ This Space demonstrates model [Llama-2-13b-chat](https://huggingface.co/meta-llama/Llama-2-13b-chat) by Meta, a Llama 2 model with 13B parameters fine-tuned for chat instructions. Feel free to play with it, or duplicate to run generations without a queue! If you want to run your own service, you can also [deploy the model on Inference Endpoints](https://huggingface.co/inference-endpoints).
+
+ 🔎 For more details about the Llama 2 family of models and how to use them with `transformers`, take a look [at our blog post](https://huggingface.co/blog/llama2).
+
+ 🔨 Looking for an even more powerful model? Check out the large [**70B** model demo](https://huggingface.co/spaces/ysharma/Explore_llamav2_with_TGI).
+ 🐇 For a smaller model that you can run on many GPUs, check our [7B model demo](https://huggingface.co/spaces/huggingface-projects/llama-2-7b-chat).
+
+ """
+
+ LICENSE = """
+ <p/>
+
+ ---
+ As a derivative work of [Llama-2-13b-chat](https://huggingface.co/meta-llama/Llama-2-13b-chat) by Meta,
+ this demo is governed by the original [license](https://huggingface.co/spaces/huggingface-projects/llama-2-13b-chat/blob/main/LICENSE.txt) and [acceptable use policy](https://huggingface.co/spaces/huggingface-projects/llama-2-13b-chat/blob/main/USE_POLICY.md).
+ """
+
+ if not torch.cuda.is_available():
+     DESCRIPTION += "\n<p>Running on CPU 🥶 This demo does not work on CPU.</p>"
+
+
+ if torch.cuda.is_available():
+     model_id = "Fugaku-LLM/Fugaku-LLM-13B"
+     model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", load_in_4bit=True)
+     tokenizer = AutoTokenizer.from_pretrained(model_id)
+     tokenizer.use_default_system_prompt = False
+
+
+ @spaces.GPU
+ def generate(
+     message: str,
+     chat_history: list[tuple[str, str]],
+     system_prompt: str,
+     max_new_tokens: int = 1024,
+     temperature: float = 0.6,
+     top_p: float = 0.9,
+     top_k: int = 50,
+     repetition_penalty: float = 1.2,
+ ) -> Iterator[str]:
+     conversation = []
+     if system_prompt:
+         conversation.append({"role": "system", "content": system_prompt})
+     for user, assistant in chat_history:
+         conversation.extend([{"role": "user", "content": user}, {"role": "assistant", "content": assistant}])
+     conversation.append({"role": "user", "content": message})
+
+     input_ids = tokenizer.apply_chat_template(conversation, return_tensors="pt")
+     if input_ids.shape[1] > MAX_INPUT_TOKEN_LENGTH:
+         input_ids = input_ids[:, -MAX_INPUT_TOKEN_LENGTH:]
+         gr.Warning(f"Trimmed input from conversation as it was longer than {MAX_INPUT_TOKEN_LENGTH} tokens.")
+     input_ids = input_ids.to(model.device)
+
+     streamer = TextIteratorStreamer(tokenizer, timeout=10.0, skip_prompt=True, skip_special_tokens=True)
+     generate_kwargs = dict(
+         {"input_ids": input_ids},
+         streamer=streamer,
+         max_new_tokens=max_new_tokens,
+         do_sample=True,
+         top_p=top_p,
+         top_k=top_k,
+         temperature=temperature,
+         num_beams=1,
+         repetition_penalty=repetition_penalty,
+     )
+     t = Thread(target=model.generate, kwargs=generate_kwargs)
+     t.start()
+
+     outputs = []
+     for text in streamer:
+         outputs.append(text)
+         yield "".join(outputs)
+
+
+ chat_interface = gr.ChatInterface(
+     fn=generate,
+     additional_inputs=[
+         gr.Textbox(label="System prompt", lines=6),
+         gr.Slider(
+             label="Max new tokens",
+             minimum=1,
+             maximum=MAX_MAX_NEW_TOKENS,
+             step=1,
+             value=DEFAULT_MAX_NEW_TOKENS,
+         ),
+         gr.Slider(
+             label="Temperature",
+             minimum=0.1,
+             maximum=4.0,
+             step=0.1,
+             value=0.6,
+         ),
+         gr.Slider(
+             label="Top-p (nucleus sampling)",
+             minimum=0.05,
+             maximum=1.0,
+             step=0.05,
+             value=0.9,
+         ),
+         gr.Slider(
+             label="Top-k",
+             minimum=1,
+             maximum=1000,
+             step=1,
+             value=50,
+         ),
+         gr.Slider(
+             label="Repetition penalty",
+             minimum=1.0,
+             maximum=2.0,
+             step=0.05,
+             value=1.2,
+         ),
+     ],
+     stop_btn=None,
+     examples=[
+         ["Hello there! How are you doing?"],
+         ["Can you explain briefly to me what is the Python programming language?"],
+         ["Explain the plot of Cinderella in a sentence."],
+         ["How many hours does it take a man to eat a Helicopter?"],
+         ["Write a 100-word article on 'Benefits of Open-Source in AI research'"],
+     ],
+ )
+
+ with gr.Blocks(css="style.css") as demo:
+     gr.Markdown(DESCRIPTION)
+     gr.DuplicateButton(value="Duplicate Space for private use", elem_id="duplicate-button")
+     chat_interface.render()
+     gr.Markdown(LICENSE)

+ if __name__ == "__main__":
+     demo.queue(max_size=20).launch()