Adapt to Code Llama.
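This commit adapts the Space from the Llama 2 13B chat demo to Code Llama: it swaps in the CodeLlama-13b-Instruct checkpoint, rewrites the system prompt and description for code tasks, raises MAX_MAX_NEW_TOKENS to 4096, retunes the sampling defaults, and replaces the example prompts.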
app.py (CHANGED)
@@ -6,21 +6,18 @@ import torch
 from model import get_input_token_length, run
 
 DEFAULT_SYSTEM_PROMPT = """\
-You are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure that your responses are socially unbiased and positive in nature.\n\nIf a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you don't know the answer to a question, please don't share false information.\
+You are a helpful, respectful and honest assistant with a deep knowledge of code and software design. Always answer as helpfully as possible, while being safe. Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure that your responses are socially unbiased and positive in nature.\n\nIf a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you don't know the answer to a question, please don't share false information.\
 """
-MAX_MAX_NEW_TOKENS = 2048
+MAX_MAX_NEW_TOKENS = 4096
 DEFAULT_MAX_NEW_TOKENS = 1024
 MAX_INPUT_TOKEN_LENGTH = 4000
 
 DESCRIPTION = """
-# Llama-2 13B Chat
+# Code Llama 13B Chat
 
-This Space demonstrates model [Llama-2-13b-chat](https://huggingface.co/meta-llama/Llama-2-13b-chat) by Meta, a Llama 2 model with 13B parameters fine-tuned for chat instructions. Feel free to play with it, or duplicate to run generations without a queue! If you want to run your own service, you can also [deploy the model on Inference Endpoints](https://huggingface.co/inference-endpoints).
+This Space demonstrates model [CodeLlama-13b-Instruct](https://huggingface.co/codellama/CodeLlama-13b-Instruct-hf) by Meta, a Code Llama model with 13B parameters fine-tuned for chat instructions and specialized on code tasks. Feel free to play with it, or duplicate to run generations without a queue! If you want to run your own service, you can also [deploy the model on Inference Endpoints](https://huggingface.co/inference-endpoints).
 
-🔎 For more details about the Llama 2 family of models and how to use them with `transformers`, take a look [at our blog post](https://huggingface.co/blog/llama2).
-
-🔨 Looking for an even more powerful model? Check out the large [**70B** model demo](https://huggingface.co/spaces/ysharma/Explore_llamav2_with_TGI).
-🐇 For a smaller model that you can run on many GPUs, check our [7B model demo](https://huggingface.co/spaces/huggingface-projects/llama-2-7b-chat).
+🔎 For more details about the Code Llama family of models and how to use them with `transformers`, take a look [at our blog post](https://huggingface.co/blog/codellama).
 
 """
 
@@ -28,8 +25,8 @@ LICENSE = """
 <p/>
 
 ---
-As a derivate work of [Llama-2-13b-chat](https://huggingface.co/meta-llama/Llama-2-13b-chat) by Meta,
-this demo is governed by the original [license](https://huggingface.co/spaces/huggingface-projects/llama-2-13b-chat/blob/main/LICENSE.txt) and [acceptable use policy](https://huggingface.co/spaces/huggingface-projects/llama-2-13b-chat/blob/main/USE_POLICY.md).
+As a derivate work of Code Llama by Meta,
+this demo is governed by the original [license](https://huggingface.co/spaces/huggingface-projects/codellama-2-13b-chat/blob/main/LICENSE.txt) and [acceptable use policy](https://huggingface.co/spaces/huggingface-projects/codellama-2-13b-chat/blob/main/USE_POLICY.md).
 """
 
 if not torch.cuda.is_available():
@@ -132,30 +129,28 @@ with gr.Blocks(css='style.css') as demo:
            minimum=0.1,
            maximum=4.0,
            step=0.1,
-            value=1.0,
+            value=0.1,
        )
        top_p = gr.Slider(
            label='Top-p (nucleus sampling)',
            minimum=0.05,
            maximum=1.0,
            step=0.05,
-            value=0.95,
+            value=0.9,
        )
        top_k = gr.Slider(
            label='Top-k',
            minimum=1,
            maximum=1000,
            step=1,
-            value=50,
+            value=10,
        )
 
    gr.Examples(
        examples=[
-            'Hello there! How are you doing?',
-            'Can you explain briefly to me what is the Python programming language?',
-            'Explain the plot of Cinderella in a sentence.',
-            'How many hours does it take a man to eat a Helicopter?',
-            "Write a 100-word article on 'Benefits of Open-Source in AI research'",
+            'What is the Fibonacci sequence?',
+            'Can you explain briefly what Python is good for?',
+            'How can I display a grid of images in SwiftUI?',
        ],
        inputs=textbox,
        outputs=[textbox, chatbot],
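The retuned slider values above flow straight into `model.run`. For illustration, a minimal sketch of the equivalent direct call with this commit's new UI defaults, assuming (as in the upstream Llama 2 demo this Space derives from) that `run` yields the accumulated response on each step; the short system prompt is a stand-in, not the full prompt above:

    from model import run

    # Sketch: call the Space's generation function with this commit's new UI defaults.
    response = ''
    for text in run(
            message='How can I display a grid of images in SwiftUI?',
            chat_history=[],
            system_prompt='You are a helpful assistant.',  # stand-in prompt
            max_new_tokens=1024,   # DEFAULT_MAX_NEW_TOKENS
            temperature=0.1,       # near-greedy decoding suits code tasks
            top_p=0.9,
            top_k=10):
        response = text            # each yield is the response so far
    print(response)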
model.py (CHANGED)
@@ -4,7 +4,7 @@ from typing import Iterator
 import torch
 from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
 
-model_id = 'meta-llama/Llama-2-13b-chat-hf'
+model_id = 'codellama/CodeLlama-13b-Instruct-hf'
 
 if torch.cuda.is_available():
    config = AutoConfig.from_pretrained(model_id)
@@ -45,8 +45,8 @@ def run(message: str,
        chat_history: list[tuple[str, str]],
        system_prompt: str,
        max_new_tokens: int = 1024,
-        temperature: float = 0.8,
-        top_p: float = 0.95,
+        temperature: float = 0.1,
+        top_p: float = 0.9,
        top_k: int = 50) -> Iterator[str]:
    prompt = get_prompt(message, chat_history, system_prompt)
    inputs = tokenizer([prompt], return_tensors='pt', add_special_tokens=False).to('cuda')
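To try the new checkpoint outside the Space, a minimal standalone sketch built from the same classes model.py imports; the half-precision and device-map choices and the example prompt are assumptions, not part of this commit:

    import torch
    from threading import Thread
    from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

    model_id = 'codellama/CodeLlama-13b-Instruct-hf'

    tokenizer = AutoTokenizer.from_pretrained(model_id)
    model = AutoModelForCausalLM.from_pretrained(
        model_id,
        torch_dtype=torch.float16,  # assumption: fp16 to fit the 13B model on one GPU
        device_map='auto',
    )

    # [INST] ... [/INST] is the instruction format used by Code Llama instruct models.
    prompt = '<s>[INST] Write a Python function that checks whether a number is prime. [/INST]'
    inputs = tokenizer([prompt], return_tensors='pt', add_special_tokens=False).to(model.device)

    # Stream tokens from a background generation thread, as model.py does.
    streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
    Thread(target=model.generate, kwargs=dict(
        **inputs,
        streamer=streamer,
        max_new_tokens=1024,
        do_sample=True,
        temperature=0.1,  # this commit's new default
        top_p=0.9,
        top_k=50,         # model.py keeps 50 as the signature default
    )).start()
    for new_text in streamer:
        print(new_text, end='', flush=True)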