lvwerra committed
Commit
e3716e2
1 Parent(s): ed57e85

Update app.py

Files changed (1)
app.py +3 -3
app.py CHANGED
@@ -19,6 +19,7 @@ FIM_SUFFIX = "<fim_suffix>"
 
 FIM_INDICATOR = "<FILL_HERE>"
 
+EOS_STRING = "</s>"
 
 theme = gr.themes.Monochrome(
     primary_hue="indigo",
@@ -77,7 +78,7 @@ def generate(
 
     previous_token = ""
     for response in stream:
-        if response.token.text == "<|endoftext|>":
+        if response.token.text == EOS_STRING:
             if fim_mode:
                 output += suffix
             else:
@@ -119,8 +120,7 @@ description = """
     <h1> 🦙 CodeLlama Playground</h1>
 </div>
 <div style="text-align: left;">
-    <p>This is a demo to generate text and code with the following Code Llama model (7B).</p>
-    <p><b>Please note:</b> This model is not designed for instruction purposes but for code completion. If you're looking for instruction or want to chat with a fine-tuned model, you can visit the <a href="https://huggingface.co/codellama/">Code Llama Org</a> and select an instruct model.</p>
+    <p>This is a demo to generate text and code with the following Code Llama model (7B). Please note that this model is not designed for instruction purposes but for code completion. If you're looking for instruction or want to chat with a fine-tuned model, you can visit the <a href="https://huggingface.co/codellama/">Code Llama Org</a> and select an instruct model.</p>
 </div>
 """
 
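Below is a minimal, self-contained sketch of the streaming loop this commit patches, for readers who want to try the pattern outside the Space. It is not the Space's actual app.py: the `InferenceClient` from `huggingface_hub`, the `codellama/CodeLlama-7b-hf` model id, and the `generate` signature here are assumptions; only the `EOS_STRING = "</s>"` constant and the `response.token.text == EOS_STRING` check come from the diff above.

# Hedged sketch (assumed client and model id), not the Space's exact code:
# stream tokens from a Code Llama endpoint and stop on the model's EOS string.
from huggingface_hub import InferenceClient

EOS_STRING = "</s>"  # Code Llama's end-of-sequence token (replaces "<|endoftext|>")

# Assumption: the Space may target a different model or endpoint.
client = InferenceClient("codellama/CodeLlama-7b-hf")


def generate(prompt: str, max_new_tokens: int = 256):
    """Yield the partial completion as tokens stream in."""
    output = ""
    stream = client.text_generation(
        prompt,
        max_new_tokens=max_new_tokens,
        stream=True,
        details=True,  # needed so each streamed chunk exposes response.token
    )
    for response in stream:
        if response.token.text == EOS_STRING:
            # The real app also re-appends the suffix here when the prompt
            # used fill-in-the-middle mode (FIM_INDICATOR); omitted in this sketch.
            break
        output += response.token.text
        yield output


if __name__ == "__main__":
    final = ""
    for final in generate("def fibonacci(n):"):
        pass
    print(final)

Keeping the stop token in a single `EOS_STRING` constant means that switching to a model with a different end-of-sequence string (for example `"<|endoftext|>"`) is a one-line change.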