laurentiubp committed on
Commit
6e4f7cc
1 Parent(s): a399270

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +31 -6
app.py CHANGED
@@ -18,6 +18,14 @@ This Space demonstrates model [CataLlama-v0.1-Instruct-DPO](https://huggingface.
18
  CataLlama is a fine-tune on Llama-3 8B to enhance it's proficiency on the Catalan Language.
19
  """
20
 
 
 
 
 
 
 
 
 
21
  if not torch.cuda.is_available():
22
  DESCRIPTION += "\n<p>Running on CPU 🥶 This demo does not work on CPU.</p>"
23
 
@@ -33,9 +41,9 @@ def generate(
33
  message: str,
34
  chat_history: list[tuple[str, str]],
35
  system_prompt: str,
36
- max_new_tokens: int = 1024,
37
- temperature: float = 0.6,
38
- top_p: float = 0.9,
39
  ) -> Iterator[str]:
40
 
41
  conversation = []
@@ -75,9 +83,25 @@ def generate(
75
  chat_interface = gr.ChatInterface(
76
  fn=generate,
77
  additional_inputs=[
78
- gr.Textbox(value="Ets un chatbot amigable. Responeu preguntes i ajudeu els usuaris", label="System message"),
79
- gr.Slider(minimum=1, maximum=2048, value=1024, step=1, label="Max new tokens"),
80
- gr.Slider(minimum=0.1, maximum=4.0, value=0.6, step=0.1, label="Temperature"),
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
81
  gr.Slider(
82
  minimum=0.1,
83
  maximum=1.0,
@@ -96,6 +120,7 @@ chat_interface = gr.ChatInterface(
96
  with gr.Blocks(css="style.css") as demo:
97
  gr.Markdown(DESCRIPTION)
98
  chat_interface.render()
 
99
 
100
 
101
  if __name__ == "__main__":
 
18
  CataLlama is a fine-tune on Llama-3 8B to enhance it's proficiency on the Catalan Language.
19
  """
20
 
21
+ LICENSE = """
22
+ <p/>
23
+ ---
24
+ As a derivate work of [Meta-Llama-3-8B](https://huggingface.co/meta-llama/Meta-Llama-3-8B) by Meta,
25
+ this demo is governed by the original [llama-3 license](https://llama.meta.com/llama3/license)
26
+
27
+ """
28
+
29
  if not torch.cuda.is_available():
30
  DESCRIPTION += "\n<p>Running on CPU 🥶 This demo does not work on CPU.</p>"
31
 
 
41
  message: str,
42
  chat_history: list[tuple[str, str]],
43
  system_prompt: str,
44
+ max_new_tokens: int,
45
+ temperature: float,
46
+ top_p: float,
47
  ) -> Iterator[str]:
48
 
49
  conversation = []
 
83
  chat_interface = gr.ChatInterface(
84
  fn=generate,
85
  additional_inputs=[
86
+ gr.Textbox(
87
+ value="Ets un chatbot amigable. Responeu preguntes i ajudeu els usuaris",
88
+ label="System message",
89
+ lines=6
90
+ ),
91
+ gr.Slider(
92
+ minimum=1,
93
+ maximum=2048,
94
+ value=1024,
95
+ step=256,
96
+ label="Max new tokens"
97
+ ),
98
+ gr.Slider(
99
+ minimum=0.1,
100
+ maximum=1.0,
101
+ value=0.6,
102
+ step=0.05,
103
+ label="Temperature"
104
+ ),
105
  gr.Slider(
106
  minimum=0.1,
107
  maximum=1.0,
 
120
  with gr.Blocks(css="style.css") as demo:
121
  gr.Markdown(DESCRIPTION)
122
  chat_interface.render()
123
+ gr.Markdown(LICENSE)
124
 
125
 
126
  if __name__ == "__main__":