Rijgersberg committed on
Commit
cb0bf19
1 Parent(s): ef97e39

Update app.py

Files changed (1): app.py (+4 −4)
app.py CHANGED
@@ -14,7 +14,7 @@ DEFAULT_MAX_NEW_TOKENS = 1024
 MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "8192"))
 
 if torch.cuda.is_available():
-    model_id = "meta-llama/Meta-Llama-3.1-8B"
+    model_id = "meta-llama/Meta-Llama-3.1-8B-Instruct"
     model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.bfloat16, device_map="cuda")
     tokenizer = AutoTokenizer.from_pretrained(model_id)
 
@@ -64,7 +64,7 @@ def generate(
 chat_interface = gr.ChatInterface(
     fn=generate,
     chatbot=gr.Chatbot(height=450,
-                       label="google/gemma-2-9b-it",
+                       label="meta-llama/Meta-Llama-3.1-8B-Instruct",
                        show_share_button=True,
                        # avatar_images=(None, 'geitje-logo.jpg')
                        ),
@@ -135,8 +135,8 @@ Twee van de wortelbedrijven werkten mee door meer informatie over de ongeoorloof
 ```
 Vat bovenstaand artikel samen"""]
     ],
-    title="meta-llama/Meta-Llama-3.1-8B",
-    description="""meta-llama/Meta-Llama-3.1-8B quick demo""",
+    title="meta-llama/Meta-Llama-3.1-8B-Instruct",
+    description="""meta-llama/Meta-Llama-3.1-8B-Instruct quick demo""",
     submit_btn="Genereer",
     stop_btn="Stop",
     retry_btn="🔄 Opnieuw",
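For context, the commit swaps the base Llama 3.1 8B checkpoint for the chat-tuned Instruct variant, which expects conversations formatted with its chat template rather than raw prompts. Below is a minimal sketch of how the updated demo's generation path could look; the model id, dtype, device_map, and max_new_tokens come from the diff, while the example message and sampling settings are illustrative assumptions, not values from app.py.

```python
# Minimal sketch: load the Instruct checkpoint and generate one reply.
# model_id, dtype, device_map and max_new_tokens mirror the diff; the prompt
# and sampling settings below are assumptions for illustration.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "meta-llama/Meta-Llama-3.1-8B-Instruct"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id, torch_dtype=torch.bfloat16, device_map="cuda"
)

# The Instruct model expects chat-formatted input; apply_chat_template adds
# the Llama 3.1 role headers and the assistant generation prompt.
messages = [{"role": "user", "content": "Vat bovenstaand artikel samen"}]
input_ids = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt"
).to(model.device)

output = model.generate(
    input_ids,
    max_new_tokens=1024,   # DEFAULT_MAX_NEW_TOKENS in app.py
    do_sample=True,        # sampling parameters are assumed, not from the diff
    temperature=0.7,
    top_p=0.95,
)
# Decode only the newly generated tokens, skipping the prompt.
print(tokenizer.decode(output[0][input_ids.shape[-1]:], skip_special_tokens=True))
```

In the Space itself this logic lives in the `generate` function wired into `gr.ChatInterface`, which handles the message history and streaming UI around it.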