alfonsovelp committed on
Commit
038d05f
1 Parent(s): 00e08ad

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +5 -3
app.py CHANGED
@@ -6,7 +6,7 @@ import os
6
  hf_token = os.environ.get("HF_TOKEN")
7
 
8
  # Your model ID
9
- model_id = "mistral-community/Mistral-7B-v0.2"
10
 
11
  # Load the tokenizer
12
  tokenizer = AutoTokenizer.from_pretrained(model_id, token=hf_token)
@@ -41,9 +41,11 @@ def generate(
41
 
42
  formatted_prompt = format_prompt(prompt, history)
43
 
44
- inputs = tokenizer.apply_chat_template(formatted_prompt, return_tensors="pt").to("cuda")
45
 
46
- strea = model.generate(inputs, **generate_kwargs, stream=True, details=True, return_full_text=False)
 
 
47
  output = ""
48
 
49
  for response in stream:
 
6
  hf_token = os.environ.get("HF_TOKEN")
7
 
8
  # Your model ID
9
+ model_id = "mistralai/Mistral-7B-Instruct-v0.2"
10
 
11
  # Load the tokenizer
12
  tokenizer = AutoTokenizer.from_pretrained(model_id, token=hf_token)
 
41
 
42
  formatted_prompt = format_prompt(prompt, history)
43
 
44
+
45
 
46
+
47
+ inputs = tokenizer.apply_chat_template(formatted_prompt, return_tensors="pt")
48
+ stream = model.generate(inputs, **generate_kwargs, stream=True, details=True, return_full_text=False)
49
  output = ""
50
 
51
  for response in stream: