codys12 committed on
Commit
36ff73f
1 Parent(s): b2015f4

minor changes

Browse files
Files changed (1) hide show
  1. app.py +3 -3
app.py CHANGED
@@ -20,10 +20,10 @@ MAX_INPUT_TOKEN_LENGTH = 4096
20
 
21
  if torch.cuda.is_available():
22
  model_id = "codys12/MergeLlama-7b"
23
- model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.float16, device_map=0, cache_dir="/data")
24
  tokenizer = AutoTokenizer.from_pretrained("codellama/CodeLlama-7b-hf", trust_remote_code=True)
25
- #tokenizer.pad_token = tokenizer.eos_token
26
- #tokenizer.padding_side = "right"
27
 
28
 
29
  @spaces.GPU
 
20
 
21
  if torch.cuda.is_available():
22
  model_id = "codys12/MergeLlama-7b"
23
+ model = AutoModelForCausalLM.from_pretrained(model_id, trust_remote_code=True, torch_dtype=torch.float16, device_map=0, cache_dir="/data")
24
  tokenizer = AutoTokenizer.from_pretrained("codellama/CodeLlama-7b-hf", trust_remote_code=True)
25
+ tokenizer.pad_token = tokenizer.eos_token
26
+ tokenizer.padding_side = "right"
27
 
28
 
29
  @spaces.GPU