Update
app.py CHANGED
```diff
@@ -4,12 +4,7 @@ from typing import Iterator
 import gradio as gr
 import spaces
 import torch
-from transformers import (
-    AutoConfig,
-    AutoModelForCausalLM,
-    AutoTokenizer,
-    TextIteratorStreamer,
-)
+from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
 
 MAX_MAX_NEW_TOKENS = 2048
 DEFAULT_MAX_NEW_TOKENS = 1024
@@ -41,11 +36,7 @@ if not torch.cuda.is_available():
 
 if torch.cuda.is_available():
     model_id = "meta-llama/Llama-2-13b-chat-hf"
-    config = AutoConfig.from_pretrained(model_id)
-    config.pretraining_tp = 1
-    model = AutoModelForCausalLM.from_pretrained(
-        model_id, config=config, torch_dtype=torch.float16, load_in_4bit=True, device_map="auto"
-    )
+    model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", load_in_4bit=True)
     tokenizer = AutoTokenizer.from_pretrained(model_id)
     tokenizer.use_default_system_prompt = False
 
```
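For reference, a minimal sketch of the loading path this commit settles on. On recent transformers releases, passing `load_in_4bit=True` directly to `from_pretrained` is deprecated in favor of an explicit `BitsAndBytesConfig`; the `bnb_4bit_compute_dtype` below is an assumption (the commit drops the explicit `torch_dtype=torch.float16`), not something the diff specifies.

```python
# Equivalent 4-bit loading via BitsAndBytesConfig (a sketch; newer transformers
# versions deprecate the bare load_in_4bit=True kwarg in favor of this form).
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

model_id = "meta-llama/Llama-2-13b-chat-hf"

quant_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_compute_dtype=torch.float16,  # assumption: the diff no longer pins a compute dtype
)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    device_map="auto",           # let accelerate place the quantized weights
    quantization_config=quant_config,
)
tokenizer = AutoTokenizer.from_pretrained(model_id)
tokenizer.use_default_system_prompt = False
```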
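The consolidated import also keeps `TextIteratorStreamer`, which is how a Space like this typically streams tokens back to the Gradio UI. Below is a hedged sketch of that pattern; the `stream_reply` helper name and the generation settings are illustrative, not taken from the diff.

```python
# Sketch: run generate() in a background thread and yield decoded text chunks
# from a TextIteratorStreamer as they become available.
from threading import Thread

from transformers import TextIteratorStreamer


def stream_reply(model, tokenizer, prompt: str, max_new_tokens: int = 1024):
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    streamer = TextIteratorStreamer(
        tokenizer, timeout=10.0, skip_prompt=True, skip_special_tokens=True
    )
    # generate() blocks until completion, so it runs in a worker thread while
    # the caller iterates over the streamer for incremental output.
    thread = Thread(
        target=model.generate,
        kwargs=dict(**inputs, streamer=streamer, max_new_tokens=max_new_tokens),
    )
    thread.start()
    for chunk in streamer:
        yield chunk
```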