nroggendorff committed
Commit • d35ebb6
1 Parent(s): d4edafa

Update README.md
README.md CHANGED
@@ -43,7 +43,6 @@ To use the model with quantization:
 
 ```python
 from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig
-import torch
 
 bnb_config = BitsAndBytesConfig(
     load_in_4bit=True,
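The hunk only shows the top of the README's quantization snippet, ending mid-config. A minimal sketch of how the full loading code might look after this change is below; the repo id `nroggendorff/your-model`, the `device_map="auto"` argument, and the single-option `BitsAndBytesConfig` are placeholders and assumptions, not taken from the README.

```python
from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig

# Placeholder repo id -- the diff does not show the actual model name.
model_id = "nroggendorff/your-model"

# 4-bit quantization, matching the load_in_4bit=True line visible in the diff.
# Further options (compute dtype, quant type, ...) are not shown in the hunk.
bnb_config = BitsAndBytesConfig(load_in_4bit=True)

tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    quantization_config=bnb_config,
    device_map="auto",  # assumption: accelerate is available for device placement
)
```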