losyer8 committed
Commit b44eac9
1 Parent(s): 2262092

Update README.md

Files changed (1)
  1. README.md +2 -1
README.md CHANGED
@@ -48,6 +48,7 @@ Checkpoints format: Hugging Face Transformers (Megatron-DeepSpeed format models
 - torch>=2.0.0
 - transformers>=4.34.0
 - tokenizers>=0.14.0
+- accelerate==0.23.0
 
 ## Usage
 
@@ -55,7 +56,7 @@ Checkpoints format: Hugging Face Transformers (Megatron-DeepSpeed format models
 import torch
 from transformers import AutoTokenizer, AutoModelForCausalLM
 tokenizer = AutoTokenizer.from_pretrained("llm-jp/llm-jp-13b-instruct-full-jaster-v1.0")
-model = AutoModelForCausalLM.from_pretrained("llm-jp/llm-jp-13b-instruct-full-jaster-v1.0", torch_dtype=torch.float16)
+model = AutoModelForCausalLM.from_pretrained("llm-jp/llm-jp-13b-instruct-full-jaster-v1.0", device_map="auto", torch_dtype=torch.float16)
 text = "自然言語処理とは何か"
 text = text + "### 回答:"
 tokenized_input = tokenizer.encode(text, add_special_tokens=False, return_tensors="pt").to(model.device)
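This commit pins `accelerate==0.23.0` and switches the README's loading call to `device_map="auto"`, which relies on accelerate to place the model's weights across the available devices automatically. As a minimal sketch, the updated snippet could be completed into a runnable example as below; the `generate()` settings and the final decode step are illustrative assumptions and are not part of this diff.

```python
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

tokenizer = AutoTokenizer.from_pretrained("llm-jp/llm-jp-13b-instruct-full-jaster-v1.0")
# device_map="auto" (the change in this commit) needs accelerate installed;
# it shards/places the fp16 weights across available GPUs (or CPU) automatically.
model = AutoModelForCausalLM.from_pretrained(
    "llm-jp/llm-jp-13b-instruct-full-jaster-v1.0",
    device_map="auto",
    torch_dtype=torch.float16,
)

text = "自然言語処理とは何か" + "### 回答:"
tokenized_input = tokenizer.encode(
    text, add_special_tokens=False, return_tensors="pt"
).to(model.device)

# Assumed decoding settings; the README diff stops at tokenization.
with torch.no_grad():
    output = model.generate(tokenized_input, max_new_tokens=100)
print(tokenizer.decode(output[0], skip_special_tokens=True))
```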