RealFalconsAI committed
Commit: 5cd1cf4
1 Parent(s): c2a4319

Update README.md

Files changed (1): README.md (+7 -6)
README.md CHANGED
@@ -80,10 +80,11 @@ To use this model for inference, you need to load the fine-tuned model and token
 
 Running on CPU
 ```python
-from transformers import T5Tokenizer, T5ForConditionalGeneration
+# Load model directly
+from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
 
-tokenizer = T5Tokenizer.from_pretrained('Falconsai/arc_of_conversation')
-model = T5ForConditionalGeneration.from_pretrained('Falconsai/arc_of_conversation')
+tokenizer = AutoTokenizer.from_pretrained("Falconsai/arc_of_conversation")
+model = AutoModelForSeq2SeqLM.from_pretrained("Falconsai/arc_of_conversation")
 
 input_text = "Your conversation Here"
 input_ids = tokenizer(input_text, return_tensors="pt").input_ids
@@ -95,10 +96,10 @@ print(tokenizer.decode(outputs[0]))
 
 Running on GPU
 ```python
 # pip install accelerate
-from transformers import T5Tokenizer, T5ForConditionalGeneration
+from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
 
-tokenizer = T5Tokenizer.from_pretrained('Falconsai/arc_of_conversation')
-model = T5ForConditionalGeneration.from_pretrained('Falconsai/arc_of_conversation', device_map="auto")
+tokenizer = AutoTokenizer.from_pretrained("Falconsai/arc_of_conversation")
+model = AutoModelForSeq2SeqLM.from_pretrained("Falconsai/arc_of_conversation", device_map="auto")
 
 input_text = "Your conversation Here"
 input_ids = tokenizer(input_text, return_tensors="pt").input_ids.to("cuda")
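
Both hunks only touch the load step, swapping the T5-specific classes for the Auto classes, which resolve the concrete tokenizer and seq2seq model classes from the checkpoint's config. For readers following the updated README, a minimal end-to-end sketch of the CPU path after this change is shown below; the `model.generate` call and its `max_length` value are illustrative assumptions (the generation lines fall outside these hunks), while the final `print` line matches the context shown in the second hunk header.

```python
# Minimal sketch of the updated CPU inference flow from the README.
# NOTE: the generate() arguments below (max_length) are assumptions for
# illustration; they are not part of this diff.
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

# The Auto classes pick the concrete tokenizer/model classes (here, T5-based)
# from the checkpoint config, so the snippet no longer hard-codes T5 classes.
tokenizer = AutoTokenizer.from_pretrained("Falconsai/arc_of_conversation")
model = AutoModelForSeq2SeqLM.from_pretrained("Falconsai/arc_of_conversation")

input_text = "Your conversation Here"
input_ids = tokenizer(input_text, return_tensors="pt").input_ids

outputs = model.generate(input_ids, max_length=512)
print(tokenizer.decode(outputs[0]))
```

The GPU variant differs only in loading with `device_map="auto"` (which requires `accelerate` to be installed) and moving `input_ids` to `"cuda"`, exactly as the second hunk shows.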