Update README.md
README.md (changed)

@@ -12,10 +12,10 @@ from transformers import GPT2Tokenizer, GPT2LMHeadModel
 import torch
 
 
-tokenizer = GPT2Tokenizer.from_pretrained('
+tokenizer = GPT2Tokenizer.from_pretrained('sgarbi/prompt_generator')
 tokenizer.pad_token = tokenizer.eos_token
 
-model = GPT2LMHeadModel.from_pretrained('
+model = GPT2LMHeadModel.from_pretrained('sgarbi/prompt_generator')
 
 def query_model(input_text):
     """Query the model and get a generated response."""
@@ -35,8 +35,7 @@ def query_model(input_text):
     return decoded[tag_index + len('<|prompt|>'):]
 
 # Example use
-print(query_model("
-
+print(query_model("Space X Engineer"))
 ```
 
 ## Examples