|
import torch |
|
import random |
|
from transformers import GPT2LMHeadModel, GPT2Tokenizer |
|
|
|
# Generate a character backstory from a structured persona prompt using a
# fine-tuned GPT-2 model stored at v2/story/medium_2.
model = GPT2LMHeadModel.from_pretrained("v2/story/medium_2")
tokenizer = GPT2Tokenizer.from_pretrained("v2/story/medium_2")
# The fine-tuned checkpoint may carry added special tokens; keep the embedding
# matrix in sync with the tokenizer's vocabulary size.
model.resize_token_embeddings(len(tokenizer))

# Prefer GPU when available; falls back to CPU (the original behavior).
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)
# Disable dropout etc. — we are doing inference, not training.
model.eval()

# Persona prompt in the bracketed format the model was fine-tuned on,
# terminated by the GPT-2 end-of-text marker.
input_text = "[Ivan Ivanov, Lead Software Engineer, Superhero for Justice, Writing code, fixing issues, solving problems, Masculine, Long Hair, Adult]<|endoftext|>"

# Encode on the same device as the model (the original hard-coded "cpu"
# and left the `device` variable unused).
input_ids = tokenizer.encode(input_text, return_tensors="pt").to(device)

# No gradients needed for generation — saves memory and time.
with torch.no_grad():
    output = model.generate(
        input_ids,
        max_length=400,
        num_return_sequences=1,
        eos_token_id=tokenizer.eos_token_id,
        # GPT-2 has no dedicated pad token; reuse EOS to silence the
        # "Setting pad_token_id" warning and make padding explicit.
        pad_token_id=tokenizer.eos_token_id,
        repetition_penalty=2.0,
        top_k=200,
        top_p=0.9,
        do_sample=True,
        use_cache=True,
        # NOTE(review): the original passed output_hidden_states=True, but
        # without return_dict_in_generate=True generate() returns only the
        # sequence tensor, so the collected states were discarded — removed
        # to avoid the wasted memory.
    )

print("\n", tokenizer.decode(output[0], skip_special_tokens=False), "\n\n", "Answer took", len(output[0]), "tokens\n")