import torch
from transformers import GPT2LMHeadModel, GPT2Tokenizer
# Load the fine-tuned model and its tokenizer from the local checkpoint.
model = GPT2LMHeadModel.from_pretrained("v2/story/medium_2")
tokenizer = GPT2Tokenizer.from_pretrained("v2/story/medium_2")

# Match the embedding matrix to the tokenizer, in case special tokens
# were added during fine-tuning.
model.resize_token_embeddings(len(tokenizer))

device = torch.device("cpu")
model.to(device)
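# Optional alternative: pick a GPU automatically when one is available.
#device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
#model.to(device)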
# test examples
#input_text = "[Jane Wilson, Book Restoration Specialist, The British Library, Restoring ancient manuscripts, Archeologist Adventurer, Feminine, Long hair, Beautiful, Adult] <|endoftext|>"
#input_text = "[Gina Marquez, Event Coordinator, The Royal Gardens, Organizing large-scale events, Pirate Captain, Creative, Skilled negotiator, Eye patch, Parrot on shoulder]<|endoftext|>"
#input_text = "[Ethan Carter, Cybersecurity Expert, Interpol, Solving complex digital crimes, Hacker Vigilante, Masculine, Short beard, Ruggedly handsome, Mature]<|endoftext|>"
input_text ="[Ivan Ivanov, Lead Software Engineer, Superhero for Justice, Writing code, fixing issues, solving problems, Masculine, Long Hair, Adult]<|endoftext|>"
input_ids = tokenizer.encode(input_text, return_tensors="pt").to("cpu")
output = model.generate(
    input_ids,
    max_length=400,                        # cap on prompt + generated tokens
    num_return_sequences=1,
    eos_token_id=tokenizer.eos_token_id,   # stop at <|endoftext|>
    repetition_penalty=2.0,                # discourage repeated phrases
    top_k=200,                             # sample from the 200 most likely tokens
    top_p=0.9,                             # nucleus (top-p) sampling
    #num_beams=5,
    #temperature=1.2,  # 0.7
    do_sample=True,                        # sample instead of greedy decoding
    use_cache=True,
    output_hidden_states=True,
)
print("\n", tokenizer.decode(output[0], skip_special_tokens=False), "\n\n", "Answer took", len(output[0]), "tokens\n")