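# Creates a tiny random Qwen1.5-MoE model plus a matching truncated tokenizer,
# e.g. for fast functional tests and CI: the weights are random and the
# generated text is garbage by design; only the architecture and the
# save/load/generate paths are meant to be exercised.
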
import subprocess
import shlex

import torch
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer
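
# source checkpoint to derive the config and tokenizer from, and the local
# output directory for the shrunk artifacts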
mname_from = "Qwen/Qwen1.5-MoE-A2.7B"
mname_tiny = "peft-internal-testing/tiny-random-qwen-1.5-MoE"

vocab_keep_items = 3000  # how many of the first tokenizer vocab entries to keep

config = AutoConfig.from_pretrained(mname_from)
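
# shrink every size-related dimension: the result keeps the Qwen2-MoE
# architecture, just with very few parameters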
config.update(dict(
    hidden_size=16,
    intermediate_size=64,
    num_attention_heads=4,
    num_hidden_layers=2,
    max_position_embeddings=256,
    num_key_value_heads=4,
    vocab_size=vocab_keep_items,
    num_experts=4,
    num_experts_per_tok=2,
))
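# note: Qwen2-MoE also exposes `moe_intermediate_size` and
# `shared_expert_intermediate_size`, which keep their full checkpoint values
# here; to shrink the expert FFNs as well, something like this should work:
#   config.update(dict(moe_intermediate_size=32, shared_expert_intermediate_size=32))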

print("new config", config)
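
# instantiate a model with freshly initialized random weights from the shrunk
# config; no weights are downloaded from the source checkpoint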
tiny_model = AutoModelForCausalLM.from_config(config)
print(f"num of params {tiny_model.num_parameters()}")
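
# cast to bf16 to halve the size of the saved checkpoint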
tiny_model.bfloat16()
tiny_model.save_pretrained(mname_tiny)
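
# now shrink the tokenizer: save the full fast tokenizer to a scratch dir
# first, so its tokenizer.json can be truncated in place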
tokenizer_fast = AutoTokenizer.from_pretrained(mname_from)
tmp_dir = f"/tmp/{mname_from}"
tokenizer_fast.save_pretrained(tmp_dir)
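
# HACK: there is no simple API for truncating a fast tokenizer's vocab, so
# rewrite tokenizer.json directly: cut everything after the first occurrence
# of vocab_keep_items-1 (the 0-indexed id of the last kept vocab entry), close
# the vocab dict and drop the BPE merges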
closing_pat = '},"merges": []}}'
cmd = (f"perl -0777 -pi -e 's|({vocab_keep_items-1}).*|$1{closing_pat}|msg' {tmp_dir}/tokenizer.json")
result = subprocess.run(shlex.split(cmd), capture_output=True, text=True)
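# optional safety check: fail loudly if the in-place edit did not run
# (e.g. perl missing or a bad path)
if result.returncode != 0:
    raise RuntimeError(f"failed to truncate {tmp_dir}/tokenizer.json: {result.stderr}")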
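
# round-trip the truncated file through AutoTokenizer to make sure it still
# loads, then save it next to the tiny model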
tokenizer_fast_tiny = AutoTokenizer.from_pretrained(tmp_dir)
tokenizer_fast_tiny.save_pretrained(mname_tiny)
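
# sanity check: a random-weight model generates garbage, but the whole
# tokenize -> generate -> decode path must run without crashing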
model_inputs = tokenizer_fast_tiny("Making tiny model", return_tensors="pt")
gen_tokens = tiny_model.generate(**model_inputs, max_new_tokens=100)
print(tokenizer_fast_tiny.batch_decode(gen_tokens, skip_special_tokens=True))
print("Random output should be expected, but no crashing")

print(f"Model+Tokenizer saved in {mname_tiny}")