#! /bin/sh
# build KoichiYasuoka/open-calm-1b-ud-causal from cyberagent/open-calm-1b and UD_Japanese-GSDLUW
S=cyberagent/open-calm-1b
T=KoichiYasuoka/open-calm-1b-ud-causal
U=https://github.com/UniversalDependencies/UD_Japanese-GSDLUW
D=`basename $U`
test -d $D || git clone --depth=1 $U
for F in train dev test
do cp $D/*-$F.conllu $F.conllu
done

# first helper script: tailor the tokenizer and save it into tmpdir
TMPA=./maker$$a.py
( echo '#! /usr/bin/python3'
  echo 'src="'$S'"'
  cat << 'EOF'
import json,unicodedata
from transformers import AutoTokenizer
tkz=AutoTokenizer.from_pretrained(src,cls_token="<|endoftext|>",sep_token="<|endoftext|>",mask_token="<|endoftext|>",model_max_length=2048)
tkz.save_pretrained("tmpdir")
d=json.loads(tkz.backend_tokenizer.to_str())
# collect the word forms that occur in the training data
form=set()
with open("train.conllu","r",encoding="utf-8") as r:
  for s in r:
    w=s.split("\t")
    if len(w)==10 and w[0].isdecimal():
      form.add(w[1])
# keep only short BPE merges, then re-add merges for two-character words from the
# training data that do not begin with hiragana and whose characters are single tokens
m=[t for t in d["model"]["merges"] if len(t)<5]
for i in range(len(tkz)):
  w=tkz.decode(i)
  if len(w)==2 and w in form and not unicodedata.name(w[0]).startswith("HIRAGANA"):
    k=tkz([w[0],w[1]],add_special_tokens=False)["input_ids"]
    if len(k[0])==1 and len(k[1])==1:
      m.append(" ".join(tkz.convert_ids_to_tokens([k[0][0],k[1][0]])))
d["model"]["merges"]=m
tkz.backend_tokenizer.from_str(json.dumps(d)).save("tmpdir/tokenizer.json")
EOF
) > $TMPA
chmod 755 $TMPA
$TMPA

# second helper script: fine-tune the token classifier under DeepSpeed
TMPB=./maker$$b.py
( echo '#! /usr/bin/env deepspeed'
  echo 'src="'$S'"'
  echo 'tgt="'$T'"'
  cat << 'EOF'
from transformers import PreTrainedTokenizerFast,AutoConfig,GPTNeoXForTokenClassification,DefaultDataCollator,TrainingArguments,Trainer
# dataset that maps each CoNLL-U sentence to input embeddings plus UPOS/dependency labels
class UDCausalDataset(object):
  def __init__(self,conllu,tokenizer,embeddings=None):
    self.conllu=open(conllu,"r",encoding="utf-8")
    self.tokenizer=tokenizer
    self.embeddings=embeddings
    self.max_tokens=3
    self.seeks=[(0,0)]
    label=set(["SYM"])
    dep=set()
    s=self.conllu.readline()
    while s!="":
      if s=="\n":
        self.seeks.append((self.conllu.tell(),0))
      else:
        w=s.split("\t")
        if len(w)==10:
          if w[0].isdecimal():
            p=w[3] if w[5]=="_" else w[3]+"|"+w[5]
            label.add(p)
            dep.add(p+("|" if w[6]=="0" else "|l-" if int(w[0])<int(w[6]) else "|r-")+w[7])
# [... part of the class definition is missing here ...]
    if j>0:
      ids=i+[pad]*j
      upos=u+["SYM"]*j
    else:
      ids=i[0:self.max_tokens]
      upos=u[0:self.max_tokens]
    return {"inputs_embeds":emb[ids,:],"labels":[self.label2id[p] for p in upos]}
tkz=PreTrainedTokenizerFast.from_pretrained("tmpdir")
trainDS=UDCausalDataset("train.conllu",tkz)
devDS=UDCausalDataset("dev.conllu",tkz)
testDS=UDCausalDataset("test.conllu",tkz)
lid=trainDS(devDS,testDS)
cfg=AutoConfig.from_pretrained(src,num_labels=len(lid),label2id=lid,id2label={i:l for l,i in lid.items()},ignore_mismatched_sizes=True)
mdl=GPTNeoXForTokenClassification.from_pretrained(src,config=cfg,ignore_mismatched_sizes=True)
trainDS.embeddings=mdl.get_input_embeddings().weight
trainDS.max_tokens=min(trainDS.max_tokens,cfg.max_position_embeddings)
# DeepSpeed ZeRO-3 with CPU offload of both optimizer states and parameters
dsp={"fp16":{"enabled":"auto"},"optimizer":{"type":"AdamW"},"scheduler":{"type":"WarmupLR","params":{}},"train_batch_size":"auto","train_micro_batch_size_per_gpu":"auto","zero_optimization":{"stage":3,"offload_optimizer":{"device":"cpu","pin_memory":True},"offload_param":{"device":"cpu","pin_memory":True},"overlap_comm":True,"contiguous_gradients":True,"reduce_bucket_size":"auto","stage3_prefetch_bucket_size":"auto","stage3_param_persistence_threshold":"auto","stage3_gather_16bit_weights_on_model_save":True}}
arg=TrainingArguments(num_train_epochs=3,per_device_train_batch_size=16,deepspeed=dsp,output_dir=tgt,overwrite_output_dir=True,save_total_limit=2,learning_rate=5e-05,warmup_ratio=0.1,save_safetensors=False)
trn=Trainer(args=arg,data_collator=DefaultDataCollator(),model=mdl,train_dataset=trainDS)
trn.train()
trn.save_model(tgt)
tkz.save_pretrained(tgt)
EOF
) > $TMPB
chmod 755 $TMPB
$TMPB
exit
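
After the script finishes, a quick sanity check is possible; the sketch below is not part of the recipe above, and assumes the fine-tuned files were written to the KoichiYasuoka/open-calm-1b-ud-causal output directory. It only confirms that the checkpoint loads and predicts from the extended UPOS/deprel label set; it does not reproduce the causal dependency decoding used at inference time.

#! /usr/bin/python3
# minimal sanity check only (not part of the training recipe above):
# load the fine-tuned checkpoint with a plain token-classification pipeline
# and print the raw labels predicted for one sentence
from transformers import pipeline
nlp=pipeline("token-classification",model="KoichiYasuoka/open-calm-1b-ud-causal")
print(nlp("全学年にわたって小学校の国語の教科書に挿し絵が用いられている"))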