# Train

## Tokenizer

```bash
cd scripts
python -m venv venv
source venv/bin/activate
pip install -U -r requirements.in
```

```bash
python -B train_tokenizer.py
```

## Dataset

```bash
cd scripts
python -m venv venv-lit
source venv-lit/bin/activate
pip install -U -r requirements-lit.in
```

```bash
python -B prepare_pretrain_dataset.py
```

## Model

```bash
cd scripts
python -m venv venv-lit
source venv-lit/bin/activate
pip install -U -r requirements-lit.in
```

```bash
litgpt pretrain --config ./model.yaml
```

```bash
litgpt convert_from_litgpt out/pretrain/final/ out/converted_model
cp config.json out/pretrain/final/
cp config.json out/converted_model/
```

```python
import torch
from transformers import AutoModel


state_dict = torch.load('out/converted_model/model.pth')
model = AutoModel.from_pretrained('TinyLlama/TinyLlama_v1.1', state_dict=state_dict, ignore_mismatched_sizes=True)
model.save_pretrained('out/converted_model/')
```

## Evaluate

```bash
litgpt evaluate --tasks 'leaderboard' --out_dir 'evaluate-0/' --batch_size 4 --dtype 'bfloat16' out/pretrain/final/

litgpt evaluate --tasks 'hellaswag,gsm8k,truthfulqa_mc2,mmlu,winogrande,arc_challenge' --out_dir 'evaluate-1/' --batch_size 4 --dtype 'bfloat16' out/pretrain/final/

litgpt evaluate --tasks 'mmlu_pro,ifeval,mgsm_direct,mathqa,gpqa' --out_dir 'evaluate-2/' --batch_size 4 --dtype 'bfloat16' out/pretrain/final/
```