Commit b5731cf: Duplicate from nvidia/NVLM-D-72B
Co-authored-by: Boxin Wang <[email protected]>

This view is limited to 50 files because it contains too many changes; see the raw diff for the full set.
- .gitattributes +35 -0
- Dockerfile +14 -0
- README.md +311 -0
- config.json +141 -0
- configuration_intern_vit.py +119 -0
- configuration_nvlm_d.py +100 -0
- conversation.py +358 -0
- incl_licenses/LICENSE +21 -0
- incl_licenses/LICENSE_2 +201 -0
- merges.txt +0 -0
- model-00001-of-00046.safetensors +3 -0
- model-00002-of-00046.safetensors +3 -0
- model-00003-of-00046.safetensors +3 -0
- model-00004-of-00046.safetensors +3 -0
- model-00005-of-00046.safetensors +3 -0
- model-00006-of-00046.safetensors +3 -0
- model-00007-of-00046.safetensors +3 -0
- model-00008-of-00046.safetensors +3 -0
- model-00009-of-00046.safetensors +3 -0
- model-00010-of-00046.safetensors +3 -0
- model-00011-of-00046.safetensors +3 -0
- model-00012-of-00046.safetensors +3 -0
- model-00013-of-00046.safetensors +3 -0
- model-00014-of-00046.safetensors +3 -0
- model-00015-of-00046.safetensors +3 -0
- model-00016-of-00046.safetensors +3 -0
- model-00017-of-00046.safetensors +3 -0
- model-00018-of-00046.safetensors +3 -0
- model-00019-of-00046.safetensors +3 -0
- model-00020-of-00046.safetensors +3 -0
- model-00021-of-00046.safetensors +3 -0
- model-00022-of-00046.safetensors +3 -0
- model-00023-of-00046.safetensors +3 -0
- model-00024-of-00046.safetensors +3 -0
- model-00025-of-00046.safetensors +3 -0
- model-00026-of-00046.safetensors +3 -0
- model-00027-of-00046.safetensors +3 -0
- model-00028-of-00046.safetensors +3 -0
- model-00029-of-00046.safetensors +3 -0
- model-00030-of-00046.safetensors +3 -0
- model-00031-of-00046.safetensors +3 -0
- model-00032-of-00046.safetensors +3 -0
- model-00033-of-00046.safetensors +3 -0
- model-00034-of-00046.safetensors +3 -0
- model-00035-of-00046.safetensors +3 -0
- model-00036-of-00046.safetensors +3 -0
- model-00037-of-00046.safetensors +3 -0
- model-00038-of-00046.safetensors +3 -0
- model-00039-of-00046.safetensors +3 -0
- model-00040-of-00046.safetensors +3 -0
.gitattributes
ADDED
@@ -0,0 +1,35 @@
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ckpt filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.mlmodel filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.npy filter=lfs diff=lfs merge=lfs -text
+*.npz filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.parquet filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pickle filter=lfs diff=lfs merge=lfs -text
+*.pkl filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.rar filter=lfs diff=lfs merge=lfs -text
+*.safetensors filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tar filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.wasm filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zst filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
Dockerfile
ADDED
@@ -0,0 +1,14 @@
+FROM nvcr.io/nvidia/pytorch:23.09-py3
+
+RUN pip install transformers==4.39.3
+
+RUN pip install accelerate==0.34.2
+
+RUN pip install datasets==2.18.0
+
+RUN pip install timm==1.0.9
+
+RUN pip install anls==0.0.2
+
+RUN pip install pycocoevalcap==1.2
+
README.md
ADDED
@@ -0,0 +1,311 @@
+---
+license: cc-by-nc-4.0
+language:
+- en
+pipeline_tag: image-text-to-text
+tags:
+- nvidia
+- NVLM
+- pytorch
+- multimodal
+- conversational
+library_name: transformers
+---
+
+<p align="center">
+  <img src="nvlm-logo-light.png" alt="NVLM logo" width="300">
+</p>
+
+
+## Model Details
+
+Today (September 17th, 2024), we introduce [NVLM 1.0](https://arxiv.org/abs/2409.11402), a family of frontier-class multimodal large language models (LLMs) that achieve state-of-the-art results on vision-language tasks, rivaling the leading proprietary models (e.g., GPT-4o) and open-access models (e.g., Llama 3-V 405B and InternVL 2). Remarkably, NVLM 1.0 shows improved text-only performance over its LLM backbone after multimodal training.
+
+In this repo, we are open-sourcing NVLM-1.0-D-72B (decoder-only architecture): the model weights and code for the community.
+
+## Other Resources
+[Inference Code (HF)](https://huggingface.co/nvidia/NVLM-D-72B/tree/main)   [Training Code (Coming soon)]()   [Website](https://research.nvidia.com/labs/adlr/NVLM-1/)   [Paper](https://arxiv.org/abs/2409.11402)
+
+## Benchmark Results
+We train our model with the legacy [Megatron-LM](https://github.com/NVIDIA/Megatron-LM/tree/main/megatron/legacy) codebase and adapt it to Huggingface for model hosting, reproducibility, and inference.
+We observe numerical differences between the Megatron and Huggingface codebases, which are within the expected range of variation.
+We provide the results from both the Huggingface codebase and the Megatron codebase for reproducibility and comparison with other models.
+
+Results (as of September 17th, 2024) on the multimodal benchmarks are as follows:
+
+### Vision-language Benchmarks
+
+| Benchmark | MMMU (val / test) | MathVista | OCRBench | AI2D | ChartQA | DocVQA | TextVQA | RealWorldQA | VQAv2 |
+|------------------------------|-------------------|-----------|----------|------|---------|--------|---------|-------------|-------|
+| NVLM-D 1.0 72B (Huggingface) | 58.7 / 54.9 | 65.2 | 852 | 94.2 | 86.0 | 92.6 | 82.6 | 69.5 | 85.4 |
+| NVLM-D 1.0 72B (Megatron) | 59.7 / 54.6 | 65.2 | 853 | 94.2 | 86.0 | 92.6 | 82.1 | 69.7 | 85.4 |
+| Llama 3.2 90B | 60.3 / - | 57.3 | - | 92.3 | 85.5 | 90.1 | - | - | 78.1 |
+| Llama 3-V 70B | 60.6 / - | - | - | 93.0 | 83.2 | 92.2 | 83.4 | - | 79.1 |
+| Llama 3-V 405B | 64.5 / - | - | - | 94.1 | 85.8 | 92.6 | 84.8 | - | 80.2 |
+| InternVL2-Llama3-76B | 55.2 / - | 65.5 | 839 | 94.8 | 88.4 | 94.1 | 84.4 | 72.2 | - |
+| GPT-4V | 56.8 / 55.7 | 49.9 | 645 | 78.2 | 78.5 | 88.4 | 78.0 | 61.4 | 77.2 |
+| GPT-4o | 69.1 / - | 63.8 | 736 | 94.2 | 85.7 | 92.8 | - | - | - |
+| Claude 3.5 Sonnet | 68.3 / - | 67.7 | 788 | 94.7 | 90.8 | 95.2 | - | - | - |
+| Gemini 1.5 Pro (Aug 2024) | 62.2 / - | 63.9 | 754 | 94.4 | 87.2 | 93.1 | 78.7 | 70.4 | 80.2 |
+
+### Text-only Benchmarks
+
+| Tasks | Backbone LLM | MMLU | GSM8K | MATH | HumanEval | Avg. Accuracy |
+|------------------------------|--------------|------|-------|------|-----------|------------------|
+| **Proprietary** | | | | | | |
+| GPT-4.0 | N/A | 88.7 | - | 76.6 | 90.2 | - |
+| Gemini Pro 1.5 (Aug 2024) | N/A | 85.9 | 90.8 | 67.7 | 84.1 | 82.1 |
+| Claude 3.5 Sonnet | N/A | 88.7 | 96.4 | 71.1 | 92.0 | 87.0 |
+| **Open LLM** | | | | | | |
+| (a) Nous-Hermes-2-Yi-34B | N/A | 75.5 | 78.6 | 21.8 | 43.3 | 54.8 |
+| (b) Qwen-72B-Instruct | N/A | 82.3 | 91.1 | 59.7 | 86.0 | 79.8 |
+| (c) Llama-3-70B-Instruct | N/A | 82.0 | 93.0 | 51.0 | 81.7 | 76.6 |
+| (d) Llama-3.1-70B-Instruct | N/A | 83.6 | 95.1 | 68.0 | 80.5 | 81.8 |
+| (e) Llama-3.1-405B-Instruct | N/A | 87.3 | 96.8 | 73.8 | 89.0 | 86.7 |
+| **Open Multimodal LLM** | | | | | | |
+| VILA-1.5 40B | (a) | 73.3 | 67.5 | 16.8 | 34.1 | 🥶 47.9 (-6.9) |
+| LLaVA-OneVision 72B | (b) | 80.6 | 89.9 | 49.2 | 74.4 | 🥶 73.5 (-6.3) |
+| InternVL-2-Llama3-76B | (c) | 78.5 | 87.1 | 42.5 | 71.3 | 🥶 69.9 (-6.7) |
+| *Llama 3-V 70B | (d) | 83.6 | 95.1 | 68.0 | 80.5 | 🙂 81.8 (0) |
+| *Llama 3-V 405B | (e) | 87.3 | 96.8 | 73.8 | 89.0 | 🙂 86.7 (0) |
+| NVLM-D 1.0 72B (Megatron) | (b) | 82.0 | 92.9 | 73.1 | 88.4 | 🥳 84.1 (+4.3) |
+| NVLM-D 1.0 72B (Huggingface) | (b) | 81.7 | 93.2 | 73.1 | 89.0 | 🥳 84.3 (+4.5) |
+
+## How to use
+
+When converting the Megatron checkpoint to Huggingface, we adapt the [InternVL codebase](https://huggingface.co/OpenGVLab/InternVL2-Llama3-76B) to support model loading and multi-GPU inference in HF.
+We also use the tokenizer from [Qwen2.5-72B-Instruct](https://huggingface.co/Qwen/Qwen2.5-72B-Instruct/tree/main) when adapting the tokenizer to Huggingface, as it contains extra special tokens for vision tasks, e.g., `<|vision_pad|>`.
+We train NVLM-1.0-D-72B based on the [Qwen2-72B-Instruct](https://huggingface.co/Qwen/Qwen2-72B-Instruct/tree/main) text-only model and the [InternViT-6B-448px-V1-5](https://huggingface.co/OpenGVLab/InternViT-6B-448px-V1-5) ViT model with our large-scale, high-quality multimodal dataset.
+For training code, please refer to [Megatron-LM (Coming soon)]().
+
+### Prepare the environment
+
+We provide a docker build file in the [Dockerfile](Dockerfile) for reproduction.
+
+The docker image is based on `nvcr.io/nvidia/pytorch:23.09-py3`.
+
+*Note: We observe that different `transformers` versions / CUDA versions / docker versions can lead to slight benchmark number differences. We recommend using the Dockerfile above for precise reproduction.*
+
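As a quick sanity check inside the container, the version pins from the Dockerfile can be verified before running inference (a minimal sketch, not part of the original card; the expected versions come from the Dockerfile above):

```python
import accelerate
import timm
import transformers

# Versions pinned in the Dockerfile; mismatches can shift benchmark numbers slightly.
for module, expected in [(transformers, "4.39.3"), (accelerate, "0.34.2"), (timm, "1.0.9")]:
    assert module.__version__ == expected, (
        f"{module.__name__} {module.__version__} != pinned {expected}"
    )
```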
+### Model loading
+
+```python
+import torch
+from transformers import AutoModel
+
+path = "nvidia/NVLM-D-72B"
+model = AutoModel.from_pretrained(
+    path,
+    torch_dtype=torch.bfloat16,
+    low_cpu_mem_usage=True,
+    use_flash_attn=False,
+    trust_remote_code=True).eval()
+```
+
+### Multiple GPUs
+
+The model can be loaded on multiple GPUs as follows:
+
+```python
+import torch
+import math
+from transformers import AutoModel
+
+def split_model():
+    device_map = {}
+    world_size = torch.cuda.device_count()
+    num_layers = 80
+    # Since the first GPU will be used for ViT, treat it as half a GPU.
+    num_layers_per_gpu = math.ceil(num_layers / (world_size - 0.5))
+    num_layers_per_gpu = [num_layers_per_gpu] * world_size
+    num_layers_per_gpu[0] = math.ceil(num_layers_per_gpu[0] * 0.5)
+    layer_cnt = 0
+    for i, num_layer in enumerate(num_layers_per_gpu):
+        for j in range(num_layer):
+            device_map[f'language_model.model.layers.{layer_cnt}'] = i
+            layer_cnt += 1
+    device_map['vision_model'] = 0
+    device_map['mlp1'] = 0
+    device_map['language_model.model.tok_embeddings'] = 0
+    device_map['language_model.model.embed_tokens'] = 0
+    device_map['language_model.output'] = 0
+    device_map['language_model.model.norm'] = 0
+    device_map['language_model.lm_head'] = 0
+    device_map[f'language_model.model.layers.{num_layers - 1}'] = 0
+
+    return device_map
+
+path = "nvidia/NVLM-D-72B"
+device_map = split_model()
+model = AutoModel.from_pretrained(
+    path,
+    torch_dtype=torch.bfloat16,
+    low_cpu_mem_usage=True,
+    use_flash_attn=False,
+    trust_remote_code=True,
+    device_map=device_map).eval()
+```
+
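For intuition about the split above: on an 8-GPU node, `split_model` gives each full GPU `ceil(80 / 7.5) = 11` decoder layers, while GPU 0, which also hosts the vision encoder, embeddings, norm, and output head, gets `ceil(11 * 0.5) = 6`. A worked sketch of the arithmetic (not part of the original card):

```python
import math

# Layer allocation computed by split_model on 8 GPUs (sketch).
world_size, num_layers = 8, 80
per_gpu = math.ceil(num_layers / (world_size - 0.5))  # 11 layers per full GPU
alloc = [per_gpu] * world_size
alloc[0] = math.ceil(alloc[0] * 0.5)                   # GPU 0 counts as half a GPU for the LLM
print(alloc)  # [6, 11, 11, 11, 11, 11, 11, 11]
```

Note that the last decoder layer is additionally pinned to GPU 0 by the final line of `split_model`, keeping it on the same device as the output projection.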
+### Inference
+
+```python
+import torch
+from transformers import AutoTokenizer, AutoModel
+import math
+from PIL import Image
+import torchvision.transforms as T
+from torchvision.transforms.functional import InterpolationMode
+
+
+def split_model():
+    device_map = {}
+    world_size = torch.cuda.device_count()
+    num_layers = 80
+    # Since the first GPU will be used for ViT, treat it as half a GPU.
+    num_layers_per_gpu = math.ceil(num_layers / (world_size - 0.5))
+    num_layers_per_gpu = [num_layers_per_gpu] * world_size
+    num_layers_per_gpu[0] = math.ceil(num_layers_per_gpu[0] * 0.5)
+    layer_cnt = 0
+    for i, num_layer in enumerate(num_layers_per_gpu):
+        for j in range(num_layer):
+            device_map[f'language_model.model.layers.{layer_cnt}'] = i
+            layer_cnt += 1
+    device_map['vision_model'] = 0
+    device_map['mlp1'] = 0
+    device_map['language_model.model.tok_embeddings'] = 0
+    device_map['language_model.model.embed_tokens'] = 0
+    device_map['language_model.output'] = 0
+    device_map['language_model.model.norm'] = 0
+    device_map['language_model.lm_head'] = 0
+    device_map[f'language_model.model.layers.{num_layers - 1}'] = 0
+
+    return device_map
+
+
+IMAGENET_MEAN = (0.485, 0.456, 0.406)
+IMAGENET_STD = (0.229, 0.224, 0.225)
+
+
+def build_transform(input_size):
+    MEAN, STD = IMAGENET_MEAN, IMAGENET_STD
+    transform = T.Compose([
+        T.Lambda(lambda img: img.convert('RGB') if img.mode != 'RGB' else img),
+        T.Resize((input_size, input_size), interpolation=InterpolationMode.BICUBIC),
+        T.ToTensor(),
+        T.Normalize(mean=MEAN, std=STD)
+    ])
+    return transform
+
+
+def find_closest_aspect_ratio(aspect_ratio, target_ratios, width, height, image_size):
+    best_ratio_diff = float('inf')
+    best_ratio = (1, 1)
+    area = width * height
+    for ratio in target_ratios:
+        target_aspect_ratio = ratio[0] / ratio[1]
+        ratio_diff = abs(aspect_ratio - target_aspect_ratio)
+        if ratio_diff < best_ratio_diff:
+            best_ratio_diff = ratio_diff
+            best_ratio = ratio
+        elif ratio_diff == best_ratio_diff:
+            if area > 0.5 * image_size * image_size * ratio[0] * ratio[1]:
+                best_ratio = ratio
+    return best_ratio
+
+
+def dynamic_preprocess(image, min_num=1, max_num=12, image_size=448, use_thumbnail=False):
+    orig_width, orig_height = image.size
+    aspect_ratio = orig_width / orig_height
+
+    # calculate the existing image aspect ratio
+    target_ratios = set(
+        (i, j) for n in range(min_num, max_num + 1) for i in range(1, n + 1) for j in range(1, n + 1) if
+        i * j <= max_num and i * j >= min_num)
+    target_ratios = sorted(target_ratios, key=lambda x: x[0] * x[1])
+
+    # find the closest aspect ratio to the target
+    target_aspect_ratio = find_closest_aspect_ratio(
+        aspect_ratio, target_ratios, orig_width, orig_height, image_size)
+
+    # calculate the target width and height
+    target_width = image_size * target_aspect_ratio[0]
+    target_height = image_size * target_aspect_ratio[1]
+    blocks = target_aspect_ratio[0] * target_aspect_ratio[1]
+
+    # resize the image
+    resized_img = image.resize((target_width, target_height))
+    processed_images = []
+    for i in range(blocks):
+        box = (
+            (i % (target_width // image_size)) * image_size,
+            (i // (target_width // image_size)) * image_size,
+            ((i % (target_width // image_size)) + 1) * image_size,
+            ((i // (target_width // image_size)) + 1) * image_size
+        )
+        # split the image
+        split_img = resized_img.crop(box)
+        processed_images.append(split_img)
+    assert len(processed_images) == blocks
+    if use_thumbnail and len(processed_images) != 1:
+        thumbnail_img = image.resize((image_size, image_size))
+        processed_images.append(thumbnail_img)
+    return processed_images
+
+
+def load_image(image_file, input_size=448, max_num=12):
+    image = Image.open(image_file).convert('RGB')
+    transform = build_transform(input_size=input_size)
+    images = dynamic_preprocess(image, image_size=input_size, use_thumbnail=True, max_num=max_num)
+    pixel_values = [transform(image) for image in images]
+    pixel_values = torch.stack(pixel_values)
+    return pixel_values
+
+path = "nvidia/NVLM-D-72B"
+device_map = split_model()
+model = AutoModel.from_pretrained(
+    path,
+    torch_dtype=torch.bfloat16,
+    low_cpu_mem_usage=True,
+    use_flash_attn=False,
+    trust_remote_code=True,
+    device_map=device_map).eval()
+
+print(model)
+
+tokenizer = AutoTokenizer.from_pretrained(path, trust_remote_code=True, use_fast=False)
+generation_config = dict(max_new_tokens=1024, do_sample=False)
+
+# pure-text conversation
+question = 'Hello, who are you?'
+response, history = model.chat(tokenizer, None, question, generation_config, history=None, return_history=True)
+print(f'User: {question}\nAssistant: {response}')
+
+# single-image single-round conversation
+pixel_values = load_image('path/to/your/example/image.jpg', max_num=6).to(
+    torch.bfloat16)
+question = '<image>\nPlease describe the image shortly.'
+response = model.chat(tokenizer, pixel_values, question, generation_config)
+print(f'User: {question}\nAssistant: {response}')
+```
+
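Multi-round conversation on the same image follows the `return_history=True` pattern from the pure-text example (a sketch with illustrative follow-up questions, reusing `pixel_values` from the snippet above):

```python
# multi-round, single-image conversation (sketch)
question = '<image>\nPlease describe the image in detail.'
response, history = model.chat(tokenizer, pixel_values, question, generation_config,
                               history=None, return_history=True)
print(f'User: {question}\nAssistant: {response}')

question = 'What is the most prominent object in the image?'
response, history = model.chat(tokenizer, pixel_values, question, generation_config,
                               history=history, return_history=True)
print(f'User: {question}\nAssistant: {response}')
```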
+## Correspondence to
+Wenliang Dai* ([email protected]), Nayeon Lee* ([email protected]), Boxin Wang* ([email protected]), Zhuolin Yang* ([email protected]), Wei Ping* ([email protected])
+
+*Equal contribution
+
+## Citation
+<pre>
+@article{nvlm2024,
+  title={NVLM: Open Frontier-Class Multimodal LLMs},
+  author={Dai, Wenliang and Lee, Nayeon and Wang, Boxin and Yang, Zhuolin and Liu, Zihan and Barker, Jon and Rintamaki, Tuomas and Shoeybi, Mohammad and Catanzaro, Bryan and Ping, Wei},
+  journal={arXiv preprint arXiv:2409.11402},
+  year={2024}}
+</pre>
+
+## License
+The use of this model is governed by the [cc-by-nc-4.0](https://spdx.org/licenses/CC-BY-NC-4.0) license.
config.json
ADDED
@@ -0,0 +1,141 @@
+{
+  "_commit_hash": null,
+  "architectures": [
+    "NVLM_D"
+  ],
+  "auto_map": {
+    "AutoConfig": "configuration_nvlm_d.NVLM_D_Config",
+    "AutoModel": "modeling_nvlm_d.NVLM_D_Model",
+    "AutoModelForCausalLM": "modeling_nvlm_d.NVLM_D_Model"
+  },
+  "downsample_ratio": 0.5,
+  "dynamic_image_size": true,
+  "force_image_size": 448,
+  "llm_config": {
+    "_name_or_path": "Qwen/Qwen2-72B-Instruct",
+    "add_cross_attention": false,
+    "architectures": [
+      "Qwen2ForCausalLM"
+    ],
+    "attention_bias": true,
+    "attention_dropout": 0.0,
+    "bad_words_ids": null,
+    "begin_suppress_tokens": null,
+    "bos_token_id": 151643,
+    "chunk_size_feed_forward": 0,
+    "cross_attention_hidden_size": null,
+    "decoder_start_token_id": null,
+    "diversity_penalty": 0.0,
+    "do_sample": false,
+    "early_stopping": false,
+    "encoder_no_repeat_ngram_size": 0,
+    "eos_token_id": 151645,
+    "exponential_decay_length_penalty": null,
+    "finetuning_task": null,
+    "forced_bos_token_id": null,
+    "forced_eos_token_id": null,
+    "hidden_act": "silu",
+    "hidden_size": 8192,
+    "id2label": {
+      "0": "LABEL_0",
+      "1": "LABEL_1"
+    },
+    "initializer_range": 0.02,
+    "intermediate_size": 29568,
+    "is_decoder": false,
+    "is_encoder_decoder": false,
+    "label2id": {
+      "LABEL_0": 0,
+      "LABEL_1": 1
+    },
+    "length_penalty": 1.0,
+    "max_length": 20,
+    "max_position_embeddings": 32768,
+    "min_length": 0,
+    "mlp_bias": false,
+    "model_type": "llama",
+    "no_repeat_ngram_size": 0,
+    "num_attention_heads": 64,
+    "num_beam_groups": 1,
+    "num_beams": 1,
+    "num_hidden_layers": 80,
+    "num_key_value_heads": 8,
+    "num_return_sequences": 1,
+    "output_attentions": false,
+    "output_hidden_states": false,
+    "output_scores": false,
+    "pad_token_id": null,
+    "prefix": null,
+    "pretraining_tp": 1,
+    "problem_type": null,
+    "pruned_heads": {},
+    "remove_invalid_values": false,
+    "repetition_penalty": 1.0,
+    "return_dict": true,
+    "return_dict_in_generate": false,
+    "rms_norm_eps": 1e-06,
+    "rope_scaling": {
+      "factor": 3.0,
+      "type": "dynamic"
+    },
+    "rope_theta": 1000000.0,
+    "sep_token_id": null,
+    "suppress_tokens": null,
+    "task_specific_params": null,
+    "temperature": 1.0,
+    "tf_legacy_loss": false,
+    "tie_encoder_decoder": false,
+    "tie_word_embeddings": false,
+    "tokenizer_class": null,
+    "top_k": 1,
+    "top_p": 0,
+    "torch_dtype": "bfloat16",
+    "torchscript": false,
+    "transformers_version": "4.39.3",
+    "typical_p": 1.0,
+    "use_bfloat16": true,
+    "use_cache": true,
+    "vocab_size": 152064
+  },
+  "max_dynamic_patch": 6,
+  "min_dynamic_patch": 1,
+  "model_type": "NVLM_D",
+  "ps_version": "v2",
+  "select_layer": -1,
+  "template": "chatml",
+  "torch_dtype": "bfloat16",
+  "transformers_version": null,
+  "use_backbone_lora": 0,
+  "use_llm_lora": 0,
+  "use_thumbnail": true,
+  "vision_config": {
+    "architectures": [
+      "InternVisionModel"
+    ],
+    "attention_dropout": 0.0,
+    "drop_path_rate": 0.0,
+    "dropout": 0.0,
+    "hidden_act": "gelu",
+    "hidden_size": 3200,
+    "image_size": 448,
+    "initializer_factor": 0.1,
+    "initializer_range": 1e-10,
+    "intermediate_size": 12800,
+    "layer_norm_eps": 1e-06,
+    "model_type": "intern_vit_6b",
+    "norm_type": "rms_norm",
+    "num_attention_heads": 25,
+    "num_channels": 3,
+    "num_hidden_layers": 45,
+    "output_attentions": false,
+    "output_hidden_states": false,
+    "patch_size": 14,
+    "qk_normalization": true,
+    "qkv_bias": false,
+    "return_dict": true,
+    "torch_dtype": "bfloat16",
+    "transformers_version": "4.39.3",
+    "use_bfloat16": true,
+    "use_flash_attn": true
+  }
+}
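For sequence-length intuition: with `force_image_size` 448, `patch_size` 14, and `downsample_ratio` 0.5, each image tile contributes 256 visual tokens, assuming the InternVL-style pixel-shuffle reading of `downsample_ratio` (a sketch; the authoritative accounting is in `modeling_nvlm_d.py`, referenced by `auto_map` but not shown in this 50-file view):

```python
# Visual tokens per 448x448 tile, assuming pixel shuffle halves each side of the ViT patch grid.
image_size = 448        # force_image_size
patch_size = 14         # vision_config.patch_size
downsample_ratio = 0.5  # top-level downsample_ratio

grid_side = image_size // patch_size                        # 32 patches per side
tokens_per_tile = int((grid_side * downsample_ratio) ** 2)  # 16 * 16
print(tokens_per_tile)  # 256
```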
configuration_intern_vit.py
ADDED
@@ -0,0 +1,119 @@
+# --------------------------------------------------------
+# InternVL
+# Copyright (c) 2024 OpenGVLab
+# Licensed under The MIT License [see LICENSE for details]
+# --------------------------------------------------------
+import os
+from typing import Union
+
+from transformers.configuration_utils import PretrainedConfig
+from transformers.utils import logging
+
+logger = logging.get_logger(__name__)
+
+
+class InternVisionConfig(PretrainedConfig):
+    r"""
+    This is the configuration class to store the configuration of a [`InternVisionModel`]. It is used to
+    instantiate a vision encoder according to the specified arguments, defining the model architecture.
+
+    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+    documentation from [`PretrainedConfig`] for more information.
+
+    Args:
+        num_channels (`int`, *optional*, defaults to 3):
+            Number of color channels in the input images (e.g., 3 for RGB).
+        patch_size (`int`, *optional*, defaults to 14):
+            The size (resolution) of each patch.
+        image_size (`int`, *optional*, defaults to 224):
+            The size (resolution) of each image.
+        qkv_bias (`bool`, *optional*, defaults to `False`):
+            Whether to add a bias to the queries and values in the self-attention layers.
+        hidden_size (`int`, *optional*, defaults to 3200):
+            Dimensionality of the encoder layers and the pooler layer.
+        num_attention_heads (`int`, *optional*, defaults to 25):
+            Number of attention heads for each attention layer in the Transformer encoder.
+        intermediate_size (`int`, *optional*, defaults to 12800):
+            Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
+        qk_normalization (`bool`, *optional*, defaults to `True`):
+            Whether to normalize the queries and keys in the self-attention layers.
+        num_hidden_layers (`int`, *optional*, defaults to 48):
+            Number of hidden layers in the Transformer encoder.
+        use_flash_attn (`bool`, *optional*, defaults to `True`):
+            Whether to use flash attention mechanism.
+        hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
+            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
+            `"relu"`, `"selu"` and `"gelu_new"` are supported.
+        layer_norm_eps (`float`, *optional*, defaults to 1e-6):
+            The epsilon used by the layer normalization layers.
+        dropout (`float`, *optional*, defaults to 0.0):
+            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
+        drop_path_rate (`float`, *optional*, defaults to 0.0):
+            Dropout rate for stochastic depth.
+        attention_dropout (`float`, *optional*, defaults to 0.0):
+            The dropout ratio for the attention probabilities.
+        initializer_range (`float`, *optional*, defaults to 0.02):
+            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+        initializer_factor (`float`, *optional*, defaults to 0.1):
+            A factor for layer scale.
+    """
+
+    model_type = 'intern_vit_6b'
+
+    def __init__(
+            self,
+            num_channels=3,
+            patch_size=14,
+            image_size=224,
+            qkv_bias=False,
+            hidden_size=3200,
+            num_attention_heads=25,
+            intermediate_size=12800,
+            qk_normalization=True,
+            num_hidden_layers=48,
+            use_flash_attn=True,
+            hidden_act='gelu',
+            norm_type='rms_norm',
+            layer_norm_eps=1e-6,
+            dropout=0.0,
+            drop_path_rate=0.0,
+            attention_dropout=0.0,
+            initializer_range=0.02,
+            initializer_factor=0.1,
+            **kwargs,
+    ):
+        super().__init__(**kwargs)
+
+        self.hidden_size = hidden_size
+        self.intermediate_size = intermediate_size
+        self.dropout = dropout
+        self.drop_path_rate = drop_path_rate
+        self.num_hidden_layers = num_hidden_layers
+        self.num_attention_heads = num_attention_heads
+        self.num_channels = num_channels
+        self.patch_size = patch_size
+        self.image_size = image_size
+        self.initializer_range = initializer_range
+        self.initializer_factor = initializer_factor
+        self.attention_dropout = attention_dropout
+        self.layer_norm_eps = layer_norm_eps
+        self.hidden_act = hidden_act
+        self.norm_type = norm_type
+        self.qkv_bias = qkv_bias
+        self.qk_normalization = qk_normalization
+        self.use_flash_attn = use_flash_attn
+
+    @classmethod
+    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> 'PretrainedConfig':
+        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
+
+        if 'vision_config' in config_dict:
+            config_dict = config_dict['vision_config']
+
+        if 'model_type' in config_dict and hasattr(cls, 'model_type') and config_dict['model_type'] != cls.model_type:
+            logger.warning(
+                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
+                f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.'
+            )
+
+        return cls.from_dict(config_dict, **kwargs)
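Note that some docstring defaults above (`image_size` 224, `num_hidden_layers` 48) differ from what this checkpoint actually ships in `config.json` (448 and 45). A minimal instantiation sketch with the checkpoint's values (the flat import path is an assumption; `configuration_nvlm_d.py` below imports this module relatively, as part of a package):

```python
from configuration_intern_vit import InternVisionConfig  # hypothetical flat layout

# Override the class defaults with the values from this checkpoint's config.json.
vision_cfg = InternVisionConfig(image_size=448, num_hidden_layers=45)
print(vision_cfg.hidden_size, vision_cfg.num_attention_heads, vision_cfg.patch_size)  # 3200 25 14
```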
configuration_nvlm_d.py
ADDED
@@ -0,0 +1,100 @@
+# --------------------------------------------------------
+# Adapted from https://huggingface.co/OpenGVLab/InternVL2-Llama3-76B under MIT License
+# LICENSE is in incl_licenses directory.
+# --------------------------------------------------------
+
+import copy
+
+from transformers import AutoConfig, Qwen2Config
+from transformers.configuration_utils import PretrainedConfig
+from transformers.utils import logging
+
+from .configuration_intern_vit import InternVisionConfig
+
+logger = logging.get_logger(__name__)
+
+
+class NVLM_D_Config(PretrainedConfig):
+    model_type = 'NVLM_D'
+    is_composition = True
+
+    def __init__(
+            self,
+            vision_config=None,
+            llm_config=None,
+            use_backbone_lora=0,
+            use_llm_lora=0,
+            select_layer=-1,
+            force_image_size=None,
+            downsample_ratio=0.5,
+            template=None,
+            dynamic_image_size=False,
+            use_thumbnail=False,
+            ps_version='v1',
+            min_dynamic_patch=1,
+            max_dynamic_patch=6,
+            **kwargs
+    ):
+        super().__init__(**kwargs)
+
+        # Handle vision_config initialization
+        if vision_config is None:
+            vision_config = {}
+            logger.info('vision_config is None. Initializing InternVisionConfig with default values.')
+
+        # Handle llm_config initialization
+        if llm_config is None:
+            llm_config = {}
+            logger.info('llm_config is None. Initializing LLM Config with default values.')
+
+        self.vision_config = InternVisionConfig(**vision_config)
+
+        # Check for supported architecture
+        if llm_config.get('architectures', [None])[0] == 'Qwen2ForCausalLM':
+            self.llm_config = Qwen2Config(**llm_config)
+        else:
+            raise ValueError(f"Unsupported architecture: {llm_config.get('architectures', [None])[0]}")
+
+        # Assign configuration values
+        self.use_backbone_lora = use_backbone_lora
+        self.use_llm_lora = use_llm_lora
+        self.select_layer = select_layer
+        self.force_image_size = force_image_size
+        self.downsample_ratio = downsample_ratio
+        self.template = template
+        self.dynamic_image_size = dynamic_image_size
+        self.use_thumbnail = use_thumbnail
+        self.ps_version = ps_version  # Pixel shuffle version
+        self.min_dynamic_patch = min_dynamic_patch
+        self.max_dynamic_patch = max_dynamic_patch
+
+        # Log important parameters
+        logger.info(f'vision_select_layer: {self.select_layer}')
+        logger.info(f'ps_version: {self.ps_version}')
+        logger.info(f'min_dynamic_patch: {self.min_dynamic_patch}')
+        logger.info(f'max_dynamic_patch: {self.max_dynamic_patch}')
+
+    def to_dict(self):
+        """
+        Serializes this instance to a Python dictionary. Overrides the default `PretrainedConfig.to_dict`.
+
+        Returns:
+            Dict[str, Any]: Dictionary of all the attributes that make up this configuration instance.
+        """
+        output = copy.deepcopy(self.__dict__)
+        output['vision_config'] = self.vision_config.to_dict()
+        output['llm_config'] = self.llm_config.to_dict()
+        output['model_type'] = self.model_type
+        output['use_backbone_lora'] = self.use_backbone_lora
+        output['use_llm_lora'] = self.use_llm_lora
+        output['select_layer'] = self.select_layer
+        output['force_image_size'] = self.force_image_size
+        output['downsample_ratio'] = self.downsample_ratio
+        output['template'] = self.template
+        output['dynamic_image_size'] = self.dynamic_image_size
+        output['use_thumbnail'] = self.use_thumbnail
+        output['ps_version'] = self.ps_version
+        output['min_dynamic_patch'] = self.min_dynamic_patch
+        output['max_dynamic_patch'] = self.max_dynamic_patch
+
+        return output
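A direct construction sketch (illustrative; in normal use `AutoModel.from_pretrained(..., trust_remote_code=True)` builds this config from `config.json`, and the flat import path is again an assumption). The key constraint from `__init__` is that `llm_config['architectures'][0]` must be `'Qwen2ForCausalLM'`, otherwise a `ValueError` is raised:

```python
from configuration_nvlm_d import NVLM_D_Config  # hypothetical flat layout

cfg = NVLM_D_Config(
    vision_config={'image_size': 448, 'num_hidden_layers': 45},
    llm_config={'architectures': ['Qwen2ForCausalLM']},  # any other architecture raises ValueError
    template='chatml',
    dynamic_image_size=True,
    use_thumbnail=True,
    ps_version='v2',
)
print(cfg.to_dict()['model_type'])  # NVLM_D
```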
conversation.py
ADDED
@@ -0,0 +1,358 @@
+"""
+Adapted from https://github.com/lm-sys/FastChat/blob/main/fastchat/conversation.py under the Apache License 2.0.
+LICENSE is in incl_licenses directory.
+
+Conversation prompt templates.
+
+We kindly request that you import fastchat instead of copying this file if you wish to use it.
+If you have changes in mind, please contribute back so the community can benefit collectively and continue to maintain these valuable templates.
+"""
+
+import dataclasses
+from enum import IntEnum, auto
+from typing import Any, Dict, List, Tuple, Union
+
+
+class SeparatorStyle(IntEnum):
+    """Separator styles."""
+
+    ADD_COLON_SINGLE = auto()
+    ADD_COLON_TWO = auto()
+    ADD_COLON_SPACE_SINGLE = auto()
+    NO_COLON_SINGLE = auto()
+    NO_COLON_TWO = auto()
+    ADD_NEW_LINE_SINGLE = auto()
+    LLAMA2 = auto()
+    CHATGLM = auto()
+    CHATML = auto()
+    CHATINTERN = auto()
+    DOLLY = auto()
+    RWKV = auto()
+    PHOENIX = auto()
+    ROBIN = auto()
+    FALCON_CHAT = auto()
+    CHATGLM3 = auto()
+    INTERNVL_ZH = auto()
+    MPT = auto()
+
+
+@dataclasses.dataclass
+class Conversation:
+    """A class that manages prompt templates and keeps all conversation history."""
+
+    # The name of this template
+    name: str
+    # The template of the system prompt
+    system_template: str = '{system_message}'
+    # The system message
+    system_message: str = ''
+    # The names of two roles
+    roles: Tuple[str] = ('USER', 'ASSISTANT')
+    # All messages. Each item is (role, message).
+    messages: List[List[str]] = ()
+    # The number of few shot examples
+    offset: int = 0
+    # The separator style and configurations
+    sep_style: SeparatorStyle = SeparatorStyle.ADD_COLON_SINGLE
+    sep: str = '\n'
+    sep2: str = None
+    # Stop criteria (the default one is EOS token)
+    stop_str: Union[str, List[str]] = None
+    # Stops generation if meeting any token in this list
+    stop_token_ids: List[int] = None
+
+    def get_prompt(self) -> str:
+        """Get the prompt for generation."""
+        system_prompt = self.system_template.format(system_message=self.system_message)
+        if self.sep_style == SeparatorStyle.ADD_COLON_SINGLE:
+            ret = system_prompt + self.sep
+            for role, message in self.messages:
+                if message:
+                    ret += role + ': ' + message + self.sep
+                else:
+                    ret += role + ':'
+            return ret
+        elif self.sep_style == SeparatorStyle.ADD_COLON_TWO:
+            seps = [self.sep, self.sep2]
+            ret = system_prompt + seps[0]
+            for i, (role, message) in enumerate(self.messages):
+                if message:
+                    ret += role + ': ' + message + seps[i % 2]
+                else:
+                    ret += role + ':'
+            return ret
+        elif self.sep_style == SeparatorStyle.ADD_COLON_SPACE_SINGLE:
+            ret = system_prompt + self.sep
+            for role, message in self.messages:
+                if message:
+                    ret += role + ': ' + message + self.sep
+                else:
+                    ret += role + ': '  # must end with a space
+            return ret
+        elif self.sep_style == SeparatorStyle.ADD_NEW_LINE_SINGLE:
+            ret = '' if system_prompt == '' else system_prompt + self.sep
+            for role, message in self.messages:
+                if message:
+                    ret += role + '\n' + message + self.sep
+                else:
+                    ret += role + '\n'
+            return ret
+        elif self.sep_style == SeparatorStyle.NO_COLON_SINGLE:
+            ret = system_prompt
+            for role, message in self.messages:
+                if message:
+                    ret += role + message + self.sep
+                else:
+                    ret += role
+            return ret
+        elif self.sep_style == SeparatorStyle.NO_COLON_TWO:
+            seps = [self.sep, self.sep2]
+            ret = system_prompt
+            for i, (role, message) in enumerate(self.messages):
+                if message:
+                    ret += role + message + seps[i % 2]
+                else:
+                    ret += role
+            return ret
+        elif self.sep_style == SeparatorStyle.RWKV:
+            ret = system_prompt
+            for i, (role, message) in enumerate(self.messages):
+                if message:
+                    ret += (
+                        role
+                        + ': '
+                        + message.replace('\r\n', '\n').replace('\n\n', '\n')
+                    )
+                    ret += '\n\n'
+                else:
+                    ret += role + ':'
+            return ret
+        elif self.sep_style == SeparatorStyle.LLAMA2:
+            seps = [self.sep, self.sep2]
+            if self.system_message:
+                ret = system_prompt
+            else:
+                ret = '[INST] '
+            for i, (role, message) in enumerate(self.messages):
+                tag = self.roles[i % 2]
+                if message:
+                    if i == 0:
+                        ret += message + ' '
+                    else:
+                        ret += tag + ' ' + message + seps[i % 2]
+                else:
+                    ret += tag
+            return ret
+        elif self.sep_style == SeparatorStyle.CHATGLM:
+            # source: https://huggingface.co/THUDM/chatglm-6b/blob/1d240ba371910e9282298d4592532d7f0f3e9f3e/modeling_chatglm.py#L1302-L1308
+            # source2: https://huggingface.co/THUDM/chatglm2-6b/blob/e186c891cf64310ac66ef10a87e6635fa6c2a579/modeling_chatglm.py#L926
+            round_add_n = 1 if self.name == 'chatglm2' else 0
+            if system_prompt:
+                ret = system_prompt + self.sep
+            else:
+                ret = ''
+
+            for i, (role, message) in enumerate(self.messages):
+                if i % 2 == 0:
+                    ret += f'[Round {i//2 + round_add_n}]{self.sep}'
+
+                if message:
+                    ret += f'{role}:{message}{self.sep}'
+                else:
+                    ret += f'{role}:'
+            return ret
+        elif self.sep_style == SeparatorStyle.CHATML:
+            ret = '' if system_prompt == '' else system_prompt + self.sep + '\n'
+            for role, message in self.messages:
+                if message:
+                    ret += role + '\n' + message + self.sep + '\n'
+                else:
+                    ret += role + '\n'
+            return ret
+        elif self.sep_style == SeparatorStyle.CHATGLM3:
+            ret = ''
+            if self.system_message:
+                ret += system_prompt
+            for role, message in self.messages:
+                if message:
+                    ret += role + '\n' + ' ' + message
+                else:
+                    ret += role
+            return ret
+        elif self.sep_style == SeparatorStyle.CHATINTERN:
+            # source: https://huggingface.co/internlm/internlm-chat-7b-8k/blob/bd546fa984b4b0b86958f56bf37f94aa75ab8831/modeling_internlm.py#L771
+            seps = [self.sep, self.sep2]
+            ret = system_prompt
+            for i, (role, message) in enumerate(self.messages):
+                # if i % 2 == 0:
+                #     ret += "<s>"
+                if message:
+                    ret += role + ':' + message + seps[i % 2] + '\n'
+                else:
+                    ret += role + ':'
+            return ret
+        elif self.sep_style == SeparatorStyle.DOLLY:
+            seps = [self.sep, self.sep2]
+            ret = system_prompt
+            for i, (role, message) in enumerate(self.messages):
+                if message:
+                    ret += role + ':\n' + message + seps[i % 2]
+                    if i % 2 == 1:
+                        ret += '\n\n'
+                else:
+                    ret += role + ':\n'
+            return ret
+        elif self.sep_style == SeparatorStyle.PHOENIX:
+            ret = system_prompt
+            for role, message in self.messages:
+                if message:
+                    ret += role + ': ' + '<s>' + message + '</s>'
+                else:
+                    ret += role + ': ' + '<s>'
+            return ret
+        elif self.sep_style == SeparatorStyle.ROBIN:
+            ret = system_prompt + self.sep
+            for role, message in self.messages:
+                if message:
+                    ret += role + ':\n' + message + self.sep
+                else:
+                    ret += role + ':\n'
+            return ret
+        elif self.sep_style == SeparatorStyle.FALCON_CHAT:
+            ret = ''
+            if self.system_message:
+                ret += system_prompt + self.sep
+            for role, message in self.messages:
+                if message:
+                    ret += role + ': ' + message + self.sep
+                else:
+                    ret += role + ':'
+
+            return ret
+        elif self.sep_style == SeparatorStyle.INTERNVL_ZH:
+            seps = [self.sep, self.sep2]
+            ret = self.system_message + seps[0]
+            for i, (role, message) in enumerate(self.messages):
+                if message:
+                    ret += role + ': ' + message + seps[i % 2]
+                else:
+                    ret += role + ':'
+            return ret
+        elif self.sep_style == SeparatorStyle.MPT:
+            ret = system_prompt + self.sep
+            for role, message in self.messages:
+                if message:
+                    if type(message) is tuple:
+                        message, _, _ = message
+                    ret += role + message + self.sep
+                else:
+                    ret += role
+            return ret
+        else:
+            raise ValueError(f'Invalid style: {self.sep_style}')
+
+    def set_system_message(self, system_message: str):
+        """Set the system message."""
+        self.system_message = system_message
+
+    def append_message(self, role: str, message: str):
+        """Append a new message."""
+        self.messages.append([role, message])
+
+    def update_last_message(self, message: str):
+        """Update the last output.
+
+        The last message is typically set to be None when constructing the prompt,
+        so we need to update it in-place after getting the response from a model.
+        """
+        self.messages[-1][1] = message
+
+    def to_gradio_chatbot(self):
+        """Convert the conversation to gradio chatbot format."""
+        ret = []
+        for i, (role, msg) in enumerate(self.messages[self.offset :]):
+            if i % 2 == 0:
+                ret.append([msg, None])
+            else:
+                ret[-1][-1] = msg
+        return ret
+
+    def to_openai_api_messages(self):
+        """Convert the conversation to OpenAI chat completion format."""
+        ret = [{'role': 'system', 'content': self.system_message}]
+
+        for i, (_, msg) in enumerate(self.messages[self.offset :]):
+            if i % 2 == 0:
+                ret.append({'role': 'user', 'content': msg})
+            else:
+                if msg is not None:
+                    ret.append({'role': 'assistant', 'content': msg})
+        return ret
+
+    def copy(self):
+        return Conversation(
+            name=self.name,
+            system_template=self.system_template,
+            system_message=self.system_message,
+            roles=self.roles,
+            messages=[[x, y] for x, y in self.messages],
+            offset=self.offset,
+            sep_style=self.sep_style,
+            sep=self.sep,
+            sep2=self.sep2,
+            stop_str=self.stop_str,
+            stop_token_ids=self.stop_token_ids,
+        )
+
+    def dict(self):
+        return {
+            'template_name': self.name,
+            'system_message': self.system_message,
+            'roles': self.roles,
+            'messages': self.messages,
+            'offset': self.offset,
+        }
+
+
+# A global registry for all conversation templates
+conv_templates: Dict[str, Conversation] = {}
+
+
+def register_conv_template(template: Conversation, override: bool = False):
+    """Register a new conversation template."""
+    if not override:
+        assert (
+            template.name not in conv_templates
+        ), f'{template.name} has been registered.'
+
+    conv_templates[template.name] = template
+
+
+def get_conv_template(name: str) -> Conversation:
+    """Get a conversation template."""
+    return conv_templates[name].copy()
+
+
+# Both Hermes-2 and internlm2-chat are chatml-format conversation templates. The difference
+# is that during training, the preprocessing function for the Hermes-2 template doesn't add
+# <s> at the beginning of the tokenized sequence, while the internlm2-chat template does.
+# Therefore, they are completely equivalent during inference.
+
+register_conv_template(
+    Conversation(
+        name='chatml',
+        system_template='<|im_start|>system\n{system_message}',
+        # note: The new system prompt was not used here to avoid changes in benchmark performance.
+        system_message='Answer the questions.',
+        roles=('<|im_start|>user\n', '<|im_start|>assistant\n'),
+        sep_style=SeparatorStyle.MPT,
+        sep='<|im_end|>',
+        stop_token_ids=[
+            2,
+            92543,
+            92542
+        ]
+    )
+)
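For reference, here is how the registered `chatml` template assembles a prompt, using only the helpers defined in this file (a short usage sketch):

```python
conv = get_conv_template('chatml')
conv.append_message(conv.roles[0], 'Hello, who are you?')  # '<|im_start|>user\n'
conv.append_message(conv.roles[1], None)                   # leave the assistant turn open
print(conv.get_prompt())
# <|im_start|>system
# Answer the questions.<|im_end|><|im_start|>user
# Hello, who are you?<|im_end|><|im_start|>assistant
```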
incl_licenses/LICENSE
ADDED
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2023 OpenGVLab
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
incl_licenses/LICENSE_2
ADDED
@@ -0,0 +1,201 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
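The appendix above is a short how-to: the boilerplate notice goes at the top of each source file, wrapped in that file format's comment syntax. A minimal sketch of what that wrapping looks like for a Python file such as the ones in this commit, with the bracketed fields left as placeholders rather than filled in for this repo:

    # Copyright [yyyy] [name of copyright owner]
    #
    # Licensed under the Apache License, Version 2.0 (the "License");
    # you may not use this file except in compliance with the License.
    # You may obtain a copy of the License at
    #
    #     http://www.apache.org/licenses/LICENSE-2.0
    #
    # Unless required by applicable law or agreed to in writing, software
    # distributed under the License is distributed on an "AS IS" BASIS,
    # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    # See the License for the specific language governing permissions and
    # limitations under the License.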
merges.txt
ADDED
The diff for this file is too large to render. See raw diff.
model-00001-of-00046.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3a78cdc2cb4c39111e3c409bae8058500c26ad66f939bd9fc367607de5a7b936
+size 3732884232
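Each of these shard entries is a Git LFS pointer file (per the spec URL on its first line), not the weights themselves: the pointer records only the SHA-256 digest and byte size of the blob that LFS fetches on checkout. A minimal sketch of checking a downloaded shard against its pointer, assuming the shard sits in the current directory (the function name and path are illustrative, not part of this repo):

    import hashlib
    from pathlib import Path

    def verify_lfs_pointer(blob_path, expected_oid, expected_size):
        # Compare a downloaded blob against the oid/size from its LFS pointer.
        p = Path(blob_path)
        if p.stat().st_size != expected_size:
            return False
        h = hashlib.sha256()
        with p.open("rb") as f:
            for chunk in iter(lambda: f.read(1 << 20), b""):  # 1 MiB chunks
                h.update(chunk)
        return h.hexdigest() == expected_oid

    # Values copied from the pointer above:
    print(verify_lfs_pointer(
        "model-00001-of-00046.safetensors",
        "3a78cdc2cb4c39111e3c409bae8058500c26ad66f939bd9fc367607de5a7b936",
        3732884232,
    ))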
model-00002-of-00046.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8676f7b0455aa1155634426125f7a5ef2677327ec5b5157926d14528b222948d
+size 3846327312
model-00003-of-00046.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b2a312cd5d4e8095cb613818a2f602f7ad07f1221c7b1967cfec833f833630bf
+size 3846327312
model-00004-of-00046.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:936e1fa418458a7cbe1bdb983b72066802738672bfa4a7a5954ce80b16cecab7
+size 3846327312
model-00005-of-00046.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:12472f7508046c1901ca08d0f2680d622f4a0da91027097b98166d2df3bc707a
+size 3846327312
model-00006-of-00046.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7d41bcb078fe921bc8671f7163a42eca9e3c8b2342a14cadae947992018445da
+size 3846327312
model-00007-of-00046.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:11a80dca0ec39185b3c36bc118f2b4dfd85465d5349a41b378b9334b1792e7a8
+size 3846327336
model-00008-of-00046.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e61a2385a5be6c28b60f4b8850ed78d35ba96334f36678d85fb93953a470fe4f
+size 3846327336
model-00009-of-00046.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:658fa23e07154250009f6bc14485fa77e98315f9db18e0301140334a86ea847a
+size 3846327336
model-00010-of-00046.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d48b3d0e9749a0ea43d1f99acf6916b5e8c0392c29075fa7b249d7605db9124e
+size 3846327336
model-00011-of-00046.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:efbc2d512bcb5477e23a7f4a4887a788ee0dfa6e711b2939818d7f4e48cbd9a6
+size 3846327336
model-00012-of-00046.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2047578d29d66084531761f53206b23fa753e6b669b6076a467c0eccff2357e6
+size 3846327336
model-00013-of-00046.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5a1c696a1e707f1c58086a5890dc50cb1e74c5d04c60b0ec591725872977faa5
+size 3846327336
model-00014-of-00046.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ed921455a6852117cd630f11958c72a7b93f5178f27b0510e077714f1c5cbff7
+size 3846327336
model-00015-of-00046.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:32bd67190c85ad495b6515d1f0992162b57677a7581f0eb513765781a67eedf3
+size 3846327336
model-00016-of-00046.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6fc6943dd51fdb17d61e1776a2fb996f9bc881e3cd1f83bc83933837d18cc5e7
+size 3846327336
model-00017-of-00046.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:83935d97a80083f6e5b6bd5ab497b130b0625736848ff145a460f8dbb600b72d
+size 3846327336
model-00018-of-00046.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f63f7979e9859c11b1c849547e7ff96c07f5fa2de3df7a388e1ff961f5ea8d1e
+size 3846327336
model-00019-of-00046.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a59dbd378197490411556841a67b6f537259f98deed3fc74135871aae9b96b62
+size 3846327336
model-00020-of-00046.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d5dd6b704854ef4d640f2c731b7e3765ed8bb6de9c0d20ddb31edb57ef529c2f
+size 3846327336
model-00021-of-00046.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6e0fda318009755689a0e2fc0b835251a60100f55d717ee029ced89ef94289c6
+size 3846327336
model-00022-of-00046.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e79e6f73ab02c8ea2063b2d75916ba56d023d876a3b9f271de00f014e70ef4ee
+size 3846327336
model-00023-of-00046.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:afcd0207ec7aae990e0a1ec35ca841df9dedf20ee7461e0dd1af5c380777da0c
+size 3846327336
model-00024-of-00046.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3ff60a64122e8e0b67cf639b27155be5cb63cbaa6807b76842d529ad2a43922a
+size 3846327336
model-00025-of-00046.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7cc40acba46b35444e50fc53e03c2091947e903236c09c93049799b7c4e55a89
+size 3846327336
model-00026-of-00046.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e30326cac00519f1994d1344a2e9fcc905d8bce4d3142d53937e0b534f7736ab
+size 3846327336
model-00027-of-00046.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2d404c28bd104d4c6b7c38cf498f9f8d6f89091435630c370141b8c931dfe6a4
+size 3846327336
model-00028-of-00046.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:db0a9bd4c0d42e228ccba1182f7f64f6803a45969526ff2a3763d21d9ba720a2
+size 3846327336
model-00029-of-00046.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:70804172e97c960e067d4b6b0c9a3c97ec8747acd6e61bec6a5d62b09934562c
+size 3846327336
model-00030-of-00046.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a3849a92246ab4e49e346304401dc526ee1f7d26f2f7a281acfd8970bb3c0792
+size 3846327336
model-00031-of-00046.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c117d4fdc67ad25b508d53cc9de2fc991eaa8f0c29e2c85361dbee431b426740
+size 3846327336
model-00032-of-00046.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8bc68e643780b8dc9ef47af669bb51033ab5d3229477c87efb647469d439b3cb
+size 3846327336
model-00033-of-00046.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c9528688a3ab66602ba583b53577b22b8fc70e21436d824bc2cacf66b2e0d12d
+size 3846327336
model-00034-of-00046.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5304592b6b5b28154783f5d93404b9391638a64ae5fcd3b8720cda5f6b22618e
+size 3846327336
model-00035-of-00046.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c515b0ddc6dde630472717f931ea097dbc57ef51f1416c9af2ff4150a1c46502
+size 3846327336
model-00036-of-00046.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:31f390b5fa800b1f23d96ad86ca6e56746a1a0bcbd1eac9c06308f6d10608023
+size 3846327336
model-00037-of-00046.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c8e346877ab270aaff0f2d1ccad11f609601ba345e94f819ad0187355c164cb0
+size 3846327336
model-00038-of-00046.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ee4e4bcf4092b2f3af48d3c611812cc6453af4f80c6e516583c37e038b35d813
+size 3846327336
model-00039-of-00046.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a8454eeeaf944e1ce74702c2197d8d04a266e87e13288c4f528ba00b5d20824f
+size 3846327336
model-00040-of-00046.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3a7f563b879f85851630efab934cf23a809ad8a4d775d042118393efb484118c
+size 3846327336