Upload folder using huggingface_hub
- README.md +44 -0
- config.json +23 -0
- model.safetensors +3 -0
README.md
ADDED
@@ -0,0 +1,44 @@
---
base_model:
- openai/clip-vit-base-patch32
datasets:
- timm/oxford-iiit-pet
metrics:
- accuracy
---

# Model Card

## Model Details

- Architecture: ViT-Base with patch size 32 (the vision encoder of openai/clip-vit-base-patch32)
- Training data: Oxford-IIIT Pet dataset (timm/oxford-iiit-pet)

## Training Details

Trained with the Adam optimizer at a constant learning rate of 1e-5 for 4000 steps (batch_size=32). Only the vision encoder is fine-tuned.
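
The training script itself is not part of this upload; the sketch below illustrates the described setup under an explicit assumption: the vision encoder is trained against frozen text embeddings of the class names (a common recipe when only the vision encoder is updated). `class_names`, `train_loader`, and the prompt template are hypothetical placeholders.

```python
# Minimal sketch of the described fine-tuning setup (not the original script).
# Assumption: classification logits are computed against frozen text embeddings
# of the pet breed names, and only the vision encoder receives gradient updates.
import torch
from transformers import CLIPModel, CLIPProcessor

model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")

# Freeze everything, then re-enable gradients only for the vision encoder.
for p in model.parameters():
    p.requires_grad = False
for p in model.vision_model.parameters():
    p.requires_grad = True

optimizer = torch.optim.Adam(model.vision_model.parameters(), lr=1e-5)

# class_names: the 37 Oxford-IIIT Pet breed names (placeholder, assumed available).
text_inputs = processor(text=[f"a photo of a {c}" for c in class_names],
                        return_tensors="pt", padding=True)
with torch.no_grad():
    text_emb = model.get_text_features(**text_inputs)
    text_emb = text_emb / text_emb.norm(dim=-1, keepdim=True)

# train_loader: placeholder DataLoader yielding (pixel_values, labels) with batch_size=32.
for step, (pixel_values, labels) in enumerate(train_loader):
    if step >= 4000:  # constant learning rate, 4000 steps
        break
    image_emb = model.get_image_features(pixel_values=pixel_values)
    image_emb = image_emb / image_emb.norm(dim=-1, keepdim=True)
    logits = model.logit_scale.exp() * image_emb @ text_emb.t()
    loss = torch.nn.functional.cross_entropy(logits, labels)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
```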

## Evaluation Results

Accuracy on the Oxford-IIIT Pet dataset:

- pre-trained: 0.8317149877548218
- fine-tuned: 0.9084667563438416

## Usage

Load the fine-tuned vision model:

```python
from transformers import CLIPVisionModel

vision_model = CLIPVisionModel.from_pretrained('tanganke/clip-vit-base-patch32_oxford-iiit-pet')
```
+
|
37 |
+
substitute the vision encoder of clip
|
38 |
+
|
39 |
+
```python
|
40 |
+
from transformers import CLIPModel
|
41 |
+
|
42 |
+
clip_model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
|
43 |
+
clip_model.vision_model.load_state_dict(vision_model.vision_model.state_dict())
|
44 |
+
```
|
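
With the fine-tuned weights loaded, `clip_model` behaves like any CLIP model and can classify pet images against text prompts. A minimal sketch; the prompt template and the class names shown are illustrative, not necessarily those used to produce the numbers above:

```python
import torch
from PIL import Image
from transformers import CLIPProcessor

processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")

class_names = ["Abyssinian", "Bengal", "beagle", "pug"]  # illustrative subset of the 37 breeds
image = Image.open("pet.jpg")  # placeholder image path

inputs = processor(text=[f"a photo of a {c}" for c in class_names],
                   images=image, return_tensors="pt", padding=True)

with torch.no_grad():
    outputs = clip_model(**inputs)

probs = outputs.logits_per_image.softmax(dim=-1)
print(dict(zip(class_names, probs[0].tolist())))
```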
config.json
ADDED
@@ -0,0 +1,23 @@
{
  "_name_or_path": "results/temp/",
  "architectures": [
    "CLIPVisionModel"
  ],
  "attention_dropout": 0.0,
  "dropout": 0.0,
  "hidden_act": "quick_gelu",
  "hidden_size": 768,
  "image_size": 224,
  "initializer_factor": 1.0,
  "initializer_range": 0.02,
  "intermediate_size": 3072,
  "layer_norm_eps": 1e-05,
  "model_type": "clip_vision_model",
  "num_attention_heads": 12,
  "num_channels": 3,
  "num_hidden_layers": 12,
  "patch_size": 32,
  "projection_dim": 512,
  "torch_dtype": "float32",
  "transformers_version": "4.39.1"
}
model.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:37119d9b7fe99b0c8c21bd38be893f1a7fae1b35bd696805a80c2fa6c9aee7d0
size 349847824