KerasHub
Divyasreepat committed
Commit 6eb610d
1 parent: 40f941e

Upload folder using huggingface_hub

Files changed (8)
  1. README.md +14 -0
  2. config.json +94 -0
  3. image_converter.json +25 -0
  4. metadata.json +6 -0
  5. model.weights.h5 +3 -0
  6. preprocessor.json +43 -0
  7. task.json +145 -0
  8. task.weights.h5 +3 -0
README.md ADDED
@@ -0,0 +1,14 @@
+ ---
+ library_name: keras-hub
+ ---
+ This is a [`SAM` model](https://keras.io/api/keras_hub/models/sam) uploaded using the KerasHub library; it can be used with the JAX, TensorFlow, and PyTorch backends.
+ This model is related to an `ImageSegmenter` task.
+
+ Model config:
+ * **name:** sam_backbone
+ * **trainable:** True
+ * **image_encoder:** {'module': 'keras_hub.src.models.vit_det.vit_det_backbone', 'class_name': 'ViTDetBackbone', 'config': {'name': 'vi_t_det_backbone', 'trainable': True, 'image_shape': [1024, 1024, 3], 'patch_size': 16, 'hidden_size': 1024, 'num_layers': 24, 'intermediate_dim': 4096, 'num_heads': 16, 'num_output_channels': 256, 'use_bias': True, 'use_abs_pos': True, 'use_rel_pos': True, 'window_size': 14, 'global_attention_layer_indices': [5, 11, 17, 23], 'layer_norm_epsilon': 1e-06}, 'registered_name': 'keras_hub>ViTDetBackbone'}
+ * **prompt_encoder:** {'module': 'keras_hub.src.models.sam.sam_prompt_encoder', 'class_name': 'SAMPromptEncoder', 'config': {'name': 'sam_prompt_encoder', 'trainable': True, 'dtype': {'module': 'keras', 'class_name': 'DTypePolicy', 'config': {'name': 'float32'}, 'registered_name': None}, 'hidden_size': 256, 'image_embedding_size': [64, 64], 'input_image_size': [1024, 1024], 'mask_in_channels': 16, 'activation': 'gelu'}, 'registered_name': 'keras_hub>SAMPromptEncoder'}
+ * **mask_decoder:** {'module': 'keras_hub.src.models.sam.sam_mask_decoder', 'class_name': 'SAMMaskDecoder', 'config': {'name': 'sam_mask_decoder', 'trainable': True, 'dtype': {'module': 'keras', 'class_name': 'DTypePolicy', 'config': {'name': 'float32'}, 'registered_name': None}, 'hidden_size': 256, 'num_layers': 2, 'intermediate_dim': 2048, 'num_heads': 8, 'embedding_dim': 256, 'num_multimask_outputs': 3, 'iou_head_depth': 3, 'iou_head_hidden_dim': 256, 'activation': 'gelu'}, 'registered_name': 'keras_hub>SAMMaskDecoder'}
+
+ This model card has been generated automatically and should be completed by the model author. See [Model Cards documentation](https://huggingface.co/docs/hub/model-cards) for more information.
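
The README stops short of a usage snippet, so here is a minimal, hedged sketch of loading this checkpoint with KerasHub. The `hf://<user>/<repo>` handle below is a placeholder for this repository's actual Hub id, which is not spelled out above.

```python
# Minimal sketch: loading this upload as an ImageSegmenter task with KerasHub.
# "hf://<user>/<repo>" is a placeholder handle, not this repository's verified id.
import keras_hub

segmenter = keras_hub.models.SAMImageSegmenter.from_preset("hf://<user>/<repo>")
segmenter.summary()  # SAM backbone: image encoder + prompt encoder + mask decoder
```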
config.json ADDED
@@ -0,0 +1,94 @@
+ {
+ "module": "keras_hub.src.models.sam.sam_backbone",
+ "class_name": "SAMBackbone",
+ "config": {
+ "name": "sam_backbone",
+ "trainable": true,
+ "image_encoder": {
+ "module": "keras_hub.src.models.vit_det.vit_det_backbone",
+ "class_name": "ViTDetBackbone",
+ "config": {
+ "name": "vi_t_det_backbone",
+ "trainable": true,
+ "image_shape": [
+ 1024,
+ 1024,
+ 3
+ ],
+ "patch_size": 16,
+ "hidden_size": 1024,
+ "num_layers": 24,
+ "intermediate_dim": 4096,
+ "num_heads": 16,
+ "num_output_channels": 256,
+ "use_bias": true,
+ "use_abs_pos": true,
+ "use_rel_pos": true,
+ "window_size": 14,
+ "global_attention_layer_indices": [
+ 5,
+ 11,
+ 17,
+ 23
+ ],
+ "layer_norm_epsilon": 1e-06
+ },
+ "registered_name": "keras_hub>ViTDetBackbone"
+ },
+ "prompt_encoder": {
+ "module": "keras_hub.src.models.sam.sam_prompt_encoder",
+ "class_name": "SAMPromptEncoder",
+ "config": {
+ "name": "sam_prompt_encoder",
+ "trainable": true,
+ "dtype": {
+ "module": "keras",
+ "class_name": "DTypePolicy",
+ "config": {
+ "name": "float32"
+ },
+ "registered_name": null
+ },
+ "hidden_size": 256,
+ "image_embedding_size": [
+ 64,
+ 64
+ ],
+ "input_image_size": [
+ 1024,
+ 1024
+ ],
+ "mask_in_channels": 16,
+ "activation": "gelu"
+ },
+ "registered_name": "keras_hub>SAMPromptEncoder"
+ },
+ "mask_decoder": {
+ "module": "keras_hub.src.models.sam.sam_mask_decoder",
+ "class_name": "SAMMaskDecoder",
+ "config": {
+ "name": "sam_mask_decoder",
+ "trainable": true,
+ "dtype": {
+ "module": "keras",
+ "class_name": "DTypePolicy",
+ "config": {
+ "name": "float32"
+ },
+ "registered_name": null
+ },
+ "hidden_size": 256,
+ "num_layers": 2,
+ "intermediate_dim": 2048,
+ "num_heads": 8,
+ "embedding_dim": 256,
+ "num_multimask_outputs": 3,
+ "iou_head_depth": 3,
+ "iou_head_hidden_dim": 256,
+ "activation": "gelu"
+ },
+ "registered_name": "keras_hub>SAMMaskDecoder"
+ }
+ },
+ "registered_name": "keras_hub>SAMBackbone"
+ }
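
Because the serialized backbone carries a `registered_name`, the config above can be turned back into a `SAMBackbone` with standard Keras deserialization. A sketch, assuming `config.json` and `model.weights.h5` sit in the working directory and that `model.weights.h5` holds the backbone weights saved alongside this config:

```python
# Sketch: rebuilding the SAMBackbone architecture from config.json.
# Importing keras_hub registers the "keras_hub>SAMBackbone" class that
# "registered_name" points at; without the import, deserialization fails.
import json

import keras
import keras_hub  # noqa: F401  (needed so registered classes resolve)

with open("config.json") as f:
    backbone_config = json.load(f)

backbone = keras.saving.deserialize_keras_object(backbone_config)

# Assumption: model.weights.h5 stores the backbone weights for this preset.
backbone.load_weights("model.weights.h5")
```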
image_converter.json ADDED
@@ -0,0 +1,25 @@
+ {
+ "module": "keras_hub.src.models.sam.sam_image_converter",
+ "class_name": "SAMImageConverter",
+ "config": {
+ "name": "sam_image_converter",
+ "trainable": true,
+ "dtype": {
+ "module": "keras",
+ "class_name": "DTypePolicy",
+ "config": {
+ "name": "float32"
+ },
+ "registered_name": null
+ },
+ "image_size": [
+ 1024,
+ 1024
+ ],
+ "scale": 0.00392156862745098,
+ "offset": null,
+ "interpolation": "bilinear",
+ "crop_to_aspect_ratio": true
+ },
+ "registered_name": "keras_hub>SAMImageConverter"
+ }
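
Two details of the image converter config are easy to miss: `scale` is exactly 1/255, so uint8 pixels end up in [0, 1] with no offset, and images are resized to 1024x1024 with bilinear interpolation, cropping to preserve aspect ratio. A rough sketch of the equivalent transform (this mirrors the config values, not KerasHub's internal code path):

```python
# Rough equivalent of the SAMImageConverter settings above (illustrative only).
import numpy as np
import keras

image = np.random.randint(0, 256, size=(720, 1280, 3), dtype="uint8")

resized = keras.ops.image.resize(
    image.astype("float32"),
    size=(1024, 1024),          # "image_size": [1024, 1024]
    interpolation="bilinear",   # "interpolation": "bilinear"
    crop_to_aspect_ratio=True,  # "crop_to_aspect_ratio": true
)
scaled = resized * 0.00392156862745098  # "scale" == 1 / 255, "offset" is null
print(scaled.shape, float(keras.ops.max(scaled)))  # (1024, 1024, 3), at most 1.0
```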
metadata.json ADDED
@@ -0,0 +1,6 @@
+ {
+ "keras_version": "3.5.0",
+ "keras_hub_version": "0.16.1",
+ "parameter_count": 308284748,
+ "date_saved": "2024-10-18@22:58:05"
+ }
model.weights.h5 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f947d43f1a6678cee74da64850d7da47ff4ef742f363c6915c041dab5908179c
+ size 1250405448
preprocessor.json ADDED
@@ -0,0 +1,43 @@
+ {
+ "module": "keras_hub.src.models.sam.sam_image_segmenter_preprocessor",
+ "class_name": "SAMImageSegmenterPreprocessor",
+ "config": {
+ "name": "sam_image_segmenter_preprocessor",
+ "trainable": true,
+ "dtype": {
+ "module": "keras",
+ "class_name": "DTypePolicy",
+ "config": {
+ "name": "float32"
+ },
+ "registered_name": null
+ },
+ "image_converter": {
+ "module": "keras_hub.src.models.sam.sam_image_converter",
+ "class_name": "SAMImageConverter",
+ "config": {
+ "name": "sam_image_converter",
+ "trainable": true,
+ "dtype": {
+ "module": "keras",
+ "class_name": "DTypePolicy",
+ "config": {
+ "name": "float32"
+ },
+ "registered_name": null
+ },
+ "image_size": [
+ 1024,
+ 1024
+ ],
+ "scale": 0.00392156862745098,
+ "offset": null,
+ "interpolation": "bilinear",
+ "crop_to_aspect_ratio": true
+ },
+ "registered_name": "keras_hub>SAMImageConverter"
+ },
+ "config_file": "preprocessor.json"
+ },
+ "registered_name": "keras_hub>SAMImageSegmenterPreprocessor"
+ }
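
The preprocessor simply wraps the image converter above, so loading it on its own mostly serves to confirm that the resize and rescale settings travel with the task. A small sketch; the `hf://<user>/<repo>` handle is again a placeholder, and the attribute names assume KerasHub's preprocessor API:

```python
# Sketch: instantiating only the preprocessor from the preset and checking
# that it carries the SAMImageConverter configured in image_converter.json.
import keras_hub

preprocessor = keras_hub.models.SAMImageSegmenterPreprocessor.from_preset(
    "hf://<user>/<repo>"  # placeholder handle
)
print(preprocessor.image_converter.image_size)  # expected: (1024, 1024)
```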
task.json ADDED
@@ -0,0 +1,145 @@
+ {
+ "module": "keras_hub.src.models.sam.sam_image_segmenter",
+ "class_name": "SAMImageSegmenter",
+ "config": {
+ "backbone": {
+ "module": "keras_hub.src.models.sam.sam_backbone",
+ "class_name": "SAMBackbone",
+ "config": {
+ "name": "sam_backbone",
+ "trainable": true,
+ "image_encoder": {
+ "module": "keras_hub.src.models.vit_det.vit_det_backbone",
+ "class_name": "ViTDetBackbone",
+ "config": {
+ "name": "vi_t_det_backbone",
+ "trainable": true,
+ "image_shape": [
+ 1024,
+ 1024,
+ 3
+ ],
+ "patch_size": 16,
+ "hidden_size": 1024,
+ "num_layers": 24,
+ "intermediate_dim": 4096,
+ "num_heads": 16,
+ "num_output_channels": 256,
+ "use_bias": true,
+ "use_abs_pos": true,
+ "use_rel_pos": true,
+ "window_size": 14,
+ "global_attention_layer_indices": [
+ 5,
+ 11,
+ 17,
+ 23
+ ],
+ "layer_norm_epsilon": 1e-06
+ },
+ "registered_name": "keras_hub>ViTDetBackbone"
+ },
+ "prompt_encoder": {
+ "module": "keras_hub.src.models.sam.sam_prompt_encoder",
+ "class_name": "SAMPromptEncoder",
+ "config": {
+ "name": "sam_prompt_encoder",
+ "trainable": true,
+ "dtype": {
+ "module": "keras",
+ "class_name": "DTypePolicy",
+ "config": {
+ "name": "float32"
+ },
+ "registered_name": null
+ },
+ "hidden_size": 256,
+ "image_embedding_size": [
+ 64,
+ 64
+ ],
+ "input_image_size": [
+ 1024,
+ 1024
+ ],
+ "mask_in_channels": 16,
+ "activation": "gelu"
+ },
+ "registered_name": "keras_hub>SAMPromptEncoder"
+ },
+ "mask_decoder": {
+ "module": "keras_hub.src.models.sam.sam_mask_decoder",
+ "class_name": "SAMMaskDecoder",
+ "config": {
+ "name": "sam_mask_decoder",
+ "trainable": true,
+ "dtype": {
+ "module": "keras",
+ "class_name": "DTypePolicy",
+ "config": {
+ "name": "float32"
+ },
+ "registered_name": null
+ },
+ "hidden_size": 256,
+ "num_layers": 2,
+ "intermediate_dim": 2048,
+ "num_heads": 8,
+ "embedding_dim": 256,
+ "num_multimask_outputs": 3,
+ "iou_head_depth": 3,
+ "iou_head_hidden_dim": 256,
+ "activation": "gelu"
+ },
+ "registered_name": "keras_hub>SAMMaskDecoder"
+ }
+ },
+ "registered_name": "keras_hub>SAMBackbone"
+ },
+ "preprocessor": {
+ "module": "keras_hub.src.models.sam.sam_image_segmenter_preprocessor",
+ "class_name": "SAMImageSegmenterPreprocessor",
+ "config": {
+ "name": "sam_image_segmenter_preprocessor",
+ "trainable": true,
+ "dtype": {
+ "module": "keras",
+ "class_name": "DTypePolicy",
+ "config": {
+ "name": "float32"
+ },
+ "registered_name": null
+ },
+ "image_converter": {
+ "module": "keras_hub.src.models.sam.sam_image_converter",
+ "class_name": "SAMImageConverter",
+ "config": {
+ "name": "sam_image_converter",
+ "trainable": true,
+ "dtype": {
+ "module": "keras",
+ "class_name": "DTypePolicy",
+ "config": {
+ "name": "float32"
+ },
+ "registered_name": null
+ },
+ "image_size": [
+ 1024,
+ 1024
+ ],
+ "scale": 0.00392156862745098,
+ "offset": null,
+ "interpolation": "bilinear",
+ "crop_to_aspect_ratio": true
+ },
+ "registered_name": "keras_hub>SAMImageConverter"
+ },
+ "config_file": "preprocessor.json"
+ },
+ "registered_name": "keras_hub>SAMImageSegmenterPreprocessor"
+ },
+ "name": "sam_image_segmenter"
+ },
+ "registered_name": "keras_hub>SAMImageSegmenter"
+ }
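
task.json wires the backbone and the preprocessor into a single `SAMImageSegmenter`, which is the object most users will want. A hedged end-to-end sketch with one foreground point prompt, following the prompt layout in the SAM documentation linked from the README; depending on the KerasHub version, unused prompt types ("boxes", "masks") may also need to be supplied as zero-length arrays:

```python
# Sketch: point-prompted segmentation with the full task. The prompt format
# ((x, y) pixel coordinates, label 1 = foreground) follows the Keras SAM
# examples; "hf://<user>/<repo>" is a placeholder handle.
import numpy as np
import keras_hub

segmenter = keras_hub.models.SAMImageSegmenter.from_preset("hf://<user>/<repo>")

image = np.zeros((1, 1024, 1024, 3), dtype="float32")  # stand-in for a real image
outputs = segmenter.predict({
    "images": image,
    "points": np.array([[[512.0, 512.0]]], dtype="float32"),
    "labels": np.array([[1.0]], dtype="float32"),
})
masks, iou_scores = outputs["masks"], outputs["iou_pred"]
```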
task.weights.h5 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5abc7e8ee08858039ae1643d5be087a9745c0335a3a1724e091e2101fd600923
+ size 1250416296