koala2 committed
Commit
c526beb
1 Parent(s): 8fe73cf

End of training

Files changed (44)
  1. README.md +22 -0
  2. checkpoint-200/optimizer.bin +3 -0
  3. checkpoint-200/random_states_0.pkl +3 -0
  4. checkpoint-200/scheduler.bin +3 -0
  5. checkpoint-200/text_encoder/config.json +25 -0
  6. checkpoint-200/text_encoder/pytorch_model.bin +3 -0
  7. checkpoint-200/unet/config.json +62 -0
  8. checkpoint-200/unet/diffusion_pytorch_model.bin +3 -0
  9. checkpoint-400/optimizer.bin +3 -0
  10. checkpoint-400/random_states_0.pkl +3 -0
  11. checkpoint-400/scheduler.bin +3 -0
  12. checkpoint-400/text_encoder/config.json +25 -0
  13. checkpoint-400/text_encoder/pytorch_model.bin +3 -0
  14. checkpoint-400/unet/config.json +62 -0
  15. checkpoint-400/unet/diffusion_pytorch_model.bin +3 -0
  16. checkpoint-600/optimizer.bin +3 -0
  17. checkpoint-600/random_states_0.pkl +3 -0
  18. checkpoint-600/scheduler.bin +3 -0
  19. checkpoint-600/text_encoder/config.json +25 -0
  20. checkpoint-600/text_encoder/pytorch_model.bin +3 -0
  21. checkpoint-600/unet/config.json +62 -0
  22. checkpoint-600/unet/diffusion_pytorch_model.bin +3 -0
  23. checkpoint-800/optimizer.bin +3 -0
  24. checkpoint-800/random_states_0.pkl +3 -0
  25. checkpoint-800/scheduler.bin +3 -0
  26. checkpoint-800/text_encoder/config.json +25 -0
  27. checkpoint-800/text_encoder/pytorch_model.bin +3 -0
  28. checkpoint-800/unet/config.json +62 -0
  29. checkpoint-800/unet/diffusion_pytorch_model.bin +3 -0
  30. feature_extractor/preprocessor_config.json +28 -0
  31. model_index.json +33 -0
  32. safety_checker/config.json +168 -0
  33. safety_checker/pytorch_model.bin +3 -0
  34. scheduler/scheduler_config.json +14 -0
  35. text_encoder/config.json +25 -0
  36. text_encoder/pytorch_model.bin +3 -0
  37. tokenizer/merges.txt +0 -0
  38. tokenizer/special_tokens_map.json +24 -0
  39. tokenizer/tokenizer_config.json +33 -0
  40. tokenizer/vocab.json +0 -0
  41. unet/config.json +62 -0
  42. unet/diffusion_pytorch_model.bin +3 -0
  43. vae/config.json +31 -0
  44. vae/diffusion_pytorch_model.bin +3 -0
README.md ADDED
@@ -0,0 +1,22 @@
+
+ ---
+ license: creativeml-openrail-m
+ base_model: SG161222/Realistic_Vision_V2.0
+ instance_prompt: a coloring page of a fashion girl
+ tags:
+ - stable-diffusion
+ - stable-diffusion-diffusers
+ - text-to-image
+ - diffusers
+ - dreambooth
+ inference: true
+ ---
+
+ # DreamBooth - koala2/fashion-crops-1-50-prior-Realistic_Vision_V2.0
+
+ This is a DreamBooth model derived from SG161222/Realistic_Vision_V2.0. The weights were trained on the instance prompt "a coloring page of a fashion girl" using [DreamBooth](https://dreambooth.github.io/).
+ Example images can be found below.
+
+
+
+ DreamBooth for the text encoder was enabled: True.
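
The fine-tuned pipeline can be loaded straight from the Hub with `diffusers`. A minimal sketch, assuming the repo id from the heading above and the prompt from the card's `instance_prompt`:

```python
import torch
from diffusers import StableDiffusionPipeline

# Repo id taken from the README heading above; prompt from `instance_prompt`.
pipe = StableDiffusionPipeline.from_pretrained(
    "koala2/fashion-crops-1-50-prior-Realistic_Vision_V2.0",
    torch_dtype=torch.float16,
)
pipe = pipe.to("cuda")

image = pipe("a coloring page of a fashion girl").images[0]
image.save("coloring_page.png")
```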
checkpoint-200/optimizer.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:63cc836135aff1bb6272022c0038e0d8af34eaada58c25c7c5f7ea35e28b32f1
+ size 1972598113
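
The three-line diffs above and below are Git LFS pointer stubs (spec version, SHA-256 object id, byte size), not the weights themselves; this optimizer state, for example, is roughly 1.97 GB. A sketch for resolving one pointer to its actual payload via `huggingface_hub`, assuming the repo id from the README:

```python
from huggingface_hub import hf_hub_download

# Downloads the ~1.97 GB blob the LFS pointer above refers to
# and returns a local cache path.
path = hf_hub_download(
    repo_id="koala2/fashion-crops-1-50-prior-Realistic_Vision_V2.0",
    filename="checkpoint-200/optimizer.bin",
)
print(path)
```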
checkpoint-200/random_states_0.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:950da4893415f6a357e7dc94065294888ba115d38260f41b329e6b9446df822c
+ size 14727
checkpoint-200/scheduler.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2506c8e06991ad2bd36b3413df8ccc97614b8b68c7b02caa3931bb0f43023263
+ size 563
checkpoint-200/text_encoder/config.json ADDED
@@ -0,0 +1,25 @@
+ {
+ "_name_or_path": "SG161222/Realistic_Vision_V2.0",
+ "architectures": [
+ "CLIPTextModel"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 0,
+ "dropout": 0.0,
+ "eos_token_id": 2,
+ "hidden_act": "quick_gelu",
+ "hidden_size": 768,
+ "initializer_factor": 1.0,
+ "initializer_range": 0.02,
+ "intermediate_size": 3072,
+ "layer_norm_eps": 1e-05,
+ "max_position_embeddings": 77,
+ "model_type": "clip_text_model",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 12,
+ "pad_token_id": 1,
+ "projection_dim": 768,
+ "torch_dtype": "float32",
+ "transformers_version": "4.29.2",
+ "vocab_size": 49408
+ }
checkpoint-200/text_encoder/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2c5332934b252afd64675078d69bc9e5258c3545b2de1694001840fff2302c3d
+ size 492309793
checkpoint-200/unet/config.json ADDED
@@ -0,0 +1,62 @@
+ {
+ "_class_name": "UNet2DConditionModel",
+ "_diffusers_version": "0.17.0.dev0",
+ "_name_or_path": "SG161222/Realistic_Vision_V2.0",
+ "act_fn": "silu",
+ "addition_embed_type": null,
+ "addition_embed_type_num_heads": 64,
+ "attention_head_dim": 8,
+ "block_out_channels": [
+ 320,
+ 640,
+ 1280,
+ 1280
+ ],
+ "center_input_sample": false,
+ "class_embed_type": null,
+ "class_embeddings_concat": false,
+ "conv_in_kernel": 3,
+ "conv_out_kernel": 3,
+ "cross_attention_dim": 768,
+ "cross_attention_norm": null,
+ "down_block_types": [
+ "CrossAttnDownBlock2D",
+ "CrossAttnDownBlock2D",
+ "CrossAttnDownBlock2D",
+ "DownBlock2D"
+ ],
+ "downsample_padding": 1,
+ "dual_cross_attention": false,
+ "encoder_hid_dim": null,
+ "encoder_hid_dim_type": null,
+ "flip_sin_to_cos": true,
+ "freq_shift": 0,
+ "in_channels": 4,
+ "layers_per_block": 2,
+ "mid_block_only_cross_attention": null,
+ "mid_block_scale_factor": 1,
+ "mid_block_type": "UNetMidBlock2DCrossAttn",
+ "norm_eps": 1e-05,
+ "norm_num_groups": 32,
+ "num_class_embeds": null,
+ "only_cross_attention": false,
+ "out_channels": 4,
+ "projection_class_embeddings_input_dim": null,
+ "resnet_out_scale_factor": 1.0,
+ "resnet_skip_time_act": false,
+ "resnet_time_scale_shift": "default",
+ "sample_size": 64,
+ "time_cond_proj_dim": null,
+ "time_embedding_act_fn": null,
+ "time_embedding_dim": null,
+ "time_embedding_type": "positional",
+ "timestep_post_act": null,
+ "up_block_types": [
+ "UpBlock2D",
+ "CrossAttnUpBlock2D",
+ "CrossAttnUpBlock2D",
+ "CrossAttnUpBlock2D"
+ ],
+ "upcast_attention": false,
+ "use_linear_projection": false
+ }
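
This is a standard Stable Diffusion 1.x UNet configuration: `cross_attention_dim` of 768 matches the CLIP text encoder's `hidden_size`, and `sample_size` of 64 corresponds to 512x512-pixel images after the VAE's 8x downsampling. A sketch for loading this checkpoint's UNet on its own, assuming the repo id from the README and that `subfolder` accepts the nested checkpoint path:

```python
from diffusers import UNet2DConditionModel

# Loads the step-200 UNet (not the final one at the repo root).
unet = UNet2DConditionModel.from_pretrained(
    "koala2/fashion-crops-1-50-prior-Realistic_Vision_V2.0",
    subfolder="checkpoint-200/unet",
)
print(unet.config.sample_size)  # 64 -> 512x512 pixel images
```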
checkpoint-200/unet/diffusion_pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c9dc54b443b20a195184909e3b3642ab7ebb52880a90093c4130c39ec84e5192
+ size 3438375973
checkpoint-400/optimizer.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8ad9e27108878d99a5c44b57da0f039e5acff2ae95319adb26d01d06978dfac5
+ size 1972599009
checkpoint-400/random_states_0.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:94ea467f48143877282138ceb6421482f6e8bc390ff3acf25ab638d273d629f0
+ size 14727
checkpoint-400/scheduler.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e0b6558f71b033f5157aee061de7d2ff3ea2d2ab35af6b857fee29819d9a0755
+ size 563
checkpoint-400/text_encoder/config.json ADDED
@@ -0,0 +1,25 @@
+ {
+ "_name_or_path": "SG161222/Realistic_Vision_V2.0",
+ "architectures": [
+ "CLIPTextModel"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 0,
+ "dropout": 0.0,
+ "eos_token_id": 2,
+ "hidden_act": "quick_gelu",
+ "hidden_size": 768,
+ "initializer_factor": 1.0,
+ "initializer_range": 0.02,
+ "intermediate_size": 3072,
+ "layer_norm_eps": 1e-05,
+ "max_position_embeddings": 77,
+ "model_type": "clip_text_model",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 12,
+ "pad_token_id": 1,
+ "projection_dim": 768,
+ "torch_dtype": "float32",
+ "transformers_version": "4.29.2",
+ "vocab_size": 49408
+ }
checkpoint-400/text_encoder/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d83bbefe65f514e3fe0dc3a711689bb1004aa10b8fb048babe1247f8daea8734
+ size 492309793
checkpoint-400/unet/config.json ADDED
@@ -0,0 +1,62 @@
+ {
+ "_class_name": "UNet2DConditionModel",
+ "_diffusers_version": "0.17.0.dev0",
+ "_name_or_path": "SG161222/Realistic_Vision_V2.0",
+ "act_fn": "silu",
+ "addition_embed_type": null,
+ "addition_embed_type_num_heads": 64,
+ "attention_head_dim": 8,
+ "block_out_channels": [
+ 320,
+ 640,
+ 1280,
+ 1280
+ ],
+ "center_input_sample": false,
+ "class_embed_type": null,
+ "class_embeddings_concat": false,
+ "conv_in_kernel": 3,
+ "conv_out_kernel": 3,
+ "cross_attention_dim": 768,
+ "cross_attention_norm": null,
+ "down_block_types": [
+ "CrossAttnDownBlock2D",
+ "CrossAttnDownBlock2D",
+ "CrossAttnDownBlock2D",
+ "DownBlock2D"
+ ],
+ "downsample_padding": 1,
+ "dual_cross_attention": false,
+ "encoder_hid_dim": null,
+ "encoder_hid_dim_type": null,
+ "flip_sin_to_cos": true,
+ "freq_shift": 0,
+ "in_channels": 4,
+ "layers_per_block": 2,
+ "mid_block_only_cross_attention": null,
+ "mid_block_scale_factor": 1,
+ "mid_block_type": "UNetMidBlock2DCrossAttn",
+ "norm_eps": 1e-05,
+ "norm_num_groups": 32,
+ "num_class_embeds": null,
+ "only_cross_attention": false,
+ "out_channels": 4,
+ "projection_class_embeddings_input_dim": null,
+ "resnet_out_scale_factor": 1.0,
+ "resnet_skip_time_act": false,
+ "resnet_time_scale_shift": "default",
+ "sample_size": 64,
+ "time_cond_proj_dim": null,
+ "time_embedding_act_fn": null,
+ "time_embedding_dim": null,
+ "time_embedding_type": "positional",
+ "timestep_post_act": null,
+ "up_block_types": [
+ "UpBlock2D",
+ "CrossAttnUpBlock2D",
+ "CrossAttnUpBlock2D",
+ "CrossAttnUpBlock2D"
+ ],
+ "upcast_attention": false,
+ "use_linear_projection": false
+ }
checkpoint-400/unet/diffusion_pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:27c8a591d23b79e179a5e1353457547ddab5f2b2269d9eb789333defbc39e505
+ size 3438375973
checkpoint-600/optimizer.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b1535c4b90e43a0f54e03f60950b2e035d7f9a36f6eb248a0865902fbd769192
+ size 1972599009
checkpoint-600/random_states_0.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c221bf12c74faf611135e35149d36129f97eedc81bc5b5ce29040f4378a5cce5
+ size 14727
checkpoint-600/scheduler.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:18f48f0c34d4b9ce17d0bb3248c44087fb8338e704ff78930b98ecb0142e57e0
+ size 563
checkpoint-600/text_encoder/config.json ADDED
@@ -0,0 +1,25 @@
+ {
+ "_name_or_path": "SG161222/Realistic_Vision_V2.0",
+ "architectures": [
+ "CLIPTextModel"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 0,
+ "dropout": 0.0,
+ "eos_token_id": 2,
+ "hidden_act": "quick_gelu",
+ "hidden_size": 768,
+ "initializer_factor": 1.0,
+ "initializer_range": 0.02,
+ "intermediate_size": 3072,
+ "layer_norm_eps": 1e-05,
+ "max_position_embeddings": 77,
+ "model_type": "clip_text_model",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 12,
+ "pad_token_id": 1,
+ "projection_dim": 768,
+ "torch_dtype": "float32",
+ "transformers_version": "4.29.2",
+ "vocab_size": 49408
+ }
checkpoint-600/text_encoder/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0d5c258d5dbd04393c6f3dd9df9a549c61ae46ee61e28ac5d70ade8e0845fe17
+ size 492309793
checkpoint-600/unet/config.json ADDED
@@ -0,0 +1,62 @@
+ {
+ "_class_name": "UNet2DConditionModel",
+ "_diffusers_version": "0.17.0.dev0",
+ "_name_or_path": "SG161222/Realistic_Vision_V2.0",
+ "act_fn": "silu",
+ "addition_embed_type": null,
+ "addition_embed_type_num_heads": 64,
+ "attention_head_dim": 8,
+ "block_out_channels": [
+ 320,
+ 640,
+ 1280,
+ 1280
+ ],
+ "center_input_sample": false,
+ "class_embed_type": null,
+ "class_embeddings_concat": false,
+ "conv_in_kernel": 3,
+ "conv_out_kernel": 3,
+ "cross_attention_dim": 768,
+ "cross_attention_norm": null,
+ "down_block_types": [
+ "CrossAttnDownBlock2D",
+ "CrossAttnDownBlock2D",
+ "CrossAttnDownBlock2D",
+ "DownBlock2D"
+ ],
+ "downsample_padding": 1,
+ "dual_cross_attention": false,
+ "encoder_hid_dim": null,
+ "encoder_hid_dim_type": null,
+ "flip_sin_to_cos": true,
+ "freq_shift": 0,
+ "in_channels": 4,
+ "layers_per_block": 2,
+ "mid_block_only_cross_attention": null,
+ "mid_block_scale_factor": 1,
+ "mid_block_type": "UNetMidBlock2DCrossAttn",
+ "norm_eps": 1e-05,
+ "norm_num_groups": 32,
+ "num_class_embeds": null,
+ "only_cross_attention": false,
+ "out_channels": 4,
+ "projection_class_embeddings_input_dim": null,
+ "resnet_out_scale_factor": 1.0,
+ "resnet_skip_time_act": false,
+ "resnet_time_scale_shift": "default",
+ "sample_size": 64,
+ "time_cond_proj_dim": null,
+ "time_embedding_act_fn": null,
+ "time_embedding_dim": null,
+ "time_embedding_type": "positional",
+ "timestep_post_act": null,
+ "up_block_types": [
+ "UpBlock2D",
+ "CrossAttnUpBlock2D",
+ "CrossAttnUpBlock2D",
+ "CrossAttnUpBlock2D"
+ ],
+ "upcast_attention": false,
+ "use_linear_projection": false
+ }
checkpoint-600/unet/diffusion_pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c7175f562061f4619bb9bab4e10c62ea08a056a01a89035dec1e38ce16da469c
+ size 3438375973
checkpoint-800/optimizer.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:53203228c3f990a7b39c19c55fc0cc6c41ccb006185db10d093defbe1962e9e0
+ size 1972599009
checkpoint-800/random_states_0.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b70b47a5283406b49e11f143b4939cb46d18ec28ec60095c8e5604a3e94a43e8
+ size 14727
checkpoint-800/scheduler.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0488900e6aa3ff591ee9120e08dfd6ad9c8bbe3972ce2714258d7ba907cadcfd
+ size 563
checkpoint-800/text_encoder/config.json ADDED
@@ -0,0 +1,25 @@
+ {
+ "_name_or_path": "SG161222/Realistic_Vision_V2.0",
+ "architectures": [
+ "CLIPTextModel"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 0,
+ "dropout": 0.0,
+ "eos_token_id": 2,
+ "hidden_act": "quick_gelu",
+ "hidden_size": 768,
+ "initializer_factor": 1.0,
+ "initializer_range": 0.02,
+ "intermediate_size": 3072,
+ "layer_norm_eps": 1e-05,
+ "max_position_embeddings": 77,
+ "model_type": "clip_text_model",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 12,
+ "pad_token_id": 1,
+ "projection_dim": 768,
+ "torch_dtype": "float32",
+ "transformers_version": "4.29.2",
+ "vocab_size": 49408
+ }
checkpoint-800/text_encoder/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4034e17e9f3223895c20d2a23f7bbd9bc1bcd7863c077e11b61e909aeb029549
+ size 492309793
checkpoint-800/unet/config.json ADDED
@@ -0,0 +1,62 @@
+ {
+ "_class_name": "UNet2DConditionModel",
+ "_diffusers_version": "0.17.0.dev0",
+ "_name_or_path": "SG161222/Realistic_Vision_V2.0",
+ "act_fn": "silu",
+ "addition_embed_type": null,
+ "addition_embed_type_num_heads": 64,
+ "attention_head_dim": 8,
+ "block_out_channels": [
+ 320,
+ 640,
+ 1280,
+ 1280
+ ],
+ "center_input_sample": false,
+ "class_embed_type": null,
+ "class_embeddings_concat": false,
+ "conv_in_kernel": 3,
+ "conv_out_kernel": 3,
+ "cross_attention_dim": 768,
+ "cross_attention_norm": null,
+ "down_block_types": [
+ "CrossAttnDownBlock2D",
+ "CrossAttnDownBlock2D",
+ "CrossAttnDownBlock2D",
+ "DownBlock2D"
+ ],
+ "downsample_padding": 1,
+ "dual_cross_attention": false,
+ "encoder_hid_dim": null,
+ "encoder_hid_dim_type": null,
+ "flip_sin_to_cos": true,
+ "freq_shift": 0,
+ "in_channels": 4,
+ "layers_per_block": 2,
+ "mid_block_only_cross_attention": null,
+ "mid_block_scale_factor": 1,
+ "mid_block_type": "UNetMidBlock2DCrossAttn",
+ "norm_eps": 1e-05,
+ "norm_num_groups": 32,
+ "num_class_embeds": null,
+ "only_cross_attention": false,
+ "out_channels": 4,
+ "projection_class_embeddings_input_dim": null,
+ "resnet_out_scale_factor": 1.0,
+ "resnet_skip_time_act": false,
+ "resnet_time_scale_shift": "default",
+ "sample_size": 64,
+ "time_cond_proj_dim": null,
+ "time_embedding_act_fn": null,
+ "time_embedding_dim": null,
+ "time_embedding_type": "positional",
+ "timestep_post_act": null,
+ "up_block_types": [
+ "UpBlock2D",
+ "CrossAttnUpBlock2D",
+ "CrossAttnUpBlock2D",
+ "CrossAttnUpBlock2D"
+ ],
+ "upcast_attention": false,
+ "use_linear_projection": false
+ }
checkpoint-800/unet/diffusion_pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8f2fc0153d325cf461e14b16429ea788a31a0b62fce4ce1ae68a33cb427f1f3a
+ size 3438375973
feature_extractor/preprocessor_config.json ADDED
@@ -0,0 +1,28 @@
+ {
+ "crop_size": {
+ "height": 224,
+ "width": 224
+ },
+ "do_center_crop": true,
+ "do_convert_rgb": true,
+ "do_normalize": true,
+ "do_rescale": true,
+ "do_resize": true,
+ "feature_extractor_type": "CLIPFeatureExtractor",
+ "image_mean": [
+ 0.48145466,
+ 0.4578275,
+ 0.40821073
+ ],
+ "image_processor_type": "CLIPImageProcessor",
+ "image_std": [
+ 0.26862954,
+ 0.26130258,
+ 0.27577711
+ ],
+ "resample": 3,
+ "rescale_factor": 0.00392156862745098,
+ "size": {
+ "shortest_edge": 224
+ }
+ }
model_index.json ADDED
@@ -0,0 +1,33 @@
+ {
+ "_class_name": "StableDiffusionPipeline",
+ "_diffusers_version": "0.17.0.dev0",
+ "feature_extractor": [
+ "transformers",
+ "CLIPImageProcessor"
+ ],
+ "requires_safety_checker": true,
+ "safety_checker": [
+ "stable_diffusion",
+ "StableDiffusionSafetyChecker"
+ ],
+ "scheduler": [
+ "diffusers",
+ "PNDMScheduler"
+ ],
+ "text_encoder": [
+ "transformers",
+ "CLIPTextModel"
+ ],
+ "tokenizer": [
+ "transformers",
+ "CLIPTokenizer"
+ ],
+ "unet": [
+ "diffusers",
+ "UNet2DConditionModel"
+ ],
+ "vae": [
+ "diffusers",
+ "AutoencoderKL"
+ ]
+ }
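
`model_index.json` maps each pipeline slot to a `(library, class)` pair, which is how the generic `DiffusionPipeline.from_pretrained` assembles a `StableDiffusionPipeline` without hard-coded classes. The same mapping also lets the components be loaded individually; a sketch, assuming the repo id from the README:

```python
from diffusers import AutoencoderKL, PNDMScheduler, UNet2DConditionModel
from transformers import CLIPTextModel, CLIPTokenizer

repo = "koala2/fashion-crops-1-50-prior-Realistic_Vision_V2.0"

# Each subfolder name and class below comes from an entry in model_index.json.
unet = UNet2DConditionModel.from_pretrained(repo, subfolder="unet")
vae = AutoencoderKL.from_pretrained(repo, subfolder="vae")
text_encoder = CLIPTextModel.from_pretrained(repo, subfolder="text_encoder")
tokenizer = CLIPTokenizer.from_pretrained(repo, subfolder="tokenizer")
scheduler = PNDMScheduler.from_pretrained(repo, subfolder="scheduler")
```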
safety_checker/config.json ADDED
@@ -0,0 +1,168 @@
+ {
+ "_commit_hash": "60c57a68e17cc9261e6031cb3babd7ffd6d6dde1",
+ "_name_or_path": "/root/.cache/huggingface/hub/models--SG161222--Realistic_Vision_V2.0/snapshots/60c57a68e17cc9261e6031cb3babd7ffd6d6dde1/safety_checker",
+ "architectures": [
+ "StableDiffusionSafetyChecker"
+ ],
+ "initializer_factor": 1.0,
+ "logit_scale_init_value": 2.6592,
+ "model_type": "clip",
+ "projection_dim": 768,
+ "text_config": {
+ "_name_or_path": "",
+ "add_cross_attention": false,
+ "architectures": null,
+ "attention_dropout": 0.0,
+ "bad_words_ids": null,
+ "begin_suppress_tokens": null,
+ "bos_token_id": 0,
+ "chunk_size_feed_forward": 0,
+ "cross_attention_hidden_size": null,
+ "decoder_start_token_id": null,
+ "diversity_penalty": 0.0,
+ "do_sample": false,
+ "dropout": 0.0,
+ "early_stopping": false,
+ "encoder_no_repeat_ngram_size": 0,
+ "eos_token_id": 2,
+ "exponential_decay_length_penalty": null,
+ "finetuning_task": null,
+ "forced_bos_token_id": null,
+ "forced_eos_token_id": null,
+ "hidden_act": "quick_gelu",
+ "hidden_size": 768,
+ "id2label": {
+ "0": "LABEL_0",
+ "1": "LABEL_1"
+ },
+ "initializer_factor": 1.0,
+ "initializer_range": 0.02,
+ "intermediate_size": 3072,
+ "is_decoder": false,
+ "is_encoder_decoder": false,
+ "label2id": {
+ "LABEL_0": 0,
+ "LABEL_1": 1
+ },
+ "layer_norm_eps": 1e-05,
+ "length_penalty": 1.0,
+ "max_length": 20,
+ "max_position_embeddings": 77,
+ "min_length": 0,
+ "model_type": "clip_text_model",
+ "no_repeat_ngram_size": 0,
+ "num_attention_heads": 12,
+ "num_beam_groups": 1,
+ "num_beams": 1,
+ "num_hidden_layers": 12,
+ "num_return_sequences": 1,
+ "output_attentions": false,
+ "output_hidden_states": false,
+ "output_scores": false,
+ "pad_token_id": 1,
+ "prefix": null,
+ "problem_type": null,
+ "projection_dim": 512,
+ "pruned_heads": {},
+ "remove_invalid_values": false,
+ "repetition_penalty": 1.0,
+ "return_dict": true,
+ "return_dict_in_generate": false,
+ "sep_token_id": null,
+ "suppress_tokens": null,
+ "task_specific_params": null,
+ "temperature": 1.0,
+ "tf_legacy_loss": false,
+ "tie_encoder_decoder": false,
+ "tie_word_embeddings": true,
+ "tokenizer_class": null,
+ "top_k": 50,
+ "top_p": 1.0,
+ "torch_dtype": null,
+ "torchscript": false,
+ "transformers_version": "4.29.2",
+ "typical_p": 1.0,
+ "use_bfloat16": false,
+ "vocab_size": 49408
+ },
+ "torch_dtype": "float32",
+ "transformers_version": null,
+ "vision_config": {
+ "_name_or_path": "",
+ "add_cross_attention": false,
+ "architectures": null,
+ "attention_dropout": 0.0,
+ "bad_words_ids": null,
+ "begin_suppress_tokens": null,
+ "bos_token_id": null,
+ "chunk_size_feed_forward": 0,
+ "cross_attention_hidden_size": null,
+ "decoder_start_token_id": null,
+ "diversity_penalty": 0.0,
+ "do_sample": false,
+ "dropout": 0.0,
+ "early_stopping": false,
+ "encoder_no_repeat_ngram_size": 0,
+ "eos_token_id": null,
+ "exponential_decay_length_penalty": null,
+ "finetuning_task": null,
+ "forced_bos_token_id": null,
+ "forced_eos_token_id": null,
+ "hidden_act": "quick_gelu",
+ "hidden_size": 1024,
+ "id2label": {
+ "0": "LABEL_0",
+ "1": "LABEL_1"
+ },
+ "image_size": 224,
+ "initializer_factor": 1.0,
+ "initializer_range": 0.02,
+ "intermediate_size": 4096,
+ "is_decoder": false,
+ "is_encoder_decoder": false,
+ "label2id": {
+ "LABEL_0": 0,
+ "LABEL_1": 1
+ },
+ "layer_norm_eps": 1e-05,
+ "length_penalty": 1.0,
+ "max_length": 20,
+ "min_length": 0,
+ "model_type": "clip_vision_model",
+ "no_repeat_ngram_size": 0,
+ "num_attention_heads": 16,
+ "num_beam_groups": 1,
+ "num_beams": 1,
+ "num_channels": 3,
+ "num_hidden_layers": 24,
+ "num_return_sequences": 1,
+ "output_attentions": false,
+ "output_hidden_states": false,
+ "output_scores": false,
+ "pad_token_id": null,
+ "patch_size": 14,
+ "prefix": null,
+ "problem_type": null,
+ "projection_dim": 512,
+ "pruned_heads": {},
+ "remove_invalid_values": false,
+ "repetition_penalty": 1.0,
+ "return_dict": true,
+ "return_dict_in_generate": false,
+ "sep_token_id": null,
+ "suppress_tokens": null,
+ "task_specific_params": null,
+ "temperature": 1.0,
+ "tf_legacy_loss": false,
+ "tie_encoder_decoder": false,
+ "tie_word_embeddings": true,
+ "tokenizer_class": null,
+ "top_k": 50,
+ "top_p": 1.0,
+ "torch_dtype": null,
+ "torchscript": false,
+ "transformers_version": "4.29.2",
+ "typical_p": 1.0,
+ "use_bfloat16": false
+ }
+ }
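
The safety checker is a CLIP-based classifier (a ViT-L/14-style vision tower, per `vision_config`) that screens generated images; `model_index.json` wires it into the pipeline because `requires_safety_checker` is true. A sketch of loading it together with its paired image processor, assuming the repo id from the README:

```python
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from transformers import CLIPImageProcessor

repo = "koala2/fashion-crops-1-50-prior-Realistic_Vision_V2.0"

# The feature extractor resizes/normalizes images to the checker's 224x224 CLIP input.
safety_checker = StableDiffusionSafetyChecker.from_pretrained(repo, subfolder="safety_checker")
feature_extractor = CLIPImageProcessor.from_pretrained(repo, subfolder="feature_extractor")
```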
safety_checker/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:16d28f2b37109f222cdc33620fdd262102ac32112be0352a7f77e9614b35a394
+ size 1216064769
scheduler/scheduler_config.json ADDED
@@ -0,0 +1,14 @@
+ {
+ "_class_name": "PNDMScheduler",
+ "_diffusers_version": "0.17.0.dev0",
+ "beta_end": 0.012,
+ "beta_schedule": "scaled_linear",
+ "beta_start": 0.00085,
+ "clip_sample": false,
+ "num_train_timesteps": 1000,
+ "prediction_type": "epsilon",
+ "set_alpha_to_one": false,
+ "skip_prk_steps": true,
+ "steps_offset": 1,
+ "trained_betas": null
+ }
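
The PNDM scheduler is defined entirely by this config (scaled-linear betas from 0.00085 to 0.012 over 1000 training timesteps, epsilon prediction), so it can be swapped after loading without touching the weights. A sketch, with `DPMSolverMultistepScheduler` as an arbitrary alternative choice:

```python
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained(
    "koala2/fashion-crops-1-50-prior-Realistic_Vision_V2.0"
)
# Reuses beta_start/beta_end/prediction_type etc. from the config above.
pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
```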
text_encoder/config.json ADDED
@@ -0,0 +1,25 @@
+ {
+ "_name_or_path": "SG161222/Realistic_Vision_V2.0",
+ "architectures": [
+ "CLIPTextModel"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 0,
+ "dropout": 0.0,
+ "eos_token_id": 2,
+ "hidden_act": "quick_gelu",
+ "hidden_size": 768,
+ "initializer_factor": 1.0,
+ "initializer_range": 0.02,
+ "intermediate_size": 3072,
+ "layer_norm_eps": 1e-05,
+ "max_position_embeddings": 77,
+ "model_type": "clip_text_model",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 12,
+ "pad_token_id": 1,
+ "projection_dim": 768,
+ "torch_dtype": "float32",
+ "transformers_version": "4.29.2",
+ "vocab_size": 49408
+ }
text_encoder/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:05a063508de1afccb1ff593412a20d04bea6ec1e1c3bf91efc035cb23c92674f
+ size 492309793
tokenizer/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer/special_tokens_map.json ADDED
@@ -0,0 +1,24 @@
+ {
+ "bos_token": {
+ "content": "<|startoftext|>",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "<|endoftext|>",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": "<|endoftext|>",
+ "unk_token": {
+ "content": "<|endoftext|>",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false
+ }
+ }
tokenizer/tokenizer_config.json ADDED
@@ -0,0 +1,33 @@
+ {
+ "add_prefix_space": false,
+ "bos_token": {
+ "__type": "AddedToken",
+ "content": "<|startoftext|>",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false
+ },
+ "clean_up_tokenization_spaces": true,
+ "do_lower_case": true,
+ "eos_token": {
+ "__type": "AddedToken",
+ "content": "<|endoftext|>",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false
+ },
+ "errors": "replace",
+ "model_max_length": 77,
+ "pad_token": "<|endoftext|>",
+ "tokenizer_class": "CLIPTokenizer",
+ "unk_token": {
+ "__type": "AddedToken",
+ "content": "<|endoftext|>",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false
+ }
+ }
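
The tokenizer is a stock CLIP BPE tokenizer with `model_max_length` 77, matching the text encoder's `max_position_embeddings`; prompts are padded with `<|endoftext|>`. A sketch of encoding the instance prompt, assuming the repo id from the README:

```python
from transformers import CLIPTokenizer

tokenizer = CLIPTokenizer.from_pretrained(
    "koala2/fashion-crops-1-50-prior-Realistic_Vision_V2.0",
    subfolder="tokenizer",
)
# Pads/truncates to the 77-token window the text encoder expects.
ids = tokenizer(
    "a coloring page of a fashion girl",
    padding="max_length",
    max_length=tokenizer.model_max_length,
    truncation=True,
    return_tensors="pt",
).input_ids
print(ids.shape)  # torch.Size([1, 77])
```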
tokenizer/vocab.json ADDED
The diff for this file is too large to render. See raw diff
 
unet/config.json ADDED
@@ -0,0 +1,62 @@
+ {
+ "_class_name": "UNet2DConditionModel",
+ "_diffusers_version": "0.17.0.dev0",
+ "_name_or_path": "SG161222/Realistic_Vision_V2.0",
+ "act_fn": "silu",
+ "addition_embed_type": null,
+ "addition_embed_type_num_heads": 64,
+ "attention_head_dim": 8,
+ "block_out_channels": [
+ 320,
+ 640,
+ 1280,
+ 1280
+ ],
+ "center_input_sample": false,
+ "class_embed_type": null,
+ "class_embeddings_concat": false,
+ "conv_in_kernel": 3,
+ "conv_out_kernel": 3,
+ "cross_attention_dim": 768,
+ "cross_attention_norm": null,
+ "down_block_types": [
+ "CrossAttnDownBlock2D",
+ "CrossAttnDownBlock2D",
+ "CrossAttnDownBlock2D",
+ "DownBlock2D"
+ ],
+ "downsample_padding": 1,
+ "dual_cross_attention": false,
+ "encoder_hid_dim": null,
+ "encoder_hid_dim_type": null,
+ "flip_sin_to_cos": true,
+ "freq_shift": 0,
+ "in_channels": 4,
+ "layers_per_block": 2,
+ "mid_block_only_cross_attention": null,
+ "mid_block_scale_factor": 1,
+ "mid_block_type": "UNetMidBlock2DCrossAttn",
+ "norm_eps": 1e-05,
+ "norm_num_groups": 32,
+ "num_class_embeds": null,
+ "only_cross_attention": false,
+ "out_channels": 4,
+ "projection_class_embeddings_input_dim": null,
+ "resnet_out_scale_factor": 1.0,
+ "resnet_skip_time_act": false,
+ "resnet_time_scale_shift": "default",
+ "sample_size": 64,
+ "time_cond_proj_dim": null,
+ "time_embedding_act_fn": null,
+ "time_embedding_dim": null,
+ "time_embedding_type": "positional",
+ "timestep_post_act": null,
+ "up_block_types": [
+ "UpBlock2D",
+ "CrossAttnUpBlock2D",
+ "CrossAttnUpBlock2D",
+ "CrossAttnUpBlock2D"
+ ],
+ "upcast_attention": false,
+ "use_linear_projection": false
+ }
unet/diffusion_pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:dbb4105373441a192fba9dee8dd10546fbe446fc8ccce36bb2a7c4f931d9cbee
+ size 3438375973
vae/config.json ADDED
@@ -0,0 +1,31 @@
+ {
+ "_class_name": "AutoencoderKL",
+ "_diffusers_version": "0.17.0.dev0",
+ "_name_or_path": "/root/.cache/huggingface/hub/models--SG161222--Realistic_Vision_V2.0/snapshots/60c57a68e17cc9261e6031cb3babd7ffd6d6dde1/vae",
+ "act_fn": "silu",
+ "block_out_channels": [
+ 128,
+ 256,
+ 512,
+ 512
+ ],
+ "down_block_types": [
+ "DownEncoderBlock2D",
+ "DownEncoderBlock2D",
+ "DownEncoderBlock2D",
+ "DownEncoderBlock2D"
+ ],
+ "in_channels": 3,
+ "latent_channels": 4,
+ "layers_per_block": 2,
+ "norm_num_groups": 32,
+ "out_channels": 3,
+ "sample_size": 512,
+ "scaling_factor": 0.18215,
+ "up_block_types": [
+ "UpDecoderBlock2D",
+ "UpDecoderBlock2D",
+ "UpDecoderBlock2D",
+ "UpDecoderBlock2D"
+ ]
+ }
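
The VAE's `scaling_factor` of 0.18215 converts between raw latents and the roughly unit-variance space the UNet operates in: multiply after encoding, divide before decoding. A sketch with a random placeholder image tensor, assuming the repo id from the README:

```python
import torch
from diffusers import AutoencoderKL

vae = AutoencoderKL.from_pretrained(
    "koala2/fashion-crops-1-50-prior-Realistic_Vision_V2.0",
    subfolder="vae",
)

images = torch.randn(1, 3, 512, 512)  # placeholder; real inputs are scaled to [-1, 1]
with torch.no_grad():
    latents = vae.encode(images).latent_dist.sample() * vae.config.scaling_factor
    decoded = vae.decode(latents / vae.config.scaling_factor).sample
print(latents.shape)  # torch.Size([1, 4, 64, 64]) -- 8x downsampling, 4 latent channels
```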
vae/diffusion_pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:251086e7c7793410070d11a421db3886a7b2a7ff27cdea006a67a4cd76a7a899
+ size 334712113