.gitattributes CHANGED
@@ -32,3 +32,6 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ 091122/images/showcase_bendstract.jpg filter=lfs diff=lfs merge=lfs -text
+ 091122/images/showcase_pow_midrun.jpg filter=lfs diff=lfs merge=lfs -text
+ 091122/images/showcase_bendingreality.jpg filter=lfs diff=lfs merge=lfs -text
091122/ckpts/BendingReality_Style-v1.ckpt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5893d0e6598a15aa2d48d46060da7f194515e095a45f252c79e64bacb110bec2
+ size 2132870858
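
These checkpoint entries are Git LFS pointers: the repository itself stores only the `version`/`oid`/`size` triplet, while the ~2 GB weights live on the LFS server. A minimal sketch for fetching the resolved file with `huggingface_hub` (the repo id is taken from the README below; variable names are illustrative):

```python
from huggingface_hub import hf_hub_download

# Resolves the LFS pointer and downloads the actual checkpoint to the local cache
ckpt_path = hf_hub_download(
    repo_id="Guizmus/SD_PoW_Collection",
    filename="091122/ckpts/BendingReality_Style-v1.ckpt",
)
print(ckpt_path)  # local path to the ~2.1 GB .ckpt file
```
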
091122/ckpts/Bendstract-v1.ckpt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fc4a64bb612ee0acfdc1bfe5745c1070921245a7cbf7cf5922a389b9a1db5d28
+ size 2132866262
091122/ckpts/PoWStyle_Abstralities.ckpt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b8f23dc37410061035ae24abc70738c62836c2f1e2e106eb96936f702099a57a
+ size 2132856622
091122/ckpts/PoWStyle_midrun.ckpt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:eca0422f4dd852c162e20658e8f3eed6cbe8f544f247e257276e0569f4260b56
+ size 2132856622
091122/dataset.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:61eef54f22b57908b102a174e5653a45de09837871978df293443cf2683135f3
+ size 5038865
091122/diffusers/feature_extractor/preprocessor_config.json ADDED
@@ -0,0 +1,20 @@
+ {
+   "crop_size": 224,
+   "do_center_crop": true,
+   "do_convert_rgb": true,
+   "do_normalize": true,
+   "do_resize": true,
+   "feature_extractor_type": "CLIPFeatureExtractor",
+   "image_mean": [
+     0.48145466,
+     0.4578275,
+     0.40821073
+   ],
+   "image_std": [
+     0.26862954,
+     0.26130258,
+     0.27577711
+   ],
+   "resample": 3,
+   "size": 224
+ }
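
This is the stock CLIP image preprocessing (224×224 center crop, CLIP normalization statistics, bicubic resampling — `"resample": 3` in PIL's constants). A hedged sketch of loading it on its own, assuming the `091122/diffusers` folder has been downloaded locally (the path is illustrative):

```python
from transformers import CLIPFeatureExtractor

# Builds the preprocessor from the preprocessor_config.json above
feature_extractor = CLIPFeatureExtractor.from_pretrained(
    "./091122/diffusers/feature_extractor"
)
```
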
091122/diffusers/model_index.json ADDED
@@ -0,0 +1,28 @@
+ {
+   "_class_name": "StableDiffusionPipeline",
+   "_diffusers_version": "0.8.0.dev0",
+   "feature_extractor": [
+     "transformers",
+     "CLIPFeatureExtractor"
+   ],
+   "scheduler": [
+     "diffusers",
+     "DDIMScheduler"
+   ],
+   "text_encoder": [
+     "transformers",
+     "CLIPTextModel"
+   ],
+   "tokenizer": [
+     "transformers",
+     "CLIPTokenizer"
+   ],
+   "unet": [
+     "diffusers",
+     "UNet2DConditionModel"
+   ],
+   "vae": [
+     "diffusers",
+     "AutoencoderKL"
+   ]
+ }
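
`model_index.json` is the pipeline manifest: for each component it names the library and class that `from_pretrained` should instantiate from the matching subfolder. A minimal loading sketch, assuming the `091122/diffusers` folder is available locally (the path is illustrative):

```python
from diffusers import StableDiffusionPipeline

# Reads model_index.json, then loads each component from its subfolder
# (feature_extractor, scheduler, text_encoder, tokenizer, unet, vae)
pipe = StableDiffusionPipeline.from_pretrained("./091122/diffusers")
```
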
091122/diffusers/scheduler/scheduler_config.json ADDED
@@ -0,0 +1,12 @@
+ {
+   "_class_name": "DDIMScheduler",
+   "_diffusers_version": "0.8.0.dev0",
+   "beta_end": 0.012,
+   "beta_schedule": "scaled_linear",
+   "beta_start": 0.00085,
+   "clip_sample": false,
+   "num_train_timesteps": 1000,
+   "set_alpha_to_one": false,
+   "steps_offset": 1,
+   "trained_betas": null
+ }
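
These are the standard Stable Diffusion v1 DDIM settings (scaled-linear beta schedule from 0.00085 to 0.012, `clip_sample: false`, `steps_offset: 1`). If you prefer a different sampler, a common diffusers pattern is to rebuild a new scheduler from this config — a hedged sketch, reusing `pipe` from the previous snippet:

```python
from diffusers import EulerDiscreteScheduler

# Swap DDIM for Euler while keeping the trained beta schedule
pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
```
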
091122/diffusers/text_encoder/config.json ADDED
@@ -0,0 +1,25 @@
+ {
+   "_name_or_path": "openai/clip-vit-large-patch14",
+   "architectures": [
+     "CLIPTextModel"
+   ],
+   "attention_dropout": 0.0,
+   "bos_token_id": 0,
+   "dropout": 0.0,
+   "eos_token_id": 2,
+   "hidden_act": "quick_gelu",
+   "hidden_size": 768,
+   "initializer_factor": 1.0,
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "layer_norm_eps": 1e-05,
+   "max_position_embeddings": 77,
+   "model_type": "clip_text_model",
+   "num_attention_heads": 12,
+   "num_hidden_layers": 12,
+   "pad_token_id": 1,
+   "projection_dim": 768,
+   "torch_dtype": "float32",
+   "transformers_version": "4.19.2",
+   "vocab_size": 49408
+ }
091122/diffusers/text_encoder/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:770a47a9ffdcfda0b05506a7888ed714d06131d60267e6cf52765d61cf59fd67
+ size 492305335
091122/diffusers/tokenizer/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
091122/diffusers/tokenizer/special_tokens_map.json ADDED
@@ -0,0 +1 @@
+ {"bos_token": {"content": "<|startoftext|>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "eos_token": {"content": "<|endoftext|>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "unk_token": {"content": "<|endoftext|>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "pad_token": "<|endoftext|>"}
091122/diffusers/tokenizer/tokenizer_config.json ADDED
@@ -0,0 +1 @@
+ {"errors": "replace", "unk_token": {"content": "<|endoftext|>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "bos_token": {"content": "<|startoftext|>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "eos_token": {"content": "<|endoftext|>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "pad_token": "<|endoftext|>", "add_prefix_space": false, "do_lower_case": true, "name_or_path": "openai/clip-vit-large-patch14", "model_max_length": 77, "special_tokens_map_file": "./special_tokens_map.json", "tokenizer_class": "CLIPTokenizer"}
091122/diffusers/tokenizer/vocab.json ADDED
The diff for this file is too large to render. See raw diff
 
091122/diffusers/unet/config.json ADDED
@@ -0,0 +1,36 @@
+ {
+   "_class_name": "UNet2DConditionModel",
+   "_diffusers_version": "0.8.0.dev0",
+   "act_fn": "silu",
+   "attention_head_dim": 8,
+   "block_out_channels": [
+     320,
+     640,
+     1280,
+     1280
+   ],
+   "center_input_sample": false,
+   "cross_attention_dim": 768,
+   "down_block_types": [
+     "CrossAttnDownBlock2D",
+     "CrossAttnDownBlock2D",
+     "CrossAttnDownBlock2D",
+     "DownBlock2D"
+   ],
+   "downsample_padding": 1,
+   "flip_sin_to_cos": true,
+   "freq_shift": 0,
+   "in_channels": 4,
+   "layers_per_block": 2,
+   "mid_block_scale_factor": 1,
+   "norm_eps": 1e-05,
+   "norm_num_groups": 32,
+   "out_channels": 4,
+   "sample_size": 32,
+   "up_block_types": [
+     "UpBlock2D",
+     "CrossAttnUpBlock2D",
+     "CrossAttnUpBlock2D",
+     "CrossAttnUpBlock2D"
+   ]
+ }
091122/diffusers/unet/diffusion_pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c7da0e21ba7ea50637bee26e81c220844defdf01aafca02b2c42ecdadb813de4
+ size 3438354725
091122/diffusers/vae/config.json ADDED
@@ -0,0 +1,29 @@
+ {
+   "_class_name": "AutoencoderKL",
+   "_diffusers_version": "0.8.0.dev0",
+   "act_fn": "silu",
+   "block_out_channels": [
+     128,
+     256,
+     512,
+     512
+   ],
+   "down_block_types": [
+     "DownEncoderBlock2D",
+     "DownEncoderBlock2D",
+     "DownEncoderBlock2D",
+     "DownEncoderBlock2D"
+   ],
+   "in_channels": 3,
+   "latent_channels": 4,
+   "layers_per_block": 2,
+   "norm_num_groups": 32,
+   "out_channels": 3,
+   "sample_size": 256,
+   "up_block_types": [
+     "UpDecoderBlock2D",
+     "UpDecoderBlock2D",
+     "UpDecoderBlock2D",
+     "UpDecoderBlock2D"
+   ]
+ }
091122/diffusers/vae/diffusion_pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1b134cded8eb78b184aefb8805b6b572f36fa77b255c483665dda931fa0130c5
+ size 334707217
091122/images/AETHER.png ADDED
091122/images/aether2.png ADDED
091122/images/showcase_bendingreality.jpg ADDED

Git LFS Details

  • SHA256: c2325802ab19a9603044352be8bd219a8b1f7cea705def612cdfcf204ef5d252
  • Pointer size: 132 Bytes
  • Size of remote file: 2.49 MB
091122/images/showcase_bendstract.jpg ADDED

Git LFS Details

  • SHA256: c51aff98c4d9fe62b25293db40a6215d3e16276e1dae5f3c850c1882c8ca8d10
  • Pointer size: 132 Bytes
  • Size of remote file: 1.39 MB
091122/images/showcase_pow_midrun.jpg ADDED

Git LFS Details

  • SHA256: 437720a7cfb016b939b4fa685d98c0c3ef2d092759e5102a3d8f127bcc312591
  • Pointer size: 132 Bytes
  • Size of remote file: 8.41 MB
README.md CHANGED
@@ -1,3 +1,88 @@
  ---
+ language:
+ - en
  license: creativeml-openrail-m
+ thumbnail: "https://huggingface.co/Guizmus/SD_PoW_Collection/resolve/main/showcase.jpg"
+ tags:
+ - stable-diffusion
+ - text-to-image
+ - image-to-image
+ library_name: "https://github.com/ShivamShrirao/diffusers"
+
  ---
+
+ # Intro
+
+ This is a collection of models related to the "Picture of the Week" contest on the Stable Diffusion discord.
+
+ I try to make a model out of all the submissions so that people can continue to enjoy the theme after the event, and see a little of their designs in other people's creations. The token stays "PoW Style" and I balance the learning on the low side, so that the model doesn't just replicate the original creations.
+
+ I also make smaller, lower-quality models to help make pictures for the contest itself, based on the theme.
+
+ ## 09 November 2022, "Abstralities"
+
+ ### Theme: Abstract Realities
+
+ Glitch, warp, static, shape, flicker, break, bend, mend
+
+ Have you ever felt your reality shift out from under your feet? Our perception falters and repairs itself in the blink of an eye. Just how much do our brains influence what we perceive? How much control do we have over molding these realities?
+
+ With the introduction of AI and its rapid pace taking the world by storm, we are seeing firsthand just how these realities can bring worlds into fruition.
+
+ * Can you show us your altered reality?
+ * Are these realities truly broken, or only bent?
+
+ Our example prompt for this event was created by @Aether!
+
+ "household objects floating in space, bedroom, furniture, home living, warped reality, cosmic horror, nightmare, retrofuturism, surrealism, abstract, illustrations by alan nasmith"
+
+ ![PoW](https://huggingface.co/Guizmus/SD_PoW_Collection/resolve/main/091122/images/AETHER.png)
+
+ ![PoW](https://huggingface.co/Guizmus/SD_PoW_Collection/resolve/main/091122/images/aether2.png)
+
+ ### Models
+
+ ![PoW Style](https://huggingface.co/Guizmus/SD_PoW_Collection/resolve/main/091122/images/showcase_pow_final.jpg)
+ * Main model, based on all the results from the PoW
+ * training: 51 pictures, 3000 steps on 1e-6 polynomial LR
+ * balanced on the light side; add attention/weight to the activation token
+ * **Activation token:** `PoW Style`
+ * [CKPT link](https://huggingface.co/Guizmus/SD_PoW_Collection/resolve/main/091122/ckpts/PoWStyle_Abstralities.ckpt)
+ * [Diffusers : Guizmus/SD_PoW_Collection/091122/diffusers](https://huggingface.co/Guizmus/SD_PoW_Collection/091122/diffusers/) (usage sketch below)
+ * [Dataset](https://huggingface.co/Guizmus/SD_PoW_Collection/091122/dataset.zip)
+
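+ A minimal usage sketch for the diffusers version (assuming the `091122/diffusers` folder has been downloaded locally; the local path and prompt are illustrative):
+
+ ```python
+ from diffusers import StableDiffusionPipeline
+ import torch
+
+ pipe = StableDiffusionPipeline.from_pretrained(
+     "./091122/diffusers", torch_dtype=torch.float16
+ ).to("cuda")
+
+ # Include the activation token in the prompt
+ prompt = "household objects floating in space, warped reality, PoW Style"
+ image = pipe(prompt).images[0]
+ image.save("pow_style.png")
+ ```
+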
+ ---
+
+ ![Bendstract Style](https://huggingface.co/Guizmus/SD_PoW_Collection/resolve/main/091122/images/showcase_bendstract.jpg)
+ * based on the suggested prompt
+ * training: 100 pictures, 7500 steps on 1e-6 polynomial LR; overtrained
+ * **Activation token:** `Bendstract Style`
+ * [CKPT link](https://huggingface.co/Guizmus/SD_PoW_Collection/resolve/main/091122/ckpts/Bendstract-v1.ckpt)
+
+ ---
+
+ ![BendingReality Style](https://huggingface.co/Guizmus/SD_PoW_Collection/resolve/main/091122/images/showcase_bendingreality.jpg)
+ * based on the suggested prompt
+ * training: 68 pictures, 6000 steps on 1e-6 polynomial LR; overtrained
+ * **Activation token:** `BendingReality Style`
+ * [CKPT link](https://huggingface.co/Guizmus/SD_PoW_Collection/resolve/main/091122/ckpts/BendingReality_Style-v1.ckpt)
+
+ ---
+
+ ![PoW Style](https://huggingface.co/Guizmus/SD_PoW_Collection/resolve/main/091122/images/showcase_pow_midrun.jpg)
+ * based on the first few submissions
+ * training: 24 pictures, 2400 steps on 1e-6 polynomial LR; slightly overtrained
+ * **Activation token:** `PoW Style`
+ * [CKPT link](https://huggingface.co/Guizmus/SD_PoW_Collection/resolve/main/091122/ckpts/PoWStyle_midrun.ckpt)
+
+ ## License
+
+ These models are open access and available to all, with a CreativeML OpenRAIL-M license further specifying rights and usage.
+ The CreativeML OpenRAIL License specifies:
+
+ 1. You can't use the models to deliberately produce or share illegal or harmful outputs or content
+ 2. The authors claim no rights on the outputs you generate; you are free to use them, and are accountable for their use, which must not go against the provisions set in the license
+ 3. You may redistribute the weights and use the models commercially and/or as a service. If you do, please be aware you have to include the same use restrictions as the ones in the license and share a copy of the CreativeML OpenRAIL-M with all your users (please read the license entirely and carefully)
+
+ [Please read the full license here](https://huggingface.co/spaces/CompVis/stable-diffusion-license)
showcase.jpg ADDED