Veldrovive committed on
Commit
7146282
1 Parent(s): 6ca8f12

Upload working/decoder_config.json with huggingface_hub

Browse files
Files changed (1) hide show
  1. working/decoder_config.json +136 -0
working/decoder_config.json ADDED
@@ -0,0 +1,136 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "seed": 1,
3
+
4
+ "decoder": {
5
+ "unets": [
6
+ {
7
+ "dim": 256,
8
+ "cond_dim": 512,
9
+ "image_embed_dim": 768,
10
+ "text_embed_dim": 768,
11
+ "cond_on_text_encodings": false,
12
+ "channels": 3,
13
+ "dim_mults": [1, 2, 3, 4],
14
+ "num_resnet_blocks": 4,
15
+ "attn_heads": 8,
16
+ "attn_dim_head": 64,
17
+ "sparse_attn": true,
18
+ "memory_efficient": true,
19
+ "self_attn": [false, true, true, true]
20
+ },
21
+ {
22
+ "dim": 256,
23
+ "cond_dim": 512,
24
+ "image_embed_dim": 768,
25
+ "text_embed_dim": 768,
26
+ "cond_on_text_encodings": true,
27
+ "init_cross_embed": false,
28
+ "channels": 3,
29
+ "dim_mults": [1, 2, 3, 4],
30
+ "num_resnet_blocks": 4,
31
+ "attn_heads": 8,
32
+ "attn_dim_head": 64,
33
+ "sparse_attn": false,
34
+ "memory_efficient": true,
35
+ "self_attn": [false, false, false, false]
36
+ }
37
+
38
+ ],
39
+ "clip": {
40
+ "make": "openai",
41
+ "model": "ViT-L/14"
42
+ },
43
+ "image_sizes": [64, 256],
44
+ "channels": 3,
45
+ "timesteps": 1000,
46
+ "loss_type": "l2",
47
+ "beta_schedule": ["cosine", "cosine"],
48
+ "learned_variance": false,
49
+ "text_cond_drop_prob": 0.0,
50
+ "image_cond_drop_prob": 0.0
51
+ },
52
+ "data": {
53
+ "webdataset_base_url": "pipe:aws s3 cp --quiet s3://s-datasets/laion-high-resolution/{}.tar -",
54
+ "num_workers": 6,
55
+ "batch_size": 5,
56
+ "start_shard": 0,
57
+ "end_shard": 17535,
58
+ "shard_width": 5,
59
+ "index_width": 4,
60
+ "cond_scale": [3.5, 1.0],
61
+ "splits": {
62
+ "train": 0.75,
63
+ "val": 0.15,
64
+ "test": 0.1
65
+ },
66
+ "shuffle_train": false,
67
+ "resample_train": true,
68
+ "preprocessing": {
69
+ "RandomResizedCrop": {
70
+ "size": [256, 256],
71
+ "scale": [0.75, 1.0],
72
+ "ratio": [1.0, 1.0]
73
+ },
74
+ "ToTensor": true
75
+ }
76
+ },
77
+ "train": {
78
+ "epochs": 1000,
79
+ "lr": 1.2e-4,
80
+ "wd": 0.0,
81
+ "max_grad_norm": 0.5,
82
+ "save_every_n_samples": 400000,
83
+ "n_sample_images": 10,
84
+ "cond_scale": [3.5, 1.0],
85
+ "device": "cuda:0",
86
+ "epoch_samples": 1600000,
87
+ "validation_samples": 60000,
88
+ "use_ema": true,
89
+ "ema_beta": 0.9999,
90
+ "save_all": false,
91
+ "save_latest": true,
92
+ "save_best": true,
93
+ "unet_training_mask": [false, true]
94
+ },
95
+ "evaluate": {
96
+ "n_evaluation_samples": 10,
97
+ "FID": {
98
+ "feature": 64
99
+ },
100
+ "LPIPS": {
101
+ "net_type": "vgg",
102
+ "reduction": "mean"
103
+ }
104
+ },
105
+ "tracker": {
106
+ "data_path": ".tracker-upsampling",
107
+ "overwrite_data_path": true,
108
+
109
+ "log": {
110
+ "log_type": "wandb",
111
+
112
+ "wandb_entity": "veldrovive",
113
+ "wandb_project": "upsampler",
114
+ "auto_resume": false,
115
+
116
+ "verbose": true
117
+ },
118
+
119
+ "load": {
120
+ "load_from": null,
121
+ "only_auto_resume": true,
122
+ "file_path": "/fsx/aidan/new/dalle2/dev-dalle2/models/latest.pth"
123
+ },
124
+
125
+ "save": [{
126
+ "save_to": "wandb"
127
+ },{
128
+ "save_to": "local",
129
+ "save_latest_to": "/fsx/aidan/new/dalle2/dev-dalle2/models/latest.pth",
130
+ "save_best_to": "/fsx/aidan/new/dalle2/dev-dalle2/models/best.pth",
131
+ "save_meta_to": "/fsx/aidan/new/dalle2/dev-dalle2/models",
132
+
133
+ "save_type": "model"
134
+ }]
135
+ }
136
+ }