
Commit 5951133

update vae faces
1 parent 1974ad0 · commit 5951133

2 files changed (+11 lines added, -14 lines removed)

docs/history.csv

Lines changed: 8 additions & 3 deletions
@@ -33,7 +33,7 @@ GLOW,09/07/2018,https://arxiv.org/abs/1807.03039,0,Normalizing Flow,
 Universal Transformer,10/07/2018,https://arxiv.org/abs/1807.03819,1,Autoregressive / Transformer,
 BigGAN,28/09/2018,https://arxiv.org/abs/1809.11096,0,Generative Adversarial Network,
 FFJORD,02/10/2018,https://arxiv.org/abs/1810.01367,0,Normalizing Flow,
-BERT,11/10/2018,https://arxiv.org/abs/1810.04805,1,Autoregressive / Transformer,
+BERT,11/10/2018,https://arxiv.org/abs/1810.04805,0,Autoregressive / Transformer,0.345
 StyleGAN,12/12/2018,https://arxiv.org/abs/1812.04948,0,Generative Adversarial Network,
 Music Transformer,12/12/2018,https://arxiv.org/abs/1809.04281,0,Autoregressive / Transformer,
 GPT-2,14/02/2019,https://cdn.openai.com/better-language-models/language_models_are_unsupervised_multitask_learners.pdf,0,Autoregressive / Transformer,1.5
@@ -81,6 +81,11 @@ LLaMA,24/02/2023,https://arxiv.org/abs/2302.13971,0,Autoregressive / Transformer
 PaLM-E,06/03/2023,https://arxiv.org/abs/2303.03378,0,Multimodal Models,562
 Visual ChatGPT,08/03/2023,https://arxiv.org/abs/2303.04671,0,Multimodal Models,
 Alpaca,13/03/2023,https://github.com/tatsu-lab/stanford_alpaca,1,Autoregressive / Transformer,
-GPT-4,16/03/2023,https://cdn.openai.com/papers/gpt-4.pdf,0,Multimodal Models,
+GPT-4,16/03/2023,https://cdn.openai.com/papers/gpt-4.pdf,0,Autoregressive / Transformer,1000
 Luminous,14/04/2022,https://www.aleph-alpha.com/luminous,0,Autoregressive / Transformer,
-Flan-T5,20/10/2022,https://arxiv.org/abs/2210.11416,0,Autoregressive / Transformer,11
+Flan-T5,20/10/2022,https://arxiv.org/abs/2210.11416,0,Autoregressive / Transformer,11
+Falcon,17/03/2023,https://falconllm.tii.ae/,0,Autoregressive / Transformer,40
+PaLM 2,10/05/2023,https://ai.google/discover/palm2/,0,Autoregressive / Transformer,340
+PanGu-Σ,20/03/2023,https://arxiv.org/abs/2303.10845,1,Autoregressive / Transformer,1085
+GPT-3.5,30/11/2022,,0,Autoregressive / Transformer,175
+Llama 2,17/07/2023,,0,Autoregressive / Transformer,70
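
Note: each row of docs/history.csv appears to follow the pattern name,date,url,flag,family,parameters (dates in DD/MM/YYYY, parameter counts in billions). A minimal sketch of loading the file, assuming it ships without a header row; the column labels below are illustrative stand-ins, not taken from the file itself:

import pandas as pd

# Assumed column labels; the real file's header (if any) is not visible in this diff.
columns = ["name", "date", "url", "flag", "family", "params_billions"]

df = pd.read_csv("docs/history.csv", names=columns, header=None)
df["date"] = pd.to_datetime(df["date"], format="%d/%m/%Y")  # DD/MM/YYYY dates
df["params_billions"] = pd.to_numeric(df["params_billions"], errors="coerce")

# Example: transformer-family entries that have a parameter count, newest first
transformers = df[df["family"].str.contains("Transformer", na=False)]
print(
    transformers.dropna(subset=["params_billions"])
    .sort_values("date", ascending=False)[["name", "date", "params_billions"]]
)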

notebooks/03_vae/03_vae_faces/vae_faces.ipynb

Lines changed: 3 additions & 11 deletions
@@ -64,10 +64,10 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"IMAGE_SIZE = 64\n",
+"IMAGE_SIZE = 32\n",
 "CHANNELS = 3\n",
 "BATCH_SIZE = 128\n",
-"NUM_FEATURES = 64\n",
+"NUM_FEATURES = 128\n",
 "Z_DIM = 200\n",
 "LEARNING_RATE = 0.0005\n",
 "EPOCHS = 10\n",
@@ -191,9 +191,6 @@
 "x = layers.Conv2D(NUM_FEATURES, kernel_size=3, strides=2, padding=\"same\")(x)\n",
 "x = layers.BatchNormalization()(x)\n",
 "x = layers.LeakyReLU()(x)\n",
-"x = layers.Conv2D(NUM_FEATURES, kernel_size=3, strides=2, padding=\"same\")(x)\n",
-"x = layers.BatchNormalization()(x)\n",
-"x = layers.LeakyReLU()(x)\n",
 "shape_before_flattening = K.int_shape(x)[1:] # the decoder will need this!\n",
 "\n",
 "x = layers.Flatten()(x)\n",
@@ -238,11 +235,6 @@
 ")(x)\n",
 "x = layers.BatchNormalization()(x)\n",
 "x = layers.LeakyReLU()(x)\n",
-"x = layers.Conv2DTranspose(\n",
-" NUM_FEATURES, kernel_size=3, strides=2, padding=\"same\"\n",
-")(x)\n",
-"x = layers.BatchNormalization()(x)\n",
-"x = layers.LeakyReLU()(x)\n",
 "decoder_output = layers.Conv2DTranspose(\n",
 " CHANNELS, kernel_size=3, strides=1, activation=\"sigmoid\", padding=\"same\"\n",
 ")(x)\n",
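
Taken together, these hunks keep the encoder and decoder symmetric: the input resolution is halved (IMAGE_SIZE 64 -> 32) and one stride-2 Conv2D block plus its mirrored Conv2DTranspose block are removed, so the spatial size of shape_before_flattening is unchanged and only the channel width grows (NUM_FEATURES 64 -> 128). A small sketch of that downsampling arithmetic, assuming the encoder is a plain stack of stride-2, padding="same" convolutions (the full stack is not visible in this diff):

def feature_map_size(image_size: int, num_stride2_convs: int) -> int:
    """Spatial size of the feature map just before Flatten()."""
    size = image_size
    for _ in range(num_stride2_convs):
        size = (size + 1) // 2  # a stride-2 "same" convolution halves, rounding up
    return size

# Before the commit: a 64x64 input through k stride-2 convolutions.
# After the commit:  a 32x32 input through k-1 stride-2 convolutions.
# Both give the same pre-flatten spatial size for any k >= 1, so the Dense
# layers that follow (and the decoder's reshape) see the same grid; only the
# number of channels changes via NUM_FEATURES.
for k in range(1, 5):
    assert feature_map_size(64, k) == feature_map_size(32, k - 1)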
@@ -665,7 +657,7 @@
 "name": "python",
 "nbconvert_exporter": "python",
 "pygments_lexer": "ipython3",
-"version": "3.9.6"
+"version": "3.8.10"
 },
 "vscode": {
 "interpreter": {
