import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.keras.layers import Input, Dense, Lambda
from tensorflow.keras.models import Model
from tensorflow.keras import backend as K
from tensorflow.keras.datasets import mnist

# Load MNIST, scale pixels to [0, 1], and flatten each 28x28 image to a 784-vector
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = x_train.astype('float32') / 255.
x_test = x_test.astype('float32') / 255.
x_train = x_train.reshape((len(x_train), np.prod(x_train.shape[1:])))
x_test = x_test.reshape((len(x_test), np.prod(x_test.shape[1:])))

# VAE parameters
input_dim = 784          # flattened 28x28 MNIST image
intermediate_dim = 256   # hidden layer size in encoder and decoder
latent_dim = 2           # 2-D latent space so it can be plotted directly
batch_size = 100
epochs = 50

# Encoder
inputs = Input(shape=(input_dim,))
h = Dense(intermediate_dim, activation='relu')(inputs)
z_mean = Dense(latent_dim)(h)
z_log_sigma = Dense(latent_dim)(h)

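# Reparameterization trick: sample z = z_mean + exp(z_log_sigma) * epsilon with
# fixed Gaussian noise epsilon, so the sampling step stays differentiable and
# gradients can flow back into z_mean and z_log_sigma.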
def sampling(args):
    z_mean, z_log_sigma = args
    epsilon = K.random_normal(shape=(K.shape(z_mean)[0], latent_dim),
                              mean=0., stddev=0.1)
    return z_mean + K.exp(z_log_sigma) * epsilon

z = Lambda(sampling)([z_mean, z_log_sigma])

# Decoder: defined as standalone layer objects so the same trained weights can be
# reused later by the generator that decodes arbitrary latent points
decoder_h = Dense(intermediate_dim, activation='relu')
decoder_mean = Dense(input_dim, activation='sigmoid')
h_decoded = decoder_h(z)
x_decoded_mean = decoder_mean(h_decoded)

# VAE model
vae = Model(inputs, x_decoded_mean)

# Loss function and model compilation
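# The VAE loss is the sum of two terms:
#   - a reconstruction term: binary cross-entropy between input and reconstruction,
#     scaled by input_dim so it is summed over all 784 pixels instead of averaged
#   - a KL term that regularizes the approximate posterior towards the N(0, I) prior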
xent_loss = input_dim * tf.keras.losses.binary_crossentropy(inputs, x_decoded_mean)
kl_loss = - 0.5 * K.sum(1 + z_log_sigma - K.square(z_mean) - K.exp(z_log_sigma), axis=-1)
vae_loss = K.mean(xent_loss + kl_loss)

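# Because the complete loss is attached via add_loss, compile() takes no separate
# loss argument; the targets passed to fit() below are not used by the loss itself.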
vae.add_loss(vae_loss)
vae.compile(optimizer='rmsprop')

# Train the VAE
vae.fit(x_train, x_train, shuffle=True, epochs=epochs, batch_size=batch_size, validation_data=(x_test, x_test))

# Visualize the latent space and reconstructed images
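# The encoder model is built on the same graph nodes as the trained VAE, so it
# shares the trained weights and maps each test image to its latent mean.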
encoder = Model(inputs, z_mean)

x_test_encoded = encoder.predict(x_test, batch_size=batch_size)
plt.figure(figsize=(6, 6))
plt.scatter(x_test_encoded[:, 0], x_test_encoded[:, 1], c=y_test)
plt.colorbar()
plt.title("Latent space of the VAE")
plt.xlabel("Dimension 1")
plt.ylabel("Dimension 2")
plt.savefig("VAE_latent_space.svg")
plt.show()

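# Standalone generator: reuse the trained decoder layers on a fresh latent input so
# arbitrary points of the latent space can be decoded into images.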
decoder_input = Input(shape=(latent_dim,))
_h_decoded = decoder_h(decoder_input)
_x_decoded_mean = decoder_mean(_h_decoded)
generator = Model(decoder_input, _x_decoded_mean)

n = 15           # digits per row/column of the output grid
digit_size = 28
figure = np.zeros((digit_size * n, digit_size * n))

# Regular grid over the 2-D latent space
grid_x = np.linspace(-4, 4, n)
grid_y = np.linspace(-4, 4, n)

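# Decode every grid point into a 28x28 digit and tile the results into one image.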
for i, yi in enumerate(grid_x):
    for j, xi in enumerate(grid_y):
        z_sample = np.array([[xi, yi]])
        x_decoded = generator.predict(z_sample)
        digit = x_decoded[0].reshape(digit_size, digit_size)
        figure[i * digit_size: (i + 1) * digit_size,
               j * digit_size: (j + 1) * digit_size] = digit

plt.figure(figsize=(10, 10))
plt.imshow(figure)
plt.title("VAE reconstructions across the latent space")
plt.xlabel("Latent space dimension 1")
plt.ylabel("Latent space dimension 2")
plt.show()

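# Optional sanity check (a sketch, assuming the objects defined above): reconstruct
# a few test digits with the trained VAE and show them next to the original inputs.
reconstructions = vae.predict(x_test[:10], batch_size=batch_size)
plt.figure(figsize=(20, 4))
for i in range(10):
    ax = plt.subplot(2, 10, i + 1)           # top row: original digit
    plt.imshow(x_test[i].reshape(digit_size, digit_size), cmap='gray')
    ax.axis('off')
    ax = plt.subplot(2, 10, 10 + i + 1)      # bottom row: VAE reconstruction
    plt.imshow(reconstructions[i].reshape(digit_size, digit_size), cmap='gray')
    ax.axis('off')
plt.show()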