submission ready
parent 94be76a71b
commit 571c5e03f4
@@ -20,58 +20,61 @@ def sigmoid(x):
 class RBM:
 
     def __init__(self, visible_size: int, hidden_size: int, learnrate: float = 0.1, epochs: int = 20):
-        """__init__ Initializes a newly created Restricted Boltzmann Machine.
+        """__init__ Initializes the RBM.
 
         Args:
             visible_size (int): number of neurons in the visible layer
             hidden_size (int): number of neurons in the hidden layer
             learnrate (float, optional): learnrate eta in [0;1]. Defaults to 0.1.
             epochs (int, optional): training epochs. Defaults to 20.
         """
         self.learnrate = learnrate
         self.visible_size = visible_size
         self.hidden_size = hidden_size
         self.epochs = epochs
 
         # initialize/reset learnable attributes
         self.weights = np.random.randn(self.visible_size, self.hidden_size)
         self.visible_bias = np.zeros(self.visible_size) * 0.1
         self.hidden_bias = np.zeros(self.hidden_size) * 0.1
 
     def activate(self, v0):
-        return sigmoid(np.matmul(v0.T, self.weights) + self.hidden_bias)
+        return self.sample(sigmoid(np.matmul(v0.T, self.weights) + self.hidden_bias))
 
     def reactivate(self, h0):
-        return sigmoid(np.matmul(self.weights, h0.T) + self.visible_bias)
+        return self.sample(sigmoid(np.matmul(self.weights, h0.T) + self.visible_bias))
 
+    def sample(self, a):
+        return np.where(a > np.random.uniform(0, 1, a.shape), 1, 0)
+
     def contrastive_divergence(self, v0, h0, v1, h1):
         # calculate gradients
         postive_gradient = np.outer(v0, h0)
-        negative_gradient = np.outer(v1, h0)
+        negative_gradient = np.outer(v1, h1)
 
         # Adjust weights by delta
         self.weights += self.learnrate * (postive_gradient - negative_gradient)
 
         # Adjust biases by delta
         self.visible_bias += self.learnrate * (v0 - v1)
         self.hidden_bias += self.learnrate * (h0 - h1)
 
     def train(self, v0):
         for _ in range(self.epochs):
             # activate hidden layer
             h0 = self.activate(v0)
 
             # reactivate visible layer
             v1 = self.reactivate(h0)
 
             # activate next hidden layer
             h1 = self.activate(v1)
 
             # Adjust weights
             self.contrastive_divergence(v0, h0, v1, h1)
 
     def run(self, v0):
         # activate hidden layer and reconstruct visible layer
         h0 = self.activate(v0)
         v1 = self.reactivate(h0)
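The hunk above replaces the deterministic sigmoid activations with Bernoulli-sampled binary states (via the new sample method) and corrects the negative gradient to use the resampled hidden state h1. As a sanity check, here is a minimal standalone sketch of one CD-1 step with the same shapes and update rule; the toy sizes, the rng seed, and the example input are illustrative assumptions, not part of the commit:

# Minimal standalone sketch of one CD-1 step (illustrative; not part of the commit).
import numpy as np

def sigmoid(x):
    return 1 / (1 + np.exp(-x))

rng = np.random.default_rng(0)
visible_size, hidden_size, learnrate = 6, 4, 0.1   # assumed toy sizes

weights = rng.standard_normal((visible_size, hidden_size))
visible_bias = np.zeros(visible_size)
hidden_bias = np.zeros(hidden_size)

v0 = rng.integers(0, 2, visible_size).astype(float)  # assumed binary input vector

# positive phase: sample hidden states from the data
h0 = (sigmoid(v0 @ weights + hidden_bias) > rng.uniform(size=hidden_size)).astype(float)
# negative phase: reconstruct the visible layer, then resample the hidden layer
v1 = (sigmoid(weights @ h0 + visible_bias) > rng.uniform(size=visible_size)).astype(float)
h1 = (sigmoid(v1 @ weights + hidden_bias) > rng.uniform(size=hidden_size)).astype(float)

# contrastive divergence update (note the corrected np.outer(v1, h1) term)
weights += learnrate * (np.outer(v0, h0) - np.outer(v1, h1))
visible_bias += learnrate * (v0 - v1)
hidden_bias += learnrate * (h0 - h1)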
@@ -81,22 +84,24 @@ class RBM:
 rbm = RBM(28 ** 2, 100, 0.2, epochs=2)
 
 for i in range(100, 600):
     # normalize mnist data and train
     number = mnist[i] / 255
     rbm.train(number)
 
 # plot results
 rows, columns = (9, 9)
 
 fig = plt.figure(figsize=(10, 7))
-fig.canvas.manager.set_window_title("Reconstruction of MNIST Numbers using a Restricted Boltzmann Machine")
+fig.canvas.manager.set_window_title(
+    "Reconstruction of the MNIST dataset with a Restricted Boltzmann Machine")
 
 for i in range((rows * columns)):
     if i % 3 == 0:
         number = mnist[i] / 255
         (hidden, visible) = rbm.run(number)
 
-        results = [hidden.reshape((10, 10)), visible.reshape((28, 28)), number.reshape((28, 28))]
+        results = [hidden.reshape((10, 10)), visible.reshape(
+            (28, 28)), number.reshape((28, 28))]
 
         for j, item in enumerate(results):
             fig.add_subplot(rows, columns, i + j + 1)
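The second hunk ends inside the subplot loop, so the diff does not show how each item is drawn. A hypothetical continuation, assuming matplotlib's imshow is used to render the hidden activations, the reconstruction, and the original digit side by side (ax, cmap="gray", and plt.show() are assumptions, not shown in the commit):

# Hypothetical continuation of the plotting loop (not shown in the diff above).
for j, item in enumerate(results):
    ax = fig.add_subplot(rows, columns, i + j + 1)
    ax.imshow(item, cmap="gray")  # hidden activations, reconstruction, original digit
    ax.axis("off")

plt.show()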
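The script also indexes a flat 784-pixel `mnist` array that is not defined in either hunk. One plausible way to obtain such an array, assuming scikit-learn is available, is sketched below; the fetch_openml call and the variable name are assumptions about code outside the diff:

# Assumed data loading (outside the diff): 70000 x 784 array of pixel values 0-255.
from sklearn.datasets import fetch_openml

mnist = fetch_openml("mnist_784", version=1, as_frame=False).data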