forked from 2211275/gnn
main
romanamo 2024-06-05 17:41:15 +02:00
parent bea4c2a20c
commit 5122ca3936
2 changed files with 47 additions and 6 deletions

View File

@@ -4,7 +4,7 @@ import matplotlib.pyplot as plt

 def load_data():
-    df_orig_train = pd.read_csv('mnist.csv')
+    df_orig_train = pd.read_csv('uebungen/aufgabe3/mnist.csv')

     df_digits = df_orig_train.drop('label', axis=1)
     return df_digits.to_numpy()
@@ -39,13 +39,10 @@ class RBM:
         self.hidden_bias = np.zeros(self.hidden_size) * 0.1

     def activate(self, v0):
-        return self.sample(sigmoid(np.matmul(v0.T, self.weights) + self.hidden_bias))
+        return sigmoid(np.matmul(v0.T, self.weights) + self.hidden_bias)

     def reactivate(self, h0):
-        return self.sample(sigmoid(np.matmul(self.weights, h0.T) + self.visible_bias))
-
-    def sample(self, a):
-        return np.where(a > np.random.uniform(0, 1, a.shape), 1, 0)
+        return sigmoid(np.matmul(self.weights, h0.T) + self.visible_bias)

     def contrastive_divergence(self, v0, h0, v1, h1):
         # Gradient
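This hunk makes activate and reactivate return the sigmoid activation probabilities directly instead of binary samples, and drops the sample helper entirely. The diff view cuts off right at the start of contrastive_divergence, so for orientation only: below is a minimal sketch of what a textbook CD-1 update with this signature could look like. It is not the repository's actual implementation, and the learning_rate attribute is a hypothetical.

    # Not part of the commit: a sketch of a standard CD-1 update matching
    # the contrastive_divergence(self, v0, h0, v1, h1) signature above.
    # learning_rate is a hypothetical attribute, not taken from the repo.
    def contrastive_divergence(self, v0, h0, v1, h1):
        # Gradient: data statistics minus reconstruction statistics
        positive_grad = np.outer(v0, h0)  # <v h> under the data
        negative_grad = np.outer(v1, h1)  # <v h> under the reconstruction
        self.weights += self.learning_rate * (positive_grad - negative_grad)
        self.visible_bias += self.learning_rate * (v0 - v1)
        self.hidden_bias += self.learning_rate * (h0 - h1)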

View File

@@ -0,0 +1,44 @@
+import numpy as np
+import matplotlib.pyplot as plt
+
+start = np.array([0, 0])
+bias = np.array([-3.37, 0.125])
+
+weights = np.array([
+    [-4, 1.5],
+    [-1.5, 0]
+])
+
+
+def activate(input):
+    # calculate activation as matrix
+    # o1 = w11 * o1 + w12 * o2 + b1
+    # o2 = w21 * o1 + w22 * o2 + b2
+    return np.matmul(weights, input) + bias
+
+
+def predict(n):
+    current_output = start.copy()
+    points = np.zeros((2, n))
+    for i in range(n):
+        # calculate output with tanh(x)
+        current_output = np.tanh(activate(current_output))
+        # save datapoint
+        points[:, i] = current_output.copy()
+    return points
+
+
+timespan = 50
+timespan_range = range(timespan)
+
+predictions = predict(timespan)
+
+# fetch o1, o2 from datapoint prediction
+o1 = predictions[0, :]
+o2 = predictions[1, :]
+
+plt.title("Recurrent Neural Network")
+plt.plot(timespan_range, o1)
+plt.plot(timespan_range, o2)
+plt.legend(["o1", "o2"], loc="upper left")
+plt.show()
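The new file iterates the two-neuron recurrence o <- tanh(W o + b) from the zero state and plots both output components over 50 steps. As a follow-up, here is a minimal standalone sketch (not part of the commit; it re-declares the same weights and bias, and the step budget and tolerance are arbitrary choices) that checks numerically whether that recurrence settles to a fixed point or keeps moving, e.g. on a periodic orbit:

    # Not part of the commit: numeric check of the recurrence o <- tanh(W o + b)
    # using the same constants as the script above.
    import numpy as np

    bias = np.array([-3.37, 0.125])
    weights = np.array([
        [-4, 1.5],
        [-1.5, 0]
    ])

    state = np.zeros(2)
    for step in range(10_000):
        new_state = np.tanh(weights @ state + bias)
        if np.allclose(new_state, state, atol=1e-12):
            print(f"fixed point after {step} steps: {new_state}")
            break
        state = new_state
    else:
        # never settled within the budget, e.g. a periodic orbit
        print(f"no fixed point found; last two states: {state}, {new_state}")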