forked from 2211275/gnn
added uebung3 base
parent a4229dc2ac
commit 49da13274c
@@ -0,0 +1,42 @@
import numpy as np

# Visible and hidden layer states (placeholder data: all ones)
visible = np.ones((10, 1))
hidden = np.ones((5, 1))

# Biases for both layers, initialised to 0.1
visible_bias = np.ones((len(visible), 1)) * 0.1
hidden_bias = np.ones((len(hidden), 1)) * 0.1

# Weight matrix connecting the visible and hidden layers
weights = np.random.rand(len(visible), len(hidden))

phases = 1
learnrate = 0.2


def sigmoid(x):
    return 1 / (1 + np.exp(-x))  # sigmoid function

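# For reference: the contrastive-divergence (CD-1) update implemented below is
#   delta_W = learnrate * (v @ h.T - v_recon @ h_recon.T)
# where h is the hidden activation given the data v, and v_recon, h_recon
# come from one reconstruction (Gibbs) step.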
for i in range(phases):
    # 1. Compute hidden activations from the visible layer
    activation = sigmoid(np.matmul(weights.T, visible) + hidden_bias)

    # 2. Compute outer product v h^T (positive gradient)
    positive_gradient = np.matmul(visible, activation.T)

    # 3. Reconstruct the visible layer, then recompute the hidden activations
    reconstructed = sigmoid(np.matmul(weights, activation) + visible_bias)
    reconstructed_activation = sigmoid(np.matmul(weights.T, reconstructed) + hidden_bias)

    # 4. Compute outer product v' h'^T (negative gradient)
    negative_gradient = np.matmul(reconstructed, reconstructed_activation.T)

    # 5. Update weight matrix using gradients
    delta_weights = learnrate * (positive_gradient - negative_gradient)
    weights += delta_weights

    # 6. Update bias for visible and hidden layer
    delta_visible_bias = learnrate * (visible - reconstructed)
    delta_hidden_bias = learnrate * (activation - reconstructed_activation)
    visible_bias += delta_visible_bias
    hidden_bias += delta_hidden_bias
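A minimal smoke test for the step above, assuming the script has just run with phases = 1; the shape assertions and the final print are illustrative additions, not part of the commit:

# Hypothetical smoke test (not part of the committed file)
assert activation.shape == (len(hidden), 1)
assert reconstructed.shape == (len(visible), 1)
assert positive_gradient.shape == negative_gradient.shape == weights.shape
print("weights after one CD-1 update:\n", weights)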