new repo structure

parent 176ed663c1
commit e6e35776b8

@@ -172,5 +172,5 @@ cython_debug/
 # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
 # and can be added to the global gitignore or merged into this file. For a more nuclear
 # option (not recommended) you can uncomment the following to ignore the entire idea folder.
-#.idea/
+.idea/
README.md | 14

@@ -1,3 +1,15 @@
 # gnn

-Code snippets for the lecture "Grundlagen Neuronale Netze" in SS24.
+Code snippets for the lecture "Grundlagen Neuronale Netze" in SS24.
+
+## Tasks
+
+| No. | Task | Comment |
+|-----|------|---------|
+| 1 | Change the learning rule so that it is the perceptron learning rule. | |
+| 2 | Change the backpropagation algorithm so that it performs the gradient descent entirely with iRProp−. | |
+| 3 | Implement a Restricted Boltzmann Machine as the simplest form of an autoencoder. The MNIST digits are applied to the input. By activating the hidden layer and reactivating the visible layer, it should be possible to reconstruct the MNIST digits. Now reduce the number of hidden neurons to 100. Can the digits still be reconstructed well enough to remain recognizable? | |
+| 4 | | |
+| 5 | | |
+| 6 | | |
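Task 1 above asks for the perceptron learning rule. As a point of reference, a minimal sketch of that update could look like the following; the names (`perceptron_update`, `weights`, `target`) are illustrative and not taken from the repository code:

```python
import numpy as np

def perceptron_update(weights, x, target, learnrate=0.1):
    """One perceptron learning step: the weights change only when the prediction is wrong."""
    prediction = 1 if np.dot(weights, x) >= 0 else 0
    # w <- w + lr * (target - prediction) * x
    return weights + learnrate * (target - prediction) * x
```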
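Task 2 refers to iRProp− (Igel and Hüsken's Rprop variant), which adapts a per-weight step size from the sign of successive gradients and, after a sign change, simply zeroes the stored gradient instead of reverting the weight. A rough, self-contained sketch of such an update rule, using the usual default hyperparameters and names chosen here for illustration only:

```python
import numpy as np

class IRpropMinus:
    """Minimal iRProp- update for one parameter array (illustrative sketch, not the repository's code)."""

    def __init__(self, shape, eta_plus=1.2, eta_minus=0.5, step_init=0.01, step_min=1e-6, step_max=50.0):
        self.eta_plus, self.eta_minus = eta_plus, eta_minus
        self.step_min, self.step_max = step_min, step_max
        self.step = np.full(shape, step_init)   # per-weight step size
        self.prev_grad = np.zeros(shape)        # gradient from the previous iteration

    def update(self, weights, grad):
        sign_change = self.prev_grad * grad
        # same sign: grow the step size; opposite sign: shrink it and drop this gradient
        self.step = np.where(sign_change > 0, np.minimum(self.step * self.eta_plus, self.step_max), self.step)
        self.step = np.where(sign_change < 0, np.maximum(self.step * self.eta_minus, self.step_min), self.step)
        grad = np.where(sign_change < 0, 0.0, grad)  # the "minus" variant: zero the gradient, no weight revert
        self.prev_grad = grad
        return weights - np.sign(grad) * self.step
```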
File diff suppressed because it is too large
@@ -4,7 +4,7 @@ import matplotlib.pyplot as plt


 def load_data():
-    df_orig_train = pd.read_csv('mnist_test_final.csv')
+    df_orig_train = pd.read_csv('mnist.csv')
     df_digits = df_orig_train.drop('label', axis=1)

     return df_digits.to_numpy()
@@ -19,8 +19,8 @@ def sigmoid(x):

 class RBM:

-    def __init__(self, visible_size: int, hidden_size: int, learnrate: float = 0.1, epochs=20):
-        """__init__ Initializes a newly created Ristricted Bolzmann Machine.
+    def __init__(self, visible_size: int, hidden_size: int, learnrate: float = 0.1, epochs: int = 20):
+        """__init__ Initializes a newly created Restricted Boltzmann Machine.

         Args:
             visible_size (int): amount of neurons inside the visible layer
@@ -33,9 +33,6 @@ class RBM:
         self.hidden_size = hidden_size
         self.epochs = epochs

         self.reset()

     def reset(self):
         # initialize/reset learnable attributes
         self.weights = np.random.randn(self.visible_size, self.hidden_size)
         self.visible_bias = np.zeros(self.visible_size) * 0.1
@@ -64,10 +61,10 @@ class RBM:
         # activate hidden layer
         h0 = self.activate(v0)

-        # reactivate visible layer
+        # reactivate visible layer
         v1 = self.reactivate(h0)

-        #activate next hidden layer
+        # activate next hidden layer
         h1 = self.activate(v1)

         # Adjust weights
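The hunk above walks through one contrastive-divergence (CD-1) step: activate the hidden layer from v0, reactivate the visible layer, activate the next hidden layer, then adjust the weights. The update behind the `# Adjust weights` comment is not visible in this excerpt; for a single flattened sample it typically has the following shape (a hedged sketch with illustrative names, not the repository's actual code):

```python
import numpy as np

def cd1_weight_update(weights, v0, h0, v1, h1, learnrate=0.1):
    """CD-1 weight update for one sample: positive phase (data) minus negative phase (reconstruction)."""
    return weights + learnrate * (np.outer(v0, h0) - np.outer(v1, h1))
```

Bias updates, where used, follow the same pattern: a visible bias moves with `learnrate * (v0 - v1)` and a hidden bias with `learnrate * (h0 - h1)`.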
@@ -80,34 +77,30 @@ class RBM:

         return h0, v1

-rbm = RBM(28**2, 256, 0.1, epochs=3)
-
-def validate(idx):
-    #flatten and normalize mnist data
-    test = mnist[idx].flatten()/255
-
-    # train bolzmann machine and run
-    rbm.train(test)
-    (hid, out) = rbm.run(test)
-
-    return (hid.reshape((16, 16)), out.reshape((28, 28)))
+rbm = RBM(28 ** 2, 100, 0.2, epochs=1)
+
+for i in range(100):
+    # normalize mnist data and train
+    number = mnist[i] / 255
+    rbm.train(number)

 # plot results
-rows, columns = (4, 6)
+rows, columns = (9, 9)

 fig = plt.figure(figsize=(10, 7))
 fig.canvas.manager.set_window_title("Reconstruction of MNIST Numbers using a Restricted Boltzmann Machine")

 for i in range((rows * columns)):
-    if i % 2 == 0:
-        (hid, out) = validate(i)
-
-        # hidden layer
-        fig.add_subplot(rows, columns, i+1)
-        plt.imshow(hid, cmap='gray')
-        plt.axis('off')
-
-        # visible layer
-        fig.add_subplot(rows, columns, i+2)
-        plt.imshow(out, cmap='gray')
-        plt.axis('off')
+    if i % 3 == 0:
+        number = mnist[i] / 255
+        (hidden, visible) = rbm.run(number)
+
+        results = [hidden.reshape((10, 10)), visible.reshape((28, 28)), number.reshape((28, 28))]
+
+        for j, item in enumerate(results):
+            fig.add_subplot(rows, columns, i + j + 1)
+            plt.imshow(item, cmap='gray')
+            plt.axis('off')

 plt.show()