import random

import numpy as np
from matplotlib import pyplot as plt

# Sigmoid activation function and its derivative
def sigmoid(x):
    return 1 / (1 + np.exp(-x))


def sigmoid_derivative(x):
    # Expects x to already be a sigmoid *output*, i.e. x = sigmoid(z),
    # so that x * (1 - x) equals d/dz sigmoid(z)
    return x * (1 - x)
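
# Optional sanity check (a minimal sketch): the closed-form derivative above
# should match a central-difference estimate at a sample point
_z = 0.3
_eps = 1e-6
_numeric = (sigmoid(_z + _eps) - sigmoid(_z - _eps)) / (2 * _eps)
assert abs(sigmoid_derivative(sigmoid(_z)) - _numeric) < 1e-8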

# Generate training data for the target function f(x, y) = x * y;
# the inputs are drawn from [0, 1], so no further normalization is needed
inputs = []
targets = []

for i in range(10000):  # a larger training set improves the fit
    x = round(random.uniform(0, 1), 3)
    y = round(random.uniform(0, 1), 3)
    xy = x * y
    inputs.append([x, y])
    targets.append([xy])

inputs = np.array(inputs)
targets = np.array(targets)
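
# Optional shape check: 10,000 samples, 2 input features, 1 target value each
assert inputs.shape == (10000, 2)
assert targets.shape == (10000, 1)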

# Hyperparameters
learning_rate = 0.00001  # small, because gradients are summed over all 10,000 samples
epochs = 1000

# Initialize the weights and biases
np.random.seed(42)
input_layer_neurons = 2
hidden_layer_neurons = 4
output_neurons = 1

weights_input_hidden = np.random.uniform(-0.5, 0.5, (input_layer_neurons, hidden_layer_neurons))
weights_hidden_output = np.random.uniform(-0.5, 0.5, (hidden_layer_neurons, output_neurons))

bias_hidden = np.random.uniform(-0.5, 0.5, (1, hidden_layer_neurons))
bias_output = np.random.uniform(-0.5, 0.5, (1, output_neurons))
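
# Shapes through the forward pass, for reference (N = number of samples):
#   (N, 2) @ (2, 4) + (1, 4) -> (N, 4)  hidden activations
#   (N, 4) @ (4, 1) + (1, 1) -> (N, 1)  network output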

# Train the network with full-batch gradient descent
for epoch in range(epochs):
    # Forward pass
    hidden_layer_input = np.dot(inputs, weights_input_hidden) + bias_hidden
    hidden_layer_output = sigmoid(hidden_layer_input)

    output_layer_input = np.dot(hidden_layer_output, weights_hidden_output) + bias_output
    predicted_output = sigmoid(output_layer_input)

    # Compute the error
    error = targets - predicted_output

    # Backpropagation
    d_predicted_output = error * sigmoid_derivative(predicted_output)

    error_hidden_layer = d_predicted_output.dot(weights_hidden_output.T)
    d_hidden_layer = error_hidden_layer * sigmoid_derivative(hidden_layer_output)

    # Update weights and biases
    weights_hidden_output += hidden_layer_output.T.dot(d_predicted_output) * learning_rate
    weights_input_hidden += inputs.T.dot(d_hidden_layer) * learning_rate

    bias_output += np.sum(d_predicted_output, axis=0, keepdims=True) * learning_rate
    bias_hidden += np.sum(d_hidden_layer, axis=0, keepdims=True) * learning_rate

    # Optional: show progress ten times over the 1000 epochs
    if epoch % 100 == 0:
        loss = np.mean(np.square(error))
        print(f"Epoch {epoch}, Loss: {loss}")
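
# For reference, the loop above implements the standard delta rule for a
# 2-4-1 sigmoid network with squared error: with error = targets - output,
#   delta_out    = error * output * (1 - output)
#   delta_hidden = (delta_out @ W_out.T) * hidden * (1 - hidden)
# and adding activation.T @ delta * learning_rate to each weight matrix
# moves the weights downhill on the mean squared error.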

# Generate test data and evaluate the trained network
test_inputs = []
for i in range(10):
    x = round(random.uniform(0, 1), 3)
    y = round(random.uniform(0, 1), 3)
    test_inputs.append([x, y])

test_inputs = np.array(test_inputs)

hidden_layer_input = np.dot(test_inputs, weights_input_hidden) + bias_hidden
hidden_layer_output = sigmoid(hidden_layer_input)

output_layer_input = np.dot(hidden_layer_output, weights_hidden_output) + bias_output
predicted_output = sigmoid(output_layer_input)

print("\nTest Results:")
for i, test_input in enumerate(test_inputs):
    print(f"Input: {test_input}, Predicted Output: {predicted_output[i][0]:.3f}, Actual Output: {test_input[0]*test_input[1]:.3f}")
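
# Optional aggregate metric (a minimal sketch, assuming the arrays above):
# mean absolute error of the predictions over the ten test points
test_mae = np.mean(np.abs(predicted_output[:, 0] - test_inputs[:, 0] * test_inputs[:, 1]))
print(f"Test MAE: {test_mae:.4f}")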

# Plot the test predictions in 3D
fig = plt.figure()
ax = fig.add_subplot(projection='3d')

X = []
Y = []
Z = []

for i, test_input in enumerate(test_inputs):
    X.append(test_input[0])
    Y.append(test_input[1])
    Z.append(predicted_output[i][0])  # network output for this test point

Z = np.array(Z)

# plot_wireframe requires 2-D grid arrays; for ten scattered test points a
# 3-D scatter plot is the matching call
ax.scatter(X, Y, Z)
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('predicted x * y')

plt.show()
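
# Optional sketch of the learned surface with plot_wireframe, which needs
# 2-D grid arrays (the 20x20 grid size is an arbitrary choice, assuming the
# trained weights above)
gx, gy = np.meshgrid(np.linspace(0, 1, 20), np.linspace(0, 1, 20))
grid = np.column_stack([gx.ravel(), gy.ravel()])
grid_hidden = sigmoid(np.dot(grid, weights_input_hidden) + bias_hidden)
grid_output = sigmoid(np.dot(grid_hidden, weights_hidden_output) + bias_output).reshape(gx.shape)

fig2 = plt.figure()
ax2 = fig2.add_subplot(projection='3d')
ax2.plot_wireframe(gx, gy, grid_output, rstride=1, cstride=1)
ax2.set_xlabel('x')
ax2.set_ylabel('y')
ax2.set_zlabel('network output')
plt.show()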