added base repo structure

parent 5584be5dee
commit e5b825b10d
@@ -0,0 +1,55 @@
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import numpy as np

# BIAS, x, y
train = np.array([[1,0,0],
                  [1,1,0],
                  [1,0,1],
                  [1,1,1]])
target = np.array([0,0,0,1])  # AND operation
out = np.array([0,0,0,0])
weight = np.random.rand(3)*(0.5)
learnrate = 1.0
grad = np.zeros(3)


def sigmoid(summe):  # transfer function
    return 1.0/(1.0+np.exp(-1.0*summe))


def learn():
    global train, weight, out, target, learnrate
    # compute the neuron output for all 4 training patterns
    out = sigmoid(np.matmul(train, weight))
    # compute the gradient
    grad = np.matmul(train.T,(out-target)) * (out.T.dot(np.subtract(np.ones(4),out)))
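    # note: out.T.dot(1-out) evaluates to the scalar sum(out*(1-out)), so it
    # only rescales the step size; the exact MSE gradient would use the
    # elementwise product train.T @ ((out-target)*out*(1-out))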
    weight -= learnrate*grad  # adjust the weights


def outp(N=100):  # generate the data for the output plot
    global weight
    x = np.linspace(0, 1, N)
    y = np.linspace(0, 1, N)
    xx, yy = np.meshgrid(x, y)
    oo = sigmoid(weight[0] + weight[1]*xx + weight[2]*yy)
    return xx, yy, oo


def on_close(event):  # window was closed
    exit(0)


plt.ion()
fig = plt.figure()
fig.canvas.mpl_connect('close_event', on_close)
while True:  # infinite loop
    #for i in range(1000):
    learn()    # learn one step
    plt.clf()  # clear the screen
    X, Y, Z = outp()  # generate the plot data
    ax = fig.add_subplot(111, projection='3d')
    # 3D plot of the data
    ax.plot_surface(X, Y, Z, edgecolor='royalblue',
                    lw=0.5, rstride=8, cstride=8, alpha=0.3)
    ax.set_title('Neuron learns the AND function')
    ax.set_xlabel('In[1]')
    ax.set_ylabel('In[2]')
    ax.set_zlabel('Output\nof the neuron')
    ax.set_zlim(0, 1)
    plt.draw()
    plt.pause(0.00001)
@@ -0,0 +1,51 @@
import numpy as np

# sigmoid activation function and its derivative
def sigmoid(x):
    return 1 / (1 + np.exp(-x))  # sigmoid function

def deriv_sigmoid(x):
    return x * (1 - x)  # derivative of the sigmoid (x is already a sigmoid output)

# the XOR problem, input [bias, x, y] and target data
inp = np.array([[1,0,0], [1,0,1], [1,1,0], [1,1,1]])
target = np.array([[0], [1], [1], [0]])

# the architecture of the neural network
inp_size = 3  # input neurons
hid_size = 4  # hidden neurons
out_size = 1  # output neuron

# initialize the weights randomly (mean = 0)
w0 = np.random.random((inp_size, hid_size)) - 0.5
w1 = np.random.random((hid_size, out_size)) - 0.5

# train the network
for i in range(100000):

    # forward pass
    L0 = inp
    L1 = sigmoid(np.matmul(L0, w0))
    L1[:, 0] = 1  # bias neuron in the hidden layer
    L2 = sigmoid(np.matmul(L1, w1))

    # compute the error
    L2_error = target - L2

    # backpropagation
    L2_delta = L2_error * deriv_sigmoid(L2)
    L1_error = np.matmul(L2_delta, w1.T)
    L1_delta = L1_error * deriv_sigmoid(L1)

    # update the weights
    learnrate = 0.1
    w1 += learnrate * np.matmul(L1.T, L2_delta)
    w0 += learnrate * np.matmul(L0.T, L1_delta)

# test the network
L0 = inp
L1 = sigmoid(np.matmul(inp, w0))
L1[:, 0] = 1  # bias neuron in the hidden layer
L2 = sigmoid(np.matmul(L1, w1))

print(L2)
@@ -0,0 +1,67 @@
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import numpy as np

# BIAS, x, y
train = np.array([[1,0,0],
                  [1,1,0],
                  [1,0,1],
                  [1,1,1]])
target = np.array([0,0,0,1])  # AND operation
out = np.array([0,0,0,0])
weight = np.random.rand(3)*(0.5)
delta = np.linspace(0.125,0.125,3)
grad_old = np.zeros(3)
grad_new = np.zeros(3)
eta_plus = 1.2   # factor for increasing the step size
eta_minus = 0.5  # factor for decreasing the step size
delta_max = 50   # maximum weight change
delta_min = 0    # minimum weight change


def sigmoid(summe):  # transfer function
    return 1.0/(1.0+np.exp(-1.0*summe))


def learn():
    global train, weight, out, target, grad_old, grad_new, delta
    # compute the neuron output
    out = sigmoid(np.matmul(train, weight))
    # compute the gradient
    grad_old = np.copy(grad_new)
    grad_new = np.matmul(train.T,(out-target))
    ########### iRprop- #############
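    # iRprop- adapts a separate step size per weight from the SIGN of the
    # gradient alone: if the sign stays the same, the step grows; if it
    # flips, the step shrinks and (unlike Rprop) the gradient is simply
    # zeroed instead of reverting the last weight change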
    for i in range(0,3):
        if grad_old[i]*grad_new[i]>0:  # increase the step size
            delta[i] = min(delta[i]*eta_plus, delta_max)
        if grad_old[i]*grad_new[i]<0:  # decrease the step size
            delta[i] = max(delta[i]*eta_minus, delta_min)
            grad_new[i] = 0  # the only difference from Rprop
    weight -= delta*np.sign(grad_new)  # adjust the weights


def outp(N=100):  # generate the data for the output plot
    global weight
    x = np.linspace(0, 1, N)
    y = np.linspace(0, 1, N)
    xx, yy = np.meshgrid(x, y)
    oo = sigmoid(weight[0] + weight[1]*xx + weight[2]*yy)
    return xx, yy, oo


def on_close(event):  # window was closed
    exit(0)


plt.ion()
fig = plt.figure()
fig.canvas.mpl_connect('close_event', on_close)
while True:  # infinite loop
    learn()    # learn one iRprop- step
    plt.clf()  # clear the screen
    X, Y, Z = outp()  # generate the plot data
    ax = fig.add_subplot(111, projection='3d')
    # 3D plot of the data
    ax.plot_surface(X, Y, Z, edgecolor='royalblue',
                    lw=0.5, rstride=8, cstride=8, alpha=0.3)
    ax.set_title('Neuron learns the AND function with iRprop-')
    ax.set_xlabel('In[1]')
    ax.set_ylabel('In[2]')
    ax.set_zlabel('Output\nof the neuron')
    ax.set_zlim(0, 1)
    plt.draw()
    plt.pause(0.00001)
@@ -0,0 +1,28 @@
import numpy as np
import matplotlib.pyplot as plt


def rect(x,y,sx,sy,col):  # draw a filled rectangle
    xc = np.array([x,x+sx,x+sx,x])
    yc = np.array([y,y,y+sy,y+sy])
    plt.fill(xc, yc, col, edgecolor=col)


def hinton(matrix):  # draw a Hinton diagram
    plt.clf()
    plt.axis('off')
    plt.axis('equal')
    height, width = matrix.shape
    rect(0,0,width,height,'gray')

    # box area encodes the magnitude of a weight, color its sign
    for x in range(width):
        for y in range(height):
            w = matrix[y][x]
            sz = np.sqrt(abs(w)/np.abs(matrix).max()/8)
            col = 'white' if w > 0 else 'black'
            rect(x+0.5-sz, y+0.5-sz, 2*sz, 2*sz, col)


if __name__ == '__main__':
    np.random.seed(8216544)
    # Hinton diagram of a random matrix
    hinton(np.random.rand(20, 20) - 0.5)
    plt.title('Example Hinton diagram 20x20')
    plt.show()
@@ -0,0 +1,24 @@
# import the required libraries
import numpy as np
from sklearn import datasets
from sklearn.manifold import TSNE
import matplotlib.pyplot as plt

# load the digits dataset
digits = datasets.load_digits()
X = digits.data
y = digits.target

# apply t-SNE
tsne = TSNE(n_components=2, random_state=42)
X_tsne = tsne.fit_transform(X)

# visualize the results
plt.figure(figsize=(10, 8))
scatter = plt.scatter(X_tsne[:, 0], X_tsne[:, 1], c=y, cmap='viridis', s=50)
legend1 = plt.legend(*scatter.legend_elements(), title="Classes")
plt.gca().add_artist(legend1)
plt.xlabel('t-SNE feature 1')
plt.ylabel('t-SNE feature 2')
plt.title('t-SNE embedding of the digits dataset')
plt.show()
@@ -0,0 +1,54 @@
import numpy as np
from tensorflow.keras.datasets import fashion_mnist
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Dense, Input
import matplotlib.pyplot as plt

# load the Fashion-MNIST dataset
(x_train, _), (x_test, _) = fashion_mnist.load_data()

# normalize the pixel values to [0, 1]
x_train = x_train.astype('float32') / 255.
x_test = x_test.astype('float32') / 255.

# flatten the images into one-dimensional vectors
x_train = x_train.reshape((len(x_train), np.prod(x_train.shape[1:])))
x_test = x_test.reshape((len(x_test), np.prod(x_test.shape[1:])))

# define the layers of the autoencoder
input_img = Input(shape=(784,))
encoded = Dense(128, activation='relu')(input_img)
encoded = Dense(64, activation='relu')(encoded)
encoded = Dense(32, activation='relu')(encoded)  # 32-dimensional bottleneck code
decoded = Dense(64, activation='relu')(encoded)
decoded = Dense(128, activation='relu')(decoded)
decoded = Dense(784, activation='sigmoid')(decoded)

# build and compile the autoencoder
autoencoder = Model(input_img, decoded)
autoencoder.compile(optimizer='adam', loss='mean_squared_error')

# train the autoencoder to reproduce its input
autoencoder.fit(x_train, x_train, epochs=50, batch_size=256, shuffle=True, validation_data=(x_test, x_test))

# use the autoencoder to reconstruct the test images
reconstructed_imgs = autoencoder.predict(x_test)

# plot the original and reconstructed images
n = 10  # number of images to display
plt.figure(figsize=(20, 4))
for i in range(n):
    # original image
    ax = plt.subplot(2, n, i + 1)
    plt.imshow(x_test[i].reshape(28, 28))
    plt.gray()
    ax.get_xaxis().set_visible(False)
    ax.get_yaxis().set_visible(False)

    # reconstructed image
    ax = plt.subplot(2, n, i + 1 + n)
    plt.imshow(reconstructed_imgs[i].reshape(28, 28))
    plt.gray()
    ax.get_xaxis().set_visible(False)
    ax.get_yaxis().set_visible(False)
plt.show()
@@ -0,0 +1,58 @@
import numpy as np
from tensorflow import keras
from tensorflow.keras import layers
import matplotlib.pyplot as plt

# number of classes
num_classes = 10
# input data: 28x28 pixels x 1 gray value
input_shape = (28, 28, 1)
# load the training and test datasets
(x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()
# scale to values between 0 and 1
x_train = x_train / 255
x_test = x_test / 255

# adjust the shape to (28, 28, 1)
x_train = x_train.reshape(60000, 28, 28, 1)
x_test = x_test.reshape(10000, 28, 28, 1)

# the class numbers have to be converted
# into binary one-hot vectors
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)

# define the network
model = keras.Sequential([
    keras.Input(shape=input_shape),
    layers.Conv2D(32, kernel_size=(3, 3), activation="relu"),
    layers.MaxPooling2D(pool_size=(2, 2)),
    layers.Conv2D(64, kernel_size=(3, 3), activation="relu"),
    layers.MaxPooling2D(pool_size=(2, 2)),
    layers.Flatten(),
    layers.Dropout(0.5),
    layers.Dense(num_classes, activation="softmax"),
])
model.summary()
# compile the model ...
model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"])
# ... and train it ...
history = model.fit(x_train, y_train, batch_size=128, epochs=15, validation_split=0.1)
# ... then evaluate and plot the accuracy ...
plt.plot(history.history['accuracy'], label='training accuracy')
plt.plot(history.history['val_accuracy'], linestyle='dashed', label='validation accuracy')
plt.title('Model accuracy')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend(loc='center right')
plt.savefig("cnn_accuracy.svg")
plt.show()

# ... and plot the loss
plt.plot(history.history['loss'], label='training loss')
plt.plot(history.history['val_loss'], linestyle='dashed', label='validation loss')
plt.title('Model loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(loc='center right')
plt.show()
@@ -0,0 +1,100 @@
import numpy as np
import tensorflow as tf
from tensorflow.keras import layers
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences


class PosEnc(layers.Layer):
    def __init__(self, **kwargs):
        super(PosEnc, self).__init__(**kwargs)

    def build(self, input_shape):
        _, seq_len, d_model = input_shape
        self.positional_encoding = self.get_pos_enc(seq_len, d_model)
        super(PosEnc, self).build(input_shape)

    def call(self, x):
        return x + self.positional_encoding

    @staticmethod
    def get_pos_enc(seq_len, d_model):
        angles = np.arange(seq_len)[:, np.newaxis] / np.power(10000, 2 * np.arange(d_model)[np.newaxis, :] // d_model)
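        # editorial note: because of the integer division "// d_model" the
        # exponent here is only ever 0 or 1, i.e. just two frequency bands
        # are used instead of the usual smooth 1/10000**(2i/d_model) spectrum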
        angles[:, 0::2] = np.sin(angles[:, 0::2])
        angles[:, 1::2] = np.cos(angles[:, 1::2])
        return tf.cast(angles[np.newaxis, ...], tf.float32)


# -- generates character sequences x and target characters y
def gen_train_data(text, tokenizer, seq_len):
    encoded = tokenizer.texts_to_sequences([text])[0]
    sequences = []
    for i in range(seq_len, len(encoded)):
        sequence = encoded[i-seq_len:i+1]
        sequences.append(sequence)
    sequences = np.array(sequences)
    x, y = sequences[:, :-1], sequences[:, -1]
    return x, y


def create_transformer_model(vocab_size, d_model, nhead, max_seq_len, mask):
    inputs = tf.keras.Input(shape=(max_seq_len,))
    embedding = layers.Embedding(input_dim=vocab_size, output_dim=d_model)(inputs)
    pos_encoding = PosEnc()(embedding)
    x = pos_encoding

    # multi-head attention with a residual connection
    attention_output = layers.MultiHeadAttention(num_heads=nhead, key_dim=d_model // nhead)(x, x, attention_mask=mask)
    x = layers.Add()([x, attention_output])
    x = layers.LayerNormalization()(x)
    x = layers.Dropout(0.1)(x)

    # two dense layers with a residual connection
    d_1 = layers.Dense(d_model, activation='relu')(x)
    d_2 = layers.Dense(d_model, activation='relu')(d_1)
    x = layers.Add()([x, d_2])
    x = layers.LayerNormalization()(x)
    x = layers.Dropout(0.1)(x)

    logits = layers.Dense(vocab_size, activation='softmax')(x[:, -1, :])

    model = tf.keras.Model(inputs=inputs, outputs=logits)
    return model


# --- parameters
train_text = "Welches Tier ist das größte? Der Wal. Welches Tier ist das kleinste? Der Einzeller."
seq_len = 32     # sequence length
batch_size = 32  # batch size
epochs = 100     # training epochs

# --- the tokenizer encodes the following characters
chars = "\n,.;:-/!?$&'ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyzßöäüÖÄÜ0123456789"
tokenizer = Tokenizer(char_level=True, filters='', lower=False)
tokenizer.fit_on_texts(chars)

# --- in Keras the mask has dtype bool
mask = np.ones((seq_len, seq_len), dtype=bool)
mask[np.triu_indices(seq_len, 1)] = False
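# editorial note: True means "may attend"; clearing the upper triangle
# yields a causal mask, so position i only attends to positions <= i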

# --- generate the training data
x_train, y_train = gen_train_data(train_text, tokenizer, seq_len)

# --- build the transformer model
vocab_size = len(tokenizer.word_index)+1
d_model = vocab_size  # dimension of the model output
nhead = 4             # 1 x multi-head attention with 4 heads
model = create_transformer_model(vocab_size, d_model, nhead, seq_len, mask)
model.compile(optimizer='adam', loss=tf.keras.losses.SparseCategoricalCrossentropy(), metrics=['accuracy'])

# --- train the model
model.fit(x_train, y_train, epochs=epochs, batch_size=batch_size)

# --- generate text
text = "Welches Bier ist das kleinste?"
for _ in range(seq_len):
    enc_txt = tokenizer.texts_to_sequences([text])[0]
    padded_txt = pad_sequences([enc_txt], maxlen=seq_len, padding='pre', truncating='pre')
    logits = model.predict(padded_txt)   # forward pass
    next_char = np.argmax(logits[0, :])  # take the most probable character
    next_char = tokenizer.index_word[next_char]
    text += next_char                    # append the character
    print("Generated text:", text)
    if next_char=='.':  # full stop = stop!
        break
@@ -0,0 +1,95 @@
import tensorflow as tf
from tensorflow.keras.layers import Dense, LeakyReLU, Reshape, Flatten
from tensorflow.keras.models import Sequential
import matplotlib.pyplot as plt
import numpy as np

# load the MNIST digits and normalize them
(x_train, _), (_, _) = tf.keras.datasets.mnist.load_data()

x_train = x_train.reshape(x_train.shape[0], 28 * 28).astype('float32')
x_train = (x_train - 127.5) / 127.5  # normalize the images to [-1, 1]

buffer_size = x_train.shape[0]
batch_size = 256
train_dataset = tf.data.Dataset.from_tensor_slices(x_train).shuffle(buffer_size).batch(batch_size)

# generator
def create_generator():
    model = Sequential([
        Dense(256, input_shape=(100,), activation=LeakyReLU(0.2)),
        Dense(512, activation=LeakyReLU(0.2)),
        Dense(1024, activation=LeakyReLU(0.2)),
        Dense(28 * 28, activation='tanh')
    ])
    return model

# discriminator
def create_discriminator():
    model = Sequential([
        Dense(1024, input_shape=(28 * 28,), activation=LeakyReLU(0.2)),
        Dense(512, activation=LeakyReLU(0.2)),
        Dense(256, activation=LeakyReLU(0.2)),
        Dense(1)  # one logit per image, matching from_logits=True below
    ])
    return model

generator = create_generator()
discriminator = create_discriminator()

# loss and optimizers
cross_entropy = tf.keras.losses.BinaryCrossentropy(from_logits=True)
generator_optimizer = tf.keras.optimizers.Adam(1e-4)
discriminator_optimizer = tf.keras.optimizers.Adam(1e-4)

# one training step
@tf.function
def train_step(images):
    noise = tf.random.normal([batch_size, 100])

    with tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape:
        generated_images = generator(noise, training=True)

        real_output = discriminator(images, training=True)
        fake_output = discriminator(generated_images, training=True)
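        # the generator wants the discriminator to call its fakes real (1);
        # the discriminator wants 1 for real images and 0 for fakes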
        gen_loss = cross_entropy(tf.ones_like(fake_output), fake_output)
        disc_loss = cross_entropy(tf.ones_like(real_output), real_output) + cross_entropy(tf.zeros_like(fake_output), fake_output)

    gradients_of_generator = gen_tape.gradient(gen_loss, generator.trainable_variables)
    gradients_of_discriminator = disc_tape.gradient(disc_loss, discriminator.trainable_variables)

    generator_optimizer.apply_gradients(zip(gradients_of_generator, generator.trainable_variables))
    discriminator_optimizer.apply_gradients(zip(gradients_of_discriminator, discriminator.trainable_variables))

# training loop
def train(dataset, epochs):
    for epoch in range(epochs):
        print(f'Epoch {epoch+1}')
        for batch in dataset:
            train_step(batch)

epochs = 100
train(train_dataset, epochs)

# generate 100 images
num_images = 100
noise = tf.random.normal([num_images, 100])
generated_images = generator(noise, training=False)

# rescale and reshape the generated images
generated_images = (generated_images + 1) / 2
generated_images = generated_images.numpy().reshape(num_images, 28, 28)

# visualize the images
rows, cols = 10, 10
fig, axes = plt.subplots(rows, cols, figsize=(15, 15))

for i in range(rows):
    for j in range(cols):
        ax = axes[i, j]
        ax.imshow(generated_images[i * cols + j], cmap='gray')
        ax.axis('off')

plt.show()
@@ -0,0 +1,95 @@
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.keras.layers import Input, Dense, Lambda
from tensorflow.keras.models import Model
from tensorflow.keras import backend as K
from tensorflow.keras.datasets import mnist

# load and preprocess the data
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = x_train.astype('float32') / 255.
x_test = x_test.astype('float32') / 255.
x_train = x_train.reshape((len(x_train), np.prod(x_train.shape[1:])))
x_test = x_test.reshape((len(x_test), np.prod(x_test.shape[1:])))

# VAE parameters
input_dim = 784
intermediate_dim = 256
latent_dim = 2
batch_size = 100
epochs = 50

# encoder
inputs = Input(shape=(input_dim,))
h = Dense(intermediate_dim, activation='relu')(inputs)
z_mean = Dense(latent_dim)(h)
z_log_sigma = Dense(latent_dim)(h)
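
# reparameterization trick: z = mu + exp(log_sigma)*eps with random eps,
# so the sampling step stays differentiable with respect to the encoder
# outputs z_mean and z_log_sigma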
def sampling(args):
    z_mean, z_log_sigma = args
    epsilon = K.random_normal(shape=(K.shape(z_mean)[0], latent_dim),
                              mean=0., stddev=0.1)
    return z_mean + K.exp(z_log_sigma) * epsilon

z = Lambda(sampling)([z_mean, z_log_sigma])

# decoder
decoder_h = Dense(intermediate_dim, activation='relu')
decoder_mean = Dense(input_dim, activation='sigmoid')
h_decoded = decoder_h(z)
x_decoded_mean = decoder_mean(h_decoded)

# VAE model
vae = Model(inputs, x_decoded_mean)

# loss function and model compilation
xent_loss = input_dim * tf.keras.losses.binary_crossentropy(inputs, x_decoded_mean)
kl_loss = - 0.5 * K.sum(1 + z_log_sigma - K.square(z_mean) - K.exp(z_log_sigma), axis=-1)
vae_loss = K.mean(xent_loss + kl_loss)

vae.add_loss(vae_loss)
vae.compile(optimizer='rmsprop')

# train the VAE
vae.fit(x_train, x_train, shuffle=True, epochs=epochs, batch_size=batch_size, validation_data=(x_test, x_test))

# visualize the latent space and reconstructed images
encoder = Model(inputs, z_mean)

x_test_encoded = encoder.predict(x_test, batch_size=batch_size)
plt.figure(figsize=(6, 6))
plt.scatter(x_test_encoded[:, 0], x_test_encoded[:, 1], c=y_test)
plt.colorbar()
plt.title("Latent space of the VAE")
plt.xlabel("Dimension 1")
plt.ylabel("Dimension 2")
plt.savefig("VAE_latent_space.svg")
plt.show()

decoder_input = Input(shape=(latent_dim,))
_h_decoded = decoder_h(decoder_input)
_x_decoded_mean = decoder_mean(_h_decoded)
generator = Model(decoder_input, _x_decoded_mean)

n = 15
digit_size = 28
figure = np.zeros((digit_size * n, digit_size * n))

grid_x = np.linspace(-4, 4, n)
grid_y = np.linspace(-4, 4, n)

for i, yi in enumerate(grid_x):
    for j, xi in enumerate(grid_y):
        z_sample = np.array([[xi, yi]])
        x_decoded = generator.predict(z_sample)
        digit = x_decoded[0].reshape(digit_size, digit_size)
        figure[i * digit_size: (i + 1) * digit_size,
               j * digit_size: (j + 1) * digit_size] = digit

plt.figure(figsize=(10, 10))
plt.imshow(figure)
plt.title("VAE reconstructions across the latent space")
plt.xlabel("Dimension 1 of the latent space")
plt.ylabel("Dimension 2 of the latent space")
plt.show()
@@ -0,0 +1,72 @@
import numpy as np
import matplotlib.pyplot as plt

import tensorflow as tf
from tensorflow.keras.datasets import fashion_mnist
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input, Conv2D, MaxPooling2D, UpSampling2D, Add

(train_data, _), (test_data, _) = fashion_mnist.load_data()

# normalize the data to [0, 1]
train_data = train_data.astype('float32') / 255.0
test_data = test_data.astype('float32') / 255.0

# reshape to add a single channel dimension
train_data = train_data.reshape(-1, 28, 28, 1)
test_data = test_data.reshape(-1, 28, 28, 1)

# add noise to the training and test data
noise_factor = 0.5
train_noisy = train_data + noise_factor * np.random.normal(size=train_data.shape)
test_noisy = test_data + noise_factor * np.random.normal(size=test_data.shape)
# ... and clip to values between 0 and 1
train_noisy = np.clip(train_noisy, 0.0, 1.0)
test_noisy = np.clip(test_noisy, 0.0, 1.0)

# the U-Net model definition
def unet_model(input_shape):
    input_img = Input(shape=input_shape)

    # encoder
    x1 = Conv2D(32, (3, 3), activation='relu', padding='same')(input_img)
    x2 = MaxPooling2D((2, 2), padding='same')(x1)
    x3 = Conv2D(64, (3, 3), activation='relu', padding='same')(x2)
    x4 = MaxPooling2D((2, 2), padding='same')(x3)

    # bottleneck
    bn = Conv2D(128, (3, 3), activation='relu', padding='same')(x4)

    # decoder
    x5 = Conv2D(64, (3, 3), activation='relu', padding='same')(bn)
    x6 = UpSampling2D((2, 2))(x5)
    # skip (residual) connection from x3
    x6 = Add()([x6, x3])

    x7 = Conv2D(32, (3, 3), activation='relu', padding='same')(x6)
    x8 = UpSampling2D((2, 2))(x7)
    # skip (residual) connection from x1
    x8 = Add()([x8, x1])

    x9 = Conv2D(1, (3, 3), activation='sigmoid', padding='same')(x8)

    return Model(input_img, x9)

model = unet_model((28, 28, 1))
model.compile(optimizer='adam', loss='mean_squared_error')
model.fit(train_noisy, train_data, epochs=10, batch_size=128, validation_data=(test_noisy, test_data))

denoised_images = model.predict(test_noisy)
# plot
n = 10
plt.figure(figsize=(20, 6))
for i in range(1, n + 1):
    ax = plt.subplot(2, n, i)
    plt.imshow(test_noisy[i].reshape(28, 28), cmap='gray')
    ax.set_title('noisy')

    ax = plt.subplot(2, n, i + n)
    plt.imshow(denoised_images[i].reshape(28, 28), cmap='gray')
    ax.set_title('denoised')
plt.show()
@@ -0,0 +1,35 @@
import numpy as np

def train_hopfield(patterns):
    dim = patterns.shape[1]
    weights = np.zeros((dim, dim))  # weights start at zero

    for pattern in patterns:
        weights += np.outer(pattern, pattern)  # every unit with every other
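        # Hebbian rule: units that are jointly active in a pattern get a
        # positive weight between them, opposite activities a negative one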
    np.fill_diagonal(weights, 0)  # no self-feedback

    return weights / patterns.shape[0]

def recall_hopfield(weights, patterns, iterations=5):
    recalled = np.copy(patterns)
    for iteration in range(iterations):
        for i in range(recalled.shape[0]):
            recalled[i] = np.where(np.dot(weights, recalled[i]) < 0, -1, 1)
    return recalled

# training patterns (input)
patterns = np.array([[1, -1, 1, -1, 1, -1, 1, -1],
                     [-1, 1, -1, 1, -1, 1, -1, 1]])

# train the Hopfield network
weights = train_hopfield(patterns)

# test patterns (deviating slightly from the training patterns)
test_patterns = np.array([[ 1, 1, 1, -1, 1, -1, 1, -1],
                          [-1, -1, -1, 1, -1, 1, -1, 1]])

# recall phase
recalled_patterns = recall_hopfield(weights, test_patterns)

print("Reconstructed patterns:")
print(recalled_patterns)
@@ -0,0 +1,75 @@
import numpy as np
import matplotlib.pyplot as plt

m_z=12345  # random seed
m_w=34645  # random seed

def rndNum():  # our own pseudo-random generator
    global m_z, m_w
    m_z = 36969 * (m_z & 65535) + (m_z >> 16)
    m_w = 18000 * (m_w & 65535) + (m_w >> 16)
    return (((int)(m_z << 16) + m_w)%100000)/100000

def mackey_glass(tau=17, n=10, beta=0.2, gamma=0.1, t_max=3000, dt=1):
    # generate a Mackey-Glass time series with the Euler method
    t = np.arange(0, t_max+dt, dt)
    x = np.zeros(len(t))
    x[0:tau] = 0.5  # initial condition
    for i in range(tau, len(t)):
        x[i] = x[i-1] + dt * (beta*x[i-tau]/(1 + x[i-tau]**n) - gamma*x[i-1])
    return x

# generate the data
trainLen, testLen, initLen = 1500, 1500, 100
data = mackey_glass(tau=17, t_max=trainLen+testLen)

# build the ESN reservoir
res_size = 300
W_in = np.zeros((res_size,2))      # input weights
W = np.zeros((res_size,res_size))  # reservoir weights
for i in range(res_size):
    W_in[i][0] = rndNum()-0.5
    W_in[i][1] = rndNum()-0.5
    for j in range(res_size):
        if rndNum()<0.05:  # 5% connectivity
            W[i][j] = rndNum()-0.5

# scale the weight matrix to a spectral radius of 1.2
W *= 1.2 / max(abs(np.linalg.eig(W)[0]))

X = np.zeros((1+res_size,trainLen-initLen))  # reservoir states
Yt = data[initLen+1:trainLen+1]              # target values

# train the ESN
x = np.zeros(res_size)
for t in range(trainLen):
    u = data[t]
    x = np.tanh(np.dot(W_in, [1,u])+np.dot(W, x))
    if t >= initLen:
        X[:,t-initLen] = np.concatenate(([1],x))

reg = 1e-7  # regularization coefficient
W_out = np.linalg.solve(np.dot(X,X.T) + reg*np.eye(1+res_size), np.dot(X,Yt)).T
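# the linear readout W_out is the only trained part of the ESN; it is the
# ridge-regression solution of (X X^T + reg*I) w = X Yt, while the input
# and reservoir weights stay fixed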

# predict the Mackey-Glass attractor
Y = np.zeros(testLen)
u = data[trainLen]
for t in range(testLen):
    x = np.tanh(np.dot(W_in, [1,u])+np.dot(W, x))
    y = np.dot( W_out, np.concatenate(([1],x)) )
    Y[t] = y
    u = y  # feed the prediction back as the next input

mse = np.mean((data[trainLen+1:trainLen+501]-Y[:500])**2)

print('MSE = ' + str( mse ))

# plot the attractors
plt.figure(figsize=(12,6))
plt.title("Mackey-Glass attractor")
plt.xlabel('t')
plt.ylabel('y(t)')
plt.plot(data[trainLen+1:trainLen+1001], 'g', label="original data")
plt.plot(Y[:1000], '--r', label="generated data")
plt.legend(loc='upper right')
plt.show()
@@ -0,0 +1,61 @@
import numpy as np
import matplotlib.pyplot as plt
from keras.models import Sequential
from keras.layers import LSTM, Dense

# generate synthetic stock prices
np.random.seed(0)
time_steps = 300
total_samples = 1000

# the course of an invented market price,
# generated by a Wiener process
N = 1000
T = 1.0
t = np.linspace(0.0, T, N)
dt = T / N
dW = np.sqrt(dt) * np.random.normal(size=N)
prices = np.cumsum(dW)

# build the training sequences
X = [prices[i:i+time_steps] for i in range(total_samples - time_steps)]
y = prices[time_steps:]

# convert and reshape for the LSTM
X = np.array(X).reshape(-1, time_steps, 1)
y = np.array(y)

# initialize a sequential model
model = Sequential()

# add a layer with 20 LSTM cells
model.add(LSTM(20, activation='tanh', input_shape=(time_steps, 1)))

# add one output unit
model.add(Dense(1))

# compile the model with mean squared error loss and the Adam optimizer
model.compile(optimizer='adam', loss='mse')

# train the model
model.fit(X, y, epochs=10, verbose=1)

# generate predictions
future_predictions = []
current_sequence = X[-1].reshape(-1, time_steps, 1)

for _ in range(300):
    # predict the next price
    future_price = model.predict(current_sequence)[0][0]
    future_predictions.append(future_price)

    # shift the sequence and append the prediction at the end
    current_sequence = np.roll(current_sequence, -1)
    current_sequence[-1][-1] = future_price

# plot the market price together with the 300 future predictions
plt.figure(figsize=(10, 6))
plt.plot(range(total_samples), prices, color='blue', label='original price')
plt.plot(range(total_samples, total_samples + 300), future_predictions, color='red', linestyle='dashed', label='future prediction')
plt.legend()
plt.show()
@@ -0,0 +1,50 @@
import numpy as np
import matplotlib.pyplot as plt
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from sklearn.linear_model import LinearRegression
from sklearn.metrics.pairwise import euclidean_distances

# create the data
np.random.seed(0)
n_samples = 5000
X = np.random.randint(2, size=(n_samples, 5))  # 5 binary features
# credit is granted if at least 3 criteria are met and there is no Schufa entry
y = ((np.sum(X[:, :4], axis=1) >= 3) & (X[:, 4] == 1)).astype(int)

# train a neural network
model = Sequential()
model.add(Dense(10, input_dim=5, activation='relu'))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
model.fit(X, y, epochs=10, verbose=0)

# pick the instance whose prediction we want to explain
instance = np.array([[1, 0, 1, 1, 1]])

# perturb the instance and collect the model's predictions
num_perturbations = 1000
perturbed_instances = np.random.randint(2, size=(num_perturbations, 5))
perturbed_predictions = model.predict(perturbed_instances).ravel()

# compute weights based on the proximity to the original instance
weights = np.exp(-euclidean_distances(perturbed_instances, instance))
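# the exponential kernel exp(-distance) gives perturbed samples near the
# explained instance a high weight, so the linear surrogate below is only
# fitted to be locally faithful around that instance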

# train the interpretable model (a linear regression)
interpretable_model = LinearRegression()
interpretable_model.fit(perturbed_instances, perturbed_predictions, sample_weight=weights.ravel())

# the explanation consists of the coefficients of the linear model
explanation = interpretable_model.coef_
print("Explanation:", explanation)

# visualization
features = ['place of residence', 'age', 'employment', 'salary', 'no Schufa entry']
plt.bar(features, explanation)
plt.xlabel('Feature')
plt.ylabel('Importance')
plt.title('Explaining the credit decision with LIME')
plt.xticks(fontsize=8)  # smaller font size for the x-axis ticks
plt.yticks(fontsize=8)  # smaller font size for the y-axis ticks
plt.tight_layout()
plt.show()
@@ -0,0 +1,42 @@
import numpy as np
import itertools
from math import factorial
import matplotlib.pyplot as plt

# a simple model for predicting house prices
def house_price_model(features):
    # assumption: the price rises with the size of the house and falls with the distance to the city center
    return features[0] * 3000 - features[1] * 10000

def shapley_value(model, base_features, feature_index):
    all_features = list(range(len(base_features)))
    n = len(all_features)
    total_value = 0

    for subset_size in range(n):
        for subset in itertools.combinations(all_features, subset_size):
            if feature_index not in subset:
                without_feature = list(subset)
                with_feature = list(subset) + [feature_index]

                prediction_without = model([base_features[i] if i in without_feature else 0 for i in all_features])
                prediction_with = model([base_features[i] if i in with_feature else 0 for i in all_features])

                # Shapley weight of a coalition S: |S|! * (n-|S|-1)! / n!
                weight = factorial(subset_size) * factorial(n - subset_size - 1) / factorial(n)
                total_value += (prediction_with - prediction_without) * weight

    return total_value

# base features: size of the house (in square meters) and distance to the city center (in kilometers)
base_features = [150, 5]
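# sanity check: for this additive model each feature's Shapley value equals
# its own contribution, i.e. 150*3000 = 450000 and -(5*10000) = -50000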
shap_values = [shapley_value(house_price_model, base_features, i) for i in range(len(base_features))]

# show the results
print("Shapley values:", shap_values)

# visualize with matplotlib
plt.bar(range(len(base_features)), shap_values)
plt.xticks(range(len(base_features)), ['size of the house', 'distance to the city center'])
plt.ylabel('Shapley value')
plt.title('Shapley values for each feature')
plt.show()
@@ -0,0 +1,70 @@
import pygame
import numpy as np

pygame.init()
screen = pygame.display.set_mode((200, 200))
pygame.display.set_caption("Reinforcement Learning")
clock = pygame.time.Clock()

maze=["##########",
      "#        #",
      "#        #",
      "####    ##",
      "#        #",
      "#        #",
      "#    #####",
      "#        #",
      "#       G#",
      "##########"]

x_dir = [-1,1,0,0]  # x direction for actions 0,1,2,3
y_dir = [0,0,-1,1]  # y direction for actions 0,1,2,3
q = np.random.rand(100, 4)*0.1  # q[s][a]=0..0.1, q[100][4]
alpha = 0.5    # learning rate
gamma = 0.9    # discount factor
epsilon = 50   # for the epsilon-greedy action selection
for episode in range(1000):
    x_agent = 1  # agent x back to the start
    y_agent = 1  # agent y back to the start
    goal_reached = False
    while(not goal_reached):
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                exit()
        # draw the maze and the Q-values
        screen.fill((0,0,0))
        for y in range(10):
            for x in range(10):
                if maze[y][x]=='#':
                    pygame.draw.rect(screen, (0, 128, 255),
                                     pygame.Rect(x*20, y*20, 15, 15))
                else:
                    pygame.draw.rect(screen,
                                     (0, 200*np.max(q[y*10+x]), 0),
                                     pygame.Rect(x*20, y*20, 15, 15))
        reward = 0
        s = y_agent*10+x_agent  # one-dimensional state

        if np.random.randint(100)<epsilon:  # epsilon-greedy
            a = np.random.randint(4)  # random action
        else:
            # argmax returns the index, and thus the action,
            # with the largest q value
            a = np.argmax(q[s])
        # if there is no wall, move the agent
        if maze[y_agent+y_dir[a]][x_agent+x_dir[a]]!='#':
            x_agent += x_dir[a]
            y_agent += y_dir[a]
        # if the goal is reached, give a reward
        if maze[y_agent][x_agent]=='G':
            goal_reached = True
            reward = 1
        # new one-dimensional state
        new_s = y_agent*10+x_agent
        # Q-update formula
        q[s][a] += alpha*(reward + gamma*np.max(q[new_s]) - q[s][a])
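        # temporal-difference update: move q[s][a] toward the observed
        # reward plus the discounted value of the best next action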

        pygame.draw.rect(screen, (255, 0, 0),
                         pygame.Rect(x_agent*20, y_agent*20, 15, 15))
        pygame.display.flip()
        clock.tick(60)  # 60 frames per second
@@ -0,0 +1,98 @@
import pygame as pyg
import numpy as np
import random

# perform the Q-updates
def updateQ(reward, state, action, nextState):
    global er_re, er_s, er_a, er_ns, tick, Q, alpha, gamma
    # fill the replay buffer
    er_re[tick%400]= reward    # experience replay: reward
    er_s[tick%400] = state     # experience replay: state
    er_a[tick%400] = action    # experience replay: action
    er_ns[tick%400]= nextState # experience replay: next state
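    # sampling random minibatches from this ring buffer decorrelates the
    # updates from the most recent trajectory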
    for i in range(batch_size):
        r = random.randint(0,399)
        # Q[s][a]+=r+alpha*(gamma * max_a' Q(s',a')-Q(s,a))
        Q[int(er_s[r])][int(er_a[r])] += er_re[r] + alpha*(gamma * np.max(Q[int(er_ns[r])]) - Q[int(er_s[r])][int(er_a[r])])


# ask for the next action
def getAction(state):  # returns -1 for paddle left or +1 for paddle right
    global epsilon, Q
    if np.random.rand() <= epsilon:
        return np.random.choice([-1, 1])
    return (np.argmax(Q[int(state)]) * 2) - 1


# maps 5 coordinates -> 1 coordinate,
# so every state gets a unique number
def getState(x_ball, y_ball, vx_ball, vy_ball, x_racket):
    return (((x_ball*13 +y_ball)*2 +(vx_ball+1)/2)*2 +(vy_ball+1)/2)*12 +x_racket


# parameters for Q-learning and experience replay
epsilon = 1
alpha = 0.1
gamma = 0.95
batch_size = 32
er_re = np.zeros(400)
er_s = np.zeros(400)
er_a = np.zeros(400)
er_ns = np.zeros(400)

tick = 0     # incremented on every state change
episode = 0  # counts the episodes

num_of_states = 13*12*2*2*12
num_of_actions = 2
Q = np.random.rand(num_of_states, num_of_actions)/1000.0

pyg.init()
screen = pyg.display.set_mode((240, 260))
pyg.display.set_caption("Q-Learning Experience-Replay")
file = open('reward_experience_replay.txt','w')
x_racket, x_ball, y_ball, vx_ball, vy_ball, score = 5, 1, 1, 1, 1, 0

cont = True
#clock = pyg.time.Clock()
while cont:
    for event in pyg.event.get():
        if event.type == pyg.QUIT:
            cont = False

    epsilon -= 1/400
    if (epsilon<0):
        epsilon=0
    screen.fill((0,0,0))
    font = pyg.font.SysFont("arial", 15)
    t = font.render("Score:"+str(score)+" Episode:"+str(episode), True, (255,255,255))
    screen.blit(t, t.get_rect(centerx = screen.get_rect().centerx))
    pyg.draw.rect(screen, (0, 128, 255), pyg.Rect(x_racket*20, 250, 80, 10))
    pyg.draw.rect(screen, (255, 100, 0), pyg.Rect(x_ball*20, y_ball*20, 20, 20))

    state = getState(x_ball, y_ball, vx_ball, vy_ball, x_racket)
    action = getAction(state)

    x_racket = x_racket + action  # execute the action
    if x_racket>11: x_racket = 11
    if x_racket<0: x_racket = 0

    x_ball, y_ball = x_ball + vx_ball, y_ball + vy_ball
    if x_ball > 10 or x_ball < 1: vx_ball *= -1
    if y_ball > 11 or y_ball < 1: vy_ball *= -1

    reward = 0
    if y_ball == 12:
        reward = -1  # assumption: the ball was missed
        if x_ball >= x_racket and x_ball <= x_racket + 4:
            reward = +1  # the ball was hit after all
        episode += 1
    score = score + reward

    nextState = getState(x_ball, y_ball, vx_ball, vy_ball, x_racket)
    updateQ(reward, state, (action+1)//2, nextState)

    tick += 1
    if reward!=0:
        file.write(str(reward)+",")
        file.flush()
    #clock.tick(60)  # limit the refresh rate to 60 FPS
    pyg.display.flip()
@@ -0,0 +1,114 @@
import pygame as pyg
import numpy as np
import random

def one_hot_state(input):
    # returns the state as a one-hot vector
    in_vector = np.zeros(state_dim)
    in_vector[int(input)] = 1  # one-hot vector
    return in_vector

def model_predict(in_vec):
    # returns the activity of both output neurons
    return np.matmul(weights.T, in_vec)

def model_fit(in_vec, target_vec):
    global weights
    out_vec = model_predict(in_vec)
    # adapt the weight matrix with the delta learning rule
    weights += np.outer(in_vec.T,(target_vec-out_vec))

# perform the Q-updates
def updateQ(reward, state, action, nextState):
    global replay_re, replay_s, replay_a, replay_ns

    # fill the experience replay ring buffer
    replay_re[tick % 400] = reward
    replay_s[ tick % 400] = state
    replay_a[ tick % 400] = action
    replay_ns[tick % 400] = nextState
    if tick>400:
        for i in range(batch_size):
            r = random.randint(0,399)
            Qval = model_predict(one_hot_state(replay_s[r]))
            target = np.zeros(2)  # define the target with the Q-update formula
            target[int(replay_a[r])] = replay_re[r] + alpha*(gamma * np.max(model_predict(one_hot_state(replay_ns[r]))) - Qval[int(replay_a[r])])
            model_fit(one_hot_state(replay_s[r]), np.array(target))

# next action via epsilon-greedy
def getAction(state):
    if np.random.rand() <= epsilon:
        return np.random.choice([-1, 1])
    act_values = model_predict(one_hot_state(state))
    return (np.argmax(act_values) * 2) - 1

# reduces the state to a single number
def getState(x_ball, y_ball, vx_ball, vy_ball, x_racket):
    return (((x_ball*13 +y_ball)*2 +(vx_ball+1)/2)*2 +(vy_ball+1)/2)*12 +x_racket

# Q-network parameters
state_dim = 12*13*2*2*12
action_dim = 2
epsilon = 1
alpha = 0.1
gamma = 0.95
batch_size = 32
weights = np.random.rand(state_dim, action_dim)/1000.0
episode = 0
tick = 0
replay_re = np.zeros(400, dtype=int)
replay_s = np.zeros(400, dtype=int)
replay_a = np.zeros(400, dtype=int)
replay_ns = np.zeros(400, dtype=int)

pyg.init()
screen = pyg.display.set_mode((240, 260))
pyg.display.set_caption("Neural-Pong")
file = open('reward_neural.txt','w')
x_racket, x_ball, y_ball, vx_ball, vy_ball, score = 5, 1, 1, 1, 1, 0
#clock = pyg.time.Clock()
cont = True
while cont:
    for event in pyg.event.get():
        if event.type == pyg.QUIT:
            cont = False

    epsilon -= 1/400
    if (epsilon<0):
        epsilon=0
    screen.fill((0,0,0))
    font = pyg.font.SysFont("arial", 15)
    t = font.render("Score:"+str(score)+" Episode:"+str(episode), True, (255,255,255))
    screen.blit(t, t.get_rect(centerx = screen.get_rect().centerx))
    pyg.draw.rect(screen, (0, 128, 255), pyg.Rect(x_racket*20, 250, 80, 10))
    pyg.draw.rect(screen, (255, 100, 0), pyg.Rect(x_ball*20, y_ball*20, 20, 20))

    state = getState(x_ball, y_ball, vx_ball, vy_ball, x_racket)
    action = getAction(state)

    # compute the new state and the new paddle position
    x_racket = x_racket + action
    if x_racket>11: x_racket = 11
    if x_racket<0: x_racket = 0

    x_ball, y_ball = x_ball + vx_ball, y_ball + vy_ball
    if x_ball > 10 or x_ball < 1: vx_ball *= -1
    if y_ball > 11 or y_ball < 1: vy_ball *= -1

    reward = 0
    if y_ball == 12:
        reward = -1  # assumption: the ball was missed
        if x_ball >= x_racket and x_ball <= x_racket + 4:
            reward = +1  # the ball was hit after all
        episode += 1
    score = score + reward

    nextState = getState(x_ball, y_ball, vx_ball, vy_ball, x_racket)
    updateQ(reward, state, (action+1)//2, nextState)

    tick += 1
    if reward!=0:
        file.write(str(reward)+",")
        file.flush()
    #clock.tick(60)  # limit the refresh rate to 60 FPS
    pyg.display.flip()
@@ -0,0 +1,196 @@
import pygame
import numpy as np
import random
import math
import cProfile

class MonteCarlo:
    def __init__(self, simulations=1000):  # 1000 simulations per move
        self.simulations = simulations

    def get_move(self, board, player):
        moves = self.get_close_moves(board)
        if len(moves) == 0:
            return None

        best_move = None
        best_win_rate = -float('inf')

        for move in moves:
            wins = 0
            for _ in range(self.simulations):
                wins += self._simulate(board.copy(), move, player)
            wins *= -1  # _simulate returns the winning color (+1/-1); negate to score for white (player == -1)
            print("move="+str(move) + " wins="+str(wins))

            win_rate = wins / self.simulations
            if win_rate > best_win_rate:
                best_win_rate = win_rate
                best_move = move

        return best_move

    def _simulate(self, board, move, player):
        board[move[0], move[1]] = player
        current_player = -player

        # Monte Carlo search actually plays out the game to the end
        #while True:  # which would be this infinite loop
        # instead, only the next 15 moves
        # are considered here
        for i in range(15):
            winner = Gomoku.has_won_static(board, move[0], move[1])
            if winner != 0:
                return winner
            move = self.get_rand_move(board)
            board[move[0], move[1]] = current_player
            current_player *= -1
        return 0

    def get_rand_move(self, board):
        GRID_SIZE = len(board)
        positions = np.argwhere(board != 0)

        while True:
            pos = random.choice(positions)
            # one step in a random direction
            dx = random.choice([-1, 0, 1])
            dy = random.choice([-1, 0, 1])
            new_x, new_y = pos[1] + dx, pos[0] + dy
            if 0 <= new_x < GRID_SIZE and 0 <= new_y < GRID_SIZE and board[new_y, new_x] == 0:
                return(new_y, new_x)

    # instead of considering every move, only those
    # directly next to another stone are considered
    def get_close_moves(self, board):
        GRID_SIZE = len(board)
        occupied_positions = np.argwhere(board != 0)
        possible_moves = set()

        # define all possible directions (up, down, left, right, diagonal)
        directions = [(1, 0), (-1, 0), (0, 1), (0, -1), (1, 1), (-1, -1), (1, -1), (-1, 1)]

        for pos in occupied_positions:
            for dx, dy in directions:
                new_x, new_y = pos[1] + dx, pos[0] + dy
                if 0 <= new_x < GRID_SIZE and 0 <= new_y < GRID_SIZE and board[new_y, new_x] == 0:
                    possible_moves.add((new_y, new_x))

        if not possible_moves:  # if there are no possible moves
            return None

        return list(possible_moves)

class Gomoku:
    GRID_SIZE = 15
    CELL_SIZE = 40
    OFFSET = 20
    BOARD_SIZE = (GRID_SIZE-1) * CELL_SIZE + 2 * OFFSET
    STONE_RADIUS = CELL_SIZE // 2 - 5
    BLACK = (0, 0, 0)
    WHITE = (255, 255, 255)
    BACKGROUND = (220, 180, 140)

    def __init__(self):
        pygame.init()
        self.screen = pygame.display.set_mode((self.BOARD_SIZE, self.BOARD_SIZE))
        pygame.display.set_caption("Gomoku")
        self.clock = pygame.time.Clock()
        self.board = np.zeros((self.GRID_SIZE, self.GRID_SIZE), dtype=int)
        self.current_player = 1

    def draw_board(self):
        self.screen.fill(self.BACKGROUND)
        for i in range(self.GRID_SIZE):
            pygame.draw.line(self.screen, self.BLACK, (i * self.CELL_SIZE + self.OFFSET, self.OFFSET), (i * self.CELL_SIZE + self.OFFSET, self.BOARD_SIZE - self.CELL_SIZE + self.OFFSET))
            pygame.draw.line(self.screen, self.BLACK, (self.OFFSET, i * self.CELL_SIZE + self.OFFSET), (self.BOARD_SIZE - self.CELL_SIZE + self.OFFSET, i * self.CELL_SIZE + self.OFFSET))

    def draw_stones(self):
        for y in range(self.GRID_SIZE):
            for x in range(self.GRID_SIZE):
                if self.board[y, x] != 0:
                    color = self.BLACK if self.board[y, x] == 1 else self.WHITE
                    pygame.draw.circle(self.screen, color, (x * self.CELL_SIZE + self.OFFSET, y * self.CELL_SIZE + self.OFFSET), self.STONE_RADIUS)

    @staticmethod
    def has_won_static(board, x, y):  # returns 0 = nobody has won, 1 = black has won, -1 = white has won
        # the stone that was played last
        last_stone = board[y][x]
        if last_stone == 0:
            return 0  # no stone at this position

        # check all four directions: horizontal, vertical, both diagonals
        directions = [(1, 0), (0, 1), (1, 1), (1, -1)]
        GRID_SIZE = len(board)

        for dx, dy in directions:
            count = 1  # counts the number of connected stones

            # check in one direction
            for i in range(1, 5):
                new_x, new_y = x + dx * i, y + dy * i
                if 0 <= new_x < GRID_SIZE and 0 <= new_y < GRID_SIZE and board[new_y][new_x] == last_stone:
                    count += 1
                else:
                    break

            # check in the opposite direction
            for i in range(1, 5):
                new_x, new_y = x - dx * i, y - dy * i
                if 0 <= new_x < GRID_SIZE and 0 <= new_y < GRID_SIZE and board[new_y][new_x] == last_stone:
                    count += 1
                else:
                    break

            # check whether the row is long enough
            if count >= 5:
                return board[y][x]
        return 0

    def run(self):
        mc = MonteCarlo()
        winner = 0
        running = True
        xx, yy = 0, 0
        while running:
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    running = False
                # the human plays black
                if event.type == pygame.MOUSEBUTTONDOWN and self.current_player == 1:
                    x, y = event.pos
                    xx, yy = round((x - self.OFFSET) / self.CELL_SIZE), round((y - self.OFFSET) / self.CELL_SIZE)
                    if 0 <= xx < self.GRID_SIZE and 0 <= yy < self.GRID_SIZE and self.board[yy, xx] == 0:
                        self.board[yy, xx] = self.current_player
                        self.current_player *= -1
                        self.draw_board()
                        self.draw_stones()
                        pygame.display.flip()
                        if winner == 0:
                            winner = Gomoku.has_won_static(self.board, xx, yy)
            # Monte Carlo plays white
            if self.current_player == -1 and winner==0:
                move = mc.get_move(self.board, self.current_player)
                if move:
                    self.board[move[0], move[1]] = self.current_player
                    yy = move[0]
                    xx = move[1]
                self.current_player *= -1

            self.draw_board()
            self.draw_stones()
            pygame.display.flip()
            if winner == 0:
                winner = Gomoku.has_won_static(self.board, xx, yy)

            if winner == 1:
                print("Black has won.")
            if winner == -1:
                print("White has won.")

        pygame.quit()

if __name__ == "__main__":
    game = Gomoku()
    #cProfile.run('game.run()')
    game.run()
@@ -0,0 +1,21 @@
import numpy as np

a = np.array([[5, 7, 1], [2, 6, 3], [2, 5, 4]])
print("A=")
print(a)
u,s,v = np.linalg.svd(a)
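
# keeping only the two largest singular values yields the best rank-2
# approximation of A in the least-squares (Frobenius) sense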
u = np.matrix(u[:,:2])  # truncate U, S and V to the first two components
s = np.diag(s[:2])
v = np.matrix(v[:2,:])
print("U=")
print(u)
print("S=")
print(s)
print("V=")
print(v)
usv = u*s*v
print("U*S*V=")
print(usv)
@@ -0,0 +1,17 @@
import matplotlib.pyplot as plt
import numpy as np

# generate the data
x = np.linspace(0, 10, 100)
y = np.sin(x)

# create a plot
plt.plot(x, y)

# set the title and the axis labels
plt.title("Sine function")
plt.xlabel("x axis")
plt.ylabel("y axis")

# show the plot
plt.show()
@@ -0,0 +1,18 @@
import pygame  # include pygame

pygame.init()  # initialize pygame
screen = pygame.display.set_mode((800, 500))
endlos=True  # run "endlessly" until the window is closed

while endlos==True:
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            endlos = False
    screen.fill((0, 0, 0))  # clear the screen
    # draw a 20x20 grid of little squares with a gray gradient
    for x in range(0,20):
        for y in range(0,20):
            col = x*10
            pygame.draw.rect(screen, (col, col, col),
                             pygame.Rect(x*10, y*10, 10, 10))
    pygame.display.flip()  # show what was drawn
@@ -0,0 +1,34 @@
import numpy as np
import matplotlib.pyplot as plt
from tensorflow import keras

# simulated dataset
N = 1000
X = np.random.randn(N, 2)  # random points
# define the target function:
# y=1 if a point lies inside the unit circle, otherwise y=0
y = (X[:, 0]**2 + X[:, 1]**2 < 1.0).astype(int)

# split the dataset into training and test data
X_train, X_test = X[:800], X[800:]
y_train, y_test = y[:800], y[800:]

# model with 2 layers: first 10 neurons, then 1 neuron
model = keras.Sequential([
    keras.layers.Dense(10, input_shape=(2,), activation='relu'),
    keras.layers.Dense(1, activation='sigmoid')
])

# compile the model (binary_crossentropy is the usual loss for classification)
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])

# train the model
history = model.fit(X_train, y_train, epochs=50, batch_size=32, validation_data=(X_test, y_test))
# plot how the loss decreases
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'],"--")
plt.title('Model loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['training data', 'test data'], loc='center right')
plt.show()
@ -0,0 +1,28 @@
|
|||
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import accuracy_score

# load the data
iris = load_iris()
X, y = iris.data, iris.target

# split the data into training and test sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

# standardize the data
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)

# train the KNN model
knn = KNeighborsClassifier(n_neighbors=3)
knn.fit(X_train, y_train)

# make predictions
y_pred = knn.predict(X_test)

# compute the accuracy
accuracy = accuracy_score(y_test, y_pred)
print(f"Genauigkeit: {accuracy:.2f}")

@@ -0,0 +1,39 @@
import matplotlib.pyplot as plt
import numpy as np

# create example data
actual_labels = ["Hund", "Hund", "Katze", "Katze", "Maus", "Maus", "Maus"]
predicted_labels = ["Hund", "Hund", "Katze", "Hund", "Maus", "Katze", "Maus"]

# build the confusion matrix
labels = np.unique(actual_labels)
confusion_matrix = np.zeros((len(labels), len(labels)))
for i in range(len(actual_labels)):
    actual_index = np.where(labels == actual_labels[i])[0][0]
    predicted_index = np.where(labels == predicted_labels[i])[0][0]
    confusion_matrix[actual_index][predicted_index] += 1

# draw the confusion matrix
fig, ax = plt.subplots()
im = ax.imshow(confusion_matrix, cmap='YlGnBu')

# add tick labels
ax.set_xticks(np.arange(len(labels)))
ax.set_yticks(np.arange(len(labels)))
ax.set_xticklabels(labels)
ax.set_yticklabels(labels)
plt.setp(ax.get_xticklabels(), rotation=45, ha="right", rotation_mode="anchor")

# write the counts into the cells; text color depends on the background
for i in range(len(labels)):
    for j in range(len(labels)):
        text_color = "white" if confusion_matrix[i, j] > confusion_matrix.max() / 2 else "black"
        text = ax.text(j, i, int(confusion_matrix[i, j]), ha="center", va="center", color=text_color)

# add a color bar
cbar = ax.figure.colorbar(im, ax=ax)

# axis labels and title
ax.set_title("Konfusionsmatrix")
ax.set_xlabel("vorhergesagte Klasse")
ax.set_ylabel("wirkliche Klasse")

# show the plot
plt.show()

@@ -0,0 +1,29 @@
import matplotlib.pyplot as plt
import numpy as np

# generate example data
data = np.random.rand(10, 10)

# draw the heatmap ...
fig, ax = plt.subplots()
im = ax.imshow(data, cmap="YlGnBu")

# ... with text in each cell
for i in range(data.shape[0]):
    for j in range(data.shape[1]):
        if data[i, j] > 0.5:  # text color depends on the background
            text_color = 'white'  # for dark backgrounds
        else:
            text_color = 'black'  # for light backgrounds
        text = ax.text(j, i, "{:.2f}".format(data[i, j]), ha="center", va="center", color=text_color)

# ... and a color bar
cbar = ax.figure.colorbar(im, ax=ax)

# title and axis labels
ax.set_title("Beispiel-Heatmap")
ax.set_xlabel("x-Label")
ax.set_ylabel("y-Label")

# show the plot
plt.show()

@@ -0,0 +1,42 @@
import numpy as np
import matplotlib.pyplot as plt

cities = 100

def get_length(travel, distance):
    # distance from the end of the tour back to the start
    length = distance[int(travel[-1]), int(travel[0])]
    for i in range(0, cities-1):
        # add up the individual distances
        length += distance[int(travel[i]), int(travel[i+1])]
    return length

def main():
    # distance table filled with random numbers
    distance = np.random.rand(cities, cities)
    # start with the tour 0,1,2 ... 99
    travel = np.linspace(0, cities, cities, endpoint=False)
    graph = np.array([])
    # 10000 attempts to improve
    for step in range(0, 10000):
        length = get_length(travel, distance)
        i = np.random.randint(0, cities)  # upper bound is exclusive
        j = np.random.randint(0, cities)
        # swap city i with city j
        travel[i], travel[j] = travel[j], travel[i]
        new_length = get_length(travel, distance)
        # if the new tour is longer, then ...
        if new_length > length:
            # ... swap the cities back
            travel[i], travel[j] = travel[j], travel[i]
        else:
            graph = np.append(graph, new_length)

    plt.title('Hill Climber')
    plt.xlabel('Verbesserungsschritt')
    plt.ylabel('Rundreiselänge')
    plt.plot(graph)
    plt.show()

if __name__ == "__main__":
    main()

@@ -0,0 +1,53 @@
import numpy as np
import matplotlib.pyplot as plt

cities = 100

def get_length(travel, distance):
    # distance from the end of the tour back to the start
    length = distance[int(travel[-1]), int(travel[0])]
    for i in range(0, cities-1):
        # add up the individual distances
        length += distance[int(travel[i]), int(travel[i+1])]
    return length

def main():
    # distance table filled with random numbers
    distance = np.random.rand(cities, cities)
    # start with the tour 0,1,2,3,4...99
    travel = np.linspace(0, cities, cities, endpoint=False)
    graph = np.array([])
    t = 1
    # 100000 attempts to improve
    for step in range(0, 100000):
        t = t*0.9999  # lower the temperature
        length = get_length(travel, distance)
        i = np.random.randint(0, cities)  # upper bound is exclusive
        j = np.random.randint(0, cities)
        # swap city i with city j
        travel[i], travel[j] = travel[j], travel[i]

        new_length = get_length(travel, distance)
        # probability of accepting a step backwards
        # note: the tour length is used instead of a fitness,
        # hence the negative sign...
        p = np.exp(-(new_length-length)/t)
        # if the new tour is shorter, or the probability of a
        # step backwards is larger than a random value
        # between 0 and 1, then...
        if new_length < length or np.random.random() < p:
            # ...accept the new tour
            graph = np.append(graph, new_length)
        else:
            # otherwise ... swap the cities back
            travel[i], travel[j] = travel[j], travel[i]

    plt.title('Simulated Annealing')
    plt.xlabel('Vertauschungsschritt')
    plt.ylabel('Rundreiselänge')
    plt.plot(graph)
    plt.show()

if __name__ == "__main__":
    main()

@@ -0,0 +1,75 @@
import numpy as np
import matplotlib.pyplot as plt

pop_size = 1000  # population size
gen_len = 100    # length of a genome
gene = np.zeros((pop_size, gen_len), dtype=int)
new_gene = np.zeros((pop_size, gen_len), dtype=int)
fitns = np.ones(pop_size)

rek_rate = 0.8  # recombination rate
mut_rate = 0.2  # mutation rate

job_time = np.random.rand(gen_len)  # array of job durations

def rank_select():
    # simplified rank selection: pick one of the best
    # half (500) individuals with equal probability
    return np.random.randint(0, pop_size // 2)

def crossover():  # recombination
    global gene
    split = np.random.randint(gen_len)
    sel1 = rank_select()  # select individual 1
    sel2 = rank_select()  # select individual 2
    return np.append(gene[sel1][:split], gene[sel2][split:]), np.append(gene[sel2][:split], gene[sel1][split:])

def mutate():  # mutation
    global gene, pop_size
    for i in range(int(pop_size*mut_rate)):
        indi = np.random.randint(0, pop_size)  # some individual
        bit = np.random.randint(0, gen_len)    # some bit ...
        gene[indi][bit] = 1 - gene[indi][bit]  # ... to flip

def eval_fitness():  # evaluate the fitness
    global job_time, fitns, pop_size, gene
    for i in range(pop_size):
        # compute the time each CPU needs
        time_for_comp = np.zeros((2,), dtype=float)
        for j in range(gen_len):
            bit = gene[i][j]  # job runs on CPU 0 or 1
            time_for_comp[bit] += job_time[j]
        # fitness = negative total time,
        # where total time = maximum of the CPU times
        fitns[i] = -max(time_for_comp[0], time_for_comp[1])
    # sort gene[] by fitns[] in descending order
    sorted_indices = np.flip(np.argsort(fitns))
    gene = gene[sorted_indices]
    fitns = fitns[sorted_indices]

if __name__ == "__main__":
    graph = np.array([])
    for i in range(10):
        mutate()  # initialize the array randomly
    for gen in range(0, 100):  # generations
        eval_fitness()  # evaluate the fitness
        print("Gen:"+str(gen)+" Bestzeit:"+str(-fitns[0]))
        graph = np.append(graph, -fitns[0])
        i = 0
        while i < pop_size:
            if i < pop_size*rek_rate and i+1 < pop_size:
                # create two new individuals
                new_gene[i], new_gene[i+1] = crossover()
                i += 2  # because two individuals were created
            else:
                # select a single individual
                new_gene[i] = np.copy(gene[rank_select()])
                i += 1

        gene = np.copy(new_gene)
        mutate()

    plt.title('Genetischer Algorithmus')
    plt.xlabel('Generation')
    plt.ylabel('Beste Gesamtzeit')
    plt.plot(graph)
    plt.show()

@@ -0,0 +1,33 @@
import random

def objective_function(x):
    """The objective function we want to optimize."""
    return x**2

def one_plus_one_es(max_iterations=100, initial_solution=10.0, initial_step_size=1.0, step_size_adaptation=0.85):
    """A simple (1+1)-ES algorithm."""

    # initialize the current point and the step size
    x = initial_solution
    sigma = initial_step_size

    for iteration in range(max_iterations):
        # create an offspring by mutation
        y = x + sigma * random.gauss(0, 1)

        # if the offspring is better, update the current point
        if objective_function(y) < objective_function(x):
            x = y
            sigma /= step_size_adaptation  # increase the step size
        else:
            sigma *= step_size_adaptation  # decrease the step size

        # print the current solution and its function value
        print(f"Iteration {iteration + 1}: x = {x:.4f}, f(x) = {objective_function(x):.4f}")

    return x

# run the (1+1)-ES algorithm
best_solution = one_plus_one_es()
print(f"\nBeste gefundene Lösung: x = {best_solution:.4f}, f(x) = {objective_function(best_solution):.4f}")

@@ -0,0 +1,19 @@
import numpy as np
from cmaes import CMA

# Rosenbrock function
def rosenbrock(x1, x2):
    return (1 - x1) ** 2 + 10 * (x2 - x1**2) ** 2

if __name__ == "__main__":
    optimizer = CMA(mean=np.zeros(2), sigma=1.3, population_size=10)

    # optimize over 50 generations
    for gen in range(50):
        solutions = []
        for _ in range(optimizer.population_size):
            x = optimizer.ask()
            val = rosenbrock(x[0], x[1])
            solutions.append((x, val))
            print(f"#{gen} {val} (x1={x[0]}, x2={x[1]})")
        optimizer.tell(solutions)

@@ -0,0 +1,19 @@
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn.datasets import make_blobs

# create 300 data points in 4 clusters
data, _ = make_blobs(n_samples=300, centers=4, cluster_std=0.60, random_state=0)

# apply k-means clustering
kmeans = KMeans(n_clusters=4)
kmeans.fit(data)
predicted_labels = kmeans.predict(data)

# show the clusters and centroids
plt.scatter(data[:, 0], data[:, 1], c=predicted_labels, s=50, cmap='viridis')
centers = kmeans.cluster_centers_
plt.scatter(centers[:, 0], centers[:, 1], c='red', s=200, alpha=0.75, marker='X')
plt.title("k-Means")
plt.show()

@@ -0,0 +1,18 @@
import numpy as np
import matplotlib.pyplot as plt
from scipy.cluster.hierarchy import dendrogram, linkage
from sklearn.datasets import make_blobs

# create an example data set
data, _ = make_blobs(n_samples=100, centers=3, cluster_std=0.60, random_state=0)

# apply hierarchical clustering (single linkage)
linked = linkage(data, 'single')

# draw the dendrogram
plt.figure(figsize=(10, 7))
dendrogram(linked)
plt.title('Hierarchisches Clustering - Dendrogramm')
plt.xlabel('Datenpunkte')
plt.ylabel('Euklidische Entfernungen')
plt.show()

@@ -0,0 +1,16 @@
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import DBSCAN
from sklearn.datasets import make_moons

# create an example data set
data, _ = make_moons(n_samples=300, noise=0.05, random_state=0)

# apply DBSCAN
dbscan = DBSCAN(eps=0.3, min_samples=5)
predicted_labels = dbscan.fit_predict(data)

# visualization
plt.scatter(data[:, 0], data[:, 1], c=predicted_labels, cmap='viridis')
plt.title('DBSCAN-Clustering')
plt.show()

@@ -0,0 +1,57 @@
import matplotlib.pyplot as plt
import numpy as np
import math

k = 1
# create the spirals
spiral1 = np.zeros((97, 2))
spiral2 = np.zeros((97, 2))
proto = np.zeros((2*97, 2))
for i in range(97):
    phi = i/16 * math.pi
    r = (104 - i)/208
    # class A (even array index)
    proto[2*i][0] = spiral1[i][0] = (r*math.cos(phi))+0.5
    proto[2*i][1] = spiral1[i][1] = (r*math.sin(phi))+0.5
    # class B (odd array index)
    proto[2*i+1][0] = spiral2[i][0] = (-r*math.cos(phi))+0.5
    proto[2*i+1][1] = spiral2[i][1] = (-r*math.sin(phi))+0.5

test_pts = np.zeros((10000, 2))
i = 0
for y in np.arange(0.0, 1.0, 0.01):
    for x in np.arange(0.0, 1.0, 0.01):
        test_pts[i][0] = x
        test_pts[i][1] = y
        i += 1

knnA_x = np.array([])
knnA_y = np.array([])
knnB_x = np.array([])
knnB_y = np.array([])

for i, point in enumerate(test_pts):
    count = 0
    distances = np.linalg.norm(proto[:, 0:2]-point, axis=1)
    for j in range(k):
        # pick the nearest prototype, then mask it out
        bestIndex = np.argmin(distances)
        distances[bestIndex] = np.max(distances)
        if (bestIndex % 2) == 0:  # class A, even index
            count += 1

    if count > k/2:  # majority vote
        knnA_x = np.append(knnA_x, test_pts[i][0])
        knnA_y = np.append(knnA_y, test_pts[i][1])
    else:
        knnB_x = np.append(knnB_x, test_pts[i][0])
        knnB_y = np.append(knnB_y, test_pts[i][1])

plt.scatter(knnA_x, knnA_y, c=[[0.7, 0.7, 0.7]], marker='.')
plt.scatter(knnB_x, knnB_y, c=[[0.3, 0.3, 0.3]], marker='.')
plt.scatter(spiral1[:, 0], spiral1[:, 1], c=[[1., 1., 1.]], marker='x')
plt.scatter(spiral2[:, 0], spiral2[:, 1], c=[[0., 0., 0.]], marker='x')
plt.title('K-Nächste-Nachbarn-Algorithmus mit k='+str(k))
plt.xlabel('x-Achse')
plt.ylabel('y-Achse')
plt.show()

@@ -0,0 +1,73 @@
import numpy as np
from scipy.optimize import minimize
import matplotlib.pyplot as plt

# create some two-dimensional data
np.random.seed(0)
X = np.r_[np.random.randn(20, 2) - [2, 2], np.random.randn(20, 2) + [2, 2]]
Y = [-1] * 20 + [1] * 20

# train a linear SVM
n_samples = len(X)
# P is an NxN matrix with the products yi*yj*(xi.xj)
P = np.outer(Y, Y) * np.dot(X, X.T)

def objective(a):
    return 0.5 * np.dot(a, np.dot(a, P)) - np.sum(a)

def constraint(a):
    return np.dot(a, Y)

a0 = np.zeros(n_samples)
bounds = [(0, None) for _ in range(n_samples)]
constraints = {'type': 'eq', 'fun': constraint}

solution = minimize(objective, a0, bounds=bounds, constraints=constraints)

# Lagrange multipliers
a = np.ravel(solution.x)

# support vectors have non-zero Lagrange multipliers
sv = a > 1e-5
indices = np.where(sv)[0]
a = a[indices]
X_sv = X[indices]
Y_sv = np.array(Y)[indices]

# compute the bias
b = 0
for n in range(len(a)):
    b += Y_sv[n]
    b -= np.sum(a * Y_sv * np.dot(X_sv, X_sv[n]))
b /= len(a)

# compute the weight vector
w = np.zeros(2)
for n in range(len(a)):
    w += a[n] * Y_sv[n] * X_sv[n]

# plot the data points and the SVM decision boundary
plt.figure(figsize=(10, 8))

# draw the data points as crosses and circles
for (x1, x2), y in zip(X, Y):
    if y == -1:
        plt.scatter(x1, x2, c='b', marker='x')  # crosses for class -1
    else:
        plt.scatter(x1, x2, c='r', marker='o')  # circles for class 1

# draw the SVM decision boundary
ax = plt.gca()
xlim = ax.get_xlim()
ylim = ax.get_ylim()

xx = np.linspace(xlim[0], xlim[1], 30)
yy = np.linspace(ylim[0], ylim[1], 30)
YY, XX = np.meshgrid(yy, xx)
xy = np.vstack([XX.ravel(), YY.ravel()]).T
Z = np.dot(xy, w) + b
Z = Z.reshape(XX.shape)
ax.contour(XX, YY, Z, colors='k', levels=[-1, 0, 1], alpha=0.5, linestyles=['--', '-', '--'])
plt.title("Support Vector Machine")
plt.show()

@@ -0,0 +1,36 @@
from sklearn import svm
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_circles

# generate non-linear data
np.random.seed(0)
X, Y = make_circles(n_samples=100, noise=0.05)

# create an SVM model with an RBF kernel
model = svm.SVC(kernel='rbf', C=1.0, gamma='scale')

# train the model
model.fit(X, Y)

# draw the data points as crosses and circles
plt.scatter(X[Y==0, 0], X[Y==0, 1], c='blue', marker='x')
plt.scatter(X[Y==1, 0], X[Y==1, 1], c='red', marker='o')

# draw the decision boundary
ax = plt.gca()
xlim = ax.get_xlim()
ylim = ax.get_ylim()

# create a grid for evaluating the model
xx = np.linspace(xlim[0], xlim[1], 30)
yy = np.linspace(ylim[0], ylim[1], 30)
YY, XX = np.meshgrid(yy, xx)
xy = np.vstack([XX.ravel(), YY.ravel()]).T
Z = model.decision_function(xy).reshape(XX.shape)

# draw the decision boundary and the margins
ax.contour(XX, YY, Z, colors='k', levels=[-1, 0, 1], alpha=0.5, linestyles=['--', '-', '--'])

plt.title("Support Vector Machine mit RBF-Kernel")
plt.show()

@@ -0,0 +1,28 @@
# import the required libraries
from sklearn.tree import DecisionTreeClassifier, plot_tree
import matplotlib.pyplot as plt

# create an example array (table) for training
data = [
    [20, 65, 10, 0, 1],  # nice weather, we play football
    [25, 80, 5, 1, 0],   # it is raining, we do not play
    [18, 70, 15, 0, 1],  # still good weather, we play
    [10, 90, 20, 1, 0],  # cold and rainy, we do not play
    [15, 85, 25, 0, 0],  # too windy, we do not play
    [22, 60, 5, 0, 1],   # nice weather, we play
    [30, 50, 10, 0, 0],  # too hot, we do not play
    [20, 70, 8, 0, 1]    # nice weather, we play
]

# separate the features and the target
X = [row[:-1] for row in data]
y = [row[-1] for row in data]

# create and train the decision tree
tree_classifier = DecisionTreeClassifier()
tree_classifier.fit(X, y)

# visualize the tree
plt.figure(figsize=(10, 7))
plot_tree(tree_classifier, filled=True, feature_names=['Temperatur', 'Luftfeuchtigkeit', 'Windgeschwindigkeit', 'Regen'], class_names=['Nicht spielen', 'Spielen'], fontsize=10)
plt.show()

@@ -0,0 +1,14 @@
from sklearn.datasets import load_iris
from sklearn.ensemble import RandomForestClassifier

# load the data
iris = load_iris()
X = iris.data
y = iris.target

# create the random forest model
clf = RandomForestClassifier(n_estimators=100)

# train the model
clf.fit(X, y)

# prediction for a new data point
sample_data = [[5.1, 3.5, 1.4, 0.2]]
prediction = clf.predict(sample_data)
print("Predicted class:", prediction)

@@ -0,0 +1,38 @@
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.naive_bayes import MultinomialNB

# training data
spam_mails = [
    "Gewinnen Sie jetzt Geld!",
    "Klicken Sie hier für ein kostenloses iPhone!",
    "Exklusives Angebot nur für Sie!"
]

normale_mails = [
    "Können wir uns morgen für ein Meeting treffen?",
    "Hier sind die Dokumente, die Sie angefordert haben.",
    "Vergessen Sie nicht, die Rechnung zu bezahlen."
]

train_data = spam_mails + normale_mails
train_labels = ['spam'] * 3 + ['nicht-spam'] * 3

# test data
test_data = [
    "Das Meeting wurde verschoben.",
    "Hier ist Ihr kostenloses Geschenk!",
    "Können wir das Meeting auf nächste Woche verschieben?"
]

# convert the texts into count vectors
vectorizer = CountVectorizer()
X_train = vectorizer.fit_transform(train_data)

# train the model
classifier = MultinomialNB()
classifier.fit(X_train, train_labels)

# classify the test data
X_test = vectorizer.transform(test_data)
predictions = classifier.predict(X_test)
print(predictions)

@@ -0,0 +1,10 @@
import numpy as np
from sklearn.linear_model import Lasso
from sklearn.datasets import make_regression

# create data
X, y = make_regression(n_samples=200, n_features=10, noise=0.5, random_state=42)

# lasso regression with a regularization parameter of 1.0
lasso = Lasso(alpha=1.0)
lasso.fit(X, y)

# print the coefficients
print(lasso.coef_)

@@ -0,0 +1,13 @@
import numpy as np
from sklearn.linear_model import Ridge
from sklearn.datasets import make_regression

# create data
X, y = make_regression(n_samples=200, n_features=10, noise=0.5, random_state=42)

# ridge regression with a regularization parameter of 1.0
ridge = Ridge(alpha=1.0)
ridge.fit(X, y)

# print the coefficients
print(ridge.coef_)

@@ -0,0 +1,26 @@
import numpy as np
import matplotlib.pyplot as plt
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import LinearRegression

# create data
np.random.seed(0)
X = np.sort(5 * np.random.rand(80, 1), axis=0)
y = np.sin(X).ravel() + np.random.randn(80) * 0.1

# transform the data for a polynomial of degree two
polynomial_features = PolynomialFeatures(degree=2)
X_poly = polynomial_features.fit_transform(X)

# polynomial regression
regressor = LinearRegression()
regressor.fit(X_poly, y)

# predict and plot
y_pred = regressor.predict(X_poly)
plt.scatter(X, y, color='blue')
plt.plot(X, y_pred, color='red')
plt.title("Polynomiale Regression")
plt.xlabel("X")
plt.ylabel("y")
plt.show()

@@ -0,0 +1,34 @@
{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "print(\"hello\")"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "base",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.9"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}

@@ -0,0 +1,67 @@
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import numpy as np

# BIAS,x,y
train = np.array([[1,0,0],
                  [1,1,0],
                  [1,0,1],
                  [1,1,1]])
target = np.array([0,0,0,1])  # AND operation
out = np.array([0,0,0,0])
weight = np.random.rand(3)*(0.5)
learnrate = 1.0
grad = np.zeros(3)

def sigmoid(summe):  # transfer function
    return 1.0/(1.0+np.exp(-1.0*summe))

def learn():
    # TODO: implement here
    global train, weight, out, target, learnrate
    # compute the neuron output for all 4 training patterns

    def perceptron(output):  # step activation
        return max(np.sign(output), 0)

    vperceptron = np.vectorize(perceptron)

    out = vperceptron(np.matmul(train, weight))

    # perceptron learning rule: adjust the weights of active inputs
    for j in range(4):
        for i in range(3):
            if train[j][i] == 1 and out[j] == 0 and target[j] == 1:
                weight[i] = weight[i] + train[j][i]
            elif train[j][i] == 1 and out[j] == 1 and target[j] == 0:
                weight[i] = weight[i] - train[j][i]

def outp(N=100):  # generate data for the output plot
    global weight
    x = np.linspace(0, 1, N)
    y = np.linspace(0, 1, N)
    xx, yy = np.meshgrid(x, y)
    oo = sigmoid(weight[0] + weight[1]*xx + weight[2]*yy)
    return xx, yy, oo

def on_close(event):  # window is closed
    exit(0)

plt.ion()
fig = plt.figure()
fig.canvas.mpl_connect('close_event', on_close)
while True:  # endless loop
    #for i in range(1000):
    learn()  # one learning step
    plt.clf()  # clear the screen
    X, Y, Z = outp()  # generate plot data
    ax = fig.add_subplot(111, projection='3d')
    # 3D plot of the data
    ax.plot_surface(X, Y, Z, edgecolor='royalblue',
                    lw=0.5, rstride=8, cstride=8, alpha=0.3)
    ax.set_title('Neuron lernt AND-Funktion')
    ax.set_xlabel('In[1]')
    ax.set_ylabel('In[2]')
    ax.set_zlabel('Ausgabe\ndes Neurons')
    ax.set_zlim(0, 1)
    plt.draw()
    plt.pause(0.00001)