# MLE/06_mul_func_nn/mul_func.py
# (paste metadata from the source viewer: 70 lines, 1.5 KiB, Python)
import numpy as np
import random
import json
import math
import os
# Learning rate (not used in the forward pass below; kept for the training step).
ETA = 0.95

# Load training data from JSON files located one directory above this script.
base_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
with open(os.path.join(base_path, 'training_input.json'), 'r') as f:
    training_input = np.array(json.load(f))
with open(os.path.join(base_path, 'desired_output.json'), 'r') as f:
    desired_output = np.array(json.load(f))
def sigmoid_func(z):
    """Logistic sigmoid 1 / (1 + e^-z), numerically stable for large |z|.

    The naive form overflows in math.exp(-z) when z is a large negative
    number; the algebraically equivalent e^z / (1 + e^z) branch avoids that.
    """
    if z >= 0:
        return 1 / (1 + math.exp(-z))
    ez = math.exp(z)  # exp of a non-positive value: never overflows
    return ez / (1 + ez)
def relu(z):
    """Rectified linear unit: return z for positive inputs, else 0."""
    if z > 0:
        return z
    return 0
def weight_loss_derivative(aL, aLm1, zL, y):
    """Gradient of the squared loss (aL - y)^2 w.r.t. a weight feeding a ReLU unit.

    Chain rule: dL/dw = a_{L-1} * relu'(zL) * dL/daL, with relu'(z) = 1 for
    z > 0 and 0 otherwise.

    Args:
        aL:   activation of the current layer.
        aLm1: activation of the previous layer (the weight's input).
        zL:   pre-activation of the current layer.
        y:    target output.
    """
    # Original line was a SyntaxError ("... else sigmoid = 0"); this is the
    # intended ReLU derivative (the local was also misleadingly named sigmoid).
    relu_grad = 1 if zL > 0 else 0
    return aLm1 * relu_grad * 2 * (aL - y)
def bias_loss_derivative(a, y):
    """Gradient of the squared loss w.r.t. a bias, assuming a sigmoid unit:
    (a - y) * sigma'(z) where sigma'(z) = a * (1 - a)."""
    error = a - y
    return error * a * (1 - a)
# The hidden-layer "sigmoid" is actually ReLU in this experiment.
sigmoid = relu

# Network parameters.
# Layer 0 (hidden): 4 neurons, each taking the 2 inputs -> 8 weights, 4 biases.
# Layer 1 (output): a weighted sum of the 4 hidden activations -> 4 weights, 1 bias.
# NOTE(review): the original indexed w[1]/w[2], a[1]/a[2], val_pair[1]/[2]
# (1-based) into 0-based containers and built them with np.array(x, y), which
# passes y as dtype and crashes. Rewritten with plain lists and 0-based indexing.
w = [
    np.random.uniform(-1.0, 1.0, 8),
    np.random.uniform(-1.0, 1.0, 4),
]
b = [
    np.random.uniform(-0.1, 0.1, 4),
    random.uniform(-0.1, 0.1),
]

c = []  # per-sample squared-error cost
for sample_idx, val_pair in enumerate(training_input):
    # Hidden layer: weights are consumed in pairs (one per input component).
    # np.append in the original returned a copy and was discarded; use a list.
    hidden = []
    for idx, i in enumerate(range(0, len(w[0]), 2)):  # i steps by 2, idx steps by 1
        z = w[0][i] * val_pair[0] * w[0][i + 1] * val_pair[1] + b[0][idx]
        hidden.append(sigmoid(z))

    # Output layer: linear combination of the hidden activations plus one bias.
    out_terms = []
    for j in range(len(w[1])):
        z = w[1][j] * hidden[j] + b[1]
        out_terms.append(z)
    output = np.sum(out_terms)
    print(output)

    # Cost for this sample. ndarrays have no .index; enumerate supplies the row.
    print(sample_idx)
    c.append((output - desired_output[sample_idx]) ** 2 / 2)
print(w)