Multilayer perceptron (MLP) from scratch
import numpy as np

class MLP:
    def __init__(self, input_size, hidden_size, output_size, learning_rate):
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.output_size = output_size
        self.learning_rate = learning_rate

        # Initialize weights and biases for the hidden and output layers
        # (one row of hidden_weights per hidden unit, so the matrix is 4 x 3)
        self.hidden_weights = np.array([[ 1.2, -0.8,  0.3],
                                        [ 0.5,  1.1,  1.9],
                                        [ 0.6,  0.1,  0.8],
                                        [-1.3,  0.5, -1.2]])
        self.hidden_bias = np.array([0.7, 0.8, -0.5, -0.7])
        self.output_weights = np.array([-1.2, -0.4, 0.3, -2.0])
        self.output_bias = np.array([-1.0])
    def forward(self, X):
        # Hidden layer: weighted sum of the inputs plus bias, then sigmoid
        self.hidden_output = np.dot(self.hidden_weights, X) + self.hidden_bias
        self.hidden_activation = self.sigmoid(self.hidden_output)
        # Output layer: weighted sum of the hidden activations plus bias, then sigmoid
        self.output = np.dot(self.hidden_activation, self.output_weights) + self.output_bias
        self.output_activation = self.sigmoid(self.output)
        return self.output_activation
    def backward(self, X, y, output):
        # Error and delta for the output layer
        error = y - output
        output_delta = error * self.sigmoid_derivative(output)
        # Propagate the error back to the hidden layer
        hidden_error = output_delta * self.output_weights
        hidden_delta = hidden_error * self.sigmoid_derivative(self.hidden_activation)
        # Update weights and biases for the output and hidden layers (single-sample update)
        self.output_weights += self.learning_rate * self.hidden_activation * output_delta
        self.output_bias += self.learning_rate * output_delta
        self.hidden_weights += self.learning_rate * np.outer(hidden_delta, X)
        self.hidden_bias += self.learning_rate * hidden_delta
        print("Updated values:")
        print("Output weights:\n", self.output_weights)
        print("Output bias:\n", self.output_bias)
        print("Hidden weights:\n", self.hidden_weights)
        print("Hidden bias:\n", self.hidden_bias)
    def train(self, X, y, epochs):
        for i in range(epochs):
            # Forward pass
            output = self.forward(X)
            # Backward pass
            self.backward(X, y, output)
            # Mean squared error for this epoch
            loss = np.mean(np.square(y - output))
            print(f"Epoch {i}, Loss: {loss:.4f}")
            print()

    def predict(self, X):
        return self.forward(X)

    def sigmoid(self, x):
        return 1 / (1 + np.exp(-x))

    def sigmoid_derivative(self, x):
        # x is assumed to already be a sigmoid output, so the derivative is x * (1 - x)
        return x * (1 - x)
# Define a single input sample and its target output
X = np.array([-0.3, -0.4, 0.6])
y = np.array([0.3])

# Create an MLP with input size 3, hidden size 4, output size 1, and learning rate 0.1
mlp = MLP(3, 4, 1, 0.1)

# Train the MLP on this input/output pair for 10 epochs
mlp.train(X, y, 10)
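As a quick check that training has moved the prediction toward the target, the trained model can be queried again with predict; this snippet is an optional sketch added here, not part of the original listing.

# Compare the trained prediction with the target y = 0.3
prediction = mlp.predict(X)
print("Prediction after training:", prediction)
print("Target:", y)
print("Absolute error:", np.abs(y - prediction))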
Activation functions
import numpy as np
import matplotlib.pyplot as plt

arr = np.arange(-10, 9)

# Linear (identity) activation
linear_output = arr
plt.plot(arr, linear_output, label='Linear')

# Sigmoid: squashes inputs into (0, 1)
sigmoid_output = 1 / (1 + np.exp(-arr))
plt.plot(arr, sigmoid_output, label='Sigmoid')

# Tanh: squashes inputs into (-1, 1)
tanh_output = np.tanh(arr)
plt.plot(arr, tanh_output, label='Tanh')

# ReLU: zero for negative inputs, identity for positive inputs
relu_output = np.maximum(arr, 0)
plt.plot(arr, relu_output, label='ReLU')

# Leaky ReLU: small slope (0.1) for negative inputs
leaky_relu_output = np.where(arr > 0, arr, arr * 0.1)
plt.plot(arr, leaky_relu_output, label='Leaky ReLU')

# ELU: exponential curve approaching -1 for negative inputs
elu_output = np.where(arr > 0, arr, np.exp(arr) - 1)
plt.plot(arr, elu_output, label='ELU')

plt.legend()
plt.show()
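The backward pass in the MLP above relies on the derivative of its activation, so it can also help to see the derivative curves; the snippet below is an illustrative sketch (the variable names are chosen here, not part of the original code) plotting the sigmoid, tanh, and ReLU derivatives over the same range.

# Derivatives of a few activations, as used during backpropagation
sigmoid_derivative_output = sigmoid_output * (1 - sigmoid_output)  # sigmoid'(x) = sigmoid(x) * (1 - sigmoid(x))
tanh_derivative_output = 1 - tanh_output ** 2                      # tanh'(x) = 1 - tanh(x)^2
relu_derivative_output = np.where(arr > 0, 1, 0)                   # ReLU'(x) = 1 for x > 0, else 0

plt.figure()
plt.plot(arr, sigmoid_derivative_output, label='Sigmoid derivative')
plt.plot(arr, tanh_derivative_output, label='Tanh derivative')
plt.plot(arr, relu_derivative_output, label='ReLU derivative')
plt.legend()
plt.show()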
Loss functions
import numpy as np

# Sample "actual" and "predicted" values drawn from a standard normal distribution
actual_outputs = np.random.normal(loc=0, scale=1, size=20)
predicted_outputs = np.random.normal(loc=0, scale=1, size=20)

def mae_loss(actual_outputs, predicted_outputs):
    # Mean absolute error
    return np.mean(np.abs(actual_outputs - predicted_outputs))

def mse_loss(actual_outputs, predicted_outputs):
    # Mean squared error
    return np.mean((actual_outputs - predicted_outputs) ** 2)

def huberLoss(actual_outputs, predicted_outputs, delta=1):
    # Quadratic for errors below delta, linear above it
    diff = np.abs(actual_outputs - predicted_outputs)
    huber_loss = np.where(diff < delta, 0.5 * diff ** 2, delta * (diff - 0.5 * delta))
    return np.mean(huber_loss)

print("MAE Distance :", mae_loss(actual_outputs, predicted_outputs))
print("MSE Distance :", mse_loss(actual_outputs, predicted_outputs))
print("huberLoss Distance :", huberLoss(actual_outputs, predicted_outputs))
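To make the effect of delta concrete, the three losses can be compared on a single large error; the values below are made up purely for illustration, showing that Huber loss grows linearly (like MAE) once the error exceeds delta, while MSE grows quadratically.

# Illustrative comparison on one large error (|actual - predicted| = 4)
a = np.array([5.0])
p = np.array([1.0])
print("MAE  :", mae_loss(a, p))            # 4.0
print("MSE  :", mse_loss(a, p))            # 16.0
print("Huber:", huberLoss(a, p, delta=1))  # 1 * (4 - 0.5 * 1) = 3.5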