import numpy as np
class MultilayerPerceptron:
    def __init__(self, input_size, hidden_sizes, output_size):
        self.input_size = input_size
        self.hidden_sizes = hidden_sizes
        self.output_size = output_size
        self.weights = []
        self.biases = []

        # Initialize weights and biases for each layer
        sizes = [input_size] + hidden_sizes + [output_size]
        for i in range(len(sizes) - 1):
            weight_matrix = np.random.randn(sizes[i], sizes[i + 1])
            bias_vector = np.random.randn(sizes[i + 1])
            self.weights.append(weight_matrix)
            self.biases.append(bias_vector)
    def forward(self, x):
        # Feedforward propagation: apply each layer's affine transform
        # followed by the activation, collecting every layer's output
        activations = [x]
        for i in range(len(self.weights)):
            x = self.activation(np.dot(x, self.weights[i]) + self.biases[i])
            activations.append(x)
        return activations
    def activation(self, x):
        # Sigmoid activation function: maps any real input into (0, 1)
        return 1 / (1 + np.exp(-x))

    def step_function(self, x):
        # Step function: 1 for non-negative inputs, 0 otherwise
        return np.where(x >= 0, 1, 0)

    def sign_function(self, x):
        # Sign function: 1 for non-negative inputs, -1 otherwise
        return np.where(x >= 0, 1, -1)
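# Aside, as an assumption on my part rather than part of the original code:
# np.exp(-x) emits an overflow warning for large negative x. A hypothetical
# stable_sigmoid helper clips the input first; exp(500) is still well within
# float64 range, so the clipped result matches the exact value in practice.
def stable_sigmoid(x):
    return 1 / (1 + np.exp(-np.clip(x, -500, 500)))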
mlp = MultilayerPerceptron(input_size=3, hidden_sizes=[4, 3], output_size=1)
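# A quick sanity check (my addition, not in the original demo): the layer
# sizes 3 -> 4 -> 3 -> 1 should show up in the weight shapes, and a forward
# pass should return one activation per layer, input included.
for i, (w, b) in enumerate(zip(mlp.weights, mlp.biases)):
    print(f"Layer {i}: weights {w.shape}, biases {b.shape}")
print([a.shape for a in mlp.forward(np.array([1, 0, 1]))])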
def demonstrate_logical_function(logical_function):
    # All eight combinations of three binary inputs
    inputs = np.array([[0, 0, 0], [0, 0, 1], [0, 1, 0], [0, 1, 1],
                       [1, 0, 0], [1, 0, 1], [1, 1, 0], [1, 1, 1]])

    print(f"Logical Function: {logical_function.__name__}\n")
    for input_data in inputs:
        activations = mlp.forward(input_data)
        output = activations[-1][0]
        print(f"Input: {input_data}, Output: {logical_function(output)}")
    print()
demonstrate_logical_function(mlp.step_function)
demonstrate_logical_function(mlp.sign_function)
demonstrate_logical_function(mlp.activation)
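# Note that the weights are random and untrained, so the sigmoid output is an
# arbitrary value in (0, 1); thresholding it at 0 with step_function or
# sign_function therefore always yields 1. A hypothetical variant, assumed
# here rather than taken from the original, thresholds at 0.5 and seeds NumPy
# so the run is reproducible:
np.random.seed(0)  # assumed seed value, for reproducibility only
mlp_seeded = MultilayerPerceptron(input_size=3, hidden_sizes=[4, 3], output_size=1)
output = mlp_seeded.forward(np.array([1, 1, 0]))[-1][0]
print(f"Output thresholded at 0.5: {1 if output >= 0.5 else 0}")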