CNN

import tensorflow as tf
from tensorflow.keras import datasets, layers, models
import matplotlib.pyplot as plt
import numpy as np
from tensorflow import keras
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense
(x_train, y_train), (x_test, y_test) = datasets.cifar100.load_data()
x_train = x_train.astype('float32') / 255.0
x_test = x_test.astype('float32') / 255.0
x_train.shape
sample_images = x_train[:10]
sample_labels = y_train[:10]

fig, axs = plt.subplots(5, 2, figsize=(8, 12))
fig.tight_layout()
for i, ax in enumerate(axs.flat):
    ax.imshow(sample_images[i])
plt.show()
model = Sequential()
model.add(Conv2D(64, (3, 3), activation='relu', input_shape=(32, 32, 3)))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(256, activation='relu'))
model.add(Dense(100, activation='softmax'))
model.summary()

model.compile(optimizer='adagrad', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
epochs = 12
batch_size = 128
validation_split = 0.2
history = model.fit(x_train, y_train, epochs=epochs, batch_size=batch_size, validation_split=validation_split)
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.title('Model Accuracy')
plt.legend(['Train', 'Validation'], loc='upper left')
plt.show()
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('Model Loss')
plt.legend(['Train', 'Validation'], loc='upper right')
plt.show()
scores = model.evaluate(x_test, y_test)
print('Test loss:', scores[0])
print('Test accuracy:', scores[1])
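Beyond the aggregate test metrics, it can help to inspect individual predictions. The snippet below is a minimal sketch (not part of the original code) that uses the trained CNN to predict class indices for a few test images and compares them with the true CIFAR-100 labels; names such as pred_labels are illustrative.

# Illustrative sketch: per-image predictions for the first 10 test images
probs = model.predict(x_test[:10])
pred_labels = np.argmax(probs, axis=1)
for pred, true in zip(pred_labels, y_test[:10].flatten()):
    print('predicted:', pred, ' true:', true)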
RNN

import numpy as np
from tensorflow.keras.datasets import imdb
from tensorflow.keras.layers import Embedding, LSTM, Dense
from tensorflow.keras.models import Sequential
from tensorflow.keras.preprocessing import sequence
import matplotlib.pyplot as plt
maxw = 5000
(x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=maxw)

maxl = 100
x_train = sequence.pad_sequences(x_train, maxlen=maxl)
x_test = sequence.pad_sequences(x_test, maxlen=maxl)

model = Sequential()
model.add(Embedding(maxw, 32, input_length=maxl))
model.add(LSTM(100))
model.add(Dense(1, activation='sigmoid'))
print(model.summary())

model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])

batch_size = 64
epochs = 10
valid_split = 0.2
history = model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, validation_split=valid_split)
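The IMDB test split is padded above but never used. A short follow-up sketch (assuming the model and history from the block above) is to evaluate on it and plot the training curves, mirroring the CNN section.

# Illustrative sketch: evaluate the sentiment classifier on the held-out reviews
loss, acc = model.evaluate(x_test, y_test, batch_size=batch_size)
print('Test loss:', loss)
print('Test accuracy:', acc)

plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.title('Model Accuracy')
plt.legend(['Train', 'Validation'], loc='upper left')
plt.show()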
AE

import numpy as np
import matplotlib.pyplot as plt
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Flatten, Reshape, Input
from tensorflow.keras.datasets import fashion_mnist

(x_train, y_train), (x_test, y_test) = fashion_mnist.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0
# Add a channel dimension so the arrays match the (28, 28, 1) input shape used below
x_train = x_train[..., np.newaxis]
x_test = x_test[..., np.newaxis]

model = Sequential()
model.add(Input(shape=(28, 28, 1)))
model.add(Flatten())
model.add(Dense(256, activation='relu'))
model.add(Dense(128, activation='relu'))
model.add(Dense(256, activation='elu'))
model.add(Dense(784, activation='elu'))
model.add(Reshape((28, 28, 1)))
model.summary()
input_img = keras.Input(shape=(28, 28, 1))
encoded = layers.Flatten()(input_img)
encoded = layers.Dense(512, activation='elu')(encoded)
encoded = layers.Dense(256, activation='elu')(encoded)
bottleneck = layers.Dense(128, activation='relu')(encoded)
# Decoder: chain each layer on the previous one (the second Dense originally took
# the bottleneck directly, which disconnected the 256-unit layer from the graph)
decoded = layers.Dense(256, activation='sigmoid')(bottleneck)
decoded = layers.Dense(512, activation='sigmoid')(decoded)
output = layers.Dense(784, activation='sigmoid')(decoded)
output = layers.Reshape((28, 28, 1))(output)
autoencoder = keras.Model(input_img, output)
autoencoder.compile(optimizer='adam', loss='binary_crossentropy')
autoencoder.summary()
autoencoder.fit(x_train, x_train, epochs=2, batch_size=128, shuffle=True, validation_data=(x_test, x_test))

loss = autoencoder.evaluate(x_test, x_test)
print('Test loss:', loss)
for i in range(10):
    sample_image = x_test[i]
    reconstructed_image = autoencoder.predict(sample_image.reshape((1, 28, 28, 1)))

    fig, axes = plt.subplots(1, 2, figsize=(8, 4))
    axes[0].imshow(sample_image.reshape((28, 28)))
    axes[0].set_title('Original Image')
    axes[1].imshow(reconstructed_image.reshape((28, 28)))
    axes[1].set_title('Reconstructed Image')
    plt.show()
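Since the bottleneck tensor is still in scope, a compact way to inspect the 128-dimensional codes is to wrap the encoder half in its own Model. This is a minimal sketch, not part of the original code; the encoder name is illustrative.

# Illustrative sketch: reuse the layers above to expose the 128-dim bottleneck codes
encoder = keras.Model(input_img, bottleneck)
codes = encoder.predict(x_test[:10])
print(codes.shape)  # expected: (10, 128)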
MLP
import pandas as pd
from sklearn.datasets import load_iris

iris = load_iris()
iris_df = pd.DataFrame(iris.data, columns=iris.feature_names)
iris_df['target'] = iris.target
Training the Model
from sklearn.neural_network import MLPClassifier
mlp = MLPClassifier(hidden_layer_sizes=(20, 15, 10), max_iter=1000, activation='relu', solver='adam')
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score

# Split the Iris data so train_data/test_data are defined (an 80/20 split is assumed here)
train_data, test_data, train_labels, test_labels = train_test_split(iris.data, iris.target, test_size=0.2, random_state=42)

mlp.fit(train_data, train_labels)
predictions_train = mlp.predict(train_data)
print('Train accuracy:', accuracy_score(train_labels, predictions_train))
predictions_test = mlp.predict(test_data)
print('Test accuracy:', accuracy_score(test_labels, predictions_test))
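Accuracy alone can hide per-class behaviour on the three Iris classes. A short follow-up sketch (assuming the fitted mlp and the split above) is to print a confusion matrix and classification report from sklearn.metrics.

# Illustrative sketch: per-class breakdown of the MLP's test predictions
from sklearn.metrics import confusion_matrix, classification_report

print(confusion_matrix(test_labels, predictions_test))
print(classification_report(test_labels, predictions_test, target_names=iris.target_names))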