AE
# -*- coding: utf-8 -*-
"""AE1.ipynb
Automatically generated by Colaboratory.
Original file is located at https://colab.research.google.com/drive/1PTUrfACLskfbIOF8AeZot1vRrp7SkTy5 """
import numpy as np
from tensorflow import keras
from tensorflow.keras import layers
import matplotlib.pyplot as plt
(x_train, _), (x_test, _) = keras.datasets.mnist.load_data()
# Normalize and reshape the input data
x_train = x_train / 255.0
x_test = x_test / 255.0
x_train = np.reshape(x_train, (len(x_train), 28, 28, 1))
x_test = np.reshape(x_test, (len(x_test), 28, 28, 1))
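# Quick sanity check (not in the original notebook): confirm shapes and pixel range after preprocessing.
print(x_train.shape, x_test.shape, x_train.min(), x_train.max())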
from tensorflow.keras.layers import Flatten, Dense, Reshape
# Build the autoencoder: flatten -> 64-unit bottleneck -> 784-unit reconstruction -> reshape back to an image
input_img = keras.Input(shape=(28, 28, 1))
encoded = Flatten()(input_img)
encoded = Dense(64, activation='relu')(encoded)
decoded = Dense(784, activation='sigmoid')(encoded)
decoded = Reshape((28, 28, 1))(decoded)
autoencoder = keras.Model(input_img, decoded)
autoencoder.compile(optimizer='adam', loss='binary_crossentropy')
autoencoder.summary()
from tensorflow.keras.utils import plot_model
plot_model(autoencoder, to_file='model_visualization.png', show_shapes=True)
autoencoder.fit(x_train, x_train, epochs=10, batch_size=128, shuffle=True, validation_data=(x_test, x_test))
# Test the autoencoder
decoded_imgs = autoencoder.predict(x_test)
# Display some results
n = 10  # Number of digits to display
plt.figure(figsize=(20, 4))
for i in range(n):
    # Original images
    ax = plt.subplot(2, n, i + 1)
    plt.imshow(x_test[i].reshape(28, 28))
    plt.gray()
    ax.get_xaxis().set_visible(False)
    ax.get_yaxis().set_visible(False)

    # Decoded images
    ax = plt.subplot(2, n, i + 1 + n)
    plt.imshow(decoded_imgs[i].reshape(28, 28))
    plt.gray()
    ax.get_xaxis().set_visible(False)
    ax.get_yaxis().set_visible(False)
plt.show()
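# Optional sketch (not from the original notebook): reuse the trained layers to inspect the
# 64-dimensional latent codes. Assumes the Dense(64) bottleneck is autoencoder.layers[2],
# which matches the Input -> Flatten -> Dense(64) -> Dense(784) -> Reshape stack above.
encoder = keras.Model(autoencoder.input, autoencoder.layers[2].output)
latent_codes = encoder.predict(x_test[:5])
print(latent_codes.shape)  # expected: (5, 64)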
RNN
# -*- coding: utf-8 -*-
"""RNN.ipynb
Automatically generated by Colaboratory.
Original file is located at https://colab.research.google.com/drive/1RYw2R5oyRY1Dl1dQEybYpT5GhtJSra2z """
import numpy as np
from tensorflow.keras.datasets import imdb
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Embedding, LSTM, Dense
from tensorflow.keras.preprocessing import sequence
# Set random seed for reproducibility
np.random.seed(42)
# Set the maximum number of words to be used (vocabulary size)
max_words = 5000
# Load the IMDB dataset
(x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=max_words)
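# Optional sketch (not from the original notebook): decode one integer-encoded review back
# into words for inspection. imdb.load_data reserves indices 0 (padding), 1 (start) and
# 2 (unknown) by default, so dataset tokens are offset by 3 from imdb.get_word_index().
word_index = imdb.get_word_index()
index_to_word = {index + 3: word for word, index in word_index.items()}
print(' '.join(index_to_word.get(token, '?') for token in x_train[0]))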
# Pad sequences to a fixed length
max_length = 100
x_train = sequence.pad_sequences(x_train, maxlen=max_length)
x_test = sequence.pad_sequences(x_test, maxlen=max_length)
# Create the RNN model: word embeddings -> LSTM -> sigmoid output for binary sentiment
model = Sequential()
model.add(Embedding(max_words, 32, input_length=max_length))
model.add(LSTM(100))
model.add(Dense(1, activation='sigmoid'))
# Compile the model
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
# Print the model summary
print(model.summary())
# Train the model
batch_size = 64
epochs = 5
model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, validation_data=(x_test, y_test))
# Evaluate the model
scores = model.evaluate(x_test, y_test, verbose=0)
print('Test loss:', scores[0])
print('Test accuracy:', scores[1])
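# Optional sketch (not from the original notebook): classify a single padded review with the
# trained model. The 0.5 decision threshold on the sigmoid output is an assumption.
sample = x_test[:1]
probability = model.predict(sample)[0, 0]
print('positive' if probability >= 0.5 else 'negative', probability)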
CNN
# -*- coding: utf-8 -*-
"""CNN1.ipynb
Automatically generated by Colaboratory.
Original file is located at https://colab.research.google.com/drive/133p22NryABJ8evbwD4GFIjP0JqgcjvGY """
import tensorflow as tf
from tensorflow.keras import datasets, layers, models
import matplotlib.pyplot as plt
(train_images, train_labels), (test_images, test_labels) = datasets.cifar100.load_data()
train_images, test_images = train_images/255.0, test_images/255.0
# Show the first 25 training images
for i in range(25):
    plt.subplot(5, 5, i + 1)
    plt.imshow(train_images[i])
plt.show()
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense
model = models.Sequential()
model.add(Conv2D(64, (3, 3), activation='relu', input_shape=(32, 32, 3)))
model.add(MaxPooling2D((2, 2)))
model.add(Conv2D(128, (3, 3), activation='relu'))
model.add(Flatten())
model.add(Dense(256, activation='relu'))
# Output layer: 100 CIFAR-100 classes, so use softmax rather than relu
model.add(Dense(100, activation='softmax'))
model.summary()
# Compile and train the model
model.compile(optimizer='adagrad', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
history = model.fit(train_images, train_labels, epochs=3, batch_size=128, validation_split=0.2)
# Plot training and validation accuracy
plt.plot(history.history['accuracy'], label='train')
plt.plot(history.history['val_accuracy'], label='validation')
plt.legend()
test_loss, test_accuracy = model.evaluate(test_images, test_labels)
print("accuracy:", test_accuracy)
print("loss:", test_loss)
# Continue training the same model with a larger batch size
history2 = model.fit(train_images, train_labels, epochs=3, batch_size=256, validation_split=0.2)
plt.plot(history2.history['accuracy'], label='train')
plt.plot(history2.history['val_accuracy'], label='validation')
plt.legend()
test_loss2, test_accuracy2 = model.evaluate(test_images, test_labels)
print("accuracy:", test_accuracy2)
print("loss:", test_loss2)
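# Optional sketch (not from the original notebook): predict the class of a single test image.
# argmax over the 100 softmax outputs gives the predicted CIFAR-100 label id.
predictions = model.predict(test_images[:1])
print('predicted class id:', predictions[0].argmax(), 'true class id:', int(test_labels[0][0]))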