# About: CNN experiments — CIFAR-100 training, dataset sample visualizations,
# evaluation metrics, and MNIST training with data augmentation.
# Load CIFAR-100, normalize it, and train a minimal CNN for one epoch.
import tensorflow as tf
from tensorflow.keras import layers, models, optimizers, regularizers
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.datasets import cifar100
import matplotlib.pyplot as plt

# Load data, scale pixels to [0, 1], one-hot encode the 100 class labels.
(x_train, y_train), (x_val, y_val) = cifar100.load_data()
x_train, x_val = x_train / 255.0, x_val / 255.0
y_train, y_val = to_categorical(y_train, 100), to_categorical(y_val, 100)

# cifar100.load_data() already returns (N, 32, 32, 3); the reshape just makes
# that explicit. BUG FIX: the original read `(x_train.shape[0], 32, 32 3)` —
# a missing comma, i.e. a SyntaxError.
x_train = x_train.reshape((x_train.shape[0], 32, 32, 3))
x_val = x_val.reshape((x_val.shape[0], 32, 32, 3))

# Single conv block followed by a 100-way softmax classifier.
model = models.Sequential()
model.add(layers.Conv2D(32, (3, 3), activation='relu', input_shape=(32, 32, 3)))
model.add(layers.MaxPooling2D(2, 2))
model.add(layers.Flatten())
model.add(layers.Dense(100, activation='softmax'))
model.summary()

model.compile(optimizer='sgd',
              loss='categorical_crossentropy',
              metrics=['accuracy'])

# One epoch only; `a` holds the History object used by the plotting cell below.
a = model.fit(x_train, y_train, epochs=1,
              validation_data=(x_val, y_val), verbose=2)

print("Training Accuracy:", a.history['accuracy'])
print("Validation Accuracy:", a.history['val_accuracy'])
# Print training and validation loss, then plot accuracy and loss curves.
# (Original had this header as a bare prose line — a SyntaxError in a .py file.)
print("Training Loss:", a.history['loss'])
print("Validation Loss:", a.history['val_loss'])

# Accuracy curves (train vs. validation).
plt.plot(a.history['accuracy'], label='accuracy')
plt.plot(a.history['val_accuracy'], label='val_accuracy')
plt.xlabel('epochs')
plt.ylabel('accuracy')
plt.legend()
plt.show()

# Loss curves (train vs. validation).
plt.plot(a.history['loss'], label='loss')
plt.plot(a.history['val_loss'], label='val_loss')
plt.xlabel('epochs')
plt.ylabel('loss')
plt.legend()
plt.show()

# numpy is needed by the prediction/metrics cell that follows.
import numpy as np
# Predicted vs. true class indices on the validation set.
y_pred = model.predict(x_val)
y_pred_classes = np.argmax(y_pred, axis=1)
y_true_classes = np.argmax(y_val, axis=1)

# Calculate the confusion matrix.
# BUG FIX: `confusion_matrix` was called without being imported anywhere above.
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_true_classes, y_pred_classes)

# Print the confusion matrix.
print("Confusion Matrix:")
print(cm)
# Sample images: show the first few MNIST training digits.
import tensorflow as tf
import matplotlib.pyplot as plt

# Load MNIST dataset.
# NOTE(review): this rebinds x_train / y_train, clobbering the CIFAR-100
# arrays used by the training cell above — confirm cell execution order.
(x_train, y_train), (_, _) = tf.keras.datasets.mnist.load_data()

# Display sample images.
num_samples = 5
plt.figure(figsize=(10, 2))
for i in range(num_samples):
    plt.subplot(1, num_samples, i + 1)
    plt.imshow(x_train[i], cmap='gray')  # 'gray' colormap for grayscale images
    plt.title(str(y_train[i]))
    plt.axis('off')
plt.show()
# Show the first few CIFAR-100 training images with their fine labels.
import tensorflow as tf
import matplotlib.pyplot as plt

# Load CIFAR-100 dataset (fine labels: 100 classes).
# NOTE(review): rebinds x_train / y_train again — not standalone-safe.
(x_train, y_train), (_, _) = tf.keras.datasets.cifar100.load_data(label_mode='fine')

# Display sample images; titles show the raw label array (e.g. "[19]").
num_samples = 5
plt.figure(figsize=(10, 2))
for i in range(num_samples):
    plt.subplot(1, num_samples, i + 1)
    plt.imshow(x_train[i])
    plt.title(str(y_train[i]))
    plt.axis('off')
plt.show()
# Show the first few CIFAR-10 training images with human-readable labels.
import tensorflow as tf
import matplotlib.pyplot as plt

# Load CIFAR-10 dataset.
(x_train, y_train), (_, _) = tf.keras.datasets.cifar10.load_data()

# Define class labels (CIFAR-10's canonical class order).
class_labels = ['airplane', 'automobile', 'bird', 'cat', 'deer',
                'dog', 'frog', 'horse', 'ship', 'truck']

# Display sample images; y_train rows are shape-(1,) arrays, int() unwraps them.
num_samples = 5
plt.figure(figsize=(10, 2))
for i in range(num_samples):
    plt.subplot(1, num_samples, i + 1)
    plt.imshow(x_train[i])
    plt.title(class_labels[int(y_train[i])])
    plt.axis('off')
plt.show()
# Show the first few Fashion-MNIST training images.
import tensorflow as tf
import matplotlib.pyplot as plt

# Load Fashion-MNIST dataset.
# (Original header said "MNIST", but the call loads fashion_mnist.)
(x_train, y_train), (_, _) = tf.keras.datasets.fashion_mnist.load_data()

# Display sample images; titles are the integer class ids (0-9).
num_samples = 5
plt.figure(figsize=(10, 2))
for i in range(num_samples):
    plt.subplot(1, num_samples, i + 1)
    plt.imshow(x_train[i], cmap='gray')  # 'gray' colormap for grayscale images
    plt.title(str(y_train[i]))
    plt.axis('off')
plt.show()
# Confusion matrix and evaluation metrics for the CIFAR-100 model.
import numpy as np
import sklearn.metrics
import matplotlib.pyplot as plt
from sklearn.metrics import classification_report

# Evaluation metrics.
# NOTE(review): this evaluates on x_train / y_train but prints "Test" —
# and by this point x_train / y_train may have been rebound by the
# sample-image cells above. Confirm which split and which arrays are intended.
test_loss, test_accuracy = model.evaluate(x_train, y_train)
print('Test Loss:', test_loss)
print('Test Accuracy:', test_accuracy)

# Collapse one-hot targets and softmax outputs to class indices.
predictions = model.predict(x_train)
predicted_labels = np.argmax(predictions, axis=1)
true_labels = np.argmax(y_train, axis=1)

# Renamed from `confusion_matrix` so the tensor does not shadow
# sklearn.metrics.confusion_matrix.
conf_mat = tf.math.confusion_matrix(true_labels, predicted_labels)
print('Confusion Matrix:')
print(conf_mat)

# Weighted averaging accounts for class imbalance across the 100 classes.
precision = sklearn.metrics.precision_score(true_labels, predicted_labels, average='weighted')
recall = sklearn.metrics.recall_score(true_labels, predicted_labels, average='weighted')
f1_score = sklearn.metrics.f1_score(true_labels, predicted_labels, average='weighted')

print('Precision:', precision)
print('Recall:', recall)
print('F1 Score:', f1_score)

report = classification_report(true_labels, predicted_labels)
print('Classification Report:')
print(report)
# Data Augmentation: train a deeper CNN on MNIST using augmented batches.
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Flatten, Dropout
from tensorflow.keras.datasets import mnist
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import numpy as np
import sklearn.metrics
import matplotlib.pyplot as plt

# Load MNIST, scale to [0, 1], and add a trailing channel axis -> (N, 28, 28, 1).
(train_images, train_labels), (test_images, test_labels) = mnist.load_data()
train_images, test_images = train_images / 255.0, test_images / 255.0
train_images = train_images[..., tf.newaxis]
test_images = test_images[..., tf.newaxis]

# NOTE(review): `models` / `layers` come from the first cell's
# `from tensorflow.keras import layers, models` — this cell is not standalone.
data_augmentation_model = models.Sequential()

# Add Convolutional layers (32 -> 64 -> 128 filters, each with 2x2 pooling).
data_augmentation_model.add(layers.Conv2D(32, (3, 3), activation='relu',
                                          input_shape=(28, 28, 1)))
data_augmentation_model.add(layers.MaxPooling2D((2, 2)))
data_augmentation_model.add(layers.Conv2D(64, (3, 3), activation='relu'))
data_augmentation_model.add(layers.MaxPooling2D((2, 2)))
data_augmentation_model.add(layers.Conv2D(128, (3, 3), activation='relu'))
data_augmentation_model.add(layers.MaxPooling2D((2, 2)))

# Flatten the output and add Dense layers; 10 outputs for the 10 digit classes.
data_augmentation_model.add(layers.Flatten())
data_augmentation_model.add(layers.Dense(128, activation='relu'))
data_augmentation_model.add(layers.Dense(10, activation='softmax'))

# Labels are integer class ids, hence sparse_categorical_crossentropy.
data_augmentation_model.compile(optimizer='adam',
                                loss='sparse_categorical_crossentropy',
                                metrics=['accuracy'])

# Define data augmentation parameters using ImageDataGenerator.
# NOTE(review): horizontal_flip is questionable for digits (e.g. mirrors
# change a digit's appearance) — confirm it is intended.
datagen = ImageDataGenerator(
    rotation_range=20,
    width_shift_range=0.1,
    height_shift_range=0.1,
    horizontal_flip=True,
)

# Fit on augmented batches; adjust batch_size and epochs as needed.
history1 = data_augmentation_model.fit(
    datagen.flow(train_images, train_labels, batch_size=32),
    epochs=5,
    validation_data=(test_images, test_labels),
)
# Evaluation metrics for the augmented-data model, on the held-out test split.
test_loss, test_accuracy = data_augmentation_model.evaluate(test_images, test_labels)
print('Test Loss:', test_loss)
print('Test Accuracy:', test_accuracy)

# Predictions and metrics.
predictions = data_augmentation_model.predict(test_images)
predicted_labels = np.argmax(predictions, axis=1)

# test_labels already holds integer class ids, so no argmax is needed here.
true_labels = test_labels

# Renamed from `confusion_matrix` so the tensor does not shadow
# sklearn.metrics.confusion_matrix.
conf_mat = tf.math.confusion_matrix(true_labels, predicted_labels)
print('Confusion Matrix:')
print(conf_mat)

# Weighted averaging accounts for per-class support differences.
precision = sklearn.metrics.precision_score(true_labels, predicted_labels, average='weighted')
recall = sklearn.metrics.recall_score(true_labels, predicted_labels, average='weighted')
f1_score = sklearn.metrics.f1_score(true_labels, predicted_labels, average='weighted')

print('Precision:', precision)
print('Recall:', recall)
print('F1 Score:', f1_score)
# Side-by-side accuracy and loss curves for the augmented-data training run.
plt.figure(figsize=(12, 4))

# Left panel: accuracy per epoch, train vs. validation.
acc_ax = plt.subplot(1, 2, 1)
acc_ax.plot(history1.history['accuracy'])
acc_ax.plot(history1.history['val_accuracy'])
acc_ax.set_title('Model Accuracy')
acc_ax.set_xlabel('Epochs')
acc_ax.set_ylabel('Accuracy')
acc_ax.legend(['Train', 'Validation'], loc='upper left')

# Right panel: loss per epoch, train vs. validation.
loss_ax = plt.subplot(1, 2, 2)
loss_ax.plot(history1.history['loss'])
loss_ax.plot(history1.history['val_loss'])
loss_ax.set_title('Model Loss')
loss_ax.set_xlabel('Epochs')
loss_ax.set_ylabel('Loss')
loss_ax.legend(['Train', 'Validation'], loc='upper left')