← Back

Program 15: Perform image classification on a given dataset using a CNN. You may use TensorFlow/Keras.

Simple Python Code
# Dataset: CIFAR-10 (60,000 colour images in 10 classes)

import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.datasets import cifar10
import matplotlib.pyplot as plt

# ── 1. Load and Explore the Dataset ──────────────────────────────────
# CIFAR-10 ships with Keras: 50,000 training and 10,000 test colour images,
# each 32x32x3, with integer labels in [0, 9].
(x_train, y_train), (x_test, y_test) = cifar10.load_data()

# Human-readable name for each integer class id.
class_names = ['Airplane', 'Automobile', 'Bird', 'Cat', 'Deer',
               'Dog', 'Frog', 'Horse', 'Ship', 'Truck']

# Report the dataset dimensions before doing anything else.
for label, value in (("Training set shape  :", x_train.shape),
                     ("Test set shape      :", x_test.shape),
                     ("Number of classes   :", len(class_names))):
    print(label, value)

# ── 2. Visualize Sample Images ───────────────────────────────────────
# Show the first ten training images in a 2x5 grid with their labels.
plt.figure(figsize=(10, 4))
for idx in range(10):
    ax = plt.subplot(2, 5, idx + 1)
    ax.imshow(x_train[idx])
    # y_train has shape (N, 1), so index the single label column.
    ax.set_title(class_names[int(y_train[idx][0])])
    ax.axis('off')
plt.suptitle("Sample Training Images")
plt.tight_layout()
plt.show()

# ── 3. Preprocess the Data ───────────────────────────────────────────
# Scale pixel intensities from the uint8 range [0, 255] to float32 [0, 1].
x_train = np.asarray(x_train, dtype='float32') / 255.0
x_test = np.asarray(x_test, dtype='float32') / 255.0

# ── 4. Build the CNN Model ───────────────────────────────────────────
# Two identical Conv-Conv-Pool blocks followed by a dense classifier head.
# BatchNormalization stabilises training; Dropout acts as regularisation.
model = keras.Sequential([
    # Explicit Input layer: passing input_shape= to the first Conv2D is a
    # deprecated pattern in modern Keras (emits a warning in Keras 3).
    keras.Input(shape=(32, 32, 3)),

    # Block 1: two 3x3 convolutions with 32 filters, then 2x2 downsampling.
    layers.Conv2D(32, (3, 3), activation='relu', padding='same'),
    layers.BatchNormalization(),
    layers.Conv2D(32, (3, 3), activation='relu', padding='same'),
    layers.BatchNormalization(),
    layers.MaxPooling2D((2, 2)),
    layers.Dropout(0.25),

    # Block 2: same structure with 64 filters.
    layers.Conv2D(64, (3, 3), activation='relu', padding='same'),
    layers.BatchNormalization(),
    layers.Conv2D(64, (3, 3), activation='relu', padding='same'),
    layers.BatchNormalization(),
    layers.MaxPooling2D((2, 2)),
    layers.Dropout(0.25),

    # Classifier head: flatten feature maps, one hidden layer, and a
    # softmax over the 10 CIFAR-10 classes.
    layers.Flatten(),
    layers.Dense(128, activation='relu'),
    layers.BatchNormalization(),
    layers.Dropout(0.5),
    layers.Dense(10, activation='softmax')
])

model.summary()

# ── 5. Compile the Model ─────────────────────────────────────────────
# sparse_categorical_crossentropy accepts integer labels directly,
# so no one-hot encoding of y_train is required.
model.compile(
    optimizer='adam',
    loss='sparse_categorical_crossentropy',
    metrics=['accuracy'],
)

# Hold out 10% of the training data for per-epoch validation.
fit_kwargs = dict(epochs=10, batch_size=64, validation_split=0.1)
history = model.fit(x_train, y_train, **fit_kwargs)

# ── 7. Evaluate on Test Set ──────────────────────────────────────────
# Final generalisation check on the held-out test set.
test_loss, test_acc = model.evaluate(x_test, y_test, verbose=2)
print(f"\nTest Loss     : {test_loss:.4f}")
print(f"Test Accuracy : {test_acc:.4f}")

# Plot the training curves: accuracy on the left, loss on the right.
fig, axes = plt.subplots(1, 2, figsize=(12, 4))
for ax, metric, title in zip(axes,
                             ('accuracy', 'loss'),
                             ('Model Accuracy', 'Model Loss')):
    ax.plot(history.history[metric], label='Train')
    ax.plot(history.history['val_' + metric], label='Validation')
    ax.set_title(title)
    ax.set_xlabel('Epoch')
    ax.set_ylabel(metric.capitalize())
    ax.legend()

plt.tight_layout()
plt.show()

# ── 9. Predict and Display Results ───────────────────────────────────
# Predict the first ten test images and compare against the true labels.
predictions = model.predict(x_test[:10])
pred_classes = predictions.argmax(axis=1)
true_classes = y_test[:10].ravel()

plt.figure(figsize=(12, 4))
for idx, (pred, true) in enumerate(zip(pred_classes, true_classes)):
    ax = plt.subplot(2, 5, idx + 1)
    ax.imshow(x_test[idx])
    # Green title for a correct prediction, red for a mistake.
    ax.set_title(f"P: {class_names[pred]}\nT: {class_names[true]}",
                 color='green' if pred == true else 'red', fontsize=8)
    ax.axis('off')
plt.suptitle("Predictions (Green = Correct, Red = Wrong)")
plt.tight_layout()
plt.show()

print("\nPredicted :", [class_names[c] for c in pred_classes])
print("Actual    :", [class_names[c] for c in true_classes])
Advanced Python Code
# Dataset: CIFAR-10 (60,000 colour images in 10 classes)

import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.datasets import cifar10
import matplotlib.pyplot as plt

# ── 1. Load and Explore the Dataset ──────────────────────────────────
# CIFAR-10 ships with Keras: 50,000 training and 10,000 test colour images,
# each 32x32x3, with integer labels in [0, 9].
(x_train, y_train), (x_test, y_test) = cifar10.load_data()

# Human-readable name for each integer class id.
class_names = ['Airplane', 'Automobile', 'Bird', 'Cat', 'Deer',
               'Dog', 'Frog', 'Horse', 'Ship', 'Truck']

# Report the dataset dimensions before doing anything else.
for label, value in (("Training set shape  :", x_train.shape),
                     ("Test set shape      :", x_test.shape),
                     ("Number of classes   :", len(class_names))):
    print(label, value)

# ── 2. Visualize Sample Images ───────────────────────────────────────
# Show the first ten training images in a 2x5 grid with their labels.
plt.figure(figsize=(10, 4))
for idx in range(10):
    ax = plt.subplot(2, 5, idx + 1)
    ax.imshow(x_train[idx])
    # y_train has shape (N, 1), so index the single label column.
    ax.set_title(class_names[int(y_train[idx][0])])
    ax.axis('off')
plt.suptitle("Sample Training Images")
plt.tight_layout()
plt.show()

# ── 3. Preprocess the Data ───────────────────────────────────────────
# Scale pixel intensities from the uint8 range [0, 255] to float32 [0, 1].
x_train = np.asarray(x_train, dtype='float32') / 255.0
x_test = np.asarray(x_test, dtype='float32') / 255.0

# ── 4. Build the CNN Model ───────────────────────────────────────────
# Two identical Conv-Conv-Pool blocks followed by a dense classifier head.
# BatchNormalization stabilises training; Dropout acts as regularisation.
model = keras.Sequential([
    # Explicit Input layer: passing input_shape= to the first Conv2D is a
    # deprecated pattern in modern Keras (emits a warning in Keras 3).
    keras.Input(shape=(32, 32, 3)),

    # Block 1: two 3x3 convolutions with 32 filters, then 2x2 downsampling.
    layers.Conv2D(32, (3, 3), activation='relu', padding='same'),
    layers.BatchNormalization(),
    layers.Conv2D(32, (3, 3), activation='relu', padding='same'),
    layers.BatchNormalization(),
    layers.MaxPooling2D((2, 2)),
    layers.Dropout(0.25),

    # Block 2: same structure with 64 filters.
    layers.Conv2D(64, (3, 3), activation='relu', padding='same'),
    layers.BatchNormalization(),
    layers.Conv2D(64, (3, 3), activation='relu', padding='same'),
    layers.BatchNormalization(),
    layers.MaxPooling2D((2, 2)),
    layers.Dropout(0.25),

    # Classifier head: flatten feature maps, one hidden layer, and a
    # softmax over the 10 CIFAR-10 classes.
    layers.Flatten(),
    layers.Dense(128, activation='relu'),
    layers.BatchNormalization(),
    layers.Dropout(0.5),
    layers.Dense(10, activation='softmax')
])

model.summary()

# ── 5. Compile the Model ─────────────────────────────────────────────
# sparse_categorical_crossentropy accepts integer labels directly,
# so no one-hot encoding of y_train is required.
model.compile(
    optimizer='adam',
    loss='sparse_categorical_crossentropy',
    metrics=['accuracy'],
)

# Hold out 10% of the training data for per-epoch validation.
fit_kwargs = dict(epochs=10, batch_size=64, validation_split=0.1)
history = model.fit(x_train, y_train, **fit_kwargs)

# ── 7. Evaluate on Test Set ──────────────────────────────────────────
# Final generalisation check on the held-out test set.
test_loss, test_acc = model.evaluate(x_test, y_test, verbose=2)
print(f"\nTest Loss     : {test_loss:.4f}")
print(f"Test Accuracy : {test_acc:.4f}")

# Plot the training curves: accuracy on the left, loss on the right.
fig, axes = plt.subplots(1, 2, figsize=(12, 4))
for ax, metric, title in zip(axes,
                             ('accuracy', 'loss'),
                             ('Model Accuracy', 'Model Loss')):
    ax.plot(history.history[metric], label='Train')
    ax.plot(history.history['val_' + metric], label='Validation')
    ax.set_title(title)
    ax.set_xlabel('Epoch')
    ax.set_ylabel(metric.capitalize())
    ax.legend()

plt.tight_layout()
plt.show()

# ── 9. Predict and Display Results ───────────────────────────────────
# Predict the first ten test images and compare against the true labels.
predictions = model.predict(x_test[:10])
pred_classes = predictions.argmax(axis=1)
true_classes = y_test[:10].ravel()

plt.figure(figsize=(12, 4))
for idx, (pred, true) in enumerate(zip(pred_classes, true_classes)):
    ax = plt.subplot(2, 5, idx + 1)
    ax.imshow(x_test[idx])
    # Green title for a correct prediction, red for a mistake.
    ax.set_title(f"P: {class_names[pred]}\nT: {class_names[true]}",
                 color='green' if pred == true else 'red', fontsize=8)
    ax.axis('off')
plt.suptitle("Predictions (Green = Correct, Red = Wrong)")
plt.tight_layout()
plt.show()

print("\nPredicted :", [class_names[c] for c in pred_classes])
print("Actual    :", [class_names[c] for c in true_classes])
Infographics