import os
import tensorflow as tf
from tensorflow.keras import regularizers
from tensorflow.keras import layers
from tensorflow.keras import Model
from tensorflow import keras
import matplotlib.pyplot as plt
# Resolve the dataset layout: <base>/{train,test}/{NORMAL,PNEUMONIA}.
base_dir = 'chest_xray/reduced size - unbalanced/unbalanced_more_normal_in_train/'

train_dir = os.path.join(base_dir, 'train')
validation_dir = os.path.join(base_dir, 'test')  # the 'test' split doubles as validation here

train_NORMAL_dir = os.path.join(train_dir, 'NORMAL')
train_PNEUMONIA_dir = os.path.join(train_dir, 'PNEUMONIA')
validation_NORMAL_dir = os.path.join(validation_dir, 'NORMAL')
validation_PNEUMONIA_dir = os.path.join(validation_dir, 'PNEUMONIA')

# Filename lists reused below when previewing sample images.
train_NORMAL_fnames = os.listdir(train_NORMAL_dir)
train_PNEUMONIA_fnames = os.listdir(train_PNEUMONIA_dir)

print(train_NORMAL_fnames[:10])
print(train_PNEUMONIA_fnames[:10])

# Report per-class image counts for each split.
print('total training NORMAL images :', len(os.listdir(train_NORMAL_dir)))
print('total training PNEUMONIA images :', len(os.listdir(train_PNEUMONIA_dir)))
print('total validation NORMAL images :', len(os.listdir(validation_NORMAL_dir)))
print('total validation PNEUMONIA images :', len(os.listdir(validation_PNEUMONIA_dir)))
['IM-0115-0001.jpeg', 'IM-0117-0001.jpeg', 'IM-0119-0001.jpeg', 'IM-0122-0001.jpeg', 'IM-0125-0001.jpeg', 'IM-0127-0001.jpeg', 'IM-0128-0001.jpeg', 'IM-0129-0001.jpeg', 'IM-0131-0001.jpeg', 'IM-0133-0001.jpeg']
['person10_bacteria_43.jpeg', 'person11_bacteria_45.jpeg', 'person12_bacteria_46.jpeg', 'person12_bacteria_47.jpeg', 'person12_bacteria_48.jpeg', 'person13_bacteria_49.jpeg', 'person13_bacteria_50.jpeg', 'person14_bacteria_51.jpeg', 'person15_bacteria_52.jpeg', 'person16_bacteria_53.jpeg']
total training NORMAL images : 500
total training PNEUMONIA images : 250
total validation NORMAL images : 100
total validation PNEUMONIA images : 100
import matplotlib.image as mpimg
import matplotlib.pyplot as plt

# Preview a 4x4 grid of raw samples: 8 NORMAL followed by 8 PNEUMONIA.
nrows = 4
ncols = 4
pic_index = 0  # running offset into the filename lists

# Size the current figure to fit the 4x4 grid.
fig = plt.gcf()
fig.set_size_inches(ncols * 4, nrows * 4)

pic_index += 8

# Full paths for the next 8 files of each class.
next_NORMAL_pix = [
    os.path.join(train_NORMAL_dir, fname)
    for fname in train_NORMAL_fnames[pic_index - 8:pic_index]
]
next_PNEUMONIA_pix = [
    os.path.join(train_PNEUMONIA_dir, fname)
    for fname in train_PNEUMONIA_fnames[pic_index - 8:pic_index]
]

for i, img_path in enumerate(next_NORMAL_pix + next_PNEUMONIA_pix):
    sp = plt.subplot(nrows, ncols, i + 1)
    sp.axis('Off')
    plt.imshow(mpimg.imread(img_path))
class Conv(tf.keras.Model):
    """Conv2D -> BatchNorm -> ReLU -> 2x2 MaxPool building block."""

    def __init__(self, filters, kernel_size):
        super(Conv, self).__init__()

        self.conv = tf.keras.layers.Conv2D(filters=filters, kernel_size=kernel_size)
        self.bn = tf.keras.layers.BatchNormalization()
        self.relu = tf.keras.layers.ReLU()
        self.pool = tf.keras.layers.MaxPool2D(pool_size=(2, 2))

    def call(self, inputs, training=None):
        """Apply conv -> batch-norm -> relu -> max-pool.

        BUG FIX: the default used to be ``training=True``, which forced
        BatchNormalization to use per-batch statistics even on direct
        inference calls such as ``model(images)``, skewing predictions.
        With ``training=None`` Keras propagates the correct
        training/inference flag automatically.
        """
        x = self.conv(inputs)
        x = self.bn(x, training=training)
        x = self.relu(x)
        x = self.pool(x)
        return x
    
    
# Four Conv blocks with widening filters, then a dense classification head.
# The two-unit softmax plus sparse categorical cross-entropy treats the
# binary NORMAL/PNEUMONIA task as a 2-class problem.
model = tf.keras.Sequential(
    [
        Conv(filters=32, kernel_size=(3, 3)),
        Conv(filters=64, kernel_size=(3, 3)),
        Conv(filters=128, kernel_size=(3, 3)),
        Conv(filters=128, kernel_size=(3, 3)),
        tf.keras.layers.Flatten(),
        tf.keras.layers.Dense(units=512, activation=tf.keras.activations.relu),
        tf.keras.layers.Dense(units=2, activation=tf.keras.activations.softmax),
    ],
    name='X-ray_CNN',
)

model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=1e-4),
              loss=tf.keras.losses.SparseCategoricalCrossentropy(),
              metrics=['accuracy'])

from glob import glob

# Derive split sizes by counting the NORMAL folder and doubling, which
# assumes the PNEUMONIA folder is the same size. That holds for val/test
# but NOT for the unbalanced train split (500 NORMAL vs 250 PNEUMONIA),
# hence the hard-coded train_len below.
base_dir = 'chest_xray/reduced size - unbalanced/unbalanced_more_normal_in_train/'
# train_len = len(glob(os.path.join(base_dir, 'train', 'NORMAL', '*.jpeg'))) * 2
val_len = len(glob(os.path.join(base_dir, 'val', 'NORMAL', '*.jpeg'))) * 2
test_len = len(glob(os.path.join(base_dir, 'test', 'NORMAL', '*.jpeg'))) * 2
train_len = 750  # 500 NORMAL + 250 PNEUMONIA training images
from tensorflow.keras.optimizers import RMSprop
from tensorflow.keras.optimizers import SGD

from tensorflow.keras.preprocessing.image import ImageDataGenerator

# Training images: rescale to [0, 1] plus random augmentation
# (rotation, shifts, shear, zoom, horizontal flip) to fight overfitting.
train_datagen = ImageDataGenerator( rescale = 1.0/255,
                                    rotation_range=40,
                                    width_shift_range=0.2,
                                    height_shift_range=0.2,
                                    shear_range=0.2,
                                    zoom_range=0.2,
                                    horizontal_flip=True,
                                    fill_mode='nearest'
                                    )
# BUG FIX: validation images must only be rescaled. The original applied
# the same random augmentation to validation data, which randomizes the
# validation metric and makes epoch-to-epoch comparisons meaningless.
validation_datagen  = ImageDataGenerator( rescale = 1.0/255 )
# --------------------
# Flow training images in batches of 20.
# NOTE(review): class_mode='binary' yields scalar 0/1 labels. These
# generators are built but the training below uses the tf.data pipeline
# instead -- confirm before removing either path.
# --------------------
train_generator = train_datagen.flow_from_directory(train_dir,
                                                    batch_size=20,
                                                    class_mode='binary',
                                                    target_size=(150, 150))
# --------------------
# Flow validation images in batches of 20 using test_datagen generator
# --------------------
validation_generator = validation_datagen.flow_from_directory(validation_dir,
                                                         batch_size=20,
                                                         class_mode  = 'binary',
                                                         target_size = (150, 150))


# Single-file checkpoint location (best model only).
checkpoint_path = "training_1/cp.ckpt"
checkpoint_dir = os.path.dirname(checkpoint_path)

# Create a callback that saves the model's weights
# NOTE(review): save_weights_only=False actually saves the full model.
# This callback is never passed to fit(); cp_callback defined further
# down is the one used -- confirm and consider deleting this one.
callback = tf.keras.callbacks.ModelCheckpoint(filepath=checkpoint_path, save_best_only=True,
                                                 save_weights_only=False,
                                                 verbose=1)
Found 750 images belonging to 2 classes.
Found 200 images belonging to 2 classes.
import numpy as np

def load(f, label):
    """Read the image at path *f* into a float32 tensor; pass *label* through unchanged."""
    # load the file into tensor
    image = tf.io.read_file(f)
    # Decode it to JPEG format
    # NOTE(review): no `channels=` argument, so the channel count follows
    # the source file. Downstream random_crop assumes one channel
    # ([150, 150, 1]) -- confirm all x-ray JPEGs are grayscale.
    image = tf.image.decode_jpeg(image)
    # Convert it to tf.float32
    image = tf.cast(image, tf.float32)
    
    return image, label

def resize(input_image, size):
    """Return *input_image* resized to the given (height, width) *size*."""
    resized = tf.image.resize(input_image, size)
    return resized

def random_crop(input_image):
    # Train-time augmentation: cut a random 150x150 window from the image.
    # NOTE(review): the crop size pins a single channel ([150, 150, 1]);
    # this would fail on 3-channel inputs -- confirm the decode output.
    return tf.image.random_crop(input_image, size=[150, 150, 1])

def central_crop(input_image):
    """Deterministic eval-time crop: resize to 176x176, then take the central 150x150.

    BUG FIX: ``central_fraction=0.84`` produced a 148x148 crop
    (0.84 * 176 = 147.84, rounded up), so validation/test images were a
    different size from the 150x150 random crops used at train time.
    ``resize_with_crop_or_pad`` takes an exact, centered 150x150 window.
    """
    image = resize(input_image, [176, 176])
    return tf.image.resize_with_crop_or_pad(image, 150, 150)

def random_rotation(input_image):
    """Rotate by a random multiple of 90 degrees, drawn per image inside the graph.

    BUG FIX: ``np.random.randint`` executed once while tf.data traced this
    function, freezing a single rotation for every image in the pipeline.
    ``tf.random.uniform`` is evaluated per element. ``maxval=4`` also adds
    the previously unreachable 270-degree rotation (the old
    ``randint(0, 3)`` only covered k = 0, 1, 2).
    """
    k = tf.random.uniform(shape=[], minval=0, maxval=4, dtype=tf.int32)
    return tf.image.rot90(input_image, k=k)

def random_jitter(input_image):
    """Train-time augmentation: resize, random crop, random rotation, random flip."""
    # Resize up to 176 x 176 so a 150 x 150 window can be cut anywhere
    image = resize(input_image, [176, 176])
    # Randomly crop to 150 x 150 (random_crop assumes a single channel)
    image = random_crop(image)
    # Random 90-degree rotation
    image = random_rotation(image)
    # Random horizontal mirroring
    image = tf.image.random_flip_left_right(image)
    return image

def normalize(input_image):
    """Shift/scale pixel values to roughly [-1, 1] around the intensity midpoint."""
    lo = tf.reduce_min(input_image)
    hi = tf.reduce_max(input_image)
    midpoint = (hi + lo) / 2
    return input_image / midpoint - 1

def load_image_train(image_file, label):
    """Load one training example, apply random augmentation, then normalize."""
    image, label = load(image_file, label)
    image = random_jitter(image)
    image = normalize(image)
    return image, label

def load_image_val(image_file, label):
    """Load one eval example with a deterministic central crop, then normalize."""
    image, label = load(image_file, label)
    image = central_crop(image)
    image = normalize(image)
    return image, label
# Training pipeline: label NORMAL files 0 and PNEUMONIA files 1,
# concatenate, shuffle, augment, batch, and repeat forever (fit() bounds
# each epoch via steps_per_epoch).
temp_ds = tf.data.Dataset.list_files(os.path.join("chest_xray/reduced size - unbalanced/unbalanced_more_normal_in_train", 'train', 'NORMAL', '*.jpeg'))
temp_ds = temp_ds.map(lambda x: (x, 0))

temp2_ds = tf.data.Dataset.list_files(os.path.join("chest_xray/reduced size - unbalanced/unbalanced_more_normal_in_train", 'train', 'PNEUMONIA', '*.jpeg'))
temp2_ds = temp2_ds.map(lambda x: (x, 1))
train_ds = temp_ds.concatenate(temp2_ds)

# Shuffle buffer sized to the whole dataset => a full uniform shuffle.
buffer_size = tf.data.experimental.cardinality(train_ds).numpy()
train_ds = train_ds.shuffle(buffer_size)\
                   .map(load_image_train, num_parallel_calls=16)\
                   .batch(20)\
                   .repeat()

# Validation pipeline from the 'val' split: deterministic central crop,
# no shuffling, repeated so validation_steps bounds each pass.
temp_ds = tf.data.Dataset.list_files(os.path.join("chest_xray/reduced size - unbalanced/unbalanced_more_normal_in_train", 'val', 'NORMAL', '*.jpeg'))
temp_ds = temp_ds.map(lambda x: (x, 0))

temp2_ds = tf.data.Dataset.list_files(os.path.join("chest_xray/reduced size - unbalanced/unbalanced_more_normal_in_train", 'val', 'PNEUMONIA', '*.jpeg'))
temp2_ds = temp2_ds.map(lambda x: (x, 1))

val_ds = temp_ds.concatenate(temp2_ds)

val_ds = val_ds.map(load_image_val, num_parallel_calls=16)\
               .batch(20)\
               .repeat()

# Test pipeline, same deterministic preprocessing as validation but with
# its own batch size (used later by evaluate() and the prediction loop).
temp_ds = tf.data.Dataset.list_files(os.path.join("chest_xray/reduced size - unbalanced/unbalanced_more_normal_in_train", 'test', 'NORMAL', '*.jpeg'))
temp_ds = temp_ds.map(lambda x: (x, 0))

temp2_ds = tf.data.Dataset.list_files(os.path.join("chest_xray/reduced size - unbalanced/unbalanced_more_normal_in_train", 'test', 'PNEUMONIA', '*.jpeg'))
temp2_ds = temp2_ds.map(lambda x: (x, 1))

test_ds = temp_ds.concatenate(temp2_ds)

batch_size = 10
test_ds = test_ds.map(load_image_val, num_parallel_calls=16)\
               .batch(batch_size)\
               .repeat()

# Sanity-check one augmented training batch: show the first 10 images
# with their labels (0 = NORMAL, 1 = PNEUMONIA).
for images, labels in train_ds.take(1):
    fig, ax = plt.subplots(1, 10, figsize=(20, 6))
    for j in range(10):
        image = images[j].numpy()
        # Rescale for display: normalize() mapped values to roughly [-1, 1].
        image = image / np.amax(image)
        image = np.clip(image, 0, 1)
        ax[j].imshow(image)
        ax[j].set_title(labels[j].numpy())
plt.show()
# Epoch-stamped checkpoint files. save_best_only keeps only epochs that
# improve val_loss; save_weights_only stores weights rather than the
# full model.
checkpoint_path = "./train/x-ray/unbalanced/cp-{epoch:04d}.ckpt"
checkpoint_dir = os.path.dirname(checkpoint_path)

cp_callback = tf.keras.callbacks.ModelCheckpoint(filepath=checkpoint_path,
                                                 save_best_only=True,
                                                 save_weights_only=True,
                                                 verbose=1)
# Recompute split sizes (train_len is hard-coded because the train split
# is unbalanced: 500 NORMAL + 250 PNEUMONIA, so doubling NORMAL is wrong).
base_dir = "chest_xray/reduced size - unbalanced/unbalanced_more_normal_in_train/"
train_len = len(glob(os.path.join(base_dir, 'train', 'NORMAL', '*.jpeg'))) * 2
val_len = len(glob(os.path.join(base_dir, 'val', 'NORMAL', '*.jpeg'))) * 2
test_len = len(glob(os.path.join(base_dir, 'test', 'NORMAL', '*.jpeg'))) * 2
train_len = 750

# Both datasets repeat() forever, so steps_per_epoch / validation_steps
# bound each epoch. FIX: use integer division -- the old float arguments
# (750 / 20 = 37.5) relied on Keras truncating them implicitly.
history = model.fit(train_ds, 
          steps_per_epoch=train_len // 20,
          validation_data=val_ds,
          validation_steps=val_len // 20,
          epochs=50,
          verbose=1,
          callbacks=[cp_callback]
          )
Epoch 1/50
37/37 [==============================] - 85s 2s/step - loss: 0.4274 - accuracy: 0.8653 - val_loss: 0.8069 - val_accuracy: 0.5000

Epoch 00001: val_loss improved from inf to 0.80688, saving model to ./train/x-ray/unbalanced\cp-0001.ckpt
Epoch 2/50
37/37 [==============================] - 74s 2s/step - loss: 0.2388 - accuracy: 0.9160 - val_loss: 1.5580 - val_accuracy: 0.5000

Epoch 00002: val_loss did not improve from 0.80688
Epoch 3/50
37/37 [==============================] - 70s 2s/step - loss: 0.1303 - accuracy: 0.9640 - val_loss: 1.7370 - val_accuracy: 0.5000

Epoch 00003: val_loss did not improve from 0.80688
Epoch 4/50
37/37 [==============================] - 58s 2s/step - loss: 0.1116 - accuracy: 0.9533 - val_loss: 2.4322 - val_accuracy: 0.5000

Epoch 00004: val_loss did not improve from 0.80688
Epoch 5/50
37/37 [==============================] - 59s 2s/step - loss: 0.1460 - accuracy: 0.9507 - val_loss: 2.4165 - val_accuracy: 0.5000

Epoch 00005: val_loss did not improve from 0.80688
Epoch 6/50
37/37 [==============================] - 71s 2s/step - loss: 0.0914 - accuracy: 0.9720 - val_loss: 2.8740 - val_accuracy: 0.5000

Epoch 00006: val_loss did not improve from 0.80688
Epoch 7/50
37/37 [==============================] - 67s 2s/step - loss: 0.0867 - accuracy: 0.9693 - val_loss: 3.3604 - val_accuracy: 0.5000

Epoch 00007: val_loss did not improve from 0.80688
Epoch 8/50
37/37 [==============================] - 65s 2s/step - loss: 0.0833 - accuracy: 0.9667 - val_loss: 3.6610 - val_accuracy: 0.5000

Epoch 00008: val_loss did not improve from 0.80688
Epoch 9/50
37/37 [==============================] - 76s 2s/step - loss: 0.0712 - accuracy: 0.9773 - val_loss: 3.8780 - val_accuracy: 0.5000

Epoch 00009: val_loss did not improve from 0.80688
Epoch 10/50
37/37 [==============================] - 82s 2s/step - loss: 0.0863 - accuracy: 0.9800 - val_loss: 3.5440 - val_accuracy: 0.5000

Epoch 00010: val_loss did not improve from 0.80688
Epoch 11/50
37/37 [==============================] - 94s 3s/step - loss: 0.0553 - accuracy: 0.9813 - val_loss: 3.4028 - val_accuracy: 0.5000

Epoch 00011: val_loss did not improve from 0.80688
Epoch 12/50
37/37 [==============================] - 84s 2s/step - loss: 0.0749 - accuracy: 0.9733 - val_loss: 3.7114 - val_accuracy: 0.5000

Epoch 00012: val_loss did not improve from 0.80688
Epoch 13/50
37/37 [==============================] - 82s 2s/step - loss: 0.0679 - accuracy: 0.9813 - val_loss: 2.5699 - val_accuracy: 0.5100

Epoch 00013: val_loss did not improve from 0.80688
Epoch 14/50
37/37 [==============================] - 89s 2s/step - loss: 0.0744 - accuracy: 0.9760 - val_loss: 2.5526 - val_accuracy: 0.5400

Epoch 00014: val_loss did not improve from 0.80688
Epoch 15/50
37/37 [==============================] - 84s 2s/step - loss: 0.0782 - accuracy: 0.9733 - val_loss: 1.8670 - val_accuracy: 0.5800

Epoch 00015: val_loss did not improve from 0.80688
Epoch 16/50
37/37 [==============================] - 69s 2s/step - loss: 0.0945 - accuracy: 0.9720 - val_loss: 2.2911 - val_accuracy: 0.5550

Epoch 00016: val_loss did not improve from 0.80688
Epoch 17/50
37/37 [==============================] - 73s 2s/step - loss: 0.0617 - accuracy: 0.9800 - val_loss: 2.5040 - val_accuracy: 0.5450

Epoch 00017: val_loss did not improve from 0.80688
Epoch 18/50
37/37 [==============================] - 72s 2s/step - loss: 0.0499 - accuracy: 0.9813 - val_loss: 1.5777 - val_accuracy: 0.6550

Epoch 00018: val_loss did not improve from 0.80688
Epoch 19/50
37/37 [==============================] - 73s 2s/step - loss: 0.0597 - accuracy: 0.9733 - val_loss: 0.9758 - val_accuracy: 0.7800

Epoch 00019: val_loss did not improve from 0.80688
Epoch 20/50
37/37 [==============================] - 78s 2s/step - loss: 0.0684 - accuracy: 0.9800 - val_loss: 1.6673 - val_accuracy: 0.6150

Epoch 00020: val_loss did not improve from 0.80688
Epoch 21/50
37/37 [==============================] - 78s 2s/step - loss: 0.0813 - accuracy: 0.9733 - val_loss: 1.6039 - val_accuracy: 0.6500

Epoch 00021: val_loss did not improve from 0.80688
Epoch 22/50
37/37 [==============================] - 62s 2s/step - loss: 0.0512 - accuracy: 0.9787 - val_loss: 0.9194 - val_accuracy: 0.7200

Epoch 00022: val_loss did not improve from 0.80688
Epoch 23/50
37/37 [==============================] - 69s 2s/step - loss: 0.0983 - accuracy: 0.9680 - val_loss: 0.7328 - val_accuracy: 0.7700

Epoch 00023: val_loss improved from 0.80688 to 0.73283, saving model to ./train/x-ray/unbalanced\cp-0023.ckpt
Epoch 24/50
37/37 [==============================] - 72s 2s/step - loss: 0.0439 - accuracy: 0.9853 - val_loss: 1.1499 - val_accuracy: 0.7100

Epoch 00024: val_loss did not improve from 0.73283
Epoch 25/50
37/37 [==============================] - 75s 2s/step - loss: 0.0378 - accuracy: 0.9853 - val_loss: 1.7629 - val_accuracy: 0.6400

Epoch 00025: val_loss did not improve from 0.73283
Epoch 26/50
37/37 [==============================] - 71s 2s/step - loss: 0.0549 - accuracy: 0.9800 - val_loss: 0.8091 - val_accuracy: 0.7650

Epoch 00026: val_loss did not improve from 0.73283
Epoch 27/50
37/37 [==============================] - 72s 2s/step - loss: 0.0372 - accuracy: 0.9893 - val_loss: 0.8241 - val_accuracy: 0.7650

Epoch 00027: val_loss did not improve from 0.73283
Epoch 28/50
37/37 [==============================] - 76s 2s/step - loss: 0.0668 - accuracy: 0.9693 - val_loss: 0.8181 - val_accuracy: 0.7700

Epoch 00028: val_loss did not improve from 0.73283
Epoch 29/50
37/37 [==============================] - 61s 2s/step - loss: 0.0696 - accuracy: 0.9747 - val_loss: 0.6620 - val_accuracy: 0.7800

Epoch 00029: val_loss improved from 0.73283 to 0.66204, saving model to ./train/x-ray/unbalanced\cp-0029.ckpt
Epoch 30/50
37/37 [==============================] - 60s 2s/step - loss: 0.0368 - accuracy: 0.9853 - val_loss: 0.9653 - val_accuracy: 0.7750

Epoch 00030: val_loss did not improve from 0.66204
Epoch 31/50
37/37 [==============================] - 66s 2s/step - loss: 0.0520 - accuracy: 0.9853 - val_loss: 0.8299 - val_accuracy: 0.7350

Epoch 00031: val_loss did not improve from 0.66204
Epoch 32/50
37/37 [==============================] - 70s 2s/step - loss: 0.0407 - accuracy: 0.9800 - val_loss: 0.7292 - val_accuracy: 0.7850

Epoch 00032: val_loss did not improve from 0.66204
Epoch 33/50
37/37 [==============================] - 71s 2s/step - loss: 0.0829 - accuracy: 0.9653 - val_loss: 0.6929 - val_accuracy: 0.8050

Epoch 00033: val_loss did not improve from 0.66204
Epoch 34/50
37/37 [==============================] - 70s 2s/step - loss: 0.0403 - accuracy: 0.9893 - val_loss: 0.8146 - val_accuracy: 0.7700

Epoch 00034: val_loss did not improve from 0.66204
Epoch 35/50
37/37 [==============================] - 70s 2s/step - loss: 0.0393 - accuracy: 0.9840 - val_loss: 0.8583 - val_accuracy: 0.7550

Epoch 00035: val_loss did not improve from 0.66204
Epoch 36/50
37/37 [==============================] - 71s 2s/step - loss: 0.0273 - accuracy: 0.9933 - val_loss: 0.7692 - val_accuracy: 0.7800

Epoch 00036: val_loss did not improve from 0.66204
Epoch 37/50
37/37 [==============================] - 68s 2s/step - loss: 0.0390 - accuracy: 0.9893 - val_loss: 0.7631 - val_accuracy: 0.7650

Epoch 00037: val_loss did not improve from 0.66204
Epoch 38/50
37/37 [==============================] - 68s 2s/step - loss: 0.0269 - accuracy: 0.9893 - val_loss: 0.7933 - val_accuracy: 0.7650

Epoch 00038: val_loss did not improve from 0.66204
Epoch 39/50
37/37 [==============================] - 67s 2s/step - loss: 0.0487 - accuracy: 0.9827 - val_loss: 0.6302 - val_accuracy: 0.8300

Epoch 00039: val_loss improved from 0.66204 to 0.63018, saving model to ./train/x-ray/unbalanced\cp-0039.ckpt
Epoch 40/50
37/37 [==============================] - 67s 2s/step - loss: 0.0631 - accuracy: 0.9773 - val_loss: 0.6152 - val_accuracy: 0.8250

Epoch 00040: val_loss improved from 0.63018 to 0.61518, saving model to ./train/x-ray/unbalanced\cp-0040.ckpt
Epoch 41/50
37/37 [==============================] - 58s 2s/step - loss: 0.0316 - accuracy: 0.9853 - val_loss: 0.7001 - val_accuracy: 0.8250

Epoch 00041: val_loss did not improve from 0.61518
Epoch 42/50
37/37 [==============================] - 57s 2s/step - loss: 0.0181 - accuracy: 0.9920 - val_loss: 0.7331 - val_accuracy: 0.7900

Epoch 00042: val_loss did not improve from 0.61518
Epoch 43/50
37/37 [==============================] - 58s 2s/step - loss: 0.0104 - accuracy: 0.9960 - val_loss: 0.9991 - val_accuracy: 0.7250

Epoch 00043: val_loss did not improve from 0.61518
Epoch 44/50
37/37 [==============================] - 58s 2s/step - loss: 0.0147 - accuracy: 0.9933 - val_loss: 1.1504 - val_accuracy: 0.7400

Epoch 00044: val_loss did not improve from 0.61518
Epoch 45/50
37/37 [==============================] - 67s 2s/step - loss: 0.0324 - accuracy: 0.9867 - val_loss: 0.8989 - val_accuracy: 0.7600

Epoch 00045: val_loss did not improve from 0.61518
Epoch 46/50
37/37 [==============================] - 71s 2s/step - loss: 0.0216 - accuracy: 0.9907 - val_loss: 0.7924 - val_accuracy: 0.8100

Epoch 00046: val_loss did not improve from 0.61518
Epoch 47/50
37/37 [==============================] - 74s 2s/step - loss: 0.0482 - accuracy: 0.9893 - val_loss: 1.1679 - val_accuracy: 0.7250

Epoch 00047: val_loss did not improve from 0.61518
Epoch 48/50
37/37 [==============================] - 66s 2s/step - loss: 0.0369 - accuracy: 0.9907 - val_loss: 0.8126 - val_accuracy: 0.8150

Epoch 00048: val_loss did not improve from 0.61518
Epoch 49/50
37/37 [==============================] - 58s 2s/step - loss: 0.0318 - accuracy: 0.9880 - val_loss: 0.7549 - val_accuracy: 0.8450

Epoch 00049: val_loss did not improve from 0.61518
Epoch 50/50
37/37 [==============================] - 59s 2s/step - loss: 0.0482 - accuracy: 0.9867 - val_loss: 0.5634 - val_accuracy: 0.8150

Epoch 00050: val_loss improved from 0.61518 to 0.56336, saving model to ./train/x-ray/unbalanced\cp-0050.ckpt
from matplotlib.pyplot import figure
colors = plt.rcParams['axes.prop_cycle'].by_key()['color']
def plot_metrics(history):
  """Plot train vs. validation curves for loss and accuracy, side by side."""
  metrics = ['loss', 'accuracy']
  # BUG FIX: figure() used to be called inside the loop, so every metric
  # opened a fresh 20x8 figure and the second subplot slot of each
  # figure stayed empty. Create one figure and fill both slots.
  figure(figsize=(20, 8))
  for n, metric in enumerate(metrics):
    name = metric.replace("_"," ").capitalize()
    plt.subplot(1,2,n+1)
    plt.plot(history.epoch, history.history[metric], color=colors[0], label='Train')
    # Validation curve: same color, dashed line.
    plt.plot(history.epoch, history.history['val_'+metric],
             color=colors[0], linestyle="--", label='Val')
    plt.xlabel('Epoch')
    plt.ylabel(name)
    if metric == 'loss':
      plt.ylim([0, plt.ylim()[1]])
    elif metric == 'auc':
      # unreachable with the current metrics list; kept for easy extension
      plt.ylim([0.8,1])
    else:
      plt.ylim([0,1])

    plt.legend()
plot_metrics(history)
model.summary()
Model: "X-ray_CNN"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
conv (Conv)                  (None, None, None, 32)    448       
_________________________________________________________________
conv_1 (Conv)                (None, None, None, 64)    18752     
_________________________________________________________________
conv_2 (Conv)                (None, None, None, 128)   74368     
_________________________________________________________________
conv_3 (Conv)                (None, None, None, 128)   148096    
_________________________________________________________________
flatten (Flatten)            (None, None)              0         
_________________________________________________________________
dense (Dense)                (None, 512)               3211776   
_________________________________________________________________
dense_1 (Dense)              (None, 2)                 1026      
=================================================================
Total params: 3,454,466
Trainable params: 3,453,762
Non-trainable params: 704
_________________________________________________________________
test_ds
<RepeatDataset shapes: ((None, 148, 148, None), (None,)), types: (tf.float32, tf.int32)>
model.evaluate(test_ds, steps=int(test_len/batch_size))
20/20 [==============================] - 3s 113ms/step - loss: 0.7190 - accuracy: 0.7700
[0.7190219759941101, 0.7699999809265137]
# divided by batch size"""
# batch_size = 10
# predictions = model.predict(test_ds, steps=int(test_len/batch_size))
# predictions

We need to convert the predictions to binary class labels before generating a classification report.

# for i in range(predictions.shape[0]):
#     if float(predictions[i][0]) > float(predictions[i][1]):
#         pred[i] = 0
#     else:
#         pred[i] = 1
# pred
# predictions
i = 0
p = []     # raw per-batch prediction tensors
gnd = []   # ground-truth labels, one entry per test image
prob = []  # predicted class labels (0 = NORMAL, 1 = PNEUMONIA)

# test_ds.take(n) yields n batches; because the dataset repeats, taking
# test_len / batch_size batches walks the whole test split exactly once
# (and would wrap around to the start if taken further).
for images, labels in test_ds.take(int(test_len / batch_size)):
    i += 1
    predictions = model(images)
    p.append(predictions)
    # One row of subplots per batch: the subplot count equals the batch
    # size, while take()'s argument controls how many batches we visit.
    fig, ax = plt.subplots(1, batch_size, figsize=(20, 6))
    # Iterate over the images within this batch.
    for j in range(batch_size):
        image = images[j].numpy()
        image = image / np.amax(image)
        image = np.clip(image, 0, 1)
        ax[j].imshow(image)

        # Rounded class scores shown in each subplot title.
        normal_prob = predictions[j][0]
        normal_prob = round(float(normal_prob), 2)
        pneumonia_prob = predictions[j][1]
        pneumonia_prob = round(float(pneumonia_prob), 2)
        ax[j].set_title(" gnd = {},\n  n = {},\n p = {}".format(labels[j].numpy(),
                                                                normal_prob, pneumonia_prob))

        gnd.append(labels[j].numpy())
        # Argmax over the two (rounded) scores gives the predicted class.
        prob.append(0 if normal_prob > pneumonia_prob else 1)
plt.show()
predictions
<tf.Tensor: shape=(10, 2), dtype=float32, numpy=
array([[1.6038069e-03, 9.9839622e-01],
       [9.3764621e-01, 6.2353782e-02],
       [7.5088359e-02, 9.2491162e-01],
       [9.9994087e-01, 5.9118669e-05],
       [8.2221758e-01, 1.7778248e-01],
       [8.5014814e-01, 1.4985187e-01],
       [5.0974756e-02, 9.4902527e-01],
       [9.4851780e-01, 5.1482230e-02],
       [1.1183850e-04, 9.9988818e-01],
       [6.5123318e-03, 9.9348760e-01]], dtype=float32)>
from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import f1_score
# Overall fraction of correct predictions on the test split.
accuracy_score(gnd, prob)
0.695
precision_score(gnd, prob, average = None)
array([0.63636364, 0.84210526])
recall_score(gnd, prob, average = None)
array([0.91, 0.48])
f1_score(gnd, prob, average = None)
array([0.74897119, 0.61146497])
from sklearn.metrics import classification_report
# Combined precision/recall/F1/support table per class (0 = NORMAL, 1 = PNEUMONIA).
print(classification_report(gnd, prob))
              precision    recall  f1-score   support

           0       0.64      0.91      0.75       100
           1       0.84      0.48      0.61       100

    accuracy                           0.69       200
   macro avg       0.74      0.70      0.68       200
weighted avg       0.74      0.69      0.68       200

from sklearn.metrics import confusion_matrix
# Rows are ground truth, columns are predictions (sklearn convention).
print(confusion_matrix(gnd, prob))
[[91  9]
 [52 48]]