import os
import tensorflow as tf
from tensorflow.keras import regularizers
from tensorflow.keras import layers
from tensorflow.keras import Model
from tensorflow import keras
import matplotlib.pyplot as plt
base_dir = 'chest_xray/reduced size - dirty balanced/'

train_dir = os.path.join(base_dir, 'train')
validation_dir = os.path.join(base_dir, 'test')  # the 'test' split is used as the validation set here

train_NORMAL_dir = os.path.join(train_dir, 'NORMAL')
train_PNEUMONIA_dir = os.path.join(train_dir, 'PNEUMONIA')

validation_NORMAL_dir = os.path.join(validation_dir, 'NORMAL')
validation_PNEUMONIA_dir = os.path.join(validation_dir, 'PNEUMONIA')

train_NORMAL_fnames = os.listdir( train_NORMAL_dir )
train_PNEUMONIA_fnames = os.listdir( train_PNEUMONIA_dir )

print(train_NORMAL_fnames[:10])
print(train_PNEUMONIA_fnames[:10])

print('total training NORMAL images :', len(os.listdir(train_NORMAL_dir)))
print('total training PNEUMONIA images :', len(os.listdir(train_PNEUMONIA_dir)))

print('total validation NORMAL images :', len(os.listdir(validation_NORMAL_dir)))
print('total validation PNEUMONIA images :', len(os.listdir(validation_PNEUMONIA_dir)))
['IM-0115-0001.jpeg', 'IM-0117-0001.jpeg', 'IM-0119-0001.jpeg', 'IM-0122-0001.jpeg', 'IM-0125-0001.jpeg', 'IM-0127-0001.jpeg', 'IM-0128-0001.jpeg', 'IM-0129-0001.jpeg', 'IM-0131-0001.jpeg', 'IM-0133-0001.jpeg']
['IM-0450-0001.jpeg', 'IM-0451-0001.jpeg', 'IM-0452-0001.jpeg', 'IM-0453-0001-0002.jpeg', 'IM-0453-0001.jpeg', 'IM-0455-0001.jpeg', 'IM-0456-0001.jpeg', 'IM-0457-0001.jpeg', 'IM-0458-0001.jpeg', 'IM-0459-0001.jpeg']
total training NORMAL images : 500
total training PNEUMONIA images : 500
total validation NORMAL images : 100
total validation PNEUMONIA images : 100
import matplotlib.image as mpimg
import matplotlib.pyplot as plt

# Parameters for our graph; we'll output images in a 4x4 configuration
nrows = 4
ncols = 4
pic_index = 0 # Index for iterating over images

# Set up matplotlib fig, and size it to fit 4x4 pics
fig = plt.gcf()
fig.set_size_inches(ncols*4, nrows*4)

pic_index+=8

next_NORMAL_pix = [os.path.join(train_NORMAL_dir, fname)
                for fname in train_NORMAL_fnames[ pic_index-8:pic_index]
               ]

next_PNEUMONIA_pix = [os.path.join(train_PNEUMONIA_dir, fname)
                for fname in train_PNEUMONIA_fnames[ pic_index-8:pic_index]
               ]

for i, img_path in enumerate(next_NORMAL_pix+next_PNEUMONIA_pix):
  sp = plt.subplot(nrows, ncols, i + 1)
  sp.axis('Off')

  img = mpimg.imread(img_path)
  plt.imshow(img)
# Reusable Conv -> BatchNorm -> ReLU -> MaxPool block.
class Conv(tf.keras.Model):
    def __init__(self, filters, kernel_size):
        super(Conv, self).__init__()

        self.conv = tf.keras.layers.Conv2D(filters=filters, kernel_size=kernel_size)
        self.bn = tf.keras.layers.BatchNormalization()
        self.relu = tf.keras.layers.ReLU()
        self.pool = tf.keras.layers.MaxPool2D(pool_size=(2, 2))

    def call(self, inputs, training=False):
        # Default to inference mode so direct calls like model(images) use the
        # batch-norm moving statistics; Keras passes training=True during fit().
        x = self.conv(inputs)
        x = self.bn(x, training=training)
        x = self.relu(x)
        x = self.pool(x)
        return x
    
model = tf.keras.Sequential(name='X-ray_CNN')

model.add(Conv(filters=32, kernel_size=(3, 3)))
model.add(Conv(filters=64, kernel_size=(3, 3)))
model.add(Conv(filters=128, kernel_size=(3, 3)))
model.add(Conv(filters=128, kernel_size=(3, 3)))
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dense(units=512, activation=tf.keras.activations.relu))
model.add(tf.keras.layers.Dense(units=2, activation=tf.keras.activations.softmax))

model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=1e-4),
              loss=tf.keras.losses.SparseCategoricalCrossentropy(),
              metrics=['accuracy'])
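The custom Conv blocks have no fixed input shape yet, so summary() would only report None shapes at this point. A quick optional check (my addition, not in the original notebook) is to run a dummy batch through the model; the (150, 150, 1) shape assumes the grayscale 150x150 crops produced by the tf.data pipeline further down.

_ = model(tf.zeros((1, 150, 150, 1)))  # builds the weights for single-channel input
model.summary()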

from glob import glob

base_dir = 'chest_xray/reduced size - dirty balanced/'
train_len = len(glob(os.path.join(base_dir, 'train', 'NORMAL', '*.jpeg'))) * 2
val_len = len(glob(os.path.join(base_dir, 'val', 'NORMAL', '*.jpeg'))) * 2
test_len = len(glob(os.path.join(base_dir, 'test', 'NORMAL', '*.jpeg'))) * 2
train_len
1000
from tensorflow.keras.optimizers import RMSprop
from tensorflow.keras.optimizers import SGD

from tensorflow.keras.preprocessing.image import ImageDataGenerator

train_datagen = ImageDataGenerator( rescale = 1.0/255,
                                    rotation_range=40,
                                    width_shift_range=0.2,
                                    height_shift_range=0.2,
                                    shear_range=0.2,
                                    zoom_range=0.2,
                                    horizontal_flip=True,
                                    fill_mode='nearest'
                                    )
# Note: the same augmentation is applied to the validation generator here;
# typically validation data would only be rescaled.
validation_datagen  = ImageDataGenerator( rescale = 1.0/255,
                                    rotation_range=40,
                                    width_shift_range=0.2,
                                    height_shift_range=0.2,
                                    shear_range=0.2,
                                    zoom_range=0.2,
                                    horizontal_flip=True,
                                    fill_mode='nearest'
                                    )
# --------------------
# Flow training images in batches of 20 using the train_datagen generator
# --------------------
train_generator = train_datagen.flow_from_directory(train_dir,
                                                    batch_size=20,
                                                    class_mode='binary',
                                                    target_size=(150, 150))
# --------------------
# Flow validation images in batches of 20 using the validation_datagen generator
# --------------------
validation_generator = validation_datagen.flow_from_directory(validation_dir,
                                                         batch_size=20,
                                                         class_mode  = 'binary',
                                                         target_size = (150, 150))


checkpoint_path = "training_1/cp.ckpt"
checkpoint_dir = os.path.dirname(checkpoint_path)

# Create a checkpoint callback. Note that save_weights_only=False, so it would
# save the full model; this callback is not used below -- training uses the
# cp_callback defined just before model.fit().

callback = tf.keras.callbacks.ModelCheckpoint(filepath=checkpoint_path, save_best_only=True,
                                                 save_weights_only=False,
                                                 verbose=1)
Found 1000 images belonging to 2 classes.
Found 200 images belonging to 2 classes.
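The generator pipeline above is set up but not actually used for training; the model is trained further down on the tf.data datasets (train_ds / val_ds). If the generator path were used instead, the fit call would look roughly like this (a sketch only; steps per epoch are inferred from the generators themselves):

# history = model.fit(train_generator,
#                     validation_data=validation_generator,
#                     epochs=50,
#                     callbacks=[callback])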
import numpy as np

def load(f, label):
    # Load the raw file contents into a tensor
    image = tf.io.read_file(f)
    # Decode the JPEG bytes into a uint8 image tensor
    image = tf.image.decode_jpeg(image)
    # Convert it to tf.float32
    image = tf.cast(image, tf.float32)

    return image, label

def resize(input_image, size):
    return tf.image.resize(input_image, size)

def random_crop(input_image):
    return tf.image.random_crop(input_image, size=[150, 150, 1])

def central_crop(input_image):
    # Resize to 176x176, then keep the central 84% (about 148x148, close to
    # the 150x150 random crops used for training)
    image = resize(input_image, [176, 176])
    return tf.image.central_crop(image, central_fraction=0.84)

def random_rotation(input_image):
    # k is drawn from {0, 1, 2}, i.e. a rotation of 0, 90 or 180 degrees
    angles = np.random.randint(0, 3, 1)
    return tf.image.rot90(input_image, k=angles[0])

def random_jitter(input_image):
    # Resize to 176 x 176
    image = resize(input_image, [176, 176])
    # Randomly crop to 150 x 150 x 1
    image = random_crop(image)
    # Random rotation
    image = random_rotation(image)
    # Random horizontal flip
    image = tf.image.random_flip_left_right(image)
    return image

def normalize(input_image):
    # Divide by the mid-range value and shift, so pixel values end up roughly
    # in [-1, 1] (exactly [-1, 1] when the minimum pixel value is 0)
    mid = (tf.reduce_max(input_image) + tf.reduce_min(input_image)) / 2
    input_image = input_image / mid - 1
    return input_image

def load_image_train(image_file, label):
    image, label = load(image_file, label)
    image = random_jitter(image)
    image = normalize(image)
    return image, label

def load_image_val(image_file, label):
    image, label = load(image_file, label)
    image = central_crop(image)
    image = normalize(image)
    return image, label
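As a quick sanity check (my addition, not part of the original notebook), one NORMAL training image can be pushed through the training transform to confirm it comes out as a 150x150 single-channel tensor scaled roughly to [-1, 1]; this assumes the JPEG decodes to one channel, as the crop size in random_crop expects.

sample_path = os.path.join(train_NORMAL_dir, train_NORMAL_fnames[0])
sample_image, sample_label = load_image_train(sample_path, 0)
print(sample_image.shape,
      float(tf.reduce_min(sample_image)),
      float(tf.reduce_max(sample_image)))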
# Build the training dataset: NORMAL images are labelled 0 and PNEUMONIA images 1.
temp_ds = tf.data.Dataset.list_files(os.path.join("chest_xray/reduced size - dirty balanced", 'train', 'NORMAL', '*.jpeg'))
temp_ds = temp_ds.map(lambda x: (x, 0))

temp2_ds = tf.data.Dataset.list_files(os.path.join("chest_xray/reduced size - dirty balanced", 'train', 'PNEUMONIA', '*.jpeg'))
temp2_ds = temp2_ds.map(lambda x: (x, 1))
train_ds = temp_ds.concatenate(temp2_ds)

buffer_size = tf.data.experimental.cardinality(train_ds).numpy()
train_ds = train_ds.shuffle(buffer_size)\
                   .map(load_image_train, num_parallel_calls=16)\
                   .batch(20)\
                   .repeat()

temp_ds = tf.data.Dataset.list_files(os.path.join("chest_xray/reduced size - dirty balanced", 'val', 'NORMAL', '*.jpeg'))
temp_ds = temp_ds.map(lambda x: (x, 0))

temp2_ds = tf.data.Dataset.list_files(os.path.join("chest_xray/reduced size - dirty balanced", 'val', 'PNEUMONIA', '*.jpeg'))
temp2_ds = temp2_ds.map(lambda x: (x, 1))

val_ds = temp_ds.concatenate(temp2_ds)

val_ds = val_ds.map(load_image_val, num_parallel_calls=16)\
               .batch(20)\
               .repeat()

temp_ds = tf.data.Dataset.list_files(os.path.join("chest_xray/reduced size - dirty balanced", 'test', 'NORMAL', '*.jpeg'))
temp_ds = temp_ds.map(lambda x: (x, 0))

temp2_ds = tf.data.Dataset.list_files(os.path.join("chest_xray/reduced size - dirty balanced", 'test', 'PNEUMONIA', '*.jpeg'))
temp2_ds = temp2_ds.map(lambda x: (x, 1))

test_ds = temp_ds.concatenate(temp2_ds)

batch_size = 10
test_ds = test_ds.map(load_image_val, num_parallel_calls=16)\
               .batch(batch_size)\
               .repeat()

for images, labels in train_ds.take(1):
    fig, ax = plt.subplots(1, 10, figsize=(20, 6))
    for j in range(10):
        image = images[j].numpy()
        image = image / np.amax(image)
        image = np.clip(image, 0, 1)
        ax[j].imshow(image)
        ax[j].set_title(labels[j].numpy())
plt.show()
checkpoint_path = "./train/x-ray/cp-{epoch:04d}.ckpt"
checkpoint_dir = os.path.dirname(checkpoint_path)

cp_callback = tf.keras.callbacks.ModelCheckpoint(filepath=checkpoint_path,
                                                 save_best_only=True,
                                                 save_weights_only=True,
                                                 verbose=1)
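If training is interrupted, the best weights written by cp_callback could later be restored along these lines (a sketch; it assumes at least one checkpoint file already exists in checkpoint_dir):

# latest = tf.train.latest_checkpoint(checkpoint_dir)
# model.load_weights(latest)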
base_dir = "chest_xray/reduced size - dirty balanced/"
train_len = len(glob(os.path.join(base_dir, 'train', 'NORMAL', '*.jpeg'))) * 2
val_len = len(glob(os.path.join(base_dir, 'val', 'NORMAL', '*.jpeg'))) * 2
test_len = len(glob(os.path.join(base_dir, 'test', 'NORMAL', '*.jpeg'))) * 2
train_len
1000
history = model.fit(train_ds,
          steps_per_epoch=train_len // 20,
          validation_data=val_ds,
          validation_steps=val_len // 20,
          epochs=50,
          verbose=1,
          callbacks=[cp_callback]
          )
Epoch 1/50
50/50 [==============================] - 58s 1s/step - loss: 1.0541 - accuracy: 0.5270 - val_loss: 0.7022 - val_accuracy: 0.4550

Epoch 00001: val_loss improved from inf to 0.70225, saving model to ./train/x-ray\cp-0001.ckpt
Epoch 2/50
50/50 [==============================] - 47s 941ms/step - loss: 0.7141 - accuracy: 0.5700 - val_loss: 0.6997 - val_accuracy: 0.4150

Epoch 00002: val_loss improved from 0.70225 to 0.69967, saving model to ./train/x-ray\cp-0002.ckpt
Epoch 3/50
50/50 [==============================] - 56s 1s/step - loss: 0.6903 - accuracy: 0.5860 - val_loss: 0.7007 - val_accuracy: 0.4500

Epoch 00003: val_loss did not improve from 0.69967
Epoch 4/50
50/50 [==============================] - 55s 1s/step - loss: 0.6905 - accuracy: 0.5800 - val_loss: 0.8076 - val_accuracy: 0.5000

Epoch 00004: val_loss did not improve from 0.69967
Epoch 5/50
50/50 [==============================] - 48s 960ms/step - loss: 0.6741 - accuracy: 0.5970 - val_loss: 0.7274 - val_accuracy: 0.3700

Epoch 00005: val_loss did not improve from 0.69967
Epoch 6/50
50/50 [==============================] - 51s 1s/step - loss: 0.6404 - accuracy: 0.6470 - val_loss: 0.7942 - val_accuracy: 0.3900

Epoch 00006: val_loss did not improve from 0.69967
Epoch 7/50
50/50 [==============================] - 53s 1s/step - loss: 0.6568 - accuracy: 0.6350 - val_loss: 0.8845 - val_accuracy: 0.4750

Epoch 00007: val_loss did not improve from 0.69967
Epoch 8/50
50/50 [==============================] - 45s 888ms/step - loss: 0.6570 - accuracy: 0.6190 - val_loss: 0.8509 - val_accuracy: 0.4550

Epoch 00008: val_loss did not improve from 0.69967
Epoch 9/50
50/50 [==============================] - 43s 858ms/step - loss: 0.6222 - accuracy: 0.6560 - val_loss: 0.7760 - val_accuracy: 0.3600

Epoch 00009: val_loss did not improve from 0.69967
Epoch 10/50
50/50 [==============================] - 44s 886ms/step - loss: 0.6692 - accuracy: 0.6210 - val_loss: 0.8291 - val_accuracy: 0.4200

Epoch 00010: val_loss did not improve from 0.69967
Epoch 11/50
50/50 [==============================] - 43s 848ms/step - loss: 0.6432 - accuracy: 0.6240 - val_loss: 0.9801 - val_accuracy: 0.4900

Epoch 00011: val_loss did not improve from 0.69967
Epoch 12/50
50/50 [==============================] - 42s 830ms/step - loss: 0.6359 - accuracy: 0.6490 - val_loss: 0.8247 - val_accuracy: 0.4850

Epoch 00012: val_loss did not improve from 0.69967
Epoch 13/50
50/50 [==============================] - 43s 848ms/step - loss: 0.6206 - accuracy: 0.6640 - val_loss: 0.9618 - val_accuracy: 0.4350

Epoch 00013: val_loss did not improve from 0.69967
Epoch 14/50
50/50 [==============================] - 42s 844ms/step - loss: 0.6198 - accuracy: 0.6480 - val_loss: 1.0243 - val_accuracy: 0.3900

Epoch 00014: val_loss did not improve from 0.69967
Epoch 15/50
50/50 [==============================] - 42s 841ms/step - loss: 0.6132 - accuracy: 0.6470 - val_loss: 1.0294 - val_accuracy: 0.4300

Epoch 00015: val_loss did not improve from 0.69967
Epoch 16/50
50/50 [==============================] - 43s 861ms/step - loss: 0.5959 - accuracy: 0.6720 - val_loss: 0.8648 - val_accuracy: 0.4150

Epoch 00016: val_loss did not improve from 0.69967
Epoch 17/50
50/50 [==============================] - 43s 854ms/step - loss: 0.6106 - accuracy: 0.6670 - val_loss: 0.9379 - val_accuracy: 0.3850

Epoch 00017: val_loss did not improve from 0.69967
Epoch 18/50
50/50 [==============================] - 43s 861ms/step - loss: 0.5961 - accuracy: 0.6660 - val_loss: 1.1566 - val_accuracy: 0.4300

Epoch 00018: val_loss did not improve from 0.69967
Epoch 19/50
50/50 [==============================] - 45s 904ms/step - loss: 0.5998 - accuracy: 0.6740 - val_loss: 0.9172 - val_accuracy: 0.4600

Epoch 00019: val_loss did not improve from 0.69967
Epoch 20/50
50/50 [==============================] - 44s 884ms/step - loss: 0.6221 - accuracy: 0.6580 - val_loss: 0.9224 - val_accuracy: 0.4250

Epoch 00020: val_loss did not improve from 0.69967
Epoch 21/50
50/50 [==============================] - 42s 826ms/step - loss: 0.5806 - accuracy: 0.7040 - val_loss: 0.9108 - val_accuracy: 0.5050

Epoch 00021: val_loss did not improve from 0.69967
Epoch 22/50
50/50 [==============================] - 40s 789ms/step - loss: 0.5792 - accuracy: 0.7000 - val_loss: 0.7903 - val_accuracy: 0.4900

Epoch 00022: val_loss did not improve from 0.69967
Epoch 23/50
50/50 [==============================] - 40s 805ms/step - loss: 0.5806 - accuracy: 0.6950 - val_loss: 1.0931 - val_accuracy: 0.4500

Epoch 00023: val_loss did not improve from 0.69967
Epoch 24/50
50/50 [==============================] - 40s 795ms/step - loss: 0.5875 - accuracy: 0.6830 - val_loss: 1.0899 - val_accuracy: 0.3600

Epoch 00024: val_loss did not improve from 0.69967
Epoch 25/50
50/50 [==============================] - 43s 845ms/step - loss: 0.5672 - accuracy: 0.6940 - val_loss: 1.2295 - val_accuracy: 0.3400

Epoch 00025: val_loss did not improve from 0.69967
Epoch 26/50
50/50 [==============================] - 37s 746ms/step - loss: 0.5726 - accuracy: 0.7080 - val_loss: 1.1009 - val_accuracy: 0.4650

Epoch 00026: val_loss did not improve from 0.69967
Epoch 27/50
50/50 [==============================] - 37s 747ms/step - loss: 0.5703 - accuracy: 0.7020 - val_loss: 1.0845 - val_accuracy: 0.4050

Epoch 00027: val_loss did not improve from 0.69967
Epoch 28/50
50/50 [==============================] - 37s 742ms/step - loss: 0.6072 - accuracy: 0.6920 - val_loss: 0.9667 - val_accuracy: 0.4250

Epoch 00028: val_loss did not improve from 0.69967
Epoch 29/50
50/50 [==============================] - 38s 750ms/step - loss: 0.5443 - accuracy: 0.7250 - val_loss: 1.1259 - val_accuracy: 0.4300

Epoch 00029: val_loss did not improve from 0.69967
Epoch 30/50
50/50 [==============================] - 37s 736ms/step - loss: 0.5803 - accuracy: 0.6850 - val_loss: 1.0957 - val_accuracy: 0.4050

Epoch 00030: val_loss did not improve from 0.69967
Epoch 31/50
50/50 [==============================] - 38s 748ms/step - loss: 0.5511 - accuracy: 0.7190 - val_loss: 1.1487 - val_accuracy: 0.4200

Epoch 00031: val_loss did not improve from 0.69967
Epoch 32/50
50/50 [==============================] - 37s 732ms/step - loss: 0.5604 - accuracy: 0.6980 - val_loss: 1.0986 - val_accuracy: 0.3750

Epoch 00032: val_loss did not improve from 0.69967
Epoch 33/50
50/50 [==============================] - 38s 755ms/step - loss: 0.5623 - accuracy: 0.7110 - val_loss: 1.0909 - val_accuracy: 0.3850

Epoch 00033: val_loss did not improve from 0.69967
Epoch 34/50
50/50 [==============================] - 38s 760ms/step - loss: 0.5591 - accuracy: 0.7020 - val_loss: 1.1863 - val_accuracy: 0.4500

Epoch 00034: val_loss did not improve from 0.69967
Epoch 35/50
50/50 [==============================] - 39s 770ms/step - loss: 0.5540 - accuracy: 0.7250 - val_loss: 1.2082 - val_accuracy: 0.4250

Epoch 00035: val_loss did not improve from 0.69967
Epoch 36/50
50/50 [==============================] - 38s 759ms/step - loss: 0.5384 - accuracy: 0.7210 - val_loss: 1.0328 - val_accuracy: 0.4150

Epoch 00036: val_loss did not improve from 0.69967
Epoch 37/50
50/50 [==============================] - 38s 763ms/step - loss: 0.5698 - accuracy: 0.6990 - val_loss: 1.0828 - val_accuracy: 0.4050

Epoch 00037: val_loss did not improve from 0.69967
Epoch 38/50
50/50 [==============================] - 39s 769ms/step - loss: 0.5419 - accuracy: 0.7200 - val_loss: 1.2010 - val_accuracy: 0.3550

Epoch 00038: val_loss did not improve from 0.69967
Epoch 39/50
50/50 [==============================] - 39s 782ms/step - loss: 0.5126 - accuracy: 0.7420 - val_loss: 1.5429 - val_accuracy: 0.5500

Epoch 00039: val_loss did not improve from 0.69967
Epoch 40/50
50/50 [==============================] - 39s 783ms/step - loss: 0.5404 - accuracy: 0.7190 - val_loss: 1.1007 - val_accuracy: 0.5150

Epoch 00040: val_loss did not improve from 0.69967
Epoch 41/50
50/50 [==============================] - 40s 790ms/step - loss: 0.5186 - accuracy: 0.7520 - val_loss: 1.3532 - val_accuracy: 0.3750

Epoch 00041: val_loss did not improve from 0.69967
Epoch 42/50
50/50 [==============================] - 37s 736ms/step - loss: 0.5213 - accuracy: 0.7380 - val_loss: 1.2350 - val_accuracy: 0.3650

Epoch 00042: val_loss did not improve from 0.69967
Epoch 43/50
50/50 [==============================] - 37s 729ms/step - loss: 0.5180 - accuracy: 0.7450 - val_loss: 1.2567 - val_accuracy: 0.4100

Epoch 00043: val_loss did not improve from 0.69967
Epoch 44/50
50/50 [==============================] - 37s 738ms/step - loss: 0.5080 - accuracy: 0.7340 - val_loss: 1.0009 - val_accuracy: 0.5050

Epoch 00044: val_loss did not improve from 0.69967
Epoch 45/50
50/50 [==============================] - 37s 734ms/step - loss: 0.5182 - accuracy: 0.7530 - val_loss: 1.4317 - val_accuracy: 0.3800

Epoch 00045: val_loss did not improve from 0.69967
Epoch 46/50
50/50 [==============================] - 37s 731ms/step - loss: 0.4997 - accuracy: 0.7500 - val_loss: 1.2181 - val_accuracy: 0.4350

Epoch 00046: val_loss did not improve from 0.69967
Epoch 47/50
50/50 [==============================] - 37s 737ms/step - loss: 0.5057 - accuracy: 0.7560 - val_loss: 1.5084 - val_accuracy: 0.4250

Epoch 00047: val_loss did not improve from 0.69967
Epoch 48/50
50/50 [==============================] - 37s 740ms/step - loss: 0.4896 - accuracy: 0.7510 - val_loss: 1.2940 - val_accuracy: 0.4050

Epoch 00048: val_loss did not improve from 0.69967
Epoch 49/50
50/50 [==============================] - 38s 759ms/step - loss: 0.5116 - accuracy: 0.7540 - val_loss: 1.3898 - val_accuracy: 0.3800

Epoch 00049: val_loss did not improve from 0.69967
Epoch 50/50
50/50 [==============================] - 37s 733ms/step - loss: 0.5105 - accuracy: 0.7320 - val_loss: 1.4185 - val_accuracy: 0.4300

Epoch 00050: val_loss did not improve from 0.69967
from matplotlib.pyplot import figure
colors = plt.rcParams['axes.prop_cycle'].by_key()['color']
def plot_metrics(history):
  metrics = ['loss', 'accuracy']
  figure(figsize=(20, 8))  # one figure holding both subplots
  for n, metric in enumerate(metrics):
    name = metric.replace("_", " ").capitalize()
    plt.subplot(1, 2, n + 1)
    plt.plot(history.epoch, history.history[metric], color=colors[0], label='Train')
    plt.plot(history.epoch, history.history['val_' + metric],
             color=colors[0], linestyle="--", label='Val')
    plt.xlabel('Epoch')
    plt.ylabel(name)
    if metric == 'loss':
      plt.ylim([0, plt.ylim()[1]])
    else:
      plt.ylim([0, 1])

    plt.legend()
plot_metrics(history)
model.summary()
Model: "X-ray_CNN"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
conv (Conv)                  (None, None, None, 32)    448       
_________________________________________________________________
conv_1 (Conv)                (None, None, None, 64)    18752     
_________________________________________________________________
conv_2 (Conv)                (None, None, None, 128)   74368     
_________________________________________________________________
conv_3 (Conv)                (None, None, None, 128)   148096    
_________________________________________________________________
flatten (Flatten)            (None, None)              0         
_________________________________________________________________
dense (Dense)                (None, 512)               3211776   
_________________________________________________________________
dense_1 (Dense)              (None, 2)                 1026      
=================================================================
Total params: 3,454,466
Trainable params: 3,453,762
Non-trainable params: 704
_________________________________________________________________
test_ds
<RepeatDataset shapes: ((None, 148, 148, None), (None,)), types: (tf.float32, tf.int32)>
model.evaluate(test_ds, steps=int(test_len/batch_size))
20/20 [==============================] - 3s 123ms/step - loss: 1.3300 - accuracy: 0.5150
[1.3299612998962402, 0.5149999856948853]
# The number of evaluation steps is the test set size divided by the batch size
# batch_size = 10
# predictions = model.predict(test_ds, steps=int(test_len/batch_size))
# predictions

The predictions need to be converted to binary class labels before a classification report can be produced (see the argmax sketch after the commented block below).

# for i in range(predictions.shape[0]):
#     if float(predictions[i][0]) > float(predictions[i][1]):
#         pred[i] = 0
#     else:
#         pred[i] = 1
# pred
# predictions
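A more compact alternative (my addition), assuming predictions holds the softmax outputs from model.predict above: take the argmax over the class axis to get the 0/1 labels directly.

# pred = np.argmax(predictions, axis=1)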
i = 0
p = []     # per-batch prediction tensors
gnd = []   # ground-truth labels
prob = []  # predicted class labels (0 = NORMAL, 1 = PNEUMONIA)

"""test_ds.take method takes samles from the test_ds n times when n is the number provided 
as argument. When all the samples are taken, it starts repeating from the first position"""

for images, labels in test_ds.take(int(test_len/batch_size)):
    i+=1
    predictions = model(images)
    p.append(predictions)
    # The second argument of plt.subplots is the batch size (the number of image
    # columns in the figure); the argument of test_ds.take above is the number of
    # batches to iterate over.
    fig, ax = plt.subplots(1, batch_size, figsize=(20, 6))
    for j in range(batch_size):  # loop over the images within one batch
        image = images[j].numpy()
        image = image / np.amax(image)
        image = np.clip(image, 0, 1)
        ax[j].imshow(image)
        
        normal_prob = predictions[j][0]
        normal_prob = round(float(normal_prob),2)
        pneumonia_prob = predictions[j][1]
        pneumonia_prob = round(float(pneumonia_prob),2)
        ax[j].set_title(" gnd = {},\n  n = {},\n p = {}".format(labels[j].numpy(),\
                                                                        normal_prob,pneumonia_prob))
        
        gnd.append(labels[j].numpy())
        if (normal_prob>pneumonia_prob):
            prob_temp = 0
        else:
            prob_temp = 1
        prob.append(prob_temp)
        
        
#         ax[j].set_title(labels[j].numpy())
plt.show()
predictions
<tf.Tensor: shape=(10, 2), dtype=float32, numpy=
array([[0.8422008 , 0.15779918],
       [0.19302045, 0.80697954],
       [0.17157494, 0.82842505],
       [0.68447685, 0.31552318],
       [0.19884992, 0.8011501 ],
       [0.8882417 , 0.11175824],
       [0.67427087, 0.3257291 ],
       [0.26385015, 0.7361498 ],
       [0.9621145 , 0.03788555],
       [0.94220906, 0.05779094]], dtype=float32)>
from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import f1_score
accuracy_score(gnd, prob)
0.52
precision_score(gnd, prob, average = None)
array([0.51724138, 0.52380952])
recall_score(gnd, prob, average = None)
array([0.6 , 0.44])
f1_score(gnd, prob, average = None)
array([0.55555556, 0.47826087])
from sklearn.metrics import classification_report
print(classification_report(gnd, prob))
              precision    recall  f1-score   support

           0       0.52      0.60      0.56       100
           1       0.52      0.44      0.48       100

    accuracy                           0.52       200
   macro avg       0.52      0.52      0.52       200
weighted avg       0.52      0.52      0.52       200

from sklearn.metrics import confusion_matrix
print(confusion_matrix(gnd, prob))
[[60 40]
 [56 44]]
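Optionally (my addition), the confusion matrix can also be drawn as a small heatmap with matplotlib, using the gnd and prob lists collected above.

cm = confusion_matrix(gnd, prob)
plt.figure(figsize=(4, 4))
plt.imshow(cm, cmap='Blues')
plt.xticks([0, 1], ['NORMAL', 'PNEUMONIA'])
plt.yticks([0, 1], ['NORMAL', 'PNEUMONIA'])
plt.xlabel('Predicted label')
plt.ylabel('True label')
for r in range(2):
    for c in range(2):
        plt.text(c, r, cm[r, c], ha='center', va='center')
plt.show()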