import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.layers import Input, Lambda, Dense, Flatten, Rescaling
from tensorflow.keras.models import Model
import gradio as gr
# Display
from IPython.display import Image, display
import matplotlib.pyplot as plt
import matplotlib.cm as cm
##### Configurable variables ###########
model_weights_path = "./model_weights.h5"
debug = False  # set to True to print the intermediate tensors while building the model
####################################################################
############ Creating new model #############################
####################################################################
base_model = keras.applications.Xception(input_shape=(160, 160, 3), include_top=False)
base_model.trainable = False
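# The Xception base is frozen and used purely as a feature extractor; its final
# convolutional feature map (the tensor named B below) is what Grad-CAM inspects.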
def create_model():
    input_layer = keras.Input(shape=(160, 160, 3))
    if debug:
        print(input_layer)
    R1 = Rescaling(scale=1 / 255)(input_layer)
    if debug:
        print(R1)
    B = base_model(R1, training=False)
    if debug:
        print(B)
    P1 = keras.layers.GlobalAveragePooling2D()(B)
    if debug:
        print(P1)
    output_layer = Dense(1, activation="sigmoid")(P1)
    if debug:
        print(f"output_layer: {output_layer}")
    model = keras.Model(input_layer, output_layer)
    model.compile(optimizer=keras.optimizers.Adam(0.001), loss=keras.losses.BinaryCrossentropy(), metrics=["accuracy"])
    return model, input_layer, R1, B, P1, output_layer
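# The intermediate tensors are returned alongside the model so that the Grad-CAM
# model further down can be wired onto the exact same graph (sharing the loaded weights).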
print("Creating new model ...") | |
model , input_layer , R1 , B , P1 , output_layer = create_model() | |
print("Loading weights ....") | |
model.load_weights(model_weights_path) | |
####################################################################
############ Creating gradcam model #########################
####################################################################
def make_gradcam_heatmap(img_array, grad_model):
    # We compute the gradient of the predicted class score for our input image
    # with respect to the activations of the last conv layer
    with tf.GradientTape() as tape:
        preds, last_conv_layer_output = grad_model(img_array)

    # This is the gradient of the output neuron (the single logit)
    # with regard to the output feature map of the last conv layer
    grads = tape.gradient(preds, last_conv_layer_output)

    # This is a vector where each entry is the mean intensity of the gradient
    # over a specific feature map channel
    pooled_grads = tf.reduce_mean(grads, axis=(0, 1, 2))

    # We multiply each channel in the feature map array
    # by "how important this channel is" with regard to the predicted class,
    # then sum all the channels to obtain the heatmap class activation
    last_conv_layer_output = last_conv_layer_output[0]
    heatmap = last_conv_layer_output @ pooled_grads[..., tf.newaxis]
    heatmap = tf.squeeze(heatmap)

    # For visualization purposes, we also normalize the heatmap between 0 & 1
    heatmap = tf.maximum(heatmap, 0) / tf.math.reduce_max(heatmap)
    return preds, heatmap.numpy()
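# In Grad-CAM terms the heatmap above is ReLU(sum_k alpha_k * A^k), where A^k are the
# channels of the last conv feature map and alpha_k the spatially averaged gradients.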
def get_gradcam(img, heatmap, alpha=0.4):
    # Rescale heatmap to a range 0-255
    heatmap = np.uint8(255 * heatmap)

    # Use jet colormap to colorize heatmap
    jet = cm.get_cmap("jet")

    # Use RGB values of the colormap
    jet_colors = jet(np.arange(256))[:, :3]
    jet_heatmap = jet_colors[heatmap]

    # Create an image with RGB colorized heatmap
    jet_heatmap = keras.utils.array_to_img(jet_heatmap)
    jet_heatmap = jet_heatmap.resize((img.shape[1], img.shape[0]))
    jet_heatmap = keras.utils.img_to_array(jet_heatmap)

    # Superimpose the heatmap on the original image
    superimposed_img = jet_heatmap * alpha + img
    superimposed_img = keras.utils.array_to_img(superimposed_img)
    return superimposed_img
print("Creating Gradcam model ....") | |
grad_model = keras.Model( | |
input_layer , [output_layer , B] | |
) | |
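# grad_model maps the input image to both the classifier output and the Xception
# feature map B, so a single forward pass provides everything Grad-CAM needs.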
# Strip the sigmoid from the shared final Dense layer so gradients are taken on the
# raw logit; the sigmoid is re-applied manually in predict_image below.
grad_model.layers[-1].activation = None
grad_model.summary()
######################################################
def sigmoid(x):
    return 1 / (1 + np.exp(-x))
#######################################################
def predict_image(image):
    try:
        # Resize the uploaded image to the 160x160 input size the model expects
        image = tf.image.resize(image, (160, 160)).numpy()
        pred, heatmap = make_gradcam_heatmap(image.reshape((1, 160, 160, 3)), grad_model)
        gradcam_image = get_gradcam(image, heatmap)
        # The grad model's final activation was removed, so apply the sigmoid manually
        pred = sigmoid(pred)
        predicted_label = f"{(1 - pred[0][0]) * 100:.2f}% Dog \n{pred[0][0] * 100:.2f}% Cat \nFinal Prediction : {'Dog' if pred[0][0] < 0.5 else 'Cat'}"
        # Return the predicted label and the Grad-CAM overlay
        return predicted_label, gradcam_image
    except Exception as error:
        return str(error), None
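# Minimal local-test sketch (the file name "./test_image.jpg" is a placeholder; adjust as needed):
#   img = keras.utils.load_img("./test_image.jpg", target_size=(160, 160))
#   label, cam = predict_image(np.array(img))
#   print(label)
#   cam.save("gradcam_overlay.png")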
# NOTE: the old gr.inputs / gr.outputs namespaces were removed in recent Gradio releases,
# so plain components are used here; the 160x160 resize now happens inside predict_image
# instead of in the input component.
image_input = gr.Image(type="numpy")
label_output = gr.Textbox()
image_output = gr.Image(type="pil")
# Create the Gradio interface
gr.Interface(fn=predict_image, inputs=image_input, outputs=[label_output, image_output]).launch()