# FRUITS_AND_VEGETABLES_CLASSIFIER / fruit_classifier_app.py
# -*- coding: utf-8 -*-
"""Fruit_Classifier_app.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1wFmOPbrpLNAJxJsRfdiNoE_r1f8brR6M
## FRUIT CLASSIFICATION APP
"""
!pip install gradio
!pip install -U albumentations
!pip install opencv-python==4.5.4.60
!pip install timm==0.6.2.dev0
# Start by mounting Google Drive inside Colab
from google.colab import drive
drive.mount('/content/gdrive')
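# Drive folder that holds the exported learner and the example images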
path = '/content/gdrive/MyDrive/Fruit_Project/'
import gradio as gr
from fastai.vision.all import *
import skimage
import pathlib
from PIL import Image
import albumentations
from albumentations.pytorch import ToTensorV2
import timm
import platform

# Learners exported on Windows pickle WindowsPath objects; remap them when loading on Linux
plat = platform.system()
if plat == 'Linux':
    pathlib.WindowsPath = pathlib.PosixPath
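# The custom transform class below must be (re)defined here: load_learner unpickles the
# exported Learner, and unpickling fails if the classes/functions it references are missing.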
class AlbumentationsTransform(RandTransform):
    split_idx, order = None, 2
    def __init__(self, train_aug, valid_aug): store_attr()

    def before_call(self, b, split_idx):
        self.idx = split_idx

    def encodes(self, img: PILImage):
        if self.idx == 0:
            aug_img = self.train_aug(image=np.array(img))['image']
        else:
            aug_img = self.valid_aug(image=np.array(img))['image']
        return PILImage.create(aug_img)
def get_valid_aug():
    return albumentations.Compose([
        albumentations.Resize(224, 224),
    ], p=1.0)
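# get_valid_aug is likewise referenced by the exported learner; at inference time fastai
# uses the validation split, so only this plain 224x224 resize is applied to incoming images.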
learn = load_learner(path + 'fruit_model_v2.pkl')
labels = learn.dls.vocab
def predict(img):
    pred, pred_idx, probs = learn.predict(img)
    return {labels[i]: float(probs[i]) for i in range(len(labels))}
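# predict() maps every label in learn.dls.vocab to its probability;
# gr.outputs.Label then displays the three most likely classes.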
# predict('/content/gdrive/MyDrive/Fruit_Project/Onion.jpg')
title = "Fruit and Vegetation Classifier"
description = '''A simple app to classify various fruits and vegetables '''
examples = [[path + 'Onion.jpg'],
            [path + 'orange.jpg'],
            [path + 'plum.jpg'],
            [path + 'tomato.jpg'],
            [path + 'banana.jpg']]
enable_queue = True
gr.Interface(fn=predict,
             inputs=gr.inputs.Image(shape=(224, 224)),
             outputs=gr.outputs.Label(num_top_classes=3),
             title=title,
             description=description,
             examples=examples,
             flagging_options=["Incorrect Prediction"],
             enable_queue=enable_queue).launch()