import gradio as gr
from transformers import pipeline

# Load the image-classification pipeline once at startup so the
# ViT food model is not reloaded on every request
pipe = pipeline(task="image-classification", model="SeyedAli/Food-Image-Classification-VIT")

def FoodClassification(image):
    # Gradio passes the input as a PIL Image (type="pil" below);
    # the pipeline returns a list of {"label", "score"} dictionaries
    output = pipe(image)
    return output

iface = gr.Interface(fn=FoodClassification, inputs=gr.Image(type="pil"), outputs="text")
iface.launch(share=False)
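# Optional local sanity check (a sketch, not part of the Space itself): the
# same pipeline can be called directly without launching the Gradio UI.
# The file name "sample_food.jpg" is only an illustrative placeholder.
#
#   from PIL import Image
#   preds = pipe(Image.open("sample_food.jpg"))
#   print(preds[0]["label"], preds[0]["score"])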