# NOTE: the three lines below were Hugging Face Hub page residue (uploader
# "Vrooh933", file "app.py", commit 8f6f7fb) pasted above the script; they
# are kept as a comment so the file parses as valid Python.
#!/usr/bin/env python
# coding: utf-8
# In[8]:
import gradio as gr
import requests
import torch
from PIL import Image
from transformers import (
    AutoTokenizer,
    PreTrainedTokenizerFast,
    ViTFeatureExtractor,
    VisionEncoderDecoderModel,
)
# In[5]:
# Single Hub model id shared by the model, its preprocessor and tokenizer.
_MODEL_ID = "nlpconnect/vit-gpt2-image-captioning"

# Encoder-decoder captioning model (ViT image encoder + GPT-2 text decoder,
# per the checkpoint name).
model = VisionEncoderDecoderModel.from_pretrained(_MODEL_ID)

# Preprocessor that converts a PIL image into normalized pixel tensors.
feature_extractor = ViTFeatureExtractor.from_pretrained(_MODEL_ID)

# Tokenizer used to decode generated token ids back into text.
tokenizer = AutoTokenizer.from_pretrained(_MODEL_ID)
# In[17]:
def vit2distilgpt2(img):
    """Generate a one-sentence caption for an uploaded image.

    Args:
        img: A PIL image (Gradio delivers the upload as PIL, per the
            ``gr.inputs.Image(type="pil")`` component below).

    Returns:
        str: The caption text, truncated at the first period.
    """
    # Preprocess the image into the pixel tensor the ViT encoder expects.
    pixel_values = feature_extractor(
        images=img, return_tensors="pt").pixel_values
    # Inference only: disable autograd to cut memory use and latency.
    # (The original bound this to a misleading `encoder_outputs` alias —
    # model.generate returns generated token ids, not encoder outputs.)
    with torch.no_grad():
        generated_ids = model.generate(pixel_values.to('cpu'), num_beams=5)
    # Decode token ids to text, dropping special tokens.
    generated_sentences = tokenizer.batch_decode(
        generated_ids, skip_special_tokens=True)
    # Keep only the first sentence of the (single) beam-search result.
    return generated_sentences[0].split('.')[0]
# In[18]:
# In[29]:
# Gradio components via the modern top-level API; the `gr.inputs` /
# `gr.outputs` namespaces used originally were deprecated in Gradio 2.x
# and removed in 3.x+, so the old code crashes on current Gradio.
inputs = [
    gr.Image(type="pil", label="Original Image"),
]
outputs = [
    gr.Textbox(label="Caption"),
]

title = "Image Captioning using ViT + GPT2"
# User-facing blurb (grammar cleaned up from the original).
description = (
    "ViT and GPT2 are used to generate an image caption for the uploaded "
    "image. The COCO dataset was used for training. The validation metric "
    "is still low, so there is a high chance the model produces a biased "
    "caption for some pictures."
)
# Footer HTML; the original was missing the closing </p> tag.
article = (
    "<p style='text-align: center'>"
    "<a href='https://www.linkedin.com/in/m-afif-rizky-a-a96048182/'>"
    "Created by @Vrooh933 Production</a> | "
    "<a href='https://github.com/afifrizkyandika11551100310'>GitHub Profile</a>"
    "</p>"
)

# Example files are expected to sit next to app.py in the Space repo.
examples = [["food.jpg"],
            ["bang_mizan1.jpg"],
            ["gambar1.jpg"]]

# Build the demo and start the server (blocks until shut down).
gr.Interface(
    vit2distilgpt2,
    inputs,
    outputs,
    title=title,
    description=description,
    examples=examples,
    article=article,
).launch()
# In[ ]: