#!/usr/bin/env python
# coding: utf-8

# In[8]:


import gradio as gr
from PIL import Image
from transformers import VisionEncoderDecoderModel, ViTFeatureExtractor, AutoTokenizer


# In[5]:


# Load the ViT-encoder + GPT-2-decoder checkpoint used for image captioning.
model = VisionEncoderDecoderModel.from_pretrained(
    "nlpconnect/vit-gpt2-image-captioning")


# In[6]:


# The feature extractor converts a PIL image into the pixel tensor the ViT encoder expects.
feature_extractor = ViTFeatureExtractor.from_pretrained(
    "nlpconnect/vit-gpt2-image-captioning")


# In[9]:


# The tokenizer decodes the generated GPT-2 token ids back into text.
tokenizer = AutoTokenizer.from_pretrained(
    "nlpconnect/vit-gpt2-image-captioning")


# In[17]:


def vit2distilgpt2(img):
    # Preprocess the input image into pixel values for the ViT encoder.
    pixel_values = feature_extractor(
        images=img, return_tensors="pt").pixel_values
    # Generate caption token ids with beam search (5 beams) on the CPU.
    generated_ids = model.generate(
        pixel_values.to('cpu'), num_beams=5)
    # Decode the token ids to text and return only the first sentence.
    generated_sentences = tokenizer.batch_decode(
        generated_ids, skip_special_tokens=True)

    return generated_sentences[0].split('.')[0]

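# A minimal local sanity check of the caption function, kept outside the Gradio
# UI. This is only a sketch: it assumes one of the example images listed below
# (e.g. "food.jpg") sits next to this script; adjust the path if it does not.
# Uncomment to generate a single caption from the command line:
# print(vit2distilgpt2(Image.open("food.jpg")))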

# In[29]:
inputs = [
    gr.inputs.Image(type="pil", label="Original Image")
]

outputs = [
    gr.outputs.Textbox(label='Caption')
]

title = "Image Captioning using ViT + GPT2"
description = "ViT and GPT-2 are used to generate a caption for the uploaded image. The model was trained on the COCO dataset. The validation metric is still low, so there is a high chance the model produces biased captions for some pictures."
article = "<p style='text-align: center'><a href='https://www.linkedin.com/in/m-afif-rizky-a-a96048182/'>Created by @Vrooh933 Production</a> | <a href='https://github.com/afifrizkyandika11551100310'>GitHub Profile</a></p>"

examples = [["food.jpg"],
            ["bang_mizan1.jpg"],
            ["gambar1.jpg"]]

gr.Interface(
    vit2distilgpt2,
    inputs,
    outputs,
    title=title,
    description=description,
    examples=examples,
    article=article,
).launch()

