# !pip install transformers

# Import the required libraries:
# - VisionEncoderDecoderModel initializes an image-to-text model with any pretrained
#   Transformer-based vision model as the encoder and a language model as the decoder
# - ViTFeatureExtractor converts the input image into the pixel values the ViT encoder expects
# - AutoTokenizer converts text to and from the token IDs the GPT-2 decoder works with
# - PIL (Python Imaging Library) handles image loading and conversion
from transformers import VisionEncoderDecoderModel, ViTFeatureExtractor, AutoTokenizer
import torch
from PIL import Image

# Download the pretrained ViT + GPT-2 image-captioning model from the Hugging Face Hub
model = VisionEncoderDecoderModel.from_pretrained("nlpconnect/vit-gpt2-image-captioning")

# Download the matching feature extractor
feature_extractor = ViTFeatureExtractor.from_pretrained("nlpconnect/vit-gpt2-image-captioning")
# Download the matching tokenizer
tokenizer = AutoTokenizer.from_pretrained("nlpconnect/vit-gpt2-image-captioning")

# Use the GPU if one is available, otherwise fall back to the CPU

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)

# Generation settings: beam search with 4 beams, captions capped at 16 tokens,
# and 3 distinct candidate captions returned per image

max_length = 16
num_beams = 4
num_return_sequences = 3
gen_kwargs = {"max_length": max_length, "num_beams": num_beams}

# predict_step runs the full captioning pipeline: it converts the incoming NumPy
# array to a PIL image, encodes it with the ViT feature extractor, generates token
# IDs with beam search, and decodes them back into caption strings.

def predict_step(image):
    # Gradio passes the image as a NumPy array; convert it to an RGB PIL image
    i_image = Image.fromarray(image.astype('uint8'), 'RGB')
    # Preprocess the image into the pixel_values tensor the ViT encoder expects
    pixel_values = feature_extractor(images=i_image, return_tensors="pt").pixel_values
    pixel_values = pixel_values.to(device)

    # Generate several candidate captions with beam search
    output_ids = model.generate(pixel_values, **gen_kwargs, num_return_sequences=num_return_sequences)

    # Decode the token IDs into text and strip leading/trailing whitespace
    preds = tokenizer.batch_decode(output_ids, skip_special_tokens=True)
    preds = [pred.strip() for pred in preds]
    return tuple(preds)
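
# Quick standalone check (an illustrative sketch, not part of the Gradio app flow):
# it assumes one of the example images listed below, e.g. "test1.png", sits next
# to this script, and is skipped otherwise.
import os
import numpy as np

if os.path.exists("test1.png"):
    sample = np.array(Image.open("test1.png").convert("RGB"))
    print(predict_step(sample))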

# ! pip install gradio

import gradio as gr

# Build the Gradio interface: one image input and one textbox per generated caption

inputs = gr.Image(type="numpy", label="Original Image")
outputs = [gr.Textbox(label=f"Caption {i+1}") for i in range(num_return_sequences)]

title = "Image Captioning using ViT + GPT2"
description = "A caption is generated for the uploaded image using ViT + GPT2, trained on the COCO dataset. If you notice any biases (gender, race, etc.) in the captioning model that we were unable to identify during our stress tests, please use the 'Flag' button to mark the offending image."
article = " <a href='https://huggingface.co/sachin/vit2distilgpt2'>Model Repo on Hugging Face Model Hub</a>"
examples = [["test1.png"], ["test2.png"],["test3.png"]]

iface = gr.Interface(
    fn=predict_step,
    inputs=inputs,
    outputs=outputs,
    title=title,
    description=description,
    article=article,
    examples=examples,
)
iface.queue().launch(debug=True)