Odulana Hammed committed on
Commit 1e93254 · verified · 1 Parent(s): 620ecb1

Update app.py

Files changed (1):
  app.py +94 -41
app.py CHANGED
@@ -1,47 +1,100 @@
-import gradio as gr
-from transformers import AutoProcessor, AutoModelForImageTextToText
+from transformers import MllamaForConditionalGeneration, AutoProcessor, TextIteratorStreamer
 from PIL import Image
+import torch
+from threading import Thread
+import gradio as gr
+import time
 import spaces
 
-# Load Vision-Instruct model
-processor = AutoProcessor.from_pretrained("alpindale/Llama-3.2-11B-Vision-Instruct")
-model = AutoModelForImageTextToText.from_pretrained("alpindale/Llama-3.2-11B-Vision-Instruct")
-
-# Move model to GPU
-model.to("cuda")
+ckpt = "meta-llama/Llama-3.2-11B-Vision-Instruct"
+model = MllamaForConditionalGeneration.from_pretrained(ckpt,
+                                                       torch_dtype=torch.bfloat16).to("cuda")
+processor = AutoProcessor.from_pretrained(ckpt)
 
 @spaces.GPU
-def extract_text_from_image(image):
-    """
-    Function to extract text from a handwritten image using the Meta-Llama model.
-    """
-    try:
-        # Preprocess the image
-        inputs = processor(images=image, return_tensors="pt").to("cuda")
-
-        # Generate predictions
-        outputs = model.generate(**inputs)
-
-        # Decode the generated text
-        extracted_text = processor.decode(outputs[0], skip_special_tokens=True)
-
-        return extracted_text
-    except Exception as e:
-        return f"An error occurred: {str(e)}"
-
-# Define Gradio interface
-title = "Handwritten Text Extraction"
-description = """
-Upload a handwritten image, and this app will use Meta-Llama Vision-Instruct to extract text from the image.
-"""
-
-demo = gr.Interface(
-    fn=extract_text_from_image,
-    inputs=gr.Image(type="pil", label="Upload Handwritten Image"),
-    outputs=gr.Textbox(label="Extracted Text"),
-    title=title,
-    description=description,
-)
-
-if __name__ == "__main__":
-    demo.launch(server_name="0.0.0.0", server_port=7860)
+def bot_streaming(message, history, max_new_tokens=250):
+    txt = message["text"]
+
+    messages = []
+    images = []
+
+    # rebuild the chat history in the processor's message format
+    for i, msg in enumerate(history):
+        if isinstance(msg[0], tuple):  # image turn: pair the image with the following text turn
+            messages.append({"role": "user", "content": [{"type": "text", "text": history[i+1][0]}, {"type": "image"}]})
+            messages.append({"role": "assistant", "content": [{"type": "text", "text": history[i+1][1]}]})
+            images.append(Image.open(msg[0][0]).convert("RGB"))
+        elif isinstance(history[i-1], tuple) and isinstance(msg[0], str):
+            # messages are already handled in the image turn above
+            pass
+        elif isinstance(history[i-1][0], str) and isinstance(msg[0], str):  # text-only turn
+            messages.append({"role": "user", "content": [{"type": "text", "text": msg[0]}]})
+            messages.append({"role": "assistant", "content": [{"type": "text", "text": msg[1]}]})
+
+    # add current message
+    if len(message["files"]) == 1:
+        if isinstance(message["files"][0], str):  # examples
+            image = Image.open(message["files"][0]).convert("RGB")
+        else:  # regular input
+            image = Image.open(message["files"][0]["path"]).convert("RGB")
+        images.append(image)
+        messages.append({"role": "user", "content": [{"type": "text", "text": txt}, {"type": "image"}]})
+    else:
+        messages.append({"role": "user", "content": [{"type": "text", "text": txt}]})
+
+    texts = processor.apply_chat_template(messages, add_generation_prompt=True)
+
+    if images == []:
+        inputs = processor(text=texts, return_tensors="pt").to("cuda")
+    else:
+        inputs = processor(text=texts, images=images, return_tensors="pt").to("cuda")
+    streamer = TextIteratorStreamer(processor, skip_special_tokens=True, skip_prompt=True)
+
+    generation_kwargs = dict(inputs, streamer=streamer, max_new_tokens=max_new_tokens)
+
+    # run generate() on a background thread and stream partial output
+    thread = Thread(target=model.generate, kwargs=generation_kwargs)
+    thread.start()
+    buffer = ""
+
+    for new_text in streamer:
+        buffer += new_text
+        time.sleep(0.01)
+        yield buffer
+
+
+demo = gr.ChatInterface(fn=bot_streaming, title="Multimodal Llama", examples=[
+        [{"text": "Which era does this piece belong to? Give details about the era.", "files": ["./examples/rococo.jpg"]}, 200],
+        [{"text": "Where do the droughts happen according to this diagram?", "files": ["./examples/weather_events.png"]}, 250],
+        [{"text": "What happens when you take out white cat from this chain?", "files": ["./examples/ai2d_test.jpg"]}, 250],
+        [{"text": "How long does it take from invoice date to due date? Be short and concise.", "files": ["./examples/invoice.png"]}, 250],
+        [{"text": "Where to find this monument? Can you give me other recommendations around the area?", "files": ["./examples/wat_arun.jpg"]}, 250],
+    ],
+    textbox=gr.MultimodalTextbox(),
+    additional_inputs=[gr.Slider(
+        minimum=10,
+        maximum=500,
+        value=250,
+        step=10,
+        label="Maximum number of new tokens to generate",
+    )],
+    cache_examples=False,
+    description="Try Multimodal Llama by Meta with transformers in this demo. Upload an image, and start chatting about it, or simply try one of the examples below. To learn more about Llama Vision, visit [our blog post](https://huggingface.co/blog/llama32).",
+    stop_btn="Stop Generation",
+    fill_height=True,
+    multimodal=True)
+
+demo.launch(debug=True)
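
For context on the new message format: every {"type": "image"} entry in the messages list is a placeholder that apply_chat_template expands into an <|image|> token, and each placeholder must be matched by one image passed to the processor. A minimal sketch of that step in isolation (same checkpoint as the diff; the repo is gated on the Hub, so access must be granted first):

from transformers import AutoProcessor

processor = AutoProcessor.from_pretrained("meta-llama/Llama-3.2-11B-Vision-Instruct")

messages = [
    {"role": "user", "content": [{"type": "text", "text": "Describe this image."},
                                 {"type": "image"}]},
]

# Returns the prompt string with chat headers and one <|image|> token;
# the image itself is supplied later via processor(text=..., images=...)
prompt = processor.apply_chat_template(messages, add_generation_prompt=True)
print(prompt)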
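The streaming itself follows the standard transformers pattern: model.generate() blocks, so it runs on a background thread while the main thread iterates over a TextIteratorStreamer and yields partial text to Gradio. A minimal text-only sketch of the same pattern, using gpt2 purely as a small ungated stand-in model:

from threading import Thread
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")

inputs = tokenizer("Streaming generation works by", return_tensors="pt")
# skip_prompt=True keeps the echoed input prompt out of the streamed output
streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)

# generate() runs in the background; the streamer yields decoded text
# chunks on the main thread as soon as tokens are produced
thread = Thread(target=model.generate, kwargs=dict(inputs, streamer=streamer, max_new_tokens=30))
thread.start()
for new_text in streamer:
    print(new_text, end="", flush=True)
thread.join()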