Update app.py
Browse files
app.py
CHANGED
@@ -1,137 +1,137 @@
|
|
1 |
import gradio as gr
|
2 |
import os
|
|
|
|
|
3 |
from pdf2image import convert_from_path
|
4 |
-
from PIL import Image
|
5 |
-
import torch
|
6 |
-
from torchvision import transforms
|
7 |
from transformers import AutoModelForObjectDetection, TableTransformerForObjectDetection
|
|
|
8 |
import pandas as pd
|
9 |
import numpy as np
|
|
|
|
|
10 |
import easyocr
|
11 |
-
import matplotlib.pyplot as plt
|
12 |
|
13 |
-
|
14 |
-
# Load detection and structure models
|
15 |
def load_detection_model():
|
16 |
model = AutoModelForObjectDetection.from_pretrained("microsoft/table-transformer-detection", revision="no_timm")
|
17 |
device = "cuda" if torch.cuda.is_available() else "cpu"
|
18 |
model.to(device)
|
19 |
return model, device
|
20 |
|
21 |
-
|
22 |
def load_structure_model(device):
|
23 |
-
|
24 |
-
|
25 |
-
return
|
26 |
|
27 |
-
|
28 |
-
# Preprocess image
|
29 |
-
class MaxResize:
|
30 |
def __init__(self, max_size=800):
|
31 |
self.max_size = max_size
|
32 |
|
33 |
def __call__(self, image):
|
34 |
width, height = image.size
|
35 |
-
|
36 |
-
scale = self.max_size /
|
37 |
-
|
38 |
-
return
|
39 |
-
|
40 |
|
41 |
def preprocess_image(image, max_size=800):
|
42 |
-
|
43 |
MaxResize(max_size),
|
44 |
transforms.ToTensor(),
|
45 |
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
|
46 |
])
|
47 |
-
pixel_values =
|
48 |
return pixel_values
|
49 |
|
50 |
-
|
51 |
-
|
52 |
-
|
53 |
-
pixel_values = preprocess_image(image).to(device)
|
54 |
with torch.no_grad():
|
55 |
outputs = model(pixel_values)
|
56 |
return outputs
|
57 |
|
58 |
-
|
59 |
-
|
60 |
-
|
61 |
-
|
62 |
-
|
63 |
-
|
64 |
-
|
65 |
-
|
66 |
-
|
67 |
-
|
68 |
-
|
69 |
-
|
70 |
-
|
71 |
-
|
72 |
-
|
73 |
-
|
74 |
-
|
75 |
-
|
76 |
-
|
77 |
-
|
78 |
-
|
79 |
-
|
80 |
-
|
81 |
-
|
82 |
-
|
83 |
-
#
|
84 |
-
def
|
85 |
-
|
86 |
-
|
87 |
-
|
88 |
-
|
89 |
-
|
90 |
-
|
91 |
-
|
92 |
-
|
93 |
-
|
94 |
-
|
95 |
-
|
96 |
-
|
97 |
-
|
98 |
-
|
99 |
-
|
100 |
-
|
101 |
-
|
102 |
-
|
103 |
-
|
104 |
-
|
105 |
-
|
106 |
-
|
107 |
-
|
108 |
-
|
109 |
-
|
110 |
-
|
111 |
-
|
112 |
-
|
113 |
-
|
114 |
-
|
115 |
-
|
116 |
-
|
117 |
-
|
118 |
-
|
119 |
-
|
120 |
-
|
121 |
-
|
122 |
-
|
123 |
-
|
124 |
-
|
125 |
-
return
|
126 |
-
|
127 |
-
|
128 |
-
|
129 |
-
|
|
|
|
|
|
|
|
|
|
|
130 |
inputs=gr.inputs.File(label="Upload PDF"),
|
131 |
-
outputs=gr.outputs.File(label="
|
132 |
-
title="Table Detection
|
133 |
-
description="Upload a PDF, and this
|
134 |
)
|
135 |
|
136 |
if __name__ == "__main__":
|
137 |
-
|
|
|
1 |
import gradio as gr
|
2 |
import os
|
3 |
+
import shutil
|
4 |
+
import zipfile
|
5 |
from pdf2image import convert_from_path
|
|
|
|
|
|
|
6 |
from transformers import AutoModelForObjectDetection, TableTransformerForObjectDetection
|
7 |
+
from PIL import Image, ImageDraw
|
8 |
import pandas as pd
|
9 |
import numpy as np
|
10 |
+
import torch
|
11 |
+
from torchvision import transforms
|
12 |
import easyocr
|
|
|
13 |
|
14 |
+
# Define functions for model loading and preprocessing
|
|
|
15 |
def load_detection_model():
    """Load the pretrained table-detection Table Transformer and pick a device.

    Returns:
        tuple: ``(model, device)`` — the detection model already moved to
        ``device``, and the device string ("cuda" when available, else "cpu").
    """
    device = "cuda" if torch.cuda.is_available() else "cpu"
    detector = AutoModelForObjectDetection.from_pretrained(
        "microsoft/table-transformer-detection", revision="no_timm"
    )
    detector.to(device)
    return detector, device
|
20 |
|
|
|
21 |
def load_structure_model(device):
    """Load the table-structure-recognition Table Transformer onto *device*.

    Args:
        device: torch device string ("cuda" or "cpu").

    Returns:
        The pretrained structure-recognition model, moved to ``device``.
    """
    recognizer = TableTransformerForObjectDetection.from_pretrained(
        "microsoft/table-structure-recognition-v1.1-all"
    )
    # nn.Module.to returns the module itself, so this is a single expression.
    return recognizer.to(device)
|
25 |
|
26 |
+
class MaxResize:
    """Callable transform that rescales an image so its longest side equals
    ``max_size``.

    Note: images whose longest side is smaller than ``max_size`` are
    upscaled as well — the scale factor is applied unconditionally.
    """

    def __init__(self, max_size=800):
        # Target length (pixels) for the image's larger dimension.
        self.max_size = max_size

    def __call__(self, image):
        w, h = image.size
        factor = self.max_size / max(w, h)
        new_dims = (int(round(factor * w)), int(round(factor * h)))
        return image.resize(new_dims)
|
|
|
36 |
|
37 |
def preprocess_image(image, max_size=800):
    """Resize, tensorize, and normalize a PIL image for the detection model.

    Args:
        image: input PIL image.
        max_size: longest-side target forwarded to :class:`MaxResize`.

    Returns:
        A float tensor of shape ``(1, C, H, W)`` ready for the model.
    """
    pipeline = transforms.Compose([
        MaxResize(max_size),
        transforms.ToTensor(),
        # ImageNet channel statistics expected by the pretrained backbone.
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
    ])
    return pipeline(image).unsqueeze(0)
|
45 |
|
46 |
+
# Define detection functions
|
47 |
+
def detect_tables(model, pixel_values, device):
    """Run the detection model on a preprocessed batch without gradients.

    Args:
        model: detection model (callable on a pixel-value tensor).
        pixel_values: preprocessed input tensor, shape ``(1, C, H, W)``.
        device: torch device string the model lives on.

    Returns:
        The raw model outputs.
    """
    batch = pixel_values.to(device)
    with torch.no_grad():
        return model(batch)
|
52 |
|
53 |
+
def rescale_bboxes(out_bbox, size):
    """Convert normalized (cx, cy, w, h) boxes to absolute (x1, y1, x2, y2).

    Args:
        out_bbox: tensor of boxes in normalized center format, shape ``(N, 4)``.
        size: ``(width, height)`` of the target image in pixels.

    Returns:
        Tensor of shape ``(N, 4)`` with corner coordinates in pixel units.
    """
    width, height = size
    cx, cy, bw, bh = out_bbox.unbind(-1)
    corners = torch.stack(
        [cx - 0.5 * bw, cy - 0.5 * bh, cx + 0.5 * bw, cy + 0.5 * bh],
        dim=1,
    )
    scale = torch.tensor([width, height, width, height], dtype=torch.float32)
    return corners * scale
|
58 |
+
|
59 |
+
def outputs_to_objects(outputs, img_size, id2label):
    """Turn raw detection outputs into a list of labeled, scored pixel boxes.

    Args:
        outputs: model outputs exposing ``logits`` and ``"pred_boxes"``.
        img_size: ``(width, height)`` of the source image in pixels.
        id2label: mapping from class index to label string.

    Returns:
        list[dict]: one ``{"label", "score", "bbox"}`` dict per detection,
        with "no object" predictions filtered out.
    """
    # Highest-probability class (and its probability) per query.
    best = outputs.logits.softmax(-1).max(-1)
    labels = list(best.indices.detach().cpu().numpy())[0]
    scores = list(best.values.detach().cpu().numpy())[0]
    boxes = outputs["pred_boxes"].detach().cpu()[0]
    boxes = [box.tolist() for box in rescale_bboxes(boxes, img_size)]

    results = []
    for label, score, bbox in zip(labels, scores, boxes):
        name = id2label[int(label)]
        if name == "no object":
            continue
        results.append(
            {"label": name, "score": float(score), "bbox": [float(c) for c in bbox]}
        )
    return results
|
71 |
+
|
72 |
+
# OCR function
|
73 |
+
# Cache OCR readers per language: constructing easyocr.Reader loads model
# weights, which is far too slow to repeat for every table and every page.
_OCR_READERS = {}


def apply_ocr(image, language="vi"):
    """Run EasyOCR on *image* and return the recognized text fragments.

    Args:
        image: PIL image (converted to a numpy array for EasyOCR).
        language: EasyOCR language code (default ``"vi"``, Vietnamese).

    Returns:
        list[str]: recognized text fragments (``detail=0`` output).
    """
    reader = _OCR_READERS.get(language)
    if reader is None:
        reader = _OCR_READERS.setdefault(language, easyocr.Reader([language]))
    return reader.readtext(np.array(image), detail=0)
|
77 |
+
|
78 |
+
# Process PDF
|
79 |
+
def process_pdf(pdf_path, output_dir):
    """Detect tables in a scanned PDF, OCR them to CSVs, and zip the results.

    For each page, tables found by the detection model are cropped, OCR'd,
    and written as one CSV per table; the whole page is OCR'd as well and
    all page text is collected into a single TXT file. Everything is bundled
    into ``output.zip`` inside *output_dir*.

    Args:
        pdf_path: path to the input PDF file.
        output_dir: directory to (re)create for intermediate files and the zip.

    Returns:
        str: path to the created zip archive.
    """
    images = convert_from_path(pdf_path)
    model, device = load_detection_model()
    # NOTE(review): the structure-recognition model was previously loaded here
    # but never used; the load is dropped to avoid a large pointless download.

    # Start from a clean output directory on every run.
    if os.path.exists(output_dir):
        shutil.rmtree(output_dir)
    os.makedirs(output_dir)

    # Build the id->label map once. The original mutated this dict inside the
    # page loop, appending a fresh "no object" entry on every iteration.
    id2label = model.config.id2label
    id2label[len(id2label)] = "no object"

    txt_output = []
    zip_filename = os.path.join(output_dir, "output.zip")
    with zipfile.ZipFile(zip_filename, "w") as zipf:
        for page_num, image in enumerate(images):
            pixel_values = preprocess_image(image)
            outputs = detect_tables(model, pixel_values, device)
            objects = outputs_to_objects(outputs, image.size, id2label)

            # Crop and OCR each detected table; one CSV per table.
            detected_tables = [obj for obj in objects if obj["label"] in ["table", "table rotated"]]
            for idx, table in enumerate(detected_tables):
                x_min, y_min, x_max, y_max = map(int, table["bbox"])
                cropped_table = image.crop((x_min, y_min, x_max, y_max))
                table_data = apply_ocr(cropped_table)

                csv_filename = os.path.join(output_dir, f"page_{page_num+1}_table_{idx+1}.csv")
                pd.DataFrame(table_data).to_csv(csv_filename, index=False)
                zipf.write(csv_filename, os.path.basename(csv_filename))

            # OCR the full page for the plain-text output.
            text = apply_ocr(image)
            txt_output.append("\n".join(text))

        # Collect all page text into a single TXT file inside the archive.
        txt_filename = os.path.join(output_dir, "remaining_text.txt")
        with open(txt_filename, "w", encoding="utf-8") as txt_file:
            txt_file.write("\n".join(txt_output))
        zipf.write(txt_filename, os.path.basename(txt_filename))

    return zip_filename
|
121 |
+
|
122 |
+
# Define Gradio UI
|
123 |
+
def process_file(pdf_file):
    """Gradio handler: process the uploaded PDF and return the result zip path.

    Args:
        pdf_file: uploaded file object; its ``.name`` is the on-disk path.

    Returns:
        str: path to the zip archive produced by ``process_pdf``.
    """
    return process_pdf(pdf_file.name, "output")
|
127 |
+
|
128 |
+
# Wire the processing pipeline into a simple upload/download Gradio UI.
# gr.inputs.File / gr.outputs.File were deprecated in Gradio 3 and removed in
# Gradio 4; gr.File works as both an input and an output component.
app = gr.Interface(
    fn=process_file,
    inputs=gr.File(label="Upload PDF"),
    outputs=gr.File(label="Download Output"),
    title="Table Detection & OCR Extraction",
    description="Upload a scanned PDF, and this app will extract detected tables as CSVs and text as a TXT file."
)

if __name__ == "__main__":
    app.launch()
|