prithivMLmods committed
Commit
a817886
1 Parent(s): 96cae05

Upload ocrtest_qwen.ipynb

Files changed (1)
  1. Demo/ocrtest_qwen.ipynb +556 -0
Demo/ocrtest_qwen.ipynb ADDED
@@ -0,0 +1,556 @@
+ {
+ "nbformat": 4,
+ "nbformat_minor": 0,
+ "metadata": {
+ "colab": {
+ "provenance": [],
+ "gpuType": "T4"
+ },
+ "kernelspec": {
+ "name": "python3",
+ "display_name": "Python 3"
+ },
+ "language_info": {
+ "name": "python"
+ },
+ "accelerator": "GPU"
+ },
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "source": [
+ "# **FT; Key Information Extraction**\n",
+ "Qwen2VLForConditionalGeneration"
+ ],
+ "metadata": {
+ "id": "-b4-SW1aGOcF"
+ }
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "!pip install gradio spaces transformers accelerate numpy requests torch torchvision qwen-vl-utils av ipython reportlab fpdf python-docx pillow huggingface_hub"
+ ],
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ },
+ "id": "oDmd1ZObGSel",
+ "outputId": "5b01f267-d5af-4409-cf67-6c318388d584"
+ },
+ "execution_count": 1,
+ "outputs": [
+ {
+ "output_type": "stream",
+ "name": "stdout",
+ "text": [
+ "Collecting gradio\n",
+ "  Downloading gradio-5.9.1-py3-none-any.whl.metadata (16 kB)\n",
+ "Collecting spaces\n",
+ "  Downloading spaces-0.31.1-py3-none-any.whl.metadata (1.0 kB)\n",
+ "Requirement already satisfied: transformers in /usr/local/lib/python3.10/dist-packages (4.47.1)\n",
+ "Requirement already satisfied: accelerate in /usr/local/lib/python3.10/dist-packages (1.2.1)\n",
+ "Requirement already satisfied: numpy in /usr/local/lib/python3.10/dist-packages (1.26.4)\n",
+ "Requirement already satisfied: requests in /usr/local/lib/python3.10/dist-packages (2.32.3)\n",
+ "Requirement already satisfied: torch in /usr/local/lib/python3.10/dist-packages (2.5.1+cu121)\n",
+ "Requirement already satisfied: torchvision in /usr/local/lib/python3.10/dist-packages (0.20.1+cu121)\n",
+ "Collecting qwen-vl-utils\n",
+ "  Downloading qwen_vl_utils-0.0.8-py3-none-any.whl.metadata (3.6 kB)\n",
+ "Collecting av\n",
+ "  Downloading av-14.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl.metadata (4.5 kB)\n",
+ "Requirement already satisfied: ipython in /usr/local/lib/python3.10/dist-packages (7.34.0)\n",
+ "Collecting reportlab\n",
+ "  Downloading reportlab-4.2.5-py3-none-any.whl.metadata (1.5 kB)\n",
+ "Collecting fpdf\n",
+ "  Downloading fpdf-1.7.2.tar.gz (39 kB)\n",
+ "  Preparing metadata (setup.py) ... done\n",
+ "Collecting python-docx\n",
+ "  Downloading python_docx-1.1.2-py3-none-any.whl.metadata (2.0 kB)\n",
+ "Requirement already satisfied: pillow in /usr/local/lib/python3.10/dist-packages (11.0.0)\n",
+ "Requirement already satisfied: huggingface_hub in /usr/local/lib/python3.10/dist-packages (0.27.0)\n",
+ "Building wheels for collected packages: fpdf\n",
+ "  Building wheel for fpdf (setup.py) ... done\n",
+ "  Created wheel for fpdf: filename=fpdf-1.7.2-py2.py3-none-any.whl size=40704 sha256=442a41ba3b572ac9bae1220ac5f13b34f02252630b892bab6e55af0e878115c0\n",
+ "  Stored in directory: /root/.cache/pip/wheels/f9/95/ba/f418094659025eb9611f17cbcaf2334236bf39a0c3453ea455\n",
+ "Successfully built fpdf\n",
+ "Installing collected packages: pydub, fpdf, uvicorn, tomlkit, semantic-version, ruff, reportlab, python-multipart, python-docx, markupsafe, jedi, ffmpy, av, aiofiles, starlette, qwen-vl-utils, safehttpx, gradio-client, fastapi, gradio, spaces\n",
+ "  Attempting uninstall: markupsafe\n",
+ "    Found existing installation: MarkupSafe 3.0.2\n",
+ "    Uninstalling MarkupSafe-3.0.2:\n",
+ "      Successfully uninstalled MarkupSafe-3.0.2\n",
+ "Successfully installed aiofiles-23.2.1 av-14.0.1 fastapi-0.115.6 ffmpy-0.5.0 fpdf-1.7.2 gradio-5.9.1 gradio-client-1.5.2 jedi-0.19.2 markupsafe-2.1.5 pydub-0.25.1 python-docx-1.1.2 python-multipart-0.0.20 qwen-vl-utils-0.0.8 reportlab-4.2.5 ruff-0.8.5 safehttpx-0.1.6 semantic-version-2.10.0 spaces-0.31.1 starlette-0.41.3 tomlkit-0.13.2 uvicorn-0.34.0\n"
+ ]
+ }
+ ]
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "# Authenticate with Hugging Face\n",
+ "from huggingface_hub import login\n",
+ "\n",
+ "# Log in to Hugging Face using the provided token\n",
+ "hf_token = '----xxx----'\n",
+ "login(hf_token)\n",
+ "\n",
+ "#Demo\n",
+ "import gradio as gr\n",
+ "import spaces\n",
+ "from transformers import Qwen2VLForConditionalGeneration, AutoProcessor, TextIteratorStreamer\n",
+ "from qwen_vl_utils import process_vision_info\n",
+ "import torch\n",
+ "from PIL import Image\n",
+ "import os\n",
+ "import uuid\n",
+ "import io\n",
+ "from threading import Thread\n",
+ "from reportlab.lib.pagesizes import A4\n",
+ "from reportlab.lib.styles import getSampleStyleSheet\n",
+ "from reportlab.lib import colors\n",
+ "from reportlab.platypus import SimpleDocTemplate, Image as RLImage, Paragraph, Spacer\n",
+ "from reportlab.lib.units import inch\n",
+ "from reportlab.pdfbase import pdfmetrics\n",
+ "from reportlab.pdfbase.ttfonts import TTFont\n",
+ "import docx\n",
+ "from docx.enum.text import WD_ALIGN_PARAGRAPH\n",
+ "\n",
+ "# Define model options\n",
+ "MODEL_OPTIONS = {\n",
+ "    \"OCR-KIE\": \"prithivMLmods/Qwen2-VL-OCR-2B-Instruct\",\n",
+ "}\n",
+ "\n",
+ "# Preload models and processors into CUDA\n",
+ "models = {}\n",
+ "processors = {}\n",
+ "for name, model_id in MODEL_OPTIONS.items():\n",
+ "    print(f\"Loading {name}...\")\n",
+ "    models[name] = Qwen2VLForConditionalGeneration.from_pretrained(\n",
+ "        model_id,\n",
+ "        trust_remote_code=True,\n",
+ "        torch_dtype=torch.float16\n",
+ "    ).to(\"cuda\").eval()\n",
+ "    processors[name] = AutoProcessor.from_pretrained(model_id, trust_remote_code=True)\n",
+ "\n",
+ "image_extensions = Image.registered_extensions()\n",
+ "\n",
+ "def identify_and_save_blob(blob_path):\n",
+ "    \"\"\"Identifies if the blob is an image and saves it.\"\"\"\n",
+ "    try:\n",
+ "        with open(blob_path, 'rb') as file:\n",
+ "            blob_content = file.read()\n",
+ "            try:\n",
+ "                Image.open(io.BytesIO(blob_content)).verify() # Check if it's a valid image\n",
+ "                extension = \".png\" # Default to PNG for saving\n",
+ "                media_type = \"image\"\n",
+ "            except (IOError, SyntaxError):\n",
+ "                raise ValueError(\"Unsupported media type. Please upload a valid image.\")\n",
+ "\n",
+ "            filename = f\"temp_{uuid.uuid4()}_media{extension}\"\n",
+ "            with open(filename, \"wb\") as f:\n",
+ "                f.write(blob_content)\n",
+ "\n",
+ "            return filename, media_type\n",
+ "\n",
+ "    except FileNotFoundError:\n",
+ "        raise ValueError(f\"The file {blob_path} was not found.\")\n",
+ "    except Exception as e:\n",
+ "        raise ValueError(f\"An error occurred while processing the file: {e}\")\n",
+ "\n",
+ "@spaces.GPU\n",
+ "def qwen_inference(model_name, media_input, text_input=None):\n",
+ "    \"\"\"Handles inference for the selected model.\"\"\"\n",
+ "    model = models[model_name]\n",
+ "    processor = processors[model_name]\n",
+ "\n",
+ "    if isinstance(media_input, str):\n",
+ "        media_path = media_input\n",
+ "        if media_path.endswith(tuple([i for i in image_extensions.keys()])):\n",
+ "            media_type = \"image\"\n",
+ "        else:\n",
+ "            try:\n",
+ "                media_path, media_type = identify_and_save_blob(media_input)\n",
+ "            except Exception as e:\n",
+ "                raise ValueError(\"Unsupported media type. Please upload a valid image.\")\n",
+ "\n",
+ "    messages = [\n",
+ "        {\n",
+ "            \"role\": \"user\",\n",
+ "            \"content\": [\n",
+ "                {\n",
+ "                    \"type\": media_type,\n",
+ "                    media_type: media_path\n",
+ "                },\n",
+ "                {\"type\": \"text\", \"text\": text_input},\n",
+ "            ],\n",
+ "        }\n",
+ "    ]\n",
+ "\n",
+ "    text = processor.apply_chat_template(\n",
+ "        messages, tokenize=False, add_generation_prompt=True\n",
+ "    )\n",
+ "    image_inputs, _ = process_vision_info(messages)\n",
+ "    inputs = processor(\n",
+ "        text=[text],\n",
+ "        images=image_inputs,\n",
+ "        padding=True,\n",
+ "        return_tensors=\"pt\",\n",
+ "    ).to(\"cuda\")\n",
+ "\n",
+ "    streamer = TextIteratorStreamer(\n",
+ "        processor.tokenizer, skip_prompt=True, skip_special_tokens=True\n",
+ "    )\n",
+ "    generation_kwargs = dict(inputs, streamer=streamer, max_new_tokens=1024)\n",
+ "\n",
+ "    thread = Thread(target=model.generate, kwargs=generation_kwargs)\n",
+ "    thread.start()\n",
+ "\n",
+ "    buffer = \"\"\n",
+ "    for new_text in streamer:\n",
+ "        buffer += new_text\n",
+ "        # Remove <|im_end|> or similar tokens from the output\n",
+ "        buffer = buffer.replace(\"<|im_end|>\", \"\")\n",
+ "        yield buffer\n",
+ "\n",
+ "def format_plain_text(output_text):\n",
+ "    \"\"\"Formats the output text as plain text without LaTeX delimiters.\"\"\"\n",
+ "    # Remove LaTeX delimiters and convert to plain text\n",
+ "    plain_text = output_text.replace(\"\\\\(\", \"\").replace(\"\\\\)\", \"\").replace(\"\\\\[\", \"\").replace(\"\\\\]\", \"\")\n",
+ "    return plain_text\n",
+ "\n",
+ "def generate_document(media_path, output_text, file_format, font_size, line_spacing, alignment, image_size):\n",
+ "    \"\"\"Generates a document with the input image and plain text output.\"\"\"\n",
+ "    plain_text = format_plain_text(output_text)\n",
+ "    if file_format == \"pdf\":\n",
+ "        return generate_pdf(media_path, plain_text, font_size, line_spacing, alignment, image_size)\n",
+ "    elif file_format == \"docx\":\n",
+ "        return generate_docx(media_path, plain_text, font_size, line_spacing, alignment, image_size)\n",
+ "\n",
+ "def generate_pdf(media_path, plain_text, font_size, line_spacing, alignment, image_size):\n",
+ "    \"\"\"Generates a PDF document.\"\"\"\n",
+ "    filename = f\"output_{uuid.uuid4()}.pdf\"\n",
+ "    doc = SimpleDocTemplate(\n",
+ "        filename,\n",
+ "        pagesize=A4,\n",
+ "        rightMargin=inch,\n",
+ "        leftMargin=inch,\n",
+ "        topMargin=inch,\n",
+ "        bottomMargin=inch\n",
+ "    )\n",
+ "    styles = getSampleStyleSheet()\n",
+ "    styles[\"Normal\"].fontSize = int(font_size)\n",
+ "    styles[\"Normal\"].leading = int(font_size) * line_spacing\n",
+ "    styles[\"Normal\"].alignment = {\n",
+ "        \"Left\": 0,\n",
+ "        \"Center\": 1,\n",
+ "        \"Right\": 2,\n",
+ "        \"Justified\": 4\n",
+ "    }[alignment]\n",
+ "\n",
+ "    story = []\n",
+ "\n",
+ "    # Add image with size adjustment\n",
+ "    image_sizes = {\n",
+ "        \"Small\": (200, 200),\n",
+ "        \"Medium\": (400, 400),\n",
+ "        \"Large\": (600, 600)\n",
+ "    }\n",
+ "    img = RLImage(media_path, width=image_sizes[image_size][0], height=image_sizes[image_size][1])\n",
+ "    story.append(img)\n",
+ "    story.append(Spacer(1, 12))\n",
+ "\n",
+ "    # Add plain text output\n",
+ "    text = Paragraph(plain_text, styles[\"Normal\"])\n",
+ "    story.append(text)\n",
+ "\n",
+ "    doc.build(story)\n",
+ "    return filename\n",
+ "\n",
+ "def generate_docx(media_path, plain_text, font_size, line_spacing, alignment, image_size):\n",
+ "    \"\"\"Generates a DOCX document.\"\"\"\n",
+ "    filename = f\"output_{uuid.uuid4()}.docx\"\n",
+ "    doc = docx.Document()\n",
+ "\n",
+ "    # Add image with size adjustment\n",
+ "    image_sizes = {\n",
+ "        \"Small\": docx.shared.Inches(2),\n",
+ "        \"Medium\": docx.shared.Inches(4),\n",
+ "        \"Large\": docx.shared.Inches(6)\n",
+ "    }\n",
+ "    doc.add_picture(media_path, width=image_sizes[image_size])\n",
+ "    doc.add_paragraph()\n",
+ "\n",
+ "    # Add plain text output\n",
+ "    paragraph = doc.add_paragraph()\n",
+ "    paragraph.paragraph_format.line_spacing = line_spacing\n",
+ "    paragraph.paragraph_format.alignment = {\n",
+ "        \"Left\": WD_ALIGN_PARAGRAPH.LEFT,\n",
+ "        \"Center\": WD_ALIGN_PARAGRAPH.CENTER,\n",
+ "        \"Right\": WD_ALIGN_PARAGRAPH.RIGHT,\n",
+ "        \"Justified\": WD_ALIGN_PARAGRAPH.JUSTIFY\n",
+ "    }[alignment]\n",
+ "    run = paragraph.add_run(plain_text)\n",
+ "    run.font.size = docx.shared.Pt(int(font_size))\n",
+ "\n",
+ "    doc.save(filename)\n",
+ "    return filename\n",
+ "\n",
+ "# CSS for output styling\n",
+ "css = \"\"\"\n",
+ "  #output {\n",
+ "    height: 500px;\n",
+ "    overflow: auto;\n",
+ "    border: 1px solid #ccc;\n",
+ "  }\n",
+ ".submit-btn {\n",
+ "    background-color: #cf3434 !important;\n",
+ "    color: white !important;\n",
+ "}\n",
+ ".submit-btn:hover {\n",
+ "    background-color: #ff2323 !important;\n",
+ "}\n",
+ ".download-btn {\n",
+ "    background-color: #35a6d6 !important;\n",
+ "    color: white !important;\n",
+ "}\n",
+ ".download-btn:hover {\n",
+ "    background-color: #22bcff !important;\n",
+ "}\n",
+ "\"\"\"\n",
+ "\n",
+ "# Gradio app setup\n",
+ "with gr.Blocks(css=css) as demo:\n",
+ "    gr.Markdown(\"# Qwen2VL Models: Vision and Language Processing\")\n",
+ "\n",
+ "    with gr.Tab(label=\"Image Input\"):\n",
+ "\n",
+ "        with gr.Row():\n",
+ "            with gr.Column():\n",
+ "                model_choice = gr.Dropdown(\n",
+ "                    label=\"Model Selection\",\n",
+ "                    choices=list(MODEL_OPTIONS.keys()),\n",
+ "                    value=\"OCR-KIE\"\n",
+ "                )\n",
+ "                input_media = gr.File(\n",
+ "                    label=\"Upload Image\", type=\"filepath\"\n",
+ "                )\n",
+ "                text_input = gr.Textbox(label=\"Question\", placeholder=\"Ask a question about the image...\")\n",
+ "                submit_btn = gr.Button(value=\"Submit\", elem_classes=\"submit-btn\")\n",
+ "\n",
+ "            with gr.Column():\n",
+ "                output_text = gr.Textbox(label=\"Output Text\", lines=10)\n",
+ "                plain_text_output = gr.Textbox(label=\"Standardized Plain Text\", lines=10)\n",
+ "\n",
+ "        submit_btn.click(\n",
+ "            qwen_inference, [model_choice, input_media, text_input], [output_text]\n",
+ "        ).then(\n",
+ "            lambda output_text: format_plain_text(output_text), [output_text], [plain_text_output]\n",
+ "        )\n",
+ "\n",
+ "        # Add examples directly usable by clicking\n",
+ "        with gr.Row():\n",
+ "            with gr.Column():\n",
+ "                line_spacing = gr.Dropdown(\n",
+ "                    choices=[0.5, 1.0, 1.15, 1.5, 2.0, 2.5, 3.0],\n",
+ "                    value=1.5,\n",
+ "                    label=\"Line Spacing\"\n",
+ "                )\n",
+ "                font_size = gr.Dropdown(\n",
+ "                    choices=[\"8\", \"10\", \"12\", \"14\", \"16\", \"18\", \"20\", \"22\", \"24\"],\n",
+ "                    value=\"18\",\n",
+ "                    label=\"Font Size\"\n",
+ "                )\n",
+ "                alignment = gr.Dropdown(\n",
+ "                    choices=[\"Left\", \"Center\", \"Right\", \"Justified\"],\n",
+ "                    value=\"Justified\",\n",
+ "                    label=\"Text Alignment\"\n",
+ "                )\n",
+ "                image_size = gr.Dropdown(\n",
+ "                    choices=[\"Small\", \"Medium\", \"Large\"],\n",
+ "                    value=\"Small\",\n",
+ "                    label=\"Image Size\"\n",
+ "                )\n",
+ "                file_format = gr.Radio([\"pdf\", \"docx\"], label=\"File Format\", value=\"pdf\")\n",
+ "                get_document_btn = gr.Button(value=\"Get Document\", elem_classes=\"download-btn\")\n",
+ "\n",
+ "        get_document_btn.click(\n",
+ "            generate_document, [input_media, output_text, file_format, font_size, line_spacing, alignment, image_size], gr.File(label=\"Download Document\")\n",
+ "        )\n",
+ "\n",
+ "demo.launch(debug=True)"
+ ],
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/",
+ "height": 715
+ },
+ "id": "ovBSsRFhGbs2",
+ "outputId": "2a5bc724-4eab-4167-9c52-b378ce3799e3"
+ },
+ "execution_count": 4,
+ "outputs": [
+ {
+ "output_type": "stream",
+ "name": "stderr",
+ "text": [
+ "The argument `trust_remote_code` is to be used with Auto classes. It has no effect here and is ignored.\n"
+ ]
+ },
+ {
+ "output_type": "stream",
+ "name": "stdout",
+ "text": [
+ "Loading OCR-KIE...\n",
+ "Running Gradio in a Colab notebook requires sharing enabled. Automatically setting `share=True` (you can turn this off by setting `share=False` in `launch()` explicitly).\n",
+ "\n",
+ "Colab notebook detected. This cell will run indefinitely so that you can see errors and logs. To turn off, set debug=False in launch().\n",
+ "* Running on public URL: https://49b22c4dd53a6ec06e.gradio.live\n",
+ "\n",
+ "This share link expires in 72 hours. For free permanent hosting and GPU upgrades, run `gradio deploy` from the terminal in the working directory to deploy to Hugging Face Spaces (https://huggingface.co/spaces)\n"
+ ]
+ },
+ {
+ "output_type": "display_data",
+ "data": {
+ "text/plain": [
+ "<IPython.core.display.HTML object>"
+ ],
+ "text/html": [
+ "<div><iframe src=\"https://49b22c4dd53a6ec06e.gradio.live\" width=\"100%\" height=\"500\" allow=\"autoplay; camera; microphone; clipboard-read; clipboard-write;\" frameborder=\"0\" allowfullscreen></iframe></div>"
+ ]
+ },
+ "metadata": {}
+ },
+ {
+ "output_type": "stream",
+ "name": "stdout",
+ "text": [
+ "Keyboard interruption in main thread... closing server.\n",
+ "Killing tunnel 127.0.0.1:7860 <> https://49b22c4dd53a6ec06e.gradio.live\n"
+ ]
+ },
+ {
+ "output_type": "execute_result",
+ "data": {
+ "text/plain": []
+ },
+ "metadata": {},
+ "execution_count": 4
+ }
+ ]
+ }
+ ]
+ }
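
For readers who want to call the OCR model without the Gradio UI, the sketch below distills the notebook's `qwen_inference` handler into a plain, non-streaming call. It is a minimal sketch and not part of the committed notebook: the image path `sample_receipt.png` and the prompt are placeholders, and it assumes the same `prithivMLmods/Qwen2-VL-OCR-2B-Instruct` checkpoint and a CUDA device as used above.

# Minimal, non-streaming version of the notebook's inference path (sketch).
from transformers import Qwen2VLForConditionalGeneration, AutoProcessor
from qwen_vl_utils import process_vision_info
import torch

model_id = "prithivMLmods/Qwen2-VL-OCR-2B-Instruct"
model = Qwen2VLForConditionalGeneration.from_pretrained(
    model_id, torch_dtype=torch.float16
).to("cuda").eval()
processor = AutoProcessor.from_pretrained(model_id)

# "sample_receipt.png" is a placeholder path; any local image works.
messages = [{
    "role": "user",
    "content": [
        {"type": "image", "image": "sample_receipt.png"},
        {"type": "text", "text": "Extract the key information from this image."},
    ],
}]

prompt = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
image_inputs, _ = process_vision_info(messages)
inputs = processor(text=[prompt], images=image_inputs, padding=True, return_tensors="pt").to("cuda")

with torch.no_grad():
    output_ids = model.generate(**inputs, max_new_tokens=1024)

# Drop the prompt tokens so only the generated answer is decoded.
trimmed = [out[inp.shape[0]:] for inp, out in zip(inputs.input_ids, output_ids)]
print(processor.batch_decode(trimmed, skip_special_tokens=True)[0])

The notebook instead streams tokens through TextIteratorStreamer on a background thread so the Gradio textbox updates incrementally; the final text should match this batch call.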