Commit 65cdf3c ("fin?") by ferferefer
Parent: 58337af

app.py CHANGED
@@ -127,43 +127,40 @@ def run_example(files, text_input=None):
     if not files:
         return "Please upload at least one image for analysis."
 
-    # Add loading message
-    yield "Processing your images... Please wait."
-
     temp_paths = []
     processed_images = []
 
     try:
         clear_gpu_memory()
 
-        # Validate all files first
-        for file in files:
-            try:
-                validate_file(file)
-            except ValueError as e:
-                return str(e)
-
         # Process files
         for file in files:
             try:
-
-
+                # For HuggingFace Spaces, we need to handle the file path directly
+                file_path = file.name
+                file_extension = Path(file_path).suffix.lower()
+
+                if file_extension == '.pdf':
+                    # Convert PDF to images
+                    images = pdf2image.convert_from_path(file_path)
                     processed_images.extend(images)
+                else:
+                    # Handle regular image files
+                    img = Image.open(file_path)
+                    processed_images.append(img)
+
             except Exception as e:
                 return f"Error processing file {file.name}: {str(e)}"
 
         if not processed_images:
             return "No valid images were processed. Please check your files."
 
-        # Process images with progress updates
-        yield "Images loaded successfully. Starting analysis..."
-
         # Save processed images temporarily
         image_paths = []
         for idx, img in enumerate(processed_images):
             try:
                 timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
-                filename = f"temp_image_{timestamp}_{idx}.png"
+                filename = f"/tmp/temp_image_{timestamp}_{idx}.png"
                 # Resize image to reduce memory usage
                 img = img.resize((512, 512), Image.Resampling.LANCZOS)
                 img.save(filename)
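The rewritten loop drops the old validate_file() pass and instead branches on the upload's extension: PDFs are rasterized page by page with pdf2image (which needs the poppler utilities available in the Space), and everything else is opened with PIL. A minimal standalone sketch of that dispatch; load_as_images is an illustrative name, not a helper from this repo:

```python
from pathlib import Path

import pdf2image
from PIL import Image


def load_as_images(file_path):
    """Turn one uploaded file into a list of PIL images (illustrative helper)."""
    if Path(file_path).suffix.lower() == ".pdf":
        # pdf2image renders each PDF page through poppler, one image per page
        return pdf2image.convert_from_path(file_path)
    # Everything else is treated as a single raster image
    return [Image.open(file_path)]
```

The companion change of writing temp files under /tmp in the same hunk fits the Spaces container model, where /tmp is reliably writable.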
@@ -174,7 +171,7 @@ def run_example(files, text_input=None):
                 return f"Error saving processed image: {str(e)}"
 
         try:
-            # Process
+            # Process images with the model
             messages = [
                 {
                     "role": "user",
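The diff elides the body of messages between this hunk and the next. In apps that pair apply_chat_template with process_vision_info (the Qwen2-VL pattern), the list generally has the shape below; the image path and prompt text here are placeholders, not the app's actual values:

```python
# Typical structure consumed by processor.apply_chat_template and
# process_vision_info; the real content lives in the elided lines of app.py.
messages = [
    {
        "role": "user",
        "content": [
            {"type": "image", "image": "/tmp/temp_image_20240101_120000_0.png"},
            {"type": "text", "text": "Describe these images."},
        ],
    }
]
```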
@@ -188,13 +185,12 @@ def run_example(files, text_input=None):
                 }
             ]
 
-            # Process images with the model
             text = processor.apply_chat_template(
                 messages, tokenize=False, add_generation_prompt=True
            )
             image_inputs, video_inputs = process_vision_info(messages)
 
-            with torch.cuda.amp.autocast():
+            with torch.cuda.amp.autocast():
                 inputs = processor(
                     text=[text],
                     images=image_inputs,
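torch.cuda.amp.autocast() is kept here (only its placement changes), but newer PyTorch releases deprecate that spelling in favor of torch.amp.autocast. A sketch of the equivalent call, assuming half precision is the intent (the app does not pin a dtype explicitly):

```python
import torch


def run_under_autocast(model, inputs):
    # Same effect as torch.cuda.amp.autocast(), in the non-deprecated spelling;
    # float16 is an assumption, not something this commit specifies.
    with torch.amp.autocast(device_type="cuda", dtype=torch.float16):
        return model(**inputs)
```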
@@ -204,12 +200,11 @@ def run_example(files, text_input=None):
                 )
                 inputs = inputs.to("cuda")
 
-                # Generate response with reduced memory usage
                 generated_ids = model.generate(
                     **inputs,
-                    max_new_tokens=512,
-                    do_sample=False,
-                    num_beams=1,
+                    max_new_tokens=512,
+                    do_sample=False,
+                    num_beams=1,
                     pad_token_id=processor.tokenizer.pad_token_id,
                     eos_token_id=processor.tokenizer.eos_token_id,
                 )
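These generation arguments amount to deterministic greedy decoding: do_sample=False disables temperature/top-p sampling, num_beams=1 keeps a single beam, and max_new_tokens=512 bounds output length (and therefore latency and KV-cache growth). The same settings restated as a reusable dict:

```python
# Greedy, deterministic decoding: always take the highest-probability token.
generation_kwargs = dict(
    max_new_tokens=512,  # hard cap on generated tokens
    do_sample=False,     # no sampling
    num_beams=1,         # single beam, i.e. plain greedy search
)
```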
@@ -223,7 +218,6 @@ def run_example(files, text_input=None):
                     clean_up_tokenization_spaces=False
                 )
 
-                # Clean up CUDA memory
                 del inputs, generated_ids, generated_ids_trimmed
                 clear_gpu_memory()
 
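clear_gpu_memory() is defined elsewhere in app.py, outside this diff. The usual pattern behind a helper with that name is to drop Python references first (the del on the previous line) and then release PyTorch's cached CUDA blocks; a guess at its shape, clearly an assumption:

```python
import gc

import torch


def clear_gpu_memory():
    # Assumed implementation; the real definition is not shown in this commit.
    gc.collect()                  # free unreachable Python objects first
    if torch.cuda.is_available():
        torch.cuda.empty_cache()  # return cached CUDA blocks to the driver
```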
@@ -236,7 +230,6 @@ def run_example(files, text_input=None):
             return f"Error processing images: {str(e)}"
 
     finally:
-        # Clean up temporary files and GPU memory
         cleanup_temp_files(temp_paths)
         clear_gpu_memory()
 
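cleanup_temp_files(temp_paths) also lives outside this diff. Since it runs in a finally block, a sensible implementation swallows per-file errors so cleanup never masks the function's real return value; a sketch under that assumption:

```python
import os


def cleanup_temp_files(paths):
    # Assumed shape of the helper; its definition is not part of this commit.
    for path in paths:
        try:
            os.remove(path)
        except OSError:
            pass  # already gone or not removable; never raise from finally
```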
@@ -362,13 +355,5 @@ with gr.Blocks(css=css, theme=gr.themes.Soft()) as demo:
         concurrency_limit=10  # Higher limit for clear operation as it's lightweight
     )
 
-#
-demo.launch(
-    debug=False,
-    show_error=True,
-    server_name="0.0.0.0",
-    server_port=7860,
-    share=False,
-    max_threads=40,  # Set maximum number of threads
-    enable_queue=True
-)
+# Simplified launch for HuggingFace Spaces
+demo.launch()
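The bare demo.launch() is the right default on Spaces: the platform injects the host and port itself, and enable_queue is no longer accepted by launch() in Gradio 4. If queueing and error surfacing are still wanted, they are configured like this (max_size here is illustrative, not from app.py):

```python
# Queueing moved off launch() in Gradio 4; it is set on the Blocks object.
demo.queue(max_size=20)
demo.launch(show_error=True)  # Spaces supplies server_name/server_port itself
```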