fashxp committed
Commit a721ac0 · 1 Parent(s): f277ad3

initial commit

Files changed (3)
  1. .gitignore +3 -0
  2. README.md +36 -1
  3. handler.py +48 -0
.gitignore ADDED
@@ -0,0 +1,3 @@
+ # PhpStorm / IDEA
+ .idea
+
README.md CHANGED
@@ -1,3 +1,38 @@
  ---
- license: apache-2.0
+ tags:
+ - vision
+ - image-to-image
+ - endpoints-template
+ inference: false
+ pipeline_tag: image-to-image
+ base_model: caidas/swin2SR-classical-sr-x2-64
+ library_name: generic
  ---
+
+ # Fork of [caidas/swin2SR-classical-sr-x2-64](https://huggingface.co/caidas/swin2SR-classical-sr-x2-64) for an `image-to-image` Inference Endpoint.
+
+ > Inspired by https://huggingface.co/sergeipetrov/swin2SR-classical-sr-x2-64-IE
+
+ This repository implements a `custom` task for `image-to-image` for 🤗 Inference Endpoints, allowing image upscaling that doubles the image resolution.
+ The code for the customized pipeline is in `handler.py`.
+
+ To deploy this model as an Inference Endpoint, you have to select `Custom` as the task so that the `handler.py` file is used.
+
+ ### Expected request payload
+
+ The image to be upscaled, sent as binary data.
+
+ #### CURL
+
+ ```
+ curl URL \
+ -X POST \
+ --data-binary @car.png \
+ -H "Content-Type: image/png"
+ ```
+
+ #### Python
+
+ ```python
+ requests.post(ENDPOINT_URL, headers={"Content-Type": "image/png"}, data=open("car.png", 'rb').read()).json()
+ ```
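The endpoint returns the upscaled image as a base64-encoded JPEG string (see `handler.py` below). A minimal client-side sketch for decoding that response, assuming the body is JSON-serialized as in the Python one-liner above; `ENDPOINT_URL` and the output filename are placeholders, not part of the repository:

```python
import base64
from io import BytesIO

import requests
from PIL import Image

ENDPOINT_URL = "https://YOUR-ENDPOINT-URL"  # placeholder

with open("car.png", "rb") as f:
    resp = requests.post(
        ENDPOINT_URL,
        headers={"Content-Type": "image/png"},
        data=f.read(),
    )

# handler.py returns a base64-encoded JPEG string; this assumes the
# endpoint serializes that string as JSON, as the README example implies.
img_b64 = resp.json()
upscaled = Image.open(BytesIO(base64.b64decode(img_b64)))
upscaled.save("car_upscaled.jpg")
```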
handler.py ADDED
@@ -0,0 +1,48 @@
+ from typing import Dict, List, Any
+ from transformers import AutoImageProcessor, Swin2SRForImageSuperResolution
+ import torch
+ import base64
+ import logging
+ import numpy as np
+ from PIL import Image
+ from io import BytesIO
+
+ logger = logging.getLogger()
+ logger.setLevel(logging.DEBUG)
+
+ # check for GPU
+ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+
+
+ class EndpointHandler:
+     def __init__(self, path=""):
+         # load the model
+         self.processor = AutoImageProcessor.from_pretrained("caidas/swin2SR-classical-sr-x2-64")
+         self.model = Swin2SRForImageSuperResolution.from_pretrained("caidas/swin2SR-classical-sr-x2-64")
+         # move model to device
+         self.model.to(device)
+
+     def __call__(self, data: Any):
+         """
+         Args:
+             data (:obj:):
+                 binary image data to be upscaled
+         Return:
+             A :obj:`str`: the upscaled image as a base64-encoded JPEG string
+         """
+
+         image = data["inputs"]
+         inputs = self.processor(image, return_tensors="pt").to(device)
+         with torch.no_grad():
+             outputs = self.model(**inputs)
+
+         output = outputs.reconstruction.data.squeeze().float().cpu().clamp_(0, 1).numpy()
+         output = np.moveaxis(output, source=0, destination=-1)
+         output = (output * 255.0).round().astype(np.uint8)
+
+         img = Image.fromarray(output)
+         buffered = BytesIO()
+         img.save(buffered, format="JPEG")
+         img_str = base64.b64encode(buffered.getvalue())
+
+         return img_str.decode()
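For a quick local check of the handler outside an Inference Endpoint, a sketch like the following can be used. It assumes a local `car.png` and that the image arrives under the `"inputs"` key, which is what `__call__` expects; the filenames are illustrative only:

```python
import base64
from io import BytesIO

from PIL import Image

from handler import EndpointHandler

# instantiate the handler (downloads the Swin2SR model on first use)
handler = EndpointHandler()

# the handler reads the image from data["inputs"]
image = Image.open("car.png").convert("RGB")
result = handler({"inputs": image})

# the result is a base64-encoded JPEG string; decode and save it
upscaled = Image.open(BytesIO(base64.b64decode(result)))
print(upscaled.size)  # roughly 2x the input resolution
upscaled.save("car_upscaled.jpg")
```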