promptsai commited on
Commit
2bd24e9
·
verified ·
1 Parent(s): 8b7b742

Upload 28 files

Browse files
LICENSE ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ MIT License
2
+
3
+ Copyright (c) 2020 CVLab@StonyBrook
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
__pycache__/models.cpython-39.pyc ADDED
Binary file (2.15 kB). View file
 
datasets/__init__.py ADDED
File without changes
datasets/crowd.py ADDED
@@ -0,0 +1,234 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from PIL import Image
2
+ import torch.utils.data as data
3
+ import os
4
+ from glob import glob
5
+ import torch
6
+ import torchvision.transforms.functional as F
7
+ from torchvision import transforms
8
+ import random
9
+ import numpy as np
10
+ import scipy.io as sio
11
+
12
+
13
def random_crop(im_h, im_w, crop_h, crop_w):
    """Sample a uniformly random crop window inside an im_h x im_w image.

    Returns (top, left, crop_h, crop_w); assumes the crop fits, i.e.
    crop_h <= im_h and crop_w <= im_w.
    """
    max_top = im_h - crop_h
    max_left = im_w - crop_w
    top = random.randint(0, max_top)
    left = random.randint(0, max_left)
    return top, left, crop_h, crop_w
19
+
20
+
21
def gen_discrete_map(im_height, im_width, points):
    """Build a discrete (dot) ground-truth map from head coordinates.

    points: [num_gt, 2] array, each row (x, y) i.e. (width, height).
    Returns a float32 [im_height, im_width] array whose entries count the
    annotated heads falling in each pixel (coordinates are rounded and
    clipped to the image bounds, so the total always equals num_gt).
    """
    dot_map = np.zeros([im_height, im_width], dtype=np.float32)
    h, w = dot_map.shape[:2]
    num_gt = points.shape[0]
    if num_gt == 0:
        # No annotations: all-zero map.
        return dot_map

    # Vectorized scatter: round each point, clip to the last row/column,
    # flatten to a linear index and accumulate with scatter_add_.
    coords = np.array(points).round().astype(int)
    rows = np.minimum(coords[:, 1], np.array([h - 1] * num_gt).astype(int))
    cols = np.minimum(coords[:, 0], np.array([w - 1] * num_gt).astype(int))
    flat_idx = torch.from_numpy(rows * im_width + cols)
    dot_map = (torch.zeros(im_width * im_height)
               .scatter_add_(0, index=flat_idx, src=torch.ones(im_width * im_height))
               .view(im_height, im_width)
               .numpy())

    # Sanity check: every annotation landed in exactly one cell.
    assert np.sum(dot_map) == num_gt
    return dot_map
47
+
48
+
49
class Base(data.Dataset):
    """Base crowd-counting dataset: shared crop/flip augmentation and
    ImageNet normalization. Subclasses implement __len__/__getitem__.

    crop_size must be divisible by downsample_ratio so that the discrete
    ground-truth map tiles exactly into downsampled cells.
    """

    def __init__(self, root_path, crop_size, downsample_ratio=8):
        self.root_path = root_path
        self.c_size = crop_size
        self.d_ratio = downsample_ratio
        # The crop must tile exactly into d_ratio x d_ratio cells.
        assert self.c_size % self.d_ratio == 0
        self.dc_size = self.c_size // self.d_ratio
        # ImageNet mean/std normalization (matches the VGG backbone).
        self.trans = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ])

    def __len__(self):
        pass

    def __getitem__(self, item):
        pass

    def train_transform(self, img, keypoints):
        """Random square crop + random horizontal flip.

        img: PIL image at least crop_size on its short side.
        keypoints: [N, 2] array of (x, y) head coordinates.
        Returns (image tensor, surviving keypoints [M, 2] float32,
        downsampled discrete count map [1, c/r, c/r] float32).
        """
        wd, ht = img.size
        st_size = 1.0 * min(wd, ht)
        assert st_size >= self.c_size
        assert len(keypoints) >= 0
        i, j, h, w = random_crop(ht, wd, self.c_size, self.c_size)
        img = F.crop(img, i, j, h, w)
        if len(keypoints) > 0:
            # Shift annotations into crop coordinates, drop those outside.
            keypoints = keypoints - [j, i]
            idx_mask = (keypoints[:, 0] >= 0) * (keypoints[:, 0] <= w) * \
                       (keypoints[:, 1] >= 0) * (keypoints[:, 1] <= h)
            keypoints = keypoints[idx_mask]
        else:
            keypoints = np.empty([0, 2])

        gt_discrete = gen_discrete_map(h, w, keypoints)
        down_w = w // self.d_ratio
        down_h = h // self.d_ratio
        # Pool the full-resolution dot map into d_ratio x d_ratio cells.
        gt_discrete = gt_discrete.reshape([down_h, self.d_ratio, down_w, self.d_ratio]).sum(axis=(1, 3))
        assert np.sum(gt_discrete) == len(keypoints)

        if len(keypoints) > 0:
            if random.random() > 0.5:
                img = F.hflip(img)
                gt_discrete = np.fliplr(gt_discrete)
                # BUGFIX: mirror x as w-1-x to match np.fliplr's column
                # mapping (c -> w-1-c) and Crowd_sh.train_transform; the
                # previous `w - x` mapped a point at x=0 to x=w, one pixel
                # past the right edge of the crop.
                keypoints[:, 0] = w - keypoints[:, 0] - 1
        else:
            if random.random() > 0.5:
                img = F.hflip(img)
                gt_discrete = np.fliplr(gt_discrete)
        gt_discrete = np.expand_dims(gt_discrete, 0)

        return self.trans(img), torch.from_numpy(keypoints.copy()).float(), torch.from_numpy(
            gt_discrete.copy()).float()
102
+
103
+
104
class Crowd_qnrf(Base):
    """UCF-QNRF dataset: *.jpg images paired with same-named *.npy keypoint
    files under root_path. Supports 'train' and 'val' methods."""

    def __init__(self, root_path, crop_size,
                 downsample_ratio=8,
                 method='train'):
        super().__init__(root_path, crop_size, downsample_ratio)
        self.method = method
        self.im_list = sorted(glob(os.path.join(self.root_path, '*.jpg')))
        print('number of img: {}'.format(len(self.im_list)))
        if method not in ['train', 'val']:
            raise Exception("not implement")

    def __len__(self):
        # One sample per image file.
        return len(self.im_list)

    def __getitem__(self, item):
        img_path = self.im_list[item]
        gd_path = img_path.replace('jpg', 'npy')
        img = Image.open(img_path).convert('RGB')
        keypoints = np.load(gd_path)
        if self.method == 'train':
            # Random crop/flip plus the discrete ground-truth map.
            return self.train_transform(img, keypoints)
        elif self.method == 'val':
            # Full normalized image, ground-truth count and sample name.
            name = os.path.basename(img_path).split('.')[0]
            return self.trans(img), len(keypoints), name
130
+
131
+
132
class Crowd_nwpu(Base):
    """NWPU-Crowd dataset: *.jpg images with *.npy keypoint files; also
    supports an annotation-free 'test' split."""

    def __init__(self, root_path, crop_size,
                 downsample_ratio=8,
                 method='train'):
        super().__init__(root_path, crop_size, downsample_ratio)
        self.method = method
        self.im_list = sorted(glob(os.path.join(self.root_path, '*.jpg')))
        print('number of img: {}'.format(len(self.im_list)))

        if method not in ['train', 'val', 'test']:
            raise Exception("not implement")

    def __len__(self):
        # One sample per image file.
        return len(self.im_list)

    def __getitem__(self, item):
        img_path = self.im_list[item]
        gd_path = img_path.replace('jpg', 'npy')
        img = Image.open(img_path).convert('RGB')
        name = os.path.basename(img_path).split('.')[0]
        if self.method == 'train':
            return self.train_transform(img, np.load(gd_path))
        elif self.method == 'val':
            return self.trans(img), len(np.load(gd_path)), name
        elif self.method == 'test':
            # The test split ships no annotations.
            return self.trans(img), name
163
+
164
+
165
class Crowd_sh(Base):
    """ShanghaiTech dataset: images/*.jpg with MATLAB ground truth files
    ground-truth/GT_<name>.mat holding head coordinates in 'image_info'."""

    def __init__(self, root_path, crop_size,
                 downsample_ratio=8,
                 method='train'):
        super().__init__(root_path, crop_size, downsample_ratio)
        self.method = method
        if method not in ['train', 'val']:
            raise Exception("not implement")

        self.im_list = sorted(glob(os.path.join(self.root_path, 'images', '*.jpg')))
        print('number of img: {}'.format(len(self.im_list)))

    def __len__(self):
        # One sample per image file.
        return len(self.im_list)

    def __getitem__(self, item):
        img_path = self.im_list[item]
        name = os.path.basename(img_path).split('.')[0]
        gd_path = os.path.join(self.root_path, 'ground-truth', 'GT_{}.mat'.format(name))
        img = Image.open(img_path).convert('RGB')
        # Head coordinates [num_gt, 2] (x, y) live inside the nested MATLAB
        # struct 'image_info' -- TODO confirm against the .mat layout.
        keypoints = sio.loadmat(gd_path)['image_info'][0][0][0][0][0]

        if self.method == 'train':
            return self.train_transform(img, keypoints)
        elif self.method == 'val':
            img = self.trans(img)
            return img, len(keypoints), name

    def train_transform(self, img, keypoints):
        """Crop/flip augmentation; unlike Base.train_transform this first
        upscales images whose short side is below the crop size.

        Returns (image tensor, surviving keypoints [M, 2] float32,
        downsampled discrete count map [1, c/r, c/r] float32).
        """
        wd, ht = img.size
        st_size = 1.0 * min(wd, ht)
        # resize the image to fit the crop size
        if st_size < self.c_size:
            rr = 1.0 * self.c_size / st_size
            wd = round(wd * rr)
            ht = round(ht * rr)
            st_size = 1.0 * min(wd, ht)
            img = img.resize((wd, ht), Image.BICUBIC)
            # Keypoints scale with the image.
            keypoints = keypoints * rr
        assert st_size >= self.c_size, print(wd, ht)
        assert len(keypoints) >= 0
        i, j, h, w = random_crop(ht, wd, self.c_size, self.c_size)
        img = F.crop(img, i, j, h, w)
        if len(keypoints) > 0:
            # Shift annotations into crop coordinates, drop those outside.
            keypoints = keypoints - [j, i]
            idx_mask = (keypoints[:, 0] >= 0) * (keypoints[:, 0] <= w) * \
                       (keypoints[:, 1] >= 0) * (keypoints[:, 1] <= h)
            keypoints = keypoints[idx_mask]
        else:
            keypoints = np.empty([0, 2])

        gt_discrete = gen_discrete_map(h, w, keypoints)
        down_w = w // self.d_ratio
        down_h = h // self.d_ratio
        # Pool the full-resolution dot map into d_ratio x d_ratio cells.
        gt_discrete = gt_discrete.reshape([down_h, self.d_ratio, down_w, self.d_ratio]).sum(axis=(1, 3))
        assert np.sum(gt_discrete) == len(keypoints)

        if len(keypoints) > 0:
            if random.random() > 0.5:
                img = F.hflip(img)
                gt_discrete = np.fliplr(gt_discrete)
                # Mirror x as w-1-x, matching np.fliplr's column mapping.
                keypoints[:, 0] = w - keypoints[:, 0] - 1
        else:
            if random.random() > 0.5:
                img = F.hflip(img)
                gt_discrete = np.fliplr(gt_discrete)
        gt_discrete = np.expand_dims(gt_discrete, 0)

        return self.trans(img), torch.from_numpy(keypoints.copy()).float(), torch.from_numpy(
            gt_discrete.copy()).float()
demo.py ADDED
@@ -0,0 +1,54 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Gradio demo: download a pretrained DM-Count-style VGG-19 crowd-counting
# model and serve an interface that returns a density-map visualization and
# the predicted crowd count.
import torch
from models import vgg19
import gdown
from PIL import Image
from torchvision import transforms
import gradio as gr
import cv2
import numpy as np
import scipy

# Fetch the pretrained weights (QNRF checkpoint) from Google Drive at startup.
model_path = "pretrained_models/model_qnrf.pth"
url = "https://drive.google.com/uc?id=1nnIHPaV9RGqK8JHL645zmRvkNrahD9ru"
gdown.download(url, model_path, quiet=False)

device = torch.device('cpu')  # device can be "cpu" or "gpu"

model = vgg19()
model.to(device)
model.load_state_dict(torch.load(model_path, device))
model.eval()


def predict(inp):
    """Run the model on an RGB numpy image.

    Returns (JET-colored density map as RGB uint8 array, integer count).
    """
    inp = Image.fromarray(inp.astype('uint8'), 'RGB')
    inp = transforms.ToTensor()(inp).unsqueeze(0)
    inp = inp.to(device)
    with torch.set_grad_enabled(False):
        outputs, _ = model(inp)
    # The predicted count is the integral (sum) of the density map.
    count = torch.sum(outputs).item()
    vis_img = outputs[0, 0].cpu().numpy()
    # normalize density map values from 0 to 1, then map it to 0-255.
    vis_img = (vis_img - vis_img.min()) / (vis_img.max() - vis_img.min() + 1e-5)
    vis_img = (vis_img * 255).astype(np.uint8)
    vis_img = cv2.applyColorMap(vis_img, cv2.COLORMAP_JET)
    # OpenCV produces BGR; Gradio expects RGB.
    vis_img = cv2.cvtColor(vis_img, cv2.COLOR_BGR2RGB)
    return vis_img, int(count)


inputs = gr.Image(label="Image of Crowd")
outputs = [
    gr.Image(label="Predicted Density Map"),
    gr.Label(label="Predicted Count")
]

# Assuming `title`, `desc`, and `examples` variables are defined elsewhere in your code.
title = "Your App Title"
desc = "Your App Description"

gr.Interface(fn=predict,
             inputs=inputs,
             outputs=outputs,
             title=title,
             description=desc,
             allow_flagging="never").launch(share=True)
example_images/1.png ADDED
example_images/2.png ADDED
example_images/3.png ADDED
losses/__init__.py ADDED
@@ -0,0 +1 @@
 
 
1
+
losses/bregman_pytorch.py ADDED
@@ -0,0 +1,484 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ """
3
+ Rewrite ot.bregman.sinkhorn in Python Optimal Transport (https://pythonot.github.io/_modules/ot/bregman.html#sinkhorn)
4
+ using pytorch operations.
5
+ Bregman projections for regularized OT (Sinkhorn distance).
6
+ """
7
+
8
+ import torch
9
+
10
+ M_EPS = 1e-16
11
+
12
+
13
def sinkhorn(a, b, C, reg=1e-1, method='sinkhorn', maxIter=1000, tau=1e3,
             stopThr=1e-9, verbose=False, log=True, warm_start=None, eval_freq=10, print_freq=200, **kwargs):
    """Solve entropic-regularized optimal transport with PyTorch tensors.

    Minimizes <gamma, C>_F + reg * Omega(gamma) subject to gamma 1 = a,
    gamma^T 1 = b, gamma >= 0, where Omega is the entropy of gamma.

    This is a thin dispatcher over the concrete solvers; see sinkhorn_knopp,
    sinkhorn_stabilized and sinkhorn_epsilon_scaling for per-parameter
    documentation. Raises ValueError for an unknown `method`.

    Reference: M. Cuturi, "Sinkhorn Distances: Lightspeed Computation of
    Optimal Transport", NIPS 2013.
    """
    solver = method.lower()
    if solver == 'sinkhorn':
        return sinkhorn_knopp(a, b, C, reg, maxIter=maxIter,
                              stopThr=stopThr, verbose=verbose, log=log,
                              warm_start=warm_start, eval_freq=eval_freq, print_freq=print_freq,
                              **kwargs)
    if solver == 'sinkhorn_stabilized':
        return sinkhorn_stabilized(a, b, C, reg, maxIter=maxIter, tau=tau,
                                   stopThr=stopThr, verbose=verbose, log=log,
                                   warm_start=warm_start, eval_freq=eval_freq, print_freq=print_freq,
                                   **kwargs)
    if solver == 'sinkhorn_epsilon_scaling':
        # NOTE: inner-iteration and scaling parameters are fixed here, and the
        # caller's `verbose` flag is not forwarded (matches historic behavior).
        return sinkhorn_epsilon_scaling(a, b, C, reg,
                                        maxIter=maxIter, maxInnerIter=100, tau=tau,
                                        scaling_base=0.75, scaling_coef=None, stopThr=stopThr,
                                        verbose=False, log=log, warm_start=warm_start, eval_freq=eval_freq,
                                        print_freq=print_freq, **kwargs)
    raise ValueError("Unknown method '%s'." % method)
86
+
87
+
88
def sinkhorn_knopp(a, b, C, reg=1e-1, maxIter=1000, stopThr=1e-9,
                   verbose=False, log=False, warm_start=None, eval_freq=10, print_freq=200, **kwargs):
    """Sinkhorn-Knopp matrix scaling for entropic-regularized OT (Cuturi 2013).

    Solves gamma = argmin <gamma, C>_F + reg * Omega(gamma) subject to
    gamma 1 = a, gamma^T 1 = b, gamma >= 0.

    Parameters
    ----------
    a : torch.tensor (na,)
        samples measure in the target domain
    b : torch.tensor (nb,)
        samples in the source domain
    C : torch.tensor (na,nb)
        loss matrix
    reg : float
        Regularization term > 0
    maxIter : int, optional
        Max number of iterations
    stopThr : float, optional
        Stop threshold on the marginal error ( > 0 )
    verbose : bool, optional
        Print information along iterations
    log : bool, optional
        record log if True
    warm_start : dict, optional
        {'u': ..., 'v': ...} scaling vectors to resume from

    Returns
    -------
    gamma : (na x nb) torch.tensor
        Optimal transportation matrix for the given parameters
    log : dict
        log dictionary returned only if log==True in parameters
    """

    device = a.device
    na, nb = C.shape

    assert na >= 1 and nb >= 1, 'C needs to be 2d'
    assert na == a.shape[0] and nb == b.shape[0], "Shape of a or b does't match that of C"
    assert reg > 0, 'reg should be greater than 0'
    assert a.min() >= 0. and b.min() >= 0., 'Elements in a or b less than 0'

    if log:
        log = {'err': []}

    # Scaling vectors: resume from a warm start if given, else uniform.
    if warm_start is not None:
        u = warm_start['u']
        v = warm_start['v']
    else:
        u = torch.ones(na, dtype=a.dtype).to(device) / na
        v = torch.ones(nb, dtype=b.dtype).to(device) / nb

    # Gibbs kernel K = exp(-C / reg), computed in place to save memory.
    K = torch.empty(C.shape, dtype=C.dtype).to(device)
    torch.div(C, -reg, out=K)
    torch.exp(K, out=K)

    b_hat = torch.empty(b.shape, dtype=C.dtype).to(device)

    it = 1
    err = 1

    # allocate memory beforehand
    KTu = torch.empty(v.shape, dtype=v.dtype).to(device)
    Kv = torch.empty(u.shape, dtype=u.dtype).to(device)

    while (err > stopThr and it <= maxIter):
        upre, vpre = u, v
        # Alternating updates v = b / (K^T u), u = a / (K v);
        # M_EPS guards against division by zero.
        torch.matmul(u, K, out=KTu)
        v = torch.div(b, KTu + M_EPS)
        torch.matmul(K, v, out=Kv)
        u = torch.div(a, Kv + M_EPS)

        # On overflow/NaN, roll back to the previous iterate and stop.
        if torch.any(torch.isnan(u)) or torch.any(torch.isnan(v)) or \
                torch.any(torch.isinf(u)) or torch.any(torch.isinf(v)):
            print('Warning: numerical errors at iteration', it)
            u, v = upre, vpre
            break

        if log and it % eval_freq == 0:
            # we can speed up the process by checking for the error only all
            # the eval_freq iterations
            # below is equivalent to:
            # b_hat = torch.sum(u.reshape(-1, 1) * K * v.reshape(1, -1), 0)
            # but with more memory efficient
            b_hat = torch.matmul(u, K) * v
            err = (b - b_hat).pow(2).sum().item()
            # err = (b - b_hat).abs().sum().item()
            log['err'].append(err)

        if verbose and it % print_freq == 0:
            print('iteration {:5d}, constraint error {:5e}'.format(it, err))

        it += 1

    if log:
        log['u'] = u
        log['v'] = v
        # Dual potentials recovered from the scaling vectors.
        log['alpha'] = reg * torch.log(u + M_EPS)
        log['beta'] = reg * torch.log(v + M_EPS)

    # transport plan
    P = u.reshape(-1, 1) * K * v.reshape(1, -1)
    if log:
        return P, log
    else:
        return P
212
+
213
+
214
def sinkhorn_stabilized(a, b, C, reg=1e-1, maxIter=1000, tau=1e3, stopThr=1e-9,
                        verbose=False, log=False, warm_start=None, eval_freq=10, print_freq=200, **kwargs):
    """Entropic-regularized OT with log-domain stabilization.

    Sinkhorn-Knopp with absorption of large scaling values into dual
    potentials (Schmitzer 2019, Algo 3.1; Chizat et al. 2016), on top of
    Cuturi's (2013) scheme.

    Parameters
    ----------
    a : torch.tensor (na,)
        samples measure in the target domain
    b : torch.tensor (nb,)
        samples in the source domain
    C : torch.tensor (na,nb)
        loss matrix
    reg : float
        Regularization term > 0
    tau : float
        threshold for max value in u or v triggering log-domain absorption
    maxIter : int, optional
        Max number of iterations
    stopThr : float, optional
        Stop threshold on the marginal error ( > 0 )
    verbose : bool, optional
        Print information along iterations
    log : bool, optional
        record log if True
    warm_start : dict, optional
        {'alpha': ..., 'beta': ...} dual potentials to resume from

    Returns
    -------
    gamma : (na x nb) torch.tensor
        Optimal transportation matrix for the given parameters
    log : dict
        log dictionary returned only if log==True in parameters
    """

    device = a.device
    na, nb = C.shape

    assert na >= 1 and nb >= 1, 'C needs to be 2d'
    assert na == a.shape[0] and nb == b.shape[0], "Shape of a or b does't match that of C"
    assert reg > 0, 'reg should be greater than 0'
    assert a.min() >= 0. and b.min() >= 0., 'Elements in a or b less than 0'

    if log:
        log = {'err': []}

    # Dual potentials (alpha, beta) absorb large values of the scaling
    # vectors (u, v) so the kernel stays numerically bounded.
    if warm_start is not None:
        alpha = warm_start['alpha']
        beta = warm_start['beta']
    else:
        alpha = torch.zeros(na, dtype=a.dtype).to(device)
        beta = torch.zeros(nb, dtype=b.dtype).to(device)

    u = torch.ones(na, dtype=a.dtype).to(device) / na
    v = torch.ones(nb, dtype=b.dtype).to(device) / nb

    def update_K(alpha, beta):
        """log space computation"""
        """memory efficient"""
        # K = exp((alpha_i + beta_j - C_ij) / reg), built fully in place.
        torch.add(alpha.reshape(-1, 1), beta.reshape(1, -1), out=K)
        torch.add(K, -C, out=K)
        torch.div(K, reg, out=K)
        torch.exp(K, out=K)

    def update_P(alpha, beta, u, v, ab_updated=False):
        """log space P (gamma) computation"""
        # P = exp((alpha_i + beta_j - C_ij)/reg) scaled by u_i * v_j; the
        # u/v factors are skipped right after an absorption step (u, v were
        # just reset to constants).
        torch.add(alpha.reshape(-1, 1), beta.reshape(1, -1), out=P)
        torch.add(P, -C, out=P)
        torch.div(P, reg, out=P)
        if not ab_updated:
            torch.add(P, torch.log(u + M_EPS).reshape(-1, 1), out=P)
            torch.add(P, torch.log(v + M_EPS).reshape(1, -1), out=P)
        torch.exp(P, out=P)

    K = torch.empty(C.shape, dtype=C.dtype).to(device)
    update_K(alpha, beta)

    b_hat = torch.empty(b.shape, dtype=C.dtype).to(device)

    it = 1
    err = 1
    ab_updated = False

    # allocate memory beforehand
    KTu = torch.empty(v.shape, dtype=v.dtype).to(device)
    Kv = torch.empty(u.shape, dtype=u.dtype).to(device)
    P = torch.empty(C.shape, dtype=C.dtype).to(device)

    while (err > stopThr and it <= maxIter):
        upre, vpre = u, v
        # Standard Sinkhorn updates against the stabilized kernel.
        torch.matmul(u, K, out=KTu)
        v = torch.div(b, KTu + M_EPS)
        torch.matmul(K, v, out=Kv)
        u = torch.div(a, Kv + M_EPS)

        ab_updated = False
        # remove numerical problems and store them in K
        # (absorption: fold u, v into alpha, beta and reset u, v to uniform)
        if u.abs().sum() > tau or v.abs().sum() > tau:
            alpha += reg * torch.log(u + M_EPS)
            beta += reg * torch.log(v + M_EPS)
            u.fill_(1. / na)
            v.fill_(1. / nb)
            update_K(alpha, beta)
            ab_updated = True

        if log and it % eval_freq == 0:
            # we can speed up the process by checking for the error only all
            # the eval_freq iterations
            update_P(alpha, beta, u, v, ab_updated)
            b_hat = torch.sum(P, 0)
            err = (b - b_hat).pow(2).sum().item()
            log['err'].append(err)

        if verbose and it % print_freq == 0:
            print('iteration {:5d}, constraint error {:5e}'.format(it, err))

        it += 1

    if log:
        log['u'] = u
        log['v'] = v
        # Combine absorbed potentials with the residual scalings.
        log['alpha'] = alpha + reg * torch.log(u + M_EPS)
        log['beta'] = beta + reg * torch.log(v + M_EPS)

    # transport plan
    update_P(alpha, beta, u, v, False)

    if log:
        return P, log
    else:
        return P
368
+
369
+
370
def sinkhorn_epsilon_scaling(a, b, C, reg=1e-1, maxIter=100, maxInnerIter=100, tau=1e3, scaling_base=0.75,
                             scaling_coef=None, stopThr=1e-9, verbose=False, log=False, warm_start=None, eval_freq=10,
                             print_freq=200, **kwargs):
    """Entropic-regularized OT with epsilon (regularizer) scaling.

    Repeatedly runs the log-stabilized Sinkhorn solver while geometrically
    shrinking the regularizer from ~max(C) down to `reg`
    (Schmitzer 2019, Algo 3.2; Cuturi 2013; Chizat et al. 2016).

    Parameters
    ----------
    a : torch.tensor (na,)
        samples measure in the target domain
    b : torch.tensor (nb,)
        samples in the source domain
    C : torch.tensor (na,nb)
        loss matrix
    reg : float
        final regularization term > 0
    maxIter : int, optional
        max number of outer (scaling) iterations
    maxInnerIter : int, optional
        max number of inner Sinkhorn iterations per scaling step
    scaling_base : float
        geometric decay factor of the regularizer schedule
    scaling_coef : float, optional
        initial regularizer; defaults to C.max() + reg
    tau, stopThr, verbose, log, eval_freq, print_freq :
        see sinkhorn_stabilized

    Returns
    -------
    gamma : (na x nb) torch.tensor
        Optimal transportation matrix for the given parameters
    log : dict
        log dictionary returned only if log==True in parameters
    """

    na, nb = C.shape

    assert na >= 1 and nb >= 1, 'C needs to be 2d'
    assert na == a.shape[0] and nb == b.shape[0], "Shape of a or b does't match that of C"
    assert reg > 0, 'reg should be greater than 0'
    assert a.min() >= 0. and b.min() >= 0., 'Elements in a or b less than 0'

    def get_reg(it, reg, pre_reg):
        # Geometric schedule: the first step uses scaling_coef, then each
        # step moves a factor scaling_base closer to the target reg.
        if it == 1:
            return scaling_coef
        else:
            if (pre_reg - reg) * scaling_base < M_EPS:
                return reg
            else:
                return (pre_reg - reg) * scaling_base + reg

    if scaling_coef is None:
        scaling_coef = C.max() + reg

    it = 1
    err = 1
    running_reg = scaling_coef

    if log:
        log = {'err': []}

    # NOTE(review): the caller-supplied warm_start is discarded here and the
    # schedule always starts cold -- confirm whether this is intentional.
    warm_start = None

    while (err > stopThr and it <= maxIter):
        running_reg = get_reg(it, reg, running_reg)
        P, _log = sinkhorn_stabilized(a, b, C, running_reg, maxIter=maxInnerIter, tau=tau,
                                      stopThr=stopThr, verbose=False, log=True,
                                      warm_start=warm_start, eval_freq=eval_freq, print_freq=print_freq,
                                      **kwargs)

        # Warm-start the next scaling step from the current dual potentials.
        warm_start = {}
        warm_start['alpha'] = _log['alpha']
        warm_start['beta'] = _log['beta']

        # Duality gap used as the outer stopping criterion.
        primal_val = (C * P).sum() + reg * (P * torch.log(P)).sum() - reg * P.sum()
        dual_val = (_log['alpha'] * a).sum() + (_log['beta'] * b).sum() - reg * P.sum()
        err = primal_val - dual_val
        # BUGFIX: only record the error when logging is enabled; the original
        # unconditional log['err'].append(err) raised TypeError ('bool'
        # object is not subscriptable) whenever log=False (the default).
        if log:
            log['err'].append(err)

        if verbose and it % print_freq == 0:
            print('iteration {:5d}, constraint error {:5e}'.format(it, err))

        it += 1

    if log:
        log['alpha'] = _log['alpha']
        log['beta'] = _log['beta']
        return P, log
    else:
        return P
losses/ot_loss.py ADDED
@@ -0,0 +1,67 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ from torch.nn import Module
3
+ from .bregman_pytorch import sinkhorn
4
+
5
class OT_Loss(Module):
    """Optimal-transport loss between a predicted density map and ground-truth
    head points (DM-Count style), solved via entropic-regularized Sinkhorn.

    Args:
        c_size: training crop size in pixels.
        stride: pixels per density-map cell (crop output stride).
        norm_cood: if truthy, map all coordinates to [-1, 1].
        device: torch device for internal buffers.
        num_of_iter_in_ot: Sinkhorn iteration budget per sample.
        reg: entropic regularization strength.
    """

    def __init__(self, c_size, stride, norm_cood, device, num_of_iter_in_ot=100, reg=10.0):
        super(OT_Loss, self).__init__()
        # The crop must split evenly into stride-sized cells.
        assert c_size % stride == 0

        self.c_size = c_size
        self.device = device
        self.norm_cood = norm_cood
        self.num_of_iter_in_ot = num_of_iter_in_ot
        self.reg = reg

        # coordinate is same to image space, set to constant since crop size is same
        # (cell centers: stride/2, 3*stride/2, ...)
        self.cood = torch.arange(0, c_size, step=stride,
                                 dtype=torch.float32, device=device) + stride / 2
        self.density_size = self.cood.size(0)
        self.cood.unsqueeze_(0)  # [1, #cood]
        if self.norm_cood:
            self.cood = self.cood / c_size * 2 - 1  # map to [-1, 1]
        self.output_size = self.cood.size(1)


    def forward(self, normed_density, unnormed_density, points):
        """Compute the OT loss for a batch.

        Args:
            normed_density: [B, 1, H, W] density normalized to sum ~1 per sample.
            unnormed_density: [B, 1, H, W] raw predicted density.
            points: length-B list of [num_gt, 2] (x, y) head coordinates.

        Returns:
            (loss, wd, ot_obj_values): surrogate loss tensor whose gradient
            w.r.t. the prediction equals the OT gradient, accumulated
            Wasserstein distance (float), and OT objective value tensor.
        """
        batch_size = normed_density.size(0)
        assert len(points) == batch_size
        assert self.output_size == normed_density.size(2)
        loss = torch.zeros([1]).to(self.device)
        ot_obj_values = torch.zeros([1]).to(self.device)
        wd = 0  # wasserstain distance
        for idx, im_points in enumerate(points):
            if len(im_points) > 0:
                # compute l2 square distance, it should be source target distance. [#gt, #cood * #cood]
                if self.norm_cood:
                    im_points = im_points / self.c_size * 2 - 1  # map to [-1, 1]
                # NOTE(review): unsqueeze_ mutates these column views in
                # place -- verify callers do not reuse `points` afterwards.
                x = im_points[:, 0].unsqueeze_(1)  # [#gt, 1]
                y = im_points[:, 1].unsqueeze_(1)
                # Expanded form of (x - cood)^2 per axis.
                x_dis = -2 * torch.matmul(x, self.cood) + x * x + self.cood * self.cood  # [#gt, #cood]
                y_dis = -2 * torch.matmul(y, self.cood) + y * y + self.cood * self.cood
                y_dis.unsqueeze_(2)
                x_dis.unsqueeze_(1)
                dis = y_dis + x_dis
                dis = dis.view((dis.size(0), -1))  # size of [#gt, #cood * #cood]

                source_prob = normed_density[idx][0].view([-1]).detach()
                target_prob = (torch.ones([len(im_points)]) / len(im_points)).to(self.device)
                # use sinkhorn to solve OT, compute optimal beta.
                P, log = sinkhorn(target_prob, source_prob, dis, self.reg, maxIter=self.num_of_iter_in_ot, log=True)
                beta = log['beta']  # size is the same as source_prob: [#cood * #cood]
                ot_obj_values += torch.sum(normed_density[idx] * beta.view([1, self.output_size, self.output_size]))
                # compute the gradient of OT loss to predicted density (unnormed_density).
                # im_grad = beta / source_count - < beta, source_density> / (source_count)^2
                source_density = unnormed_density[idx][0].view([-1]).detach()
                source_count = source_density.sum()
                im_grad_1 = (source_count) / (source_count * source_count+1e-8) * beta  # size of [#cood * #cood]
                im_grad_2 = (source_density * beta).sum() / (source_count * source_count + 1e-8)  # size of 1
                im_grad = im_grad_1 - im_grad_2
                im_grad = im_grad.detach().view([1, self.output_size, self.output_size])
                # Define loss = <im_grad, predicted density>. The gradient of loss w.r.t prediced density is im_grad.
                loss += torch.sum(unnormed_density[idx] * im_grad)
                wd += torch.sum(dis * P).item()

        return loss, wd, ot_obj_values
66
+
67
+
models.py ADDED
@@ -0,0 +1,57 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch.nn as nn
2
+ import torch.utils.model_zoo as model_zoo
3
+ from torch.nn import functional as F
4
+
5
+ __all__ = ['vgg19']
6
+ model_urls = {
7
+ 'vgg19': 'https://download.pytorch.org/models/vgg19-dcbb9e9d.pth',
8
+ }
9
+
10
+ class VGG(nn.Module):
11
+ def __init__(self, features):
12
+ super(VGG, self).__init__()
13
+ self.features = features
14
+ self.reg_layer = nn.Sequential(
15
+ nn.Conv2d(512, 256, kernel_size=3, padding=1),
16
+ nn.ReLU(inplace=True),
17
+ nn.Conv2d(256, 128, kernel_size=3, padding=1),
18
+ nn.ReLU(inplace=True),
19
+ )
20
+ self.density_layer = nn.Sequential(nn.Conv2d(128, 1, 1), nn.ReLU())
21
+
22
+ def forward(self, x):
23
+ x = self.features(x)
24
+ x = F.upsample_bilinear(x, scale_factor=2)
25
+ x = self.reg_layer(x)
26
+ mu = self.density_layer(x)
27
+ B, C, H, W = mu.size()
28
+ mu_sum = mu.view([B, -1]).sum(1).unsqueeze(1).unsqueeze(2).unsqueeze(3)
29
+ mu_normed = mu / (mu_sum + 1e-6)
30
+ return mu, mu_normed
31
+
32
+ def make_layers(cfg, batch_norm=False):
33
+ layers = []
34
+ in_channels = 3
35
+ for v in cfg:
36
+ if v == 'M':
37
+ layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
38
+ else:
39
+ conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)
40
+ if batch_norm:
41
+ layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]
42
+ else:
43
+ layers += [conv2d, nn.ReLU(inplace=True)]
44
+ in_channels = v
45
+ return nn.Sequential(*layers)
46
+
47
+ cfg = {
48
+ 'E': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512]
49
+ }
50
+
51
+ def vgg19():
52
+ """VGG 19-layer model (configuration "E")
53
+ model pre-trained on ImageNet
54
+ """
55
+ model = VGG(make_layers(cfg['E']))
56
+ model.load_state_dict(model_zoo.load_url(model_urls['vgg19']), strict=False)
57
+ return model
preprocess/__init__.py ADDED
File without changes
preprocess/preprocess_dataset_nwpu.py ADDED
@@ -0,0 +1,137 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from scipy.io import loadmat
2
+ from PIL import Image
3
+ import numpy as np
4
+ import os
5
+ import cv2
6
+
7
+
8
+ def cal_new_size_v2(im_h, im_w, min_size, max_size):
9
+ rate = 1.0 * max_size / im_h
10
+ rate_w = im_w * rate
11
+ if rate_w > max_size:
12
+ rate = 1.0 * max_size / im_w
13
+ tmp_h = int(1.0 * im_h * rate / 16) * 16
14
+
15
+ if tmp_h < min_size:
16
+ rate = 1.0 * min_size / im_h
17
+ tmp_w = int(1.0 * im_w * rate / 16) * 16
18
+
19
+ if tmp_w < min_size:
20
+ rate = 1.0 * min_size / im_w
21
+ tmp_h = min(max(int(1.0 * im_h * rate / 16) * 16, min_size), max_size)
22
+ tmp_w = min(max(int(1.0 * im_w * rate / 16) * 16, min_size), max_size)
23
+
24
+ rate_h = 1.0 * tmp_h / im_h
25
+ rate_w = 1.0 * tmp_w / im_w
26
+ assert tmp_h >= min_size and tmp_h <= max_size
27
+ assert tmp_w >= min_size and tmp_w <= max_size
28
+ return tmp_h, tmp_w, rate_h, rate_w
29
+
30
+
31
+ def gen_density_map_gaussian(im_height, im_width, points, sigma=4):
32
+ """
33
+ func: generate the density map.
34
+ points: [num_gt, 2], for each row: [width, height]
35
+ """
36
+ density_map = np.zeros([im_height, im_width], dtype=np.float32)
37
+ h, w = density_map.shape[:2]
38
+ num_gt = np.squeeze(points).shape[0]
39
+ if num_gt == 0:
40
+ return density_map
41
+ for p in points:
42
+ p = np.round(p).astype(int)
43
+ p[0], p[1] = min(h - 1, p[1]), min(w - 1, p[0])
44
+ gaussian_radius = sigma * 2 - 1
45
+ gaussian_map = np.multiply(
46
+ cv2.getGaussianKernel(int(gaussian_radius * 2 + 1), sigma),
47
+ cv2.getGaussianKernel(int(gaussian_radius * 2 + 1), sigma).T
48
+ )
49
+ x_left, x_right, y_up, y_down = 0, gaussian_map.shape[1], 0, gaussian_map.shape[0]
50
+ # cut the gaussian kernel
51
+ if p[1] < gaussian_radius:
52
+ x_left = gaussian_radius - p[1]
53
+ if p[0] < gaussian_radius:
54
+ y_up = gaussian_radius - p[0]
55
+ if p[1] + gaussian_radius >= w:
56
+ x_right = gaussian_map.shape[1] - (gaussian_radius + p[1] - w) - 1
57
+ if p[0] + gaussian_radius >= h:
58
+ y_down = gaussian_map.shape[0] - (gaussian_radius + p[0] - h) - 1
59
+ gaussian_map = gaussian_map[y_up:y_down, x_left:x_right]
60
+ if np.sum(gaussian_map):
61
+ gaussian_map = gaussian_map / np.sum(gaussian_map)
62
+ density_map[
63
+ max(0, p[0] - gaussian_radius):min(h, p[0] + gaussian_radius + 1),
64
+ max(0, p[1] - gaussian_radius):min(w, p[1] + gaussian_radius + 1)
65
+ ] += gaussian_map
66
+ density_map = density_map / (np.sum(density_map / num_gt))
67
+ return density_map
68
+
69
+
70
+ def generate_data(im_path, mat_path, min_size, max_size):
71
+ im = Image.open(im_path).convert('RGB')
72
+ im_w, im_h = im.size
73
+ points = loadmat(mat_path)['annPoints'].astype(np.float32)
74
+ if len(points) > 0: # some image has no crowd
75
+ idx_mask = (points[:, 0] >= 0) * (points[:, 0] <= im_w) * (points[:, 1] >= 0) * (points[:, 1] <= im_h)
76
+ points = points[idx_mask]
77
+ im_h, im_w, rr_h, rr_w = cal_new_size_v2(im_h, im_w, min_size, max_size)
78
+ im = np.array(im)
79
+ if rr_h != 1.0 or rr_w != 1.0:
80
+ im = cv2.resize(np.array(im), (im_w, im_h), cv2.INTER_CUBIC)
81
+ if len(points) > 0: # some image has no crowd
82
+ points[:, 0] = points[:, 0] * rr_w
83
+ points[:, 1] = points[:, 1] * rr_h
84
+
85
+ density_map = gen_density_map_gaussian(im_h, im_w, points, sigma=8)
86
+ return Image.fromarray(im), points, density_map
87
+
88
+
89
+ def generate_image(im_path, min_size, max_size):
90
+ im = Image.open(im_path).convert('RGB')
91
+ im_w, im_h = im.size
92
+ im_h, im_w, rr_h, rr_w = cal_new_size_v2(im_h, im_w, min_size, max_size)
93
+ im = np.array(im)
94
+ if rr_h != 1.0 or rr_w != 1.0:
95
+ im = cv2.resize(np.array(im), (im_w, im_h), cv2.INTER_CUBIC)
96
+ return Image.fromarray(im)
97
+
98
+
99
+ def main(input_dataset_path, output_dataset_path, min_size=384, max_size=1920):
100
+ ori_img_path = os.path.join(input_dataset_path, 'images')
101
+ ori_anno_path = os.path.join(input_dataset_path, 'mats')
102
+
103
+ for phase in ['train', 'val']:
104
+ sub_save_dir = os.path.join(output_dataset_path, phase)
105
+ if not os.path.exists(sub_save_dir):
106
+ os.makedirs(sub_save_dir)
107
+ with open(os.path.join(input_dataset_path, '{}.txt'.format(phase))) as f:
108
+ lines = f.readlines()
109
+ for i in lines:
110
+ i = i.strip().split(' ')[0]
111
+ im_path = os.path.join(ori_img_path, i + '.jpg')
112
+ mat_path = os.path.join(ori_anno_path, i + '.mat')
113
+ name = os.path.basename(im_path)
114
+ im_save_path = os.path.join(sub_save_dir, name)
115
+ print(name)
116
+ # The Gaussian smoothed density map is just for visualization. It's not used in training.
117
+ im, points, density_map = generate_data(im_path, mat_path, min_size, max_size)
118
+ im.save(im_save_path)
119
+ gd_save_path = im_save_path.replace('jpg', 'npy')
120
+ np.save(gd_save_path, points)
121
+ dm_save_path = im_save_path.replace('.jpg', '_densitymap.npy')
122
+ np.save(dm_save_path, density_map)
123
+
124
+ for phase in ['test']:
125
+ sub_save_dir = os.path.join(output_dataset_path, phase)
126
+ if not os.path.exists(sub_save_dir):
127
+ os.makedirs(sub_save_dir)
128
+ with open(os.path.join(input_dataset_path, '{}.txt'.format(phase))) as f:
129
+ lines = f.readlines()
130
+ for i in lines:
131
+ i = i.strip().split(' ')[0]
132
+ im_path = os.path.join(ori_img_path, i + '.jpg')
133
+ name = os.path.basename(im_path)
134
+ im_save_path = os.path.join(sub_save_dir, name)
135
+ print(name)
136
+ im = generate_image(im_path, min_size, max_size)
137
+ im.save(im_save_path)
preprocess/preprocess_dataset_qnrf.py ADDED
@@ -0,0 +1,82 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from scipy.io import loadmat
2
+ from PIL import Image
3
+ import numpy as np
4
+ import os
5
+ from glob import glob
6
+ import cv2
7
+
8
+ dir_name = os.path.dirname(os.path.abspath(__file__))
9
+
10
+ def cal_new_size(im_h, im_w, min_size, max_size):
11
+ if im_h < im_w:
12
+ if im_h < min_size:
13
+ ratio = 1.0 * min_size / im_h
14
+ im_h = min_size
15
+ im_w = round(im_w * ratio)
16
+ elif im_h > max_size:
17
+ ratio = 1.0 * max_size / im_h
18
+ im_h = max_size
19
+ im_w = round(im_w * ratio)
20
+ else:
21
+ ratio = 1.0
22
+ else:
23
+ if im_w < min_size:
24
+ ratio = 1.0 * min_size / im_w
25
+ im_w = min_size
26
+ im_h = round(im_h * ratio)
27
+ elif im_w > max_size:
28
+ ratio = 1.0 * max_size / im_w
29
+ im_w = max_size
30
+ im_h = round(im_h * ratio)
31
+ else:
32
+ ratio = 1.0
33
+ return im_h, im_w, ratio
34
+
35
+
36
+ def generate_data(im_path, min_size, max_size):
37
+ im = Image.open(im_path)
38
+ im_w, im_h = im.size
39
+ mat_path = im_path.replace('.jpg', '_ann.mat')
40
+ points = loadmat(mat_path)['annPoints'].astype(np.float32)
41
+ idx_mask = (points[:, 0] >= 0) * (points[:, 0] <= im_w) * (points[:, 1] >= 0) * (points[:, 1] <= im_h)
42
+ points = points[idx_mask]
43
+ im_h, im_w, rr = cal_new_size(im_h, im_w, min_size, max_size)
44
+ im = np.array(im)
45
+ if rr != 1.0:
46
+ im = cv2.resize(np.array(im), (im_w, im_h), cv2.INTER_CUBIC)
47
+ points = points * rr
48
+ return Image.fromarray(im), points
49
+
50
+
51
+ def main(input_dataset_path, output_dataset_path, min_size=512, max_size=2048):
52
+ for phase in ['Train', 'Test']:
53
+ sub_dir = os.path.join(input_dataset_path, phase)
54
+ if phase == 'Train':
55
+ sub_phase_list = ['train', 'val']
56
+ for sub_phase in sub_phase_list:
57
+ sub_save_dir = os.path.join(output_dataset_path, sub_phase)
58
+ if not os.path.exists(sub_save_dir):
59
+ os.makedirs(sub_save_dir)
60
+ with open(os.path.join(dir_name, 'qnrf_{}.txt'.format(sub_phase))) as f:
61
+ for i in f:
62
+ im_path = os.path.join(sub_dir, i.strip())
63
+ name = os.path.basename(im_path)
64
+ print(name)
65
+ im, points = generate_data(im_path, min_size, max_size)
66
+ im_save_path = os.path.join(sub_save_dir, name)
67
+ im.save(im_save_path)
68
+ gd_save_path = im_save_path.replace('jpg', 'npy')
69
+ np.save(gd_save_path, points)
70
+ else:
71
+ sub_save_dir = os.path.join(output_dataset_path, 'test')
72
+ if not os.path.exists(sub_save_dir):
73
+ os.makedirs(sub_save_dir)
74
+ im_list = glob(os.path.join(sub_dir, '*jpg'))
75
+ for im_path in im_list:
76
+ name = os.path.basename(im_path)
77
+ print(name)
78
+ im, points = generate_data(im_path, min_size, max_size)
79
+ im_save_path = os.path.join(sub_save_dir, name)
80
+ im.save(im_save_path)
81
+ gd_save_path = im_save_path.replace('jpg', 'npy')
82
+ np.save(gd_save_path, points)
preprocess/qnrf_train.txt ADDED
@@ -0,0 +1,1081 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ img_0526.jpg
2
+ img_0639.jpg
3
+ img_0826.jpg
4
+ img_0415.jpg
5
+ img_0720.jpg
6
+ img_0123.jpg
7
+ img_0529.jpg
8
+ img_1071.jpg
9
+ img_0501.jpg
10
+ img_0804.jpg
11
+ img_0873.jpg
12
+ img_0601.jpg
13
+ img_0177.jpg
14
+ img_0173.jpg
15
+ img_0675.jpg
16
+ img_1001.jpg
17
+ img_0096.jpg
18
+ img_1139.jpg
19
+ img_0001.jpg
20
+ img_0084.jpg
21
+ img_0395.jpg
22
+ img_0166.jpg
23
+ img_0368.jpg
24
+ img_0093.jpg
25
+ img_0004.jpg
26
+ img_0572.jpg
27
+ img_0956.jpg
28
+ img_0721.jpg
29
+ img_0120.jpg
30
+ img_0554.jpg
31
+ img_0308.jpg
32
+ img_0131.jpg
33
+ img_0992.jpg
34
+ img_0156.jpg
35
+ img_0532.jpg
36
+ img_0476.jpg
37
+ img_0427.jpg
38
+ img_1162.jpg
39
+ img_0660.jpg
40
+ img_0538.jpg
41
+ img_0298.jpg
42
+ img_0306.jpg
43
+ img_1173.jpg
44
+ img_1157.jpg
45
+ img_0777.jpg
46
+ img_0859.jpg
47
+ img_0537.jpg
48
+ img_0236.jpg
49
+ img_0986.jpg
50
+ img_0370.jpg
51
+ img_0491.jpg
52
+ img_1150.jpg
53
+ img_0719.jpg
54
+ img_1083.jpg
55
+ img_0107.jpg
56
+ img_1029.jpg
57
+ img_0927.jpg
58
+ img_0893.jpg
59
+ img_0286.jpg
60
+ img_1135.jpg
61
+ img_0640.jpg
62
+ img_0530.jpg
63
+ img_1115.jpg
64
+ img_0533.jpg
65
+ img_0105.jpg
66
+ img_0945.jpg
67
+ img_1035.jpg
68
+ img_0484.jpg
69
+ img_1168.jpg
70
+ img_0760.jpg
71
+ img_0939.jpg
72
+ img_0907.jpg
73
+ img_0401.jpg
74
+ img_0429.jpg
75
+ img_0828.jpg
76
+ img_1167.jpg
77
+ img_0144.jpg
78
+ img_0553.jpg
79
+ img_0421.jpg
80
+ img_0560.jpg
81
+ img_0743.jpg
82
+ img_0817.jpg
83
+ img_0657.jpg
84
+ img_0106.jpg
85
+ img_0079.jpg
86
+ img_0473.jpg
87
+ img_0865.jpg
88
+ img_0730.jpg
89
+ img_0989.jpg
90
+ img_0243.jpg
91
+ img_0182.jpg
92
+ img_0252.jpg
93
+ img_0812.jpg
94
+ img_0508.jpg
95
+ img_0744.jpg
96
+ img_0439.jpg
97
+ img_0181.jpg
98
+ img_0965.jpg
99
+ img_0487.jpg
100
+ img_0710.jpg
101
+ img_1054.jpg
102
+ img_0947.jpg
103
+ img_0321.jpg
104
+ img_0758.jpg
105
+ img_0014.jpg
106
+ img_0504.jpg
107
+ img_0674.jpg
108
+ img_0991.jpg
109
+ img_0358.jpg
110
+ img_1138.jpg
111
+ img_0019.jpg
112
+ img_0677.jpg
113
+ img_0336.jpg
114
+ img_0070.jpg
115
+ img_0766.jpg
116
+ img_0612.jpg
117
+ img_1109.jpg
118
+ img_0840.jpg
119
+ img_0616.jpg
120
+ img_0926.jpg
121
+ img_0376.jpg
122
+ img_0761.jpg
123
+ img_0020.jpg
124
+ img_0795.jpg
125
+ img_0046.jpg
126
+ img_0459.jpg
127
+ img_0267.jpg
128
+ img_0428.jpg
129
+ img_1122.jpg
130
+ img_0247.jpg
131
+ img_1143.jpg
132
+ img_0290.jpg
133
+ img_0524.jpg
134
+ img_0275.jpg
135
+ img_1120.jpg
136
+ img_0115.jpg
137
+ img_0698.jpg
138
+ img_0092.jpg
139
+ img_0922.jpg
140
+ img_1052.jpg
141
+ img_0297.jpg
142
+ img_0112.jpg
143
+ img_0180.jpg
144
+ img_0520.jpg
145
+ img_0351.jpg
146
+ img_0478.jpg
147
+ img_0588.jpg
148
+ img_0109.jpg
149
+ img_0738.jpg
150
+ img_0592.jpg
151
+ img_0752.jpg
152
+ img_1028.jpg
153
+ img_1164.jpg
154
+ img_0450.jpg
155
+ img_0168.jpg
156
+ img_1108.jpg
157
+ img_0799.jpg
158
+ img_0649.jpg
159
+ img_0272.jpg
160
+ img_0902.jpg
161
+ img_0874.jpg
162
+ img_0870.jpg
163
+ img_0821.jpg
164
+ img_0153.jpg
165
+ img_0426.jpg
166
+ img_0949.jpg
167
+ img_0527.jpg
168
+ img_1198.jpg
169
+ img_0443.jpg
170
+ img_0063.jpg
171
+ img_0013.jpg
172
+ img_0564.jpg
173
+ img_0040.jpg
174
+ img_0764.jpg
175
+ img_0411.jpg
176
+ img_0118.jpg
177
+ img_1172.jpg
178
+ img_0196.jpg
179
+ img_0879.jpg
180
+ img_0985.jpg
181
+ img_0437.jpg
182
+ img_0918.jpg
183
+ img_0493.jpg
184
+ img_0271.jpg
185
+ img_0860.jpg
186
+ img_0059.jpg
187
+ img_0645.jpg
188
+ img_1126.jpg
189
+ img_0911.jpg
190
+ img_1082.jpg
191
+ img_0383.jpg
192
+ img_0422.jpg
193
+ img_0139.jpg
194
+ img_1192.jpg
195
+ img_0904.jpg
196
+ img_0503.jpg
197
+ img_0512.jpg
198
+ img_0541.jpg
199
+ img_0330.jpg
200
+ img_0348.jpg
201
+ img_0425.jpg
202
+ img_0673.jpg
203
+ img_0210.jpg
204
+ img_0950.jpg
205
+ img_0151.jpg
206
+ img_0792.jpg
207
+ img_0469.jpg
208
+ img_0661.jpg
209
+ img_0003.jpg
210
+ img_0089.jpg
211
+ img_0312.jpg
212
+ img_0555.jpg
213
+ img_0215.jpg
214
+ img_0023.jpg
215
+ img_1129.jpg
216
+ img_0249.jpg
217
+ img_0451.jpg
218
+ img_1032.jpg
219
+ img_0689.jpg
220
+ img_1189.jpg
221
+ img_0391.jpg
222
+ img_0146.jpg
223
+ img_0653.jpg
224
+ img_0248.jpg
225
+ img_0695.jpg
226
+ img_0402.jpg
227
+ img_0075.jpg
228
+ img_1018.jpg
229
+ img_1020.jpg
230
+ img_0163.jpg
231
+ img_0440.jpg
232
+ img_0756.jpg
233
+ img_0253.jpg
234
+ img_0712.jpg
235
+ img_0962.jpg
236
+ img_0471.jpg
237
+ img_0842.jpg
238
+ img_0525.jpg
239
+ img_1176.jpg
240
+ img_1021.jpg
241
+ img_0127.jpg
242
+ img_0295.jpg
243
+ img_1045.jpg
244
+ img_1088.jpg
245
+ img_1090.jpg
246
+ img_0622.jpg
247
+ img_0650.jpg
248
+ img_0518.jpg
249
+ img_0854.jpg
250
+ img_0262.jpg
251
+ img_0323.jpg
252
+ img_0522.jpg
253
+ img_0933.jpg
254
+ img_0951.jpg
255
+ img_0366.jpg
256
+ img_0325.jpg
257
+ img_1034.jpg
258
+ img_0827.jpg
259
+ img_0194.jpg
260
+ img_0636.jpg
261
+ img_0051.jpg
262
+ img_0683.jpg
263
+ img_0558.jpg
264
+ img_0309.jpg
265
+ img_0345.jpg
266
+ img_0438.jpg
267
+ img_1091.jpg
268
+ img_0577.jpg
269
+ img_0500.jpg
270
+ img_0279.jpg
271
+ img_1145.jpg
272
+ img_0886.jpg
273
+ img_1161.jpg
274
+ img_0617.jpg
275
+ img_0726.jpg
276
+ img_0620.jpg
277
+ img_0444.jpg
278
+ img_1118.jpg
279
+ img_0506.jpg
280
+ img_0164.jpg
281
+ img_0507.jpg
282
+ img_0614.jpg
283
+ img_0769.jpg
284
+ img_1131.jpg
285
+ img_0185.jpg
286
+ img_0694.jpg
287
+ img_1055.jpg
288
+ img_0754.jpg
289
+ img_0569.jpg
290
+ img_0317.jpg
291
+ img_0228.jpg
292
+ img_0492.jpg
293
+ img_1190.jpg
294
+ img_0566.jpg
295
+ img_0921.jpg
296
+ img_0818.jpg
297
+ img_0204.jpg
298
+ img_0974.jpg
299
+ img_0866.jpg
300
+ img_1039.jpg
301
+ img_0101.jpg
302
+ img_0169.jpg
303
+ img_0375.jpg
304
+ img_0334.jpg
305
+ img_1078.jpg
306
+ img_0061.jpg
307
+ img_0113.jpg
308
+ img_0981.jpg
309
+ img_0080.jpg
310
+ img_0324.jpg
311
+ img_0316.jpg
312
+ img_0643.jpg
313
+ img_0408.jpg
314
+ img_0890.jpg
315
+ img_0363.jpg
316
+ img_0765.jpg
317
+ img_0822.jpg
318
+ img_0430.jpg
319
+ img_0245.jpg
320
+ img_0671.jpg
321
+ img_0486.jpg
322
+ img_1201.jpg
323
+ img_0129.jpg
324
+ img_1142.jpg
325
+ img_0843.jpg
326
+ img_1133.jpg
327
+ img_0238.jpg
328
+ img_0955.jpg
329
+ img_1017.jpg
330
+ img_0858.jpg
331
+ img_1154.jpg
332
+ img_0559.jpg
333
+ img_0002.jpg
334
+ img_0407.jpg
335
+ img_1146.jpg
336
+ img_1086.jpg
337
+ img_0495.jpg
338
+ img_0857.jpg
339
+ img_0133.jpg
340
+ img_0121.jpg
341
+ img_0973.jpg
342
+ img_0830.jpg
343
+ img_0165.jpg
344
+ img_0278.jpg
345
+ img_1012.jpg
346
+ img_0393.jpg
347
+ img_0202.jpg
348
+ img_0700.jpg
349
+ img_0313.jpg
350
+ img_0024.jpg
351
+ img_0055.jpg
352
+ img_0979.jpg
353
+ img_0162.jpg
354
+ img_0135.jpg
355
+ img_0098.jpg
356
+ img_0727.jpg
357
+ img_0969.jpg
358
+ img_1137.jpg
359
+ img_0932.jpg
360
+ img_1102.jpg
361
+ img_0301.jpg
362
+ img_0047.jpg
363
+ img_0595.jpg
364
+ img_0805.jpg
365
+ img_0801.jpg
366
+ img_1151.jpg
367
+ img_0387.jpg
368
+ img_0999.jpg
369
+ img_0136.jpg
370
+ img_1037.jpg
371
+ img_1087.jpg
372
+ img_1186.jpg
373
+ img_0032.jpg
374
+ img_0195.jpg
375
+ img_0360.jpg
376
+ img_0276.jpg
377
+ img_0642.jpg
378
+ img_0913.jpg
379
+ img_0231.jpg
380
+ img_0670.jpg
381
+ img_1123.jpg
382
+ img_0517.jpg
383
+ img_0707.jpg
384
+ img_0088.jpg
385
+ img_0594.jpg
386
+ img_0838.jpg
387
+ img_0848.jpg
388
+ img_0354.jpg
389
+ img_0936.jpg
390
+ img_0876.jpg
391
+ img_1081.jpg
392
+ img_0322.jpg
393
+ img_0637.jpg
394
+ img_0739.jpg
395
+ img_0917.jpg
396
+ img_0244.jpg
397
+ img_0591.jpg
398
+ img_0628.jpg
399
+ img_0964.jpg
400
+ img_0691.jpg
401
+ img_0609.jpg
402
+ img_0342.jpg
403
+ img_1097.jpg
404
+ img_1077.jpg
405
+ img_0502.jpg
406
+ img_0423.jpg
407
+ img_0561.jpg
408
+ img_1059.jpg
409
+ img_0568.jpg
410
+ img_0920.jpg
411
+ img_0389.jpg
412
+ img_0940.jpg
413
+ img_0787.jpg
414
+ img_0634.jpg
415
+ img_0516.jpg
416
+ img_0900.jpg
417
+ img_0463.jpg
418
+ img_0942.jpg
419
+ img_0796.jpg
420
+ img_0835.jpg
421
+ img_0789.jpg
422
+ img_0184.jpg
423
+ img_0397.jpg
424
+ img_1195.jpg
425
+ img_1089.jpg
426
+ img_0319.jpg
427
+ img_0328.jpg
428
+ img_0724.jpg
429
+ img_0852.jpg
430
+ img_0662.jpg
431
+ img_0225.jpg
432
+ img_0479.jpg
433
+ img_0266.jpg
434
+ img_0499.jpg
435
+ img_0134.jpg
436
+ img_1023.jpg
437
+ img_1064.jpg
438
+ img_0400.jpg
439
+ img_0226.jpg
440
+ img_0015.jpg
441
+ img_0203.jpg
442
+ img_0548.jpg
443
+ img_1084.jpg
444
+ img_0970.jpg
445
+ img_0718.jpg
446
+ img_0138.jpg
447
+ img_0095.jpg
448
+ img_0831.jpg
449
+ img_0482.jpg
450
+ img_1000.jpg
451
+ img_0234.jpg
452
+ img_0183.jpg
453
+ img_0687.jpg
454
+ img_0923.jpg
455
+ img_0197.jpg
456
+ img_1016.jpg
457
+ img_1100.jpg
458
+ img_0034.jpg
459
+ img_0587.jpg
460
+ img_0229.jpg
461
+ img_1178.jpg
462
+ img_0124.jpg
463
+ img_0424.jpg
464
+ img_0496.jpg
465
+ img_0179.jpg
466
+ img_1110.jpg
467
+ img_0998.jpg
468
+ img_0742.jpg
469
+ img_0578.jpg
470
+ img_0207.jpg
471
+ img_0305.jpg
472
+ img_0373.jpg
473
+ img_0971.jpg
474
+ img_0292.jpg
475
+ img_0861.jpg
476
+ img_0621.jpg
477
+ img_0414.jpg
478
+ img_1140.jpg
479
+ img_0737.jpg
480
+ img_0176.jpg
481
+ img_1057.jpg
482
+ img_1095.jpg
483
+ img_0667.jpg
484
+ img_0755.jpg
485
+ img_0318.jpg
486
+ img_0170.jpg
487
+ img_0418.jpg
488
+ img_0178.jpg
489
+ img_1200.jpg
490
+ img_0021.jpg
491
+ img_0652.jpg
492
+ img_0327.jpg
493
+ img_0627.jpg
494
+ img_1051.jpg
495
+ img_0837.jpg
496
+ img_0352.jpg
497
+ img_0029.jpg
498
+ img_0833.jpg
499
+ img_0952.jpg
500
+ img_0488.jpg
501
+ img_0474.jpg
502
+ img_0702.jpg
503
+ img_0819.jpg
504
+ img_1188.jpg
505
+ img_0261.jpg
506
+ img_0685.jpg
507
+ img_1024.jpg
508
+ img_0008.jpg
509
+ img_0734.jpg
510
+ img_0509.jpg
511
+ img_0888.jpg
512
+ img_0676.jpg
513
+ img_0404.jpg
514
+ img_1046.jpg
515
+ img_1127.jpg
516
+ img_1008.jpg
517
+ img_0161.jpg
518
+ img_0699.jpg
519
+ img_0085.jpg
520
+ img_0703.jpg
521
+ img_0083.jpg
522
+ img_0934.jpg
523
+ img_0626.jpg
524
+ img_1170.jpg
525
+ img_1065.jpg
526
+ img_0664.jpg
527
+ img_0883.jpg
528
+ img_0655.jpg
529
+ img_0263.jpg
530
+ img_1005.jpg
531
+ img_1061.jpg
532
+ img_0333.jpg
533
+ img_0881.jpg
534
+ img_1041.jpg
535
+ img_0540.jpg
536
+ img_1185.jpg
537
+ img_0953.jpg
538
+ img_0586.jpg
539
+ img_1011.jpg
540
+ img_0846.jpg
541
+ img_0149.jpg
542
+ img_1075.jpg
543
+ img_0894.jpg
544
+ img_0759.jpg
545
+ img_1177.jpg
546
+ img_0258.jpg
547
+ img_0171.jpg
548
+ img_0740.jpg
549
+ img_0006.jpg
550
+ img_0353.jpg
551
+ img_0615.jpg
552
+ img_0810.jpg
553
+ img_0142.jpg
554
+ img_0958.jpg
555
+ img_0584.jpg
556
+ img_0390.jpg
557
+ img_0585.jpg
558
+ img_0365.jpg
559
+ img_0026.jpg
560
+ img_0458.jpg
561
+ img_0143.jpg
562
+ img_0575.jpg
563
+ img_1027.jpg
564
+ img_1183.jpg
565
+ img_0535.jpg
566
+ img_0891.jpg
567
+ img_1085.jpg
568
+ img_0757.jpg
569
+ img_0549.jpg
570
+ img_0436.jpg
571
+ img_0815.jpg
572
+ img_0635.jpg
573
+ img_0954.jpg
574
+ img_0367.jpg
575
+ img_0064.jpg
576
+ img_0410.jpg
577
+ img_0277.jpg
578
+ img_1111.jpg
579
+ img_1025.jpg
580
+ img_0434.jpg
581
+ img_1175.jpg
582
+ img_1171.jpg
583
+ img_0610.jpg
584
+ img_0618.jpg
585
+ img_0208.jpg
586
+ img_0281.jpg
587
+ img_0058.jpg
588
+ img_0851.jpg
589
+ img_0300.jpg
590
+ img_0017.jpg
591
+ img_0110.jpg
592
+ img_0265.jpg
593
+ img_0362.jpg
594
+ img_1038.jpg
595
+ img_0580.jpg
596
+ img_1096.jpg
597
+ img_0972.jpg
598
+ img_0666.jpg
599
+ img_0090.jpg
600
+ img_1007.jpg
601
+ img_0982.jpg
602
+ img_0287.jpg
603
+ img_0714.jpg
604
+ img_0218.jpg
605
+ img_0832.jpg
606
+ img_0145.jpg
607
+ img_0072.jpg
608
+ img_0222.jpg
609
+ img_0137.jpg
610
+ img_0741.jpg
611
+ img_0028.jpg
612
+ img_0413.jpg
613
+ img_0232.jpg
614
+ img_0573.jpg
615
+ img_0849.jpg
616
+ img_0855.jpg
617
+ img_0770.jpg
618
+ img_0283.jpg
619
+ img_0914.jpg
620
+ img_0611.jpg
621
+ img_1047.jpg
622
+ img_0596.jpg
623
+ img_0706.jpg
624
+ img_0847.jpg
625
+ img_0868.jpg
626
+ img_0193.jpg
627
+ img_0780.jpg
628
+ img_0100.jpg
629
+ img_0786.jpg
630
+ img_0337.jpg
631
+ img_0728.jpg
632
+ img_0656.jpg
633
+ img_0602.jpg
634
+ img_1015.jpg
635
+ img_0273.jpg
636
+ img_0797.jpg
637
+ img_0398.jpg
638
+ img_0693.jpg
639
+ img_0944.jpg
640
+ img_0593.jpg
641
+ img_0768.jpg
642
+ img_0995.jpg
643
+ img_1125.jpg
644
+ img_0078.jpg
645
+ img_0543.jpg
646
+ img_0167.jpg
647
+ img_0420.jpg
648
+ img_0264.jpg
649
+ img_0016.jpg
650
+ img_0599.jpg
651
+ img_0417.jpg
652
+ img_0448.jpg
653
+ img_0748.jpg
654
+ img_0311.jpg
655
+ img_0071.jpg
656
+ img_0749.jpg
657
+ img_0941.jpg
658
+ img_0237.jpg
659
+ img_0214.jpg
660
+ img_1149.jpg
661
+ img_0241.jpg
662
+ img_0461.jpg
663
+ img_0018.jpg
664
+ img_0356.jpg
665
+ img_0483.jpg
666
+ img_0099.jpg
667
+ img_0130.jpg
668
+ img_0372.jpg
669
+ img_0800.jpg
670
+ img_0654.jpg
671
+ img_0544.jpg
672
+ img_1099.jpg
673
+ img_1068.jpg
674
+ img_0326.jpg
675
+ img_0374.jpg
676
+ img_0074.jpg
677
+ img_0938.jpg
678
+ img_0117.jpg
679
+ img_0456.jpg
680
+ img_0901.jpg
681
+ img_0713.jpg
682
+ img_0788.jpg
683
+ img_0665.jpg
684
+ img_0294.jpg
685
+ img_0841.jpg
686
+ img_0269.jpg
687
+ img_0579.jpg
688
+ img_1098.jpg
689
+ img_0466.jpg
690
+ img_0480.jpg
691
+ img_0709.jpg
692
+ img_0672.jpg
693
+ img_1010.jpg
694
+ img_0314.jpg
695
+ img_0043.jpg
696
+ img_0349.jpg
697
+ img_0172.jpg
698
+ img_1187.jpg
699
+ img_0371.jpg
700
+ img_0320.jpg
701
+ img_1103.jpg
702
+ img_1159.jpg
703
+ img_0629.jpg
704
+ img_0399.jpg
705
+ img_0663.jpg
706
+ img_0335.jpg
707
+ img_1148.jpg
708
+ img_0108.jpg
709
+ img_0254.jpg
710
+ img_0432.jpg
711
+ img_0915.jpg
712
+ img_0624.jpg
713
+ img_0997.jpg
714
+ img_0711.jpg
715
+ img_0704.jpg
716
+ img_1147.jpg
717
+ img_0036.jpg
718
+ img_0519.jpg
719
+ img_0680.jpg
720
+ img_0498.jpg
721
+ img_0651.jpg
722
+ img_0230.jpg
723
+ img_0198.jpg
724
+ img_0905.jpg
725
+ img_0751.jpg
726
+ img_0928.jpg
727
+ img_0630.jpg
728
+ img_0140.jpg
729
+ img_0644.jpg
730
+ img_0776.jpg
731
+ img_0057.jpg
732
+ img_0361.jpg
733
+ img_0209.jpg
734
+ img_0158.jpg
735
+ img_1160.jpg
736
+ img_1169.jpg
737
+ img_0735.jpg
738
+ img_0551.jpg
739
+ img_0681.jpg
740
+ img_0515.jpg
741
+ img_0077.jpg
742
+ img_0968.jpg
743
+ img_0240.jpg
744
+ img_1166.jpg
745
+ img_0937.jpg
746
+ img_0877.jpg
747
+ img_0513.jpg
748
+ img_0528.jpg
749
+ img_0150.jpg
750
+ img_1165.jpg
751
+ img_0200.jpg
752
+ img_0246.jpg
753
+ img_0869.jpg
754
+ img_0011.jpg
755
+ img_0160.jpg
756
+ img_0464.jpg
757
+ img_0285.jpg
758
+ img_0132.jpg
759
+ img_0701.jpg
760
+ img_0082.jpg
761
+ img_1182.jpg
762
+ img_0030.jpg
763
+ img_0126.jpg
764
+ img_0632.jpg
765
+ img_0731.jpg
766
+ img_0875.jpg
767
+ img_0978.jpg
768
+ img_0717.jpg
769
+ img_0460.jpg
770
+ img_1044.jpg
771
+ img_1194.jpg
772
+ img_0910.jpg
773
+ img_0049.jpg
774
+ img_0331.jpg
775
+ img_0213.jpg
776
+ img_0885.jpg
777
+ img_0468.jpg
778
+ img_0419.jpg
779
+ img_1158.jpg
780
+ img_0022.jpg
781
+ img_0174.jpg
782
+ img_0747.jpg
783
+ img_1006.jpg
784
+ img_0381.jpg
785
+ img_1036.jpg
786
+ img_0863.jpg
787
+ img_0994.jpg
788
+ img_0783.jpg
789
+ img_0346.jpg
790
+ img_0233.jpg
791
+ img_0820.jpg
792
+ img_1107.jpg
793
+ img_1193.jpg
794
+ img_0943.jpg
795
+ img_1191.jpg
796
+ img_0005.jpg
797
+ img_0087.jpg
798
+ img_0039.jpg
799
+ img_0813.jpg
800
+ img_0239.jpg
801
+ img_0206.jpg
802
+ img_0256.jpg
803
+ img_1070.jpg
804
+ img_0409.jpg
805
+ img_0377.jpg
806
+ img_0446.jpg
807
+ img_0216.jpg
808
+ img_0189.jpg
809
+ img_0785.jpg
810
+ img_0041.jpg
811
+ img_0598.jpg
812
+ img_0310.jpg
813
+ img_0307.jpg
814
+ img_1093.jpg
815
+ img_0465.jpg
816
+ img_0746.jpg
817
+ img_0380.jpg
818
+ img_0732.jpg
819
+ img_0781.jpg
820
+ img_0906.jpg
821
+ img_0619.jpg
822
+ img_0604.jpg
823
+ img_0983.jpg
824
+ img_0753.jpg
825
+ img_0211.jpg
826
+ img_0552.jpg
827
+ img_0892.jpg
828
+ img_0767.jpg
829
+ img_1180.jpg
830
+ img_1069.jpg
831
+ img_0154.jpg
832
+ img_0899.jpg
833
+ img_0343.jpg
834
+ img_0025.jpg
835
+ img_1196.jpg
836
+ img_0155.jpg
837
+ img_0433.jpg
838
+ img_0597.jpg
839
+ img_0570.jpg
840
+ img_0867.jpg
841
+ img_0223.jpg
842
+ img_0581.jpg
843
+ img_0186.jpg
844
+ img_0122.jpg
845
+ img_1134.jpg
846
+ img_0340.jpg
847
+ img_0957.jpg
848
+ img_0364.jpg
849
+ img_0069.jpg
850
+ img_1114.jpg
851
+ img_0646.jpg
852
+ img_0679.jpg
853
+ img_0623.jpg
854
+ img_0392.jpg
855
+ img_0814.jpg
856
+ img_0589.jpg
857
+ img_0299.jpg
858
+ img_0931.jpg
859
+ img_0836.jpg
860
+ img_0963.jpg
861
+ img_0094.jpg
862
+ img_0987.jpg
863
+ img_0930.jpg
864
+ img_0976.jpg
865
+ img_0924.jpg
866
+ img_0384.jpg
867
+ img_0035.jpg
868
+ img_0076.jpg
869
+ img_1101.jpg
870
+ img_0405.jpg
871
+ img_0350.jpg
872
+ img_0147.jpg
873
+ img_0659.jpg
874
+ img_1013.jpg
875
+ img_0948.jpg
876
+ img_0066.jpg
877
+ img_1132.jpg
878
+ img_0829.jpg
879
+ img_0690.jpg
880
+ img_1060.jpg
881
+ img_0457.jpg
882
+ img_0897.jpg
883
+ img_0825.jpg
884
+ img_1163.jpg
885
+ img_0803.jpg
886
+ img_0563.jpg
887
+ img_0574.jpg
888
+ img_0175.jpg
889
+ img_1112.jpg
890
+ img_0668.jpg
891
+ img_0045.jpg
892
+ img_0259.jpg
893
+ img_0341.jpg
894
+ img_1067.jpg
895
+ img_1040.jpg
896
+ img_1106.jpg
897
+ img_0205.jpg
898
+ img_0296.jpg
899
+ img_0255.jpg
900
+ img_1152.jpg
901
+ img_0772.jpg
902
+ img_0613.jpg
903
+ img_1121.jpg
904
+ img_0834.jpg
905
+ img_0406.jpg
906
+ img_0762.jpg
907
+ img_0442.jpg
908
+ img_0192.jpg
909
+ img_0044.jpg
910
+ img_0774.jpg
911
+ img_0606.jpg
912
+ img_0359.jpg
913
+ img_0467.jpg
914
+ img_0779.jpg
915
+ img_0060.jpg
916
+ img_1074.jpg
917
+ img_0494.jpg
918
+ img_1153.jpg
919
+ img_0102.jpg
920
+ img_0582.jpg
921
+ img_0386.jpg
922
+ img_0212.jpg
923
+ img_0625.jpg
924
+ img_0844.jpg
925
+ img_0872.jpg
926
+ img_1105.jpg
927
+ img_0396.jpg
928
+ img_1119.jpg
929
+ img_0052.jpg
930
+ img_0454.jpg
931
+ img_1179.jpg
932
+ img_0862.jpg
933
+ img_0481.jpg
934
+ img_1026.jpg
935
+ img_0511.jpg
936
+ img_0912.jpg
937
+ img_1124.jpg
938
+ img_0148.jpg
939
+ img_0960.jpg
940
+ img_0523.jpg
941
+ img_0531.jpg
942
+ img_0729.jpg
943
+ img_0571.jpg
944
+ img_0908.jpg
945
+ img_0889.jpg
946
+ img_0188.jpg
947
+ img_0037.jpg
948
+ img_0716.jpg
949
+ img_1014.jpg
950
+ img_0394.jpg
951
+ img_1056.jpg
952
+ img_0462.jpg
953
+ img_0850.jpg
954
+ img_0784.jpg
955
+ img_1002.jpg
956
+ img_0763.jpg
957
+ img_0159.jpg
958
+ img_0009.jpg
959
+ img_0708.jpg
960
+ img_1050.jpg
961
+ img_0678.jpg
962
+ img_0648.jpg
963
+ img_0010.jpg
964
+ img_1031.jpg
965
+ img_0445.jpg
966
+ img_0355.jpg
967
+ img_1117.jpg
968
+ img_0378.jpg
969
+ img_0550.jpg
970
+ img_0217.jpg
971
+ img_0260.jpg
972
+ img_0816.jpg
973
+ img_0996.jpg
974
+ img_0081.jpg
975
+ img_0878.jpg
976
+ img_0199.jpg
977
+ img_0431.jpg
978
+ img_1144.jpg
979
+ img_0688.jpg
980
+ img_0745.jpg
981
+ img_0686.jpg
982
+ img_1042.jpg
983
+ img_0187.jpg
984
+ img_1066.jpg
985
+ img_0682.jpg
986
+ img_0048.jpg
987
+ img_0896.jpg
988
+ img_0608.jpg
989
+ img_1003.jpg
990
+ img_1156.jpg
991
+ img_0723.jpg
992
+ img_0692.jpg
993
+ img_0220.jpg
994
+ img_0993.jpg
995
+ img_1197.jpg
996
+ img_0447.jpg
997
+ img_0369.jpg
998
+ img_0056.jpg
999
+ img_0807.jpg
1000
+ img_0315.jpg
1001
+ img_0567.jpg
1002
+ img_0452.jpg
1003
+ img_1128.jpg
1004
+ img_0647.jpg
1005
+ img_0242.jpg
1006
+ img_0201.jpg
1007
+ img_0497.jpg
1008
+ img_0031.jpg
1009
+ img_0771.jpg
1010
+ img_0547.jpg
1011
+ img_0705.jpg
1012
+ img_0725.jpg
1013
+ img_1058.jpg
1014
+ img_0053.jpg
1015
+ img_1043.jpg
1016
+ img_0722.jpg
1017
+ img_0435.jpg
1018
+ img_0284.jpg
1019
+ img_0583.jpg
1020
+ img_0882.jpg
1021
+ img_0111.jpg
1022
+ img_0959.jpg
1023
+ img_1076.jpg
1024
+ img_0880.jpg
1025
+ img_0224.jpg
1026
+ img_0977.jpg
1027
+ img_0270.jpg
1028
+ img_0793.jpg
1029
+ img_0603.jpg
1030
+ img_1116.jpg
1031
+ img_0304.jpg
1032
+ img_0884.jpg
1033
+ img_1136.jpg
1034
+ img_0235.jpg
1035
+ img_0412.jpg
1036
+ img_0980.jpg
1037
+ img_0988.jpg
1038
+ img_0773.jpg
1039
+ img_1174.jpg
1040
+ img_0562.jpg
1041
+ img_0871.jpg
1042
+ img_0798.jpg
1043
+ img_0453.jpg
1044
+ img_0696.jpg
1045
+ img_0104.jpg
1046
+ img_0607.jpg
1047
+ img_0669.jpg
1048
+ img_0293.jpg
1049
+ img_1141.jpg
1050
+ img_0329.jpg
1051
+ img_0534.jpg
1052
+ img_1113.jpg
1053
+ img_0288.jpg
1054
+ img_0961.jpg
1055
+ img_0388.jpg
1056
+ img_0073.jpg
1057
+ img_0141.jpg
1058
+ img_0935.jpg
1059
+ img_1062.jpg
1060
+ img_0227.jpg
1061
+ img_0895.jpg
1062
+ img_0449.jpg
1063
+ img_0565.jpg
1064
+ img_1009.jpg
1065
+ img_0282.jpg
1066
+ img_0806.jpg
1067
+ img_1033.jpg
1068
+ img_0332.jpg
1069
+ img_0903.jpg
1070
+ img_0475.jpg
1071
+ img_0050.jpg
1072
+ img_0455.jpg
1073
+ img_0845.jpg
1074
+ img_0946.jpg
1075
+ img_0490.jpg
1076
+ img_0274.jpg
1077
+ img_0909.jpg
1078
+ img_0966.jpg
1079
+ img_0219.jpg
1080
+ img_0898.jpg
1081
+ img_0403.jpg
preprocess/qnrf_val.txt ADDED
@@ -0,0 +1,120 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ img_0042.jpg
2
+ img_0697.jpg
3
+ img_0012.jpg
4
+ img_0062.jpg
5
+ img_0990.jpg
6
+ img_1048.jpg
7
+ img_0576.jpg
8
+ img_0802.jpg
9
+ img_0116.jpg
10
+ img_0119.jpg
11
+ img_0967.jpg
12
+ img_0054.jpg
13
+ img_0782.jpg
14
+ img_0514.jpg
15
+ img_0929.jpg
16
+ img_0809.jpg
17
+ img_0033.jpg
18
+ img_0125.jpg
19
+ img_0633.jpg
20
+ img_0038.jpg
21
+ img_0775.jpg
22
+ img_0600.jpg
23
+ img_0157.jpg
24
+ img_0824.jpg
25
+ img_0103.jpg
26
+ img_0984.jpg
27
+ img_0250.jpg
28
+ img_0505.jpg
29
+ img_0631.jpg
30
+ img_0556.jpg
31
+ img_1049.jpg
32
+ img_1181.jpg
33
+ img_0097.jpg
34
+ img_0536.jpg
35
+ img_1104.jpg
36
+ img_0733.jpg
37
+ img_1130.jpg
38
+ img_0808.jpg
39
+ img_0086.jpg
40
+ img_0302.jpg
41
+ img_0114.jpg
42
+ img_0470.jpg
43
+ img_0715.jpg
44
+ img_0641.jpg
45
+ img_0557.jpg
46
+ img_0510.jpg
47
+ img_0152.jpg
48
+ img_0485.jpg
49
+ img_0190.jpg
50
+ img_0065.jpg
51
+ img_0839.jpg
52
+ img_0068.jpg
53
+ img_0864.jpg
54
+ img_0477.jpg
55
+ img_0441.jpg
56
+ img_0546.jpg
57
+ img_0091.jpg
58
+ img_0853.jpg
59
+ img_0975.jpg
60
+ img_0357.jpg
61
+ img_1004.jpg
62
+ img_0794.jpg
63
+ img_0750.jpg
64
+ img_0791.jpg
65
+ img_0605.jpg
66
+ img_0590.jpg
67
+ img_0489.jpg
68
+ img_0191.jpg
69
+ img_0007.jpg
70
+ img_0778.jpg
71
+ img_0658.jpg
72
+ img_0289.jpg
73
+ img_0925.jpg
74
+ img_1184.jpg
75
+ img_0521.jpg
76
+ img_0291.jpg
77
+ img_0823.jpg
78
+ img_0382.jpg
79
+ img_0416.jpg
80
+ img_0736.jpg
81
+ img_0268.jpg
82
+ img_0128.jpg
83
+ img_0280.jpg
84
+ img_1022.jpg
85
+ img_0545.jpg
86
+ img_0257.jpg
87
+ img_0251.jpg
88
+ img_0684.jpg
89
+ img_1092.jpg
90
+ img_0638.jpg
91
+ img_1079.jpg
92
+ img_0790.jpg
93
+ img_0811.jpg
94
+ img_0303.jpg
95
+ img_0542.jpg
96
+ img_1019.jpg
97
+ img_0472.jpg
98
+ img_0027.jpg
99
+ img_0539.jpg
100
+ img_0856.jpg
101
+ img_1094.jpg
102
+ img_1030.jpg
103
+ img_1063.jpg
104
+ img_0887.jpg
105
+ img_0067.jpg
106
+ img_0379.jpg
107
+ img_0919.jpg
108
+ img_1155.jpg
109
+ img_0221.jpg
110
+ img_1053.jpg
111
+ img_0916.jpg
112
+ img_1072.jpg
113
+ img_0347.jpg
114
+ img_1199.jpg
115
+ img_1080.jpg
116
+ img_0385.jpg
117
+ img_0344.jpg
118
+ img_1073.jpg
119
+ img_0339.jpg
120
+ img_0338.jpg
preprocess_dataset.py ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Preprocess images in QNRF and NWPU dataset.
2
+
3
+ import argparse
4
+
5
+ parser = argparse.ArgumentParser(description='Preprocess')
6
+ parser.add_argument('--dataset', default='qnrf',
7
+ help='dataset name, only support qnrf and nwpu')
8
+ parser.add_argument('--input-dataset-path', default='data/QNRF',
9
+ help='original data directory')
10
+ parser.add_argument('--output-dataset-path', default='data/QNRF-Train-Val-Test',
11
+ help='processed data directory')
12
+ args = parser.parse_args()
13
+
14
+ if args.dataset.lower() == 'qnrf':
15
+ from preprocess.preprocess_dataset_qnrf import main
16
+
17
+ main(args.input_dataset_path, args.output_dataset_path, 512, 2048)
18
+ elif args.dataset.lower() == 'nwpu':
19
+ from preprocess.preprocess_dataset_nwpu import main
20
+
21
+ main(args.input_dataset_path, args.output_dataset_path, 384, 1920)
22
+ else:
23
+ raise NotImplementedError
pretrained_models/model_nwpu.pth ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f2a0c92ac22b5c6aee08b59ad9f002561e75da07555b58c20dd699db8aac59b2
3
+ size 86005202
pretrained_models/model_qnrf.pth ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:16ef954a2cef40c66ee664f69273559553b735d5e2c9f90e2444f3c25dd45e05
3
+ size 86005202
requirements.txt ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ torch
2
+ torchvision
3
+ numpy>=1.16.5
4
+ scipy>=1.3.0
5
+ opencv-python
6
+ gdown
7
+ Pillow
8
+ gradio
test.py ADDED
@@ -0,0 +1,73 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import argparse
2
+ import torch
3
+ import os
4
+ import numpy as np
5
+ import datasets.crowd as crowd
6
+ from models import vgg19
7
+
8
+ parser = argparse.ArgumentParser(description='Test ')
9
+ parser.add_argument('--device', default='0', help='assign device')
10
+ parser.add_argument('--crop-size', type=int, default=512,
11
+ help='the crop size of the train image')
12
+ parser.add_argument('--model-path', type=str, default='pretrained_models/model_qnrf.pth',
13
+ help='saved model path')
14
+ parser.add_argument('--data-path', type=str,
15
+ default='data/QNRF-Train-Val-Test',
16
+ help='dataset directory path'
17
+ parser.add_argument('--dataset', type=str, default='qnrf',
18
+ help='dataset name: qnrf, nwpu, sha, shb')
19
+ parser.add_argument('--pred-density-map-path', type=str, default='',
20
+ help='save predicted density maps when pred-density-map-path is not empty.')
21
+
22
+
23
+ args = parser.parse_args()
24
+
25
+ os.environ['CUDA_VISIBLE_DEVICES'] = args.device # set vis gpu
26
+ device = torch.device('cuda')
27
+
28
+ model_path = args.model_path
29
+ crop_size = args.crop_size
30
+ data_path = args.data_path
31
+ if args.dataset.lower() == 'qnrf':
32
+ dataset = crowd.Crowd_qnrf(os.path.join(data_path, 'test'), crop_size, 8, method='val')
33
+ elif args.dataset.lower() == 'nwpu':
34
+ dataset = crowd.Crowd_nwpu(os.path.join(data_path, 'val'), crop_size, 8, method='val')
35
+ elif args.dataset.lower() == 'sha' or args.dataset.lower() == 'shb':
36
+ dataset = crowd.Crowd_sh(os.path.join(data_path, 'test_data'), crop_size, 8, method='val')
37
+ else:
38
+ raise NotImplementedError
39
+ dataloader = torch.utils.data.DataLoader(dataset, 1, shuffle=False,
40
+ num_workers=1, pin_memory=True)
41
+
42
+ if args.pred_density_map_path:
43
+ import cv2
44
+ if not os.path.exists(args.pred_density_map_path):
45
+ os.makedirs(args.pred_density_map_path)
46
+
47
+ model = vgg19()
48
+ model.to(device)
49
+ model.load_state_dict(torch.load(model_path, device))
50
+ model.eval()
51
+ image_errs = []
52
+ for inputs, count, name in dataloader:
53
+ inputs = inputs.to(device)
54
+ assert inputs.size(0) == 1, 'the batch size should equal to 1'
55
+ with torch.set_grad_enabled(False):
56
+ outputs, _ = model(inputs)
57
+ img_err = count[0].item() - torch.sum(outputs).item()
58
+
59
+ print(name, img_err, count[0].item(), torch.sum(outputs).item())
60
+ image_errs.append(img_err)
61
+
62
+ if args.pred_density_map_path:
63
+ vis_img = outputs[0, 0].cpu().numpy()
64
+ # normalize density map values from 0 to 1, then map it to 0-255.
65
+ vis_img = (vis_img - vis_img.min()) / (vis_img.max() - vis_img.min() + 1e-5)
66
+ vis_img = (vis_img * 255).astype(np.uint8)
67
+ vis_img = cv2.applyColorMap(vis_img, cv2.COLORMAP_JET)
68
+ cv2.imwrite(os.path.join(args.pred_density_map_path, str(name[0]) + '.png'), vis_img)
69
+
70
+ image_errs = np.array(image_errs)
71
+ mse = np.sqrt(np.mean(np.square(image_errs)))
72
+ mae = np.mean(np.abs(image_errs))
73
+ print('{}: mae {}, mse {}\n'.format(model_path, mae, mse))
train.py ADDED
@@ -0,0 +1,64 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import argparse
2
+ import os
3
+ import torch
4
+ from train_helper import Trainer
5
+
6
+
7
+ def str2bool(v):
8
+ return v.lower() in ("yes", "true", "t", "1")
9
+
10
+
11
+ def parse_args():
12
+ parser = argparse.ArgumentParser(description='Train')
13
+ parser.add_argument('--data-dir', default='data/UCF-Train-Val-Test', help='data path')
14
+ parser.add_argument('--dataset', default='qnrf', help='dataset name: qnrf, nwpu, sha, shb')
15
+ parser.add_argument('--lr', type=float, default=1e-5,
16
+ help='the initial learning rate')
17
+ parser.add_argument('--weight-decay', type=float, default=1e-4,
18
+ help='the weight decay')
19
+ parser.add_argument('--resume', default='', type=str,
20
+ help='the path of resume training model')
21
+ parser.add_argument('--max-epoch', type=int, default=1000,
22
+ help='max training epoch')
23
+ parser.add_argument('--val-epoch', type=int, default=5,
24
+ help='the number of epochs between validation runs'
25
+ parser.add_argument('--val-start', type=int, default=50,
26
+ help='the epoch start to val')
27
+ parser.add_argument('--batch-size', type=int, default=10,
28
+ help='train batch size')
29
+ parser.add_argument('--device', default='0', help='assign device')
30
+ parser.add_argument('--num-workers', type=int, default=3,
31
+ help='the num of training process')
32
+ parser.add_argument('--crop-size', type=int, default=512,
33
+ help='the crop size of the train image')
34
+ parser.add_argument('--wot', type=float, default=0.1, help='weight on OT loss')
35
+ parser.add_argument('--wtv', type=float, default=0.01, help='weight on TV loss')
36
+ parser.add_argument('--reg', type=float, default=10.0,
37
+ help='entropy regularization in sinkhorn')
38
+ parser.add_argument('--num-of-iter-in-ot', type=int, default=100,
39
+ help='sinkhorn iterations')
40
+ parser.add_argument('--norm-cood', type=int, default=0, help='whether to norm cood when computing distance')
41
+
42
+ args = parser.parse_args()
43
+
44
+ if args.dataset.lower() == 'qnrf':
45
+ args.crop_size = 512
46
+ elif args.dataset.lower() == 'nwpu':
47
+ args.crop_size = 384
48
+ args.val_epoch = 50
49
+ elif args.dataset.lower() == 'sha':
50
+ args.crop_size = 256
51
+ elif args.dataset.lower() == 'shb':
52
+ args.crop_size = 512
53
+ else:
54
+ raise NotImplementedError
55
+ return args
56
+
57
+
58
+ if __name__ == '__main__':
59
+ args = parse_args()
60
+ torch.backends.cudnn.benchmark = True
61
+ os.environ['CUDA_VISIBLE_DEVICES'] = args.device.strip() # set vis gpu
62
+ trainer = Trainer(args)
63
+ trainer.setup()
64
+ trainer.train()
train_helper.py ADDED
@@ -0,0 +1,211 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import time
3
+ import torch
4
+ import torch.nn as nn
5
+ from torch import optim
6
+ from torch.utils.data import DataLoader
7
+ from torch.utils.data.dataloader import default_collate
8
+ import numpy as np
9
+ from datetime import datetime
10
+
11
+ from datasets.crowd import Crowd_qnrf, Crowd_nwpu, Crowd_sh
12
+ from models import vgg19
13
+ from losses.ot_loss import OT_Loss
14
+ from utils.pytorch_utils import Save_Handle, AverageMeter
15
+ import utils.log_utils as log_utils
16
+
17
+
18
+ def train_collate(batch):
19
+ transposed_batch = list(zip(*batch))
20
+ images = torch.stack(transposed_batch[0], 0)
21
+ points = transposed_batch[1] # the number of points is not fixed, keep it as a list of tensor
22
+ gt_discretes = torch.stack(transposed_batch[2], 0)
23
+ return images, points, gt_discretes
24
+
25
+
26
+ class Trainer(object):
27
+ def __init__(self, args):
28
+ self.args = args
29
+
30
+ def setup(self):
31
+ args = self.args
32
+ sub_dir = 'input-{}_wot-{}_wtv-{}_reg-{}_nIter-{}_normCood-{}'.format(
33
+ args.crop_size, args.wot, args.wtv, args.reg, args.num_of_iter_in_ot, args.norm_cood)
34
+
35
+ self.save_dir = os.path.join('ckpts', sub_dir)
36
+ if not os.path.exists(self.save_dir):
37
+ os.makedirs(self.save_dir)
38
+
39
+ time_str = datetime.strftime(datetime.now(), '%m%d-%H%M%S')
40
+ self.logger = log_utils.get_logger(os.path.join(self.save_dir, 'train-{:s}.log'.format(time_str)))
41
+ log_utils.print_config(vars(args), self.logger)
42
+
43
+ if torch.cuda.is_available():
44
+ self.device = torch.device("cuda")
45
+ self.device_count = torch.cuda.device_count()
46
+ assert self.device_count == 1
47
+ self.logger.info('using {} gpus'.format(self.device_count))
48
+ else:
49
+ raise Exception("gpu is not available")
50
+
51
+ downsample_ratio = 8
52
+ if args.dataset.lower() == 'qnrf':
53
+ self.datasets = {x: Crowd_qnrf(os.path.join(args.data_dir, x),
54
+ args.crop_size, downsample_ratio, x) for x in ['train', 'val']}
55
+ elif args.dataset.lower() == 'nwpu':
56
+ self.datasets = {x: Crowd_nwpu(os.path.join(args.data_dir, x),
57
+ args.crop_size, downsample_ratio, x) for x in ['train', 'val']}
58
+ elif args.dataset.lower() == 'sha' or args.dataset.lower() == 'shb':
59
+ self.datasets = {'train': Crowd_sh(os.path.join(args.data_dir, 'train_data'),
60
+ args.crop_size, downsample_ratio, 'train'),
61
+ 'val': Crowd_sh(os.path.join(args.data_dir, 'test_data'),
62
+ args.crop_size, downsample_ratio, 'val'),
63
+ }
64
+ else:
65
+ raise NotImplementedError
66
+
67
+ self.dataloaders = {x: DataLoader(self.datasets[x],
68
+ collate_fn=(train_collate
69
+ if x == 'train' else default_collate),
70
+ batch_size=(args.batch_size
71
+ if x == 'train' else 1),
72
+ shuffle=(True if x == 'train' else False),
73
+ num_workers=args.num_workers * self.device_count,
74
+ pin_memory=(True if x == 'train' else False))
75
+ for x in ['train', 'val']}
76
+ self.model = vgg19()
77
+ self.model.to(self.device)
78
+ self.optimizer = optim.Adam(self.model.parameters(), lr=args.lr, weight_decay=args.weight_decay)
79
+
80
+ self.start_epoch = 0
81
+ if args.resume:
82
+ self.logger.info('loading pretrained model from ' + args.resume)
83
+ suf = args.resume.rsplit('.', 1)[-1]
84
+ if suf == 'tar':
85
+ checkpoint = torch.load(args.resume, self.device)
86
+ self.model.load_state_dict(checkpoint['model_state_dict'])
87
+ self.optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
88
+ self.start_epoch = checkpoint['epoch'] + 1
89
+ elif suf == 'pth':
90
+ self.model.load_state_dict(torch.load(args.resume, self.device))
91
+ else:
92
+ self.logger.info('random initialization')
93
+
94
+ self.ot_loss = OT_Loss(args.crop_size, downsample_ratio, args.norm_cood, self.device, args.num_of_iter_in_ot,
95
+ args.reg)
96
+ self.tv_loss = nn.L1Loss(reduction='none').to(self.device)
97
+ self.mse = nn.MSELoss().to(self.device)
98
+ self.mae = nn.L1Loss().to(self.device)
99
+ self.save_list = Save_Handle(max_num=1)
100
+ self.best_mae = np.inf
101
+ self.best_mse = np.inf
102
+ self.best_count = 0
103
+
104
+ def train(self):
105
+ """training process"""
106
+ args = self.args
107
+ for epoch in range(self.start_epoch, args.max_epoch + 1):
108
+ self.logger.info('-' * 5 + 'Epoch {}/{}'.format(epoch, args.max_epoch) + '-' * 5)
109
+ self.epoch = epoch
110
+ self.train_eopch()
111
+ if epoch % args.val_epoch == 0 and epoch >= args.val_start:
112
+ self.val_epoch()
113
+
114
+ def train_eopch(self):
115
+ epoch_ot_loss = AverageMeter()
116
+ epoch_ot_obj_value = AverageMeter()
117
+ epoch_wd = AverageMeter()
118
+ epoch_count_loss = AverageMeter()
119
+ epoch_tv_loss = AverageMeter()
120
+ epoch_loss = AverageMeter()
121
+ epoch_mae = AverageMeter()
122
+ epoch_mse = AverageMeter()
123
+ epoch_start = time.time()
124
+ self.model.train() # Set model to training mode
125
+
126
+ for step, (inputs, points, gt_discrete) in enumerate(self.dataloaders['train']):
127
+ inputs = inputs.to(self.device)
128
+ gd_count = np.array([len(p) for p in points], dtype=np.float32)
129
+ points = [p.to(self.device) for p in points]
130
+ gt_discrete = gt_discrete.to(self.device)
131
+ N = inputs.size(0)
132
+
133
+ with torch.set_grad_enabled(True):
134
+ outputs, outputs_normed = self.model(inputs)
135
+ # Compute OT loss.
136
+ ot_loss, wd, ot_obj_value = self.ot_loss(outputs_normed, outputs, points)
137
+ ot_loss = ot_loss * self.args.wot
138
+ ot_obj_value = ot_obj_value * self.args.wot
139
+ epoch_ot_loss.update(ot_loss.item(), N)
140
+ epoch_ot_obj_value.update(ot_obj_value.item(), N)
141
+ epoch_wd.update(wd, N)
142
+
143
+ # Compute counting loss.
144
+ count_loss = self.mae(outputs.sum(1).sum(1).sum(1),
145
+ torch.from_numpy(gd_count).float().to(self.device))
146
+ epoch_count_loss.update(count_loss.item(), N)
147
+
148
+ # Compute TV loss.
149
+ gd_count_tensor = torch.from_numpy(gd_count).float().to(self.device).unsqueeze(1).unsqueeze(
150
+ 2).unsqueeze(3)
151
+ gt_discrete_normed = gt_discrete / (gd_count_tensor + 1e-6)
152
+ tv_loss = (self.tv_loss(outputs_normed, gt_discrete_normed).sum(1).sum(1).sum(
153
+ 1) * torch.from_numpy(gd_count).float().to(self.device)).mean(0) * self.args.wtv
154
+ epoch_tv_loss.update(tv_loss.item(), N)
155
+
156
+ loss = ot_loss + count_loss + tv_loss
157
+
158
+ self.optimizer.zero_grad()
159
+ loss.backward()
160
+ self.optimizer.step()
161
+
162
+ pred_count = torch.sum(outputs.view(N, -1), dim=1).detach().cpu().numpy()
163
+ pred_err = pred_count - gd_count
164
+ epoch_loss.update(loss.item(), N)
165
+ epoch_mse.update(np.mean(pred_err * pred_err), N)
166
+ epoch_mae.update(np.mean(abs(pred_err)), N)
167
+
168
+ self.logger.info(
169
+ 'Epoch {} Train, Loss: {:.2f}, OT Loss: {:.2e}, Wass Distance: {:.2f}, OT obj value: {:.2f}, '
170
+ 'Count Loss: {:.2f}, TV Loss: {:.2f}, MSE: {:.2f} MAE: {:.2f}, Cost {:.1f} sec'
171
+ .format(self.epoch, epoch_loss.get_avg(), epoch_ot_loss.get_avg(), epoch_wd.get_avg(),
172
+ epoch_ot_obj_value.get_avg(), epoch_count_loss.get_avg(), epoch_tv_loss.get_avg(),
173
+ np.sqrt(epoch_mse.get_avg()), epoch_mae.get_avg(),
174
+ time.time() - epoch_start))
175
+ model_state_dic = self.model.state_dict()
176
+ save_path = os.path.join(self.save_dir, '{}_ckpt.tar'.format(self.epoch))
177
+ torch.save({
178
+ 'epoch': self.epoch,
179
+ 'optimizer_state_dict': self.optimizer.state_dict(),
180
+ 'model_state_dict': model_state_dic
181
+ }, save_path)
182
+ self.save_list.append(save_path)
183
+
184
+ def val_epoch(self):
185
+ args = self.args
186
+ epoch_start = time.time()
187
+ self.model.eval() # Set model to evaluate mode
188
+ epoch_res = []
189
+ for inputs, count, name in self.dataloaders['val']:
190
+ inputs = inputs.to(self.device)
191
+ assert inputs.size(0) == 1, 'the batch size should equal to 1 in validation mode'
192
+ with torch.set_grad_enabled(False):
193
+ outputs, _ = self.model(inputs)
194
+ res = count[0].item() - torch.sum(outputs).item()
195
+ epoch_res.append(res)
196
+
197
+ epoch_res = np.array(epoch_res)
198
+ mse = np.sqrt(np.mean(np.square(epoch_res)))
199
+ mae = np.mean(np.abs(epoch_res))
200
+ self.logger.info('Epoch {} Val, MSE: {:.2f} MAE: {:.2f}, Cost {:.1f} sec'
201
+ .format(self.epoch, mse, mae, time.time() - epoch_start))
202
+
203
+ model_state_dic = self.model.state_dict()
204
+ if (2.0 * mse + mae) < (2.0 * self.best_mse + self.best_mae):
205
+ self.best_mse = mse
206
+ self.best_mae = mae
207
+ self.logger.info("save best mse {:.2f} mae {:.2f} model epoch {}".format(self.best_mse,
208
+ self.best_mae,
209
+ self.epoch))
210
+ torch.save(model_state_dic, os.path.join(self.save_dir, 'best_model_{}.pth'.format(self.best_count)))
211
+ self.best_count += 1
utils/__init__.py ADDED
File without changes
utils/log_utils.py ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import logging
2
+
3
+
4
+ def get_logger(log_file):
5
+ logger = logging.getLogger(log_file)
6
+ logger.setLevel(logging.DEBUG)
7
+ fh = logging.FileHandler(log_file)
8
+ fh.setLevel(logging.DEBUG)
9
+ ch = logging.StreamHandler()
10
+ ch.setLevel(logging.INFO)
11
+ formatter = logging.Formatter("%(asctime)s - %(levelname)s - %(message)s")
12
+ ch.setFormatter(formatter)
13
+ fh.setFormatter(formatter)
14
+ logger.addHandler(ch)
15
+ logger.addHandler(fh)
16
+ return logger
17
+
18
+
19
+ def print_config(config, logger):
20
+ """
21
+ Print configuration of the model
22
+ """
23
+ for k, v in config.items():
24
+ logger.info("{}:\t{}".format(k.ljust(15), v))
utils/pytorch_utils.py ADDED
@@ -0,0 +1,58 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+
3
+ def adjust_learning_rate(optimizer, epoch, initial_lr=0.001, decay_epoch=10):
4
+ """Sets the learning rate to the initial LR decayed by 10 every decay_epoch epochs, floored at 1e-6"""
5
+ lr = max(initial_lr * (0.1 ** (epoch // decay_epoch)), 1e-6)
6
+ for param_group in optimizer.param_groups:
7
+ param_group['lr'] = lr
8
+
9
+
10
+ class Save_Handle(object):
11
+ """Keep at most max_num saved checkpoint files, deleting the oldest when a new one is appended"""
12
+ def __init__(self, max_num):
13
+ self.save_list = []
14
+ self.max_num = max_num
15
+
16
+ def append(self, save_path):
17
+ if len(self.save_list) < self.max_num:
18
+ self.save_list.append(save_path)
19
+ else:
20
+ remove_path = self.save_list[0]
21
+ del self.save_list[0]
22
+ self.save_list.append(save_path)
23
+ if os.path.exists(remove_path):
24
+ os.remove(remove_path)
25
+
26
+
27
+ class AverageMeter(object):
28
+ """Computes and stores the average and current value"""
29
+ def __init__(self):
30
+ self.reset()
31
+
32
+ def reset(self):
33
+ self.val = 0
34
+ self.avg = 0
35
+ self.sum = 0
36
+ self.count = 0
37
+
38
+ def update(self, val, n=1):
39
+ self.val = val
40
+ self.sum += val * n
41
+ self.count += n
42
+ self.avg = 1.0 * self.sum / self.count
43
+
44
+ def get_avg(self):
45
+ return self.avg
46
+
47
+ def get_count(self):
48
+ return self.count
49
+
50
+
51
+ def set_trainable(model, requires_grad):
52
+ for param in model.parameters():
53
+ param.requires_grad = requires_grad
54
+
55
+
56
+
57
+ def get_num_params(model):
58
+ return sum(p.numel() for p in model.parameters() if p.requires_grad)