orhir committed on
Commit
c9e8b79
·
verified ·
1 Parent(s): 0e6ae18

Upload 85 files

Browse files
EdgeCape/models/detectors/EdgeCape.py CHANGED
@@ -99,12 +99,12 @@ class EdgeCape(BasePose):
99
  """Defines the computation performed at every call when training."""
100
  bs, _, h, w = img_q.shape
101
  random_mask = kwargs.get('rand_mask', None)
102
- output, initial_proposals, similarity_map, mask_s, reconstructed_keypoints = self.predict(img_s,
103
- target_s,
104
- target_weight_s,
105
- img_q,
106
- img_metas,
107
- random_mask)
108
 
109
  # parse the img meta to get the target keypoints
110
  device = output.device
@@ -149,12 +149,12 @@ class EdgeCape(BasePose):
149
 
150
  """Defines the computation performed at every call when testing."""
151
  batch_size, _, img_height, img_width = img_q.shape
152
- output, initial_proposals, similarity_map, mask_s, reconstructed_keypoints = self.predict(img_s,
153
- target_s,
154
- target_weight_s,
155
- img_q,
156
- img_metas
157
- )
158
  predicted_pose = output[-1].detach().cpu().numpy()
159
  result = {}
160
 
@@ -166,6 +166,7 @@ class EdgeCape(BasePose):
166
  result.update({"points": torch.cat((initial_proposals[None], output)).cpu().numpy()})
167
 
168
  result.update({"sample_image_file": [img_metas[i]['sample_image_file'] for i in range(len(img_metas))]})
 
169
 
170
  return result
171
 
@@ -185,10 +186,10 @@ class EdgeCape(BasePose):
185
  feature_q, feature_s = self.extract_features(img_s, img_q)
186
  skeleton_lst = [i['sample_skeleton'][0] for i in img_metas]
187
 
188
- (output, initial_proposals, similarity_map, reconstructed_keypoints) = self.keypoint_head_module(
189
  feature_q, feature_s, target_s, mask_s, skeleton_lst, random_mask=random_mask)
190
 
191
- return output, initial_proposals, similarity_map, mask_s, reconstructed_keypoints
192
 
193
  def extract_features(self, img_s, img_q):
194
  with torch.no_grad():
 
99
  """Defines the computation performed at every call when training."""
100
  bs, _, h, w = img_q.shape
101
  random_mask = kwargs.get('rand_mask', None)
102
+ output, initial_proposals, similarity_map, mask_s, reconstructed_keypoints, adj = self.predict(img_s,
103
+ target_s,
104
+ target_weight_s,
105
+ img_q,
106
+ img_metas,
107
+ random_mask)
108
 
109
  # parse the img meta to get the target keypoints
110
  device = output.device
 
149
 
150
  """Defines the computation performed at every call when testing."""
151
  batch_size, _, img_height, img_width = img_q.shape
152
+ output, initial_proposals, similarity_map, mask_s, reconstructed_keypoints, adj = self.predict(img_s,
153
+ target_s,
154
+ target_weight_s,
155
+ img_q,
156
+ img_metas
157
+ )
158
  predicted_pose = output[-1].detach().cpu().numpy()
159
  result = {}
160
 
 
166
  result.update({"points": torch.cat((initial_proposals[None], output)).cpu().numpy()})
167
 
168
  result.update({"sample_image_file": [img_metas[i]['sample_image_file'] for i in range(len(img_metas))]})
169
+ result.update({"skeleton": adj[0].cpu().numpy()})
170
 
171
  return result
172
 
 
186
  feature_q, feature_s = self.extract_features(img_s, img_q)
187
  skeleton_lst = [i['sample_skeleton'][0] for i in img_metas]
188
 
189
+ output, initial_proposals, similarity_map, reconstructed_keypoints, adj = self.keypoint_head_module(
190
  feature_q, feature_s, target_s, mask_s, skeleton_lst, random_mask=random_mask)
191
 
192
+ return output, initial_proposals, similarity_map, mask_s, reconstructed_keypoints, adj
193
 
194
  def extract_features(self, img_s, img_q):
195
  with torch.no_grad():
EdgeCape/models/detectors/__pycache__/EdgeCape.cpython-39.pyc CHANGED
Binary files a/EdgeCape/models/detectors/__pycache__/EdgeCape.cpython-39.pyc and b/EdgeCape/models/detectors/__pycache__/EdgeCape.cpython-39.pyc differ
 
EdgeCape/models/keypoint_heads/__pycache__/head.cpython-39.pyc CHANGED
Binary files a/EdgeCape/models/keypoint_heads/__pycache__/head.cpython-39.pyc and b/EdgeCape/models/keypoint_heads/__pycache__/head.cpython-39.pyc differ
 
EdgeCape/models/keypoint_heads/head.py CHANGED
@@ -219,7 +219,7 @@ class TwoStageHead(nn.Module):
219
  layer_outputs_unsig = layer_delta_unsig + inverse_sigmoid(out_points[idx])
220
  output_kpts.append(layer_outputs_unsig.sigmoid())
221
 
222
- return torch.stack(output_kpts, dim=0), initial_proposals, similarity_map, reconstructed_keypoints
223
 
224
  def get_loss(self, output, initial_proposals, similarity_map, target,
225
  target_heatmap, target_weight, target_sizes, reconstructed_keypoints):
 
219
  layer_outputs_unsig = layer_delta_unsig + inverse_sigmoid(out_points[idx])
220
  output_kpts.append(layer_outputs_unsig.sigmoid())
221
 
222
+ return torch.stack(output_kpts, dim=0), initial_proposals, similarity_map, reconstructed_keypoints, adj
223
 
224
  def get_loss(self, output, initial_proposals, similarity_map, target,
225
  target_heatmap, target_weight, target_sizes, reconstructed_keypoints):
examples/dog1.png ADDED
examples/dog2.png ADDED
examples/person1.jpeg ADDED
examples/person2.jpeg ADDED
examples/sofa1.jpg ADDED
examples/sofa2.jpg ADDED