Onearth committed
Commit 96a90d7 · verified · 1 Parent(s): 742ef47

Upload 2 files

Files changed (2)
  1. ECM_tracking.py +246 -0
  2. Player_ECM.py +851 -0
ECM_tracking.py ADDED
@@ -0,0 +1,246 @@
+ import time
+ import math
+ import sys
+ import csv
+ import datetime
+ import crtk
+ import dvrk
+ import numpy
+ import argparse
+ import surrol
+ from surrol.robots.ecm import Ecm
+ import pybullet as p
+ import numpy as np
+ from surrol.utils.pybullet_utils import *
+ from surrol.tasks.ecm_env import EcmEnv, goal_distance
+ import torch
+ import torch.nn as nn
+ import os
+ import cv2
+ import PyKDL
+ from PIL import Image
+ import matplotlib.pyplot as plt
+ import yaml
+ from scipy.spatial.transform import Rotation as R
+
+
+ # def cVc_to_dq(robot, sim_robot, cVc: np.ndarray) -> np.ndarray:
+ #     """
+ #     convert the camera velocity in its own frame (cVc) into the joint velocity q_dot
+ #     """
+ #     cVc = cVc.reshape((3, 1))
+ #
+ #     # restrict the step size, need tune
+ #     if np.abs(cVc).max() > 0.01:
+ #         cVc = cVc / np.abs(cVc).max() * 0.01
+ #
+ #     # Forward kinematics
+ #     q = robot.measured_jp()
+ #     bRe = sim_robot.robot.fkine(q).R  # use rtb instead of PyBullet, no tool_tip_offset
+ #     orn_cam = robot.measured_cp().M
+ #     wRc = np.array(p.getMatrixFromQuaternion(orn_cam)).reshape((3, 3))
+ #
+ #     # Rotation
+ #     R1, R2 = self._wRc0, wRc
+ #     x = R1[0, 0] * R2[1, 0] - R1[1, 0] * R2[0, 0] + R1[0, 1] * R2[1, 1] - R1[1, 1] * R2[0, 1]
+ #     y = R1[0, 0] * R2[1, 1] - R1[1, 0] * R2[0, 1] - R1[0, 1] * R2[1, 0] + R1[1, 1] * R2[0, 0]
+ #     dz = np.arctan(x / y)
+ #     k1, k2 = 25.0, 0.1
+ #     self._wz = k1 * dz * np.exp(-k2 * np.linalg.norm(self._homo_delta))
+ #     # print(' -> x: {:.4f}, y: {:.4f}, dz: {:.4f}, wz: {:.4f}'.format(x, y, dz, self._wz))
+ #
+ #     # Pseudo Solution
+ #     d = self._tip_offset
+ #     Jd = np.matmul(self._eRc,
+ #                    np.array([[0, 0, d, 0],
+ #                              [0, -d, 0, 0],
+ #                              [1, 0, 0, 0]]))
+ #     Je = np.matmul(self._eRc,
+ #                    np.array([[0, 1, 0, 0],
+ #                              [0, 0, 1, 0],
+ #                              [0, 0, 0, 1]]))
+ #
+ #     eVe4 = np.dot(np.linalg.pinv(Jd), cVc) \
+ #            + np.dot(np.dot((np.eye(4) - np.dot(np.linalg.pinv(Jd), Jd)), np.linalg.pinv(Je)),
+ #                     np.array([[0], [0], [self._wz]]))
+ #     eVe = np.zeros((6, 1))
+ #     eVe[2: 6] = eVe4[0: 4]
+ #     Q = np.zeros((6, 6))
+ #     Q[0: 3, 0: 3] = - bRe
+ #     Q[3: 6, 3: 6] = - bRe
+ #     bVe = np.dot(Q, eVe)
+ #
+ #     # Compute the Jacobian matrix
+ #     bJe = self.get_jacobian_spatial()
+ #     dq = np.dot(np.linalg.pinv(bJe), bVe)
+ #     # print(" -> cVc: {}, q: {}, dq: {}".format(list(np.round(cVc.flatten(), 4)), q, list(dq.flatten())))
+ #     return dq.flatten()
+
+
+ def reset_camera(yaw=50.0, pitch=-35.0, dist=5.0, target=(0.0, 0.0, 0.0)):
+     p.resetDebugVisualizerCamera(
+         cameraDistance=dist, cameraYaw=yaw, cameraPitch=pitch, cameraTargetPosition=target)
+
+
+ def get_camera():
+     return CameraInfo(*p.getDebugVisualizerCamera())
+
+
+ def render_image(width, height, view_matrix, proj_matrix, shadow=1):
+     (_, _, px, _, mask) = p.getCameraImage(width=width,
+                                            height=height,
+                                            viewMatrix=view_matrix,
+                                            projectionMatrix=proj_matrix,
+                                            shadow=shadow,
+                                            lightDirection=(10, 0, 10),
+                                            renderer=p.ER_BULLET_HARDWARE_OPENGL)
+
+     rgb_array = np.array(px, dtype=np.uint8)
+     rgb_array = np.reshape(rgb_array, (height, width, 4))
+     rgb_array = rgb_array[:, :, :3]
+     return rgb_array, mask
+
+
+ class Sim_ECM(EcmEnv):
+     ACTION_SIZE = 3  # (dx, dy, dz) or cVc or droll (1)
+     ACTION_MODE = 'cVc'
+     DISTANCE_THRESHOLD = 0.005
+     POSE_ECM = ((0.15, 0.0, 0.7524), (0, 20 / 180 * np.pi, 0))
+     QPOS_ECM = (0, 0.6, 0.04, 0)
+     WORKSPACE_LIMITS = ((0.45, 0.55), (-0.05, 0.05), (0.60, 0.70))
+     SCALING = 1.
+     p = p.connect(p.GUI)  # PyBullet client id; the GUI connection is opened at class-definition time
+
+     def __init__(self, render_mode: str = None, cid=-1):
+         # workspace
+         self.workspace_limits = np.asarray(self.WORKSPACE_LIMITS)
+         self.workspace_limits *= self.SCALING
+
+         # camera
+         self.use_camera = False
+
+         # has_object
+         self.has_object = False
+         self.obj_id = None
+
+         # super(Sim_ECM, self).__init__(render_mode, cid)
+
+         # change duration
+         self._duration = 0.1
+
+         # distance_threshold
+         self.distance_threshold = self.DISTANCE_THRESHOLD * self.SCALING
+
+         # render related setting
+         self._view_matrix = p.computeViewMatrixFromYawPitchRoll(
+             cameraTargetPosition=(0.27 * self.SCALING, -0.20 * self.SCALING, 0.55 * self.SCALING),
+             distance=1.80 * self.SCALING,
+             yaw=150,
+             pitch=-30,
+             roll=0,
+             upAxisIndex=2
+         )
+
+     def reset_env(self):
+         assert self.ACTION_MODE in ('cVc', 'dmove', 'droll')
+         # camera
+         reset_camera(yaw=150.0, pitch=-30.0, dist=1.50 * self.SCALING,
+                      target=(0.27 * self.SCALING, -0.20 * self.SCALING, 0.55 * self.SCALING))
+
+         # robot
+         self.ecm = Ecm(self.POSE_ECM[0], p.getQuaternionFromEuler(self.POSE_ECM[1]),
+                        scaling=self.SCALING)
+
+
+ def run(name):
+     # create dVRK robot
+     robot = dvrk.ecm('ECM')
+     # # file to save data
+     # now = datetime.datetime.now()
+     # now_string = now.strftime("%Y-%m-%d-%H-%M")
+     # csv_file_name = name + '-gc-' + now_string + '.csv'
+     # print("Values will be saved in: ", csv_file_name)
+
+     # # compute joint limits
+     d2r = math.pi / 180.0  # degrees to radians
+     lower_limits = [-80.0 * d2r, -40.0 * d2r, 0.005]
+     upper_limits = [ 80.0 * d2r,  60.0 * d2r, 0.230]
+     sim_ecm = Sim_ECM('human')
+     sim_ecm.reset_env()
+
+     current_dvrk_jp = robot.measured_jp()
+     print('current dvrk jp: ', current_dvrk_jp)
+     current_dvrk_pose = robot.measured_cp()
+     state = current_dvrk_pose.M
+     position = current_dvrk_pose.p
+     ECM_rotate = np.array([[state[0, 0], state[0, 1], state[0, 2]],
+                            [state[1, 0], state[1, 1], state[1, 2]],
+                            [state[2, 0], state[2, 1], state[2, 2]]])
+     ECM_position = np.array([position[0], position[1], position[2]])
+     print(ECM_position)
+     ECM_quat = R.from_matrix(ECM_rotate).as_quat()
+     print(ECM_quat)
+     # transform_M[:3, :3] = orn
+     # transform_M[:3, 3] = np.array(pos)
+     sim_ecm.ecm.reset_joint(np.array(current_dvrk_jp))
+
+     print('current sim ecm jp: ', sim_ecm.ecm.get_current_joint_position())
+     print('current sim ecm position: ', sim_ecm._get_robot_state())
+     print('converted')
+     while True:
+         p.stepSimulation()
+
+     # # set sampling for data
+     # # increments = [40.0 * d2r, 40.0 * d2r, 0.10]  # less samples
+     # increments = [20.0 * d2r, 20.0 * d2r, 0.05]  # more samples
+     # directions = [1.0, 1.0, 1.0]
+
+     # # start position
+     # positions = [lower_limits[0],
+     #              lower_limits[1],
+     #              lower_limits[2]]
+
+     # all_reached = False
+
+     # robot.home()
+
+     # while not all_reached:
+     #     next_dimension_increment = True
+     #     for index in range(3):
+     #         if next_dimension_increment:
+     #             future = positions[index] + directions[index] * increments[index]
+     #             if (future > upper_limits[index]):
+     #                 directions[index] = -1.0
+     #                 if index == 2:
+     #                     all_reached = True
+     #             elif (future < lower_limits[index]):
+     #                 directions[index] = 1.0
+     #             else:
+     #                 positions[index] = future
+     #                 next_dimension_increment = False
+
+     #     robot.move_jp(numpy.array([positions[0],
+     #                                positions[1],
+     #                                positions[2],
+     #                                0.0])).wait()
+     #     time.sleep(1.0)
+
+
+ if __name__ == '__main__':
+     # extract ros arguments (e.g. __ns:= for namespace)
+
+     # parse arguments (note: the arguments are defined but never parsed or used;
+     # run() is called with the 'ECM' arm directly)
+     parser = argparse.ArgumentParser()
+     parser.add_argument('-a', '--arm', type=str, required=True,
+                         choices=['ECM', 'PSM1', 'PSM2', 'PSM3'],
+                         help='arm name corresponding to ROS topics without namespace. Use __ns:= to specify the namespace')
+     parser.add_argument('-i', '--interval', type=float, default=0.01,
+                         help='expected interval in seconds between messages sent by the device')
+
+     run('ECM')
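
Note: render_image() and get_camera() above are defined but never called in ECM_tracking.py. A minimal sketch of how the simulated ECM view could be rendered after the real-to-sim joint sync; the field of view, aspect ratio, and clipping planes below are illustrative assumptions, not values from this commit:

    width, height = 320, 240
    proj_matrix = p.computeProjectionMatrixFOV(fov=45.0, aspect=width / height,
                                               nearVal=0.01, farVal=10.0)
    sim_ecm = Sim_ECM('human')
    sim_ecm.reset_env()
    rgb, mask = render_image(width, height, sim_ecm._view_matrix, proj_matrix)  # (height, width, 3) RGB array
    plt.imsave('sim_ecm_view.png', rgb)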
Player_ECM.py ADDED
@@ -0,0 +1,851 @@
+ import torch
+ import torch.nn as nn
+ import numpy as np
+ import os
+ import cv2
+ import dvrk
+ import PyKDL
+ from PIL import Image
+ import matplotlib.pyplot as plt
+ import yaml
+ import math
+ from scipy.spatial.transform import Rotation as R
+ from easydict import EasyDict as edict
+ import sys
+ sys.path.append('IGEV/core')
+ sys.path.append('IGEV')
+ from igev_stereo import IGEVStereo
+ from IGEV.core.utils.utils import InputPadder
+ from rl.agents.ddpg import DDPG
+ import rl.components as components
+
+ import argparse
+ from FastSAM.fastsam import FastSAM, FastSAMPrompt
+ import ast
+ from FastSAM.utils.tools import convert_box_xywh_to_xyxy
+
+ import torch.nn.functional as F
+ import queue, threading
+
+ from vmodel import vismodel
+ from config import opts
+
+ from rectify import my_rectify
+
+ from surrol.robots.ecm import Ecm
+ import pybullet as p
+ from surrol.utils.pybullet_utils import *
+
+
+ class Sim_ECM():
+     ACTION_SIZE = 3  # (dx, dy, dz) or cVc or droll (1)
+     ACTION_MODE = 'cVc'
+     DISTANCE_THRESHOLD = 0.005
+     POSE_ECM = ((0.15, 0.0, 0.7524), (0, 20 / 180 * np.pi, 0))
+     QPOS_ECM = (0, 0.6, 0.04, 0)
+     WORKSPACE_LIMITS = ((0.45, 0.55), (-0.05, 0.05), (0.60, 0.70))
+     SCALING = 1.
+     p = p.connect(p.GUI)  # PyBullet client id; the GUI connection is opened at class-definition time
+
+     def __init__(self, render_mode: str = None, cid=-1):
+         # workspace
+         self.workspace_limits = np.asarray(self.WORKSPACE_LIMITS)
+         self.workspace_limits *= self.SCALING
+
+         # camera
+         self.use_camera = False
+
+         # has_object
+         self.has_object = False
+         self.obj_id = None
+
+         # super(Sim_ECM, self).__init__(render_mode, cid)
+
+         # change duration
+         self._duration = 0.1
+
+         # distance_threshold
+         self.distance_threshold = self.DISTANCE_THRESHOLD * self.SCALING
+
+         # render related setting
+         self._view_matrix = p.computeViewMatrixFromYawPitchRoll(
+             cameraTargetPosition=(0.27 * self.SCALING, -0.20 * self.SCALING, 0.55 * self.SCALING),
+             distance=1.80 * self.SCALING,
+             yaw=150,
+             pitch=-30,
+             roll=0,
+             upAxisIndex=2
+         )
+
+     def reset_env(self):
+         assert self.ACTION_MODE in ('cVc', 'dmove', 'droll')
+         # camera (reset_camera is expected to come from the surrol.utils.pybullet_utils star import)
+         reset_camera(yaw=150.0, pitch=-30.0, dist=1.50 * self.SCALING,
+                      target=(0.27 * self.SCALING, -0.20 * self.SCALING, 0.55 * self.SCALING))
+
+         # robot
+         self.ecm = Ecm(self.POSE_ECM[0], p.getQuaternionFromEuler(self.POSE_ECM[1]),
+                        scaling=self.SCALING)
+
+
+ def SetPoints(windowname, img):
+     points = []
+
+     def onMouse(event, x, y, flags, param):
+         if event == cv2.EVENT_LBUTTONDOWN:
+             cv2.circle(temp_img, (x, y), 10, (102, 217, 239), -1)
+             points.append([x, y])
+             cv2.imshow(windowname, temp_img)
+
+     temp_img = img.copy()
+     cv2.namedWindow(windowname)
+     cv2.imshow(windowname, temp_img)
+     cv2.setMouseCallback(windowname, onMouse)
+     key = cv2.waitKey(0)
+     if key == 13:  # Enter
+         print('select point: ', points)
+         del temp_img
+         cv2.destroyAllWindows()
+         return points
+     elif key == 27:  # ESC
+         print('quit!')
+         del temp_img
+         cv2.destroyAllWindows()
+         return
+     else:
+         print('retry')
+         return SetPoints(windowname, img)
+
+
+ def resize_with_pad(image, target_width, target_height):
+     # read the original image
+     # image = cv2.imread(image_path)
+
+     # compute the scaling ratio
+     height, width = image.shape[:2]
+     scale = min(target_width / width, target_height / height)
+
+     # resize the image
+     resized_image = cv2.resize(image, None, fx=scale, fy=scale, interpolation=cv2.INTER_AREA)
+
+     # compute the padding size
+     pad_height = target_height - resized_image.shape[0]
+     pad_width = target_width - resized_image.shape[1]
+
+     # add the padding
+     padded_image = cv2.copyMakeBorder(resized_image, 0, pad_height, 0, pad_width, cv2.BORDER_CONSTANT, value=[154, 149, 142])
+
+     return padded_image
+
+
+ def crop_img(img):
+     crop_img = img[:, 100:]
+     crop_img = crop_img[:, :-100]
+     # print(crop_img.shape)
+     crop_img = cv2.resize(crop_img, (256, 256))
+     return crop_img
+
+
+ # bufferless VideoCapture
+ class VideoCapture:
+
+     def __init__(self, name):
+         self.cap = cv2.VideoCapture(name)
+         video_name = 'test_record/{}.mp4'.format(name.split('/')[-1])
+         self.output_video = cv2.VideoWriter(video_name, cv2.VideoWriter_fourcc(*'mp4v'), 30, (800, 600))
+
+         self.q = queue.Queue()
+         t = threading.Thread(target=self._reader)
+         t.daemon = True
+         t.start()
+         # t.join()
+
+     # read frames as soon as they are available, keeping only most recent one
+     def _reader(self):
+         while True:
+             ret, frame = self.cap.read()
+             if not ret:
+                 break
+             self.output_video.write(frame)
+             if not self.q.empty():
+                 try:
+                     self.q.get_nowait()  # discard previous (unprocessed) frame
+                 except queue.Empty:
+                     pass
+             self.q.put(frame)
+
+     def read(self):
+         return self.q.get()
+
+     def release(self):
+         self.cap.release()
+         self.output_video.release()
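
The VideoCapture wrapper above keeps only the most recent frame, so read() never returns a stale image while the models are busy. A minimal usage sketch; the device path is an assumption mirroring the ones used later in init_run:

    cap = VideoCapture("/dev/video8")   # hypothetical endoscope device node
    frame = cap.read()                  # blocks until the newest frame arrives
    print(frame.shape)
    cap.release()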
+
+
+ def transf_DH_modified(alpha, a, theta, d):
+     trnsf = np.array([[math.cos(theta), -math.sin(theta), 0., a],
+                       [math.sin(theta) * math.cos(alpha), math.cos(theta) * math.cos(alpha), -math.sin(alpha), -d * math.sin(alpha)],
+                       [math.sin(theta) * math.sin(alpha), math.cos(theta) * math.sin(alpha), math.cos(alpha), d * math.cos(alpha)],
+                       [0., 0., 0., 1.]])
+     return trnsf
+
+
+ # hard-coded hand-eye transforms between the PSM base frame and the camera frame
+ basePSM_T_cam = np.array([[-0.89330132,  0.3482998 , -0.28407746, -0.0712333 ],
+                           [ 0.44895017,  0.72151095, -0.52712968,  0.08994234],
+                           [ 0.02136583, -0.59842226, -0.80089594, -0.06478026],
+                           [ 0.        ,  0.        ,  0.        ,  1.        ]])
+
+ cam_T_basePSM = np.array([[-0.89330132,  0.44895017,  0.02136583, -0.10262834],
+                           [ 0.3482998 ,  0.72151095, -0.59842226, -0.07884979],
+                           [-0.28407746, -0.52712968, -0.80089594, -0.02470674],
+                           [ 0.        ,  0.        ,  0.        ,  1.        ]])
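
The two hard-coded transforms above should be rigid-body inverses of each other; a quick consistency check with the values exactly as given:

    # cam_T_basePSM is (numerically) the inverse of basePSM_T_cam
    assert np.allclose(np.linalg.inv(basePSM_T_cam), cam_T_basePSM, atol=1e-6)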
+
+
+ class VisPlayer(nn.Module):
+     def __init__(self):
+         super().__init__()
+         # depth estimation
+         self.device = 'cuda:0'
+         # self._load_depth_model()
+         # self._load_policy_model()
+         self._init_rcm()
+         self.img_size = (320, 240)
+         self.scaling = 1.  # for peg transfer
+         self.calibration_data = {
+             'baseline': 0.005500,
+             'focal_length_left': 916.367081,
+             'focal_length_right': 918.730361
+         }
+         self.threshold = 0.013
+         # self.init_run()
+
+     def _init_rcm(self):
+         # TODO check matrix
+         self.tool_T_tip = np.array([[ 0., -1.,  0.,  0.],
+                                     [ 0.,  0.,  1.,  0.],
+                                     [-1.,  0.,  0.,  0.],
+                                     [ 0.,  0.,  0.,  1.]])
+
+         eul = np.array([np.deg2rad(-90), 0., 0.])
+         eul = self.get_matrix_from_euler(eul)
+         self.rcm_init_eul = np.array([-2.94573084, 0.15808114, 1.1354972])
+         # object pos [-0.123593, 0.0267398, -0.141579]
+         # target pos [-0.0577594, 0.0043639, -0.133283]
+         self.rcm_init_pos = np.array([-0.0617016, -0.00715477, -0.0661465])
+
+     def _load_depth_model(self, checkpoint_path='pretrained_models/sceneflow.pth'):
+         args = edict()
+         args.restore_ckpt = checkpoint_path
+         args.save_numpy = False
+         args.mixed_precision = False
+         args.valid_iters = 32
+         args.hidden_dims = [128] * 3
+         args.corr_implementation = "reg"
+         args.shared_backbone = False
+         args.corr_levels = 2
+         args.corr_radius = 4
+         args.n_downsample = 2
+         args.slow_fast_gru = False
+         args.n_gru_layers = 3
+         args.max_disp = 192
+
+         self.depth_model = torch.nn.DataParallel(IGEVStereo(args), device_ids=[0])
+         # self.depth_model = IGEVStereo(args)
+         self.depth_model.load_state_dict(torch.load(args.restore_ckpt))
+
+         self.depth_model = self.depth_model.module
+         self.depth_model.to("cuda")
+         self.depth_model.eval()
+
+     def _load_policy_model(self, vmodel_file, filepath='./pretrained_models/state_dict.pt'):
+         with open('rl/configs/agent/ddpg.yaml', "r") as f:
+             agent_params = yaml.load(f.read(), Loader=yaml.FullLoader)
+         agent_params = edict(agent_params)
+         env_params = edict(
+             obs=19,
+             achieved_goal=3,
+             goal=3,
+             act=7,
+             max_timesteps=10,
+             max_action=1,
+             act_rand_sampler=None,
+         )
+
+         self.agent = DDPG(env_params=env_params, agent_cfg=agent_params)
+         checkpt_path = filepath
+         checkpt = torch.load(checkpt_path, map_location=self.device)
+         self.agent.load_state_dict(checkpt)
+         # self.agent.g_norm = checkpt['g_norm']
+         # self.agent.o_norm = checkpt['o_norm']
+         # self.agent.update_norm_test()
+         # print('self.agent.g_norm.mean: ', self.agent.g_norm.mean)
+         self.agent.g_norm.std = self.agent.g_norm_v.numpy()
+         self.agent.g_norm.mean = self.agent.g_norm_mean.numpy()
+         self.agent.o_norm.std = self.agent.o_norm_v.numpy()
+         self.agent.o_norm.mean = self.agent.o_norm_mean.numpy()
+         # print('self.agent.g_norm.mean: ', self.agent.g_norm.mean)
+         # exit()
+
+         '''
+         self.agent.depth_norm.std = self.agent.d_norm_v.numpy()
+         self.agent.depth_norm.mean = self.agent.d_norm_mean.numpy()
+         # print(self.agent.g_norm_v)
+         '''
+         self.agent.cuda()
+         self.agent.eval()
+
+         opts.device = 'cuda:0'
+         self.v_model = vismodel(opts)
+         ckpt = torch.load(vmodel_file, map_location=opts.device)
+         self.v_model.load_state_dict(ckpt['state_dict'])
+         self.v_model.to(opts.device)
+         self.v_model.eval()
+
+     def convert_disparity_to_depth(self, disparity, baseline, focal_length):
+         depth = baseline * focal_length / disparity
+         return depth
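
With the calibration stored in __init__ (baseline 0.0055 m, left focal length about 916.37 px), the conversion above works out as, for example:

    # depth [m] = baseline [m] * focal_length [px] / disparity [px]
    baseline, focal = 0.005500, 916.367081
    disparity = 50.0                       # pixels
    depth = baseline * focal / disparity   # ~= 0.1008 m, roughly 10 cm in front of the camera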
+
+
+     def _get_depth(self, limg, rimg):
+         # input image should be RGB (Image.open().convert('RGB')); numpy.array
+         '''
+         img = np.array(Image.open(imfile)).astype(np.uint8)
+         img = torch.from_numpy(img).permute(2, 0, 1).float()
+         return img[None].to(DEVICE)
+         '''
+         limg = torch.from_numpy(limg).permute(2, 0, 1).float().to(self.device).unsqueeze(0)
+         rimg = torch.from_numpy(rimg).permute(2, 0, 1).float().to(self.device).unsqueeze(0)
+
+         with torch.no_grad():
+             # print(limg.ndim)
+             padder = InputPadder(limg.shape, divis_by=32)
+             image1, image2 = padder.pad(limg, rimg)
+             disp = self.depth_model(image1, image2, iters=32, test_mode=True)
+             disp = disp.cpu().numpy()
+
+             disp = padder.unpad(disp).squeeze()
+             depth_map = self.convert_disparity_to_depth(disp, self.calibration_data['baseline'], self.calibration_data['focal_length_left'])
+         # return disp
+         return depth_map
+
+     def _load_fastsam(self, model_path="./FastSAM/weights/FastSAM-x.pt"):
+         self.seg_model = FastSAM(model_path)
+
+     def _seg_with_fastsam(self, input, object_point):
+         point_prompt = str([object_point, [200, 200]])
+         point_prompt = ast.literal_eval(point_prompt)
+         point_label = ast.literal_eval("[1,0]")
+         everything_results = self.seg_model(
+             input,
+             device=self.device,
+             retina_masks=True,
+             imgsz=608,
+             conf=0.25,
+             iou=0.7
+         )
+
+         prompt_process = FastSAMPrompt(input, everything_results, device=self.device)
+         ann = prompt_process.point_prompt(
+             points=point_prompt, pointlabel=point_label
+         )
+
+         return ann[0]
+
+     def _seg_with_red(self, grid_RGB):
+         # input image RGB
+         grid_HSV = cv2.cvtColor(grid_RGB, cv2.COLOR_RGB2HSV)
+
+         # H, S, V range 1:
+         lower1 = np.array([0, 59, 25])
+         upper1 = np.array([20, 255, 255])
+         mask1 = cv2.inRange(grid_HSV, lower1, upper1)  # mask: binary
+
+         # H, S, V range 2:
+         # lower2 = np.array([156, 43, 46])
+         # upper2 = np.array([180, 255, 255])
+         # mask2 = cv2.inRange(grid_HSV, lower2, upper2)
+
+         mask3 = mask1  # + mask2
+
+         return mask3
+
+     def _get_visual_state(self, seg, depth, robot_pos, robot_rot, jaw, goal):
+         seg_d = np.concatenate([seg.reshape(1, self.img_size[0], self.img_size[1]),
+                                 depth.reshape(1, self.img_size[0], self.img_size[1])], axis=0)
+
+         inputs = torch.tensor(seg_d).unsqueeze(0).float().to(self.device)
+         robot_pos = torch.tensor(robot_pos).to(self.device)
+         robot_rot = torch.tensor(robot_rot).to(self.device)
+         jaw = torch.tensor(jaw).to(self.device)
+         goal = torch.tensor(goal).to(self.device)
+
+         with torch.no_grad():
+             # print(inputs.shape)
+             v_output = self.agent.v_processor(inputs).squeeze()
+
+         waypoint_pos_rot = v_output[3:]
+
+         return waypoint_pos_rot[:3].cpu().data.numpy().copy(), waypoint_pos_rot[3:].cpu().data.numpy().copy()
+
+     def _get_action(self, seg, depth, robot_pos, robot_rot, ecm_wz, goal):
+         # the pos should be in ecm space
+         '''
+         input: seg (h,w); depth (h,w); robot_pos 3; robot_rot 3; ecm_wz 1; goal 3
+         '''
+         # depth = self.agent.depth_norm.normalize(depth.reshape(self.img_size*self.img_size), device=self.device).reshape(self.img_size, self.img_size)
+         # plt.imsave('test_record/pred_depth_norm_{}.png'.format(count), depth)
+
+         # image = self.img_transform({'image': rgb})['image']
+
+         seg = torch.from_numpy(seg).to("cuda:0").float()
+         depth = torch.from_numpy(depth).to("cuda:0").float()
+
+         robot_pos = torch.tensor(robot_pos).to(self.device)
+         robot_rot = torch.tensor(robot_rot).to(self.device)
+
+         # jaw = torch.tensor(jaw).to(self.device)  # NOTE: `jaw` is not a parameter of this method (the caller passes it as ecm_wz); the original line raised a NameError and the value was unused
+         goal = torch.tensor(goal).to(self.device)
+
+         with torch.no_grad():
+             v_output = self.v_model.get_obs(seg.unsqueeze(0), depth.unsqueeze(0))[0]
+             assert v_output.shape == (2,)
+
+             o_new = torch.cat([
+                 robot_pos, robot_rot, torch.tensor([0.0, 0.0]),
+                 v_output, ecm_wz
+             ])
+             print('o_new: ', o_new)
+             o_norm = self.agent.o_norm.normalize(o_new, device=self.device)
+
+             g_norm = self.agent.g_norm.normalize(goal, device=self.device)
+
+             input_tensor = torch.cat((o_norm, g_norm), axis=0).to(torch.float32)
+
+             action = self.agent.actor(input_tensor).cpu().data.numpy().flatten()
+         return action
+
+     def get_euler_from_matrix(self, mat):
+         """
+         :param mat: rotation matrix (3*3)
+         :return: rotation in 'xyz' euler
+         """
+         rot = R.from_matrix(mat)
+         return rot.as_euler('xyz', degrees=False)
+
+     def get_matrix_from_euler(self, ori):
+         """
+         :param ori: rotation in 'xyz' euler
+         :return: rotation matrix (3*3)
+         """
+         rot = R.from_euler('xyz', ori)
+         return rot.as_matrix()
+
+     def wrap_angle(self, theta):
+         return (theta + np.pi) % (2 * np.pi) - np.pi
+
+     def convert_pos(self, pos, matrix):
+         '''
+         transform a 3D position with a 4x4 homogeneous matrix
+         (e.g. camera/ecm frame -> PSM base/rcm frame)
+         '''
+         return np.matmul(matrix[:3, :3], pos) + matrix[:3, 3]
+         # bPSM_T_j6 = self.get_bPSM_T_j6(joint)
+         # new_ma = matrix @ bPSM_T_j6
+         # a = np.matmul(new_ma[:3, :3], pos) + new_ma[:3, 3]
+         # return a
+
+     def convert_rot(self, euler_angles, matrix):
+         # Convert Euler angles to rotation matrix
+         # return: matrix
+         roll, pitch, yaw = euler_angles
+         R_x = np.array([[1, 0, 0], [0, np.cos(roll), -np.sin(roll)], [0, np.sin(roll), np.cos(roll)]])
+         R_y = np.array([[np.cos(pitch), 0, np.sin(pitch)], [0, 1, 0], [-np.sin(pitch), 0, np.cos(pitch)]])
+         R_z = np.array([[np.cos(yaw), -np.sin(yaw), 0], [np.sin(yaw), np.cos(yaw), 0], [0, 0, 1]])
+         rotation_matrix = np.matmul(R_z, np.matmul(R_y, R_x))
+
+         # Invert the extrinsic matrix
+         extrinsic_matrix_inv = np.linalg.inv(matrix)
+
+         # Extract the rotation part from the inverted extrinsic matrix
+         rotation_matrix_inv = extrinsic_matrix_inv[:3, :3]
+
+         # Perform the rotation
+         position_rotated = np.matmul(rotation_matrix_inv, rotation_matrix)
+
+         return position_rotated
+
+     def get_bPSM_T_j6(self, joint_value):
+         LRcc = 0.4318
+         LTool = 0.4162
+         LPitch2Yaw = 0.0091
+         #                                alpha     , a         , theta                      , d
+         base_T_j1 = transf_DH_modified( np.pi*0.5 , 0.        , joint_value[0] + np.pi*0.5 , 0. )
+         j1_T_j2   = transf_DH_modified(-np.pi*0.5 , 0.        , joint_value[1] - np.pi*0.5 , 0. )
+         j2_T_j3   = transf_DH_modified( np.pi*0.5 , 0.        , 0.0                        , joint_value[2] - LRcc )
+         j3_T_j4   = transf_DH_modified( 0.        , 0.        , joint_value[3]             , LTool )
+         j4_T_j5   = transf_DH_modified(-np.pi*0.5 , 0.        , joint_value[4] - np.pi*0.5 , 0. )
+         j5_T_j6   = transf_DH_modified(-np.pi*0.5 , LPitch2Yaw, joint_value[5] - np.pi*0.5 , 0. )
+
+         j6_T_j6f = np.array([[ 0.0, -1.0, 0.0, 0.0],  # offset from file `psm-pro-grasp.json`
+                              [ 0.0,  0.0, 1.0, 0.0],
+                              [-1.0,  0.0, 0.0, 0.0],
+                              [ 0.0,  0.0, 0.0, 1.0]])
+
+         bPSM_T_j2 = np.matmul(base_T_j1, j1_T_j2)
+         bPSM_T_j3 = np.matmul(bPSM_T_j2, j2_T_j3)
+         bPSM_T_j4 = np.matmul(bPSM_T_j3, j3_T_j4)
+         bPSM_T_j5 = np.matmul(bPSM_T_j4, j4_T_j5)
+         bPSM_T_j6 = np.matmul(bPSM_T_j5, j5_T_j6)
+         bPSM_T_j6f = np.matmul(bPSM_T_j6, j6_T_j6f)  # to make the pose the same as the one in the dVRK
+         return bPSM_T_j6f
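
A minimal sketch of evaluating the modified-DH chain above for one joint sample; the joint values are illustrative (radians for the revolute joints, metres for the insertion joint), not a calibrated configuration:

    q = [0.0, 0.0, 0.12, 0.0, 0.0, 0.0]     # hypothetical PSM joint vector
    player = VisPlayer()                     # the heavy model loading is commented out in __init__
    bPSM_T_tip = player.get_bPSM_T_j6(q)
    print(np.round(bPSM_T_tip[:3, 3], 4))    # tool position in the PSM base frame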
+
+     def rcm2tip(self, rcm_action):
+         return np.matmul(rcm_action, self.tool_T_tip)
+
+     def _set_action(self, action, robot_pos, rot):
+         '''
+         robot_pos in cam coordinate
+         robot_rot in rcm; matrix
+         '''
+         action[:3] *= 0.01 * self.scaling
+         # action[1] = action[1] * -1
+         # print(action)
+
+         ecm_pos = robot_pos + action[:3]
+         print('aft robot pos tip ecm: ', ecm_pos)
+         psm_pose = np.zeros((4, 4))
+
+         psm_pose[3, 3] = 1
+         psm_pose[:3, :3] = rot
+         # print('ecm pos: ', ecm_pos)
+         rcm_pos = self.convert_pos(ecm_pos, basePSM_T_cam)
+         print('aft robot pos tip rcm: ', rcm_pos)
+         psm_pose[:3, 3] = rcm_pos
+
+         # rcm_action = self.rcm2tip(psm_pose)
+         # return rcm_action
+
+         return psm_pose
+
+     '''
+     def _set_action(self, action, rot, robot_pos):
+         """
+         delta_position (6), delta_theta (1) and open/close the gripper (1)
+         in the ecm coordinate system
+         input: robot_rot, robot_pos in ecm
+         """
+         # TODO: need to ensure to use this scaling
+         action[:3] *= 0.01 * self.scaling  # position, limit maximum change in position
+         # ecm_pose = self.rcm2ecm(psm_pose)
+         # ecm_pos = self.convert_pos(robot_pos, cam_T_basePSM)
+         ecm_pos = robot_pos + action[:3]
+         # ecm_pos[2] = ecm_pos[2] - 2*action[2]
+
+         # ecm_pose[:3,3] = ecm_pose[:3,3] + action[:3]
+         # rot = self.get_euler_from_matrix(ecm_pose[:3,:3])
+         # rot = self.convert_rot(robot_rot, cam_T_basePSM)
+         # rot = self.get_euler_from_matrix(robot_rot)
+
+         # action[3:6] *= np.deg2rad(20)
+         # rot = (self.wrap_angle(rot[0]+action[3]), self.wrap_angle(rot[1]+action[4]), self.wrap_angle(rot[2]+action[5]))
+         # rcm_action_matrix = self.convert_rot(rot, basePSM_T_cam)  # ecm2rcm rotation
+
+         rcm_pos = self.convert_pos(ecm_pos, basePSM_T_cam)  # ecm2rcm position
+
+         rot_matrix = self.get_matrix_from_euler(rot)
+         # rcm_action_matrix = self.convert_rot(ecm_pose)  # self.ecm2rcm(ecm_pose)
+
+         # rcm_action_eul = self.get_euler_from_matrix(rcm_action_matrix)
+         # rcm_action_eul = (self.rcm_init_eul[0], self.rcm_init_eul[1], rcm_action_eul[2])
+         # rcm_action_matrix = self.get_matrix_from_euler(rcm_action_eul)
+
+         psm_pose = np.zeros((4, 4))
+         psm_pose[3, 3] = 1
+         psm_pose[:3, :3] = rot_matrix
+         psm_pose[:3, 3] = rcm_pos
+
+         # TODO: use get_bPSM_T_j6 function
+         rcm_action = self.rcm2tip(psm_pose)
+         rcm_action = psm_pose
+
+         return rcm_action
+     '''
+
+     def convert_point_to_camera_axis(self, x, y, depth, intrinsics_matrix):
+         '''
+         # Example usage
+         x = 100
+         y = 200
+         depth = 5.0
+         intrinsics_matrix = np.array([[500, 0, 320], [0, 500, 240], [0, 0, 1]])
+
+         xc, yc, zc = convert_point_to_camera_axis(x, y, depth, intrinsics_matrix)
+         print(f"Camera axis coordinates: xc={xc}, yc={yc}, zc={zc}")
+         '''
+         # Extract camera intrinsics matrix components
+         fx, fy, cx, cy = intrinsics_matrix[0, 0], intrinsics_matrix[1, 1], intrinsics_matrix[0, 2], intrinsics_matrix[1, 2]
+
+         # Normalize pixel coordinates
+         xn = (x - cx) / fx
+         yn = (y - cy) / fy
+
+         # Convert to camera axis coordinates
+         xc = xn * depth
+         yc = yn * depth
+         zc = depth
+
+         return np.array([xc, yc, zc])
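
Putting the helpers together: a hedged sketch of lifting one pixel of a predicted depth map into the camera frame and then into the PSM base frame, assuming `player` is a VisPlayer instance and `depth_map` comes from _get_depth(); the pixel location is arbitrary and the intrinsics are the ones listed in init_run:

    K = np.array([[916.367081, 1.849829, 381.430393],
                  [0.000000, 918.730361, 322.704614],
                  [0.000000, 0.000000, 1.000000]])
    u, v = 200, 150                                       # pixel of interest (illustrative)
    z = depth_map[v, u]                                   # metric depth at that pixel
    pt_cam = player.convert_point_to_camera_axis(u, v, z, K)
    pt_psm = player.convert_pos(pt_cam, basePSM_T_cam)    # camera frame -> PSM base frame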
+
+     def goal_distance(self, goal_a, goal_b):
+         assert goal_a.shape == goal_b.shape
+         return np.linalg.norm(goal_a - goal_b, axis=-1)
+
+     def is_success(self, curr_pos, desired_goal):
+         d = self.goal_distance(curr_pos, desired_goal)
+         d3 = np.abs(curr_pos[2] - desired_goal[2])
+         print('distance: ', d)
+         print('distance z-axis: ', d3)
+         if d3 < 0.003:
+             return True
+         return (d < self.threshold and d3 < 0.003).astype(np.float32)
+
+     def init_run(self):
+         intrinsics_matrix = np.array([[916.367081, 1.849829, 381.430393],
+                                       [0.000000, 918.730361, 322.704614],
+                                       [0.000000, 0.000000, 1.000000]])
+         self.ecm = dvrk.ecm('ECM')
+         self._finished = False
+         # player = VisPlayer()
+
+         self._load_depth_model()
+         # player._load_dam()
+         self._load_policy_model(vmodel_file='/home/kj/kj_demo/active/pretrained_models/best_model.pt',
+                                 filepath='/home/kj/kj_demo/active/pretrained_models/s80_DDPG_demo0_traj_best_kj.pt')
+         self._load_fastsam()
+
+         self.cap_0 = VideoCapture("/dev/video8")  # left 5.23
+         self.cap_2 = VideoCapture("/dev/video6")  # right 5.23
+
+         # TODO the goal in scaled image vs. goal in simulator?
+         for i in range(10):
+             frame1 = self.cap_0.read()
+             frame2 = self.cap_2.read()
+
+         self.fs = cv2.FileStorage("/home/kj/ar/EndoscopeCalibration/calibration_new.yaml", cv2.FILE_STORAGE_READ)
+
+         frame1, frame2 = my_rectify(frame1, frame2, self.fs)
+
+         frame1 = cv2.resize(frame1, self.img_size)
+         frame2 = cv2.resize(frame2, self.img_size)
+
+         point = SetPoints("Goal Selection", frame1)
+
+         self.object_point = point[0]
+
+         frame1 = cv2.cvtColor(frame1, cv2.COLOR_BGR2RGB)
+         frame2 = cv2.cvtColor(frame2, cv2.COLOR_BGR2RGB)
+
+         goal = np.array([0.0, 0.0, 0.0])
+
+         self.goal = goal
+
+         self.count = 0
+         ####### Setup the simulator for ECM motion planning
+         self.sim_ecm = Sim_ECM('human')
+         self.sim_ecm.reset_env()
+         ## get the current dvrk joint position and sync it to the simulator
+         current_dvrk_jp = self.ecm.measured_jp()
+         self.sim_ecm.ecm.reset_joint(np.array(current_dvrk_jp))
+
+     def run_step(self):
+         if self._finished:
+             return True
+
+         # time.sleep(.5)
+         self.count += 1
+         print("--------step {}----------".format(self.count))
+         # time.sleep(2)
+
+         frame1 = self.cap_0.read()
+         frame2 = self.cap_2.read()
+
+         # fs = cv2.FileStorage("/home/kj/ar/EndoscopeCalibration/calibration_new.yaml", cv2.FILE_STORAGE_READ)
+
+         frame1, frame2 = my_rectify(frame1, frame2, self.fs)
+
+         frame1 = cv2.resize(frame1, self.img_size)
+         frame2 = cv2.resize(frame2, self.img_size)
+
+         # frame1 = resize_with_pad(frame1, player.img_size, player.img_size)
+         # frame2 = resize_with_pad(frame2, player.img_size, player.img_size)
+
+         # frame1 = crop_img(frame1)
+         # frame2 = crop_img(frame2)
+
+         frame1 = cv2.cvtColor(frame1, cv2.COLOR_BGR2RGB)
+         frame2 = cv2.cvtColor(frame2, cv2.COLOR_BGR2RGB)
+
+         plt.imsave('test_record/frame1_{}.png'.format(self.count), frame1)
+         plt.imsave('test_record/frame2_{}.png'.format(self.count), frame2)
+
+         # 1. get depth from left and right image
+         # depth = player._get_depth(frame1, frame2)
+         # depth = player._get_depth_with_dam(frame1)/10 + 0.025
+         # depth = depth / player.scaling
+         depth = self._get_depth(frame1, frame2) + 0.09
+         # print(frame1.shape)
+         # print('depth shape: ', depth.shape)
+         # np.save('/home/kj/ar/GauzeRetrievel/test_record/depth.npy', depth)
+         # print(depth[self.object_point[0]][self.object_point[1]])
+         # print(depth)
+         # print(depth.mean())
+         # print(depth.std())
+         plt.imsave('test_record/pred_depth_{}.png'.format(self.count), depth)
+
+         # 2. segment the target around the selected point
+         seg = self._seg_with_fastsam(frame1, self.object_point)
+         # print(seg)
+         seg = np.array(seg == True).astype(int)
+
+         np.save('test_record/seg.npy', seg)
+         plt.imsave('test_record/seg_{}.png'.format(self.count), seg)
+         # seg = np.load('/home/kj/ar/peg_transfer/test_record/seg_from_depth.npy')
+         print("finish seg")
+
+         # 3. get robot pose
+         # an example of the state
+         # PSM1_rotate = PyKDL.Rotation(transform[0,0], transform[0,1], transform[0,2],
+         #                              transform[1,0], transform[1,1], transform[1,2],
+         #                              transform[2,0], transform[2,1], transform[2,2])
+         # PSM1_pose = PyKDL.Vector(transform[0,-1], transform[1,-1], transform[2,-1])
+         # goal = PyKDL.Frame(PSM1_rotate, PSM1_pose)
+         # p.move_cp(goal).wait()
+
+         # NOTE: self.p (the PSM arm handle) is never assigned in this file; it is assumed to be set up elsewhere
+         robot_pose = self.p.measured_cp()
+         robot_pos = robot_pose.p
+         print("pre action pos rcm: ", robot_pos)
+         robot_pos = np.array([robot_pos[0], robot_pos[1], robot_pos[2]])
+         # robot_pos = player.rcm2tip(robot_pos)
+         pre_robot_pos = np.array([robot_pos[0], robot_pos[1], robot_pos[2]])
+         # can be replaced with robot_pose.M.GetRPY()
+         # start
+         transform_2 = robot_pose.M
+         np_m = np.array([[transform_2[0, 0], transform_2[0, 1], transform_2[0, 2]],
+                          [transform_2[1, 0], transform_2[1, 1], transform_2[1, 2]],
+                          [transform_2[2, 0], transform_2[2, 1], transform_2[2, 2]]])
+
+         tip_psm_pose = np.zeros((4, 4))
+
+         tip_psm_pose[3, 3] = 1
+         tip_psm_pose[:3, :3] = np_m
+         tip_psm_pose[:3, 3] = robot_pos
+         # print('tip_psm_pose before: ', tip_psm_pose)
+         tip_psm_pose = self.rcm2tip(tip_psm_pose)
+         # print('tip_psm_pose aft: ', tip_psm_pose)
+
+         np_m = tip_psm_pose[:3, :3]
+         robot_pos = tip_psm_pose[:3, 3]
+         # print("pre action pos tip rcm: ", robot_pos)
+
+         # robot_rot = np_m
+         robot_rot = self.get_euler_from_matrix(np_m)
+         robot_rot = self.convert_rot(robot_rot, cam_T_basePSM)
+         robot_rot = self.get_euler_from_matrix(robot_rot)
+         robot_pos = self.convert_pos(robot_pos, cam_T_basePSM)
+         print("pre action pos tip ecm: ", robot_pos)
+         # end
+
+         jaw = self.p.jaw.measured_jp()
+         action = self._get_action(seg, depth, robot_pos, robot_rot, jaw, self.goal)
+         print("finish get action")
+         print("action: ", action)
+         # obtained_object_position = player.convert_pos(action, basePSM_T_cam)
+         # print('obtained_object_position: ', obtained_object_position)
+         # PSM2_pose = PyKDL.Vector(obtained_object_position[0], obtained_object_position[1], obtained_object_position[2])
+
+         # 4. action -> state
+         state = self._set_action(action, robot_pos, np_m)
+         print("finish set action")
+         print("state: ", state)
+         # z_delta = state[2,-1] - pre_robot_pos[2]
+         # state[2,-1] = pre_robot_pos[2] - z_delta
+
+         # 5. move
+         PSM2_rotate = PyKDL.Rotation(state[0, 0], state[0, 1], state[0, 2],
+                                      state[1, 0], state[1, 1], state[1, 2],
+                                      state[2, 0], state[2, 1], state[2, 2])
+
+         PSM2_pose = PyKDL.Vector(state[0, -1], state[1, -1], state[2, -1])
+         curr_robot_pos = np.array([state[0, -1], state[1, -1], state[2, -1]])
+
+         move_goal = PyKDL.Frame(PSM2_rotate, PSM2_pose)
+         move_goal = PyKDL.Frame(robot_pose.M, PSM2_pose)  # keep the measured orientation, command only the position
+         # if count > 7:
+         #     break
+
+         self.p.move_cp(move_goal).wait()
+         print("finish move")
+         # NOTE: self.rcm_goal is never set in this file (init_run only sets self.goal)
+         print('is success: ', self.is_success(curr_robot_pos, self.rcm_goal))
+         if self.is_success(curr_robot_pos, self.rcm_goal) or self.count > 9:
+             self._finished = True
+
+         return self._finished
+         # unreachable, kept from the original:
+         '''
+         if action[3] < 0:
+             # close jaw
+             p.jaw.move_jp(np.array(-0.5)).wait()
+         else:
+             # open jaw
+             p.jaw.move_jp(np.array(0.5)).wait()
+         '''
+         # if cv2.waitKey(1) == 27:
+         #     break
+
+     def record_video(self, out1, out2):
+         for i in range(10):
+             frame1 = self.cap_0.read()
+             frame2 = self.cap_2.read()
+             out1.write(frame1)
+             out2.write(frame2)
+         return
+
+
+ if __name__ == "__main__":
+     # lock = threading.Lock()
+
+     player = VisPlayer()
+     player.init_run()
+     finished = False
+     while not finished:
+         # player.record_video
+         finished = player.run_step()
+
+     player.cap_0.release()
+     player.cap_2.release()