Onearth committed
Commit 742ef47
1 parent: fb408ea

Upload 3 files

Files changed (3)
  1. ecm.py +207 -0
  2. ecm_active_track_v1.py +379 -0
  3. ecm_env.py +179 -0
ecm.py ADDED
@@ -0,0 +1,207 @@
# Author(s): Jiaqi Xu
# Created on: 2020-11

"""
ECM wrapper
Refer to:
https://github.com/jhu-dvrk/dvrk-ros/blob/master/dvrk_python/src/dvrk/ecm.py
https://github.com/jhu-dvrk/dvrk-ros/blob/7b3d48ca164755ccfc88028e15baa9fbf7aa1360/dvrk_python/src/dvrk/ecm.py
https://github.com/jhu-dvrk/sawIntuitiveResearchKit/blob/master/share/kinematic/ecm.json
https://github.com/jhu-dvrk/sawIntuitiveResearchKit/blob/4a8b4817ee7404b3183dfba269c0efe5885b41c2/share/arm/ecm-straight.json
"""
import os
import numpy as np
import pybullet as p

from surrol.robots.arm import Arm
from surrol.const import ASSET_DIR_PATH
from surrol.utils.pybullet_utils import (
    get_joint_positions,
    get_link_pose,
    render_image
)

# Rendering width and height
RENDER_HEIGHT = 256
RENDER_WIDTH = 256
FoV = 60

LINKS = (
    'ecm_base_link', 'ecm_yaw_link', 'ecm_pitch_end_link',  # -1, 0, 1
    'ecm_main_insertion_link', 'ecm_tool_link',              # 2, 3
    'ecm_end_link',                                          # 4
    'ecm_tip_link',                                          # 5
    'ecm_pitch_front_link',                                  # 6
    'ecm_pitch_bottom_link', 'ecm_pitch_top_link',           # 7, 8
    'ecm_pitch_back_link',                                   # 9
    'ecm_remote_center_link',                                # 10
)

# tool-tip offset; refer to the kinematic .json
tool_T_tip = np.array([[0.0, 1.0, 0.0, 0.0],
                       [-1.0, 0.0, 0.0, 0.0],
                       [0.0, 0.0, 1.0, 0.0],
                       [0.0, 0.0, 0.0, 1.0]])

# Joint limits. No limits in the .json. TODO: dVRK config modified
TOOL_JOINT_LIMIT = {
    'lower': np.deg2rad([-90.0, -45.0, 0.0, -np.inf]),  # not sure about the last joint
    'upper': np.deg2rad([ 90.0, 66.4, 254.0, np.inf]),
}
TOOL_JOINT_LIMIT['lower'][2] = -0.01  # allow a small tolerance
TOOL_JOINT_LIMIT['upper'][2] = 0.254  # prismatic joint (m); not sure, from AMBF
# [-1.57079633, -0.78539816, 0.   , -1.57079633]
# [ 1.57079633,  1.15889862, 0.254,  1.57079633]

class Ecm(Arm):
    NAME = 'ECM'
    URDF_PATH = os.path.join(ASSET_DIR_PATH, 'ecm/ecm.urdf')
    DoF = 4  # 4-DoF arm
    JOINT_TYPES = ('R', 'R', 'P', 'R')
    EEF_LINK_INDEX = 4   # EEF link index; one redundant joint for inverse kinematics
    TIP_LINK_INDEX = 5   # redundant joint for easier camera matrix computation
    RCM_LINK_INDEX = 10  # RCM link index
    # D-H parameters
    A = np.array([0.0, 0.0, 0.0, 0.0])
    ALPHA = np.array([np.pi / 2, -np.pi / 2, np.pi / 2, 0.0])
    D = np.array([0.0, 0.0, -0.3822, 0.3829])
    THETA = np.array([np.pi / 2, -np.pi / 2, 0.0, 0.0])

    def __init__(self, pos=(0., 0., 1.), orn=(0., 0., 0., 1.),
                 scaling=1.):
        super(Ecm, self).__init__(self.URDF_PATH, pos, orn,
                                  TOOL_JOINT_LIMIT, tool_T_tip, scaling)

        # camera control related parameters
        self.view_matrix = None
        self.proj_matrix = None
        self._homo_delta = np.zeros((2, 1))
        self._wz = 0

        # b: rcm, e: eef, c: camera
        pos_eef, orn_eef = get_link_pose(self.body, self.EEF_LINK_INDEX)
        pos_cam, orn_cam = get_link_pose(self.body, self.TIP_LINK_INDEX)
        self._tip_offset = np.linalg.norm(np.array(pos_eef) - np.array(pos_cam))  # TODO
        wRe = np.array(p.getMatrixFromQuaternion(orn_eef)).reshape((3, 3))
        wRc = np.array(p.getMatrixFromQuaternion(orn_cam)).reshape((3, 3))
        self._wRc0 = wRc.copy()  # initial rotation matrix
        self._eRc = np.matmul(wRe.T, wRc)

    def _get_joint_positions_all(self, abs_input):
        """ Joint positions for all joints, taking the parallel-mechanism constraint and the
        other redundant joints into account.
        """
        positions = get_joint_positions(self.body, self.joints)
        joint_positions = [
            abs_input[0], abs_input[1],                 # 0, 1
            abs_input[2] * self.scaling, abs_input[3],  # 2, 3
            positions[4], positions[5],                 # 4 (0.0), 5 (0.0)
            abs_input[1],                               # 6
            -abs_input[1], -abs_input[1],               # 7, 8
            abs_input[1],                               # 9
            positions[10],                              # 10 (0.0)
        ]
        return joint_positions

    def cVc_to_dq(self, cVc: np.ndarray) -> np.ndarray:
        """
        Convert the camera velocity expressed in its own frame (cVc) into the joint velocity dq.
        """
        cVc = cVc.reshape((3, 1))

        # restrict the step size; needs tuning
        if np.abs(cVc).max() > 0.01:
            cVc = cVc / np.abs(cVc).max() * 0.01

        # forward kinematics
        q = self.get_current_joint_position()
        bRe = self.robot.fkine(q).R  # use the robotics toolbox instead of PyBullet; no tool_tip_offset
        _, orn_cam = get_link_pose(self.body, self.TIP_LINK_INDEX)
        wRc = np.array(p.getMatrixFromQuaternion(orn_cam)).reshape((3, 3))

        # relative rotation between the initial and current camera frames
        R1, R2 = self._wRc0, wRc
        x = R1[0, 0] * R2[1, 0] - R1[1, 0] * R2[0, 0] + R1[0, 1] * R2[1, 1] - R1[1, 1] * R2[0, 1]
        y = R1[0, 0] * R2[1, 1] - R1[1, 0] * R2[0, 1] - R1[0, 1] * R2[1, 0] + R1[1, 1] * R2[0, 0]
        dz = np.arctan(x / y)
        k1, k2 = 25.0, 0.1
        self._wz = k1 * dz * np.exp(-k2 * np.linalg.norm(self._homo_delta))
        # print(' -> x: {:.4f}, y: {:.4f}, dz: {:.4f}, wz: {:.4f}'.format(x, y, dz, self._wz))

        # pseudo-inverse solution
        d = self._tip_offset
        Jd = np.matmul(self._eRc,
                       np.array([[0, 0, d, 0],
                                 [0, -d, 0, 0],
                                 [1, 0, 0, 0]]))
        Je = np.matmul(self._eRc,
                       np.array([[0, 1, 0, 0],
                                 [0, 0, 1, 0],
                                 [0, 0, 0, 1]]))

        eVe4 = np.dot(np.linalg.pinv(Jd), cVc) \
            + np.dot(np.dot((np.eye(4) - np.dot(np.linalg.pinv(Jd), Jd)), np.linalg.pinv(Je)),
                     np.array([[0], [0], [self._wz]]))
        eVe = np.zeros((6, 1))
        eVe[2: 6] = eVe4[0: 4]
        Q = np.zeros((6, 6))
        Q[0: 3, 0: 3] = -bRe
        Q[3: 6, 3: 6] = -bRe
        bVe = np.dot(Q, eVe)

        # map the end-effector twist to joint velocities via the spatial Jacobian
        bJe = self.get_jacobian_spatial()
        dq = np.dot(np.linalg.pinv(bJe), bVe)
        # print(" -> cVc: {}, q: {}, dq: {}".format(list(np.round(cVc.flatten(), 4)), q, list(dq.flatten())))
        return dq.flatten()

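    # The least-squares step in cVc_to_dq above is a resolved-rate scheme:
    #     eVe4 = pinv(Jd) @ cVc + (I - pinv(Jd) @ Jd) @ pinv(Je) @ [0, 0, wz]^T
    # The first term tracks the commanded camera translation; the second term, driven by the
    # orientation-mismatch signal wz, lies in the null space of Jd, so it cannot disturb the
    # primary velocity-tracking task. The 6x6 block-diagonal matrix Q (built from -bRe blocks)
    # then expresses the resulting twist in the base frame before the spatial-Jacobian
    # pseudo-inverse converts it into the joint velocities dq.
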
    def render_image(self, width=RENDER_WIDTH, height=RENDER_HEIGHT):
        pos_eef, orn_eef = get_link_pose(self.body, self.EEF_LINK_INDEX)
        pos_tip = get_link_pose(self.body, self.TIP_LINK_INDEX)[0]
        mat_eef = np.array(p.getMatrixFromQuaternion(orn_eef)).reshape((3, 3))

        # TODO: need to check the up vector
        self.view_matrix = p.computeViewMatrix(cameraEyePosition=pos_eef,
                                               cameraTargetPosition=pos_tip,
                                               cameraUpVector=mat_eef[:, 0])
        self.proj_matrix = p.computeProjectionMatrixFOV(fov=FoV,
                                                        aspect=float(width) / height,
                                                        nearVal=0.01,
                                                        farVal=10.0)

        rgb_array, mask, depth = render_image(width, height,
                                              self.view_matrix, self.proj_matrix)
        return rgb_array, mask, depth

    def get_centroid_proj(self, pos) -> np.ndarray:
        """
        Compute the object position in the camera NDC space.
        Refer to OpenGL.
        :param pos: object position in the world frame.
        """
        assert len(pos) in (3, 4)
        if len(pos) == 3:
            # homogeneous coordinates: (x, y, z) -> (x, y, z, w)
            pos_obj = np.ones((4, 1))
            pos_obj[: 3, 0] = pos
        else:
            pos_obj = np.array(pos).reshape((4, 1))

        view_matrix = np.array(self.view_matrix).reshape(4, 4).T
        proj_matrix = np.array(self.proj_matrix).reshape(4, 4).T
        # project into clip space, then normalize by w to get NDC
        pos_cam = np.dot(proj_matrix, np.dot(view_matrix, pos_obj))
        pos_cam /= pos_cam[3, 0]
        return np.array([pos_cam[0][0], -pos_cam[1][0]])  # be consistent with get_centroid

    @property
    def homo_delta(self):
        return self._homo_delta

    @homo_delta.setter
    def homo_delta(self, val: np.ndarray):
        self._homo_delta = val

    @property
    def wz(self):
        return self._wz
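
A minimal usage sketch (not part of the committed file): it assumes a live PyBullet session, the SurRoL asset tree on disk, and the dmove_joint method provided by the Arm base class, as it is used in ecm_env.py below.

import numpy as np
import pybullet as p
from surrol.robots.ecm import Ecm

p.connect(p.DIRECT)  # headless physics server
ecm = Ecm(pos=(0.15, 0., 0.8), orn=p.getQuaternionFromEuler((0, np.pi / 6, 0)))

for _ in range(100):
    # small camera-frame velocity command; cVc_to_dq maps it to joint velocities
    dq = ecm.cVc_to_dq(np.array([0.001, 0., 0.]))
    ecm.dmove_joint(0.05 * dq)
    p.stepSimulation()

rgb, mask, depth = ecm.render_image()  # 256x256 RGB, segmentation mask, and depth buffers
print(rgb.shape, depth.shape)
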
ecm_active_track_v1.py ADDED
@@ -0,0 +1,379 @@
import os
import time

import pybullet as p
from surrol.tasks.ecm_env import EcmEnv, goal_distance
from surrol.utils.pybullet_utils import (
    get_body_pose,
)
import random
import cv2
import pickle
from surrol.utils.robotics import (
    get_euler_from_matrix,
    get_matrix_from_euler
)
import torch
from surrol.utils.utils import RGB_COLOR_255, Boundary, Trajectory, get_centroid
from surrol.robots.ecm import RENDER_HEIGHT, RENDER_WIDTH, FoV
from surrol.const import ASSET_DIR_PATH
import numpy as np
from surrol.robots.psm import Psm1, Psm2
import sys
sys.path.append('/home/kejianshi/Desktop/Surgical_Robot/science_robotics/stateregress_back')
sys.path.append('/home/kejianshi/Desktop/Surgical_Robot/science_robotics/stateregress_back/utils')
from general_utils import AttrDict
sys.path.append('/home/kejianshi/Desktop/Surgical_Robot/science_robotics/ar_surrol/surrol_datagen/tasks')
from depth_anything.dpt import DepthAnything
from depth_anything.util.transform import Resize, NormalizeImage, PrepareForNet

from vmodel import vismodel
from config import opts

class ActiveTrack(EcmEnv):
    """
    Active track is not a GoalEnv since the environment changes internally.
    The reward is shaped.
    """
    ACTION_MODE = 'cVc'
    # RCM_ACTION_MODE = 'yaw'
    QPOS_ECM = (0, 0, 0.02, 0)
    WORKSPACE_LIMITS = ((-0.3, 0.6), (-0.4, 0.4), (0.05, 0.05))

    CUBE_NUMBER = 18

    def __init__(self, render_mode=None):
        # to control the step
        self._step = 0
        self.counter = 0
        self.img_list = {}
        super(ActiveTrack, self).__init__(render_mode)

    def step(self, action: np.ndarray):
        obs, reward, done, info = super().step(action)
        centroid = obs['observation'][-3: -1]
        if not (-1.2 < centroid[0] < 1.1 and -1.1 < centroid[1] < 1.1):
            # early stop if the target moves out of view
            done = True
        info['achieved_goal'] = centroid
        return obs, reward, done, info

    def compute_reward(self, achieved_goal: np.ndarray, desired_goal: np.ndarray, info):
        """ Dense reward. """
        centroid, wz = achieved_goal, self.ecm.wz
        d = goal_distance(centroid, desired_goal) / 2
        reward = 1 - (d + np.linalg.norm(wz) * 0.1)  # maximum reward is 1, important for the DDPG baseline
        return reward
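        # e.g. a centroid error of 0.2 (in NDC units) with |wz| = 0.5 gives
        # reward = 1 - (0.2 / 2 + 0.5 * 0.1) = 0.85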

    def _env_setup(self):
        super(ActiveTrack, self)._env_setup()
        opts.device = 'cuda:0'
        self.v_model = vismodel(opts)
        ckpt = torch.load(opts.ckpt_dir, map_location=opts.device)
        self.v_model.load_state_dict(ckpt['state_dict'])
        self.v_model.to(opts.device)
        self.v_model.eval()

        self.use_camera = True

        # robot
        self.ecm.reset_joint(self.QPOS_ECM)
        pos_x = random.uniform(0.18, 0.24)
        pos_y = random.uniform(0.21, 0.24)
        pos_z = random.uniform(0.5, 0.6)
        left_right = random.choice([-1, 1])

        self.POSE_PSM1 = ((pos_x, left_right * pos_y, pos_z),
                          (0, 0, -(90 + left_right * 20) / 180 * np.pi))  # (x: 0.18-0.25, y: 0.21-0.24, z: 0.5)
        self.QPOS_PSM1 = (0, 0, 0.10, 0, 0, 0)
        self.PSM_WORKSPACE_LIMITS = ((0.18 + 0.45, 0.18 + 0.55),
                                     (0.24 - 0.29, 0.24 - 0.19),
                                     (0.5 - 0.1774, 0.5 - 0.1074))
        self.PSM_WORKSPACE_LIMITS = np.asarray(self.PSM_WORKSPACE_LIMITS) \
            + np.array([0., 0., 0.0102]).reshape((3, 1))
        # trajectory
        traj = Trajectory(self.PSM_WORKSPACE_LIMITS, seed=None)
        self.traj = traj
        self.traj.set_step(self._step)
        self.psm1 = Psm1(self.POSE_PSM1[0], p.getQuaternionFromEuler(self.POSE_PSM1[1]),
                         scaling=self.SCALING)
        # both the left and right placements currently use the same initial joint configuration
        self.psm1.move_joint([0.4516922970194888, -0.11590085534517788, 0.1920614431341014,
                              -0.275713630305575, -0.025332969748983816, -0.44957632598600145])
        # target cube
        init_psm_Pose = self.psm1.get_current_position(frame='world')
        # print(init_psm_Pose[:3, 3])
        # exit()
        b = Boundary(self.PSM_WORKSPACE_LIMITS)
        x, y = self.traj.step()
        obj_id = p.loadURDF(os.path.join(ASSET_DIR_PATH, 'cube/cube.urdf'),
                            (init_psm_Pose[0, 3], init_psm_Pose[1, 3], init_psm_Pose[2, 3]),
                            p.getQuaternionFromEuler(np.random.uniform(np.deg2rad([0, 0, -90]),
                                                                       np.deg2rad([0, 0, 90]))),
                            globalScaling=0.001 * self.SCALING)
        # print('psm_eef:', self.psm1.get_joint_number())
        color = RGB_COLOR_255[0]
        p.changeVisualShape(obj_id, -1,
                            rgbaColor=(color[0] / 255, color[1] / 255, color[2] / 255, 0),
                            specularColor=(0.1, 0.1, 0.1))
        self.obj_ids['fixed'].append(obj_id)  # 0 (target)
        self.obj_id = obj_id
        b.add(obj_id, sample=False, min_distance=0.12)
        # self._cid = p.createConstraint(obj_id, -1, -1, -1,
        #                                p.JOINT_FIXED, [0, 0, 0], [0, 0, 0], [x, y, 0.05 * self.SCALING])
        self._cid = p.createConstraint(
            parentBodyUniqueId=self.psm1.body,
            parentLinkIndex=5,
            childBodyUniqueId=self.obj_id,
            childLinkIndex=-1,
            jointType=p.JOINT_FIXED,
            jointAxis=[0, 0, 0],
            parentFramePosition=[0, 0, 0],
            childFramePosition=[0, 0, 0]
        )

        # '''
        # Set up initial env
        # '''
        # self.psm1_eul = np.array(p.getEulerFromQuaternion(
        #     self.psm1.pose_rcm2world(self.psm1.get_current_position(), 'tuple')[1]))  # in the world frame

        # # robot
        # #self.psm1_eul = np.array(p.getEulerFromQuaternion(
        # #    self.psm1.pose_rcm2world(self.psm1.get_current_position(), 'tuple')[1]))  # in the world frame

        # if self.RCM_ACTION_MODE == 'yaw':
        #     #self.psm1_eul = np.array([np.deg2rad(-90), 0., self.psm1_eul[2]])
        #     '''
        #     # RCM init
        #     #eul=np.array([np.deg2rad(-90), 0., 0.])
        #     print(self.psm1.wTr)
        #     print(self.psm1.tool_T_tip)
        #     init_pose=self.psm1.get_current_position()
        #
        #     eul=np.array([0, 0., np.deg2rad(-50)])
        #     rcm_eul=get_matrix_from_euler(eul)
        #     init_pose[:3,:3]=rcm_eul
        #
        #     rcm_pose=self.psm1.pose_world2rcm(init_pose)
        #     rcm_eul=get_euler_from_matrix(rcm_pose[:3,:3])
        #     print('from [0, 0., np.deg2rad(-50)] to ', rcm_eul)
        #     #exit()
        #     eul=np.array([0, 0., np.deg2rad(-90)])
        #     rcm_eul=get_matrix_from_euler(eul)
        #     init_pose[:3,:3]=rcm_eul
        #
        #     rcm_pose=self.psm1.pose_world2rcm(init_pose)
        #     rcm_eul=get_euler_from_matrix(rcm_pose[:3,:3])
        #     print('from [0, 0., np.deg2rad(-90)] to ', rcm_eul)
        #
        #     m=np.array([[ 0.93969262, -0.34202014,  0.,  1.21313615],
        #                 [ 0.34202014,  0.93969262,  0., -2.25649898],
        #                 [ 0.,          0.,          1., -4.25550013],
        #                 [ 0.,          0.,          0.,  1.]])
        #     #print(m.shape)
        #
        #     m=get_euler_from_matrix(m[:3,:3])
        #     print('m1: ', m)
        #
        #     m=np.array([[ 0., -0.93969262, -0.34202014,  1.21313615],
        #                 [ 0., -0.34202014,  0.93969262, -2.25649898],
        #                 [-1.,  0.,          0.,         -4.25550013],
        #                 [ 0.,  0.,          0.,          1.]])
        #     m=get_euler_from_matrix(m[:3,:3])
        #     print('m2: ', m)
        #     exit()
        #     '''
        #     # RCM init
        #     eul=np.array([np.deg2rad(-90), 0., 0.])
        #     eul=get_matrix_from_euler(eul)
        #     init_pose=self.psm1.get_current_position()
        #     self.rcm_init_eul=np.array(get_euler_from_matrix(init_pose[:3, :3]))
        #     init_pose[:3,:3]=eul
        #     rcm_pose=self.psm1.pose_world2rcm_no_tip(init_pose)
        #     rcm_eul=get_euler_from_matrix(rcm_pose[:3,:3])
        #     #print('rcm eul: ', rcm_eul)
        #     #exit()
        #     self.rcm_init_eul[0]=rcm_eul[0]
        #     self.rcm_init_eul[1]=rcm_eul[1]
        #     print(self.rcm_init_eul)
        #     #exit()

        # elif self.RCM_ACTION_MODE == 'pitch':
        #     self.psm1_eul = np.array([np.deg2rad(0), self.psm1_eul[1], np.deg2rad(-90)])
        # else:
        #     raise NotImplementedError
        # self.psm2 = None
        # self._contact_constraint = None
        # self._contact_approx = False

        # other cubes
        # b.set_boundary(self.workspace_limits + np.array([[-0.2, 0], [0, 0], [0, 0]]))
        # for i in range(self.CUBE_NUMBER):
        #     obj_id = p.loadURDF(os.path.join(ASSET_DIR_PATH, 'cube/cube.urdf'),
        #                         (0, 0, 0.05), (0, 0, 0, 1),
        #                         globalScaling=0.8 * self.SCALING)
        #     color = RGB_COLOR_255[1 + i // 2]
        #     p.changeVisualShape(obj_id, -1,
        #                         rgbaColor=(color[0] / 255, color[1] / 255, color[2] / 255, 1),
        #                         specularColor=(0.1, 0.1, 0.1))
        #     # p.changeDynamics(obj_id, -1, mass=0.01)
        #     b.add(obj_id, min_distance=0.12)

    # def _get_obs(self) -> np.ndarray:
    #     robot_state = self._get_robot_state()
    #     # may need to modify
    #     rgb_array, mask, depth = self.ecm.render_image()
    #     in_view, centroids = get_centroid(mask, self.obj_id)
    #
    #     if not in_view:
    #         # out of view; differ when the object is on the boundary.
    #         pos, _ = get_body_pose(self.obj_id)
    #         centroids = self.ecm.get_centroid_proj(pos)
    #         # print(" -> Out of view! {}".format(np.round(centroids, 4)))
    #
    #     observation = np.concatenate([
    #         robot_state, np.array(in_view).astype(np.float).ravel(),
    #         centroids.ravel(), np.array(self.ecm.wz).ravel()  # achieved_goal.copy(),
    #     ])
    #     return observation

    def _get_obs(self) -> dict:
        robot_state = self._get_robot_state()

        render_obs, seg, depth = self.ecm.render_image()
        # cv2.imwrite('/research/d1/rshr/arlin/data/debug/depth_noise_debug/img.png', cv2.cvtColor(render_obs, cv2.COLOR_BGR2RGB))
        # plt.imsave('/research/d1/rshr/arlin/data/debug/depth_noise_debug/img2.png', render_obs)
        # print('depth max: ', np.max(depth))
        # exit()
        render_obs = cv2.resize(render_obs, (320, 240))

        self.counter += 1
        # print(render_obs[0][0])
        # exit()
        # seg = np.array(seg == 6).astype(int)

        # keep only the segmentation ids of interest (6 and 1)
        seg = np.array((seg == 6) | (seg == 1)).astype(int)
        # seg = np.array(seg == 1).astype(int)
        # seg = np.resize(seg, (320, 240))

        # plt.imsave('/research/d1/rshr/arlin/data/debug/depth_noise_debug/depth.png', depth)
        # exit()
        seg = cv2.resize(seg, (320, 240), interpolation=cv2.INTER_NEAREST)
        # plt.imsave('/research/d1/rshr/arlin/data/debug/seg_debug/noise_{}/seg.png'.format(self.curr_intensity), seg)
        # exit()
        depth = cv2.resize(depth, (320, 240), interpolation=cv2.INTER_NEAREST)
        # print(np.max(depth))
        # depth = cv2.resize(depth, (320, 240), interpolation=cv2.INTER_LANCZOS4)

        # image = cv2.cvtColor(render_obs, cv2.COLOR_BGR2RGB) / 255.0
        # plt.imsave('/home/student/code/regress_data7/seg/seg_{}.png'.format(self.counter), seg)
        # image = self.transform({'image': image})['image']
        # image = torch.from_numpy(image).to("cuda:0").float()

        # test depth noise

        # if np.random.randn() < 0.5:
        #     intensity = np.random.randint(3, 15) / 100
        #     # intensity = 0.1
        #     depth = add_gaussian_noise(depth, intensity)
        '''
        if self.counter == 10:
            cv2.imwrite('/research/d1/rshr/arlin/data/debug/depth_noise_debug/gaussian/img.png', cv2.cvtColor(render_obs, cv2.COLOR_BGR2RGB))
            plt.imsave('/research/d1/rshr/arlin/data/debug/depth_noise_debug/gaussian/depth.png', depth)
            for i in [0.01, 0.05, 0.1, 0.15, 0.2]:
                noisy_depth_map = add_random_noise(depth, i)
                plt.imsave('/research/d1/rshr/arlin/data/debug/depth_noise_debug/gaussian/noise_{}.png'.format(i), noisy_depth_map)

            exit()
        '''

        # noisy_segmentation_map = add_noise_to_segmentation(seg, self.seg_noise_intensity)
        # noisy_depth_map = add_gaussian_noise(depth, self.curr_intensity)
        # if self.counter == 10:
        #     plt.imsave('/research/d1/rshr/arlin/data/debug/seg_debug/noise_{}/img.png'.format(self.curr_intensity), render_obs)
        #     plt.imsave('/research/d1/rshr/arlin/data/debug/seg_debug/noise_{}/seg.png'.format(self.curr_intensity), seg)
        #     plt.imsave('/research/d1/rshr/arlin/data/debug/seg_debug/noise_{}/noise_seg.png'.format(self.curr_intensity), noisy_segmentation_map)

        seg = torch.from_numpy(seg).to("cuda:0").float()
        depth = torch.from_numpy(depth).to("cuda:0").float()

        with torch.no_grad():
            # the vision model regresses the tracked centroid from the segmentation and depth maps
            v_output = self.v_model.get_obs(seg.unsqueeze(0), depth.unsqueeze(0))[0]  # .cpu().data().numpy()
            # print(v_output.shape)
            v_output = v_output.cpu().numpy()

        achieved_goal = np.array([
            v_output[0], v_output[1], self.ecm.wz
        ])

        observation = np.concatenate([
            robot_state, np.array([0.0]).astype(float).ravel(),
            v_output.ravel(), np.array(self.ecm.wz).ravel()  # achieved_goal.copy(),
        ])
        obs = {
            'observation': observation.copy(),
            'achieved_goal': achieved_goal.copy(),
            'desired_goal': np.array([0., 0., 0.]).copy()
        }
        return obs

    def _sample_goal(self) -> np.ndarray:
        """ Samples a new goal and returns it.
        """
        goal = np.array([0., 0.])
        return goal.copy()

    def _step_callback(self):
        """ Move the target along the trajectory.
        """
        for _ in range(10):
            x, y = self.traj.step()
            self._step = self.traj.get_step()
            current_PSM_position = self.psm1.get_current_position(frame='world')
            new_PSM_position = current_PSM_position.copy()

            new_PSM_position[0, 3] = x
            new_PSM_position[1, 3] = y
            new_PSM_position = self.psm1.pose_world2rcm(new_PSM_position)
            self.psm1.move(new_PSM_position)
            # pivot = [x, y, 0.05 * self.SCALING]
            # p.changeConstraint(self._cid, pivot, maxForce=50)
            p.stepSimulation()

    def get_oracle_action(self, obs) -> np.ndarray:
        """
        Define a human expert strategy.
        """
        centroid = obs['observation'][-3: -1]
        cam_u = centroid[0] * RENDER_WIDTH
        cam_v = centroid[1] * RENDER_HEIGHT
        self.ecm.homo_delta = np.array([cam_u, cam_v]).reshape((2, 1))
        if np.linalg.norm(self.ecm.homo_delta) < 8 and np.linalg.norm(self.ecm.wz) < 0.1:
            # the pixel error is small enough
            action = np.zeros(3)
        else:
            # print("Pixel error: {:.4f}".format(np.linalg.norm(self.ecm.homo_delta)))
            # controller
            fov = np.deg2rad(FoV)
            fx = (RENDER_WIDTH / 2) / np.tan(fov / 2)
            fy = (RENDER_HEIGHT / 2) / np.tan(fov / 2)  # TODO: not sure
            cz = 1.0
            Lmatrix = np.array([[-fx / cz, 0., cam_u / cz],
                                [0., -fy / cz, cam_v / cz]])
            action = 0.5 * np.dot(np.linalg.pinv(Lmatrix), self.ecm.homo_delta).flatten() / 0.01
            if np.abs(action).max() > 1:
                action /= np.abs(action).max()
        return action
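        # Lmatrix above is the translational part of the image-Jacobian (interaction matrix)
        # of a point feature at an assumed depth cz: it maps a camera-frame velocity to the
        # pixel-coordinate velocity. Its pseudo-inverse therefore turns the pixel error
        # homo_delta into a camera-frame velocity command, which is scaled and clipped to the
        # [-1, 1] action range before being returned.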


if __name__ == "__main__":
    env = ActiveTrack(render_mode='human')  # create one process and the corresponding env

    env.test(horizon=200)
    env.close()
    time.sleep(2)
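
For reference, the same environment can also be driven without env.test, using the standard reset/step loop that SurRoLEnv is assumed to expose; the import path below mirrors the surrol.tasks.ecm_env import at the top of the file and is an assumption about where this module is installed.

from surrol.tasks.ecm_active_track_v1 import ActiveTrack  # assumed install location

env = ActiveTrack(render_mode=None)
obs = env.reset()
ret = 0.0
for t in range(200):
    action = env.get_oracle_action(obs)   # scripted visual-servo expert defined above
    obs, reward, done, info = env.step(action)
    ret += reward
    if done:                              # early stop once the target leaves the view
        break
env.close()
print('steps: {}, return: {:.2f}'.format(t + 1, ret))
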
ecm_env.py ADDED
@@ -0,0 +1,179 @@
import numpy as np
import pybullet as p
from surrol.gym.surrol_env import SurRoLEnv, RENDER_HEIGHT
from surrol.robots.ecm import Ecm
from surrol.utils.pybullet_utils import (
    get_link_pose,
    reset_camera
)
from surrol.utils.robotics import get_pose_2d_from_matrix


def goal_distance(goal_a, goal_b):
    assert goal_a.shape == goal_b.shape
    return np.linalg.norm(goal_a - goal_b, axis=-1)


class EcmEnv(SurRoLEnv):
    """
    Single arm env using ECM (active_track is not a GoalEnv)
    Refer to Gym fetch
    https://github.com/openai/gym/blob/master/gym/envs/robotics/fetch_env.py
    ravens
    https://github.com/google-research/ravens/blob/master/ravens/environments/environment.py
    """
    ACTION_SIZE = 3  # (dx, dy, dz) or cVc or droll (1)
    ACTION_MODE = 'cVc'
    DISTANCE_THRESHOLD = 0.005
    POSE_ECM = ((0.15, 0.0, 0.7524), (0, 30 / 180 * np.pi, 0))
    QPOS_ECM = (0, 0.6, 0.04, 0)
    WORKSPACE_LIMITS = ((0.45, 0.55), (-0.05, 0.05), (0.60, 0.70))
    SCALING = 1.

    def __init__(self, render_mode: str = None, cid=-1):
        # workspace
        self.workspace_limits = np.asarray(self.WORKSPACE_LIMITS)
        self.workspace_limits *= self.SCALING

        # camera
        self.use_camera = False

        # has_object
        self.has_object = False
        self.obj_id = None

        super(EcmEnv, self).__init__(render_mode, cid)

        # change duration
        self._duration = 0.1

        # distance_threshold
        self.distance_threshold = self.DISTANCE_THRESHOLD * self.SCALING

        # render related setting
        self._view_matrix = p.computeViewMatrixFromYawPitchRoll(
            cameraTargetPosition=(0.27 * self.SCALING, -0.20 * self.SCALING, 0.55 * self.SCALING),
            distance=1.80 * self.SCALING,
            yaw=150,
            pitch=-30,
            roll=0,
            upAxisIndex=2
        )

    def render(self, mode='rgb_array'):
        # TODO: check how to disable specular color when using the EGL renderer
        if mode != "rgb_array":
            return np.array([])
        rgb_array = super().render(mode)
        if self.use_camera:
            # Ecm.render_image returns (rgb, mask, depth); only the RGB image is tiled here
            rgb_cam, _, _ = self.ecm.render_image(RENDER_HEIGHT, RENDER_HEIGHT)
            rgb_array = np.concatenate((rgb_array, rgb_cam), axis=1)
        return rgb_array

    def compute_reward(self, achieved_goal: np.ndarray, desired_goal: np.ndarray, info):
        """ Sparse reward. """
        # d = goal_distance(achieved_goal, desired_goal)
        # return - (d > self.distance_threshold).astype(np.float32)
        return self._is_success(achieved_goal, desired_goal).astype(np.float32) - 1.

    def _env_setup(self):
        assert self.ACTION_MODE in ('cVc', 'dmove', 'droll')
        # camera
        if self._render_mode == 'human':
            reset_camera(yaw=150.0, pitch=-30.0, dist=1.50 * self.SCALING,
                         target=(0.27 * self.SCALING, -0.20 * self.SCALING, 0.55 * self.SCALING))

        # robot
        self.ecm = Ecm(self.POSE_ECM[0], p.getQuaternionFromEuler(self.POSE_ECM[1]),
                       scaling=self.SCALING)

        pass  # need to implement based on every task
        # self.obj_ids

    def _get_robot_state(self) -> np.ndarray:
        # TODO
        # robot state: eef pose in the ECM coordinate
        pose_rcm = get_pose_2d_from_matrix(self.ecm.get_current_position())
        return np.concatenate([
            np.array(pose_rcm[0]), np.array(p.getEulerFromQuaternion(pose_rcm[1]))
        ])

    def _get_obs(self) -> dict:
        robot_state = self._get_robot_state()
        # may need to modify
        if self.has_object:
            pos, _ = get_link_pose(self.obj_id, -1)
            object_pos = np.array(pos)
        else:
            object_pos = np.zeros(0)

        if self.has_object:
            achieved_goal = object_pos.copy()
        else:
            achieved_goal = np.array(get_link_pose(self.ecm.body, self.ecm.EEF_LINK_INDEX)[0])  # eef position

        observation = np.concatenate([
            robot_state, object_pos.ravel(),  # achieved_goal.copy(),
        ])
        obs = {
            'observation': observation.copy(),
            'achieved_goal': achieved_goal.copy(),
            'desired_goal': self.goal.copy()
        }
        return obs

    def _set_action(self, action: np.ndarray):
        """
        delta_position (3) and delta_theta (1); in the world coordinate
        """
        # print('action: ', action)
        assert len(action) == self.ACTION_SIZE
        action = action.copy()  # ensure that we don't change the action outside of this scope
        if self.ACTION_MODE == 'cVc':
            # hyper-parameters are sensitive; need to tune
            # if np.linalg.norm(action) > 1:
            #     action /= np.linalg.norm(action)
            action *= 0.01 * self.SCALING  # limit the maximum change in velocity
            dq = 0.05 * self.ecm.cVc_to_dq(action)  # scaled
            result = self.ecm.dmove_joint(dq)
            if result is False:
                return False
            else:
                return True
        elif self.ACTION_MODE == 'dmove':
            # incremental motion in Cartesian space in the base frame
            action *= 0.01 * self.SCALING
            pose_rcm = self.ecm.get_current_position()
            pose_rcm[:3, 3] += action
            pos, _ = self.ecm.pose_rcm2world(pose_rcm, 'tuple')
            joint_positions = self.ecm.inverse_kinematics((pos, None), self.ecm.EEF_LINK_INDEX)  # do not consider orn
            self.ecm.move_joint(joint_positions[:self.ecm.DoF])
        elif self.ACTION_MODE == 'droll':
            # change the roll angle
            action *= np.deg2rad(3)
            self.ecm.dmove_joint_one(action[0], 3)
        else:
            raise NotImplementedError

    def _is_success(self, achieved_goal, desired_goal):
        """ Indicates whether the achieved goal has reached the desired goal.
        """
        d = goal_distance(achieved_goal, desired_goal)
        return (d < self.distance_threshold).astype(np.float32)

    def _sample_goal(self) -> np.ndarray:
        """ Samples a new goal and returns it.
        """
        raise NotImplementedError

    @property
    def action_size(self):
        return self.ACTION_SIZE

    def get_oracle_action(self, obs) -> np.ndarray:
        """
        Define a scripted oracle strategy
        """
        raise NotImplementedError
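
EcmEnv leaves _sample_goal and get_oracle_action abstract, so each task supplies them (ActiveTrack above also overrides _get_obs and the reward). The sketch below shows what a concrete subclass could look like; the class name, the goal-sampling rule, and the proportional oracle are illustrative assumptions, not part of this commit, and it assumes self.goal is consumed by the base-class reset as in the Gym fetch environments referenced in the docstring.

import numpy as np
from surrol.tasks.ecm_env import EcmEnv


class EcmReachExample(EcmEnv):
    # drive the ECM tip with small Cartesian increments instead of camera-frame velocities
    ACTION_MODE = 'dmove'

    def _sample_goal(self) -> np.ndarray:
        # sample a target position inside the scaled workspace
        limits = self.workspace_limits
        return np.array([np.random.uniform(*limits[i]) for i in range(3)])

    def get_oracle_action(self, obs) -> np.ndarray:
        # proportional controller toward the goal, clipped to the unit action range
        delta = obs['desired_goal'] - obs['achieved_goal']
        action = delta / (0.01 * self.SCALING)
        return np.clip(action, -1., 1.)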