Update attitude calculation method

	modified:   src/maintain/scripts/test copy.py
	modified:   src/maintain/scripts/test.py
wxchen
2023-05-09 16:32:38 +08:00
parent 5ee37caf7c
commit 7045f42ceb
2 changed files with 96 additions and 75 deletions

src/maintain/scripts/test copy.py

@@ -90,4 +90,59 @@ if __name__ == "__main__":
     ts.registerCallback(callback)
     rospy.spin()
+
+# backup
+def calculate_image_edge_plane_normal(depth_roi):
+    # Get the shape of the depth_roi
+    height, width = depth_roi.shape
+
+    # Get the edges of the ROI
+    left_edge = [(0, y) for y in range(height)]
+    right_edge = [(width-1, y) for y in range(height)]
+    top_edge = [(x, 0) for x in range(width)]
+    bottom_edge = [(x, height-1) for x in range(width)]
+    edges = left_edge + right_edge + top_edge + bottom_edge
+
+    # Create a 2D grid of X and Y coordinates
+    X, Y = np.meshgrid(np.arange(width), np.arange(height))
+
+    # Reshape the X, Y, and depth_roi arrays into one-dimensional arrays
+    X = X.reshape(-1)
+    Y = Y.reshape(-1)
+    Z = depth_roi.reshape(-1)
+
+    # Stack the X, Y, and depth_roi arrays vertically to create a 3D array of points in the form of [X, Y, Z]
+    points = np.vstack([X, Y, Z]).T
+
+    # Compute the mean depth value of the edges
+    edge_depths = []
+    for edge_point in edges:
+        edge_depths.append(depth_roi[edge_point[1], edge_point[0]])
+    mean_depth = np.mean(edge_depths)
+
+    # Create a mask to extract the points on the edges
+    mask = np.zeros_like(depth_roi, dtype=np.uint8)
+    for edge_point in edges:
+        mask[edge_point[1], edge_point[0]] = 1
+    masked_depth_roi = depth_roi * mask
+
+    # Extract the 3D coordinates of the points on the edges
+    edge_points = []
+    for edge_point in edges:
+        edge_points.append([edge_point[0], edge_point[1], masked_depth_roi[edge_point[1], edge_point[0]]])
+
+    # Convert the list of edge points to a numpy array
+    edge_points = np.array(edge_points)
+
+    # Shift the edge points so that the mean depth value is at the origin
+    edge_points = edge_points - np.array([width/2, height/2, mean_depth])
+
+    # Compute the singular value decomposition (SVD) of the edge points
+    U, S, V = np.linalg.svd(edge_points)
+
+    # Extract the normal vector of the plane that best fits the edge points from the right-singular vector corresponding to the smallest singular value
+    normal = V[2]
+
+    return normal
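As a quick sanity check of this backup routine (illustrative only, not part of the commit), it can be fed a synthetic depth ROI that forms a tilted plane and the result compared against the analytic normal. The sizes and plane coefficients below are made up, and calculate_image_edge_plane_normal plus numpy are assumed to be in scope as defined above:

import numpy as np

# Synthetic tilted plane: depth(x, y) = 500 + 2*x + 3*y (arbitrary depth units)
h, w = 60, 80
X, Y = np.meshgrid(np.arange(w), np.arange(h))
depth_roi = 500.0 + 2.0 * X + 3.0 * Y

normal = calculate_image_edge_plane_normal(depth_roi)

# The plane 2*x + 3*y - z + 500 = 0 has normal proportional to (2, 3, -1);
# the sign of an SVD singular vector is arbitrary, so compare up to sign.
expected = np.array([2.0, 3.0, -1.0])
expected = expected / np.linalg.norm(expected)
print(normal, abs(np.dot(normal, expected)))  # |dot| should come out close to 1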

src/maintain/scripts/test.py

@@ -11,8 +11,8 @@ from geometry_msgs.msg import PoseStamped, TransformStamped, Quaternion
 import message_filters
 from cv_bridge import CvBridge, CvBridgeError
 import rospkg
-import open3d as o3d
-from open3d_ros_helper import open3d_ros_helper as orh
+# import open3d as o3d
+# from open3d_ros_helper import open3d_ros_helper as orh
 import os
 import sys
@@ -22,58 +22,27 @@ from detection_msgs.msg import BoundingBox, BoundingBoxes
 bridge = CvBridge()
 annulus_width = 10

-def calculate_image_edge_plane_normal(depth_roi):
-    # Get the shape of the depth_roi
-    height, width = depth_roi.shape
-    # Get the edges of the ROI
-    left_edge = [(0, y) for y in range(height)]
-    right_edge = [(width-1, y) for y in range(height)]
-    top_edge = [(x, 0) for x in range(width)]
-    bottom_edge = [(x, height-1) for x in range(width)]
-    edges = left_edge + right_edge + top_edge + bottom_edge
-    # Create a 2D grid of X and Y coordinates
-    X, Y = np.meshgrid(np.arange(width), np.arange(height))
-    # Reshape the X, Y, and depth_roi arrays into one-dimensional arrays
-    X = X.reshape(-1)
-    Y = Y.reshape(-1)
-    Z = depth_roi.reshape(-1)
-    # Stack the X, Y, and depth_roi arrays vertically to create a 3D array of points in the form of [X, Y, Z]
-    points = np.vstack([X, Y, Z]).T
-    # Compute the mean depth value of the edges
-    edge_depths = []
-    for edge_point in edges:
-        edge_depths.append(depth_roi[edge_point[1], edge_point[0]])
-    mean_depth = np.mean(edge_depths)
-    # Create a mask to extract the points on the edges
-    mask = np.zeros_like(depth_roi, dtype=np.uint8)
-    for edge_point in edges:
-        mask[edge_point[1], edge_point[0]] = 1
-    masked_depth_roi = depth_roi * mask
-    # Extract the 3D coordinates of the points on the edges
-    edge_points = []
-    for edge_point in edges:
-        edge_points.append([edge_point[0], edge_point[1], masked_depth_roi[edge_point[1], edge_point[0]]])
-    # Convert the list of edge points to a numpy array
-    edge_points = np.array(edge_points)
-    # Shift the edge points so that the mean depth value is at the origin
-    edge_points = edge_points - np.array([width/2, height/2, mean_depth])
-    # Compute the singular value decomposition (SVD) of the edge points
-    U, S, V = np.linalg.svd(edge_points)
-    # Extract the normal vector of the plane that best fits the edge points from the right-singular vector corresponding to the smallest singular value
-    normal = V[2]
-    return normal
+# 2d to 3d
+def computer_2d_3d(x, y, depth_roi, color_intrinsics):
+    pz = np.mean(depth_roi) * 0.001
+    px = (x - color_intrinsics[2]) * pz / color_intrinsics[0]
+    py = (y - color_intrinsics[5]) * pz / color_intrinsics[4]
+    return px, py, pz
+
+def compute_normal_vector(p1, p2, p3, p4):
+    # Compute two vectors in the plane
+    v1 = np.array(p2) - np.array(p1)
+    v2 = np.array(p3) - np.array(p1)
+    # Compute the cross product of the two vectors to get the normal vector
+    n = np.cross(v1, v2)
+    # Compute the fourth point in the plane
+    p4 = np.array(p4)
+    # Check if the fourth point is on the same side of the plane as the origin
+    if np.dot(n, p4 - np.array(p1)) < 0:
+        n = -n
+    # Normalize the normal vector to obtain a unit vector
+    n = n / np.linalg.norm(n)
+    return n

 def filter_quaternion(quat, quat_prev, alpha):
     if quat_prev is None:
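A minimal standalone illustration of how the two new helpers combine; the intrinsics, pixel coordinates, and depth values are invented for the example, K is taken row-major as in sensor_msgs/CameraInfo, and note that computer_2d_3d derives its depth from np.mean(depth_roi), so all four corners back-projected from the same ROI share the same z:

import numpy as np

# Hypothetical pinhole intrinsics (fx = fy = 615, cx = 320, cy = 240), row-major K
color_intrinsics = [615.0, 0.0, 320.0,
                    0.0, 615.0, 240.0,
                    0.0, 0.0, 1.0]
# Fake depth patch in millimetres (~0.45 m everywhere)
depth_roi = np.full((40, 40), 450.0)

# Back-project the four padded bounding-box corners, then take their plane normal
p1 = computer_2d_3d(100, 100, depth_roi, color_intrinsics)
p2 = computer_2d_3d(200, 100, depth_roi, color_intrinsics)
p3 = computer_2d_3d(200, 200, depth_roi, color_intrinsics)
p4 = computer_2d_3d(100, 200, depth_roi, color_intrinsics)
normal = compute_normal_vector(p1, p2, p3, p4)
print(normal)  # for this flat patch the unit normal lies along the camera z-axis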
@@ -84,45 +53,41 @@ def filter_quaternion(quat, quat_prev, alpha):
         quat_filtered[i] = alpha * quat[i] + (1-alpha) * quat_prev[i]
     # Normalize the quaternion
     quat_filtered = quat_filtered / np.linalg.norm(quat_filtered)
     return quat_filtered

 def box_callback(box, depth, color_info):
     try:
         color_intrinsics = color_info.K
         depth_image = bridge.imgmsg_to_cv2(depth, '16UC1')
-        # pc = orh.rospc_to_o3dpc(pc_msg)

         # get the center of screw
         boundingBox = box.bounding_boxes[0]
         screw_x = (boundingBox.xmax + boundingBox.xmin) / 2
         screw_y = (boundingBox.ymax + boundingBox.ymin) / 2
         # print(screw_x,screw_y)
         depth_array = np.array(depth_image, dtype=np.float32)
         depth_roi = depth_array[boundingBox.ymin:boundingBox.ymax, boundingBox.xmin:boundingBox.xmax]
-        z = np.mean(depth_roi) * 0.001
-        x = (screw_x - color_intrinsics[2]) * z / color_intrinsics[0]
-        y = (screw_y - color_intrinsics[5]) * z / color_intrinsics[4]
+        x, y, z = computer_2d_3d(screw_x, screw_y, depth_roi, color_intrinsics)
         # rospy.loginfo("screw pose: x: %f, y: %f, z: %f", x, y, z)

         # calculate normal direction of screw area
-        annulus_roi = depth_array[boundingBox.ymin-annulus_width:boundingBox.ymax+annulus_width, boundingBox.xmin-annulus_width:boundingBox.xmax+annulus_width]
-        normal = calculate_image_edge_plane_normal(annulus_roi)
-        print(normal)
+        p1x, p1y, p1z = computer_2d_3d(boundingBox.xmin-annulus_width, boundingBox.ymin-annulus_width, depth_roi, color_intrinsics)
+        p2x, p2y, p2z = computer_2d_3d(boundingBox.xmax+annulus_width, boundingBox.ymin-annulus_width, depth_roi, color_intrinsics)
+        p3x, p3y, p3z = computer_2d_3d(boundingBox.xmax+annulus_width, boundingBox.ymax+annulus_width, depth_roi, color_intrinsics)
+        p4x, p4y, p4z = computer_2d_3d(boundingBox.xmin-annulus_width, boundingBox.ymax+annulus_width, depth_roi, color_intrinsics)
+        p1 = [p1x, p1y, p1z]
+        p2 = [p2x, p2y, p2z]
+        p3 = [p3x, p3y, p3z]
+        p4 = [p4x, p4y, p4z]
+        normal = compute_normal_vector(p1, p2, p3, p4)
+        # annulus_roi = depth_array[boundingBox.ymin-annulus_width:boundingBox.ymax+annulus_width, boundingBox.xmin-annulus_width:boundingBox.xmax+annulus_width]
+        # normal = calculate_image_edge_plane_normal(annulus_roi)
+        # print(normal)

-        # publish screw pose
-        # screw_pose = PoseStamped()
-        # screw_pose.header.stamp = rospy.Time.now()
-        # screw_pose.header.frame_id = "camera_color_optical_frame"
-        # screw_pose.pose.position.x = x
-        # screw_pose.pose.position.y = y
-        # screw_pose.pose.position.z = z
-        # screw_pose.pose.orientation.x = 0
-        # screw_pose.pose.orientation.y = 0
-        # screw_pose.pose.orientation.z = 0
-        # screw_pose.pose.orientation.w = 1
-        # pose_pub.publish(screw_pose)

         # normal vector to quaternion
         screw_quat = tf.transformations.quaternion_from_euler(0, 0, 0)
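The "# normal vector to quaternion" step starts from an identity rotation here, and the rest of the conversion is outside this hunk. For reference only, one common way to express a plane normal as an orientation is to rotate the camera z-axis onto it; the helper below is a hypothetical sketch, not code from this commit:

import numpy as np
import tf.transformations as tft

def normal_to_quaternion(normal):
    # Illustrative sketch: quaternion (x, y, z, w) rotating the camera z-axis onto `normal`
    n = np.asarray(normal, dtype=float)
    n = n / np.linalg.norm(n)
    z = np.array([0.0, 0.0, 1.0])
    axis = np.cross(z, n)
    angle = np.arccos(np.clip(np.dot(z, n), -1.0, 1.0))
    if np.linalg.norm(axis) < 1e-9:
        # normal is (anti)parallel to z: identity, or a half-turn about x
        if angle < 1e-6:
            return tft.quaternion_from_euler(0, 0, 0)
        return tft.quaternion_about_axis(np.pi, [1.0, 0.0, 0.0])
    return tft.quaternion_about_axis(angle, axis)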
@@ -174,6 +139,7 @@ if __name__ == "__main__":
     box_sub = message_filters.Subscriber("/yolov5/detections", BoundingBoxes)
     depth_sub = message_filters.Subscriber("/camera/aligned_depth_to_color/image_raw", Image)
     color_info = message_filters.Subscriber("/camera/color/camera_info", CameraInfo)
+    # pc_sub = message_filters.Subscriber("/camera/depth/color/points", PointCloud2)

     tf_broadcaster = tf2_ros.TransformBroadcaster()