wxchen
2023-05-09 16:56:57 +08:00
5 changed files with 132 additions and 149 deletions

.gitignore

@@ -1,5 +1,6 @@
.catkin_tools
.vscode
.vscode/
.vscode/*
/build
/devel


@@ -1,20 +0,0 @@
{
    "configurations": [
        {
            "browse": {
                "databaseFilename": "${default}",
                "limitSymbolsToIncludedHeaders": false
            },
            "includePath": [
                "/opt/ros/melodic/include/**",
                "/usr/include/**"
            ],
            "name": "ROS",
            "intelliSenseMode": "gcc-x64",
            "compilerPath": "/usr/bin/gcc",
            "cStandard": "gnu11",
            "cppStandard": "c++14"
        }
    ],
    "version": 4
}


@@ -1,8 +0,0 @@
{
    "python.autoComplete.extraPaths": [
        "/opt/ros/melodic/lib/python2.7/dist-packages"
    ],
    "python.analysis.extraPaths": [
        "/opt/ros/melodic/lib/python2.7/dist-packages"
    ]
}


@@ -91,3 +91,58 @@ if __name__ == "__main__":
    rospy.spin()
# backup
def calculate_image_edge_plane_normal(depth_roi):
    # Get the shape of the depth_roi
    height, width = depth_roi.shape
    # Get the edges of the ROI
    left_edge = [(0, y) for y in range(height)]
    right_edge = [(width-1, y) for y in range(height)]
    top_edge = [(x, 0) for x in range(width)]
    bottom_edge = [(x, height-1) for x in range(width)]
    edges = left_edge + right_edge + top_edge + bottom_edge
    # Create a 2D grid of X and Y coordinates
    X, Y = np.meshgrid(np.arange(width), np.arange(height))
    # Reshape the X, Y, and depth_roi arrays into one-dimensional arrays
    X = X.reshape(-1)
    Y = Y.reshape(-1)
    Z = depth_roi.reshape(-1)
    # Stack X, Y, and Z into an (N, 3) array of [X, Y, Z] points (not used below)
    points = np.vstack([X, Y, Z]).T
    # Compute the mean depth value of the edges
    edge_depths = []
    for edge_point in edges:
        edge_depths.append(depth_roi[edge_point[1], edge_point[0]])
    mean_depth = np.mean(edge_depths)
    # Create a mask to extract the points on the edges
    mask = np.zeros_like(depth_roi, dtype=np.uint8)
    for edge_point in edges:
        mask[edge_point[1], edge_point[0]] = 1
    masked_depth_roi = depth_roi * mask
    # Extract the 3D coordinates of the points on the edges
    edge_points = []
    for edge_point in edges:
        edge_points.append([edge_point[0], edge_point[1], masked_depth_roi[edge_point[1], edge_point[0]]])
    # Convert the list of edge points to a numpy array
    edge_points = np.array(edge_points)
    # Shift the edge points so that the mean depth value is at the origin
    edge_points = edge_points - np.array([width/2, height/2, mean_depth])
    # Compute the singular value decomposition (SVD) of the edge points
    U, S, V = np.linalg.svd(edge_points)
    # The plane normal is the right-singular vector for the smallest singular value
    normal = V[2]
    return normal
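As a quick sanity check, this backup helper can be run on a synthetic ROI sampled from a known tilted plane. A minimal sketch, not part of the commit, assuming the function above is in scope and using illustrative values:

import numpy as np

# synthetic 40x60 depth patch sampled from the plane z = 500 + 0.5*x + 0.2*y
X, Y = np.meshgrid(np.arange(60), np.arange(40))
depth_roi = 500.0 + 0.5 * X + 0.2 * Y
n = calculate_image_edge_plane_normal(depth_roi)
# n should come out roughly (anti)parallel to (0.5, 0.2, -1) normalized
print(n, np.array([0.5, 0.2, -1.0]) / np.linalg.norm([0.5, 0.2, -1.0]))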


@@ -6,11 +6,13 @@ from matplotlib import pyplot as plt
import rospy
import tf2_ros
import tf
from sensor_msgs.msg import Image , CameraInfo
from sensor_msgs.msg import Image , CameraInfo, PointCloud2
from geometry_msgs.msg import PoseStamped, TransformStamped, Quaternion
import message_filters
from cv_bridge import CvBridge, CvBridgeError
import rospkg
# import open3d as o3d
# from open3d_ros_helper import open3d_ros_helper as orh
import os
import sys
@@ -20,58 +22,27 @@ from detection_msgs.msg import BoundingBox, BoundingBoxes
bridge = CvBridge()
annulus_width = 10
def calculate_image_edge_plane_normal(depth_roi):
    # Get the shape of the depth_roi
    height, width = depth_roi.shape
# 2d to 3d
def computer_2d_3d(x, y, depth_roi, color_intrinsics):
    pz = np.mean(depth_roi) * 0.001
    px = (x - color_intrinsics[2]) * pz / color_intrinsics[0]
    py = (y - color_intrinsics[5]) * pz / color_intrinsics[4]
    return px, py, pz
    # Get the edges of the ROI
    left_edge = [(0, y) for y in range(height)]
    right_edge = [(width-1, y) for y in range(height)]
    top_edge = [(x, 0) for x in range(width)]
    bottom_edge = [(x, height-1) for x in range(width)]
    edges = left_edge + right_edge + top_edge + bottom_edge
    # Create a 2D grid of X and Y coordinates
    X, Y = np.meshgrid(np.arange(width), np.arange(height))
    # Reshape the X, Y, and depth_roi arrays into one-dimensional arrays
    X = X.reshape(-1)
    Y = Y.reshape(-1)
    Z = depth_roi.reshape(-1)
    # Stack X, Y, and Z into an (N, 3) array of [X, Y, Z] points (not used below)
    points = np.vstack([X, Y, Z]).T
    # Compute the mean depth value of the edges
    edge_depths = []
    for edge_point in edges:
        edge_depths.append(depth_roi[edge_point[1], edge_point[0]])
    mean_depth = np.mean(edge_depths)
    # Create a mask to extract the points on the edges
    mask = np.zeros_like(depth_roi, dtype=np.uint8)
    for edge_point in edges:
        mask[edge_point[1], edge_point[0]] = 1
    masked_depth_roi = depth_roi * mask
    # Extract the 3D coordinates of the points on the edges
    edge_points = []
    for edge_point in edges:
        edge_points.append([edge_point[0], edge_point[1], masked_depth_roi[edge_point[1], edge_point[0]]])
    # Convert the list of edge points to a numpy array
    edge_points = np.array(edge_points)
    # Shift the edge points so that the mean depth value is at the origin
    edge_points = edge_points - np.array([width/2, height/2, mean_depth])
    # Compute the singular value decomposition (SVD) of the edge points
    U, S, V = np.linalg.svd(edge_points)
    # The plane normal is the right-singular vector for the smallest singular value
    normal = V[2]
    return normal
def compute_normal_vector(p1, p2, p3, p4):
    # Compute two vectors in the plane
    v1 = np.array(p2) - np.array(p1)
    v2 = np.array(p3) - np.array(p1)
    # Compute the cross product of the two vectors to get the normal vector
    n = np.cross(v1, v2)
    # Convert the fourth corner point to a numpy array
    p4 = np.array(p4)
    # Flip the normal if it points away from the fourth corner
    if np.dot(n, p4 - np.array(p1)) < 0:
        n = -n
    # Normalize the normal vector to obtain a unit vector
    n = n / np.linalg.norm(n)
    return n
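For reference, computer_2d_3d above is the usual pinhole back-projection with color_intrinsics laid out row-major as in sensor_msgs/CameraInfo.K (fx = K[0], fy = K[4], cx = K[2], cy = K[5]), except that it uses the mean ROI depth for every pixel. A self-contained sketch of how the two helpers combine, with made-up intrinsics and box coordinates not taken from the commit:

import numpy as np

# made-up intrinsics in CameraInfo.K order: [fx, 0, cx, 0, fy, cy, 0, 0, 1]
K = [615.0, 0.0, 320.0, 0.0, 615.0, 240.0, 0.0, 0.0, 1.0]
depth_roi = np.full((20, 20), 450.0)   # flat patch, 450 mm everywhere

# back-project the four corners of a detection box padded by annulus_width pixels
xmin, ymin, xmax, ymax, pad = 100, 80, 120, 100, 10
p1 = computer_2d_3d(xmin - pad, ymin - pad, depth_roi, K)
p2 = computer_2d_3d(xmax + pad, ymin - pad, depth_roi, K)
p3 = computer_2d_3d(xmax + pad, ymax + pad, depth_roi, K)
p4 = computer_2d_3d(xmin - pad, ymax + pad, depth_roi, K)

# all four corners share the mean depth here, so the normal comes out as +/- (0, 0, 1)
print(compute_normal_vector(p1, p2, p3, p4))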
def filter_quaternion(quat, quat_prev, alpha):
    if quat_prev is None:
@@ -84,13 +55,14 @@ def filter_quaternion(quat, quat_prev, alpha):
    quat_filtered = quat_filtered / np.linalg.norm(quat_filtered)
    return quat_filtered
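The middle of filter_quaternion is elided by this hunk. Assuming it is a simple per-component low-pass blend between the new and previous quaternion, which would be consistent with the renormalization shown above, a standalone version might look like the following sketch (illustrative only, not the committed code):

import numpy as np

def filter_quaternion_sketch(quat, quat_prev, alpha):
    # first sample: nothing to blend against
    if quat_prev is None:
        return np.asarray(quat, dtype=float)
    quat = np.asarray(quat, dtype=float)
    quat_prev = np.asarray(quat_prev, dtype=float)
    # q and -q encode the same rotation; flip to stay on the same hemisphere
    if np.dot(quat, quat_prev) < 0.0:
        quat = -quat
    # low-pass blend, then renormalize to keep a unit quaternion
    quat_filtered = alpha * quat + (1.0 - alpha) * quat_prev
    return quat_filtered / np.linalg.norm(quat_filtered)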
def box_callback(box, depth, color_info):
    try:
        color_intrinsics = color_info.K
        depth_image = bridge.imgmsg_to_cv2(depth, '16UC1')
        # pc = orh.rospc_to_o3dpc(pc_msg)
        if box is not None and len(box.bounding_boxes) > 0:
            # get the center of screw
            if box.bounding_boxes[0] is not None:
                boundingBox = box.bounding_boxes[0]
                screw_x = (boundingBox.xmax + boundingBox.xmin) / 2
                screw_y = (boundingBox.ymax + boundingBox.ymin) / 2
@@ -99,47 +71,31 @@ def box_callback(box, depth, color_info):
                depth_array = np.array(depth_image, dtype=np.float32)
                depth_roi = depth_array[boundingBox.ymin:boundingBox.ymax, boundingBox.xmin:boundingBox.xmax]
                z = np.mean(depth_roi) * 0.001
                x = (screw_x - color_intrinsics[2]) * z / color_intrinsics[0]
                y = (screw_y - color_intrinsics[5]) * z / color_intrinsics[4]
                rospy.loginfo("screw pose: x: %f, y: %f, z: %f", x, y, z)
                x, y, z = computer_2d_3d(screw_x, screw_y, depth_roi, color_intrinsics)
                # rospy.loginfo("screw pose: x: %f, y: %f, z: %f", x, y, z)
                # calculate normal direction of screw area
                p1x, p1y, p1z = computer_2d_3d(boundingBox.xmin-annulus_width, boundingBox.ymin-annulus_width, depth_roi, color_intrinsics)
                p2x, p2y, p2z = computer_2d_3d(boundingBox.xmax+annulus_width, boundingBox.ymin-annulus_width, depth_roi, color_intrinsics)
                p3x, p3y, p3z = computer_2d_3d(boundingBox.xmax+annulus_width, boundingBox.ymax+annulus_width, depth_roi, color_intrinsics)
                p4x, p4y, p4z = computer_2d_3d(boundingBox.xmin-annulus_width, boundingBox.ymax+annulus_width, depth_roi, color_intrinsics)
                p1 = [p1x, p1y, p1z]
                p2 = [p2x, p2y, p2z]
                p3 = [p3x, p3y, p3z]
                p4 = [p4x, p4y, p4z]
                normal = compute_normal_vector(p1, p2, p3, p4)
                annulus_roi = depth_array[boundingBox.ymin-annulus_width:boundingBox.ymax+annulus_width, boundingBox.xmin-annulus_width:boundingBox.xmax+annulus_width]
                normal = calculate_image_edge_plane_normal(annulus_roi)
                # annulus_roi = depth_array[boundingBox.ymin-annulus_width:boundingBox.ymax+annulus_width, boundingBox.xmin-annulus_width:boundingBox.xmax+annulus_width]
                # normal = calculate_image_edge_plane_normal(annulus_roi)
                # print(normal)
                # X,Y = np.meshgrid(np.arange(annulus_roi.shape[1]), np.arange(annulus_roi.shape[0]))
                # X = X.reshape(-1)
                # Y = Y.reshape(-1)
                # Z = annulus_roi.reshape(-1)
                # points = np.vstack([X, Y, Z]).T
                # center = np.mean(points, axis=0)
                # points = points - center
                # U, S, V = np.linalg.svd(points)
                # normal = V[2]
                # publish screw pose
                # screw_pose = PoseStamped()
                # screw_pose.header.stamp = rospy.Time.now()
                # screw_pose.header.frame_id = "camera_color_optical_frame"
                # screw_pose.pose.position.x = x
                # screw_pose.pose.position.y = y
                # screw_pose.pose.position.z = z
                # screw_pose.pose.orientation.x = 0
                # screw_pose.pose.orientation.y = 0
                # screw_pose.pose.orientation.z = 0
                # screw_pose.pose.orientation.w = 1
                # pose_pub.publish(screw_pose)
                # normal vector to quaternion
                screw_quat = tf.transformations.quaternion_from_euler(0, 0, 0)
                screw_quat[0] = normal[0]
                screw_quat[1] = normal[1]
                screw_quat[2] = normal[2]
                screw_quat[3] = 0
                # quaternion to euler
                screw_euler = tf.transformations.euler_from_quaternion(screw_quat)
                screw_quat = tf.transformations.quaternion_from_euler(screw_euler[0], screw_euler[1], 0)
@@ -165,11 +121,9 @@ def box_callback(box, depth, color_info):
                screw_tf.transform.rotation.z = screw_quat_filtered[2]
                screw_tf.transform.rotation.w = screw_quat_filtered[3]
                tf_broadcaster.sendTransform(screw_tf)
    except Exception as e:
        print(e)
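In the hunk above, the plane normal is packed component-wise into a quaternion, converted to Euler angles, and rebuilt with the yaw forced to zero. If the goal is simply a frame whose z-axis follows the estimated normal, one conventional alternative is an axis-angle rotation from the camera z-axis onto the normal; a hedged sketch (quaternion_from_normal_sketch is an illustrative name, not in the commit):

import numpy as np
import tf.transformations

def quaternion_from_normal_sketch(normal):
    # rotation that takes the camera z-axis onto the estimated plane normal
    z_axis = np.array([0.0, 0.0, 1.0])
    n = np.asarray(normal, dtype=float)
    n = n / np.linalg.norm(n)
    axis = np.cross(z_axis, n)
    angle = np.arccos(np.clip(np.dot(z_axis, n), -1.0, 1.0))
    if np.linalg.norm(axis) < 1e-8:
        # normal already (anti)parallel to z; any perpendicular axis works
        axis = np.array([1.0, 0.0, 0.0])
    return tf.transformations.quaternion_about_axis(angle, axis)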
@@ -185,6 +139,7 @@ if __name__ == "__main__":
    box_sub = message_filters.Subscriber("/yolov5/detections", BoundingBoxes)
    depth_sub = message_filters.Subscriber("/camera/aligned_depth_to_color/image_raw", Image)
    color_info = message_filters.Subscriber("/camera/color/camera_info", CameraInfo)
    # pc_sub = message_filters.Subscriber("/camera/depth/color/points", PointCloud2)
    tf_broadcaster = tf2_ros.TransformBroadcaster()
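The hunk stops before the subscribers are tied together. The rest of __main__ is not shown in this diff, but a typical message_filters wiring for these three topics would look roughly like this (an assumption; queue size and slop values are chosen purely for illustration):

    # approximate-time sync of detections, aligned depth, and camera info
    ts = message_filters.ApproximateTimeSynchronizer(
        [box_sub, depth_sub, color_info], queue_size=10, slop=0.1)
    ts.registerCallback(box_callback)
    rospy.spin()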