Fit a plane to the points,

Calculate the rotation between the plane normal and the Z axis
	new file:   src/maintain/scripts/maintain.py
	deleted:    src/maintain/scripts/test copy.py
	modified:   src/maintain/scripts/test.py
	modified:   src/yolov5_ros/launch/yolov5.launch
wxchen
2023-05-10 16:25:33 +08:00
parent d3956cbec1
commit 68e8ba7901
4 changed files with 218 additions and 170 deletions
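The commit message summarizes the approach implemented in maintain.py below: RANSAC-fit a plane to the depth points inside the detected bounding box, then compute the rotation that takes the camera Z axis onto the plane normal and express it as a quaternion. As a reference for that last step, here is a minimal NumPy-only sketch of the axis-angle-to-quaternion conversion; it is not part of this commit, and the helper name plane_normal_to_quaternion is made up for illustration.

import numpy as np

def plane_normal_to_quaternion(a, b, c):
    # Unit plane normal from the ax + by + cz + d = 0 plane model.
    normal = np.array([a, b, c], dtype=float)
    normal /= np.linalg.norm(normal)
    z_axis = np.array([0.0, 0.0, 1.0])
    # Angle between Z and the normal; clip guards against rounding just outside [-1, 1].
    cos_theta = np.clip(np.dot(z_axis, normal), -1.0, 1.0)
    theta = np.arccos(cos_theta)
    # Rotation axis perpendicular to both vectors.
    axis = np.cross(z_axis, normal)
    axis_norm = np.linalg.norm(axis)
    if axis_norm < 1e-9:
        # Normal is (anti-)parallel to Z: identity rotation, or a half turn about X.
        return np.array([0.0, 0.0, 0.0, 1.0]) if cos_theta > 0 else np.array([1.0, 0.0, 0.0, 0.0])
    axis /= axis_norm
    # Axis-angle to quaternion, (x, y, z, w) ordering.
    return np.hstack((axis * np.sin(theta / 2.0), [np.cos(theta / 2.0)]))

# Example: a plane whose normal leans toward +X.
print(plane_normal_to_quaternion(0.3, 0.0, 0.95))

The (x, y, z, w) ordering matches the quaternion fields of TransformStamped, which is why the node below stacks the vector part before the scalar part.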

src/maintain/scripts/maintain.py Executable file

@@ -0,0 +1,136 @@
#! /home/wxchen/.conda/envs/gsmini/bin/python
import rospy
import numpy as np
import open3d as o3d
from sensor_msgs.msg import Image, CameraInfo, PointCloud2
from detection_msgs.msg import BoundingBox, BoundingBoxes
import sensor_msgs.point_cloud2 as pc2
from cv_bridge import CvBridge, CvBridgeError
import cv2
import tf2_ros
import tf
from geometry_msgs.msg import PoseStamped, TransformStamped

bridge = CvBridge()
color_intrinsics = None
depth_image = None
cloud = None
box = None
d_width = 100
def camera_info_callback(msg):
    global color_intrinsics
    color_intrinsics = msg


def depth_image_callback(msg):
    global depth_image
    depth_image = bridge.imgmsg_to_cv2(msg, '16UC1')


def point_cloud_callback(msg):
    global cloud
    cloud = pc2.read_points(msg, field_names=("x", "y", "z"), skip_nans=True)


def bounding_boxes_callback(msg):
    global box
    for bounding_box in msg.bounding_boxes:
        # Assuming there's only one box, you can add a condition to filter the boxes if needed
        box = [bounding_box.xmin - d_width, bounding_box.ymin - d_width,
               bounding_box.xmax + d_width, bounding_box.ymax + d_width]
def main():
    rospy.init_node("plane_fitting_node")
    rospy.Subscriber("/camera/color/camera_info", CameraInfo, camera_info_callback)
    rospy.Subscriber("/camera/aligned_depth_to_color/image_raw", Image, depth_image_callback)
    rospy.Subscriber("/camera/depth/color/points", PointCloud2, point_cloud_callback)
    rospy.Subscriber("/yolov5/detections", BoundingBoxes, bounding_boxes_callback)
    tf_broadcaster = tf2_ros.TransformBroadcaster()
    plane_pub = rospy.Publisher("/plane_pose", PoseStamped, queue_size=10)
    rate = rospy.Rate(10)  # 10 Hz
    while not rospy.is_shutdown():
        if color_intrinsics is not None and depth_image is not None and cloud is not None and box is not None:
            # Get the 3D points corresponding to the box
            fx, fy = color_intrinsics.K[0], color_intrinsics.K[4]
            cx, cy = color_intrinsics.K[2], color_intrinsics.K[5]
            points = []
            center_x = (box[0] + box[2]) / 2
            center_y = (box[1] + box[3]) / 2
            depth_array = np.array(depth_image, dtype=np.float32)
            pz = depth_array[int(center_y), int(center_x)] / 1000.0
            px = (center_x - cx) * pz / fx
            py = (center_y - cy) * pz / fy
            rospy.loginfo("Center point: {}".format([px, py, pz]))
            screw_point = None
            for x, y, z in cloud:
                if z != 0:
                    u = int(np.round((x * fx) / z + cx))
                    v = int(np.round((y * fy) / z + cy))
                    if u == center_x and v == center_y:
                        screw_point = [x, y, z]
                    if u >= box[0] and u <= box[2] and v >= box[1] and v <= box[3]:
                        points.append([x, y, z])
            points = np.array(points)
            if px != 0 and py != 0 and pz != 0:
                # rospy.loginfo("Screw point: {}".format(screw_point))
                # Fit a plane to the points
                pcd = o3d.geometry.PointCloud()
                pcd.points = o3d.utility.Vector3dVector(points)
                plane_model, inliers = pcd.segment_plane(distance_threshold=0.02, ransac_n=3, num_iterations=100)
                [a, b, c, d] = plane_model
                # Calculate the rotation between the plane normal and the Z axis
                normal = np.array([a, b, c])
                z_axis = np.array([0, 0, 1])
                cos_theta = np.dot(normal, z_axis) / (np.linalg.norm(normal) * np.linalg.norm(z_axis))
                theta = np.arccos(cos_theta)
                rotation_axis = np.cross(z_axis, normal)
                rotation_axis = rotation_axis / np.linalg.norm(rotation_axis)
                quaternion = np.hstack((rotation_axis * np.sin(theta / 2), [np.cos(theta / 2)]))
                # Publish the plane pose
                # plane_pose = PoseStamped()
                # plane_pose.header.stamp = rospy.Time.now()
                # plane_pose.header.frame_id = "camera_color_optical_frame"
                # plane_pose.pose.position.x = screw_point[0]
                # plane_pose.pose.position.y = screw_point[1]
                # plane_pose.pose.position.z = -d / np.linalg.norm(normal)
                # plane_pose.pose.orientation.x = quaternion[0]
                # plane_pose.pose.orientation.y = quaternion[1]
                # plane_pose.pose.orientation.z = quaternion[2]
                # plane_pose.pose.orientation.w = quaternion[3]
                # plane_pub.publish(plane_pose)
                # Publish the screw TF
                screw_tf = TransformStamped()
                screw_tf.header.stamp = rospy.Time.now()
                screw_tf.header.frame_id = "camera_color_optical_frame"
                screw_tf.child_frame_id = "screw"
                screw_tf.transform.translation.x = px
                screw_tf.transform.translation.y = py
                screw_tf.transform.translation.z = pz
                screw_tf.transform.rotation.x = quaternion[0]
                screw_tf.transform.rotation.y = quaternion[1]
                screw_tf.transform.rotation.z = quaternion[2]
                screw_tf.transform.rotation.w = quaternion[3]
                tf_broadcaster.sendTransform(screw_tf)
        rate.sleep()


if __name__ == "__main__":
    try:
        main()
    except rospy.ROSInterruptException:
        pass

src/maintain/scripts/test copy.py Deleted file

@@ -1,148 +0,0 @@
#! /home/wxchen/.conda/envs/gsmini/bin/python
import numpy as np
import cv2 as cv
from matplotlib import pyplot as plt
import rospy
from sensor_msgs.msg import Image
import message_filters
from cv_bridge import CvBridge, CvBridgeError
import rospkg

MIN_MATCH_COUNT = 10
pkg_path = rospkg.RosPack().get_path('maintain')
rospy.loginfo(pkg_path)
img_template = cv.imread(pkg_path + '/scripts/tt.png', 0)


def callback(rgb, depth):
    rospy.loginfo("callback")
    bridge = CvBridge()
    # rospy.loginfo(rgb.header.stamp)
    # rospy.loginfo(depth.header.stamp)
    try:
        rgb_image = bridge.imgmsg_to_cv2(rgb, 'bgr8')
        depth_image = bridge.imgmsg_to_cv2(depth, '16UC1')
        img_matcher = matcher(rgb_image)
        cv.imshow("img_matcher", img_matcher)
        cv.waitKey(1000)
    except CvBridgeError as e:
        print(e)


def matcher(img):
    try:
        # Initiate SIFT detector
        sift = cv.SIFT_create()
        # Find the keypoints and descriptors with SIFT
        kp1, des1 = sift.detectAndCompute(img_template, None)
        kp2, des2 = sift.detectAndCompute(img, None)
        FLANN_INDEX_KDTREE = 1
        index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
        search_params = dict(checks=50)
        flann = cv.FlannBasedMatcher(index_params, search_params)
        matches = flann.knnMatch(des1, des2, k=2)
        # Store all the good matches as per Lowe's ratio test
        good = []
        for m, n in matches:
            if m.distance < 0.7 * n.distance:
                good.append(m)
        if len(good) > MIN_MATCH_COUNT:
            src_pts = np.float32([kp1[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
            dst_pts = np.float32([kp2[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)
            M, mask = cv.findHomography(src_pts, dst_pts, cv.RANSAC, 5.0)
            matchesMask = mask.ravel().tolist()
            h, w = img_template.shape
            pts = np.float32([[0, 0], [0, h - 1], [w - 1, h - 1], [w - 1, 0]]).reshape(-1, 1, 2)
            dst = cv.perspectiveTransform(pts, M)
            roi = img[np.int32(dst)[0][0][1]:np.int32(dst)[2][0][1], np.int32(dst)[0][0][0]:np.int32(dst)[2][0][0]]
            # roi = detect_black(roi)
            # img2 = cv.polylines(img2,[np.int32(dst)],True,255,3, cv.LINE_AA)
        else:
            print("Not enough matches are found - {}/{}".format(len(good), MIN_MATCH_COUNT))
        return roi
    except Exception as e:
        print(e)


if __name__ == "__main__":
    rospy.init_node("maintain")
    rospy.loginfo("maintain task start ......")
    rgb_sub = message_filters.Subscriber("/camera/color/image_raw", Image)
    depth_sub = message_filters.Subscriber("/camera/aligned_depth_to_color/image_raw", Image)
    ts = message_filters.TimeSynchronizer([rgb_sub, depth_sub], 1)
    ts.registerCallback(callback)
    rospy.spin()


# backup
def calculate_image_edge_plane_normal(depth_roi):
    # Get the shape of the depth_roi
    height, width = depth_roi.shape
    # Get the edges of the ROI
    left_edge = [(0, y) for y in range(height)]
    right_edge = [(width - 1, y) for y in range(height)]
    top_edge = [(x, 0) for x in range(width)]
    bottom_edge = [(x, height - 1) for x in range(width)]
    edges = left_edge + right_edge + top_edge + bottom_edge
    # Create a 2D grid of X and Y coordinates
    X, Y = np.meshgrid(np.arange(width), np.arange(height))
    # Reshape the X, Y, and depth_roi arrays into one-dimensional arrays
    X = X.reshape(-1)
    Y = Y.reshape(-1)
    Z = depth_roi.reshape(-1)
    # Stack the X, Y, and depth_roi arrays vertically to create a 3D array of points in the form of [X, Y, Z]
    points = np.vstack([X, Y, Z]).T
    # Compute the mean depth value of the edges
    edge_depths = []
    for edge_point in edges:
        edge_depths.append(depth_roi[edge_point[1], edge_point[0]])
    mean_depth = np.mean(edge_depths)
    # Create a mask to extract the points on the edges
    mask = np.zeros_like(depth_roi, dtype=np.uint8)
    for edge_point in edges:
        mask[edge_point[1], edge_point[0]] = 1
    masked_depth_roi = depth_roi * mask
    # Extract the 3D coordinates of the points on the edges
    edge_points = []
    for edge_point in edges:
        edge_points.append([edge_point[0], edge_point[1], masked_depth_roi[edge_point[1], edge_point[0]]])
    # Convert the list of edge points to a numpy array
    edge_points = np.array(edge_points)
    # Shift the edge points so that the mean depth value is at the origin
    edge_points = edge_points - np.array([width / 2, height / 2, mean_depth])
    # Compute the singular value decomposition (SVD) of the edge points
    U, S, V = np.linalg.svd(edge_points)
    # Extract the normal vector of the plane that best fits the edge points from the right-singular vector corresponding to the smallest singular value
    normal = V[2]
    return normal

src/maintain/scripts/test.py

@@ -60,29 +60,88 @@ def compute_plane_normal(box, depth, color_intrinsics):
     normal += np.cross(v3, v4)
     normal += np.cross(v4, v1)
     normal /= np.linalg.norm(normal)
-    # Convert the normal vector to a quaternion representation
-    theta = math.acos(normal[2])
-    sin_theta_2 = math.sin(theta/2)
-    quaternion = [math.cos(theta/2), sin_theta_2 * normal[0], sin_theta_2 * normal[1], sin_theta_2 * normal[2]]
+    # Compute the rotation angle and axis of the normal relative to the reference vector
+    ref_vector = np.array([0, 0, 1])
+    normal_vector = normal
+    angle = math.acos(np.dot(ref_vector, normal_vector) / (np.linalg.norm(ref_vector) * np.linalg.norm(normal_vector)))
+    axis = np.cross(ref_vector, normal_vector)
+    axis = axis / np.linalg.norm(axis)
+    # Convert the rotation angle and axis to a quaternion
+    qx, qy, qz, qw = tf.transformations.quaternion_about_axis(angle, axis)
+    quaternion = [qx, qy, qz, qw]
     return quaternion
 
-def compute_normal_vector(p1, p2, p3, p4):
-    # Compute two vectors in the plane
-    v1 = np.array(p2) - np.array(p1)
-    v2 = np.array(p3) - np.array(p1)
-    # Compute the cross product of the two vectors to get the normal vector
-    n = np.cross(v1, v2)
-    # Compute the fourth point in the plane
-    p4 = np.array(p4)
-    # Check if the fourth point is on the same side of the plane as the origin
-    if np.dot(n, p4 - np.array(p1)) < 0:
-        n = -n
-    # Normalize the normal vector to obtain a unit vector
-    n = n / np.linalg.norm(n)
-    theta = math.acos(n[2])
-    sin_theta_2 = math.sin(theta/2)
-    quaternion = [math.cos(theta/2), sin_theta_2 * n[0], sin_theta_2 * n[1], sin_theta_2 * n[2]]
-    return quaternion
+def calculate_image_edge_plane_normal(depth_roi):
+    # Get the shape of the depth_roi
+    height, width = depth_roi.shape
+    # Get the edges of the ROI
+    left_edge = [(0, y) for y in range(height)]
+    right_edge = [(width-1, y) for y in range(height)]
+    top_edge = [(x, 0) for x in range(width)]
+    bottom_edge = [(x, height-1) for x in range(width)]
+    edges = left_edge + right_edge + top_edge + bottom_edge
+    # Create a 2D grid of X and Y coordinates
+    X, Y = np.meshgrid(np.arange(width), np.arange(height))
+    # Reshape the X, Y, and depth_roi arrays into one-dimensional arrays
+    X = X.reshape(-1)
+    Y = Y.reshape(-1)
+    Z = depth_roi.reshape(-1)
+    # Stack the X, Y, and depth_roi arrays vertically to create a 3D array of points in the form of [X, Y, Z]
+    points = np.vstack([X, Y, Z]).T
+    # Compute the mean depth value of the edges
+    edge_depths = []
+    for edge_point in edges:
+        edge_depths.append(depth_roi[edge_point[1], edge_point[0]])
+    mean_depth = np.mean(edge_depths)
+    # Create a mask to extract the points on the edges
+    mask = np.zeros_like(depth_roi, dtype=np.uint8)
+    for edge_point in edges:
+        mask[edge_point[1], edge_point[0]] = 1
+    masked_depth_roi = depth_roi * mask
+    # Extract the 3D coordinates of the points on the edges
+    edge_points = []
+    for edge_point in edges:
+        edge_points.append([edge_point[0], edge_point[1], masked_depth_roi[edge_point[1], edge_point[0]]])
+    # Convert the list of edge points to a numpy array
+    edge_points = np.array(edge_points)
+    # Shift the edge points so that the mean depth value is at the origin
+    edge_points = edge_points - np.array([width/2, height/2, mean_depth])
+    # Compute the singular value decomposition (SVD) of the edge points
+    U, S, V = np.linalg.svd(edge_points)
+    # Extract the normal vector of the plane that best fits the edge points from the right-singular vector corresponding to the smallest singular value
+    normal = V[2]
+    return normal
+
+# def compute_normal_vector(p1, p2, p3, p4):
+#     # Compute two vectors in the plane
+#     v1 = np.array(p2) - np.array(p1)
+#     v2 = np.array(p3) - np.array(p1)
+#     # Compute the cross product of the two vectors to get the normal vector
+#     n = np.cross(v1, v2)
+#     # Compute the fourth point in the plane
+#     p4 = np.array(p4)
+#     # Check if the fourth point is on the same side of the plane as the origin
+#     if np.dot(n, p4 - np.array(p1)) < 0:
+#         n = -n
+#     # Normalize the normal vector to obtain a unit vector
+#     n = n / np.linalg.norm(n)
+#     theta = math.acos(n[2])
+#     sin_theta_2 = math.sin(theta/2)
+#     quaternion = [math.cos(theta/2), sin_theta_2 * n[0], sin_theta_2 * n[1], sin_theta_2 * n[2]]
+#     return quaternion
 
 def filter_quaternion(quat, quat_prev, alpha):
     if quat_prev is None:

src/yolov5_ros/launch/yolov5.launch

@@ -50,7 +50,8 @@
<param name="publish_image" value="$(arg publish_image)"/> <param name="publish_image" value="$(arg publish_image)"/>
<param name="output_image_topic" value="$(arg output_image_topic)"/> <param name="output_image_topic" value="$(arg output_image_topic)"/>
</node> </node>
<!-- <include file="$(find camera_launch)/launch/d435.launch"/> --> <include file="$(find realsense2_camera)/launch/my_camera.launch" >
</include>
</launch> </launch>