#! /home/wxchen/.conda/envs/gsmini/bin/python

import numpy as np
import cv2 as cv
from matplotlib import pyplot as plt
import rospy
from sensor_msgs.msg import Image
import message_filters
from cv_bridge import CvBridge, CvBridgeError
import rospkg

# Minimum number of ratio-test matches required before a homography is estimated.
MIN_MATCH_COUNT = 10

pkg_path = rospkg.RosPack().get_path('maintain')
rospy.loginfo(pkg_path)

# Template image used as the SIFT query, loaded as grayscale.
img_template = cv.imread(pkg_path + '/scripts/tt.png', 0)


def callback(rgb, depth):
    rospy.loginfo("callback")
    bridge = CvBridge()
    # rospy.loginfo(rgb.header.stamp)
    # rospy.loginfo(depth.header.stamp)
    try:
        rgb_image = bridge.imgmsg_to_cv2(rgb, 'bgr8')
        depth_image = bridge.imgmsg_to_cv2(depth, '16UC1')  # decoded but not used further here

        # Locate the template in the colour frame and show the cropped region.
        img_matcher = matcher(rgb_image)
        if img_matcher is not None and img_matcher.size > 0:
            cv.imshow("img_matcher", img_matcher)
            cv.waitKey(1000)

    except CvBridgeError as e:
        print(e)


def matcher(img):
    try:
        # Initiate SIFT detector
        sift = cv.SIFT_create()

        # find the keypoints and descriptors with SIFT
        kp1, des1 = sift.detectAndCompute(img_template, None)
        kp2, des2 = sift.detectAndCompute(img, None)

        # FLANN matcher with a KD-tree index; k=2 so Lowe's ratio test can be applied.
        FLANN_INDEX_KDTREE = 1
        index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
        search_params = dict(checks=50)

        flann = cv.FlannBasedMatcher(index_params, search_params)
        matches = flann.knnMatch(des1, des2, k=2)

        # store all the good matches as per Lowe's ratio test.
        good = []
        for m, n in matches:
            if m.distance < 0.7 * n.distance:
                good.append(m)

        if len(good) > MIN_MATCH_COUNT:
            src_pts = np.float32([kp1[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
            dst_pts = np.float32([kp2[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)

            # Estimate the template-to-frame homography with RANSAC.
            M, mask = cv.findHomography(src_pts, dst_pts, cv.RANSAC, 5.0)
            matchesMask = mask.ravel().tolist()  # kept for optional match visualisation

            # Project the template corners into the frame and crop that region.
            h, w = img_template.shape
            pts = np.float32([[0, 0], [0, h - 1], [w - 1, h - 1], [w - 1, 0]]).reshape(-1, 1, 2)
            dst = cv.perspectiveTransform(pts, M)

            corners = np.int32(dst)
            roi = img[corners[0][0][1]:corners[2][0][1], corners[0][0][0]:corners[2][0][0]]
            # roi = detect_black(roi)

            # img2 = cv.polylines(img2, [np.int32(dst)], True, 255, 3, cv.LINE_AA)
            return roi
        else:
            print("Not enough matches are found - {}/{}".format(len(good), MIN_MATCH_COUNT))
            return None

    except Exception as e:
        print(e)
        return None


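# Hedged usage sketch, not part of the original node: exercise matcher() offline on a
# saved frame instead of the live ROS stream. The image path is only a placeholder.
def _debug_match_file(image_path="frame.png"):
    frame = cv.imread(image_path)  # BGR frame, the same format callback() passes to matcher()
    if frame is None:
        print("could not read", image_path)
        return
    roi = matcher(frame)
    if roi is not None and roi.size > 0:
        cv.imshow("roi", roi)
        cv.waitKey(0)
        cv.destroyAllWindows()

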
if __name__ == "__main__":
    rospy.init_node("maintain")
    rospy.loginfo("maintain task start ......")

    # Synchronised colour and aligned-depth image streams.
    rgb_sub = message_filters.Subscriber("/camera/color/image_raw", Image)
    depth_sub = message_filters.Subscriber("/camera/aligned_depth_to_color/image_raw", Image)

    # Exact-timestamp synchroniser with a queue size of 1.
    ts = message_filters.TimeSynchronizer([rgb_sub, depth_sub], 1)
    ts.registerCallback(callback)

    rospy.spin()


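# Assumed setup, not stated in this file: the two topics above match what the
# realsense2_camera ROS driver publishes when depth-to-colour alignment is enabled,
# e.g. `roslaunch realsense2_camera rs_camera.launch align_depth:=true`. The exact
# launch file and argument are an assumption about the camera driver in use.

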
# backup
def calculate_image_edge_plane_normal(depth_roi):
    # Get the shape of the depth_roi
    height, width = depth_roi.shape

    # Get the edges of the ROI
    left_edge = [(0, y) for y in range(height)]
    right_edge = [(width - 1, y) for y in range(height)]
    top_edge = [(x, 0) for x in range(width)]
    bottom_edge = [(x, height - 1) for x in range(width)]
    edges = left_edge + right_edge + top_edge + bottom_edge

    # Create a 2D grid of X and Y coordinates
    X, Y = np.meshgrid(np.arange(width), np.arange(height))

    # Reshape the X, Y, and depth_roi arrays into one-dimensional arrays
    X = X.reshape(-1)
    Y = Y.reshape(-1)
    Z = depth_roi.reshape(-1)

    # Stack the X, Y, and depth_roi arrays vertically to create a 3D array of points in the form [X, Y, Z]
    points = np.vstack([X, Y, Z]).T

    # Compute the mean depth value of the edges
    edge_depths = []
    for edge_point in edges:
        edge_depths.append(depth_roi[edge_point[1], edge_point[0]])
    mean_depth = np.mean(edge_depths)

    # Create a mask to extract the points on the edges
    mask = np.zeros_like(depth_roi, dtype=np.uint8)
    for edge_point in edges:
        mask[edge_point[1], edge_point[0]] = 1
    masked_depth_roi = depth_roi * mask

    # Extract the 3D coordinates of the points on the edges
    edge_points = []
    for edge_point in edges:
        edge_points.append([edge_point[0], edge_point[1], masked_depth_roi[edge_point[1], edge_point[0]]])

    # Convert the list of edge points to a numpy array
    edge_points = np.array(edge_points)

    # Shift the edge points so that the ROI centre and the mean edge depth sit at the origin
    edge_points = edge_points - np.array([width / 2, height / 2, mean_depth])

    # Compute the singular value decomposition (SVD) of the edge points
    U, S, V = np.linalg.svd(edge_points)

    # Extract the normal vector of the plane that best fits the edge points from the
    # right-singular vector corresponding to the smallest singular value
    normal = V[2]

    return normal
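

# Hedged self-check sketch, not part of the original script: on a perfectly flat depth ROI
# the fitted plane normal should come out close to [0, 0, 1] (up to sign). The value 500 is
# an arbitrary synthetic depth reading, assuming raw 16UC1 units as decoded in callback().
def _check_plane_normal():
    flat_roi = np.full((100, 120), 500, dtype=np.uint16)
    normal = calculate_image_edge_plane_normal(flat_roi)
    print("plane normal:", normal)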