Compare commits

..

5 Commits

Author SHA1 Message Date
wxchen
aabf72c149 Merge branch 'master' of http://git.wxchen.site/wxchen/maintain into da 2023-05-08 13:33:19 +08:00
wxchen
b281a5caed modified: src/maintain/scripts/test.py 2023-05-08 12:23:28 +08:00
wxchen
5aad39b7ba Merge branch 'master' of http://git.wxchen.site/wxchen/maintain into da 2023-05-08 11:18:26 +08:00
wxchen
bda7453e51 modified: src/maintain/scripts/test.py 2023-05-08 11:11:43 +08:00
wxchen
231f5fc815 da changes
modified:   .gitignore
	modified:   .vscode/tasks.json
	modified:   src/maintain/scripts/test.py
	modified:   src/vision_opencv/cv_bridge/python/cv_bridge/__pycache__/core.cpython-38.pyc
2023-05-08 10:03:17 +08:00
10 changed files with 250 additions and 341 deletions

1
.gitignore vendored
View File

@@ -1,6 +1,5 @@
.catkin_tools .catkin_tools
.vscode .vscode
.vscode/
.vscode/* .vscode/*
/build /build
/devel /devel

20
.vscode/c_cpp_properties.json vendored Normal file
View File

@@ -0,0 +1,20 @@
{
"configurations": [
{
"browse": {
"databaseFilename": "${default}",
"limitSymbolsToIncludedHeaders": false
},
"includePath": [
"/opt/ros/melodic/include/**",
"/usr/include/**"
],
"name": "ROS",
"intelliSenseMode": "gcc-x64",
"compilerPath": "/usr/bin/gcc",
"cStandard": "gnu11",
"cppStandard": "c++14"
}
],
"version": 4
}

8
.vscode/settings.json vendored Normal file
View File

@@ -0,0 +1,8 @@
{
"python.autoComplete.extraPaths": [
"/opt/ros/melodic/lib/python2.7/dist-packages"
],
"python.analysis.extraPaths": [
"/opt/ros/melodic/lib/python2.7/dist-packages"
]
}

18
.vscode/tasks.json vendored Normal file
View File

@@ -0,0 +1,18 @@
{
"tasks": [
{
"type": "shell",
"command": "catkin",
"args": [
"build",
"-DPYTHON_EXECUTABLE=/home/da/miniconda3/envs/gsmini/bin/python",
//"-DPYTHON_EXECUTABLE=${HOME}/.conda/envs/gsmini/bin/python"
],
"problemMatcher": [
"$catkin-gcc"
],
"group": "build",
"label": "catkin: build"
}
]
}

View File

@@ -1,136 +0,0 @@
#! /home/wxchen/.conda/envs/gsmini/bin/python
import rospy
import numpy as np
import open3d as o3d
from sensor_msgs.msg import Image , CameraInfo, PointCloud2
from detection_msgs.msg import BoundingBox, BoundingBoxes
import sensor_msgs.point_cloud2 as pc2
import cv_bridge
from cv_bridge import CvBridge, CvBridgeError
import cv2
import tf2_ros
import tf
from geometry_msgs.msg import PoseStamped, TransformStamped
# Shared state populated by the ROS subscriber callbacks below and read by main().
bridge = CvBridge()       # converts sensor_msgs/Image <-> OpenCV arrays
color_intrinsics = None   # latest CameraInfo message (intrinsics live in .K)
cloud = None              # latest point cloud as an (x, y, z) generator
box = None                # latest detection box [xmin, ymin, xmax, ymax], padded by d_width
d_width = 100             # padding (pixels) added on every side of the detection box
def camera_info_callback(msg):
    """Cache the most recent CameraInfo message for intrinsics lookup in main()."""
    global color_intrinsics
    color_intrinsics = msg
def depth_image_callback(msg):
    """Cache the aligned depth frame as a 16-bit single-channel OpenCV image.

    '16UC1' preserves the raw uint16 depth values; main() divides by 1000,
    so the topic is presumably in millimetres — TODO confirm against the camera driver.
    """
    global depth_image
    depth_image = bridge.imgmsg_to_cv2(msg, '16UC1')
def point_cloud_callback(msg):
    """Cache the latest point cloud as a lazy (x, y, z) point generator.

    NOTE(review): read_points returns a one-shot generator; main() exhausts it
    in a single loop, so a second pass over the same message yields nothing —
    confirm a fresh message always arrives between iterations.
    """
    global cloud
    cloud = pc2.read_points(msg, field_names=("x", "y", "z"), skip_nans=True)
def bounding_boxes_callback(msg):
    """Store the last detection box, expanded by d_width pixels on every side.

    The loop overwrites `box` on each iteration, so only the final bounding
    box in the message is kept.
    """
    global box
    for bounding_box in msg.bounding_boxes:
        # Assuming there's only one box, you can add a condition to filter the boxes if needed
        box = [bounding_box.xmin - d_width, bounding_box.ymin - d_width, bounding_box.xmax + d_width, bounding_box.ymax + d_width]
def main():
    """Fit a plane to the cloud points inside the detected box and broadcast
    a "screw" TF frame: translation = back-projected box centre, rotation =
    quaternion aligning the camera Z axis with the fitted plane normal.
    """
    rospy.init_node("plane_fitting_node")
    rospy.Subscriber("/camera/color/camera_info", CameraInfo, camera_info_callback)
    rospy.Subscriber("/camera/aligned_depth_to_color/image_raw", Image, depth_image_callback)
    rospy.Subscriber("/camera/depth/color/points", PointCloud2, point_cloud_callback)
    rospy.Subscriber("/yolov5/detections", BoundingBoxes, bounding_boxes_callback)
    tf_broadcaster = tf2_ros.TransformBroadcaster()
    plane_pub = rospy.Publisher("/plane_pose", PoseStamped, queue_size=10)
    rate = rospy.Rate(10) # 10 Hz
    while not rospy.is_shutdown():
        # NOTE(review): depth_image is not guarded here — if a detection and
        # cloud arrive before any depth frame, the np.array(depth_image) line
        # below raises NameError. Confirm whether that startup race matters.
        if color_intrinsics is not None and cloud is not None and box is not None:
            # Get the 3D points corresponding to the box
            fx, fy = color_intrinsics.K[0], color_intrinsics.K[4]
            cx, cy = color_intrinsics.K[2], color_intrinsics.K[5]
            points = []
            center_x = (box[0] + box[2]) / 2
            center_y = (box[1] + box[3]) / 2
            depth_array = np.array(depth_image, dtype=np.float32)
            # Back-project the box centre pixel to camera coordinates;
            # the /1000 presumes depth in millimetres -> metres.
            pz = depth_array[int(center_y), int(center_x)] / 1000.0
            px = (center_x - color_intrinsics.K[2]) * pz / color_intrinsics.K[0]
            py = (center_y - color_intrinsics.K[5]) * pz / color_intrinsics.K[4]
            rospy.loginfo("Center point: {}".format([px, py, pz]))
            screw_point = None
            # Project every cloud point into the image; collect those that land
            # inside the (padded) detection box for the plane fit.
            # NOTE(review): `cloud` is a one-shot generator (see
            # point_cloud_callback) — this loop exhausts it.
            for x, y, z in cloud:
                if z != 0:
                    u = int(np.round((x * fx) / z + cx))
                    v = int(np.round((y * fy) / z + cy))
                    if u == center_x and v == center_y:
                        screw_point = [x, y, z]
                    if u >= box[0] and u <= box[2] and v >= box[1] and v <= box[3]:
                        points.append([x, y, z])
            points = np.array(points)
            if px != 0 and py != 0 and pz != 0:
                # rospy.loginfo("Screw point: {}".format(screw_point))
                # Fit a plane to the points
                pcd = o3d.geometry.PointCloud()
                pcd.points = o3d.utility.Vector3dVector(points)
                plane_model, inliers = pcd.segment_plane(distance_threshold=0.02, ransac_n=3, num_iterations=100)
                [a, b, c, d] = plane_model
                # Calculate the rotation between the plane normal and the Z axis
                normal = np.array([a, b, c])
                z_axis = np.array([0, 0, 1])
                cos_theta = np.dot(normal, z_axis) / (np.linalg.norm(normal) * np.linalg.norm(z_axis))
                theta = np.arccos(cos_theta)
                # NOTE(review): if the normal is exactly parallel to Z the cross
                # product is the zero vector and this normalisation divides by
                # zero, giving a NaN quaternion — confirm acceptable.
                rotation_axis = np.cross(z_axis, normal)
                rotation_axis = rotation_axis / np.linalg.norm(rotation_axis)
                # Axis-angle -> quaternion in (x, y, z, w) order.
                quaternion = np.hstack((rotation_axis * np.sin(theta / 2), [np.cos(theta / 2)]))
                # Publish the plane pose
                # plane_pose = PoseStamped()
                # plane_pose.header.stamp = rospy.Time.now()
                # plane_pose.header.frame_id = "camera_color_optical_frame"
                # plane_pose.pose.position.x = screw_point[0]
                # plane_pose.pose.position.y = screw_point[1]
                # plane_pose.pose.position.z = -d / np.linalg.norm(normal)
                # plane_pose.pose.orientation.x = quaternion[0]
                # plane_pose.pose.orientation.y = quaternion[1]
                # plane_pose.pose.orientation.z = quaternion[2]
                # plane_pose.pose.orientation.w = quaternion[3]
                # plane_pub.publish(plane_pose)
                # publish screw tf
                screw_tf = TransformStamped()
                screw_tf.header.stamp = rospy.Time.now()
                screw_tf.header.frame_id = "camera_color_optical_frame"
                screw_tf.child_frame_id = "screw"
                screw_tf.transform.translation.x = px
                screw_tf.transform.translation.y = py
                screw_tf.transform.translation.z = pz
                screw_tf.transform.rotation.x = quaternion[0]
                screw_tf.transform.rotation.y = quaternion[1]
                screw_tf.transform.rotation.z = quaternion[2]
                screw_tf.transform.rotation.w = quaternion[3]
                tf_broadcaster.sendTransform(screw_tf)
        rate.sleep()
if __name__ == "__main__":
    try:
        main()
    except rospy.ROSInterruptException:
        # Normal shutdown path when ROS interrupts the node (Ctrl-C / kill).
        pass

View File

@@ -0,0 +1,93 @@
#! /home/wxchen/.conda/envs/gsmini/bin/python
import numpy as np
import cv2 as cv
from matplotlib import pyplot as plt
import rospy
from sensor_msgs.msg import Image
import message_filters
from cv_bridge import CvBridge, CvBridgeError
import rospkg
# Minimum number of Lowe-ratio-filtered SIFT matches required before a
# homography between template and live image is attempted.
MIN_MATCH_COUNT = 10
# Locate this package on disk and load the grayscale match template (flag 0 = grayscale).
# NOTE(review): rospy.loginfo runs at import time, before init_node() in the
# __main__ guard below — this message may be dropped; confirm intended.
pkg_path = rospkg.RosPack().get_path('maintain')
rospy.loginfo(pkg_path)
img_template = cv.imread(pkg_path + '/scripts/tt.png',0)
def callback(rgb, depth):
    """Time-synchronized RGB/depth callback: run template matching on the RGB frame
    and display the matched region for one second.
    """
    rospy.loginfo("callback")
    bridge = CvBridge()
    # rospy.loginfo(rgb.header.stamp)
    # rospy.loginfo(depth.header.stamp)
    try:
        rgb_image = bridge.imgmsg_to_cv2(rgb, 'bgr8')
        # Depth is converted but not used further in this callback.
        depth_image = bridge.imgmsg_to_cv2(depth, '16UC1')
        # NOTE(review): matcher() can return None (too few matches / internal
        # error); imshow(None) then raises cv2.error, which is NOT caught by
        # the CvBridgeError handler below — confirm intended.
        img_matcher = matcher(rgb_image)
        cv.imshow("img_matcher", img_matcher)
        cv.waitKey(1000)
    except CvBridgeError as e:
        print(e)
def matcher(img):
    """Locate the module-level grayscale template inside *img*.

    Pipeline: SIFT keypoints/descriptors -> FLANN KD-tree kNN matching ->
    Lowe ratio test -> RANSAC homography -> crop the region of *img* spanned
    by the projected template corners.

    Returns:
        The cropped region (numpy array view of *img*) on success, or
        ``None`` when there are not enough good matches or any step raises.
    """
    try:
        # Initiate SIFT detector
        sift = cv.SIFT_create()
        # find the keypoints and descriptors with SIFT
        kp1, des1 = sift.detectAndCompute(img_template, None)
        kp2, des2 = sift.detectAndCompute(img, None)
        FLANN_INDEX_KDTREE = 1
        index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
        search_params = dict(checks=50)
        flann = cv.FlannBasedMatcher(index_params, search_params)
        matches = flann.knnMatch(des1, des2, k=2)
        # store all the good matches as per Lowe's ratio test.
        good = [m for m, n in matches if m.distance < 0.7 * n.distance]
        if len(good) <= MIN_MATCH_COUNT:
            # BUG FIX: the original fell through to `return roi` with `roi`
            # unbound here, raising NameError that the broad except swallowed.
            # Return None explicitly so callers can test for failure.
            print("Not enough matches are found - {}/{}".format(len(good), MIN_MATCH_COUNT))
            return None
        src_pts = np.float32([kp1[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
        dst_pts = np.float32([kp2[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)
        M, mask = cv.findHomography(src_pts, dst_pts, cv.RANSAC, 5.0)
        matchesMask = mask.ravel().tolist()
        # Template was loaded grayscale, so .shape is (rows, cols).
        h, w = img_template.shape
        pts = np.float32([[0, 0], [0, h - 1], [w - 1, h - 1], [w - 1, 0]]).reshape(-1, 1, 2)
        dst = cv.perspectiveTransform(pts, M)
        corners = np.int32(dst)
        # Crop between the projected top-left (corner 0) and bottom-right
        # (corner 2) template corners.
        roi = img[corners[0][0][1]:corners[2][0][1], corners[0][0][0]:corners[2][0][0]]
        # roi = detect_black(roi)
        # img2 = cv.polylines(img2,[np.int32(dst)],True,255,3, cv.LINE_AA)
        return roi
    except Exception as e:
        print(e)
        return None
if __name__ == "__main__":
    rospy.init_node("maintain")
    rospy.loginfo("maintain task start ......")
    # Pair colour and aligned-depth frames with exactly matching timestamps;
    # queue size 1 keeps only the most recent pair.
    rgb_sub = message_filters.Subscriber("/camera/color/image_raw", Image)
    depth_sub = message_filters.Subscriber("/camera/aligned_depth_to_color/image_raw", Image)
    ts = message_filters.TimeSynchronizer([rgb_sub, depth_sub], 1)
    ts.registerCallback(callback)
    rospy.spin()

View File

@@ -1,166 +1,28 @@
#! /home/wxchen/.conda/envs/gsmini/bin/python #! /home/da/miniconda3/envs/gsmini/bin/python
import numpy as np import numpy as np
import cv2 as cv import cv2 as cv
from matplotlib import pyplot as plt from matplotlib import pyplot as plt
import rospy import rospy
import tf2_ros import tf2_ros
import tf import tf2_ros
from sensor_msgs.msg import Image , CameraInfo, PointCloud2 from sensor_msgs.msg import Image , CameraInfo
from geometry_msgs.msg import PoseStamped, TransformStamped, Quaternion from geometry_msgs.msg import PoseStamped, TransformStamped
from geometry_msgs.msg import PoseStamped, TransformStamped
import message_filters import message_filters
from cv_bridge import CvBridge, CvBridgeError from cv_bridge import CvBridge, CvBridgeError
import rospkg import rospkg
# import open3d as o3d
# from open3d_ros_helper import open3d_ros_helper as orh
import math import os
import sys
from rostopic import get_topic_type from rostopic import get_topic_type
from detection_msgs.msg import BoundingBox, BoundingBoxes from detection_msgs.msg import BoundingBox, BoundingBoxes
bridge = CvBridge() bridge = CvBridge()
annulus_width = 10
# 2d to 3d
def computer_2d_3d(x, y, depth_roi, color_intrinsics):
pz = depth_roi[int(y), int(x)] / 1000.0
px = (x - color_intrinsics[2]) * pz / color_intrinsics[0]
py = (y - color_intrinsics[5]) * pz / color_intrinsics[4]
return px, py, pz
def compute_plane_normal(box, depth, color_intrinsics):
# 计算相机内参
fx = color_intrinsics[0]
fy = color_intrinsics[4]
cx = color_intrinsics[2]
cy = color_intrinsics[5]
# 计算矩形中心点坐标
x_center = (box[0] + box[2]) / 2
y_center = (box[1] + box[3]) / 2
z = depth[int(y_center), int(x_center)]
x = (x_center - cx) * z / fx
y = (y_center - cy) * z / fy
# 计算四个顶点坐标
x1 = (box[0] - cx) * z / fx
y1 = (box[1] - cy) * z / fy
x2 = (box[2] - cx) * z / fx
y2 = (box[1] - cy) * z / fy
x3 = (box[2] - cx) * z / fx
y3 = (box[3] - cy) * z / fy
x4 = (box[0] - cx) * z / fx
y4 = (box[3] - cy) * z / fy
# 计算矩形边缘向量
v1 = np.array([x2 - x1, y2 - y1, depth[int(box[1]), int(box[0])] - z])
v2 = np.array([x3 - x2, y3 - y2, depth[int(box[1]), int(box[2])] - z])
v3 = np.array([x4 - x3, y4 - y3, depth[int(box[3]), int(box[2])] - z])
v4 = np.array([x1 - x4, y1 - y4, depth[int(box[3]), int(box[0])] - z])
# 计算平面法向量
normal = np.cross(v1, v2)
normal += np.cross(v2, v3)
normal += np.cross(v3, v4)
normal += np.cross(v4, v1)
normal /= np.linalg.norm(normal)
# 计算法向量相对于参考向量的旋转角度和旋转轴
ref_vector = np.array([0, 0, 1])
normal_vector = normal
angle = math.acos(np.dot(ref_vector, normal_vector) / (np.linalg.norm(ref_vector) * np.linalg.norm(normal_vector)))
axis = np.cross(ref_vector, normal_vector)
axis = axis / np.linalg.norm(axis)
# 将旋转角度和旋转轴转换为四元数
qx, qy, qz, qw = tf.transformations.quaternion_about_axis(angle, axis)
quaternion = [qx, qy, qz, qw]
return quaternion
def calculate_image_edge_plane_normal(depth_roi):
# Get the shape of the depth_roi
height, width = depth_roi.shape
# Get the edges of the ROI
left_edge = [(0, y) for y in range(height)]
right_edge = [(width-1, y) for y in range(height)]
top_edge = [(x, 0) for x in range(width)]
bottom_edge = [(x, height-1) for x in range(width)]
edges = left_edge + right_edge + top_edge + bottom_edge
# Create a 2D grid of X and Y coordinates
X, Y = np.meshgrid(np.arange(width), np.arange(height))
# Reshape the X, Y, and depth_roi arrays into one-dimensional arrays
X = X.reshape(-1)
Y = Y.reshape(-1)
Z = depth_roi.reshape(-1)
# Stack the X, Y, and depth_roi arrays vertically to create a 3D array of points in the form of [X, Y, Z]
points = np.vstack([X, Y, Z]).T
# Compute the mean depth value of the edges
edge_depths = []
for edge_point in edges:
edge_depths.append(depth_roi[edge_point[1], edge_point[0]])
mean_depth = np.mean(edge_depths)
# Create a mask to extract the points on the edges
mask = np.zeros_like(depth_roi, dtype=np.uint8)
for edge_point in edges:
mask[edge_point[1], edge_point[0]] = 1
masked_depth_roi = depth_roi * mask
# Extract the 3D coordinates of the points on the edges
edge_points = []
for edge_point in edges:
edge_points.append([edge_point[0], edge_point[1], masked_depth_roi[edge_point[1], edge_point[0]]])
# Convert the list of edge points to a numpy array
edge_points = np.array(edge_points)
# Shift the edge points so that the mean depth value is at the origin
edge_points = edge_points - np.array([width/2, height/2, mean_depth])
# Compute the singular value decomposition (SVD) of the edge points
U, S, V = np.linalg.svd(edge_points)
# Extract the normal vector of the plane that best fits the edge points from the right-singular vector corresponding to the smallest singular value
normal = V[2]
return normal
# def compute_normal_vector(p1, p2, p3, p4):
# # Compute two vectors in the plane
# v1 = np.array(p2) - np.array(p1)
# v2 = np.array(p3) - np.array(p1)
# # Compute the cross product of the two vectors to get the normal vector
# n = np.cross(v1, v2)
# # Compute the fourth point in the plane
# p4 = np.array(p4)
# # Check if the fourth point is on the same side of the plane as the origin
# if np.dot(n, p4 - np.array(p1)) < 0:
# n = -n
# # Normalize the normal vector to obtain a unit vector
# n = n / np.linalg.norm(n)
# theta = math.acos(n[2])
# sin_theta_2 = math.sin(theta/2)
# quaternion = [math.cos(theta/2), sin_theta_2 * n[0], sin_theta_2 * n[1], sin_theta_2 * n[2]]
# return quaternion
def filter_quaternion(quat, quat_prev, alpha):
if quat_prev is None:
quat_prev = quat
# Apply low-pass filter to quaternion
quat_filtered = np.zeros(4)
for i in range(4):
quat_filtered[i] = alpha * quat[i] + (1-alpha) * quat_prev[i]
# Normalize the quaternion
quat_filtered = quat_filtered / np.linalg.norm(quat_filtered)
return quat_filtered
def box_callback(box, depth, color_info): def box_callback(box, depth, color_info):
try: try:
color_intrinsics = color_info.K color_intrinsics = color_info.K
depth_image = bridge.imgmsg_to_cv2(depth, '16UC1') depth_image = bridge.imgmsg_to_cv2(depth, '16UC1')
# pc = orh.rospc_to_o3dpc(pc_msg)
if box is not None and len(box.bounding_boxes) > 0:
# get the center of screw # get the center of screw
boundingBox = box.bounding_boxes[0] boundingBox = box.bounding_boxes[0]
screw_x = (boundingBox.xmax + boundingBox.xmin) / 2 screw_x = (boundingBox.xmax + boundingBox.xmin) / 2
@@ -168,46 +30,38 @@ def box_callback(box, depth, color_info):
# print(screw_x,screw_y) # print(screw_x,screw_y)
depth_array = np.array(depth_image, dtype=np.float32) depth_array = np.array(depth_image, dtype=np.float32)
# depth_roi = depth_array[boundingBox.ymin:boundingBox.ymax, boundingBox.xmin:boundingBox.xmax] depth_roi = depth_array[boundingBox.ymin:boundingBox.ymax, boundingBox.xmin:boundingBox.xmax]
x, y, z = computer_2d_3d(screw_x, screw_y, depth_array, color_intrinsics) z = np.mean(depth_roi) * 0.001
# rospy.loginfo("screw pose: x: %f, y: %f, z: %f", x, y, z) x = (screw_x - color_intrinsics[2]) * z / color_intrinsics[0]
y = (screw_y - color_intrinsics[5]) * z / color_intrinsics[4]
rospy.loginfo("screw pose: x: %f, y: %f, z: %f", x, y, z)
# calculate normal direction of screw area # calculate normal direction of screw area
box = [boundingBox.ymin - annulus_width, boundingBox.xmin - annulus_width, boundingBox.ymax + annulus_width, boundingBox.xmax + annulus_width]
# p1x, p1y, p1z = computer_2d_3d(boundingBox.xmin-annulus_width, boundingBox.ymin-annulus_width, depth_array, color_intrinsics)
# p2x, p2y, p2z = computer_2d_3d(boundingBox.xmax+annulus_width, boundingBox.ymin-annulus_width, depth_array, color_intrinsics)
# p3x, p3y, p3z = computer_2d_3d(boundingBox.xmax+annulus_width, boundingBox.ymax+annulus_width, depth_array, color_intrinsics)
# p4x, p4y, p4z = computer_2d_3d(boundingBox.xmin-annulus_width, boundingBox.ymax+annulus_width, depth_array, color_intrinsics)
# p1 = [p1x, p1y, p1z]
# p2 = [p2x, p2y, p2z]
# p3 = [p3x, p3y, p3z]
# p4 = [p4x, p4y, p4z]
# normal_q = compute_normal_vector(p1, p2, p3, p4)
normal_q = compute_plane_normal(box, depth_array, color_intrinsics)
# annulus_roi = depth_array[boundingBox.ymin-annulus_width:boundingBox.ymax+annulus_width, boundingBox.xmin-annulus_width:boundingBox.xmax+annulus_width] X,Y = np.meshgrid(np.arange(depth_roi.shape[1]), np.arange(depth_roi.shape[0]))
# normal = calculate_image_edge_plane_normal(annulus_roi) X = X.reshape(-1)
Y = Y.reshape(-1)
Z = depth_roi.reshape(-1)
points = np.vstack([X, Y, Z]).T
center = np.mean(points, axis=0)
points = points - center
U, S, V = np.linalg.svd(points)
normal = V[2]
# print(normal) # print(normal)
# publish screw pose
# screw_pose = PoseStamped()
# screw_pose.header.stamp = rospy.Time.now()
# screw_pose.header.frame_id = "camera_color_optical_frame"
# screw_pose.pose.position.x = x
# screw_pose.pose.position.y = y
# screw_pose.pose.position.z = z
# screw_pose.pose.orientation.x = 0
# screw_pose.pose.orientation.y = 0
# screw_pose.pose.orientation.z = 0
# screw_pose.pose.orientation.w = 1
# normal vector to quaternion # pose_pub.publish(screw_pose)
screw_quat = tf.transformations.quaternion_from_euler(0, 0, 0)
screw_quat[0] = normal_q[0]
screw_quat[1] = normal_q[1]
screw_quat[2] = normal_q[2]
screw_quat[3] = normal_q[3]
# quaternion to euler
screw_euler = tf.transformations.euler_from_quaternion(screw_quat)
screw_quat_zero_z = tf.transformations.quaternion_from_euler(screw_euler[0], screw_euler[1], 0)
# Apply low-pass filter to screw quaternion
alpha = 0.4
global screw_quat_prev
screw_quat_filtered = filter_quaternion(screw_quat, screw_quat_prev, alpha)
screw_quat_prev = screw_quat_filtered
# publish screw tf # publish screw tf
screw_tf = TransformStamped() screw_tf = TransformStamped()
@@ -217,22 +71,21 @@ def box_callback(box, depth, color_info):
screw_tf.transform.translation.x = x screw_tf.transform.translation.x = x
screw_tf.transform.translation.y = y screw_tf.transform.translation.y = y
screw_tf.transform.translation.z = z screw_tf.transform.translation.z = z
screw_tf.transform.rotation.x = screw_quat_filtered[0] screw_tf.transform.rotation.x = normal[0]
screw_tf.transform.rotation.y = screw_quat_filtered[1] screw_tf.transform.rotation.y = normal[1]
screw_tf.transform.rotation.z = screw_quat_filtered[2] screw_tf.transform.rotation.z = normal[2]
screw_tf.transform.rotation.w = screw_quat_filtered[3] screw_tf.transform.rotation.w = 0
tf_broadcaster.sendTransform(screw_tf) tf_broadcaster.sendTransform(screw_tf)
except Exception as e: except Exception as e:
print(e) print(e)
if __name__ == "__main__": if __name__ == "__main__":
global screw_quat_prev
screw_quat_prev = None
rospy.init_node("maintain") rospy.init_node("maintain")
rospy.loginfo("maintain task start ......") rospy.loginfo("maintain task start ......")
@@ -240,7 +93,6 @@ if __name__ == "__main__":
box_sub = message_filters.Subscriber("/yolov5/detections", BoundingBoxes) box_sub = message_filters.Subscriber("/yolov5/detections", BoundingBoxes)
depth_sub = message_filters.Subscriber("/camera/aligned_depth_to_color/image_raw", Image) depth_sub = message_filters.Subscriber("/camera/aligned_depth_to_color/image_raw", Image)
color_info = message_filters.Subscriber("/camera/color/camera_info", CameraInfo) color_info = message_filters.Subscriber("/camera/color/camera_info", CameraInfo)
# pc_sub = message_filters.Subscriber("/camera/depth/color/points", PointCloud2)
tf_broadcaster = tf2_ros.TransformBroadcaster() tf_broadcaster = tf2_ros.TransformBroadcaster()

View File

@@ -50,8 +50,7 @@
<param name="publish_image" value="$(arg publish_image)"/> <param name="publish_image" value="$(arg publish_image)"/>
<param name="output_image_topic" value="$(arg output_image_topic)"/> <param name="output_image_topic" value="$(arg output_image_topic)"/>
</node> </node>
<include file="$(find realsense2_camera)/launch/my_camera.launch" > <!-- <include file="$(find camera_launch)/launch/d435.launch"/> -->
</include>
</launch> </launch>

View File

@@ -0,0 +1,56 @@
<launch>
<!-- Detection configuration -->
<arg name="weights" default="$(find yolov5_ros)/best.pt"/>
<arg name="data" default="$(find yolov5_ros)/src/yolov5/data/coco128.yaml"/>
<arg name="confidence_threshold" default="0.5"/>
<arg name="iou_threshold" default="0.45"/>
<arg name="maximum_detections" default="1000"/>
<arg name="device" default="0"/>
<arg name="agnostic_nms" default="true"/>
<arg name="line_thickness" default="3"/>
<arg name="dnn" default="true"/>
<arg name="half" default="false"/>
<!-- replace imgsz -->
<arg name="inference_size_h" default="640"/>
<arg name="inference_size_w" default="640"/>
<!-- Visualize using OpenCV window -->
<arg name="view_image" default="true"/>
<!-- ROS topics -->
<arg name="input_image_topic" default="/clover0/main_camera/image_raw"/>
<arg name="output_topic" default="/yolov5/detections"/>
<!-- Optional topic (publishing annotated image) -->
<arg name="publish_image" default="false"/>
<arg name="output_image_topic" default="/yolov5/image_out"/>
<node pkg="yolov5_ros" name="detect" type="detect.py" output="screen">
<param name="weights" value="$(arg weights)"/>
<param name="data" value="$(arg data)"/>
<param name="confidence_threshold" value="$(arg confidence_threshold)"/>
<param name="iou_threshold" value="$(arg iou_threshold)" />
<param name="maximum_detections" value="$(arg maximum_detections)"/>
<param name="device" value="$(arg device)" />
<param name="agnostic_nms" value="$(arg agnostic_nms)" />
<param name="line_thickness" value="$(arg line_thickness)"/>
<param name="dnn" value="$(arg dnn)"/>
<param name="half" value="$(arg half)"/>
<param name="inference_size_h" value="$(arg inference_size_h)"/>
<param name="inference_size_w" value="$(arg inference_size_w)"/>
<param name="input_image_topic" value="$(arg input_image_topic)"/>
<param name="output_topic" value="$(arg output_topic)"/>
<param name="view_image" value="$(arg view_image)"/>
<param name="publish_image" value="$(arg publish_image)"/>
<param name="output_image_topic" value="$(arg output_image_topic)"/>
</node>
<include file="$(find camera_launch)/launch/d435.launch"/>
</launch>