diff --git a/.gitignore b/.gitignore
deleted file mode 100644
index f0af0f7..0000000
--- a/.gitignore
+++ /dev/null
@@ -1,7 +0,0 @@
-.catkin_tools
-.vscode
-.vscode/*
-/build
-/devel
-/logs
-*.pyc
\ No newline at end of file
diff --git a/.vscode/c_cpp_properties.json b/.vscode/c_cpp_properties.json
deleted file mode 100644
index 1cc3db0..0000000
--- a/.vscode/c_cpp_properties.json
+++ /dev/null
@@ -1,20 +0,0 @@
-{
-    "configurations": [
-        {
-            "browse": {
-                "databaseFilename": "${default}",
-                "limitSymbolsToIncludedHeaders": false
-            },
-            "includePath": [
-                "/opt/ros/melodic/include/**",
-                "/usr/include/**"
-            ],
-            "name": "ROS",
-            "intelliSenseMode": "gcc-x64",
-            "compilerPath": "/usr/bin/gcc",
-            "cStandard": "gnu11",
-            "cppStandard": "c++14"
-        }
-    ],
-    "version": 4
-}
\ No newline at end of file
diff --git a/.vscode/settings.json b/.vscode/settings.json
deleted file mode 100644
index 9af6def..0000000
--- a/.vscode/settings.json
+++ /dev/null
@@ -1,8 +0,0 @@
-{
-    "python.autoComplete.extraPaths": [
-        "/opt/ros/melodic/lib/python2.7/dist-packages"
-    ],
-    "python.analysis.extraPaths": [
-        "/opt/ros/melodic/lib/python2.7/dist-packages"
-    ]
-}
\ No newline at end of file
diff --git a/.vscode/tasks.json b/.vscode/tasks.json
deleted file mode 100644
index d6dcdd7..0000000
--- a/.vscode/tasks.json
+++ /dev/null
@@ -1,18 +0,0 @@
-{
-    "tasks": [
-        {
-            "type": "shell",
-            "command": "catkin",
-            "args": [
-                "build",
-                // "-DPYTHON_EXECUTABLE=/home/da/miniconda3/envs/gsmini/bin/python",
-                "-DPYTHON_EXECUTABLE=${HOME}/.conda/envs/gsmini/bin/python"
-            ],
-            "problemMatcher": [
-                "$catkin-gcc"
-            ],
-            "group": "build",
-            "label": "catkin: build"
-        }
-    ]
-}
\ No newline at end of file
diff --git a/README.md b/README.md
deleted file mode 100644
index c745a15..0000000
--- a/README.md
+++ /dev/null
@@ -1 +0,0 @@
-git clone -b melodic-devel https://github.com/ros/geometry2
\ No newline at end of file
diff --git a/src/detection_msgs/CMakeLists.txt b/src/detection_msgs/CMakeLists.txt
deleted file mode 100644
index df2a260..0000000
--- a/src/detection_msgs/CMakeLists.txt
+++ /dev/null
@@ -1,205 +0,0 @@
-cmake_minimum_required(VERSION 3.0.2...3.26.3)
-project(detection_msgs)
-
-## Compile as C++11, supported in ROS Kinetic and newer
-# add_compile_options(-std=c++11)
-
-## Find catkin macros and libraries
-## if COMPONENTS list like find_package(catkin REQUIRED COMPONENTS xyz)
-## is used, also find other catkin packages
-find_package(catkin REQUIRED COMPONENTS
-  std_msgs
-  message_generation
-)
-
-## System dependencies are found with CMake's conventions
-# find_package(Boost REQUIRED COMPONENTS system)
-
-
-## Uncomment this if the package has a setup.py. This macro ensures
-## modules and global scripts declared therein get installed
-## See http://ros.org/doc/api/catkin/html/user_guide/setup_dot_py.html
-# catkin_python_setup()
-
-################################################
-## Declare ROS messages, services and actions ##
-################################################
-
-## To declare and build messages, services or actions from within this
-## package, follow these steps:
-## * Let MSG_DEP_SET be the set of packages whose message types you use in
-##   your messages/services/actions (e.g. std_msgs, actionlib_msgs, ...).
-## * In the file package.xml:
-##   * add a build_depend tag for "message_generation"
-##   * add a build_depend and a exec_depend tag for each package in MSG_DEP_SET
-##   * If MSG_DEP_SET isn't empty the following dependency has been pulled in
-##     but can be declared for certainty nonetheless:
-##     * add a exec_depend tag for "message_runtime"
-## * In this file (CMakeLists.txt):
-##   * add "message_generation" and every package in MSG_DEP_SET to
-##     find_package(catkin REQUIRED COMPONENTS ...)
-##   * add "message_runtime" and every package in MSG_DEP_SET to
-##     catkin_package(CATKIN_DEPENDS ...)
-##   * uncomment the add_*_files sections below as needed
-##     and list every .msg/.srv/.action file to be processed
-##   * uncomment the generate_messages entry below
-##   * add every package in MSG_DEP_SET to generate_messages(DEPENDENCIES ...)
-
-## Generate messages in the 'msg' folder
-add_message_files(
-  FILES
-  BoundingBox.msg
-  BoundingBoxes.msg
-)
-
-## Generate services in the 'srv' folder
-# add_service_files(
-#   FILES
-#   Service1.srv
-#   Service2.srv
-# )
-
-## Generate actions in the 'action' folder
-# add_action_files(
-#   FILES
-#   Action1.action
-#   Action2.action
-# )
-
-## Generate added messages and services with any dependencies listed here
-generate_messages(
-  DEPENDENCIES
-  std_msgs
-)
-
-################################################
-## Declare ROS dynamic reconfigure parameters ##
-################################################
-
-## To declare and build dynamic reconfigure parameters within this
-## package, follow these steps:
-## * In the file package.xml:
-##   * add a build_depend and a exec_depend tag for "dynamic_reconfigure"
-## * In this file (CMakeLists.txt):
-##   * add "dynamic_reconfigure" to
-##     find_package(catkin REQUIRED COMPONENTS ...)
-##   * uncomment the "generate_dynamic_reconfigure_options" section below
-##     and list every .cfg file to be processed
-
-## Generate dynamic reconfigure parameters in the 'cfg' folder
-# generate_dynamic_reconfigure_options(
-#   cfg/DynReconf1.cfg
-#   cfg/DynReconf2.cfg
-# )
-
-###################################
-## catkin specific configuration ##
-###################################
-## The catkin_package macro generates cmake config files for your package
-## Declare things to be passed to dependent projects
-## INCLUDE_DIRS: uncomment this if your package contains header files
-## LIBRARIES: libraries you create in this project that dependent projects also need
-## CATKIN_DEPENDS: catkin_packages dependent projects also need
-## DEPENDS: system dependencies of this project that dependent projects also need
-catkin_package(
-#  INCLUDE_DIRS include
-#  LIBRARIES detection_msgs
-  CATKIN_DEPENDS std_msgs message_runtime
-#  DEPENDS system_lib
-)
-
-###########
-## Build ##
-###########
-
-## Specify additional locations of header files
-## Your package locations should be listed before other locations
-include_directories(
-# include
-  ${catkin_INCLUDE_DIRS}
-)
-
-## Declare a C++ library
-# add_library(${PROJECT_NAME}
-#   src/${PROJECT_NAME}/detection_msgs.cpp
-# )
-
-## Add cmake target dependencies of the library
-## as an example, code may need to be generated before libraries
-## either from message generation or dynamic reconfigure
-# add_dependencies(${PROJECT_NAME} ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS})
-
-## Declare a C++ executable
-## With catkin_make all packages are built within a single CMake context
-## The recommended prefix ensures that target names across packages don't collide
-# add_executable(${PROJECT_NAME}_node src/detection_msgs_node.cpp)
-
-## Rename C++ executable without prefix
-## The above recommended prefix causes long target names, the following renames the
-## target back to the shorter version for ease of user use
-## e.g. "rosrun someones_pkg node" instead of "rosrun someones_pkg someones_pkg_node"
-# set_target_properties(${PROJECT_NAME}_node PROPERTIES OUTPUT_NAME node PREFIX "")
-
-## Add cmake target dependencies of the executable
-## same as for the library above
-# add_dependencies(${PROJECT_NAME}_node ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS})
-
-## Specify libraries to link a library or executable target against
-# target_link_libraries(${PROJECT_NAME}_node
-#   ${catkin_LIBRARIES}
-# )
-
-#############
-## Install ##
-#############
-
-# all install targets should use catkin DESTINATION variables
-# See http://ros.org/doc/api/catkin/html/adv_user_guide/variables.html
-
-## Mark executable scripts (Python etc.) for installation
-## in contrast to setup.py, you can choose the destination
-# catkin_install_python(PROGRAMS
-#   scripts/my_python_script
-#   DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION}
-# )
-
-## Mark executables for installation
-## See http://docs.ros.org/melodic/api/catkin/html/howto/format1/building_executables.html
-# install(TARGETS ${PROJECT_NAME}_node
-#   RUNTIME DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION}
-# )
-
-## Mark libraries for installation
-## See http://docs.ros.org/melodic/api/catkin/html/howto/format1/building_libraries.html
-# install(TARGETS ${PROJECT_NAME}
-#   ARCHIVE DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION}
-#   LIBRARY DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION}
-#   RUNTIME DESTINATION ${CATKIN_GLOBAL_BIN_DESTINATION}
-# )
-
-## Mark cpp header files for installation
-# install(DIRECTORY include/${PROJECT_NAME}/
-#   DESTINATION ${CATKIN_PACKAGE_INCLUDE_DESTINATION}
-#   FILES_MATCHING PATTERN "*.h"
-#   PATTERN ".svn" EXCLUDE
-# )
-
-## Mark other files for installation (e.g. launch and bag files, etc.)
-# install(FILES
-#   # myfile1
-#   # myfile2
-#   DESTINATION ${CATKIN_PACKAGE_SHARE_DESTINATION}
-# )
-
-#############
-## Testing ##
-#############
-
-## Add gtest based cpp test target and link libraries
-# catkin_add_gtest(${PROJECT_NAME}-test test/test_detection_msgs.cpp)
-# if(TARGET ${PROJECT_NAME}-test)
-#   target_link_libraries(${PROJECT_NAME}-test ${PROJECT_NAME})
-# endif()
-
-## Add folders to be run by python nosetests
-# catkin_add_nosetests(test)
diff --git a/src/detection_msgs/msg/BoundingBox.msg b/src/detection_msgs/msg/BoundingBox.msg
deleted file mode 100644
index 4ea8113..0000000
--- a/src/detection_msgs/msg/BoundingBox.msg
+++ /dev/null
@@ -1,31 +0,0 @@
-# Copyright (c) 2017, Marko Bjelonic, Robotic Systems Lab, ETH Zurich
-# All rights reserved.
-
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are met:
-#     * Redistributions of source code must retain the above copyright
-#       notice, this list of conditions and the following disclaimer.
-#     * Redistributions in binary form must reproduce the above copyright
-#       notice, this list of conditions and the following disclaimer in the
-#       documentation and/or other materials provided with the distribution.
-#     * Neither the name of the copyright holder nor the names of its
-#       contributors may be used to endorse or promote products derived
-#       from this software without specific prior written permission.
-
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-# DISCLAIMED. IN NO EVENT SHALL COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
-# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-string Class
-float64 probability
-int64 xmin
-int64 ymin
-int64 xmax
-int64 ymax
\ No newline at end of file
diff --git a/src/detection_msgs/msg/BoundingBoxes.msg b/src/detection_msgs/msg/BoundingBoxes.msg
deleted file mode 100644
index a906c33..0000000
--- a/src/detection_msgs/msg/BoundingBoxes.msg
+++ /dev/null
@@ -1,28 +0,0 @@
-# Copyright (c) 2017, Marko Bjelonic, Robotic Systems Lab, ETH Zurich
-# All rights reserved.
-
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are met:
-#     * Redistributions of source code must retain the above copyright
-#       notice, this list of conditions and the following disclaimer.
-#     * Redistributions in binary form must reproduce the above copyright
-#       notice, this list of conditions and the following disclaimer in the
-#       documentation and/or other materials provided with the distribution.
-#     * Neither the name of the copyright holder nor the names of its
-#       contributors may be used to endorse or promote products derived
-#       from this software without specific prior written permission.
-
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-# DISCLAIMED. IN NO EVENT SHALL COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
-# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-Header header
-Header image_header
-BoundingBox[] bounding_boxes
\ No newline at end of file
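For reference, a minimal sketch of a C++ node consuming these messages once detection_msgs is built in the workspace. The node name and the topic "/yolov5/detections" are placeholders, not defined anywhere in this repository:

    #include <ros/ros.h>
    #include <detection_msgs/BoundingBoxes.h>

    // Log every box in an incoming BoundingBoxes message.
    void onDetections(const detection_msgs::BoundingBoxes::ConstPtr& msg)
    {
      for (const detection_msgs::BoundingBox& box : msg->bounding_boxes)
        ROS_INFO("%s (%.2f): [%ld, %ld] -> [%ld, %ld]",
                 box.Class.c_str(), box.probability,
                 static_cast<long>(box.xmin), static_cast<long>(box.ymin),
                 static_cast<long>(box.xmax), static_cast<long>(box.ymax));
    }

    int main(int argc, char** argv)
    {
      ros::init(argc, argv, "bbox_listener");  // hypothetical node name
      ros::NodeHandle nh;
      // "/yolov5/detections" is a placeholder topic name.
      ros::Subscriber sub = nh.subscribe("/yolov5/detections", 10, onDetections);
      ros::spin();
      return 0;
    }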
diff --git a/src/detection_msgs/package.xml b/src/detection_msgs/package.xml
deleted file mode 100644
index a9f1554..0000000
--- a/src/detection_msgs/package.xml
+++ /dev/null
@@ -1,63 +0,0 @@
-<?xml version="1.0"?>
-<package format="2">
-  <name>detection_msgs</name>
-  <version>0.0.0</version>
-  <description>The detection_msgs package</description>
-
-  <maintainer email="nle17@todo.todo">nle17</maintainer>
-
-  <license>TODO</license>
-
-  <build_depend>message_generation</build_depend>
-  <exec_depend>message_runtime</exec_depend>
-
-  <buildtool_depend>catkin</buildtool_depend>
-  <build_depend>std_msgs</build_depend>
-  <build_export_depend>std_msgs</build_export_depend>
-  <exec_depend>std_msgs</exec_depend>
-
-  <export>
-  </export>
-</package>
diff --git a/src/geometry2/geometry2/CHANGELOG.rst b/src/geometry2/geometry2/CHANGELOG.rst
deleted file mode 100644
index 797fe1f..0000000
--- a/src/geometry2/geometry2/CHANGELOG.rst
+++ /dev/null
@@ -1,41 +0,0 @@
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-Changelog for package geometry2
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-0.6.7 (2020-03-09)
-------------------
-
-0.6.6 (2020-01-09)
-------------------
-
-0.6.5 (2018-11-16)
-------------------
-
-0.6.4 (2018-11-06)
-------------------
-
-0.6.3 (2018-07-09)
-------------------
-
-0.6.2 (2018-05-02)
-------------------
-
-0.6.1 (2018-03-21)
-------------------
-
-0.6.0 (2018-03-21)
-------------------
-
-0.5.17 (2018-01-01)
--------------------
-
-0.5.16 (2017-07-14)
--------------------
-
-0.5.15 (2017-01-24)
--------------------
-
-0.5.14 (2017-01-16)
--------------------
-* create geometry2 metapackage and make geometry_experimental depend on it for clarity of reverse dependency walking.
-* Contributors: Tully Foote
diff --git a/src/geometry2/geometry2/CMakeLists.txt b/src/geometry2/geometry2/CMakeLists.txt
deleted file mode 100644
index 83f1b03..0000000
--- a/src/geometry2/geometry2/CMakeLists.txt
+++ /dev/null
@@ -1,4 +0,0 @@
-cmake_minimum_required(VERSION 2.8.3)
-project(geometry2)
-find_package(catkin REQUIRED)
-catkin_metapackage()
diff --git a/src/geometry2/geometry2/package.xml b/src/geometry2/geometry2/package.xml
deleted file mode 100644
index 7cc8288..0000000
--- a/src/geometry2/geometry2/package.xml
+++ /dev/null
@@ -1,29 +0,0 @@
-<package>
-  <name>geometry2</name>
-  <version>0.6.7</version>
-  <description>
-    A metapackage to bring in the default packages second generation Transform Library in ros, tf2.
-  </description>
-  <maintainer email="tfoote@osrfoundation.org">Tully Foote</maintainer>
-  <author>Tully Foote</author>
-  <license>BSD</license>
-
-  <url>http://www.ros.org/wiki/geometry2</url>
-
-  <buildtool_depend>catkin</buildtool_depend>
-
-  <run_depend>tf2</run_depend>
-  <run_depend>tf2_bullet</run_depend>
-  <run_depend>tf2_eigen</run_depend>
-  <run_depend>tf2_geometry_msgs</run_depend>
-  <run_depend>tf2_kdl</run_depend>
-  <run_depend>tf2_msgs</run_depend>
-  <run_depend>tf2_py</run_depend>
-  <run_depend>tf2_ros</run_depend>
-  <run_depend>tf2_sensor_msgs</run_depend>
-  <run_depend>tf2_tools</run_depend>
-
-  <export>
-    <metapackage/>
-  </export>
-</package>
diff --git a/src/geometry2/test_tf2/CHANGELOG.rst b/src/geometry2/test_tf2/CHANGELOG.rst
deleted file mode 100644
index 645e03a..0000000
--- a/src/geometry2/test_tf2/CHANGELOG.rst
+++ /dev/null
@@ -1,274 +0,0 @@
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-Changelog for package test_tf2
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-0.6.7 (2020-03-09)
-------------------
-* [windows][melodic] more portable fixes. (#443)
-  * more portable fixes.
-* Contributors: Sean Yen
-
-0.6.6 (2020-01-09)
-------------------
-* Update shebang and add launch prefixes for python3 support (#421)
-* Always call catkin_package() (#418)
-* Remove roslib.load_manifest (#404) from otamachan/remove-load-manifest
-* Contributors: Shane Loretz, Tamaki Nishino, Tully Foote
-
-0.6.5 (2018-11-16)
-------------------
-
-0.6.4 (2018-11-06)
-------------------
-
-0.6.3 (2018-07-09)
-------------------
-* use correct unit test for test_tf2_bullet (#301)
-* update cmake order (#298)
-* Contributors: Tully Foote
-
-0.6.2 (2018-05-02)
-------------------
-
-0.6.1 (2018-03-21)
-------------------
-
-0.6.0 (2018-03-21)
-------------------
-
-0.5.17 (2018-01-01)
--------------------
-* Merge pull request #257 from delftrobotics-forks/python3
-  Make tf2_py python3 compatible again
-* Use python3 print function.
-* Contributors: Maarten de Vries, Tully Foote
-
-0.5.16 (2017-07-14)
--------------------
-* Remove generate_rand_vectors() from a number of tests. (#227)
-  * Remove a slew of trailing whitespace.
-    Signed-off-by: Chris Lalancette
-  * Remove generate_rand_vectors() from a number of tests.
-    It was never used, so there is no reason to carry it around.
-    Signed-off-by: Chris Lalancette
-* store gtest return value as int (#229)
-* Contributors: Chris Lalancette, dhood
-
-0.5.15 (2017-01-24)
--------------------
-
-0.5.14 (2017-01-16)
--------------------
-* Typos.
-* Adds unit tests for TF loaded from parameter server.
-  This tests both success (loading a valid TF into the param server) and
-  failures (parameter does not exist, parameter contents are invalid).
-* Code linting & reorganization
-  - whitespace
-  - indentation
-  - re-organized code to remove duplications.
-  whitespace & indentation changes only.
-  simplified (de-duplicated) duplicate code.
-  missing a duplicate variable.
-  whitespace changes only.
-* Contributors: Felix Duvallet
-
-0.5.13 (2016-03-04)
--------------------
-* Remove LGPL from license tags
-  LGPL was erroneously included in 2a38724. As there are no files with it
-  in the package.
-* Contributors: Jochen Sprickerhof
-
-0.5.12 (2015-08-05)
--------------------
-* add utilities to get yaw, pitch, roll and identity transform
-* provide more conversions between types
-  The previous conversion always assumed that it was converting a
-  non-message type to a non-message type. Now, one, both or none
-  can be a message or a non-message.
-* Contributors: Vincent Rabaud
-
-0.5.11 (2015-04-22)
--------------------
-
-0.5.10 (2015-04-21)
--------------------
-
-0.5.9 (2015-03-25)
-------------------
-
-0.5.8 (2015-03-17)
-------------------
-* remove useless Makefile files
-* Contributors: Vincent Rabaud
-
-0.5.7 (2014-12-23)
-------------------
-
-0.5.6 (2014-09-18)
-------------------
-
-0.5.5 (2014-06-23)
-------------------
-* Removed AsyncSpinner workaround
-* Contributors: Esteve Fernandez
-
-0.5.4 (2014-05-07)
-------------------
-* Clean up warnings about autostart and add some assertions for coverage
-* Contributors: Tully Foote
-
-0.5.3 (2014-02-21)
-------------------
-
-0.5.2 (2014-02-20)
-------------------
-
-0.5.1 (2014-02-14)
-------------------
-
-0.5.0 (2014-02-14)
-------------------
-
-0.4.10 (2013-12-26)
--------------------
-* fixing kdl linking for tests
-* Contributors: Tully Foote
-
-0.4.9 (2013-11-06)
-------------------
-
-0.4.8 (2013-11-06)
-------------------
-* Fixed static_transform_publisher duplicate check, added rostest.
-
-0.4.7 (2013-08-28)
-------------------
-
-0.4.6 (2013-08-28)
-------------------
-
-0.4.5 (2013-07-11)
-------------------
-* fixing quaternion in unit test and adding a timeout on the waitForServer
-* fixing usage string to show quaternions and using quaternions in the test app
-* removing redundant declaration
-* disabling whole cmake invocation in test_tf2 when not CATKIN_ENABLE_TESTING
-
-0.4.4 (2013-07-09)
-------------------
-
-0.4.3 (2013-07-05)
-------------------
-
-0.4.2 (2013-07-05)
-------------------
-
-0.4.1 (2013-07-05)
-------------------
-* fixing test target dependencies
-* fixing colliding target names between geometry and geometry_experimental
-* stripping tf2_ros dependency from tf2_bullet. Test was moved to test_tf2
-
-0.4.0 (2013-06-27)
-------------------
-* splitting rospy dependency into tf2_py so tf2 is pure c++ library.
-* switching to console_bridge from rosconsole
-* moving convert methods back into tf2 because it does not have any ros dependencies beyond ros::Time which is already a dependency of tf2
-* Cleaning up unnecessary dependency on roscpp
-* converting contents of tf2_ros to be properly namespaced in the tf2_ros namespace
-* Cleaning up packaging of tf2 including:
-  removing unused nodehandle
-  fixing overmatch on search and replace
-  cleaning up a few dependencies and linking
-  removing old backup of package.xml
-  making diff minimally different from tf version of library
-* Restoring test packages and bullet packages.
-  reverting 3570e8c42f9b394ecbfd9db076b920b41300ad55 to get back more of the packages previously implemented
-  reverting 04cf29d1b58c660fdc999ab83563a5d4b76ab331 to fix #7
-
-0.3.6 (2013-03-03)
-------------------
-
-0.3.5 (2013-02-15 14:46)
-------------------------
-
-0.3.4 (2013-02-15 13:14)
-------------------------
-
-0.3.3 (2013-02-15 11:30)
-------------------------
-
-0.3.2 (2013-02-15 00:42)
-------------------------
-
-0.3.1 (2013-02-14)
-------------------
-
-0.3.0 (2013-02-13)
-------------------
-* removing packages with missing deps
-* catkinizing geometry-experimental
-* add boost linkage
-* fixing test for header cleanup
-* fixing usage of bullet for migration to native bullet
-* Cleanup on test code, all tests pass
-* cleanup on optimized tests, still failing
-* Cleanup in compound transform test
-* Adding more frames to compound transform case
-* Compound transform test fails on optimized case after more frames added
-* Compound transform test has more frames in it
-* Cleanup of compount transform test
-* Compound transform at root node test fails for optimized branch
-* compount transform test, non-optimized
-* time-varying tests with different time-steps for optimized case
-* Time-varying test inserts data at different time-steps for non-optimized case
-* Helix (time-varying) test works on optimized branch
-* Adding more complicated case to helix test
-* Adding helix test for time-varying transforms in non-optimized case
-* Corrected ring45 values in buffer core test
-* Corrected values of ring45 test for non-optimized case
-* Ring 45 test running on non-optimized tf2 branch, from Tully's commit r880
-* filling out ring test case which finds errors in the optimization
-* Add option to use a callback queue in the message filter
-* another out-the-back test
-* move the message filter to tf2_ros
-* fix warnings
-* merge from tf_rework
-* tf2::MessageFilter + tests. Still need to change it around to pass in a callback queue, since we're being triggered directly from the tf2 buffer
-* adding in y configuration test
-* a little more realistic
-* Don't add the request if the transform is already available. Add some new tests
-* working transformable callbacks with a simple (incomplete) test case
-* cleaning up test setup
-* check_v implemented and passing v test and multi tree test
-* working toward multi configuration tests
-* removing restructuring for it won't nest like I thought
-* continuing restructuring and filling in test case setup
-* restructuring before scaling
-* Completely remove lookupLists(). canTransform() now uses the same walking code as lookupTransform(). Also fixed a bug in the static transform publisher test
-* testing chaining in a ring
-* test dataset generator
-* more complicated test with interleaving static and dynamic frames passing
-* static transform tested and working
-* test in progress, need to unshelve changes.
-* tests passing and all throw catches removed too\!
-* move to tf2_ros completed. tests pass again
-* merge tf2_cpp and tf2_py into tf2_ros
-* merging and fixing broken unittest
-* Got transform with types working in python
-* A working first version of transforming and converting between different types
-* removing unused datatypes
-* removing include of old tf from tf2
-* testing new argument validation and catching bug
-* unit test of single link one to try to debug eitan's client bug
-* working towards interpolation too
-* A working version of a test case for the python buffer client
-* merging
-* adding else to catch uncovered cases, and changing time for easier use
-* Adding a test for the python buffer client
-* using permuter now and doing a,b,c to a,b,c, at three different times including 0
-* Moving tf2_tests to test_tf2
-* moving test to new package
-* initial package created for testing tf2
diff --git a/src/geometry2/test_tf2/CMakeLists.txt b/src/geometry2/test_tf2/CMakeLists.txt
deleted file mode 100644
index a2543ca..0000000
--- a/src/geometry2/test_tf2/CMakeLists.txt
+++ /dev/null
@@ -1,60 +0,0 @@
-cmake_minimum_required(VERSION 2.8.3)
-
-project(test_tf2)
-
-find_package(catkin REQUIRED COMPONENTS rosconsole roscpp rostest tf tf2 tf2_bullet tf2_ros tf2_geometry_msgs tf2_kdl tf2_msgs tf2_eigen)
-find_package(Boost REQUIRED COMPONENTS thread)
-find_package(orocos_kdl REQUIRED)
-
-catkin_package()
-
-if(NOT CATKIN_ENABLE_TESTING)
-  return()
-endif()
-
-include_directories(${Boost_INCLUDE_DIRS} ${catkin_INCLUDE_DIRS} ${orocos_kdl_INCLUDE_DIRS})
-
-link_directories(${orocos_kdl_LIBRARY_DIRS})
-
-catkin_add_gtest(buffer_core_test test/buffer_core_test.cpp)
-target_link_libraries(buffer_core_test ${Boost_LIBRARIES} ${catkin_LIBRARIES} ${orocos_kdl_LIBRARIES})
-
-catkin_add_gtest(test_tf2_message_filter test/test_message_filter.cpp)
-target_link_libraries(test_tf2_message_filter ${Boost_LIBRARIES} ${catkin_LIBRARIES})
-
-catkin_add_gtest(test_convert test/test_convert.cpp)
-target_link_libraries(test_convert ${Boost_LIBRARIES} ${catkin_LIBRARIES} ${orocos_kdl_LIBRARIES})
-
-catkin_add_gtest(test_utils test/test_utils.cpp)
-target_link_libraries(test_utils ${Boost_LIBRARIES} ${catkin_LIBRARIES} ${orocos_kdl_LIBRARIES})
-
-add_executable(test_buffer_server EXCLUDE_FROM_ALL test/test_buffer_server.cpp)
-target_link_libraries(test_buffer_server ${Boost_LIBRARIES} ${catkin_LIBRARIES} ${GTEST_LIBRARIES})
-
-add_executable(test_buffer_client EXCLUDE_FROM_ALL test/test_buffer_client.cpp)
-target_link_libraries(test_buffer_client ${Boost_LIBRARIES} ${catkin_LIBRARIES} ${GTEST_LIBRARIES} ${orocos_kdl_LIBRARIES})
-
-
-add_rostest(test/buffer_client_tester.launch)
-
-add_executable(test_static_publisher EXCLUDE_FROM_ALL test/test_static_publisher.cpp)
-target_link_libraries(test_static_publisher ${Boost_LIBRARIES} ${catkin_LIBRARIES} ${GTEST_LIBRARIES})
-
-add_rostest(test/static_publisher.launch)
-
-
-add_executable(test_tf2_bullet EXCLUDE_FROM_ALL test/test_tf2_bullet.cpp)
-target_link_libraries(test_tf2_bullet ${catkin_LIBRARIES} ${GTEST_LIBRARIES})
-
-add_rostest(${CMAKE_CURRENT_SOURCE_DIR}/test/test_tf2_bullet.launch)
-
-
-if(TARGET tests)
-  add_dependencies(tests test_buffer_server test_buffer_client test_static_publisher test_tf2_bullet)
-endif()
-
-
-# used as a test fixture
-if(TARGET tf2_ros_static_transform_publisher)
-  add_dependencies(tests tf2_ros_static_transform_publisher test_static_publisher)
-endif()
diff --git a/src/geometry2/test_tf2/mainpage.dox b/src/geometry2/test_tf2/mainpage.dox
deleted file mode 100644
index a01bfa2..0000000
--- a/src/geometry2/test_tf2/mainpage.dox
+++ /dev/null
@@ -1,26 +0,0 @@
-/**
-\mainpage
-\htmlinclude manifest.html
-
-\b test_tf2 is ...
-
-
-
-\section codeapi Code API
-
-
-
-*/
diff --git a/src/geometry2/test_tf2/package.xml b/src/geometry2/test_tf2/package.xml
deleted file mode 100644
index 1371163..0000000
--- a/src/geometry2/test_tf2/package.xml
+++ /dev/null
@@ -1,45 +0,0 @@
-<package>
-  <name>test_tf2</name>
-  <version>0.6.7</version>
-  <description>
-    tf2 unit tests
-  </description>
-  <maintainer email="tfoote@osrfoundation.org">Tully Foote</maintainer>
-  <author>Eitan Marder-Eppstein</author>
-  <author>Tully Foote</author>
-  <license>BSD</license>
-
-  <url>http://www.ros.org/wiki/geometry_experimental</url>
-
-  <buildtool_depend>catkin</buildtool_depend>
-
-  <build_depend>rosconsole</build_depend>
-  <build_depend>roscpp</build_depend>
-  <build_depend>rostest</build_depend>
-  <build_depend>tf</build_depend>
-  <build_depend>tf2</build_depend>
-  <build_depend>tf2_bullet</build_depend>
-  <build_depend>tf2_ros</build_depend>
-  <build_depend>tf2_geometry_msgs</build_depend>
-  <build_depend>tf2_kdl</build_depend>
-  <build_depend>tf2_msgs</build_depend>
-  <build_depend>tf2_eigen</build_depend>
-
-  <run_depend>rosconsole</run_depend>
-  <run_depend>roscpp</run_depend>
-  <run_depend>rostest</run_depend>
-  <run_depend>tf</run_depend>
-  <run_depend>tf2</run_depend>
-  <run_depend>tf2_bullet</run_depend>
-  <run_depend>tf2_ros</run_depend>
-  <run_depend>tf2_geometry_msgs</run_depend>
-  <run_depend>tf2_kdl</run_depend>
-  <run_depend>tf2_msgs</run_depend>
-  <run_depend>tf2_eigen</run_depend>
-
-  <test_depend>rosunit</test_depend>
-  <test_depend>rosbash</test_depend>
-
-</package>
diff --git a/src/geometry2/test_tf2/test/buffer_client_tester.launch b/src/geometry2/test_tf2/test/buffer_client_tester.launch
deleted file mode 100644
index 71c2d5e..0000000
--- a/src/geometry2/test_tf2/test/buffer_client_tester.launch
+++ /dev/null
@@ -1,5 +0,0 @@
-
-
-
-
-
diff --git a/src/geometry2/test_tf2/test/buffer_core_test.cpp b/src/geometry2/test_tf2/test/buffer_core_test.cpp
deleted file mode 100644
index 4a14487..0000000
--- a/src/geometry2/test_tf2/test/buffer_core_test.cpp
+++ /dev/null
@@ -1,2797 +0,0 @@
-/*
- * Copyright (c) 2008, Willow Garage, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- *     * Redistributions of source code must retain the above copyright
- *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *       notice, this list of conditions and the following disclaimer in the
- *       documentation and/or other materials provided with the distribution.
- *     * Neither the name of the Willow Garage, Inc. nor the names of its
- *       contributors may be used to endorse or promote products derived from
- *       this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */ - -#include -#include -#include -#include "tf2/exceptions.h" -#include -#include "LinearMath/btVector3.h" -#include "LinearMath/btTransform.h" -#include "rostest/permuter.h" - -void setIdentity(geometry_msgs::Transform& trans) -{ - trans.translation.x = 0; - trans.translation.y = 0; - trans.translation.z = 0; - trans.rotation.x = 0; - trans.rotation.y = 0; - trans.rotation.z = 0; - trans.rotation.w = 1; -} - - -void push_back_i(std::vector& children, std::vector& parents, - std::vector& dx, std::vector& dy) -{ - /* - "a" - v (1,0) - "b" - v (1,0) - "c" - */ - - children.push_back("b"); - parents.push_back("a"); - dx.push_back(1.0); - dy.push_back(0.0); - children.push_back("c"); - parents.push_back("b"); - dx.push_back(1.0); - dy.push_back(0.0); -} - - -void push_back_y(std::vector& children, std::vector& parents, - std::vector& dx, std::vector& dy) -{ - /* - "a" - v (1,0) - "b" ------(0,1)-----> "d" - v (1,0) v (0,1) - "c" "e" - */ - // a>b - children.push_back("b"); - parents.push_back("a"); - dx.push_back(1.0); - dy.push_back(0.0); - // b>c - children.push_back("c"); - parents.push_back("b"); - dx.push_back(1.0); - dy.push_back(0.0); - // b>d - children.push_back("d"); - parents.push_back("b"); - dx.push_back(0.0); - dy.push_back(1.0); - // d>e - children.push_back("e"); - parents.push_back("d"); - dx.push_back(0.0); - dy.push_back(1.0); -} - -void push_back_v(std::vector& children, std::vector& parents, - std::vector& dx, std::vector& dy) -{ - /* - "a" ------(0,1)-----> "f" - v (1,0) v (0,1) - "b" "g" - v (1,0) - "c" - */ - // a>b - children.push_back("b"); - parents.push_back("a"); - dx.push_back(1.0); - dy.push_back(0.0); - // b>c - children.push_back("c"); - parents.push_back("b"); - dx.push_back(1.0); - dy.push_back(0.0); - // a>f - children.push_back("f"); - parents.push_back("a"); - dx.push_back(0.0); - dy.push_back(1.0); - // f>g - children.push_back("g"); - parents.push_back("f"); - dx.push_back(0.0); - dy.push_back(1.0); - -} - -void push_back_1(std::vector& children, std::vector& parents, - std::vector& dx, std::vector& dy) -{ - children.push_back("2"); - parents.push_back("1"); - dx.push_back(1.0); - dy.push_back(0.0); -} - -void setupTree(tf2::BufferCore& mBC, const std::string& mode, const ros::Time & time, const ros::Duration& interpolation_space = ros::Duration()) -{ - ROS_DEBUG("Clearing Buffer Core for new test setup"); - mBC.clear(); - - ROS_DEBUG("Setting up test tree for formation %s", mode.c_str()); - - std::vector children; - std::vector parents; - std::vector dx, dy; - - if (mode == "i") - { - push_back_i(children, parents, dx, dy); - } - else if (mode == "y") - { - push_back_y(children, parents, dx, dy); - } - - else if (mode == "v") - { - push_back_v(children, parents, dx, dy); - } - - else if (mode == "ring_45") - { - /* Form a ring of transforms at every 45 degrees on the unit circle. 
*/ - - std::vector frames; - - - - frames.push_back("a"); - frames.push_back("b"); - frames.push_back("c"); - frames.push_back("d"); - frames.push_back("e"); - frames.push_back("f"); - frames.push_back("g"); - frames.push_back("h"); - frames.push_back("i"); - - for (uint8_t iteration = 0; iteration < 2; ++iteration) - { - double direction = 1; - std::string frame_prefix; - if (iteration == 0) - { - frame_prefix = "inverse_"; - direction = -1; - } - else - frame_prefix =""; - for (uint64_t i = 1; i < frames.size(); i++) - { - geometry_msgs::TransformStamped ts; - setIdentity(ts.transform); - ts.transform.translation.x = direction * ( sqrt(2)/2 - 1); - ts.transform.translation.y = direction * sqrt(2)/2; - ts.transform.rotation.x = 0; - ts.transform.rotation.y = 0; - ts.transform.rotation.z = sin(direction * M_PI/8); - ts.transform.rotation.w = cos(direction * M_PI/8); - if (time > ros::Time() + (interpolation_space * .5)) - ts.header.stamp = time - (interpolation_space * .5); - else - ts.header.stamp = ros::Time(); - - ts.header.frame_id = frame_prefix + frames[i-1]; - if (i > 1) - ts.child_frame_id = frame_prefix + frames[i]; - else - ts.child_frame_id = frames[i]; // connect first frame - EXPECT_TRUE(mBC.setTransform(ts, "authority")); - if (interpolation_space > ros::Duration()) - { - ts.header.stamp = time + interpolation_space * .5; - EXPECT_TRUE(mBC.setTransform(ts, "authority")); - - } - } - } - return; // nonstandard setup return before standard executinog - } - else if (mode == "1") - { - push_back_1(children, parents, dx, dy); - - } - else if (mode =="1_v") - { - push_back_1(children, parents, dx, dy); - push_back_v(children, parents, dx, dy); - } - else - EXPECT_FALSE("Undefined mode for tree setup. Test harness improperly setup."); - - - /// Standard - for (uint64_t i = 0; i < children.size(); i++) - { - geometry_msgs::TransformStamped ts; - setIdentity(ts.transform); - ts.transform.translation.x = dx[i]; - ts.transform.translation.y = dy[i]; - if (time > ros::Time() + (interpolation_space * .5)) - ts.header.stamp = time - (interpolation_space * .5); - else - ts.header.stamp = ros::Time(); - - ts.header.frame_id = parents[i]; - ts.child_frame_id = children[i]; - EXPECT_TRUE(mBC.setTransform(ts, "authority")); - if (interpolation_space > ros::Duration()) - { - ts.header.stamp = time + interpolation_space * .5; - EXPECT_TRUE(mBC.setTransform(ts, "authority")); - - } - } -} - - -TEST(BufferCore_setTransform, NoInsertOnSelfTransform) -{ - tf2::BufferCore mBC; - geometry_msgs::TransformStamped tranStamped; - setIdentity(tranStamped.transform); - tranStamped.header.stamp = ros::Time().fromNSec(10.0); - tranStamped.header.frame_id = "same_frame"; - tranStamped.child_frame_id = "same_frame"; - EXPECT_FALSE(mBC.setTransform(tranStamped, "authority")); -} - -TEST(BufferCore_setTransform, NoInsertWithNan) -{ - tf2::BufferCore mBC; - geometry_msgs::TransformStamped tranStamped; - setIdentity(tranStamped.transform); - tranStamped.header.stamp = ros::Time().fromNSec(10.0); - tranStamped.header.frame_id = "same_frame"; - tranStamped.child_frame_id = "other_frame"; - EXPECT_TRUE(mBC.setTransform(tranStamped, "authority")); - tranStamped.transform.translation.x = std::nan(""); - EXPECT_TRUE(std::isnan(tranStamped.transform.translation.x)); - EXPECT_FALSE(mBC.setTransform(tranStamped, "authority")); - -} - -TEST(BufferCore_setTransform, NoInsertWithNoFrameID) -{ - tf2::BufferCore mBC; - geometry_msgs::TransformStamped tranStamped; - setIdentity(tranStamped.transform); - tranStamped.header.stamp = 
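The ring_45 branch above places frames at 45-degree steps around the unit circle: each hop translates by (sqrt(2)/2 - 1, sqrt(2)/2) in the parent frame and rotates by 45 degrees about z (quaternion z = sin(pi/8), w = cos(pi/8)). A self-contained sketch, with no ROS or tf2 dependency and 2-D poses standing in for the full transforms, checking that eight such hops compose back to the starting frame:

    #include <cmath>
    #include <cstdio>

    // 2-D rigid transform: translation (x, y) plus heading theta in radians.
    struct Pose2D { double x, y, theta; };

    // Compose a with b, where b is expressed in a's frame.
    Pose2D compose(const Pose2D& a, const Pose2D& b)
    {
      return { a.x + std::cos(a.theta) * b.x - std::sin(a.theta) * b.y,
               a.y + std::sin(a.theta) * b.x + std::cos(a.theta) * b.y,
               a.theta + b.theta };
    }

    int main()
    {
      // One 45-degree hop around the unit circle, as in setupTree("ring_45").
      const Pose2D step = { std::sqrt(2.0) / 2.0 - 1.0, std::sqrt(2.0) / 2.0, M_PI / 4.0 };
      Pose2D acc = { 0.0, 0.0, 0.0 };
      for (int i = 0; i < 8; ++i)
        acc = compose(acc, step);
      // Expect x ~ 0, y ~ 0, theta ~ 2*pi: the ring closes on itself.
      std::printf("x=%g y=%g theta=%g\n", acc.x, acc.y, acc.theta);
      return 0;
    }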
ros::Time().fromNSec(10.0); - tranStamped.header.frame_id = "same_frame"; - tranStamped.child_frame_id = ""; - EXPECT_FALSE(mBC.setTransform(tranStamped, "authority")); - tranStamped.child_frame_id = "/"; - EXPECT_FALSE(mBC.setTransform(tranStamped, "authority")); - -} - -TEST(BufferCore_setTransform, NoInsertWithNoParentID) -{ - tf2::BufferCore mBC; - geometry_msgs::TransformStamped tranStamped; - setIdentity(tranStamped.transform); - tranStamped.header.stamp = ros::Time().fromNSec(10.0); - tranStamped.header.frame_id = ""; - tranStamped.child_frame_id = "some_frame"; - EXPECT_FALSE(mBC.setTransform(tranStamped, "authority")); - - tranStamped.header.frame_id = "/"; - EXPECT_FALSE(mBC.setTransform(tranStamped, "authority")); -} - -/* -TEST(tf, ListOneInverse) -{ - unsigned int runs = 4; - double epsilon = 1e-6; - seed_rand(); - - tf::Transformer mTR(true); - std::vector xvalues(runs), yvalues(runs), zvalues(runs); - for ( uint64_t i = 0; i < runs ; i++ ) - { - xvalues[i] = 10.0 * ((double) rand() - (double)RAND_MAX /2.0) /(double)RAND_MAX; - yvalues[i] = 10.0 * ((double) rand() - (double)RAND_MAX /2.0) /(double)RAND_MAX; - zvalues[i] = 10.0 * ((double) rand() - (double)RAND_MAX /2.0) /(double)RAND_MAX; - - StampedTransform tranStamped (btTransform(btQuaternion(0,0,0,1), btVector3(xvalues[i],yvalues[i],zvalues[i])), ros::Time().fromNSec(10 + i), "my_parent", "child"); - mTR.setTransform(tranStamped); - } - - // std::cout << mTR.allFramesAsString() << std::endl; - // std::cout << mTR.chainAsString("child", 0, "my_parent2", 0, "my_parent2") << std::endl; - - for ( uint64_t i = 0; i < runs ; i++ ) - - { - Stamped inpose (btTransform(btQuaternion(0,0,0,1), btVector3(0,0,0)), ros::Time().fromNSec(10 + i), "child"); - - try{ - Stamped outpose; - outpose.setIdentity(); //to make sure things are getting mutated - mTR.transformPose("my_parent",inpose, outpose); - EXPECT_NEAR(outpose.getOrigin().x(), xvalues[i], epsilon); - EXPECT_NEAR(outpose.getOrigin().y(), yvalues[i], epsilon); - EXPECT_NEAR(outpose.getOrigin().z(), zvalues[i], epsilon); - } - catch (tf::TransformException & ex) - { - std::cout << "TransformExcepion got through!!!!! 
" << ex.what() << std::endl; - bool exception_improperly_thrown = true; - EXPECT_FALSE(exception_improperly_thrown); - } - } - -} - -TEST(tf, ListTwoInverse) -{ - unsigned int runs = 4; - double epsilon = 1e-6; - seed_rand(); - - tf::Transformer mTR(true); - std::vector xvalues(runs), yvalues(runs), zvalues(runs); - for ( unsigned int i = 0; i < runs ; i++ ) - { - xvalues[i] = 10.0 * ((double) rand() - (double)RAND_MAX /2.0) /(double)RAND_MAX; - yvalues[i] = 10.0 * ((double) rand() - (double)RAND_MAX /2.0) /(double)RAND_MAX; - zvalues[i] = 10.0 * ((double) rand() - (double)RAND_MAX /2.0) /(double)RAND_MAX; - - StampedTransform tranStamped(btTransform(btQuaternion(0,0,0,1), btVector3(xvalues[i],yvalues[i],zvalues[i])), ros::Time().fromNSec(10 + i), "my_parent", "child"); - mTR.setTransform(tranStamped); - StampedTransform tranStamped2(btTransform(btQuaternion(0,0,0,1), btVector3(xvalues[i],yvalues[i],zvalues[i])), ros::Time().fromNSec(10 + i), "child", "grandchild"); - mTR.setTransform(tranStamped2); - } - - // std::cout << mTR.allFramesAsString() << std::endl; - // std::cout << mTR.chainAsString("child", 0, "my_parent2", 0, "my_parent2") << std::endl; - - for ( unsigned int i = 0; i < runs ; i++ ) - - { - Stamped inpose (btTransform(btQuaternion(0,0,0,1), btVector3(0,0,0)), ros::Time().fromNSec(10 + i), "grandchild"); - - try{ - Stamped outpose; - outpose.setIdentity(); //to make sure things are getting mutated - mTR.transformPose("my_parent",inpose, outpose); - EXPECT_NEAR(outpose.getOrigin().x(), 2*xvalues[i], epsilon); - EXPECT_NEAR(outpose.getOrigin().y(), 2*yvalues[i], epsilon); - EXPECT_NEAR(outpose.getOrigin().z(), 2*zvalues[i], epsilon); - } - catch (tf::TransformException & ex) - { - std::cout << "TransformExcepion got through!!!!! " << ex.what() << std::endl; - bool exception_improperly_thrown = true; - EXPECT_FALSE(exception_improperly_thrown); - } - } - -} - - -TEST(tf, ListOneForward) -{ - unsigned int runs = 4; - double epsilon = 1e-6; - seed_rand(); - - tf::Transformer mTR(true); - std::vector xvalues(runs), yvalues(runs), zvalues(runs); - for ( uint64_t i = 0; i < runs ; i++ ) - { - xvalues[i] = 10.0 * ((double) rand() - (double)RAND_MAX /2.0) /(double)RAND_MAX; - yvalues[i] = 10.0 * ((double) rand() - (double)RAND_MAX /2.0) /(double)RAND_MAX; - zvalues[i] = 10.0 * ((double) rand() - (double)RAND_MAX /2.0) /(double)RAND_MAX; - - StampedTransform tranStamped(btTransform(btQuaternion(0,0,0,1), btVector3(xvalues[i],yvalues[i],zvalues[i])), ros::Time().fromNSec(10 + i), "my_parent", "child"); - mTR.setTransform(tranStamped); - } - - // std::cout << mTR.allFramesAsString() << std::endl; - // std::cout << mTR.chainAsString("child", 0, "my_parent2", 0, "my_parent2") << std::endl; - - for ( uint64_t i = 0; i < runs ; i++ ) - - { - Stamped inpose (btTransform(btQuaternion(0,0,0,1), btVector3(0,0,0)), ros::Time().fromNSec(10 + i), "my_parent"); - - try{ - Stamped outpose; - outpose.setIdentity(); //to make sure things are getting mutated - mTR.transformPose("child",inpose, outpose); - EXPECT_NEAR(outpose.getOrigin().x(), -xvalues[i], epsilon); - EXPECT_NEAR(outpose.getOrigin().y(), -yvalues[i], epsilon); - EXPECT_NEAR(outpose.getOrigin().z(), -zvalues[i], epsilon); - } - catch (tf::TransformException & ex) - { - std::cout << "TransformExcepion got through!!!!! 
" << ex.what() << std::endl; - bool exception_improperly_thrown = true; - EXPECT_FALSE(exception_improperly_thrown); - } - } - -} - -TEST(tf, ListTwoForward) -{ - unsigned int runs = 4; - double epsilon = 1e-6; - seed_rand(); - - tf::Transformer mTR(true); - std::vector xvalues(runs), yvalues(runs), zvalues(runs); - for ( unsigned int i = 0; i < runs ; i++ ) - { - xvalues[i] = 10.0 * ((double) rand() - (double)RAND_MAX /2.0) /(double)RAND_MAX; - yvalues[i] = 10.0 * ((double) rand() - (double)RAND_MAX /2.0) /(double)RAND_MAX; - zvalues[i] = 10.0 * ((double) rand() - (double)RAND_MAX /2.0) /(double)RAND_MAX; - - StampedTransform tranStamped(btTransform(btQuaternion(0,0,0,1), btVector3(xvalues[i],yvalues[i],zvalues[i])), ros::Time().fromNSec(10 + i), "my_parent", "child"); - mTR.setTransform(tranStamped); - StampedTransform tranStamped2(btTransform(btQuaternion(0,0,0,1), btVector3(xvalues[i],yvalues[i],zvalues[i])), ros::Time().fromNSec(10 + i), "child", "grandchild"); - mTR.setTransform(tranStamped2); - } - - // std::cout << mTR.allFramesAsString() << std::endl; - // std::cout << mTR.chainAsString("child", 0, "my_parent2", 0, "my_parent2") << std::endl; - - for ( unsigned int i = 0; i < runs ; i++ ) - - { - Stamped inpose (btTransform(btQuaternion(0,0,0,1), btVector3(0,0,0)), ros::Time().fromNSec(10 + i), "my_parent"); - - try{ - Stamped outpose; - outpose.setIdentity(); //to make sure things are getting mutated - mTR.transformPose("grandchild",inpose, outpose); - EXPECT_NEAR(outpose.getOrigin().x(), -2*xvalues[i], epsilon); - EXPECT_NEAR(outpose.getOrigin().y(), -2*yvalues[i], epsilon); - EXPECT_NEAR(outpose.getOrigin().z(), -2*zvalues[i], epsilon); - } - catch (tf::TransformException & ex) - { - std::cout << "TransformExcepion got through!!!!! " << ex.what() << std::endl; - bool exception_improperly_thrown = true; - EXPECT_FALSE(exception_improperly_thrown); - } - } - -} - -TEST(tf, TransformThrougRoot) -{ - unsigned int runs = 4; - double epsilon = 1e-6; - seed_rand(); - - tf::Transformer mTR(true); - std::vector xvalues(runs), yvalues(runs), zvalues(runs); - for ( unsigned int i = 0; i < runs ; i++ ) - { - xvalues[i] = 10.0 * ((double) rand() - (double)RAND_MAX /2.0) /(double)RAND_MAX; - yvalues[i] = 10.0 * ((double) rand() - (double)RAND_MAX /2.0) /(double)RAND_MAX; - zvalues[i] = 10.0 * ((double) rand() - (double)RAND_MAX /2.0) /(double)RAND_MAX; - - StampedTransform tranStamped(btTransform(btQuaternion(0,0,0,1), btVector3(xvalues[i],yvalues[i],zvalues[i])), ros::Time().fromNSec(1000 + i*100), "my_parent", "childA"); - mTR.setTransform(tranStamped); - StampedTransform tranStamped2(btTransform(btQuaternion(0,0,0,1), btVector3(xvalues[i],yvalues[i],zvalues[i])), ros::Time().fromNSec(1000 + i*100), "my_parent", "childB"); - mTR.setTransform(tranStamped2); - } - - // std::cout << mTR.allFramesAsString() << std::endl; - // std::cout << mTR.chainAsString("child", 0, "my_parent2", 0, "my_parent2") << std::endl; - - for ( unsigned int i = 0; i < runs ; i++ ) - - { - Stamped inpose (btTransform(btQuaternion(0,0,0,1), btVector3(0,0,0)), ros::Time().fromNSec(1000 + i*100), "childA"); - - try{ - Stamped outpose; - outpose.setIdentity(); //to make sure things are getting mutated - mTR.transformPose("childB",inpose, outpose); - EXPECT_NEAR(outpose.getOrigin().x(), 0*xvalues[i], epsilon); - EXPECT_NEAR(outpose.getOrigin().y(), 0*yvalues[i], epsilon); - EXPECT_NEAR(outpose.getOrigin().z(), 0*zvalues[i], epsilon); - } - catch (tf::TransformException & ex) - { - std::cout << "TransformExcepion got 
through!!!!! " << ex.what() << std::endl; - bool exception_improperly_thrown = true; - EXPECT_FALSE(exception_improperly_thrown); - } - } - -} - -TEST(tf, TransformThroughNO_PARENT) -{ - unsigned int runs = 4; - double epsilon = 1e-6; - seed_rand(); - - tf::Transformer mTR(true); - std::vector xvalues(runs), yvalues(runs), zvalues(runs); - for ( unsigned int i = 0; i < runs ; i++ ) - { - xvalues[i] = 10.0 * ((double) rand() - (double)RAND_MAX /2.0) /(double)RAND_MAX; - yvalues[i] = 10.0 * ((double) rand() - (double)RAND_MAX /2.0) /(double)RAND_MAX; - zvalues[i] = 10.0 * ((double) rand() - (double)RAND_MAX /2.0) /(double)RAND_MAX; - - StampedTransform tranStamped(btTransform(btQuaternion(0,0,0,1), btVector3(xvalues[i],yvalues[i],zvalues[i])), ros::Time().fromNSec(10 + i), "my_parentA", "childA"); - mTR.setTransform(tranStamped); - StampedTransform tranStamped2(btTransform(btQuaternion(0,0,0,1), btVector3(xvalues[i],yvalues[i],zvalues[i])), ros::Time().fromNSec(10 + i), "my_parentB", "childB"); - mTR.setTransform(tranStamped2); - } - - // std::cout << mTR.allFramesAsString() << std::endl; - // std::cout << mTR.chainAsString("child", 0, "my_parent2", 0, "my_parent2") << std::endl; - - for ( unsigned int i = 0; i < runs ; i++ ) - - { - Stamped inpose (btTransform(btQuaternion(0,0,0,1), btVector3(0,0,0)), ros::Time().fromNSec(10 + i), "childA"); - bool exception_thrown = false; - - try{ - Stamped outpose; - outpose.setIdentity(); //to make sure things are getting mutated - mTR.transformPose("childB",inpose, outpose); - EXPECT_NEAR(outpose.getOrigin().x(), 0*xvalues[i], epsilon); - EXPECT_NEAR(outpose.getOrigin().y(), 0*yvalues[i], epsilon); - EXPECT_NEAR(outpose.getOrigin().z(), 0*zvalues[i], epsilon); - } - catch (tf::TransformException & ex) - { - exception_thrown = true; - } - EXPECT_TRUE(exception_thrown); - } - -} - -*/ - - -TEST(BufferCore_lookupTransform, i_configuration) -{ - double epsilon = 1e-6; - - - - rostest::Permuter permuter; - - std::vector times; - times.push_back(ros::Time(1.0)); - times.push_back(ros::Time(10.0)); - times.push_back(ros::Time(0.0)); - ros::Time eval_time; - permuter.addOptionSet(times, &eval_time); - - std::vector durations; - durations.push_back(ros::Duration(1.0)); - durations.push_back(ros::Duration(0.001)); - durations.push_back(ros::Duration(0.1)); - ros::Duration interpolation_space; - // permuter.addOptionSet(durations, &interpolation_space); - - std::vector frames; - frames.push_back("a"); - frames.push_back("b"); - frames.push_back("c"); - std::string source_frame; - permuter.addOptionSet(frames, &source_frame); - - std::string target_frame; - permuter.addOptionSet(frames, &target_frame); - - while (permuter.step()) - { - - tf2::BufferCore mBC; - setupTree(mBC, "i", eval_time, interpolation_space); - - geometry_msgs::TransformStamped outpose = mBC.lookupTransform(source_frame, target_frame, eval_time); - //printf("source_frame %s target_frame %s time %f\n", source_frame.c_str(), target_frame.c_str(), eval_time.toSec()); - EXPECT_EQ(outpose.header.stamp, eval_time); - EXPECT_EQ(outpose.header.frame_id, source_frame); - EXPECT_EQ(outpose.child_frame_id, target_frame); - EXPECT_NEAR(outpose.transform.translation.y, 0, epsilon); - EXPECT_NEAR(outpose.transform.translation.z, 0, epsilon); - EXPECT_NEAR(outpose.transform.rotation.x, 0, epsilon); - EXPECT_NEAR(outpose.transform.rotation.y, 0, epsilon); - EXPECT_NEAR(outpose.transform.rotation.z, 0, epsilon); - EXPECT_NEAR(outpose.transform.rotation.w, 1, epsilon); - - //Zero distance - if (source_frame == 
target_frame) - { - EXPECT_NEAR(outpose.transform.translation.x, 0, epsilon); - } - else if ((source_frame == "a" && target_frame =="b") || - (source_frame == "b" && target_frame =="c")) - { - EXPECT_NEAR(outpose.transform.translation.x, 1, epsilon); - } - else if ((source_frame == "b" && target_frame =="a") || - (source_frame == "c" && target_frame =="b")) - { - EXPECT_NEAR(outpose.transform.translation.x, -1, epsilon); - } - else if (source_frame == "a" && target_frame =="c") - { - EXPECT_NEAR(outpose.transform.translation.x, 2, epsilon); - } - else if (source_frame == "c" && target_frame =="a") - { - EXPECT_NEAR(outpose.transform.translation.x, -2, epsilon); - } - else - { - EXPECT_FALSE("i configuration: Shouldn't get here"); - printf("source_frame %s target_frame %s time %f\n", source_frame.c_str(), target_frame.c_str(), eval_time.toSec()); - } - - } -} - -/* Check 1 result return false if test parameters unmet */ -bool check_1_result(const geometry_msgs::TransformStamped& outpose, const std::string& source_frame, const std::string& target_frame, const ros::Time& eval_time, double epsilon) -{ - //printf("source_frame %s target_frame %s time %f\n", source_frame.c_str(), target_frame.c_str(), eval_time.toSec()); - EXPECT_EQ(outpose.header.stamp, eval_time); - EXPECT_EQ(outpose.header.frame_id, source_frame); - EXPECT_EQ(outpose.child_frame_id, target_frame); - EXPECT_NEAR(outpose.transform.translation.y, 0, epsilon); - EXPECT_NEAR(outpose.transform.translation.z, 0, epsilon); - EXPECT_NEAR(outpose.transform.rotation.x, 0, epsilon); - EXPECT_NEAR(outpose.transform.rotation.y, 0, epsilon); - EXPECT_NEAR(outpose.transform.rotation.z, 0, epsilon); - EXPECT_NEAR(outpose.transform.rotation.w, 1, epsilon); - - //Zero distance - if (source_frame == target_frame) - { - EXPECT_NEAR(outpose.transform.translation.x, 0, epsilon); - } - else if (source_frame == "1" && target_frame =="2") - { - EXPECT_NEAR(outpose.transform.translation.x, 1, epsilon); - } - else if (source_frame == "2" && target_frame =="1") - { - EXPECT_NEAR(outpose.transform.translation.x, -1, epsilon); - } - else - { - //printf("source_frame %s target_frame %s time %f\n", source_frame.c_str(), target_frame.c_str(), eval_time.toSec()); - return false; - } - return true; -} - -/* Check v result return false if test parameters unmet */ -bool check_v_result(const geometry_msgs::TransformStamped& outpose, const std::string& source_frame, const std::string& target_frame, const ros::Time& eval_time, double epsilon) -{ - //printf("source_frame %s target_frame %s time %f\n", source_frame.c_str(), target_frame.c_str(), eval_time.toSec()); - EXPECT_EQ(outpose.header.stamp, eval_time); - EXPECT_EQ(outpose.header.frame_id, source_frame); - EXPECT_EQ(outpose.child_frame_id, target_frame); - EXPECT_NEAR(outpose.transform.translation.z, 0, epsilon); - EXPECT_NEAR(outpose.transform.rotation.x, 0, epsilon); - EXPECT_NEAR(outpose.transform.rotation.y, 0, epsilon); - EXPECT_NEAR(outpose.transform.rotation.z, 0, epsilon); - EXPECT_NEAR(outpose.transform.rotation.w, 1, epsilon); - - //Zero distance - if (source_frame == target_frame) - { - EXPECT_NEAR(outpose.transform.translation.x, 0, epsilon); - } - else if ((source_frame == "a" && target_frame =="b") || - (source_frame == "b" && target_frame =="c")) - { - EXPECT_NEAR(outpose.transform.translation.x, 1, epsilon); - EXPECT_NEAR(outpose.transform.translation.y, 0, epsilon); - } - else if ((source_frame == "b" && target_frame =="a") || - (source_frame == "c" && target_frame =="b")) - { - 
EXPECT_NEAR(outpose.transform.translation.x, -1, epsilon); - EXPECT_NEAR(outpose.transform.translation.y, 0, epsilon); - } - else if ((source_frame == "a" && target_frame =="f") || - (source_frame == "f" && target_frame =="g")) - { - EXPECT_NEAR(outpose.transform.translation.x, 0, epsilon); - EXPECT_NEAR(outpose.transform.translation.y, 1, epsilon); - } - else if ((source_frame == "f" && target_frame =="a") || - (source_frame == "g" && target_frame =="f")) - { - EXPECT_NEAR(outpose.transform.translation.x, 0, epsilon); - EXPECT_NEAR(outpose.transform.translation.y, -1, epsilon); - } - else if (source_frame == "a" && target_frame =="g") - { - EXPECT_NEAR(outpose.transform.translation.x, 0, epsilon); - EXPECT_NEAR(outpose.transform.translation.y, 2, epsilon); - } - else if (source_frame == "g" && target_frame =="a") - { - EXPECT_NEAR(outpose.transform.translation.x, 0, epsilon); - EXPECT_NEAR(outpose.transform.translation.y, -2, epsilon); - } - else if (source_frame == "a" && target_frame =="c") - { - EXPECT_NEAR(outpose.transform.translation.x, 2, epsilon); - EXPECT_NEAR(outpose.transform.translation.y, 0, epsilon); - } - else if (source_frame == "c" && target_frame =="a") - { - EXPECT_NEAR(outpose.transform.translation.x, -2, epsilon); - EXPECT_NEAR(outpose.transform.translation.y, 0, epsilon); - } - else if (source_frame == "b" && target_frame =="f") - { - EXPECT_NEAR(outpose.transform.translation.x, -1, epsilon); - EXPECT_NEAR(outpose.transform.translation.y, 1, epsilon); - } - else if (source_frame == "f" && target_frame =="b") - { - EXPECT_NEAR(outpose.transform.translation.x, 1, epsilon); - EXPECT_NEAR(outpose.transform.translation.y, -1, epsilon); - } - else if (source_frame == "c" && target_frame =="f") - { - EXPECT_NEAR(outpose.transform.translation.x, -2, epsilon); - EXPECT_NEAR(outpose.transform.translation.y, 1, epsilon); - } - else if (source_frame == "f" && target_frame =="c") - { - EXPECT_NEAR(outpose.transform.translation.x, 2, epsilon); - EXPECT_NEAR(outpose.transform.translation.y, -1, epsilon); - } - else if (source_frame == "b" && target_frame =="g") - { - EXPECT_NEAR(outpose.transform.translation.x, -1, epsilon); - EXPECT_NEAR(outpose.transform.translation.y, 2, epsilon); - } - else if (source_frame == "g" && target_frame =="b") - { - EXPECT_NEAR(outpose.transform.translation.x, 1, epsilon); - EXPECT_NEAR(outpose.transform.translation.y, -2, epsilon); - } - else if (source_frame == "c" && target_frame =="g") - { - EXPECT_NEAR(outpose.transform.translation.x, -2, epsilon); - EXPECT_NEAR(outpose.transform.translation.y, 2, epsilon); - } - else if (source_frame == "g" && target_frame =="c") - { - EXPECT_NEAR(outpose.transform.translation.x, 2, epsilon); - EXPECT_NEAR(outpose.transform.translation.y, -2, epsilon); - } - else - { - //printf("source_frame %s target_frame %s time %f\n", source_frame.c_str(), target_frame.c_str(), eval_time.toSec()); - return false; - } - return true; -} - -/* Check v result return false if test parameters unmet */ -bool check_y_result(const geometry_msgs::TransformStamped& outpose, const std::string& source_frame, const std::string& target_frame, const ros::Time& eval_time, double epsilon) -{ - //printf("source_frame %s target_frame %s time %f\n", source_frame.c_str(), target_frame.c_str(), eval_time.toSec()); - EXPECT_EQ(outpose.header.stamp, eval_time); - EXPECT_EQ(outpose.header.frame_id, source_frame); - EXPECT_EQ(outpose.child_frame_id, target_frame); - EXPECT_NEAR(outpose.transform.translation.z, 0, epsilon); - 
EXPECT_NEAR(outpose.transform.rotation.x, 0, epsilon); - EXPECT_NEAR(outpose.transform.rotation.y, 0, epsilon); - EXPECT_NEAR(outpose.transform.rotation.z, 0, epsilon); - EXPECT_NEAR(outpose.transform.rotation.w, 1, epsilon); - - //Zero distance - if (source_frame == target_frame) - { - EXPECT_NEAR(outpose.transform.translation.x, 0, epsilon); - } - else if ((source_frame == "a" && target_frame =="b") || - (source_frame == "b" && target_frame =="c")) - { - EXPECT_NEAR(outpose.transform.translation.x, 1, epsilon); - EXPECT_NEAR(outpose.transform.translation.y, 0, epsilon); - } - else if ((source_frame == "b" && target_frame =="a") || - (source_frame == "c" && target_frame =="b")) - { - EXPECT_NEAR(outpose.transform.translation.x, -1, epsilon); - EXPECT_NEAR(outpose.transform.translation.y, 0, epsilon); - } - else if ((source_frame == "b" && target_frame =="d") || - (source_frame == "d" && target_frame =="e")) - { - EXPECT_NEAR(outpose.transform.translation.x, 0, epsilon); - EXPECT_NEAR(outpose.transform.translation.y, 1, epsilon); - } - else if ((source_frame == "d" && target_frame =="b") || - (source_frame == "e" && target_frame =="d")) - { - EXPECT_NEAR(outpose.transform.translation.x, 0, epsilon); - EXPECT_NEAR(outpose.transform.translation.y, -1, epsilon); - } - else if (source_frame == "b" && target_frame =="e") - { - EXPECT_NEAR(outpose.transform.translation.x, 0, epsilon); - EXPECT_NEAR(outpose.transform.translation.y, 2, epsilon); - } - else if (source_frame == "e" && target_frame =="b") - { - EXPECT_NEAR(outpose.transform.translation.x, 0, epsilon); - EXPECT_NEAR(outpose.transform.translation.y, -2, epsilon); - } - else if (source_frame == "a" && target_frame =="c") - { - EXPECT_NEAR(outpose.transform.translation.x, 2, epsilon); - EXPECT_NEAR(outpose.transform.translation.y, 0, epsilon); - } - else if (source_frame == "c" && target_frame =="a") - { - EXPECT_NEAR(outpose.transform.translation.x, -2, epsilon); - EXPECT_NEAR(outpose.transform.translation.y, 0, epsilon); - } - else if (source_frame == "a" && target_frame =="d") - { - EXPECT_NEAR(outpose.transform.translation.x, 1, epsilon); - EXPECT_NEAR(outpose.transform.translation.y, 1, epsilon); - } - else if (source_frame == "d" && target_frame =="a") - { - EXPECT_NEAR(outpose.transform.translation.x, -1, epsilon); - EXPECT_NEAR(outpose.transform.translation.y, -1, epsilon); - } - else if (source_frame == "c" && target_frame =="d") - { - EXPECT_NEAR(outpose.transform.translation.x, -1, epsilon); - EXPECT_NEAR(outpose.transform.translation.y, 1, epsilon); - } - else if (source_frame == "d" && target_frame =="c") - { - EXPECT_NEAR(outpose.transform.translation.x, 1, epsilon); - EXPECT_NEAR(outpose.transform.translation.y, -1, epsilon); - } - else if (source_frame == "a" && target_frame =="e") - { - EXPECT_NEAR(outpose.transform.translation.x, 1, epsilon); - EXPECT_NEAR(outpose.transform.translation.y, 2, epsilon); - } - else if (source_frame == "e" && target_frame =="a") - { - EXPECT_NEAR(outpose.transform.translation.x, -1, epsilon); - EXPECT_NEAR(outpose.transform.translation.y, -2, epsilon); - } - else if (source_frame == "c" && target_frame =="e") - { - EXPECT_NEAR(outpose.transform.translation.x, -1, epsilon); - EXPECT_NEAR(outpose.transform.translation.y, 2, epsilon); - } - else if (source_frame == "e" && target_frame =="c") - { - EXPECT_NEAR(outpose.transform.translation.x, 1, epsilon); - EXPECT_NEAR(outpose.transform.translation.y, -2, epsilon); - } - else - { - //printf("source_frame %s target_frame %s time %f\n", 
source_frame.c_str(), target_frame.c_str(), eval_time.toSec()); - return false; - } - return true; -} - - -TEST(BufferCore_lookupTransform, one_link_configuration) -{ - double epsilon = 1e-6; - - - - rostest::Permuter permuter; - - std::vector<ros::Time> times; - times.push_back(ros::Time(1.0)); - times.push_back(ros::Time(10.0)); - times.push_back(ros::Time(0.0)); - ros::Time eval_time; - permuter.addOptionSet(times, &eval_time); - - std::vector<ros::Duration> durations; - durations.push_back(ros::Duration(1.0)); - durations.push_back(ros::Duration(0.001)); - durations.push_back(ros::Duration(0.1)); - ros::Duration interpolation_space; - // permuter.addOptionSet(durations, &interpolation_space); - - std::vector<std::string> frames; - frames.push_back("1"); - frames.push_back("2"); - std::string source_frame; - permuter.addOptionSet(frames, &source_frame); - - std::string target_frame; - permuter.addOptionSet(frames, &target_frame); - - while (permuter.step()) - { - - tf2::BufferCore mBC; - setupTree(mBC, "1", eval_time, interpolation_space); - - geometry_msgs::TransformStamped outpose = mBC.lookupTransform(source_frame, target_frame, eval_time); - - EXPECT_TRUE(check_1_result(outpose, source_frame, target_frame, eval_time, epsilon)); - } -} - - -TEST(BufferCore_lookupTransform, v_configuration) -{ - double epsilon = 1e-6; - - - - rostest::Permuter permuter; - - std::vector<ros::Time> times; - times.push_back(ros::Time(1.0)); - times.push_back(ros::Time(10.0)); - times.push_back(ros::Time(0.0)); - ros::Time eval_time; - permuter.addOptionSet(times, &eval_time); - - std::vector<ros::Duration> durations; - durations.push_back(ros::Duration(1.0)); - durations.push_back(ros::Duration(0.001)); - durations.push_back(ros::Duration(0.1)); - ros::Duration interpolation_space; - // permuter.addOptionSet(durations, &interpolation_space); - - std::vector<std::string> frames; - frames.push_back("a"); - frames.push_back("b"); - frames.push_back("c"); - frames.push_back("f"); - frames.push_back("g"); - std::string source_frame; - permuter.addOptionSet(frames, &source_frame); - - std::string target_frame; - permuter.addOptionSet(frames, &target_frame); - - while (permuter.step()) - { - - tf2::BufferCore mBC; - setupTree(mBC, "v", eval_time, interpolation_space); - - geometry_msgs::TransformStamped outpose = mBC.lookupTransform(source_frame, target_frame, eval_time); - - EXPECT_TRUE(check_v_result(outpose, source_frame, target_frame, eval_time, epsilon)); - } -} - - -TEST(BufferCore_lookupTransform, y_configuration) -{ - double epsilon = 1e-6; - - - - rostest::Permuter permuter; - - std::vector<ros::Time> times; - times.push_back(ros::Time(1.0)); - times.push_back(ros::Time(10.0)); - times.push_back(ros::Time(0.0)); - ros::Time eval_time; - permuter.addOptionSet(times, &eval_time); - - std::vector<ros::Duration> durations; - durations.push_back(ros::Duration(1.0)); - durations.push_back(ros::Duration(0.001)); - durations.push_back(ros::Duration(0.1)); - ros::Duration interpolation_space; - // permuter.addOptionSet(durations, &interpolation_space); - - std::vector<std::string> frames; - frames.push_back("a"); - frames.push_back("b"); - frames.push_back("c"); - frames.push_back("d"); - frames.push_back("e"); - std::string source_frame; - permuter.addOptionSet(frames, &source_frame); - - std::string target_frame; - permuter.addOptionSet(frames, &target_frame); - - while (permuter.step()) - { - - tf2::BufferCore mBC; - setupTree(mBC, "y", eval_time, interpolation_space); - - geometry_msgs::TransformStamped outpose = mBC.lookupTransform(source_frame, target_frame, eval_time); - - EXPECT_TRUE(check_y_result(outpose,
source_frame, target_frame, eval_time, epsilon)); - } -} - -TEST(BufferCore_lookupTransform, multi_configuration) -{ - double epsilon = 1e-6; - - - - rostest::Permuter permuter; - - std::vector<ros::Time> times; - times.push_back(ros::Time(1.0)); - times.push_back(ros::Time(10.0)); - times.push_back(ros::Time(0.0)); - ros::Time eval_time; - permuter.addOptionSet(times, &eval_time); - - std::vector<ros::Duration> durations; - durations.push_back(ros::Duration(1.0)); - durations.push_back(ros::Duration(0.001)); - durations.push_back(ros::Duration(0.1)); - ros::Duration interpolation_space; - // permuter.addOptionSet(durations, &interpolation_space); - - std::vector<std::string> frames; - frames.push_back("1"); - frames.push_back("2"); - frames.push_back("a"); - frames.push_back("b"); - frames.push_back("c"); - frames.push_back("f"); - frames.push_back("g"); - std::string source_frame; - permuter.addOptionSet(frames, &source_frame); - - std::string target_frame; - permuter.addOptionSet(frames, &target_frame); - - while (permuter.step()) - { - - tf2::BufferCore mBC; - setupTree(mBC, "1_v", eval_time, interpolation_space); - - if (mBC.canTransform(source_frame, target_frame, eval_time)) - { - geometry_msgs::TransformStamped outpose = mBC.lookupTransform(source_frame, target_frame, eval_time); - - if ((source_frame == "1" || source_frame =="2") && (target_frame =="1" || target_frame == "2")) - EXPECT_TRUE(check_1_result(outpose, source_frame, target_frame, eval_time, epsilon)); - else if ((source_frame == "a" || source_frame == "b" || source_frame == "c" || source_frame == "f" || source_frame == "g") && - (target_frame == "a" || target_frame == "b" || target_frame == "c" || target_frame == "f" || target_frame == "g")) - EXPECT_TRUE(check_v_result(outpose, source_frame, target_frame, eval_time, epsilon)); - else - EXPECT_FALSE("Frames unhandled"); - } - else - EXPECT_TRUE(((source_frame == "a" || source_frame =="b" || source_frame == "c" || source_frame == "f" || source_frame == "g") && - (target_frame == "1" || target_frame == "2") ) - || - ((target_frame == "a" || target_frame =="b" || target_frame == "c" || target_frame == "f" || target_frame == "g") && - (source_frame == "1" || source_frame == "2")) - ); - - } -} - -#define CHECK_QUATERNION_NEAR(_q1, _x, _y, _z, _w, _epsilon) \ - { \ - btQuaternion q1(_q1.x, _q1.y, _q1.z, _q1.w); \ - btQuaternion q2(_x, _y, _z, _w); \ - double angle = q1.angle(q2); \ - EXPECT_TRUE(fabs(angle) < _epsilon || fabs(angle - M_PI) < _epsilon); \ - } - -#define CHECK_TRANSFORMS_NEAR(_out, _expected, _eps) \ - EXPECT_NEAR(_out.transform.translation.x, _expected.getOrigin().x(), _eps); \ - EXPECT_NEAR(_out.transform.translation.y, _expected.getOrigin().y(), _eps); \ - EXPECT_NEAR(_out.transform.translation.z, _expected.getOrigin().z(), _eps); \ - CHECK_QUATERNION_NEAR(_out.transform.rotation, _expected.getRotation().x(), _expected.getRotation().y(), _expected.getRotation().z(), _expected.getRotation().w(), _eps); - - -// Simple test with compound transform -TEST(BufferCore_lookupTransform, compound_xfm_configuration) -{ - /* - * Frames - * - * root->a - * - * root->b->c->d - * - */ - - double epsilon = 2e-5; // Larger epsilon for interpolation values - - tf2::BufferCore mBC; - - geometry_msgs::TransformStamped tsa; - tsa.header.frame_id = "root"; - tsa.child_frame_id = "a"; - tsa.transform.translation.x = 1.0; - tsa.transform.translation.y = 1.0; - tsa.transform.translation.z = 1.0; - btQuaternion q1; - q1.setEuler(0.25, .5, .75); - tsa.transform.rotation.x = q1.x(); - tsa.transform.rotation.y =
q1.y(); - tsa.transform.rotation.z = q1.z(); - tsa.transform.rotation.w = q1.w(); - EXPECT_TRUE(mBC.setTransform(tsa, "authority")); - - geometry_msgs::TransformStamped tsb; - tsb.header.frame_id = "root"; - tsb.child_frame_id = "b"; - tsb.transform.translation.x = -1.0; - tsb.transform.translation.y = 0.0; - tsb.transform.translation.z = -1.0; - btQuaternion q2; - q2.setEuler(1.0, 0.25, 0.5); - tsb.transform.rotation.x = q2.x(); - tsb.transform.rotation.y = q2.y(); - tsb.transform.rotation.z = q2.z(); - tsb.transform.rotation.w = q2.w(); - EXPECT_TRUE(mBC.setTransform(tsb, "authority")); - - geometry_msgs::TransformStamped tsc; - tsc.header.frame_id = "b"; - tsc.child_frame_id = "c"; - tsc.transform.translation.x = 0.0; - tsc.transform.translation.y = 2.0; - tsc.transform.translation.z = 0.5; - btQuaternion q3; - q3.setEuler(0.25, .75, 1.25); - tsc.transform.rotation.x = q3.x(); - tsc.transform.rotation.y = q3.y(); - tsc.transform.rotation.z = q3.z(); - tsc.transform.rotation.w = q3.w(); - EXPECT_TRUE(mBC.setTransform(tsc, "authority")); - - geometry_msgs::TransformStamped tsd; - tsd.header.frame_id = "c"; - tsd.child_frame_id = "d"; - tsd.transform.translation.x = 0.5; - tsd.transform.translation.y = -1; - tsd.transform.translation.z = 1.5; - btQuaternion q4; - q4.setEuler(-0.5, 1.0, -.75); - tsd.transform.rotation.x = q4.x(); - tsd.transform.rotation.y = q4.y(); - tsd.transform.rotation.z = q4.z(); - tsd.transform.rotation.w = q4.w(); - EXPECT_TRUE(mBC.setTransform(tsd, "authority")); - - btTransform ta, tb, tc, td, expected_ab, expected_bc, expected_cb, expected_ac, expected_ba, expected_ca, expected_ad, expected_da, expected_bd, expected_db, expected_rootd, expected_rootc; - ta.setOrigin(btVector3(1.0, 1.0, 1.0)); - ta.setRotation(q1); - tb.setOrigin(btVector3(-1.0, 0.0, -1.0)); - tb.setRotation(q2); - tc.setOrigin(btVector3(0.0, 2.0, 0.5)); - tc.setRotation(q3); - td.setOrigin(btVector3(0.5, -1, 1.5)); - td.setRotation(q4); - - - expected_ab = ta.inverse() * tb; - expected_ac = ta.inverse() * tb * tc; - expected_ad = ta.inverse() * tb * tc * td; - expected_cb = tc.inverse(); - expected_bc = tc; - expected_bd = tc * td; - expected_db = expected_bd.inverse(); - expected_ba = tb.inverse() * ta; - expected_ca = tc.inverse() * tb.inverse() * ta; - expected_da = td.inverse() * tc.inverse() * tb.inverse() * ta; - expected_rootd = tb * tc * td; - expected_rootc = tb * tc; - - // root -> b -> c - geometry_msgs::TransformStamped out_rootc = mBC.lookupTransform("root", "c", ros::Time()); - CHECK_TRANSFORMS_NEAR(out_rootc, expected_rootc, epsilon); - - // root -> b -> c -> d - geometry_msgs::TransformStamped out_rootd = mBC.lookupTransform("root", "d", ros::Time()); - CHECK_TRANSFORMS_NEAR(out_rootd, expected_rootd, epsilon); - - // a <- root -> b - geometry_msgs::TransformStamped out_ab = mBC.lookupTransform("a", "b", ros::Time()); - CHECK_TRANSFORMS_NEAR(out_ab, expected_ab, epsilon); - - geometry_msgs::TransformStamped out_ba = mBC.lookupTransform("b", "a", ros::Time()); - CHECK_TRANSFORMS_NEAR(out_ba, expected_ba, epsilon); - - // a <- root -> b -> c - geometry_msgs::TransformStamped out_ac = mBC.lookupTransform("a", "c", ros::Time()); - CHECK_TRANSFORMS_NEAR(out_ac, expected_ac, epsilon); - - geometry_msgs::TransformStamped out_ca = mBC.lookupTransform("c", "a", ros::Time()); - CHECK_TRANSFORMS_NEAR(out_ca, expected_ca, epsilon); - - // a <- root -> b -> c -> d - geometry_msgs::TransformStamped out_ad = mBC.lookupTransform("a", "d", ros::Time()); - CHECK_TRANSFORMS_NEAR(out_ad, expected_ad, 
epsilon); - - geometry_msgs::TransformStamped out_da = mBC.lookupTransform("d", "a", ros::Time()); - CHECK_TRANSFORMS_NEAR(out_da, expected_da, epsilon); - - // b -> c - geometry_msgs::TransformStamped out_cb = mBC.lookupTransform("c", "b", ros::Time()); - CHECK_TRANSFORMS_NEAR(out_cb, expected_cb, epsilon); - - geometry_msgs::TransformStamped out_bc = mBC.lookupTransform("b", "c", ros::Time()); - CHECK_TRANSFORMS_NEAR(out_bc, expected_bc, epsilon); - - // b -> c -> d - geometry_msgs::TransformStamped out_bd = mBC.lookupTransform("b", "d", ros::Time()); - CHECK_TRANSFORMS_NEAR(out_bd, expected_bd, epsilon); - - geometry_msgs::TransformStamped out_db = mBC.lookupTransform("d", "b", ros::Time()); - CHECK_TRANSFORMS_NEAR(out_db, expected_db, epsilon); -} - -// Time varying transforms, testing interpolation -TEST(BufferCore_lookupTransform, helix_configuration) -{ - double epsilon = 2e-5; // Larger epsilon for interpolation values - - tf2::BufferCore mBC; - - ros::Time t0 = ros::Time() + ros::Duration(10); - ros::Duration step = ros::Duration(0.05); - ros::Duration half_step = ros::Duration(0.025); - ros::Time t1 = t0 + ros::Duration(5.0); - - /* - * a->b->c - * - * b.z = vel * (t - t0) - * c.x = cos(theta * (t - t0)) - * c.y = sin(theta * (t - t0)) - * - * a->d - * - * d.z = 2 * cos(theta * (t - t0)) - * a->d transforms are at half-step between a->b->c transforms - */ - - double theta = 0.25; - double vel = 1.0; - - for (ros::Time t = t0; t <= t1; t += step) - { - ros::Time t2 = t + half_step; - double dt = (t - t0).toSec(); - double dt2 = (t2 - t0).toSec(); - - geometry_msgs::TransformStamped ts; - ts.header.frame_id = "a"; - ts.header.stamp = t; - ts.child_frame_id = "b"; - ts.transform.translation.z = vel * dt; - ts.transform.rotation.w = 1.0; - EXPECT_TRUE(mBC.setTransform(ts, "authority")); - - geometry_msgs::TransformStamped ts2; - ts2.header.frame_id = "b"; - ts2.header.stamp = t; - ts2.child_frame_id = "c"; - ts2.transform.translation.x = cos(theta * dt); - ts2.transform.translation.y = sin(theta * dt); - btQuaternion q; - q.setEuler(0,0,theta*dt); - ts2.transform.rotation.z = q.z(); - ts2.transform.rotation.w = q.w(); - EXPECT_TRUE(mBC.setTransform(ts2, "authority")); - - geometry_msgs::TransformStamped ts3; - ts3.header.frame_id = "a"; - ts3.header.stamp = t2; - ts3.child_frame_id = "d"; - ts3.transform.translation.z = cos(theta * dt2); - ts3.transform.rotation.w = 1.0; - EXPECT_TRUE(mBC.setTransform(ts3, "authority")); - } - - - for (ros::Time t = t0 + half_step; t < t1; t += step) - { - ros::Time t2 = t + half_step; - double dt = (t - t0).toSec(); - double dt2 = (t2 - t0).toSec(); - - geometry_msgs::TransformStamped out_ab = mBC.lookupTransform("a", "b", t); - EXPECT_NEAR(out_ab.transform.translation.z, vel * dt, epsilon); - - geometry_msgs::TransformStamped out_ac = mBC.lookupTransform("a", "c", t); - EXPECT_NEAR(out_ac.transform.translation.x, cos(theta * dt), epsilon); - EXPECT_NEAR(out_ac.transform.translation.y, sin(theta * dt), epsilon); - EXPECT_NEAR(out_ac.transform.translation.z, vel * dt, epsilon); - btQuaternion q; - q.setEuler(0,0,theta*dt); - CHECK_QUATERNION_NEAR(out_ac.transform.rotation, 0, 0, q.z(), q.w(), epsilon); - - geometry_msgs::TransformStamped out_ad = mBC.lookupTransform("a", "d", t); - EXPECT_NEAR(out_ad.transform.translation.z, cos(theta * dt), epsilon); - - geometry_msgs::TransformStamped out_cd = mBC.lookupTransform("c", "d", t2); - EXPECT_NEAR(out_cd.transform.translation.x, -1, epsilon); - EXPECT_NEAR(out_cd.transform.translation.y, 0, epsilon); - 
EXPECT_NEAR(out_cd.transform.translation.z, cos(theta * dt2) - vel * dt2, epsilon); - btQuaternion mq; - mq.setEuler(0,0,-theta*dt2); - CHECK_QUATERNION_NEAR(out_cd.transform.rotation, 0, 0, mq.z(), mq.w(), epsilon); - } - - // Advanced API - for (ros::Time t = t0 + half_step; t < t1; t += (step + step)) - { - ros::Time t2 = t + step; - double dt = (t - t0).toSec(); - double dt2 = (t2 - t0).toSec(); - - geometry_msgs::TransformStamped out_cd2 = mBC.lookupTransform("c", t, "d", t2, "a"); - EXPECT_NEAR(out_cd2.transform.translation.x, -1, epsilon); - EXPECT_NEAR(out_cd2.transform.translation.y, 0, epsilon); - EXPECT_NEAR(out_cd2.transform.translation.z, cos(theta * dt2) - vel * dt, epsilon); - btQuaternion mq2; - mq2.setEuler(0,0,-theta*dt); - CHECK_QUATERNION_NEAR(out_cd2.transform.rotation, 0, 0, mq2.z(), mq2.w(), epsilon); - } -} - - -TEST(BufferCore_lookupTransform, ring_45_configuration) -{ - double epsilon = 1e-6; - rostest::Permuter permuter; - - std::vector<ros::Time> times; - times.push_back(ros::Time(1.0)); - times.push_back(ros::Time(10.0)); - times.push_back(ros::Time(0.0)); - ros::Time eval_time; - permuter.addOptionSet(times, &eval_time); - - std::vector<ros::Duration> durations; - durations.push_back(ros::Duration(1.0)); - durations.push_back(ros::Duration(0.001)); - durations.push_back(ros::Duration(0.1)); - ros::Duration interpolation_space; - // permuter.addOptionSet(durations, &interpolation_space); - - std::vector<std::string> frames; - frames.push_back("a"); - frames.push_back("b"); - frames.push_back("c"); - frames.push_back("d"); - frames.push_back("e"); - frames.push_back("f"); - frames.push_back("g"); - frames.push_back("h"); - frames.push_back("i"); - /* frames.push_back("inverse_b"); - frames.push_back("inverse_c"); - frames.push_back("inverse_d"); - frames.push_back("inverse_e"); - frames.push_back("inverse_f"); - frames.push_back("inverse_g"); - frames.push_back("inverse_h"); - frames.push_back("inverse_i");*/ - std::string source_frame; - permuter.addOptionSet(frames, &source_frame); - - std::string target_frame; - permuter.addOptionSet(frames, &target_frame); - - while (permuter.step()) - { - - tf2::BufferCore mBC; - setupTree(mBC, "ring_45", eval_time, interpolation_space); - - geometry_msgs::TransformStamped outpose = mBC.lookupTransform(source_frame, target_frame, eval_time); - - - //printf("source_frame %s target_frame %s time %f\n", source_frame.c_str(), target_frame.c_str(), eval_time.toSec()); - EXPECT_EQ(outpose.header.stamp, eval_time); - EXPECT_EQ(outpose.header.frame_id, source_frame); - EXPECT_EQ(outpose.child_frame_id, target_frame); - - - - //Zero distance or all the way - if (source_frame == target_frame || - (source_frame == "a" && target_frame == "i") || - (source_frame == "i" && target_frame == "a") || - (source_frame == "a" && target_frame == "inverse_i") || - (source_frame == "inverse_i" && target_frame == "a") ) - { - //printf ("here %s %s\n", source_frame.c_str(), target_frame.c_str()); - EXPECT_NEAR(outpose.transform.translation.x, 0, epsilon); - EXPECT_NEAR(outpose.transform.translation.y, 0, epsilon); - EXPECT_NEAR(outpose.transform.translation.z, 0, epsilon); - EXPECT_NEAR(outpose.transform.rotation.x, 0, epsilon); - EXPECT_NEAR(outpose.transform.rotation.y, 0, epsilon); - EXPECT_NEAR(outpose.transform.rotation.z, 0, epsilon); - EXPECT_NEAR(fabs(outpose.transform.rotation.w), 1, epsilon); - } - // Chaining 1 - else if ((source_frame == "a" && target_frame =="b") || - (source_frame == "b" && target_frame =="c") || - (source_frame == "c" && target_frame =="d") ||
(source_frame == "d" && target_frame =="e") || - (source_frame == "e" && target_frame =="f") || - (source_frame == "f" && target_frame =="g") || - (source_frame == "g" && target_frame =="h") || - (source_frame == "h" && target_frame =="i") - ) - { - EXPECT_NEAR(outpose.transform.translation.x, sqrt(2)/2 - 1, epsilon); - EXPECT_NEAR(outpose.transform.translation.y, sqrt(2)/2 , epsilon); - EXPECT_NEAR(outpose.transform.translation.z, 0, epsilon); - EXPECT_NEAR(outpose.transform.rotation.x, 0, epsilon); - EXPECT_NEAR(outpose.transform.rotation.y, 0, epsilon); - EXPECT_NEAR(outpose.transform.rotation.z, sin(M_PI/8), epsilon); - EXPECT_NEAR(outpose.transform.rotation.w, cos(M_PI/8), epsilon); - } - // Inverse Chaining 1 - else if ((source_frame == "b" && target_frame =="a") || - (source_frame == "c" && target_frame =="b") || - (source_frame == "d" && target_frame =="c") || - (source_frame == "e" && target_frame =="d") || - (source_frame == "f" && target_frame =="e") || - (source_frame == "g" && target_frame =="f") || - (source_frame == "h" && target_frame =="g") || - (source_frame == "i" && target_frame =="h") - ) - { - EXPECT_NEAR(outpose.transform.translation.x, sqrt(2)/2 - 1, epsilon); - EXPECT_NEAR(outpose.transform.translation.y, -sqrt(2)/2 , epsilon); - EXPECT_NEAR(outpose.transform.translation.z, 0, epsilon); - EXPECT_NEAR(outpose.transform.rotation.x, 0, epsilon); - EXPECT_NEAR(outpose.transform.rotation.y, 0, epsilon); - EXPECT_NEAR(outpose.transform.rotation.z, sin(-M_PI/8), epsilon); - EXPECT_NEAR(outpose.transform.rotation.w, cos(-M_PI/8), epsilon); - } - // Chaining 2 - else if ((source_frame == "a" && target_frame =="c") || - (source_frame == "b" && target_frame =="d") || - (source_frame == "c" && target_frame =="e") || - (source_frame == "d" && target_frame =="f") || - (source_frame == "e" && target_frame =="g") || - (source_frame == "f" && target_frame =="h") || - (source_frame == "g" && target_frame =="i") - ) - { - EXPECT_NEAR(outpose.transform.translation.x, -1, epsilon); - EXPECT_NEAR(outpose.transform.translation.y, 1 , epsilon); - EXPECT_NEAR(outpose.transform.translation.z, 0, epsilon); - EXPECT_NEAR(outpose.transform.rotation.x, 0, epsilon); - EXPECT_NEAR(outpose.transform.rotation.y, 0, epsilon); - EXPECT_NEAR(outpose.transform.rotation.z, sin(M_PI/4), epsilon); - EXPECT_NEAR(outpose.transform.rotation.w, cos(M_PI/4), epsilon); - } - // Inverse Chaining 2 - else if ((source_frame == "c" && target_frame =="a") || - (source_frame == "d" && target_frame =="b") || - (source_frame == "e" && target_frame =="c") || - (source_frame == "f" && target_frame =="d") || - (source_frame == "g" && target_frame =="e") || - (source_frame == "h" && target_frame =="f") || - (source_frame == "i" && target_frame =="g") - ) - { - EXPECT_NEAR(outpose.transform.translation.x, -1, epsilon); - EXPECT_NEAR(outpose.transform.translation.y, -1 , epsilon); - EXPECT_NEAR(outpose.transform.translation.z, 0, epsilon); - EXPECT_NEAR(outpose.transform.rotation.x, 0, epsilon); - EXPECT_NEAR(outpose.transform.rotation.y, 0, epsilon); - EXPECT_NEAR(outpose.transform.rotation.z, sin(-M_PI/4), epsilon); - EXPECT_NEAR(outpose.transform.rotation.w, cos(-M_PI/4), epsilon); - } - // Chaining 3 - else if ((source_frame == "a" && target_frame =="d") || - (source_frame == "b" && target_frame =="e") || - (source_frame == "c" && target_frame =="f") || - (source_frame == "d" && target_frame =="g") || - (source_frame == "e" && target_frame =="h") || - (source_frame == "f" && target_frame =="i") - ) - { - 
EXPECT_NEAR(outpose.transform.translation.x, -1 - sqrt(2)/2, epsilon); - EXPECT_NEAR(outpose.transform.translation.y, sqrt(2)/2 , epsilon); - EXPECT_NEAR(outpose.transform.translation.z, 0, epsilon); - EXPECT_NEAR(outpose.transform.rotation.x, 0, epsilon); - EXPECT_NEAR(outpose.transform.rotation.y, 0, epsilon); - EXPECT_NEAR(outpose.transform.rotation.z, sin(M_PI*3/8), epsilon); - EXPECT_NEAR(outpose.transform.rotation.w, cos(M_PI*3/8), epsilon); - } - // Inverse Chaining 3 - else if ((target_frame == "a" && source_frame =="d") || - (target_frame == "b" && source_frame =="e") || - (target_frame == "c" && source_frame =="f") || - (target_frame == "d" && source_frame =="g") || - (target_frame == "e" && source_frame =="h") || - (target_frame == "f" && source_frame =="i") - ) - { - EXPECT_NEAR(outpose.transform.translation.x, -1 - sqrt(2)/2, epsilon); - EXPECT_NEAR(outpose.transform.translation.y, - sqrt(2)/2 , epsilon); - EXPECT_NEAR(outpose.transform.translation.z, 0, epsilon); - EXPECT_NEAR(outpose.transform.rotation.x, 0, epsilon); - EXPECT_NEAR(outpose.transform.rotation.y, 0, epsilon); - EXPECT_NEAR(outpose.transform.rotation.z, sin(-M_PI*3/8), epsilon); - EXPECT_NEAR(outpose.transform.rotation.w, cos(-M_PI*3/8), epsilon); - } - // Chaining 4 - else if ((source_frame == "a" && target_frame =="e") || - (source_frame == "b" && target_frame =="f") || - (source_frame == "c" && target_frame =="g") || - (source_frame == "d" && target_frame =="h") || - (source_frame == "e" && target_frame =="i") - ) - { - EXPECT_NEAR(outpose.transform.translation.x, -2, epsilon); - EXPECT_NEAR(outpose.transform.translation.y, 0 , epsilon); - EXPECT_NEAR(outpose.transform.translation.z, 0, epsilon); - EXPECT_NEAR(outpose.transform.rotation.x, 0, epsilon); - EXPECT_NEAR(outpose.transform.rotation.y, 0, epsilon); - EXPECT_NEAR(outpose.transform.rotation.z, sin(M_PI/2), epsilon); - EXPECT_NEAR(outpose.transform.rotation.w, cos(M_PI/2), epsilon); - } - // Inverse Chaining 4 - else if ((target_frame == "a" && source_frame =="e") || - (target_frame == "b" && source_frame =="f") || - (target_frame == "c" && source_frame =="g") || - (target_frame == "d" && source_frame =="h") || - (target_frame == "e" && source_frame =="i") - ) - { - EXPECT_NEAR(outpose.transform.translation.x, -2, epsilon); - EXPECT_NEAR(outpose.transform.translation.y, 0 , epsilon); - EXPECT_NEAR(outpose.transform.translation.z, 0, epsilon); - EXPECT_NEAR(outpose.transform.rotation.x, 0, epsilon); - EXPECT_NEAR(outpose.transform.rotation.y, 0, epsilon); - EXPECT_NEAR(outpose.transform.rotation.z, sin(-M_PI/2), epsilon); - EXPECT_NEAR(outpose.transform.rotation.w, cos(-M_PI/2), epsilon); - } - // Chaining 5 - else if ((source_frame == "a" && target_frame =="f") || - (source_frame == "b" && target_frame =="g") || - (source_frame == "c" && target_frame =="h") || - (source_frame == "d" && target_frame =="i") - ) - { - EXPECT_NEAR(outpose.transform.translation.x, -1 - sqrt(2) /2, epsilon); - EXPECT_NEAR(outpose.transform.translation.y, - sqrt(2) /2 , epsilon); - EXPECT_NEAR(outpose.transform.translation.z, 0, epsilon); - EXPECT_NEAR(outpose.transform.rotation.x, 0, epsilon); - EXPECT_NEAR(outpose.transform.rotation.y, 0, epsilon); - EXPECT_NEAR(outpose.transform.rotation.z, sin(M_PI*5/8), epsilon); - EXPECT_NEAR(outpose.transform.rotation.w, cos(M_PI*5/8), epsilon); - } - // Inverse Chaining 5 - else if ((target_frame == "a" && source_frame =="f") || - (target_frame == "b" && source_frame =="g") || - (target_frame == "c" && source_frame =="h") || - 
(target_frame == "d" && source_frame =="i") - ) - { - EXPECT_NEAR(outpose.transform.translation.x, -1 - sqrt(2)/2, epsilon); - EXPECT_NEAR(outpose.transform.translation.y, sqrt(2)/2 , epsilon); - EXPECT_NEAR(outpose.transform.translation.z, 0, epsilon); - EXPECT_NEAR(outpose.transform.rotation.x, 0, epsilon); - EXPECT_NEAR(outpose.transform.rotation.y, 0, epsilon); - EXPECT_NEAR(outpose.transform.rotation.z, sin(-M_PI*5/8), epsilon); - EXPECT_NEAR(outpose.transform.rotation.w, cos(-M_PI*5/8), epsilon); - } - // Chaining 6 - else if ((source_frame == "a" && target_frame =="g") || - (source_frame == "b" && target_frame =="h") || - (source_frame == "c" && target_frame =="i") - ) - { - EXPECT_NEAR(outpose.transform.translation.x, -1, epsilon); - EXPECT_NEAR(outpose.transform.translation.y, -1 , epsilon); - EXPECT_NEAR(outpose.transform.translation.z, 0, epsilon); - EXPECT_NEAR(outpose.transform.rotation.x, 0, epsilon); - EXPECT_NEAR(outpose.transform.rotation.y, 0, epsilon); - EXPECT_NEAR(outpose.transform.rotation.z, sin(M_PI*6/8), epsilon); - EXPECT_NEAR(outpose.transform.rotation.w, cos(M_PI*6/8), epsilon); - } - // Inverse Chaining 6 - else if ((target_frame == "a" && source_frame =="g") || - (target_frame == "b" && source_frame =="h") || - (target_frame == "c" && source_frame =="i") - ) - { - EXPECT_NEAR(outpose.transform.translation.x, -1, epsilon); - EXPECT_NEAR(outpose.transform.translation.y, 1 , epsilon); - EXPECT_NEAR(outpose.transform.translation.z, 0, epsilon); - EXPECT_NEAR(outpose.transform.rotation.x, 0, epsilon); - EXPECT_NEAR(outpose.transform.rotation.y, 0, epsilon); - EXPECT_NEAR(outpose.transform.rotation.z, sin(-M_PI*6/8), epsilon); - EXPECT_NEAR(outpose.transform.rotation.w, cos(-M_PI*6/8), epsilon); - } - // Chaining 7 - else if ((source_frame == "a" && target_frame =="h") || - (source_frame == "b" && target_frame =="i") - ) - { - EXPECT_NEAR(outpose.transform.translation.x, sqrt(2)/2 - 1, epsilon); - EXPECT_NEAR(outpose.transform.translation.y, -sqrt(2)/2 , epsilon); - EXPECT_NEAR(outpose.transform.translation.z, 0, epsilon); - EXPECT_NEAR(outpose.transform.rotation.x, 0, epsilon); - EXPECT_NEAR(outpose.transform.rotation.y, 0, epsilon); - EXPECT_NEAR(outpose.transform.rotation.z, sin(M_PI*7/8), epsilon); - EXPECT_NEAR(outpose.transform.rotation.w, cos(M_PI*7/8), epsilon); - } - // Inverse Chaining 7 - else if ((target_frame == "a" && source_frame =="h") || - (target_frame == "b" && source_frame =="i") - ) - { - EXPECT_NEAR(outpose.transform.translation.x, sqrt(2)/2 - 1, epsilon); - EXPECT_NEAR(outpose.transform.translation.y, sqrt(2)/2 , epsilon); - EXPECT_NEAR(outpose.transform.translation.z, 0, epsilon); - EXPECT_NEAR(outpose.transform.rotation.x, 0, epsilon); - EXPECT_NEAR(outpose.transform.rotation.y, 0, epsilon); - EXPECT_NEAR(outpose.transform.rotation.z, sin(-M_PI*7/8), epsilon); - EXPECT_NEAR(outpose.transform.rotation.w, cos(-M_PI*7/8), epsilon); - } - else - { - EXPECT_FALSE("Ring_45 testing Shouldn't get here"); - printf("source_frame %s target_frame %s time %f\n", source_frame.c_str(), target_frame.c_str(), eval_time.toSec()); - } - - } -} - -TEST(BufferCore_lookupTransform, invalid_arguments) -{ - tf2::BufferCore mBC; - - setupTree(mBC, "i", ros::Time(1.0)); - - EXPECT_NO_THROW(mBC.lookupTransform("b", "a", ros::Time())); - - //Empty frame_id - EXPECT_THROW(mBC.lookupTransform("", "a", ros::Time()), tf2::InvalidArgumentException); - EXPECT_THROW(mBC.lookupTransform("b", "", ros::Time()), tf2::InvalidArgumentException); - - //frame_id with / - 
EXPECT_THROW(mBC.lookupTransform("/b", "a", ros::Time()), tf2::InvalidArgumentException); - EXPECT_THROW(mBC.lookupTransform("b", "/a", ros::Time()), tf2::InvalidArgumentException); - -}; - -TEST(BufferCore_canTransform, invalid_arguments) -{ - tf2::BufferCore mBC; - - setupTree(mBC, "i", ros::Time(1.0)); - - EXPECT_TRUE(mBC.canTransform("b", "a", ros::Time())); - - - //Empty frame_id - EXPECT_FALSE(mBC.canTransform("", "a", ros::Time())); - EXPECT_FALSE(mBC.canTransform("b", "", ros::Time())); - - //frame_id with / - EXPECT_FALSE(mBC.canTransform("/b", "a", ros::Time())); - EXPECT_FALSE(mBC.canTransform("b", "/a", ros::Time())); - -}; - -struct TransformableHelper -{ - TransformableHelper() - : called(false) - {} - - void callback(tf2::TransformableRequestHandle request_handle, const std::string& target_frame, const std::string& source_frame, - ros::Time time, tf2::TransformableResult result) - { - called = true; - } - - bool called; -}; - -TEST(BufferCore_transformableCallbacks, alreadyTransformable) -{ - tf2::BufferCore b; - TransformableHelper h; - - geometry_msgs::TransformStamped t; - t.header.stamp = ros::Time(1); - t.header.frame_id = "a"; - t.child_frame_id = "b"; - t.transform.rotation.w = 1.0; - b.setTransform(t, "me"); - - tf2::TransformableCallbackHandle cb_handle = b.addTransformableCallback(boost::bind(&TransformableHelper::callback, &h, _1, _2, _3, _4, _5)); - EXPECT_EQ(b.addTransformableRequest(cb_handle, "a", "b", ros::Time(1)), 0U); -} - -TEST(BufferCore_transformableCallbacks, waitForNewTransform) -{ - tf2::BufferCore b; - TransformableHelper h; - tf2::TransformableCallbackHandle cb_handle = b.addTransformableCallback(boost::bind(&TransformableHelper::callback, &h, _1, _2, _3, _4, _5)); - EXPECT_GT(b.addTransformableRequest(cb_handle, "a", "b", ros::Time(10)), 0U); - - geometry_msgs::TransformStamped t; - for (uint32_t i = 1; i <= 10; ++i) - { - t.header.stamp = ros::Time(i); - t.header.frame_id = "a"; - t.child_frame_id = "b"; - t.transform.rotation.w = 1.0; - b.setTransform(t, "me"); - - if (i < 10) - { - ASSERT_FALSE(h.called); - } - else - { - ASSERT_TRUE(h.called); - } - } -} - -TEST(BufferCore_transformableCallbacks, waitForOldTransform) -{ - tf2::BufferCore b; - TransformableHelper h; - tf2::TransformableCallbackHandle cb_handle = b.addTransformableCallback(boost::bind(&TransformableHelper::callback, &h, _1, _2, _3, _4, _5)); - EXPECT_GT(b.addTransformableRequest(cb_handle, "a", "b", ros::Time(1)), 0U); - - geometry_msgs::TransformStamped t; - for (uint32_t i = 10; i > 0; --i) - { - t.header.stamp = ros::Time(i); - t.header.frame_id = "a"; - t.child_frame_id = "b"; - t.transform.rotation.w = 1.0; - b.setTransform(t, "me"); - - if (i > 1) - { - ASSERT_FALSE(h.called); - } - else - { - ASSERT_TRUE(h.called); - } - } -} - -/* -TEST(tf, Exceptions) -{ - - tf::Transformer mTR(true); - - - Stamped outpose; - - //connectivity when no data - EXPECT_FALSE(mTR.canTransform("parent", "me", ros::Time().fromNSec(10000000))); - try - { - mTR.transformPose("parent",Stamped(btTransform(btQuaternion(0,0,0,1), btVector3(0,0,0)), ros::Time().fromNSec(10000000) , "me"), outpose); - EXPECT_FALSE("ConnectivityException Not Thrown"); - } - catch ( tf::LookupException &ex) - { - EXPECT_TRUE("Lookupgh Exception Caught"); - } - catch (tf::TransformException& ex) - { - printf("%s\n",ex.what()); - EXPECT_FALSE("Other Exception Caught"); - } - - mTR.setTransform( StampedTransform(btTransform(btQuaternion(0,0,0,1), btVector3(0,0,0)), ros::Time().fromNSec(100000), "parent", "me")); - - 
//Extrapolation not valid with one value - EXPECT_FALSE(mTR.canTransform("parent", "me", ros::Time().fromNSec(200000))); - try - { - mTR.transformPose("parent",Stamped<Pose>(btTransform(btQuaternion(0,0,0,1), btVector3(0,0,0)), ros::Time().fromNSec(200000) , "me"), outpose); - EXPECT_FALSE("ExtrapolationException Not Thrown"); - } - catch ( tf::ExtrapolationException &ex) - { - EXPECT_TRUE("Extrapolation Exception Caught"); - } - catch (tf::TransformException& ex) - { - printf("%s\n",ex.what()); - EXPECT_FALSE("Other Exception Caught"); - } - - - mTR.setTransform( StampedTransform (btTransform(btQuaternion(0,0,0,1), btVector3(0,0,0)), ros::Time().fromNSec(300000), "parent", "me")); - - //No extrapolation when interpolating - //inverse list - EXPECT_TRUE(mTR.canTransform("parent", "me", ros::Time().fromNSec(200000))); - try - { - mTR.transformPose("parent",Stamped<Pose>(btTransform(btQuaternion(0,0,0,1), btVector3(0,0,0)), ros::Time().fromNSec(200000) , "me"), outpose); - EXPECT_TRUE("ExtrapolationException Not Thrown"); - } - catch ( tf::ExtrapolationException &ex) - { - EXPECT_FALSE("Extrapolation Exception Caught"); - } - catch (tf::TransformException& ex) - { - printf("%s\n",ex.what()); - EXPECT_FALSE("Other Exception Caught"); - } - - - - //forward list - EXPECT_TRUE(mTR.canTransform("me", "parent", ros::Time().fromNSec(200000))); - try - { - mTR.transformPose("me",Stamped<Pose>(btTransform(btQuaternion(0,0,0,1), btVector3(0,0,0)), ros::Time().fromNSec(200000) , "parent"), outpose); - EXPECT_TRUE("ExtrapolationException Not Thrown"); - } - catch ( tf::ExtrapolationException &ex) - { - EXPECT_FALSE("Extrapolation Exception Caught"); - } - catch (tf::TransformException& ex) - { - printf("%s\n",ex.what()); - EXPECT_FALSE("Other Exception Caught"); - } - - - //Extrapolating backwards - //inverse list - EXPECT_FALSE(mTR.canTransform("parent", "me", ros::Time().fromNSec(1000))); - try - { - mTR.transformPose("parent",Stamped<Pose> (btTransform(btQuaternion(0,0,0,1), btVector3(0,0,0)), ros::Time().fromNSec(1000) , "me"), outpose); - EXPECT_FALSE("ExtrapolationException Not Thrown"); - } - catch ( tf::ExtrapolationException &ex) - { - EXPECT_TRUE("Extrapolation Exception Caught"); - } - catch (tf::TransformException& ex) - { - printf("%s\n",ex.what()); - EXPECT_FALSE("Other Exception Caught"); - } - //forwards list - EXPECT_FALSE(mTR.canTransform("me", "parent", ros::Time().fromNSec(1000))); - try - { - mTR.transformPose("me",Stamped<Pose> (btTransform(btQuaternion(0,0,0,1), btVector3(0,0,0)), ros::Time().fromNSec(1000) , "parent"), outpose); - EXPECT_FALSE("ExtrapolationException Not Thrown"); - } - catch ( tf::ExtrapolationException &ex) - { - EXPECT_TRUE("Extrapolation Exception Caught"); - } - catch (tf::TransformException& ex) - { - printf("%s\n",ex.what()); - EXPECT_FALSE("Other Exception Caught"); - } - - - - // Test extrapolation inverse and forward linkages FORWARD - - //inverse list - EXPECT_FALSE(mTR.canTransform("parent", "me", ros::Time().fromNSec(350000))); - try - { - mTR.transformPose("parent", Stamped<Pose> (btTransform(btQuaternion(0,0,0,1), btVector3(0,0,0)), ros::Time().fromNSec(350000) , "me"), outpose); - EXPECT_FALSE("ExtrapolationException Not Thrown"); - } - catch ( tf::ExtrapolationException &ex) - { - EXPECT_TRUE("Extrapolation Exception Caught"); - } - catch (tf::TransformException& ex) - { - printf("%s\n",ex.what()); - EXPECT_FALSE("Other Exception Caught"); - } - - //forward list - EXPECT_FALSE(mTR.canTransform("parent", "me", ros::Time().fromNSec(350000))); - try - { - mTR.transformPose("me", Stamped<Pose>
(btTransform(btQuaternion(0,0,0,1), btVector3(0,0,0)), ros::Time().fromNSec(350000) , "parent"), outpose); - EXPECT_FALSE("ExtrapolationException Not Thrown"); - } - catch ( tf::ExtrapolationException &ex) - { - EXPECT_TRUE("Extrapolation Exception Caught"); - } - catch (tf::TransformException& ex) - { - printf("%s\n",ex.what()); - EXPECT_FALSE("Other Exception Caught"); - } - - - - -} - - - -TEST(tf, NoExtrapolationExceptionFromParent) -{ - tf::Transformer mTR(true, ros::Duration().fromNSec(1000000)); - - - - mTR.setTransform( StampedTransform (btTransform(btQuaternion(0,0,0,1), btVector3(0,0,0)), ros::Time().fromNSec(1000), "parent", "a")); - mTR.setTransform( StampedTransform (btTransform(btQuaternion(0,0,0,1), btVector3(0,0,0)), ros::Time().fromNSec(10000), "parent", "a")); - - - mTR.setTransform( StampedTransform (btTransform(btQuaternion(0,0,0,1), btVector3(0,0,0)), ros::Time().fromNSec(1000), "parent", "b")); - mTR.setTransform( StampedTransform (btTransform(btQuaternion(0,0,0,1), btVector3(0,0,0)), ros::Time().fromNSec(10000), "parent", "b")); - - mTR.setTransform( StampedTransform (btTransform(btQuaternion(0,0,0,1), btVector3(0,0,0)), ros::Time().fromNSec(1000), "parent's parent", "parent")); - mTR.setTransform( StampedTransform (btTransform(btQuaternion(0,0,0,1), btVector3(0,0,0)), ros::Time().fromNSec(1000), "parent's parent's parent", "parent's parent")); - - mTR.setTransform( StampedTransform (btTransform(btQuaternion(0,0,0,1), btVector3(0,0,0)), ros::Time().fromNSec(10000), "parent's parent", "parent")); - mTR.setTransform( StampedTransform (btTransform(btQuaternion(0,0,0,1), btVector3(0,0,0)), ros::Time().fromNSec(10000), "parent's parent's parent", "parent's parent")); - - Stamped<Point> output; - - try - { - mTR.transformPoint( "b", Stamped<Point>(Point(1,1,1), ros::Time().fromNSec(2000), "a"), output); - } - catch (ExtrapolationException &ex) - { - EXPECT_FALSE("Shouldn't have gotten this exception"); - } - - - -} - - - -TEST(tf, ExtrapolationFromOneValue) -{ - tf::Transformer mTR(true, ros::Duration().fromNSec(1000000)); - - - - mTR.setTransform( StampedTransform (btTransform(btQuaternion(0,0,0,1), btVector3(0,0,0)), ros::Time().fromNSec(1000), "parent", "a")); - - mTR.setTransform( StampedTransform (btTransform(btQuaternion(0,0,0,1), btVector3(0,0,0)), ros::Time().fromNSec(1000), "parent's parent", "parent")); - - - Stamped<Point> output; - - bool excepted = false; - //Past time - try - { - mTR.transformPoint( "parent", Stamped<Point>(Point(1,1,1), ros::Time().fromNSec(10), "a"), output); - } - catch (ExtrapolationException &ex) - { - excepted = true; - } - - EXPECT_TRUE(excepted); - - excepted = false; - //Future one element - try - { - mTR.transformPoint( "parent", Stamped<Point>(Point(1,1,1), ros::Time().fromNSec(100000), "a"), output); - } - catch (ExtrapolationException &ex) - { - excepted = true; - } - - EXPECT_TRUE(excepted); - - //Past multi link - excepted = false; - try - { - mTR.transformPoint( "parent's parent", Stamped<Point>(Point(1,1,1), ros::Time().fromNSec(1), "a"), output); - } - catch (ExtrapolationException &ex) - { - excepted = true; - } - - EXPECT_TRUE(excepted); - - //Future case multi link - excepted = false; - try - { - mTR.transformPoint( "parent's parent", Stamped<Point>(Point(1,1,1), ros::Time().fromNSec(10000), "a"), output); - } - catch (ExtrapolationException &ex) - { - excepted = true; - } - - EXPECT_TRUE(excepted); - - mTR.setTransform( StampedTransform (btTransform(btQuaternion(0,0,0,1), btVector3(0,0,0)), ros::Time().fromNSec(20000), "parent", "a")); - - excepted = false; - try - {
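// Editor's note: with samples now buffered at t = 1000 ns and t = 20000 ns, the
// query at t = 10000 ns below falls inside the buffered interval, so the
// interpolated lookup is expected not to throw (hence EXPECT_FALSE(excepted)).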
- mTR.transformPoint( "parent", Stamped(Point(1,1,1), ros::Time().fromNSec(10000), "a"), output); - } - catch (ExtrapolationException &ex) - { - excepted = true; - } - - EXPECT_FALSE(excepted); - -}; - - - -TEST(tf, getLatestCommonTime) -{ - tf::Transformer mTR(true); - mTR.setTransform( StampedTransform (btTransform(btQuaternion(0,0,0,1), btVector3(0,0,0)), ros::Time().fromNSec(1000), "parent", "a")); - mTR.setTransform( StampedTransform (btTransform(btQuaternion(0,0,0,1), btVector3(0,0,0)), ros::Time().fromNSec(2000), "parent's parent", "parent")); - - //simple case - ros::Time t; - mTR.getLatestCommonTime("a", "parent's parent", t, NULL); - EXPECT_EQ(t, ros::Time().fromNSec(1000)); - - //no connection - EXPECT_EQ(tf::LOOKUP_ERROR, mTR.getLatestCommonTime("a", "not valid", t, NULL)); - EXPECT_EQ(t, ros::Time()); - - //testing with update - mTR.setTransform( StampedTransform (btTransform(btQuaternion(0,0,0,1), btVector3(0,0,0)), ros::Time().fromNSec(3000), "parent", "a")); - mTR.getLatestCommonTime("a", "parent's parent",t, NULL); - EXPECT_EQ(t, ros::Time().fromNSec(2000)); - - //longer chain - mTR.setTransform( StampedTransform (btTransform(btQuaternion(0,0,0,1), btVector3(0,0,0)), ros::Time().fromNSec(4000), "parent", "b")); - mTR.setTransform( StampedTransform (btTransform(btQuaternion(0,0,0,1), btVector3(0,0,0)), ros::Time().fromNSec(3000), "b", "c")); - mTR.setTransform( StampedTransform (btTransform(btQuaternion(0,0,0,1), btVector3(0,0,0)), ros::Time().fromNSec(9000), "c", "d")); - mTR.setTransform( StampedTransform (btTransform(btQuaternion(0,0,0,1), btVector3(0,0,0)), ros::Time().fromNSec(5000), "f", "e")); - - //shared parent - mTR.getLatestCommonTime("a", "b",t, NULL); - EXPECT_EQ(t, ros::Time().fromNSec(3000)); - - //two degrees - mTR.getLatestCommonTime("a", "c", t, NULL); - EXPECT_EQ(t, ros::Time().fromNSec(3000)); - //reversed - mTR.getLatestCommonTime("c", "a", t, NULL); - EXPECT_EQ(t, ros::Time().fromNSec(3000)); - - //three degrees - mTR.getLatestCommonTime("a", "d", t, NULL); - EXPECT_EQ(t, ros::Time().fromNSec(3000)); - //reversed - mTR.getLatestCommonTime("d", "a", t, NULL); - EXPECT_EQ(t, ros::Time().fromNSec(3000)); - - //disconnected tree - mTR.getLatestCommonTime("e", "f", t, NULL); - EXPECT_EQ(t, ros::Time().fromNSec(5000)); - //reversed order - mTR.getLatestCommonTime("f", "e", t, NULL); - EXPECT_EQ(t, ros::Time().fromNSec(5000)); - - - mTR.setExtrapolationLimit(ros::Duration().fromNSec(20000)); - - //check timestamps resulting - tf::Stamped output, output2; - try - { - mTR.transformPoint( "parent", Stamped(Point(1,1,1), ros::Time(), "b"), output); - mTR.transformPoint( "a", ros::Time(),Stamped(Point(1,1,1), ros::Time(), "b"), "c", output2); - } - catch (tf::TransformException &ex) - { - printf("%s\n", ex.what()); - EXPECT_FALSE("Shouldn't get this Exception"); - } - - EXPECT_EQ(output.stamp_, ros::Time().fromNSec(4000)); - EXPECT_EQ(output2.stamp_, ros::Time().fromNSec(3000)); - - - //zero length lookup zero time - ros::Time now1 = ros::Time::now(); - ros::Time time_output; - mTR.getLatestCommonTime("a", "a", time_output, NULL); - EXPECT_LE(now1.toSec(), time_output.toSec()); - EXPECT_LE(time_output.toSec(), ros::Time::now().toSec()); - - -} - -TEST(tf, RepeatedTimes) -{ - Transformer mTR; - mTR.setTransform( StampedTransform (btTransform(btQuaternion(1,0,0), btVector3(0,0,0)), ros::Time().fromNSec(4000), "parent", "b")); - mTR.setTransform( StampedTransform (btTransform(btQuaternion(1,1,0), btVector3(0,0,0)), ros::Time().fromNSec(4000), "parent", "b")); - - 
tf::StampedTransform output; - try{ - mTR.lookupTransform("parent", "b" , ros::Time().fromNSec(4000), output); - EXPECT_TRUE(!std::isnan(output.getOrigin().x())); - EXPECT_TRUE(!std::isnan(output.getOrigin().y())); - EXPECT_TRUE(!std::isnan(output.getOrigin().z())); - EXPECT_TRUE(!std::isnan(output.getRotation().x())); - EXPECT_TRUE(!std::isnan(output.getRotation().y())); - EXPECT_TRUE(!std::isnan(output.getRotation().z())); - EXPECT_TRUE(!std::isnan(output.getRotation().w())); - } - catch (...) - { - EXPECT_FALSE("Exception improperly thrown"); - } - - -} - -TEST(tf, frameExists) -{ - Transformer mTR; - - // test with fully qualified name - EXPECT_FALSE(mTR.frameExists("/b")); - EXPECT_FALSE(mTR.frameExists("/parent")); - EXPECT_FALSE(mTR.frameExists("/other")); - EXPECT_FALSE(mTR.frameExists("/frame")); - - //test with resolving - EXPECT_FALSE(mTR.frameExists("b")); - EXPECT_FALSE(mTR.frameExists("parent")); - EXPECT_FALSE(mTR.frameExists("other")); - EXPECT_FALSE(mTR.frameExists("frame")); - - mTR.setTransform( StampedTransform (btTransform(btQuaternion(1,0,0), btVector3(0,0,0)), ros::Time().fromNSec(4000), "/parent", "/b")); - - // test with fully qualified name - EXPECT_TRUE(mTR.frameExists("/b")); - EXPECT_TRUE(mTR.frameExists("/parent")); - EXPECT_FALSE(mTR.frameExists("/other")); - EXPECT_FALSE(mTR.frameExists("/frame")); - - //Test with resolving - EXPECT_TRUE(mTR.frameExists("b")); - EXPECT_TRUE(mTR.frameExists("parent")); - EXPECT_FALSE(mTR.frameExists("other")); - EXPECT_FALSE(mTR.frameExists("frame")); - - mTR.setTransform( StampedTransform (btTransform(btQuaternion(1,1,0), btVector3(0,0,0)), ros::Time().fromNSec(4000), "/frame", "/other")); - - // test with fully qualified name - EXPECT_TRUE(mTR.frameExists("/b")); - EXPECT_TRUE(mTR.frameExists("/parent")); - EXPECT_TRUE(mTR.frameExists("/other")); - EXPECT_TRUE(mTR.frameExists("/frame")); - - //Test with resolving - EXPECT_TRUE(mTR.frameExists("b")); - EXPECT_TRUE(mTR.frameExists("parent")); - EXPECT_TRUE(mTR.frameExists("other")); - EXPECT_TRUE(mTR.frameExists("frame")); - -} - -TEST(tf, resolve) -{ - //no prefix - EXPECT_STREQ("/id", tf::resolve("","id").c_str()); - //prefix w/o / - EXPECT_STREQ("/asdf/id", tf::resolve("asdf","id").c_str()); - //prefix w / - EXPECT_STREQ("/asdf/id", tf::resolve("/asdf","id").c_str()); - // frame_id w / -> no prefix - EXPECT_STREQ("/id", tf::resolve("asdf","/id").c_str()); - // frame_id w / -> no prefix - EXPECT_STREQ("/id", tf::resolve("/asdf","/id").c_str()); - -} - -TEST(tf, canTransform) -{ - Transformer mTR; - - //confirm zero length list disconnected will return true - EXPECT_TRUE(mTR.canTransform("some_frame","some_frame", ros::Time())); - EXPECT_TRUE(mTR.canTransform("some_frame","some_frame", ros::Time::now())); - - //Create a two link tree between times 10 and 20 - for (int i = 10; i < 20; i++) - { - mTR.setTransform( StampedTransform (btTransform(btQuaternion(1,0,0), btVector3(0,0,0)), ros::Time().fromSec(i), "parent", "child")); - mTR.setTransform( StampedTransform (btTransform(btQuaternion(1,0,0), btVector3(0,0,0)), ros::Time().fromSec(i), "parent", "other_child")); - } - - // four different timestamps related to tf state - ros::Time zero_time = ros::Time().fromSec(0); - ros::Time old_time = ros::Time().fromSec(5); - ros::Time valid_time = ros::Time().fromSec(15); - ros::Time future_time = ros::Time().fromSec(25); - - - //confirm zero length list disconnected will return true - EXPECT_TRUE(mTR.canTransform("some_frame","some_frame", zero_time)); -
EXPECT_TRUE(mTR.canTransform("some_frame","some_frame", old_time)); - EXPECT_TRUE(mTR.canTransform("some_frame","some_frame", valid_time)); - EXPECT_TRUE(mTR.canTransform("some_frame","some_frame", future_time)); - - // Basic API Tests - - //Valid data should pass - EXPECT_TRUE(mTR.canTransform("child", "parent", valid_time)); - EXPECT_TRUE(mTR.canTransform("child", "other_child", valid_time)); - - //zero data should pass - EXPECT_TRUE(mTR.canTransform("child", "parent", zero_time)); - EXPECT_TRUE(mTR.canTransform("child", "other_child", zero_time)); - - //Old data should fail - EXPECT_FALSE(mTR.canTransform("child", "parent", old_time)); - EXPECT_FALSE(mTR.canTransform("child", "other_child", old_time)); - - //Future data should fail - EXPECT_FALSE(mTR.canTransform("child", "parent", future_time)); - EXPECT_FALSE(mTR.canTransform("child", "other_child", future_time)); - - //Same Frame should pass for all times - EXPECT_TRUE(mTR.canTransform("child", "child", zero_time)); - EXPECT_TRUE(mTR.canTransform("child", "child", old_time)); - EXPECT_TRUE(mTR.canTransform("child", "child", valid_time)); - EXPECT_TRUE(mTR.canTransform("child", "child", future_time)); - - // Advanced API Tests - - // Source = Fixed - //zero data in fixed frame should pass - EXPECT_TRUE(mTR.canTransform("child", zero_time, "parent", valid_time, "child")); - EXPECT_TRUE(mTR.canTransform("child", zero_time, "other_child", valid_time, "child")); - //Old data in fixed frame should pass - EXPECT_TRUE(mTR.canTransform("child", old_time, "parent", valid_time, "child")); - EXPECT_TRUE(mTR.canTransform("child", old_time, "other_child", valid_time, "child")); - //valid data in fixed frame should pass - EXPECT_TRUE(mTR.canTransform("child", valid_time, "parent", valid_time, "child")); - EXPECT_TRUE(mTR.canTransform("child", valid_time, "other_child", valid_time, "child")); - //future data in fixed frame should pass - EXPECT_TRUE(mTR.canTransform("child", future_time, "parent", valid_time, "child")); - EXPECT_TRUE(mTR.canTransform("child", future_time, "other_child", valid_time, "child")); - - //transforming through fixed into the past - EXPECT_FALSE(mTR.canTransform("child", valid_time, "parent", old_time, "child")); - EXPECT_FALSE(mTR.canTransform("child", valid_time, "other_child", old_time, "child")); - //transforming through fixed into the future - EXPECT_FALSE(mTR.canTransform("child", valid_time, "parent", future_time, "child")); - EXPECT_FALSE(mTR.canTransform("child", valid_time, "other_child", future_time, "child")); - - // Target = Fixed - //zero data in fixed frame should pass - EXPECT_TRUE(mTR.canTransform("child", zero_time, "parent", valid_time, "parent")); - //Old data in fixed frame should pass - EXPECT_FALSE(mTR.canTransform("child", old_time, "parent", valid_time, "parent")); - //valid data in fixed frame should pass - EXPECT_TRUE(mTR.canTransform("child", valid_time, "parent", valid_time, "parent")); - //future data in fixed frame should pass - EXPECT_FALSE(mTR.canTransform("child", future_time, "parent", valid_time, "parent")); - - //transforming through fixed into the zero - EXPECT_TRUE(mTR.canTransform("child", valid_time, "parent", zero_time, "parent")); - //transforming through fixed into the past - EXPECT_TRUE(mTR.canTransform("child", valid_time, "parent", old_time, "parent")); - //transforming through fixed into the valid - EXPECT_TRUE(mTR.canTransform("child", valid_time, "parent", valid_time, "parent")); - //transforming through fixed into the future - EXPECT_TRUE(mTR.canTransform("child", valid_time, 
"parent", future_time, "parent")); - -} - -TEST(tf, lookupTransform) -{ - Transformer mTR; - //Create a two link tree between times 10 and 20 - for (int i = 10; i < 20; i++) - { - mTR.setTransform( StampedTransform (btTransform(btQuaternion(1,0,0), btVector3(0,0,0)), ros::Time().fromSec(i), "parent", "child")); - mTR.setTransform( StampedTransform (btTransform(btQuaternion(1,0,0), btVector3(0,0,0)), ros::Time().fromSec(i), "parent", "other_child")); - } - - // four different timestamps related to tf state - ros::Time zero_time = ros::Time().fromSec(0); - ros::Time old_time = ros::Time().fromSec(5); - ros::Time valid_time = ros::Time().fromSec(15); - ros::Time future_time = ros::Time().fromSec(25); - - //output - tf::StampedTransform output; - - // Basic API Tests - - try - { - //confirm zero length list disconnected will return true - mTR.lookupTransform("some_frame","some_frame", zero_time, output); - mTR.lookupTransform("some_frame","some_frame", old_time, output); - mTR.lookupTransform("some_frame","some_frame", valid_time, output); - mTR.lookupTransform("some_frame","some_frame", future_time, output); - mTR.lookupTransform("child","child", future_time, output); - mTR.lookupTransform("other_child","other_child", future_time, output); - - //Valid data should pass - mTR.lookupTransform("child", "parent", valid_time, output); - mTR.lookupTransform("child", "other_child", valid_time, output); - - //zero data should pass - mTR.lookupTransform("child", "parent", zero_time, output); - mTR.lookupTransform("child", "other_child", zero_time, output); - } - catch (tf::TransformException &ex) - { - printf("Exception improperly thrown: %s", ex.what()); - EXPECT_FALSE("Exception thrown"); - } - try - { - //Old data should fail - mTR.lookupTransform("child", "parent", old_time, output); - EXPECT_FALSE("Exception should have been thrown"); - } - catch (tf::TransformException) - { - EXPECT_TRUE("Exception Thrown Correctly"); - } - try { - //Future data should fail - mTR.lookupTransform("child", "parent", future_time, output); - EXPECT_FALSE("Exception should have been thrown"); - } - catch (tf::TransformException) - { - EXPECT_TRUE("Exception Thrown Correctly"); - } - - try { - //Same Frame should pass for all times - mTR.lookupTransform("child", "child", zero_time, output); - mTR.lookupTransform("child", "child", old_time, output); - mTR.lookupTransform("child", "child", valid_time, output); - mTR.lookupTransform("child", "child", future_time, output); - - // Advanced API Tests - - // Source = Fixed - //zero data in fixed frame should pass - mTR.lookupTransform("child", zero_time, "parent", valid_time, "child", output); - mTR.lookupTransform("child", zero_time, "other_child", valid_time, "child", output); - //Old data in fixed frame should pass - mTR.lookupTransform("child", old_time, "parent", valid_time, "child", output); - mTR.lookupTransform("child", old_time, "other_child", valid_time, "child", output); - //valid data in fixed frame should pass - mTR.lookupTransform("child", valid_time, "parent", valid_time, "child", output); - mTR.lookupTransform("child", valid_time, "other_child", valid_time, "child", output); - //future data in fixed frame should pass - mTR.lookupTransform("child", future_time, "parent", valid_time, "child", output); - mTR.lookupTransform("child", future_time, "other_child", valid_time, "child", output); - } - catch (tf::TransformException &ex) - { - printf("Exception improperly thrown: %s", ex.what()); - EXPECT_FALSE("Exception incorrectly thrown"); - } - - try { - 
//transforming through fixed into the past - mTR.lookupTransform("child", valid_time, "parent", old_time, "child", output); - EXPECT_FALSE("Exception should have been thrown"); - } - catch (tf::TransformException) - { - EXPECT_TRUE("Exception Thrown Correctly"); - } - - try { - //transforming through fixed into the future - mTR.lookupTransform("child", valid_time, "parent", future_time, "child", output); - EXPECT_FALSE("Exception should have been thrown"); - } - catch (tf::TransformException) - { - EXPECT_TRUE("Exception Thrown Correctly"); - } - - try { - // Target = Fixed - //zero data in fixed frame should pass - mTR.lookupTransform("child", zero_time, "parent", valid_time, "parent", output); - //valid data in fixed frame should pass - mTR.lookupTransform("child", valid_time, "parent", valid_time, "parent", output); - } - catch (tf::TransformException &ex) - { - printf("Exception improperly thrown: %s", ex.what()); - EXPECT_FALSE("Exception incorrectly thrown"); - } - - try { - //Old data in fixed frame should fail - mTR.lookupTransform("child", old_time, "parent", valid_time, "parent", output); - EXPECT_FALSE("Exception should have been thrown"); - } - catch (tf::TransformException) - { - EXPECT_TRUE("Exception Thrown Correctly"); - } - try { - //future data in fixed frame should fail - mTR.lookupTransform("child", future_time, "parent", valid_time, "parent", output); - EXPECT_FALSE("Exception should have been thrown"); - } - catch (tf::TransformException) - { - EXPECT_TRUE("Exception Thrown Correctly"); - } - - try { - //transforming through fixed into the zero - mTR.lookupTransform("child", valid_time, "parent", zero_time, "parent", output); - //transforming through fixed into the past - mTR.lookupTransform("child", valid_time, "parent", old_time, "parent", output); - //transforming through fixed into the valid - mTR.lookupTransform("child", valid_time, "parent", valid_time, "parent", output); - //transforming through fixed into the future - mTR.lookupTransform("child", valid_time, "parent", future_time, "parent", output); - } - catch (tf::TransformException &ex) - { - printf("Exception improperly thrown: %s", ex.what()); - EXPECT_FALSE("Exception improperly thrown"); - } - - - //make sure zero goes to now for zero length - try - { - ros::Time now1 = ros::Time::now(); - - mTR.lookupTransform("a", "a", ros::Time(),output); - EXPECT_LE(now1.toSec(), output.stamp_.toSec()); - EXPECT_LE(output.stamp_.toSec(), ros::Time::now().toSec()); - } - catch (tf::TransformException &ex) - { - printf("Exception improperly thrown: %s", ex.what()); - EXPECT_FALSE("Exception improperly thrown"); - } - -} - - -TEST(tf, getFrameStrings) -{ - Transformer mTR; - - - mTR.setTransform( StampedTransform (btTransform(btQuaternion(0,0,0,1), btVector3(0,0,0)), ros::Time().fromNSec(4000), "/parent", "/b")); - std::vector<std::string> frames_string; - mTR.getFrameStrings(frames_string); - ASSERT_EQ(frames_string.size(), (unsigned)2); - EXPECT_STREQ(frames_string[0].c_str(), std::string("/b").c_str()); - EXPECT_STREQ(frames_string[1].c_str(), std::string("/parent").c_str()); - - - mTR.setTransform( StampedTransform (btTransform(btQuaternion(0,0,0,1), btVector3(0,0,0)), ros::Time().fromNSec(4000), "/frame", "/other")); - - mTR.getFrameStrings(frames_string); - ASSERT_EQ(frames_string.size(), (unsigned)4); - EXPECT_STREQ(frames_string[0].c_str(), std::string("/b").c_str()); - EXPECT_STREQ(frames_string[1].c_str(), std::string("/parent").c_str()); - EXPECT_STREQ(frames_string[2].c_str(), std::string("/other").c_str()); -
EXPECT_STREQ(frames_string[3].c_str(), std::string("/frame").c_str()); - -} - -bool expectInvalidQuaternion(tf::Quaternion q) -{ - try - { - tf::assertQuaternionValid(q); - printf("this should have thrown\n"); - return false; - } - catch (tf::InvalidArgument &ex) - { - return true; - } - catch (...) - { - printf("A different type of exception was expected\n"); - return false; - } - return false; -} - -bool expectValidQuaternion(tf::Quaternion q) -{ - try - { - tf::assertQuaternionValid(q); - } - catch (tf::TransformException &ex) - { - return false; - } - return true; -} - -bool expectInvalidQuaternion(geometry_msgs::Quaternion q) -{ - try - { - tf::assertQuaternionValid(q); - printf("this should have thrown\n"); - return false; - } - catch (tf::InvalidArgument &ex) - { - return true; - } - catch (...) - { - printf("A different type of exception was expected\n"); - return false; - } - return false; -} - -bool expectValidQuaternion(geometry_msgs::Quaternion q) -{ - try - { - tf::assertQuaternionValid(q); - } - catch (tf::TransformException &ex) - { - return false; - } - return true; -} - - -TEST(tf, assertQuaternionValid) -{ - tf::Quaternion q(1,0,0,0); - EXPECT_TRUE(expectValidQuaternion(q)); - q.setX(0); - EXPECT_TRUE(expectInvalidQuaternion(q)); - q.setY(1); - EXPECT_TRUE(expectValidQuaternion(q)); - q.setZ(1); - EXPECT_TRUE(expectInvalidQuaternion(q)); - q.setY(0); - EXPECT_TRUE(expectValidQuaternion(q)); - q.setW(1); - EXPECT_TRUE(expectInvalidQuaternion(q)); - q.setZ(0); - EXPECT_TRUE(expectValidQuaternion(q)); - q.setZ(sqrt(2.0)/2.0); - EXPECT_TRUE(expectInvalidQuaternion(q)); - q.setW(sqrt(2.0)/2.0); - EXPECT_TRUE(expectValidQuaternion(q)); - - q.setZ(sqrt(2.0)/2.0 + 0.01); - EXPECT_TRUE(expectInvalidQuaternion(q)); - - q.setZ(sqrt(2.0)/2.0 - 0.01); - EXPECT_TRUE(expectInvalidQuaternion(q)); - - EXPECT_THROW(tf::assertQuaternionValid(q), tf::InvalidArgument); - // Waiting for gtest 1.1 or later - // EXPECT_NO_THROW(tf::assertQuaternionValid(q)); - //q.setX(0); - //EXPECT_THROW(tf::assertQuaternionValid(q), tf::InvalidArgument); - //q.setY(1); - //EXPECT_NO_THROW(tf::assertQuaternionValid(q)); - -} -TEST(tf, assertQuaternionMsgValid) -{ - geometry_msgs::Quaternion q; - q.x = 1;//others zeroed to start - - EXPECT_TRUE(expectValidQuaternion(q)); - q.x = 0; - EXPECT_TRUE(expectInvalidQuaternion(q)); - q.y = 1; - EXPECT_TRUE(expectValidQuaternion(q)); - q.z = 1; - EXPECT_TRUE(expectInvalidQuaternion(q)); - q.y = 0; - EXPECT_TRUE(expectValidQuaternion(q)); - q.w = 1; - EXPECT_TRUE(expectInvalidQuaternion(q)); - q.z = 0; - EXPECT_TRUE(expectValidQuaternion(q)); - q.z = sqrt(2.0)/2.0; - EXPECT_TRUE(expectInvalidQuaternion(q)); - q.w = sqrt(2.0)/2.0; - EXPECT_TRUE(expectValidQuaternion(q)); - - q.z = sqrt(2.0)/2.0 + 0.01; - EXPECT_TRUE(expectInvalidQuaternion(q)); - - q.z = sqrt(2.0)/2.0 - 0.01; - EXPECT_TRUE(expectInvalidQuaternion(q)); - - - // Waiting for gtest 1.1 or later - // EXPECT_NO_THROW(tf::assertQuaternionValid(q)); - //q.x = 0); - //EXPECT_THROW(tf::assertQuaternionValid(q), tf::InvalidArgument); - //q.y = 1); - //EXPECT_NO_THROW(tf::assertQuaternionValid(q)); - -} - - -TEST(tf2_stamped, OperatorEqualEqual) -{ - btTransform transform0, transform1, transform0a; - transform0.setIdentity(); - transform0a.setIdentity(); - transform1.setIdentity(); - transform1.setOrigin(btVector3(1, 0, 0)); - tf2::StampedTransform stamped_transform_reference(transform0a, ros::Time(), "frame_id", "child_frame_id"); - tf2::StampedTransform stamped_transform0A(transform0, ros::Time(), "frame_id", 
"child_frame_id"); - EXPECT_TRUE(stamped_transform0A == stamped_transform_reference); // Equal - tf2::StampedTransform stamped_transform0B(transform0, ros::Time(), "frame_id_not_equal", "child_frame_id"); - EXPECT_FALSE(stamped_transform0B == stamped_transform_reference); // Different Frame id - tf2::StampedTransform stamped_transform0C(transform0, ros::Time(1.0), "frame_id", "child_frame_id"); - EXPECT_FALSE(stamped_transform0C == stamped_transform_reference); // Different Time - tf2::StampedTransform stamped_transform0D(transform0, ros::Time(1.0), "frame_id_not_equal", "child_frame_id"); - EXPECT_FALSE(stamped_transform0D == stamped_transform_reference); // Different frame id and time - tf2::StampedTransform stamped_transform0E(transform1, ros::Time(), "frame_id_not_equal", "child_frame_id"); - EXPECT_FALSE(stamped_transform0E == stamped_transform_reference); // Different transform, frame id - tf2::StampedTransform stamped_transform0F(transform1, ros::Time(1.0), "frame_id", "child_frame_id"); - EXPECT_FALSE(stamped_transform0F == stamped_transform_reference); // Different transform, time - tf2::StampedTransform stamped_transform0G(transform1, ros::Time(1.0), "frame_id_not_equal", "child_frame_id"); - EXPECT_FALSE(stamped_transform0G == stamped_transform_reference); // Different transform, frame id and time - tf2::StampedTransform stamped_transform0H(transform1, ros::Time(), "frame_id", "child_frame_id"); - EXPECT_FALSE(stamped_transform0H == stamped_transform_reference); // Different transform - - - //Different child_frame_id - tf2::StampedTransform stamped_transform1A(transform0, ros::Time(), "frame_id", "child_frame_id2"); - EXPECT_FALSE(stamped_transform1A == stamped_transform_reference); // Equal - tf2::StampedTransform stamped_transform1B(transform0, ros::Time(), "frame_id_not_equal", "child_frame_id2"); - EXPECT_FALSE(stamped_transform1B == stamped_transform_reference); // Different Frame id - tf2::StampedTransform stamped_transform1C(transform0, ros::Time(1.0), "frame_id", "child_frame_id2"); - EXPECT_FALSE(stamped_transform1C == stamped_transform_reference); // Different Time - tf2::StampedTransform stamped_transform1D(transform0, ros::Time(1.0), "frame_id_not_equal", "child_frame_id2"); - EXPECT_FALSE(stamped_transform1D == stamped_transform_reference); // Different frame id and time - tf2::StampedTransform stamped_transform1E(transform1, ros::Time(), "frame_id_not_equal", "child_frame_id2"); - EXPECT_FALSE(stamped_transform1E == stamped_transform_reference); // Different transform, frame id - tf2::StampedTransform stamped_transform1F(transform1, ros::Time(1.0), "frame_id", "child_frame_id2"); - EXPECT_FALSE(stamped_transform1F == stamped_transform_reference); // Different transform, time - tf2::StampedTransform stamped_transform1G(transform1, ros::Time(1.0), "frame_id_not_equal", "child_frame_id2"); - EXPECT_FALSE(stamped_transform1G == stamped_transform_reference); // Different transform, frame id and time - tf2::StampedTransform stamped_transform1H(transform1, ros::Time(), "frame_id", "child_frame_id2"); - EXPECT_FALSE(stamped_transform1H == stamped_transform_reference); // Different transform - -} - -TEST(tf2_stamped, OperatorEqual) -{ - btTransform pose0, pose1, pose0a; - pose0.setIdentity(); - pose1.setIdentity(); - pose1.setOrigin(btVector3(1, 0, 0)); - tf2::Stamped stamped_pose0(pose0, ros::Time(), "frame_id"); - tf2::Stamped stamped_pose1(pose1, ros::Time(1.0), "frame_id_not_equal"); - EXPECT_FALSE(stamped_pose1 == stamped_pose0); - stamped_pose1 = stamped_pose0; - 
- EXPECT_TRUE(stamped_pose1 == stamped_pose0);
-
-}
- */
-int main(int argc, char **argv){
- testing::InitGoogleTest(&argc, argv);
- ros::Time::init(); //needed for ros::Time::now()
- ros::init(argc, argv, "tf_unittest");
- return RUN_ALL_TESTS();
-}
diff --git a/src/geometry2/test_tf2/test/static_publisher.launch b/src/geometry2/test_tf2/test/static_publisher.launch
deleted file mode 100644
index a8abdf9..0000000
--- a/src/geometry2/test_tf2/test/static_publisher.launch
+++ /dev/null
@@ -1,34 +0,0 @@
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
diff --git a/src/geometry2/test_tf2/test/test_buffer_client.cpp b/src/geometry2/test_tf2/test/test_buffer_client.cpp
deleted file mode 100644
index 9a93977..0000000
--- a/src/geometry2/test_tf2/test/test_buffer_client.cpp
+++ /dev/null
@@ -1,111 +0,0 @@
-/*********************************************************************
-*
-* Software License Agreement (BSD License)
-*
-* Copyright (c) 2009, Willow Garage, Inc.
-* All rights reserved.
-*
-* Redistribution and use in source and binary forms, with or without
-* modification, are permitted provided that the following conditions
-* are met:
-*
-* * Redistributions of source code must retain the above copyright
-* notice, this list of conditions and the following disclaimer.
-* * Redistributions in binary form must reproduce the above
-* copyright notice, this list of conditions and the following
-* disclaimer in the documentation and/or other materials provided
-* with the distribution.
-* * Neither the name of Willow Garage, Inc. nor the names of its
-* contributors may be used to endorse or promote products derived
-* from this software without specific prior written permission.
-*
-* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
-* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
-* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
-* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
-* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
-* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-* POSSIBILITY OF SUCH DAMAGE.
-*
-* Author: Eitan Marder-Eppstein
-*********************************************************************/
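// A sketch, not from the deleted sources: the tf unit tests above fail a
// branch by reaching EXPECT_FALSE("...") after a call that should have thrown,
// and mark success with a no-op EXPECT_TRUE("...") in the catch block. gtest
// can state the same intent directly with EXPECT_THROW. The times here are
// hypothetical placeholders; the real tests derive them from earlier
// setTransform() calls, and an empty Transformer also throws for any lookup.
#include <gtest/gtest.h>
#include <tf/tf.h>

TEST(tf, lookupTransformThrowsSketch)
{
  tf::Transformer mTR;                       // empty buffer: lookups must throw
  tf::StampedTransform output;
  ros::Time valid_time(2.0), old_time(0.5);  // hypothetical stamps
  EXPECT_THROW(mTR.lookupTransform("child", valid_time, "parent", old_time, "child", output),
               tf::TransformException);
}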
-#include
-#include
-#include
-#include
-#include
-#include
-
-static const double EPS = 1e-3;
-
-TEST(tf2_ros, buffer_client)
-{
- tf2_ros::BufferClient client("tf_action");
-
- //make sure that things are set up
- ASSERT_TRUE(client.waitForServer(ros::Duration(4.0)));
-
- geometry_msgs::PointStamped p1;
- p1.header.frame_id = "a";
- p1.header.stamp = ros::Time();
- p1.point.x = 0.0;
- p1.point.y = 0.0;
- p1.point.z = 0.0;
-
- try
- {
- geometry_msgs::PointStamped p2 = client.transform(p1, "b");
- ROS_INFO("p1: (%.2f, %.2f, %.2f), p2: (%.2f, %.2f, %.2f)", p1.point.x,
- p1.point.y, p1.point.z, p2.point.x, p2.point.y, p2.point.z);
-
- EXPECT_NEAR(p2.point.x, -5.0, EPS);
- EXPECT_NEAR(p2.point.y, -6.0, EPS);
- EXPECT_NEAR(p2.point.z, -7.0, EPS);
- }
- catch(tf2::TransformException& ex)
- {
- ROS_ERROR("Failed to transform: %s", ex.what());
- ASSERT_FALSE("Should not get here");
- }
-}
-
-TEST(tf2_ros, buffer_client_different_types)
-{
- tf2_ros::BufferClient client("tf_action");
-
- //make sure that things are set up
- ASSERT_TRUE(client.waitForServer(ros::Duration(4.0)));
-
- tf2::Stamped<KDL::Vector> k1(KDL::Vector(0, 0, 0), ros::Time(), "a");
-
- try
- {
- tf2::Stamped<btVector3> b1;
- client.transform(k1, b1, "b");
- ROS_INFO_STREAM("Bullet: (" << b1[0] << ", " << b1[1] << ", " << b1[2] << ")");
- ROS_INFO_STREAM("KDL: (" << k1[0] << ", " << k1[1] << ", " << k1[2] << ")");
- EXPECT_NEAR(b1[0], -5.0, EPS);
- EXPECT_NEAR(b1[1], -6.0, EPS);
- EXPECT_NEAR(b1[2], -7.0, EPS);
- EXPECT_EQ(b1.frame_id_, "b");
- EXPECT_EQ(k1.frame_id_, "a");
- }
- catch(tf2::TransformException& ex)
- {
- ROS_ERROR("Failed to transform: %s", ex.what());
- ASSERT_FALSE("Should not get here");
- }
-}
-
-int main(int argc, char** argv)
-{
- testing::InitGoogleTest(&argc, argv);
- ros::init(argc, argv, "buffer_client_test");
- return RUN_ALL_TESTS();
-}
-
diff --git a/src/geometry2/test_tf2/test/test_buffer_client.py b/src/geometry2/test_tf2/test/test_buffer_client.py
deleted file mode 100755
index 5433082..0000000
--- a/src/geometry2/test_tf2/test/test_buffer_client.py
+++ /dev/null
@@ -1,88 +0,0 @@
-#! /usr/bin/env python
-#***********************************************************
-#* Software License Agreement (BSD License)
-#*
-#* Copyright (c) 2009, Willow Garage, Inc.
-#* All rights reserved.
-#*
-#* Redistribution and use in source and binary forms, with or without
-#* modification, are permitted provided that the following conditions
-#* are met:
-#*
-#* * Redistributions of source code must retain the above copyright
-#* notice, this list of conditions and the following disclaimer.
-#* * Redistributions in binary form must reproduce the above
-#* copyright notice, this list of conditions and the following
-#* disclaimer in the documentation and/or other materials provided
-#* with the distribution.
-#* * Neither the name of Willow Garage, Inc. nor the names of its
-#* contributors may be used to endorse or promote products derived
-#* from this software without specific prior written permission.
-#*
-#* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-#* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-#* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
-#* FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE -#* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, -#* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, -#* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -#* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -#* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT -#* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN -#* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -#* POSSIBILITY OF SUCH DAMAGE. -#* -#* Author: Eitan Marder-Eppstein -#*********************************************************** -PKG = 'test_tf2' - -import sys -import unittest - -import tf2_py as tf2 -import tf2_ros -import tf2_kdl -import tf2_geometry_msgs -from geometry_msgs.msg import PointStamped -import rospy -import PyKDL - -class TestBufferClient(unittest.TestCase): - def test_buffer_client(self): - client = tf2_ros.BufferClient("tf_action") - client.wait_for_server() - - p1 = PointStamped() - p1.header.frame_id = "a" - p1.header.stamp = rospy.Time(0.0) - p1.point.x = 0.0 - p1.point.y = 0.0 - p1.point.z = 0.0 - - try: - p2 = client.transform(p1, "b") - rospy.loginfo("p1: %s, p2: %s" % (p1, p2)) - except tf2.TransformException as e: - rospy.logerr("%s" % e) - - def test_transform_type(self): - client = tf2_ros.BufferClient("tf_action") - client.wait_for_server() - - p1 = PointStamped() - p1.header.frame_id = "a" - p1.header.stamp = rospy.Time(0.0) - p1.point.x = 0.0 - p1.point.y = 0.0 - p1.point.z = 0.0 - - try: - p2 = client.transform(p1, "b", new_type = PyKDL.Vector) - rospy.loginfo("p1: %s, p2: %s" % (str(p1), str(p2))) - except tf2.TransformException as e: - rospy.logerr("%s" % e) - -if __name__ == '__main__': - rospy.init_node("test_buffer_client") - import rostest - rostest.rosrun(PKG, 'test_buffer_client', TestBufferClient) diff --git a/src/geometry2/test_tf2/test/test_buffer_server.cpp b/src/geometry2/test_tf2/test/test_buffer_server.cpp deleted file mode 100644 index ebd988b..0000000 --- a/src/geometry2/test_tf2/test/test_buffer_server.cpp +++ /dev/null @@ -1,52 +0,0 @@ -/********************************************************************* -* -* Software License Agreement (BSD License) -* -* Copyright (c) 2009, Willow Garage, Inc. -* All rights reserved. -* -* Redistribution and use in source and binary forms, with or without -* modification, are permitted provided that the following conditions -* are met: -* -* * Redistributions of source code must retain the above copyright -* notice, this list of conditions and the following disclaimer. -* * Redistributions in binary form must reproduce the above -* copyright notice, this list of conditions and the following -* disclaimer in the documentation and/or other materials provided -* with the distribution. -* * Neither the name of Willow Garage, Inc. nor the names of its -* contributors may be used to endorse or promote products derived -* from this software without specific prior written permission. -* -* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS -* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE -* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, -* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, -* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT -* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN -* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -* POSSIBILITY OF SUCH DAMAGE. -* -* Author: Eitan Marder-Eppstein -*********************************************************************/ -#include -#include -#include -#include - -int main(int argc, char** argv) -{ - ros::init(argc, argv, "buffer_server_test"); - tf2_ros::Buffer buffer; - tf2_ros::TransformListener tfl(buffer); - tf2_ros::BufferServer server(buffer, "tf_action", false); - - server.start(); - ros::spin(); -} - diff --git a/src/geometry2/test_tf2/test/test_convert.cpp b/src/geometry2/test_tf2/test/test_convert.cpp deleted file mode 100644 index a8b0dfe..0000000 --- a/src/geometry2/test_tf2/test/test_convert.cpp +++ /dev/null @@ -1,118 +0,0 @@ -/********************************************************************* -* -* Software License Agreement (BSD License) -* -* Copyright (c) 2009, Willow Garage, Inc. -* All rights reserved. -* -* Redistribution and use in source and binary forms, with or without -* modification, are permitted provided that the following conditions -* are met: -* -* * Redistributions of source code must retain the above copyright -* notice, this list of conditions and the following disclaimer. -* * Redistributions in binary form must reproduce the above -* copyright notice, this list of conditions and the following -* disclaimer in the documentation and/or other materials provided -* with the distribution. -* * Neither the name of Willow Garage, Inc. nor the names of its -* contributors may be used to endorse or promote products derived -* from this software without specific prior written permission. -* -* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS -* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE -* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, -* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, -* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT -* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN -* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -* POSSIBILITY OF SUCH DAMAGE. 
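// A sketch, not from the deleted sources: a client-side pairing for the buffer
// server above. tf2_ros::BufferClient implements the tf2_ros::BufferInterface,
// so lookupTransform()/canTransform() work over the "tf_action" action
// namespace used throughout these tests; frames "a" and "b" are the ones the
// client tests assume a test fixture has published.
#include <ros/ros.h>
#include <tf2_ros/buffer_client.h>
#include <geometry_msgs/TransformStamped.h>

int main(int argc, char** argv)
{
  ros::init(argc, argv, "buffer_client_sketch");
  tf2_ros::BufferClient client("tf_action");
  if (!client.waitForServer(ros::Duration(4.0)))
    return 1;  // the server from test_buffer_server.cpp is not running
  geometry_msgs::TransformStamped t =
      client.lookupTransform("a", "b", ros::Time(0), ros::Duration(1.0));
  ROS_INFO("a->b translation: (%.2f, %.2f, %.2f)",
           t.transform.translation.x,
           t.transform.translation.y,
           t.transform.translation.z);
  return 0;
}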
-* -* Author: Eitan Marder-Eppstein -*********************************************************************/ -#include -#include -#include -#include -#include -#include -#include - -TEST(tf2Convert, kdlToBullet) -{ - double epsilon = 1e-9; - - tf2::Stamped b(btVector3(1,2,3), ros::Time(), "my_frame"); - - tf2::Stamped b1 = b; - tf2::Stamped k1; - tf2::convert(b1, k1); - - tf2::Stamped b2; - tf2::convert(k1, b2); - - EXPECT_EQ(b.frame_id_, b2.frame_id_); - EXPECT_NEAR(b.stamp_.toSec(), b2.stamp_.toSec(), epsilon); - EXPECT_NEAR(b.x(), b2.x(), epsilon); - EXPECT_NEAR(b.y(), b2.y(), epsilon); - EXPECT_NEAR(b.z(), b2.z(), epsilon); - - - EXPECT_EQ(b1.frame_id_, b2.frame_id_); - EXPECT_NEAR(b1.stamp_.toSec(), b2.stamp_.toSec(), epsilon); - EXPECT_NEAR(b1.x(), b2.x(), epsilon); - EXPECT_NEAR(b1.y(), b2.y(), epsilon); - EXPECT_NEAR(b1.z(), b2.z(), epsilon); -} - -TEST(tf2Convert, kdlBulletROSConversions) -{ - double epsilon = 1e-9; - - tf2::Stamped b1(btVector3(1,2,3), ros::Time(), "my_frame"), b2, b3, b4; - geometry_msgs::PointStamped r1, r2, r3; - tf2::Stamped k1, k2, k3; - - // Do bullet -> self -> bullet -> KDL -> self -> KDL -> ROS -> self -> ROS -> KDL -> bullet -> ROS -> bullet - tf2::convert(b1, b1); - tf2::convert(b1, b2); - tf2::convert(b2, k1); - tf2::convert(k1, k1); - tf2::convert(k1, k2); - tf2::convert(k2, r1); - tf2::convert(r1, r1); - tf2::convert(r1, r2); - tf2::convert(r2, k3); - tf2::convert(k3, b3); - tf2::convert(b3, r3); - tf2::convert(r3, b4); - - EXPECT_EQ(b1.frame_id_, b4.frame_id_); - EXPECT_NEAR(b1.stamp_.toSec(), b4.stamp_.toSec(), epsilon); - EXPECT_NEAR(b1.x(), b4.x(), epsilon); - EXPECT_NEAR(b1.y(), b4.y(), epsilon); - EXPECT_NEAR(b1.z(), b4.z(), epsilon); -} - -TEST(tf2Convert, ConvertTf2Quaternion) -{ - tf2::Quaternion tq(1,2,3,4); - Eigen::Quaterniond eq; - tf2::convert(tq, eq); - - EXPECT_EQ(tq.w(), eq.w()); - EXPECT_EQ(tq.x(), eq.x()); - EXPECT_EQ(tq.y(), eq.y()); - EXPECT_EQ(tq.z(), eq.z()); -} - -int main(int argc, char** argv) -{ - testing::InitGoogleTest(&argc, argv); - return RUN_ALL_TESTS(); -} - diff --git a/src/geometry2/test_tf2/test/test_convert.py b/src/geometry2/test_tf2/test/test_convert.py deleted file mode 100755 index 682bb93..0000000 --- a/src/geometry2/test_tf2/test/test_convert.py +++ /dev/null @@ -1,68 +0,0 @@ -#! /usr/bin/env python -#*********************************************************** -#* Software License Agreement (BSD License) -#* -#* Copyright (c) 2009, Willow Garage, Inc. -#* All rights reserved. -#* -#* Redistribution and use in source and binary forms, with or without -#* modification, are permitted provided that the following conditions -#* are met: -#* -#* * Redistributions of source code must retain the above copyright -#* notice, this list of conditions and the following disclaimer. -#* * Redistributions in binary form must reproduce the above -#* copyright notice, this list of conditions and the following -#* disclaimer in the documentation and/or other materials provided -#* with the distribution. -#* * Neither the name of Willow Garage, Inc. nor the names of its -#* contributors may be used to endorse or promote products derived -#* from this software without specific prior written permission. -#* -#* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -#* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -#* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS -#* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE -#* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, -#* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, -#* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -#* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -#* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT -#* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN -#* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -#* POSSIBILITY OF SUCH DAMAGE. -#* -#* Author: Eitan Marder-Eppstein -#*********************************************************** - -from __future__ import print_function - -PKG = 'test_tf2' - -import sys -import unittest - -import tf2_py as tf2 -import tf2_ros -import tf2_geometry_msgs -from geometry_msgs.msg import PointStamped -import rospy -import tf2_kdl -import PyKDL - -class TestConvert(unittest.TestCase): - def test_convert(self): - p = tf2_ros.Stamped(PyKDL.Vector(1, 2, 3), rospy.Time(), 'my_frame') - print(p) - msg = tf2_ros.convert(p, PointStamped) - print(msg) - p2 = tf2_ros.convert(msg, PyKDL.Vector) - print(p2) - p2[0] = 100 - print(p) - print(p2) - print(p2.header) - -if __name__ == '__main__': - import rostest - rostest.unitrun(PKG, 'test_buffer_client', TestConvert) diff --git a/src/geometry2/test_tf2/test/test_message_filter.cpp b/src/geometry2/test_tf2/test/test_message_filter.cpp deleted file mode 100644 index b36716b..0000000 --- a/src/geometry2/test_tf2/test/test_message_filter.cpp +++ /dev/null @@ -1,346 +0,0 @@ -/* - * Copyright (c) 2008, Willow Garage, Inc. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the Willow Garage, Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. 
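// A sketch flagged as illustration, not from the deleted sources: the convert
// tests above work because tf2::convert() routes through toMsg()/fromMsg()
// overloads found for each type. A hypothetical user type (MyPoint below is
// invented for this example) only needs those two free functions to join the
// conversion chain alongside the bullet/KDL/geometry_msgs types.
#include <tf2/convert.h>
#include <geometry_msgs/PointStamped.h>

struct MyPoint  // hypothetical type, not part of tf2
{
  double x, y, z;
  ros::Time stamp_;
  std::string frame_id_;
};

geometry_msgs::PointStamped toMsg(const MyPoint& in)
{
  geometry_msgs::PointStamped msg;
  msg.header.stamp = in.stamp_;
  msg.header.frame_id = in.frame_id_;
  msg.point.x = in.x;
  msg.point.y = in.y;
  msg.point.z = in.z;
  return msg;
}

void fromMsg(const geometry_msgs::PointStamped& msg, MyPoint& out)
{
  out.stamp_ = msg.header.stamp;
  out.frame_id_ = msg.header.frame_id;
  out.x = msg.point.x;
  out.y = msg.point.y;
  out.z = msg.point.z;
}
// With these in place, tf2::convert(my_point, msg) and the reverse both work.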
- */
-
-/** \author Josh Faust */
-
-
-#include
-#include
-#include
-#include
-#include
-
-#include "ros/ros.h"
-#include "ros/callback_queue.h"
-
-#include
-
-using namespace tf2;
-using namespace tf2_ros;
-
-class Notification
-{
-public:
- Notification(int expected_count) :
- count_(0), expected_count_(expected_count), failure_count_(0)
- {
- }
-
- void notify(const geometry_msgs::PointStamped::ConstPtr& message)
- {
- ++count_;
- }
-
- void failure(const geometry_msgs::PointStamped::ConstPtr& message, FilterFailureReason reason)
- {
- ++failure_count_;
- }
-
- int count_;
- int expected_count_;
- int failure_count_;
-};
-
-TEST(MessageFilter, noTransforms)
-{
- BufferCore bc;
- Notification n(1);
- MessageFilter<geometry_msgs::PointStamped> filter(bc, "frame1", 1, 0);
- filter.registerCallback(boost::bind(&Notification::notify, &n, _1));
-
- geometry_msgs::PointStampedPtr msg(new geometry_msgs::PointStamped);
- msg->header.stamp = ros::Time(1);
- msg->header.frame_id = "frame2";
- filter.add(msg);
-
- EXPECT_EQ(0, n.count_);
-}
-
-TEST(MessageFilter, noTransformsSameFrame)
-{
- BufferCore bc;
- Notification n(1);
- MessageFilter<geometry_msgs::PointStamped> filter(bc, "frame1", 1, 0);
- filter.registerCallback(boost::bind(&Notification::notify, &n, _1));
-
- geometry_msgs::PointStampedPtr msg(new geometry_msgs::PointStamped);
- msg->header.stamp = ros::Time(1);
- msg->header.frame_id = "frame1";
- filter.add(msg);
-
- EXPECT_EQ(1, n.count_);
-}
-
-geometry_msgs::TransformStamped createTransform(Quaternion q, Vector3 v, ros::Time stamp, const std::string& frame1, const std::string& frame2)
-{
- geometry_msgs::TransformStamped t;
- t.header.frame_id = frame1;
- t.child_frame_id = frame2;
- t.header.stamp = stamp;
- t.transform.translation.x = v.x();
- t.transform.translation.y = v.y();
- t.transform.translation.z = v.z();
- t.transform.rotation.x = q.x();
- t.transform.rotation.y = q.y();
- t.transform.rotation.z = q.z();
- t.transform.rotation.w = q.w();
- return t;
-}
-
-TEST(MessageFilter, preexistingTransforms)
-{
- BufferCore bc;
- Notification n(1);
- MessageFilter<geometry_msgs::PointStamped> filter(bc, "frame1", 1, 0);
- filter.registerCallback(boost::bind(&Notification::notify, &n, _1));
-
- ros::Time stamp(1);
- bc.setTransform(createTransform(Quaternion(0,0,0,1), Vector3(1,2,3), stamp, "frame1", "frame2"), "me");
-
- geometry_msgs::PointStampedPtr msg(new geometry_msgs::PointStamped);
- msg->header.stamp = stamp;
- msg->header.frame_id = "frame2";
-
- filter.add(msg);
-
- EXPECT_EQ(1, n.count_);
-}
-
-TEST(MessageFilter, postTransforms)
-{
- BufferCore bc;
- Notification n(1);
- MessageFilter<geometry_msgs::PointStamped> filter(bc, "frame1", 1, 0);
- filter.registerCallback(boost::bind(&Notification::notify, &n, _1));
-
- ros::Time stamp(1);
-
- geometry_msgs::PointStampedPtr msg(new geometry_msgs::PointStamped);
- msg->header.stamp = stamp;
- msg->header.frame_id = "frame2";
-
- filter.add(msg);
-
- EXPECT_EQ(0, n.count_);
-
- bc.setTransform(createTransform(Quaternion(0,0,0,1), Vector3(1,2,3), stamp, "frame1", "frame2"), "me");
-
- EXPECT_EQ(1, n.count_);
-}
-
-TEST(MessageFilter, queueSize)
-{
- BufferCore bc;
- Notification n(10);
- MessageFilter<geometry_msgs::PointStamped> filter(bc, "frame1", 10, 0);
- filter.registerCallback(boost::bind(&Notification::notify, &n, _1));
- filter.registerFailureCallback(boost::bind(&Notification::failure, &n, _1, _2));
-
- ros::Time stamp(1);
-
- for (int i = 0; i < 20; ++i)
- {
- geometry_msgs::PointStampedPtr msg(new geometry_msgs::PointStamped);
- msg->header.stamp = stamp;
- msg->header.frame_id = "frame2";
-
- filter.add(msg);
- }
-
- EXPECT_EQ(0, n.count_);
- EXPECT_EQ(10, n.failure_count_);
-
- bc.setTransform(createTransform(Quaternion(0,0,0,1), Vector3(1,2,3), stamp, "frame1", "frame2"), "me");
-
- EXPECT_EQ(10, n.count_);
-}
-
-TEST(MessageFilter, setTargetFrame)
-{
- BufferCore bc;
- Notification n(1);
- MessageFilter<geometry_msgs::PointStamped> filter(bc, "frame1", 1, 0);
- filter.registerCallback(boost::bind(&Notification::notify, &n, _1));
- filter.setTargetFrame("frame1000");
-
- ros::Time stamp(1);
- bc.setTransform(createTransform(Quaternion(0,0,0,1), Vector3(1,2,3), stamp, "frame1000", "frame2"), "me");
-
- geometry_msgs::PointStampedPtr msg(new geometry_msgs::PointStamped);
- msg->header.stamp = stamp;
- msg->header.frame_id = "frame2";
-
- filter.add(msg);
-
- EXPECT_EQ(1, n.count_);
-}
-
-
-TEST(MessageFilter, multipleTargetFrames)
-{
- BufferCore bc;
- Notification n(1);
- MessageFilter<geometry_msgs::PointStamped> filter(bc, "", 1, 0);
- filter.registerCallback(boost::bind(&Notification::notify, &n, _1));
-
- std::vector<std::string> target_frames;
- target_frames.push_back("frame1");
- target_frames.push_back("frame2");
- filter.setTargetFrames(target_frames);
-
- ros::Time stamp(1);
- bc.setTransform(createTransform(Quaternion(0,0,0,1), Vector3(1,2,3), stamp, "frame1", "frame3"), "me");
-
- geometry_msgs::PointStampedPtr msg(new geometry_msgs::PointStamped);
- msg->header.stamp = stamp;
- msg->header.frame_id = "frame3";
- filter.add(msg);
-
- EXPECT_EQ(0, n.count_); // frame1->frame3 exists, frame2->frame3 does not (yet)
-
- //ros::Time::setNow(ros::Time::now() + ros::Duration(1.0));
-
- bc.setTransform(createTransform(Quaternion(0,0,0,1), Vector3(1,2,3), stamp, "frame1", "frame2"), "me");
-
- EXPECT_EQ(1, n.count_); // frame2->frame3 now exists
-}
-
-TEST(MessageFilter, tolerance)
-{
- ros::Duration offset(0.2);
- BufferCore bc;
- Notification n(1);
- MessageFilter<geometry_msgs::PointStamped> filter(bc, "frame1", 1, 0);
- filter.registerCallback(boost::bind(&Notification::notify, &n, _1));
- filter.setTolerance(offset);
-
- ros::Time stamp(1);
- bc.setTransform(createTransform(Quaternion(0,0,0,1), Vector3(1,2,3), stamp, "frame1", "frame2"), "me");
-
- geometry_msgs::PointStampedPtr msg(new geometry_msgs::PointStamped);
- msg->header.stamp = stamp;
- msg->header.frame_id = "frame2";
- filter.add(msg);
-
- EXPECT_EQ(0, n.count_); //No return due to lack of space for offset
-
- bc.setTransform(createTransform(Quaternion(0,0,0,1), Vector3(1,2,3), stamp + (offset * 1.1), "frame1", "frame2"), "me");
-
- EXPECT_EQ(1, n.count_); // Now have data for the message published earlier
-
- msg->header.stamp = stamp + offset;
- filter.add(msg);
-
- EXPECT_EQ(1, n.count_); // Latest message is off the end of the offset
-}
-
-TEST(MessageFilter, outTheBackFailure)
-{
- BufferCore bc;
- Notification n(1);
- MessageFilter<geometry_msgs::PointStamped> filter(bc, "frame1", 1, 0);
- filter.registerFailureCallback(boost::bind(&Notification::failure, &n, _1, _2));
-
- ros::Time stamp(1);
- bc.setTransform(createTransform(Quaternion(0,0,0,1), Vector3(1,2,3), stamp, "frame1", "frame2"), "me");
- bc.setTransform(createTransform(Quaternion(0,0,0,1), Vector3(1,2,3), stamp + ros::Duration(10000), "frame1", "frame2"), "me");
-
- geometry_msgs::PointStampedPtr msg(new geometry_msgs::PointStamped);
- msg->header.stamp = stamp;
- msg->header.frame_id = "frame2";
- filter.add(msg);
-
- EXPECT_EQ(1, n.failure_count_);
-}
-
-TEST(MessageFilter, outTheBackFailure2)
-{
- BufferCore bc;
- Notification n(1);
- MessageFilter<geometry_msgs::PointStamped> filter(bc, "frame1", 1, 0);
- filter.registerFailureCallback(boost::bind(&Notification::failure, &n, _1, _2));
-
- ros::Time stamp(1);
-
- geometry_msgs::PointStampedPtr msg(new geometry_msgs::PointStamped);
- msg->header.stamp = stamp;
- msg->header.frame_id = "frame2";
- filter.add(msg);
-
- EXPECT_EQ(0, n.count_);
- EXPECT_EQ(0, n.failure_count_);
-
- bc.setTransform(createTransform(Quaternion(0,0,0,1), Vector3(1,2,3), stamp + ros::Duration(10000), "frame1", "frame2"), "me");
-
- EXPECT_EQ(1, n.failure_count_);
-}
-
-TEST(MessageFilter, emptyFrameIDFailure)
-{
- BufferCore bc;
- Notification n(1);
- MessageFilter<geometry_msgs::PointStamped> filter(bc, "frame1", 1, 0);
- filter.registerFailureCallback(boost::bind(&Notification::failure, &n, _1, _2));
-
- geometry_msgs::PointStampedPtr msg(new geometry_msgs::PointStamped);
- msg->header.frame_id = "";
- filter.add(msg);
-
- EXPECT_EQ(1, n.failure_count_);
-}
-
-TEST(MessageFilter, callbackQueue)
-{
- BufferCore bc;
- Notification n(1);
- ros::CallbackQueue queue;
- MessageFilter<geometry_msgs::PointStamped> filter(bc, "frame1", 1, &queue);
- filter.registerCallback(boost::bind(&Notification::notify, &n, _1));
-
- geometry_msgs::PointStampedPtr msg(new geometry_msgs::PointStamped);
- msg->header.stamp = ros::Time(1);
- msg->header.frame_id = "frame1";
- filter.add(msg);
-
- EXPECT_EQ(0, n.count_);
-
- queue.callAvailable();
-
- EXPECT_EQ(1, n.count_);
-}
-
-
-int main(int argc, char** argv)
-{
- testing::InitGoogleTest(&argc, argv);
-
- int ret = RUN_ALL_TESTS();
-
- return ret;
-}
diff --git a/src/geometry2/test_tf2/test/test_static_publisher.cpp b/src/geometry2/test_tf2/test/test_static_publisher.cpp
deleted file mode 100644
index d68eb3d..0000000
--- a/src/geometry2/test_tf2/test/test_static_publisher.cpp
+++ /dev/null
@@ -1,128 +0,0 @@
-/*
- * Copyright (c) 2008, Willow Garage, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of the Willow Garage, Inc. nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
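// A sketch, not from the deleted sources: the message filter tests above push
// messages in by hand with filter.add(). In a node the filter is normally fed
// from a subscriber instead. The topic name "point" is a placeholder, and the
// connect-style constructor signature is an assumption based on the
// tf2_ros::MessageFilter API exercised by these tests.
#include <ros/ros.h>
#include <message_filters/subscriber.h>
#include <tf2_ros/buffer.h>
#include <tf2_ros/transform_listener.h>
#include <tf2_ros/message_filter.h>
#include <geometry_msgs/PointStamped.h>

void pointReady(const geometry_msgs::PointStamped::ConstPtr& msg)
{
  // Called only once the message is transformable into the target frame.
  ROS_INFO("point in %s is transformable into frame1", msg->header.frame_id.c_str());
}

int main(int argc, char** argv)
{
  ros::init(argc, argv, "message_filter_sketch");
  ros::NodeHandle nh;
  tf2_ros::Buffer buffer;
  tf2_ros::TransformListener listener(buffer);
  message_filters::Subscriber<geometry_msgs::PointStamped> sub(nh, "point", 10);
  tf2_ros::MessageFilter<geometry_msgs::PointStamped> filter(sub, buffer, "frame1", 10, nh);
  filter.registerCallback(pointReady);
  ros::spin();
  return 0;
}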
- */ - -#include -#include -#include "tf2/exceptions.h" -#include -#include -#include "rostest/permuter.h" - -#include "tf2_ros/transform_listener.h" - -TEST(StaticTransformPublisher, a_b_different_times) -{ - tf2_ros::Buffer mB; - tf2_ros::TransformListener tfl(mB); - EXPECT_TRUE(mB.canTransform("a", "b", ros::Time(), ros::Duration(1.0))); - EXPECT_TRUE(mB.canTransform("a", "b", ros::Time(100), ros::Duration(1.0))); - EXPECT_TRUE(mB.canTransform("a", "b", ros::Time(1000), ros::Duration(1.0))); -}; - -TEST(StaticTransformPublisher, a_c_different_times) -{ - tf2_ros::Buffer mB; - tf2_ros::TransformListener tfl(mB); - EXPECT_TRUE(mB.canTransform("a", "c", ros::Time(), ros::Duration(1.0))); - EXPECT_TRUE(mB.canTransform("a", "c", ros::Time(100), ros::Duration(1.0))); - EXPECT_TRUE(mB.canTransform("a", "c", ros::Time(1000), ros::Duration(1.0))); -}; - -TEST(StaticTransformPublisher, a_d_different_times) -{ - tf2_ros::Buffer mB; - tf2_ros::TransformListener tfl(mB); - geometry_msgs::TransformStamped ts; - ts.transform.rotation.w = 1; - ts.header.frame_id = "c"; - ts.header.stamp = ros::Time(10.0); - ts.child_frame_id = "d"; - - // make sure listener has populated - EXPECT_TRUE(mB.canTransform("a", "c", ros::Time(), ros::Duration(1.0))); - EXPECT_TRUE(mB.canTransform("a", "c", ros::Time(100), ros::Duration(1.0))); - EXPECT_TRUE(mB.canTransform("a", "c", ros::Time(1000), ros::Duration(1.0))); - - mB.setTransform(ts, "authority"); - //printf("%s\n", mB.allFramesAsString().c_str()); - EXPECT_TRUE(mB.canTransform("c", "d", ros::Time(10), ros::Duration(0))); - - EXPECT_TRUE(mB.canTransform("a", "d", ros::Time(), ros::Duration(0))); - EXPECT_FALSE(mB.canTransform("a", "d", ros::Time(1), ros::Duration(0))); - EXPECT_TRUE(mB.canTransform("a", "d", ros::Time(10), ros::Duration(0))); - EXPECT_FALSE(mB.canTransform("a", "d", ros::Time(100), ros::Duration(0))); -}; - -TEST(StaticTransformPublisher, multiple_parent_test) -{ - tf2_ros::Buffer mB; - tf2_ros::TransformListener tfl(mB); - tf2_ros::StaticTransformBroadcaster stb; - geometry_msgs::TransformStamped ts; - ts.transform.rotation.w = 1; - ts.header.frame_id = "c"; - ts.header.stamp = ros::Time(10.0); - ts.child_frame_id = "d"; - - stb.sendTransform(ts); - - // make sure listener has populated - EXPECT_TRUE(mB.canTransform("a", "d", ros::Time(), ros::Duration(1.0))); - EXPECT_TRUE(mB.canTransform("a", "d", ros::Time(100), ros::Duration(1.0))); - EXPECT_TRUE(mB.canTransform("a", "d", ros::Time(1000), ros::Duration(1.0))); - - // Publish new transform with child 'd', should replace old one in static tf - ts.header.frame_id = "new_parent"; - stb.sendTransform(ts); - ts.child_frame_id = "other_child"; - stb.sendTransform(ts); - ts.child_frame_id = "other_child2"; - stb.sendTransform(ts); - - EXPECT_TRUE(mB.canTransform("new_parent", "d", ros::Time(), ros::Duration(1.0))); - EXPECT_TRUE(mB.canTransform("new_parent", "other_child", ros::Time(), ros::Duration(1.0))); - EXPECT_TRUE(mB.canTransform("new_parent", "other_child2", ros::Time(), ros::Duration(1.0))); - EXPECT_FALSE(mB.canTransform("a", "d", ros::Time(), ros::Duration(1.0))); -}; - -TEST(StaticTransformPublisher, tf_from_param_server_valid) -{ - // This TF is loaded from the parameter server; ensure it is valid. 
- tf2_ros::Buffer mB; - tf2_ros::TransformListener tfl(mB); - EXPECT_TRUE(mB.canTransform("robot_calibration", "world", ros::Time(), ros::Duration(1.0))); - EXPECT_TRUE(mB.canTransform("robot_calibration", "world", ros::Time(100), ros::Duration(1.0))); - EXPECT_TRUE(mB.canTransform("robot_calibration", "world", ros::Time(1000), ros::Duration(1.0))); -} - -int main(int argc, char **argv){ - testing::InitGoogleTest(&argc, argv); - ros::init(argc, argv, "tf_unittest"); - return RUN_ALL_TESTS(); -} diff --git a/src/geometry2/test_tf2/test/test_static_publisher.py b/src/geometry2/test_tf2/test/test_static_publisher.py deleted file mode 100755 index c29e87a..0000000 --- a/src/geometry2/test_tf2/test/test_static_publisher.py +++ /dev/null @@ -1,95 +0,0 @@ -#! /usr/bin/env python -#*********************************************************** -#* Software License Agreement (BSD License) -#* -#* Copyright (c) 2016, Felix Duvallet -#* All rights reserved. -#* -#* Redistribution and use in source and binary forms, with or without -#* modification, are permitted provided that the following conditions -#* are met: -#* -#* * Redistributions of source code must retain the above copyright -#* notice, this list of conditions and the following disclaimer. -#* * Redistributions in binary form must reproduce the above -#* copyright notice, this list of conditions and the following -#* disclaimer in the documentation and/or other materials provided -#* with the distribution. -#* * Neither the name of Willow Garage, Inc. nor the names of its -#* contributors may be used to endorse or promote products derived -#* from this software without specific prior written permission. -#* -#* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -#* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -#* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS -#* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE -#* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, -#* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, -#* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -#* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -#* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT -#* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN -#* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -#* POSSIBILITY OF SUCH DAMAGE. -#* -#* Author: Felix Duvallet -#*********************************************************** - -import subprocess -import unittest - -import rospy -PKG = 'test_tf2' - - -class TestStaticPublisher(unittest.TestCase): - """ - These tests ensure the static transform publisher dies gracefully when - provided with an invalid (or non-existent) transform parameter. - - These tests are started by the static_publisher.launch, which loads - parameters into the param server. - - We check the output to make sure the correct error is occurring, since the - return code is always -1 (255). - - Note that this *could* cause a problem if a valid TF is stored in the param - server for one of the names; in this case the subprocess would never return - and the test would run forever. - """ - - def test_publisher_no_args(self): - # Start the publisher with no argument. 
- cmd = 'rosrun tf2_ros static_transform_publisher' - with self.assertRaises(subprocess.CalledProcessError) as cm: - ret = subprocess.check_output( - cmd.split(' '), stderr=subprocess.STDOUT) - self.assertEqual(255, cm.exception.returncode) - self.assertIn('not having the right number of arguments', - cm.exception.output) - - def test_publisher_nonexistent_param(self): - # Here there is no paramater by that name. - cmd = 'rosrun tf2_ros static_transform_publisher /test_tf2/tf_null' - with self.assertRaises(subprocess.CalledProcessError) as cm: - ret = subprocess.check_output( - cmd.split(' '), stderr=subprocess.STDOUT) - - self.assertEqual(255, cm.exception.returncode) - self.assertIn('Could not read TF', cm.exception.output) - - def test_publisher_invalid_param(self): - # Here there is an invalid parameter stored in the parameter server. - cmd = 'rosrun tf2_ros static_transform_publisher /test_tf2/tf_invalid' - with self.assertRaises(subprocess.CalledProcessError) as cm: - ret = subprocess.check_output( - cmd.split(' '), stderr=subprocess.STDOUT) - - self.assertEqual(255, cm.exception.returncode) - self.assertIn('Could not validate XmlRpcC', cm.exception.output) - - -if __name__ == '__main__': - rospy.init_node("test_static_publisher_py") - import rostest - rostest.rosrun(PKG, 'test_static_publisher_py', TestStaticPublisher) diff --git a/src/geometry2/test_tf2/test/test_tf2_bullet.cpp b/src/geometry2/test_tf2/test/test_tf2_bullet.cpp deleted file mode 100644 index af81f5f..0000000 --- a/src/geometry2/test_tf2/test/test_tf2_bullet.cpp +++ /dev/null @@ -1,104 +0,0 @@ -/* - * Copyright (c) 2008, Willow Garage, Inc. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the Willow Garage, Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. 
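// A sketch, not from the deleted sources: the Python tests above exercise the
// static_transform_publisher CLI; the same static transform can be latched
// from code with tf2_ros::StaticTransformBroadcaster. Frame names follow
// test_tf_valid.yaml; the identity rotation and zero translation are
// placeholder values.
#include <ros/ros.h>
#include <tf2_ros/static_transform_broadcaster.h>
#include <geometry_msgs/TransformStamped.h>

int main(int argc, char** argv)
{
  ros::init(argc, argv, "static_broadcaster_sketch");
  tf2_ros::StaticTransformBroadcaster broadcaster;
  geometry_msgs::TransformStamped ts;
  ts.header.stamp = ros::Time::now();
  ts.header.frame_id = "world";
  ts.child_frame_id = "robot_calibration";
  ts.transform.rotation.w = 1.0;  // identity rotation
  broadcaster.sendTransform(ts);  // static transforms are published latched
  ros::spin();
  return 0;
}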
- */ - -/** \author Wim Meeussen */ - - -#include -#include -#include -#include -#include - -tf2_ros::Buffer* tf_buffer; -static const double EPS = 1e-3; - -TEST(TfBullet, Transform) -{ - tf2::Stamped v1(btTransform(btQuaternion(1,0,0,0), btVector3(1,2,3)), ros::Time(2.0), "A"); - - // simple api - btTransform v_simple = tf_buffer->transform(v1, "B", ros::Duration(2.0)); - EXPECT_NEAR(v_simple.getOrigin().getX(), -9, EPS); - EXPECT_NEAR(v_simple.getOrigin().getY(), 18, EPS); - EXPECT_NEAR(v_simple.getOrigin().getZ(), 27, EPS); - - // advanced api - btTransform v_advanced = tf_buffer->transform(v1, "B", ros::Time(2.0), - "B", ros::Duration(3.0)); - EXPECT_NEAR(v_advanced.getOrigin().getX(), -9, EPS); - EXPECT_NEAR(v_advanced.getOrigin().getY(), 18, EPS); - EXPECT_NEAR(v_advanced.getOrigin().getZ(), 27, EPS); -} - - - -TEST(TfBullet, Vector) -{ - tf2::Stamped v1(btVector3(1,2,3), ros::Time(2.0), "A"); - - // simple api - btVector3 v_simple = tf_buffer->transform(v1, "B", ros::Duration(2.0)); - EXPECT_NEAR(v_simple.getX(), -9, EPS); - EXPECT_NEAR(v_simple.getY(), 18, EPS); - EXPECT_NEAR(v_simple.getZ(), 27, EPS); - - // advanced api - btVector3 v_advanced = tf_buffer->transform(v1, "B", ros::Time(2.0), - "B", ros::Duration(3.0)); - EXPECT_NEAR(v_advanced.getX(), -9, EPS); - EXPECT_NEAR(v_advanced.getY(), 18, EPS); - EXPECT_NEAR(v_advanced.getZ(), 27, EPS); -} - - - - -int main(int argc, char **argv){ - testing::InitGoogleTest(&argc, argv); - ros::init(argc, argv, "test"); - ros::NodeHandle n; - - tf_buffer = new tf2_ros::Buffer(); - - // populate buffer - geometry_msgs::TransformStamped t; - t.transform.translation.x = 10; - t.transform.translation.y = 20; - t.transform.translation.z = 30; - t.transform.rotation.x = 1; - t.header.stamp = ros::Time(2.0); - t.header.frame_id = "A"; - t.child_frame_id = "B"; - tf_buffer->setTransform(t, "test"); - - int ret = RUN_ALL_TESTS(); - delete tf_buffer; - return ret; -} diff --git a/src/geometry2/test_tf2/test/test_tf2_bullet.launch b/src/geometry2/test_tf2/test/test_tf2_bullet.launch deleted file mode 100644 index 07a438d..0000000 --- a/src/geometry2/test_tf2/test/test_tf2_bullet.launch +++ /dev/null @@ -1,3 +0,0 @@ - - - diff --git a/src/geometry2/test_tf2/test/test_tf_invalid.yaml b/src/geometry2/test_tf2/test/test_tf_invalid.yaml deleted file mode 100644 index 0cb8bb6..0000000 --- a/src/geometry2/test_tf2/test/test_tf_invalid.yaml +++ /dev/null @@ -1,7 +0,0 @@ -# This is not a valid TF. - -child_frame_id: calibration -some_data: - - 1 - - 2 - - 3 diff --git a/src/geometry2/test_tf2/test/test_tf_valid.yaml b/src/geometry2/test_tf2/test/test_tf_valid.yaml deleted file mode 100644 index 33a8b2d..0000000 --- a/src/geometry2/test_tf2/test/test_tf_valid.yaml +++ /dev/null @@ -1,17 +0,0 @@ -header: - seq: 0 - stamp: - secs: 1619 - nsecs: 601000000 - frame_id: world -child_frame_id: robot_calibration -transform: - translation: - x: 0.75 - y: 0.5 - z: 1.0 - rotation: - x: -0.62908825919 - y: 0.210952809338 - z: 0.640171445021 - w: 0.38720459109 diff --git a/src/geometry2/test_tf2/test/test_utils.cpp b/src/geometry2/test_tf2/test/test_utils.cpp deleted file mode 100644 index 144a452..0000000 --- a/src/geometry2/test_tf2/test/test_utils.cpp +++ /dev/null @@ -1,99 +0,0 @@ -// Copyright 2014 Open Source Robotics Foundation, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include
-#include
-#include
-#include
-#include
-
-double epsilon = 1e-9;
-
-template <typename T>
-void yprTest(const T& t, double yaw1, double pitch1, double roll1) {
- double yaw2, pitch2, roll2;
-
- tf2::getEulerYPR(t, yaw2, pitch2, roll2);
-
- EXPECT_NEAR(yaw1, yaw2, epsilon);
- EXPECT_NEAR(pitch1, pitch2, epsilon);
- EXPECT_NEAR(roll1, roll2, epsilon);
- EXPECT_NEAR(tf2::getYaw(t), yaw1, epsilon);
-}
-
-TEST(tf2Utils, yaw)
-{
- double x, y, z, w;
- x = 0.4;
- y = 0.5;
- z = 0.6;
- w = 0.7;
-
- double yaw1, pitch1, roll1;
- // Compute results one way with KDL
- KDL::Rotation::Quaternion(x, y, z, w).GetRPY(roll1, pitch1, yaw1);
- {
- // geometry_msgs::Quaternion
- geometry_msgs::Quaternion q;
- q.x = x; q.y = y; q.z = z; q.w = w;
- yprTest(q, yaw1, pitch1, roll1);
-
- // geometry_msgs::QuaternionStamped
- geometry_msgs::QuaternionStamped qst;
- qst.quaternion = q;
- yprTest(qst, yaw1, pitch1, roll1);
- }
-
-
- {
- // tf2::Quaternion
- tf2::Quaternion q(x, y, z, w);
- yprTest(q, yaw1, pitch1, roll1);
-
- // tf2::Stamped<tf2::Quaternion>
- tf2::Stamped<tf2::Quaternion> sq;
- sq.setData(q);
- yprTest(sq, yaw1, pitch1, roll1);
- }
-}
-
-TEST(tf2Utils, identity)
-{
- geometry_msgs::Transform t;
- t.translation.x = 0.1;
- t.translation.y = 0.2;
- t.translation.z = 0.3;
- t.rotation.x = 0.4;
- t.rotation.y = 0.5;
- t.rotation.z = 0.6;
- t.rotation.w = 0.7;
-
- // Test identity
- t = tf2::getTransformIdentity<geometry_msgs::Transform>();
-
- EXPECT_EQ(t.translation.x, 0);
- EXPECT_EQ(t.translation.y, 0);
- EXPECT_EQ(t.translation.z, 0);
- EXPECT_EQ(t.rotation.x, 0);
- EXPECT_EQ(t.rotation.y, 0);
- EXPECT_EQ(t.rotation.z, 0);
- EXPECT_EQ(t.rotation.w, 1);
-}
-
-int main(int argc, char** argv)
-{
- testing::InitGoogleTest(&argc, argv);
- return RUN_ALL_TESTS();
-}
-
diff --git a/src/geometry2/tf2/CHANGELOG.rst b/src/geometry2/tf2/CHANGELOG.rst
deleted file mode 100644
index 43bc67b..0000000
--- a/src/geometry2/tf2/CHANGELOG.rst
+++ /dev/null
@@ -1,451 +0,0 @@
-^^^^^^^^^^^^^^^^^^^^^^^^^
-Changelog for package tf2
-^^^^^^^^^^^^^^^^^^^^^^^^^
-
-0.6.7 (2020-03-09)
-------------------
-* [windows][melodic] more portable fixes. (`#443 `_)
-* [Windows][melodic-devel] Fix install locations (`#442 `_)
-* Revert "rework Eigen functions namespace hack" (`#436 `_)
-* Contributors: Sean Yen, Tully Foote
-
-0.6.6 (2020-01-09)
-------------------
-* Fix compile error missing ros/ros.h (`#400 `_)
-  * ros/ros.h -> ros/time.h
-  * tf2_eigen doesn't need ros/ros.h
-* rework Eigen functions namespace hack
-* separate transform function declarations into transform_functions.h
-* use ROS_DEPRECATED macro for portability (`#362 `_)
-* Remove `signals` from find_package(Boost COMPONENTS ...).
-* Remove legacy inclusion in CMakeLists of tf2.
-* Contributors: James Xu, Maarten de Vries, Marco Tranzatto, Shane Loretz, Tully Foote - -0.6.5 (2018-11-16) ------------------- - -0.6.4 (2018-11-06) ------------------- -* Resolved pedantic warnings -* fix issue `#315 `_ -* fixed nan interpoaltion issue -* Contributors: Keller Fabian Rudolf (CC-AD/EYC3), Kuang Fangjun, Martin Ganeff - -0.6.3 (2018-07-09) ------------------- -* preserve constness of const argument to avoid warnings (`#307 `_) -* Change comment style for unused doxygen (`#297 `_) -* Contributors: Jacob Perron, Tully Foote - -0.6.2 (2018-05-02) ------------------- - -0.6.1 (2018-03-21) ------------------- -* Replaced deprecated console_bridge macro calls (tests) -* Contributors: Johannes Meyer, Tully Foote - -0.6.0 (2018-03-21) ------------------- -* Replaced deprecated log macro calls -* Contributors: Tim Rakowski, Tully Foote - -0.5.17 (2018-01-01) -------------------- -* Merge pull request `#278 `_ from ros/chain_as_vec_test2 - Clean up results of _chainAsVector -* Simple test to check BufferCore::_chainAsVector. - Unit tests for walk and chain passing now. -* Merge pull request `#267 `_ from at-wat/speedup-timecache-for-large-buffer - Speed-up TimeCache search for large cache time. -* Merge pull request `#265 `_ from vsherrod/interpolation_fix - Corrected time output on interpolation function. -* Add time_interval option to tf2 speed-test. -* Merge pull request `#269 `_ from ros/frames_as_yaml - allFrameAsYaml consistently outputting a dict -* resolve https://github.com/ros/geometry/pull/153 at the source instead of needing the workaround. -* Speed-up TimeCache search for large cache time. -* Modified tests for correct time in interpolation to existing tests. -* Corrected time output on interpolation function. - Added unit test to check for this. -* Contributors: Atsushi Watanabe, Miguel Prada, Tully Foote, Vallan Sherrod - -0.5.16 (2017-07-14) -------------------- -* remove explicit templating to standardize on overloading. But provide backwards compatibility with deprecation. -* Merge pull request `#144 `_ from clearpathrobotics/dead_lock_fix - Solve a bug that causes a deadlock in MessageFilter -* Resolve 2 places where the error_msg would not be propogated. - Fixes `#198 `_ -* Remove generate_rand_vectors() from a number of tests. (`#227 `_) -* fixing include directory order to support overlays (`#231 `_) -* replaced dependencies on tf2_msgs_gencpp by exported dependencies -* Document the lifetime of the returned reference for getFrameId getTimestamp -* relax normalization tolerance. `#196 `_ was too strict for some use cases. (`#220 `_) -* Solve a bug that causes a deadlock in MessageFilter -* Contributors: Adel Fakih, Chris Lalancette, Christopher Wecht, Tully Foote, dhood - -0.5.15 (2017-01-24) -------------------- - -0.5.14 (2017-01-16) -------------------- -* fixes `#194 `_ check for quaternion normalization before inserting into storage (`#196 `_) - * check for quaternion normalization before inserting into storage - * Add test to check for transform failure on invalid quaternion input -* updating getAngleShortestPath() (`#187 `_) -* Move internal cache functions into a namespace - Fixes https://github.com/ros/geometry2/issues/175 -* Link properly to convert.h -* Landing page for tf2 describing the conversion interface -* Fix comment on BufferCore::MAX_GRAPH_DEPTH. 
-* Contributors: Jackie Kay, Phil Osteen, Tully Foote, alex, gavanderhoorn - -0.5.13 (2016-03-04) -------------------- - -0.5.12 (2015-08-05) -------------------- -* add utilities to get yaw, pitch, roll and identity transform -* provide more conversions between types - The previous conversion always assumed that it was converting a - non-message type to a non-message type. Now, one, both or none - can be a message or a non-message. -* Contributors: Vincent Rabaud - -0.5.11 (2015-04-22) -------------------- - -0.5.10 (2015-04-21) -------------------- -* move lct_cache into function local memoryfor `#92 `_ -* Clean up range checking. Re: `#92 `_ -* Fixed chainToVector -* release lock before possibly invoking user callbacks. Fixes `#91 `_ -* Contributors: Jackie Kay, Tully Foote - -0.5.9 (2015-03-25) ------------------- -* fixing edge case where two no frame id lookups matched in getLatestCommonTime -* Contributors: Tully Foote - -0.5.8 (2015-03-17) ------------------- -* change from default argument to overload to avoid linking issue `#84 `_ -* remove useless Makefile files -* Remove unused assignments in max/min functions -* change _allFramesAsDot() -> _allFramesAsDot(double current_time) -* Contributors: Jon Binney, Kei Okada, Tully Foote, Vincent Rabaud - -0.5.7 (2014-12-23) ------------------- - -0.5.6 (2014-09-18) ------------------- - -0.5.5 (2014-06-23) ------------------- -* convert to use console bridge from upstream debian package https://github.com/ros/rosdistro/issues/4633 -* Fix format string -* Contributors: Austin, Tully Foote - -0.5.4 (2014-05-07) ------------------- -* switch to boost signals2 following `ros/ros_comm#267 `_, blocking `ros/geometry#23 `_ -* Contributors: Tully Foote - -0.5.3 (2014-02-21) ------------------- - -0.5.2 (2014-02-20) ------------------- - -0.5.1 (2014-02-14) ------------------- - -0.5.0 (2014-02-14) ------------------- - -0.4.10 (2013-12-26) -------------------- -* updated error message. fixes `#38 `_ -* tf2: add missing console bridge include directories (fix `#48 `_) -* Fix const correctness of tf2::Vector3 rotate() method - The method does not modify the class thus should be const. - This has already been fixed in Bullet itself. -* Contributors: Dirk Thomas, Timo Rohling, Tully Foote - -0.4.9 (2013-11-06) ------------------- - -0.4.8 (2013-11-06) ------------------- -* moving python documentation to tf2_ros from tf2 to follow the code -* removing legacy rospy dependency. implementation removed in 0.4.0 fixes `#27 `_ - -0.4.7 (2013-08-28) ------------------- -* switching to use allFramesAsStringNoLock inside of getLatestCommonTime and walkToParent and locking in public API _getLatestCommonTime instead re `#23 `_ -* Fixes a crash in tf's view_frames related to dot code generation in allFramesAsDot - -0.4.6 (2013-08-28) ------------------- -* cleaner fix for `#19 `_ -* fix pointer initialization. Fixes `#19 `_ -* fixes `#18 `_ for hydro -* package.xml: corrected typo in description - -0.4.5 (2013-07-11) ------------------- -* adding _chainAsVector method for https://github.com/ros/geometry/issues/18 -* adding _allFramesAsDot for backwards compatability https://github.com/ros/geometry/issues/18 - -0.4.4 (2013-07-09) ------------------- -* making repo use CATKIN_ENABLE_TESTING correctly and switching rostest to be a test_depend with that change. 
-* tf2: Fixes a warning on OS X, but generally safer - Replaces the use of pointers with shared_ptrs, - this allows the polymorphism and makes it so that - the compiler doesn't yell at us about calling - delete on a class with a public non-virtual - destructor. -* tf2: Fixes compiler warnings on OS X - This exploited a gcc specific extension and is not - C++ standard compliant. There used to be a "fix" - for OS X which no longer applies. I think it is ok - to use this as an int instead of a double, but - another way to fix it would be to use a define. -* tf2: Fixes linkedit errors on OS X - -0.4.3 (2013-07-05) ------------------- - -0.4.2 (2013-07-05) ------------------- -* adding getCacheLength() to parallel old tf API -* removing legacy static const variable MAX_EXTRAPOLATION_DISTANCE copied from tf unnecessesarily - -0.4.1 (2013-07-05) ------------------- -* adding old style callback notifications to BufferCore to enable backwards compatability of message filters -* exposing dedicated thread logic in BufferCore and checking in Buffer -* more methods to expose, and check for empty cache before getting latest timestamp -* adding methods to enable backwards compatability for passing through to tf::Transformer - -0.4.0 (2013-06-27) ------------------- -* splitting rospy dependency into tf2_py so tf2 is pure c++ library. -* switching to console_bridge from rosconsole -* moving convert methods back into tf2 because it does not have any ros dependencies beyond ros::Time which is already a dependency of tf2 -* Cleaning up unnecessary dependency on roscpp -* Cleaning up packaging of tf2 including: - removing unused nodehandle - fixing overmatch on search and replace - cleaning up a few dependencies and linking - removing old backup of package.xml - making diff minimally different from tf version of library -* suppressing bullet LinearMath copy inside of tf2, so it will not collide, and should not be used externally. -* Restoring test packages and bullet packages. - reverting 3570e8c42f9b394ecbfd9db076b920b41300ad55 to get back more of the packages previously implemented - reverting 04cf29d1b58c660fdc999ab83563a5d4b76ab331 to fix `#7 `_ -* fixing includes in unit tests -* Make PythonLibs find_package python2 specific - On systems with python 3 installed and default, find_package(PythonLibs) will find the python 3 paths and libraries. However, the c++ include structure seems to be different in python 3 and tf2 uses includes that are no longer present or deprecated. - Until the includes are made to be python 3 compliant, we should specify that the version of python found must be python 2. 
- -0.3.6 (2013-03-03) ------------------- - -0.3.5 (2013-02-15 14:46) ------------------------- -* 0.3.4 -> 0.3.5 - -0.3.4 (2013-02-15 13:14) ------------------------- -* 0.3.3 -> 0.3.4 -* moving LinearMath includes to include/tf2 - -0.3.3 (2013-02-15 11:30) ------------------------- -* 0.3.2 -> 0.3.3 -* fixing include installation of tf2 - -0.3.2 (2013-02-15 00:42) ------------------------- -* 0.3.1 -> 0.3.2 -* fixed missing include export & tf2_ros dependency - -0.3.1 (2013-02-14) ------------------- -* 0.3.0 -> 0.3.1 -* fixing PYTHON installation directory - -0.3.0 (2013-02-13) ------------------- -* switching to version 0.3.0 -* adding setup.py to tf2 package -* fixed tf2 exposing python functionality -* removed line that was killing tf2_ros.so -* fixing catkin message dependencies -* removing packages with missing deps -* adding missing package.xml -* adding missing package.xml -* adding missing package.xml -* catkinizing geometry-experimental -* removing bullet headers from use in header files -* removing bullet headers from use in header files -* merging my recent changes -* setting child_frame_id overlooked in revision 6a0eec022be0 which fixed failing tests -* allFramesAsString public and internal methods separated. Public method is locked, private method is not -* fixing another scoped lock -* fixing one scoped lock -* fixing test compilation -* merge -* Error message fix, ros-pkg5085 -* Check if target equals to source before validation -* When target_frame == source_frame, just returns an identity transform. -* adding additional ros header includes for strictness -* Fixed optimized lookups with compound transforms -* Fixed problem in tf2 optimized branch. Quaternion multiplication order was incorrect -* fix compilation on 32-bit -* Josh fix: Final inverse transform composition (missed multiplying the source->top vector by the target->top inverse orientation). b44877d2b054 -* Josh change: fix first/last time case. 46bf33868e0d -* fix transform accumulation to parent -* fix parent lookup, now works on the real pr2's tree -* move the message filter to tf2_ros -* tf2::MessageFilter + tests. Still need to change it around to pass in a callback queue, since we're being triggered directly from the tf2 buffer -* Don't add the request if the transform is already available. Add some new tests -* working transformable callbacks with a simple (incomplete) test case -* first pass at a transformable callback api, not tested yet -* add interpolation cases -* fix getLatestCommonTime -- no longer returns the latest of any of the times -* Some more optimization -- allow findClosest to inline -* another minor speedup -* Slightly speed up canTransform by not requiring the full data lookup, and only looking up the parent -* Add explicit operator= so that we can see the time in it on a profile graph. Also some minor cleanup -* minor cleanup -* add 3 more cases to the speed test -* Remove use of btTransform at all from transform accumulation, since the conversion to/from is unnecessary, expensive, and can introduce floating point error -* Don't use btTransform as an intermediate when accumulating transforms, as constructing them takes quite a bit of time -* Completely remove lookupLists(). canTransform() now uses the same walking code as lookupTransform().
Also fixed a bug in the static transform publisher test -* Genericise the walk-to-top-parent code in lookupTransform so that it will be able to be used by canTransform as well (minus the cost of actually computing the transform) -* remove id lookup that wasn't doing anything -* Some more optimization: - * Reduce # of TransformStorage copies made in TimeCache::getData() - * Remove use of lookupLists from getLatestCommonTime -* lookupTransform() no longer uses lookupLists unless it's called with Time(0). Removes lots of object construction/destruction due to removal of pushing back on the lists -* Remove CompactFrameID in favor of a typedef -* these mode checks are no longer necessary -* Fix crash when testing extrapolation on the forward transforms -* Update cache unit tests to work with the changes to TransformStorage. - Also make sure that BT_USE_DOUBLE_PRECISION is set for tf2. -* remove exposure of time_cache.h from buffer_core.h -* Removed the mutex from TimeCache, as it's unnecessary (BufferCore needs to have its own mutex locked anyway), and this speeds things up by about 20% - Also fixed a number of thread-safety problems -* Optimize test_extrapolation a bit, 25% speedup of lookupTransform -* use a hash map for looking up frame numbers, speeds up lookupTransform by ~8% -* Cache vectors used for looking up transforms. Speeds up lookupTransform by another 10% -* speed up lookupTransform by another 25% -* speed up lookupTransform by another 2x. also reduces the memory footprint of the cache significantly -* sped up lookupTransform by another 2x -* First add of a simple speed test - Sped up lookupTransform 2x -* roscpp dependency explicit, instead of relying on implicit -* static transform tested and working -* tests passing and all throw catches removed too\!
-* validating frame_ids up front for lookup exceptions -* working with single base class vector -* tests passing for static storage -* making method private for clarity -* static cache implementation and test -* cleaning up API doc typos -* sphinx docs for Buffer -* new dox mainpage -* update tf2 manifest -* commenting out twist -* Changed cache_time to cache_time_ to follow C++ style guide, also initialized it to actually get things to work -* no more rand in cache tests -* Changing tf2_py.cpp to use underscores instead of camelCase -* removing all old converter functions from transform_datatypes.h -* removing last references to transform_datatypes.h in tf2 -* transform conversions internalized -* removing unused datatypes -* copying bullet transform headers into tf2 and breaking bullet dependency -* merge -* removing dependency on tf -* removing include of old tf from tf2 -* update doc -* merge -* kdl unittest passing -* Spaces instead of tabs in YAML grrrr -* Adding quotes for parent -* canTransform advanced ported -* Hopefully fixing YAML syntax -* new version of view_frames in new tf2_tools package -* testing new argument validation and catching bug -* Python support for debugging -* merge -* adding validation of frame_ids in queries with warnings and exceptions where appropriate -* Exposing ability to get frames as a string -* A compiling version of YAML debugging interface for BufferCore -* placeholder for tf debug -* fixing tf:: to tf2:: ns issues and stripping slashes on set in tf2 for backwards compatibility -* Adding a python version of the BufferClient -* moving test to new package -* merging -* working unit test for BufferCore::lookupTransform -* removing unused method test and converting NO_PARENT test to new API -* Adding some comments -* Moving the python bindings for tf2 to the tf2 package from the tf2_py package -* buffercore tests upgraded -* porting tf_unittest while running incrementally instead of block copy -* BufferCore::clear ported forward -* successfully changed lookupTransform advanced to new version -* switching to new implementation of lookupTransform tests still passing -* compiling lookupTransform new version -* removing tf_prefix from BufferCore. BufferCore is independent of any frame_ids. tf_prefix should be implemented at the ROS API level. -* initializing tf_prefix -* adding missing initialization -* suppressing warnings -* more tests ported -* removing tests for apis not ported forward -* setTransform tests ported -* old tests in new package passing due to backwards dependency.
now for the fun, port all 1500 lines :-) -* setTransform working in new framework as well as old -* porting more methods -* more compatibility -* bringing in helper functions for buffer_core from tf.h/cpp -* rethrowing to new exceptions -* converting Storage to geometry_msgs::TransformStamped -* removing deprecated usage -* cleaning up includes -* moving all implementations into cpp file -* switching test to new class from old one -* Compiling version of the buffer client -* moving listener to tf_cpp -* removing listener, it should be in another package -* most of listener -* add cantransform implementation -* removing deprecated API usage -* initial import of listener header -* move implementation into library -* 2 tests of buffer -* moving executables back into bin -* compiling again with new design -* rename tfcore to buffercore -* almost compiling version of template code -* compiling tf2_core simple test -* add test to start compiling -* copying in tf_unittest for tf_core testing template -* prototype of tf2_core implemented using old tf. -* first version of template functions -* remove timeouts -* properly naming tf2_core.h from tf_core.h -* working cache test with tf2 lib -* first unit test passing, not yet ported -* tf_core api -* tf2 v2 -* aborting port -* moving across time cache tf and datatypes headers -* copying exceptions from tf -* switching to tf2 from tf_core diff --git a/src/geometry2/tf2/CMakeLists.txt b/src/geometry2/tf2/CMakeLists.txt deleted file mode 100644 index e5faec8..0000000 --- a/src/geometry2/tf2/CMakeLists.txt +++ /dev/null @@ -1,53 +0,0 @@ -cmake_minimum_required(VERSION 2.8.3) -project(tf2) - -find_package(console_bridge REQUIRED) -find_package(catkin REQUIRED COMPONENTS geometry_msgs rostime tf2_msgs) -find_package(Boost REQUIRED COMPONENTS system thread) - -catkin_package( - INCLUDE_DIRS include - LIBRARIES tf2 - DEPENDS console_bridge - CATKIN_DEPENDS geometry_msgs tf2_msgs rostime) - -include_directories(include ${catkin_INCLUDE_DIRS} ${console_bridge_INCLUDE_DIRS}) - -# export user definitions - -#CPP Libraries -add_library(tf2 src/cache.cpp src/buffer_core.cpp src/static_cache.cpp) -target_link_libraries(tf2 ${Boost_LIBRARIES} ${catkin_LIBRARIES} ${console_bridge_LIBRARIES}) -add_dependencies(tf2 ${catkin_EXPORTED_TARGETS}) - -install(TARGETS tf2 - ARCHIVE DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION} - LIBRARY DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION} - RUNTIME DESTINATION ${CATKIN_GLOBAL_BIN_DESTINATION} -) - -install(DIRECTORY include/${PROJECT_NAME}/ - DESTINATION ${CATKIN_PACKAGE_INCLUDE_DESTINATION} -) - -# Tests -if(CATKIN_ENABLE_TESTING) - -catkin_add_gtest(test_cache_unittest test/cache_unittest.cpp) -target_link_libraries(test_cache_unittest tf2 ${console_bridge_LIBRARIES}) -add_dependencies(test_cache_unittest ${catkin_EXPORTED_TARGETS}) - -catkin_add_gtest(test_static_cache_unittest test/static_cache_test.cpp) -target_link_libraries(test_static_cache_unittest tf2 ${console_bridge_LIBRARIES}) -add_dependencies(test_static_cache_unittest ${catkin_EXPORTED_TARGETS}) - -catkin_add_gtest(test_simple test/simple_tf2_core.cpp) -target_link_libraries(test_simple tf2 ${console_bridge_LIBRARIES}) -add_dependencies(test_simple ${catkin_EXPORTED_TARGETS}) - -add_executable(speed_test EXCLUDE_FROM_ALL test/speed_test.cpp) -target_link_libraries(speed_test tf2 ${console_bridge_LIBRARIES}) -add_dependencies(tests speed_test) -add_dependencies(tests ${catkin_EXPORTED_TARGETS}) - -endif() diff --git
a/src/geometry2/tf2/include/tf2/LinearMath/Matrix3x3.h b/src/geometry2/tf2/include/tf2/LinearMath/Matrix3x3.h deleted file mode 100644 index 5fffb7b..0000000 --- a/src/geometry2/tf2/include/tf2/LinearMath/Matrix3x3.h +++ /dev/null @@ -1,696 +0,0 @@ -/* -Copyright (c) 2003-2006 Gino van den Bergen / Erwin Coumans http://continuousphysics.com/Bullet/ - -This software is provided 'as-is', without any express or implied warranty. -In no event will the authors be held liable for any damages arising from the use of this software. -Permission is granted to anyone to use this software for any purpose, -including commercial applications, and to alter it and redistribute it freely, -subject to the following restrictions: - -1. The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment in the product documentation would be appreciated but is not required. -2. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software. -3. This notice may not be removed or altered from any source distribution. -*/ - - -#ifndef TF2_MATRIX3x3_H -#define TF2_MATRIX3x3_H - -#include "Vector3.h" -#include "Quaternion.h" - -#include <ros/macros.h> - -namespace tf2 -{ - - -#define Matrix3x3Data Matrix3x3DoubleData - - -/**@brief The Matrix3x3 class implements a 3x3 rotation matrix, to perform linear algebra in combination with Quaternion, Transform and Vector3. -* Make sure to only include a pure orthogonal matrix without scaling. */ -class Matrix3x3 { - - ///Data storage for the matrix, each vector is a row of the matrix - Vector3 m_el[3]; - -public: - /** @brief No initialization constructor */ - Matrix3x3 () {} - - // explicit Matrix3x3(const tf2Scalar *m) { setFromOpenGLSubMatrix(m); } - - /**@brief Constructor from Quaternion */ - explicit Matrix3x3(const Quaternion& q) { setRotation(q); } - /* - template <typename tf2Scalar> - Matrix3x3(const tf2Scalar& yaw, const tf2Scalar& pitch, const tf2Scalar& roll) - { - setEulerYPR(yaw, pitch, roll); - } - */ - /** @brief Constructor with row major formatting */ - Matrix3x3(const tf2Scalar& xx, const tf2Scalar& xy, const tf2Scalar& xz, - const tf2Scalar& yx, const tf2Scalar& yy, const tf2Scalar& yz, - const tf2Scalar& zx, const tf2Scalar& zy, const tf2Scalar& zz) - { - setValue(xx, xy, xz, - yx, yy, yz, - zx, zy, zz); - } - /** @brief Copy constructor */ - TF2SIMD_FORCE_INLINE Matrix3x3 (const Matrix3x3& other) - { - m_el[0] = other.m_el[0]; - m_el[1] = other.m_el[1]; - m_el[2] = other.m_el[2]; - } - - - /** @brief Assignment Operator */ - TF2SIMD_FORCE_INLINE Matrix3x3& operator=(const Matrix3x3& other) - { - m_el[0] = other.m_el[0]; - m_el[1] = other.m_el[1]; - m_el[2] = other.m_el[2]; - return *this; - } - - - /** @brief Get a column of the matrix as a vector - * @param i Column number 0 indexed */ - TF2SIMD_FORCE_INLINE Vector3 getColumn(int i) const - { - return Vector3(m_el[0][i],m_el[1][i],m_el[2][i]); - } - - - /** @brief Get a row of the matrix as a vector - * @param i Row number 0 indexed */ - TF2SIMD_FORCE_INLINE const Vector3& getRow(int i) const - { - tf2FullAssert(0 <= i && i < 3); - return m_el[i]; - } - - /** @brief Get a mutable reference to a row of the matrix as a vector - * @param i Row number 0 indexed */ - TF2SIMD_FORCE_INLINE Vector3& operator[](int i) - { - tf2FullAssert(0 <= i && i < 3); - return m_el[i]; - } - - /** @brief Get a const reference to a row of the matrix as a vector - * @param i Row number 0 indexed */ - 
TF2SIMD_FORCE_INLINE const Vector3& operator[](int i) const - { - tf2FullAssert(0 <= i && i < 3); - return m_el[i]; - } - - /** @brief Multiply by the target matrix on the right - * @param m Rotation matrix to be applied - * Equivalent to this = this * m */ - Matrix3x3& operator*=(const Matrix3x3& m); - - /** @brief Set from a C array of tf2Scalars - * @param m A pointer to the beginning of an array of 9 tf2Scalars */ - void setFromOpenGLSubMatrix(const tf2Scalar *m) - { - m_el[0].setValue(m[0],m[4],m[8]); - m_el[1].setValue(m[1],m[5],m[9]); - m_el[2].setValue(m[2],m[6],m[10]); - - } - /** @brief Set the values of the matrix explicitly (row major) - * @param xx Top left - * @param xy Top Middle - * @param xz Top Right - * @param yx Middle Left - * @param yy Middle Middle - * @param yz Middle Right - * @param zx Bottom Left - * @param zy Bottom Middle - * @param zz Bottom Right*/ - void setValue(const tf2Scalar& xx, const tf2Scalar& xy, const tf2Scalar& xz, - const tf2Scalar& yx, const tf2Scalar& yy, const tf2Scalar& yz, - const tf2Scalar& zx, const tf2Scalar& zy, const tf2Scalar& zz) - { - m_el[0].setValue(xx,xy,xz); - m_el[1].setValue(yx,yy,yz); - m_el[2].setValue(zx,zy,zz); - } - - /** @brief Set the matrix from a quaternion - * @param q The Quaternion to match */ - void setRotation(const Quaternion& q) - { - tf2Scalar d = q.length2(); - tf2FullAssert(d != tf2Scalar(0.0)); - tf2Scalar s = tf2Scalar(2.0) / d; - tf2Scalar xs = q.x() * s, ys = q.y() * s, zs = q.z() * s; - tf2Scalar wx = q.w() * xs, wy = q.w() * ys, wz = q.w() * zs; - tf2Scalar xx = q.x() * xs, xy = q.x() * ys, xz = q.x() * zs; - tf2Scalar yy = q.y() * ys, yz = q.y() * zs, zz = q.z() * zs; - setValue(tf2Scalar(1.0) - (yy + zz), xy - wz, xz + wy, - xy + wz, tf2Scalar(1.0) - (xx + zz), yz - wx, - xz - wy, yz + wx, tf2Scalar(1.0) - (xx + yy)); - } - - - /** @brief Set the matrix from euler angles using YPR around ZYX respectively - * @param yaw Yaw about Z axis - * @param pitch Pitch about Y axis - * @param roll Roll about X axis - */ - ROS_DEPRECATED void setEulerZYX(const tf2Scalar& yaw, const tf2Scalar& pitch, const tf2Scalar& roll) - { - setEulerYPR(yaw, pitch, roll); - } - - /** @brief Set the matrix from euler angles YPR around ZYX axes - * @param eulerZ Yaw about Z axis - * @param eulerY Pitch around Y axis - * @param eulerX Roll about X axis - * - * These angles are used to produce a rotation matrix. The euler - * angles are applied in ZYX order.
I.e. a vector is first rotated - * about X, then Y, and then Z -**/ - void setEulerYPR(tf2Scalar eulerZ, tf2Scalar eulerY,tf2Scalar eulerX) { - tf2Scalar ci ( tf2Cos(eulerX)); - tf2Scalar cj ( tf2Cos(eulerY)); - tf2Scalar ch ( tf2Cos(eulerZ)); - tf2Scalar si ( tf2Sin(eulerX)); - tf2Scalar sj ( tf2Sin(eulerY)); - tf2Scalar sh ( tf2Sin(eulerZ)); - tf2Scalar cc = ci * ch; - tf2Scalar cs = ci * sh; - tf2Scalar sc = si * ch; - tf2Scalar ss = si * sh; - - setValue(cj * ch, sj * sc - cs, sj * cc + ss, - cj * sh, sj * ss + cc, sj * cs - sc, - -sj, cj * si, cj * ci); - } - - /** @brief Set the matrix using RPY about XYZ fixed axes - * @param roll Roll about X axis - * @param pitch Pitch around Y axis - * @param yaw Yaw about Z axis - * - **/ - void setRPY(tf2Scalar roll, tf2Scalar pitch,tf2Scalar yaw) { - setEulerYPR(yaw, pitch, roll); - } - - /**@brief Set the matrix to the identity */ - void setIdentity() - { - setValue(tf2Scalar(1.0), tf2Scalar(0.0), tf2Scalar(0.0), - tf2Scalar(0.0), tf2Scalar(1.0), tf2Scalar(0.0), - tf2Scalar(0.0), tf2Scalar(0.0), tf2Scalar(1.0)); - } - - static const Matrix3x3& getIdentity() - { - static const Matrix3x3 identityMatrix(tf2Scalar(1.0), tf2Scalar(0.0), tf2Scalar(0.0), - tf2Scalar(0.0), tf2Scalar(1.0), tf2Scalar(0.0), - tf2Scalar(0.0), tf2Scalar(0.0), tf2Scalar(1.0)); - return identityMatrix; - } - - /**@brief Fill the values of the matrix into a 12 element array (OpenGL 4x3 sub matrix) - * @param m The array to be filled */ - void getOpenGLSubMatrix(tf2Scalar *m) const - { - m[0] = tf2Scalar(m_el[0].x()); - m[1] = tf2Scalar(m_el[1].x()); - m[2] = tf2Scalar(m_el[2].x()); - m[3] = tf2Scalar(0.0); - m[4] = tf2Scalar(m_el[0].y()); - m[5] = tf2Scalar(m_el[1].y()); - m[6] = tf2Scalar(m_el[2].y()); - m[7] = tf2Scalar(0.0); - m[8] = tf2Scalar(m_el[0].z()); - m[9] = tf2Scalar(m_el[1].z()); - m[10] = tf2Scalar(m_el[2].z()); - m[11] = tf2Scalar(0.0); - } - - /**@brief Get the matrix represented as a quaternion - * @param q The quaternion which will be set */ - void getRotation(Quaternion& q) const - { - tf2Scalar trace = m_el[0].x() + m_el[1].y() + m_el[2].z(); - tf2Scalar temp[4]; - - if (trace > tf2Scalar(0.0)) - { - tf2Scalar s = tf2Sqrt(trace + tf2Scalar(1.0)); - temp[3]=(s * tf2Scalar(0.5)); - s = tf2Scalar(0.5) / s; - - temp[0]=((m_el[2].y() - m_el[1].z()) * s); - temp[1]=((m_el[0].z() - m_el[2].x()) * s); - temp[2]=((m_el[1].x() - m_el[0].y()) * s); - } - else - { - int i = m_el[0].x() < m_el[1].y() ? - (m_el[1].y() < m_el[2].z() ? 2 : 1) : - (m_el[0].x() < m_el[2].z() ? 
2 : 0); - int j = (i + 1) % 3; - int k = (i + 2) % 3; - - tf2Scalar s = tf2Sqrt(m_el[i][i] - m_el[j][j] - m_el[k][k] + tf2Scalar(1.0)); - temp[i] = s * tf2Scalar(0.5); - s = tf2Scalar(0.5) / s; - - temp[3] = (m_el[k][j] - m_el[j][k]) * s; - temp[j] = (m_el[j][i] + m_el[i][j]) * s; - temp[k] = (m_el[k][i] + m_el[i][k]) * s; - } - q.setValue(temp[0],temp[1],temp[2],temp[3]); - } - - /**@brief Get the matrix represented as euler angles around ZYX - * @param yaw Yaw around Z axis - * @param pitch Pitch around Y axis - * @param roll Roll around X axis - * @param solution_number Which of the two possible solutions (1 or 2) to return*/ - ROS_DEPRECATED void getEulerZYX(tf2Scalar& yaw, tf2Scalar& pitch, tf2Scalar& roll, unsigned int solution_number = 1) const - { - getEulerYPR(yaw, pitch, roll, solution_number); - }; - - - /**@brief Get the matrix represented as euler angles around YXZ, roundtrip with setEulerYPR - * @param yaw Yaw around Z axis - * @param pitch Pitch around Y axis - * @param roll Roll around X axis */ - void getEulerYPR(tf2Scalar& yaw, tf2Scalar& pitch, tf2Scalar& roll, unsigned int solution_number = 1) const - { - struct Euler - { - tf2Scalar yaw; - tf2Scalar pitch; - tf2Scalar roll; - }; - - Euler euler_out; - Euler euler_out2; //second solution - //get the pointer to the raw data - - // Check that pitch is not at a singularity - if (tf2Fabs(m_el[2].x()) >= 1) - { - euler_out.yaw = 0; - euler_out2.yaw = 0; - - // From difference of angles formula - tf2Scalar delta = tf2Atan2(m_el[2].y(),m_el[2].z()); - if (m_el[2].x() < 0) //gimbal locked down - { - euler_out.pitch = TF2SIMD_PI / tf2Scalar(2.0); - euler_out2.pitch = TF2SIMD_PI / tf2Scalar(2.0); - euler_out.roll = delta; - euler_out2.roll = delta; - } - else // gimbal locked up - { - euler_out.pitch = -TF2SIMD_PI / tf2Scalar(2.0); - euler_out2.pitch = -TF2SIMD_PI / tf2Scalar(2.0); - euler_out.roll = delta; - euler_out2.roll = delta; - } - } - else - { - euler_out.pitch = - tf2Asin(m_el[2].x()); - euler_out2.pitch = TF2SIMD_PI - euler_out.pitch; - - euler_out.roll = tf2Atan2(m_el[2].y()/tf2Cos(euler_out.pitch), - m_el[2].z()/tf2Cos(euler_out.pitch)); - euler_out2.roll = tf2Atan2(m_el[2].y()/tf2Cos(euler_out2.pitch), - m_el[2].z()/tf2Cos(euler_out2.pitch)); - - euler_out.yaw = tf2Atan2(m_el[1].x()/tf2Cos(euler_out.pitch), - m_el[0].x()/tf2Cos(euler_out.pitch)); - euler_out2.yaw = tf2Atan2(m_el[1].x()/tf2Cos(euler_out2.pitch), - m_el[0].x()/tf2Cos(euler_out2.pitch)); - } - - if (solution_number == 1) - { - yaw = euler_out.yaw; - pitch = euler_out.pitch; - roll = euler_out.roll; - } - else - { - yaw = euler_out2.yaw; - pitch = euler_out2.pitch; - roll = euler_out2.roll; - } - } - - /**@brief Get the matrix represented as roll pitch and yaw about fixed axes XYZ - * @param roll Roll around X axis - * @param pitch Pitch around Y axis - * @param yaw Yaw around Z axis - * @param solution_number Which of the two possible solutions (1 or 2) to return*/ - void getRPY(tf2Scalar& roll, tf2Scalar& pitch, tf2Scalar& yaw, unsigned int solution_number = 1) const - { - getEulerYPR(yaw, pitch, roll, solution_number); - } - - /**@brief Create a scaled copy of the matrix - * @param s Scaling vector; the elements of the vector will scale each column */ - - Matrix3x3 scaled(const Vector3& s) const - { - return Matrix3x3(m_el[0].x() * s.x(), m_el[0].y() * s.y(), m_el[0].z() * s.z(), - m_el[1].x() * s.x(), m_el[1].y() * s.y(), m_el[1].z() * s.z(), - m_el[2].x() * s.x(), m_el[2].y() * s.y(), 
m_el[2].z() * s.z()); - } - - /**@brief Return the determinant of the matrix */ - tf2Scalar determinant() const; - /**@brief Return the adjoint of the matrix */ - Matrix3x3 adjoint() const; - /**@brief Return the matrix with all values non negative */ - Matrix3x3 absolute() const; - /**@brief Return the transpose of the matrix */ - Matrix3x3 transpose() const; - /**@brief Return the inverse of the matrix */ - Matrix3x3 inverse() const; - - Matrix3x3 transposeTimes(const Matrix3x3& m) const; - Matrix3x3 timesTranspose(const Matrix3x3& m) const; - - TF2SIMD_FORCE_INLINE tf2Scalar tdotx(const Vector3& v) const - { - return m_el[0].x() * v.x() + m_el[1].x() * v.y() + m_el[2].x() * v.z(); - } - TF2SIMD_FORCE_INLINE tf2Scalar tdoty(const Vector3& v) const - { - return m_el[0].y() * v.x() + m_el[1].y() * v.y() + m_el[2].y() * v.z(); - } - TF2SIMD_FORCE_INLINE tf2Scalar tdotz(const Vector3& v) const - { - return m_el[0].z() * v.x() + m_el[1].z() * v.y() + m_el[2].z() * v.z(); - } - - - /**@brief diagonalizes this matrix by the Jacobi method. - * @param rot stores the rotation from the coordinate system in which the matrix is diagonal to the original - * coordinate system, i.e., old_this = rot * new_this * rot^T. - * @param threshold See maxSteps - * @param maxSteps The iteration stops when all off-diagonal elements are less than the threshold multiplied - * by the sum of the absolute values of the diagonal, or when maxSteps have been executed. - * - * Note that this matrix is assumed to be symmetric. - */ - void diagonalize(Matrix3x3& rot, tf2Scalar threshold, int maxSteps) - { - rot.setIdentity(); - for (int step = maxSteps; step > 0; step--) - { - // find off-diagonal element [p][q] with largest magnitude - int p = 0; - int q = 1; - int r = 2; - tf2Scalar max = tf2Fabs(m_el[0][1]); - tf2Scalar v = tf2Fabs(m_el[0][2]); - if (v > max) - { - q = 2; - r = 1; - max = v; - } - v = tf2Fabs(m_el[1][2]); - if (v > max) - { - p = 1; - q = 2; - r = 0; - max = v; - } - - tf2Scalar t = threshold * (tf2Fabs(m_el[0][0]) + tf2Fabs(m_el[1][1]) + tf2Fabs(m_el[2][2])); - if (max <= t) - { - if (max <= TF2SIMD_EPSILON * t) - { - return; - } - step = 1; - } - - // compute Jacobi rotation J which leads to a zero for element [p][q] - tf2Scalar mpq = m_el[p][q]; - tf2Scalar theta = (m_el[q][q] - m_el[p][p]) / (2 * mpq); - tf2Scalar theta2 = theta * theta; - tf2Scalar cos; - tf2Scalar sin; - if (theta2 * theta2 < tf2Scalar(10 / TF2SIMD_EPSILON)) - { - t = (theta >= 0) ? 
1 / (theta + tf2Sqrt(1 + theta2)) - : 1 / (theta - tf2Sqrt(1 + theta2)); - cos = 1 / tf2Sqrt(1 + t * t); - sin = cos * t; - } - else - { - // approximation for large theta-value, i.e., a nearly diagonal matrix - t = 1 / (theta * (2 + tf2Scalar(0.5) / theta2)); - cos = 1 - tf2Scalar(0.5) * t * t; - sin = cos * t; - } - - // apply rotation to matrix (this = J^T * this * J) - m_el[p][q] = m_el[q][p] = 0; - m_el[p][p] -= t * mpq; - m_el[q][q] += t * mpq; - tf2Scalar mrp = m_el[r][p]; - tf2Scalar mrq = m_el[r][q]; - m_el[r][p] = m_el[p][r] = cos * mrp - sin * mrq; - m_el[r][q] = m_el[q][r] = cos * mrq + sin * mrp; - - // apply rotation to rot (rot = rot * J) - for (int i = 0; i < 3; i++) - { - Vector3& row = rot[i]; - mrp = row[p]; - mrq = row[q]; - row[p] = cos * mrp - sin * mrq; - row[q] = cos * mrq + sin * mrp; - } - } - } - - - - - /**@brief Calculate the matrix cofactor - * @param r1 The first row to use for calculating the cofactor - * @param c1 The first column to use for calculating the cofactor - * @param r2 The second row to use for calculating the cofactor - * @param c2 The second column to use for calculating the cofactor - * See http://en.wikipedia.org/wiki/Cofactor_(linear_algebra) for more details - */ - tf2Scalar cofac(int r1, int c1, int r2, int c2) const - { - return m_el[r1][c1] * m_el[r2][c2] - m_el[r1][c2] * m_el[r2][c1]; - } - - void serialize(struct Matrix3x3Data& dataOut) const; - - void serializeFloat(struct Matrix3x3FloatData& dataOut) const; - - void deSerialize(const struct Matrix3x3Data& dataIn); - - void deSerializeFloat(const struct Matrix3x3FloatData& dataIn); - - void deSerializeDouble(const struct Matrix3x3DoubleData& dataIn); - -}; - - -TF2SIMD_FORCE_INLINE Matrix3x3& -Matrix3x3::operator*=(const Matrix3x3& m) -{ - setValue(m.tdotx(m_el[0]), m.tdoty(m_el[0]), m.tdotz(m_el[0]), - m.tdotx(m_el[1]), m.tdoty(m_el[1]), m.tdotz(m_el[1]), - m.tdotx(m_el[2]), m.tdoty(m_el[2]), m.tdotz(m_el[2])); - return *this; -} - -TF2SIMD_FORCE_INLINE tf2Scalar -Matrix3x3::determinant() const -{ - return tf2Triple((*this)[0], (*this)[1], (*this)[2]); -} - - -TF2SIMD_FORCE_INLINE Matrix3x3 -Matrix3x3::absolute() const -{ - return Matrix3x3( - tf2Fabs(m_el[0].x()), tf2Fabs(m_el[0].y()), tf2Fabs(m_el[0].z()), - tf2Fabs(m_el[1].x()), tf2Fabs(m_el[1].y()), tf2Fabs(m_el[1].z()), - tf2Fabs(m_el[2].x()), tf2Fabs(m_el[2].y()), tf2Fabs(m_el[2].z())); -} - -TF2SIMD_FORCE_INLINE Matrix3x3 -Matrix3x3::transpose() const -{ - return Matrix3x3(m_el[0].x(), m_el[1].x(), m_el[2].x(), - m_el[0].y(), m_el[1].y(), m_el[2].y(), - m_el[0].z(), m_el[1].z(), m_el[2].z()); -} - -TF2SIMD_FORCE_INLINE Matrix3x3 -Matrix3x3::adjoint() const -{ - return Matrix3x3(cofac(1, 1, 2, 2), cofac(0, 2, 2, 1), cofac(0, 1, 1, 2), - cofac(1, 2, 2, 0), cofac(0, 0, 2, 2), cofac(0, 2, 1, 0), - cofac(1, 0, 2, 1), cofac(0, 1, 2, 0), cofac(0, 0, 1, 1)); -} - -TF2SIMD_FORCE_INLINE Matrix3x3 -Matrix3x3::inverse() const -{ - Vector3 co(cofac(1, 1, 2, 2), cofac(1, 2, 2, 0), cofac(1, 0, 2, 1)); - tf2Scalar det = (*this)[0].dot(co); - tf2FullAssert(det != tf2Scalar(0.0)); - tf2Scalar s = tf2Scalar(1.0) / det; - return Matrix3x3(co.x() * s, cofac(0, 2, 2, 1) * s, cofac(0, 1, 1, 2) * s, - co.y() * s, cofac(0, 0, 2, 2) * s, cofac(0, 2, 1, 0) * s, - co.z() * s, cofac(0, 1, 2, 0) * s, cofac(0, 0, 1, 1) * s); -} - -TF2SIMD_FORCE_INLINE Matrix3x3 -Matrix3x3::transposeTimes(const Matrix3x3& m) const -{ - return Matrix3x3( - m_el[0].x() * m[0].x() + m_el[1].x() * m[1].x() + m_el[2].x() * m[2].x(), - m_el[0].x() * m[0].y() + m_el[1].x() * 
m[1].y() + m_el[2].x() * m[2].y(), - m_el[0].x() * m[0].z() + m_el[1].x() * m[1].z() + m_el[2].x() * m[2].z(), - m_el[0].y() * m[0].x() + m_el[1].y() * m[1].x() + m_el[2].y() * m[2].x(), - m_el[0].y() * m[0].y() + m_el[1].y() * m[1].y() + m_el[2].y() * m[2].y(), - m_el[0].y() * m[0].z() + m_el[1].y() * m[1].z() + m_el[2].y() * m[2].z(), - m_el[0].z() * m[0].x() + m_el[1].z() * m[1].x() + m_el[2].z() * m[2].x(), - m_el[0].z() * m[0].y() + m_el[1].z() * m[1].y() + m_el[2].z() * m[2].y(), - m_el[0].z() * m[0].z() + m_el[1].z() * m[1].z() + m_el[2].z() * m[2].z()); -} - -TF2SIMD_FORCE_INLINE Matrix3x3 -Matrix3x3::timesTranspose(const Matrix3x3& m) const -{ - return Matrix3x3( - m_el[0].dot(m[0]), m_el[0].dot(m[1]), m_el[0].dot(m[2]), - m_el[1].dot(m[0]), m_el[1].dot(m[1]), m_el[1].dot(m[2]), - m_el[2].dot(m[0]), m_el[2].dot(m[1]), m_el[2].dot(m[2])); - -} - -TF2SIMD_FORCE_INLINE Vector3 -operator*(const Matrix3x3& m, const Vector3& v) -{ - return Vector3(m[0].dot(v), m[1].dot(v), m[2].dot(v)); -} - - -TF2SIMD_FORCE_INLINE Vector3 -operator*(const Vector3& v, const Matrix3x3& m) -{ - return Vector3(m.tdotx(v), m.tdoty(v), m.tdotz(v)); -} - -TF2SIMD_FORCE_INLINE Matrix3x3 -operator*(const Matrix3x3& m1, const Matrix3x3& m2) -{ - return Matrix3x3( - m2.tdotx( m1[0]), m2.tdoty( m1[0]), m2.tdotz( m1[0]), - m2.tdotx( m1[1]), m2.tdoty( m1[1]), m2.tdotz( m1[1]), - m2.tdotx( m1[2]), m2.tdoty( m1[2]), m2.tdotz( m1[2])); -} - -/* -TF2SIMD_FORCE_INLINE Matrix3x3 tf2MultTransposeLeft(const Matrix3x3& m1, const Matrix3x3& m2) { -return Matrix3x3( -m1[0][0] * m2[0][0] + m1[1][0] * m2[1][0] + m1[2][0] * m2[2][0], -m1[0][0] * m2[0][1] + m1[1][0] * m2[1][1] + m1[2][0] * m2[2][1], -m1[0][0] * m2[0][2] + m1[1][0] * m2[1][2] + m1[2][0] * m2[2][2], -m1[0][1] * m2[0][0] + m1[1][1] * m2[1][0] + m1[2][1] * m2[2][0], -m1[0][1] * m2[0][1] + m1[1][1] * m2[1][1] + m1[2][1] * m2[2][1], -m1[0][1] * m2[0][2] + m1[1][1] * m2[1][2] + m1[2][1] * m2[2][2], -m1[0][2] * m2[0][0] + m1[1][2] * m2[1][0] + m1[2][2] * m2[2][0], -m1[0][2] * m2[0][1] + m1[1][2] * m2[1][1] + m1[2][2] * m2[2][1], -m1[0][2] * m2[0][2] + m1[1][2] * m2[1][2] + m1[2][2] * m2[2][2]); -} -*/ - -/**@brief Equality operator between two matrices -* It will test all elements are equal. 
*/ -TF2SIMD_FORCE_INLINE bool operator==(const Matrix3x3& m1, const Matrix3x3& m2) -{ - return ( m1[0][0] == m2[0][0] && m1[1][0] == m2[1][0] && m1[2][0] == m2[2][0] && - m1[0][1] == m2[0][1] && m1[1][1] == m2[1][1] && m1[2][1] == m2[2][1] && - m1[0][2] == m2[0][2] && m1[1][2] == m2[1][2] && m1[2][2] == m2[2][2] ); -} - -///for serialization -struct Matrix3x3FloatData -{ - Vector3FloatData m_el[3]; -}; - -///for serialization -struct Matrix3x3DoubleData -{ - Vector3DoubleData m_el[3]; -}; - - - - -TF2SIMD_FORCE_INLINE void Matrix3x3::serialize(struct Matrix3x3Data& dataOut) const -{ - for (int i=0;i<3;i++) - m_el[i].serialize(dataOut.m_el[i]); -} - -TF2SIMD_FORCE_INLINE void Matrix3x3::serializeFloat(struct Matrix3x3FloatData& dataOut) const -{ - for (int i=0;i<3;i++) - m_el[i].serializeFloat(dataOut.m_el[i]); -} - - -TF2SIMD_FORCE_INLINE void Matrix3x3::deSerialize(const struct Matrix3x3Data& dataIn) -{ - for (int i=0;i<3;i++) - m_el[i].deSerialize(dataIn.m_el[i]); -} - -TF2SIMD_FORCE_INLINE void Matrix3x3::deSerializeFloat(const struct Matrix3x3FloatData& dataIn) -{ - for (int i=0;i<3;i++) - m_el[i].deSerializeFloat(dataIn.m_el[i]); -} - -TF2SIMD_FORCE_INLINE void Matrix3x3::deSerializeDouble(const struct Matrix3x3DoubleData& dataIn) -{ - for (int i=0;i<3;i++) - m_el[i].deSerializeDouble(dataIn.m_el[i]); -} - -} -#endif //TF2_MATRIX3x3_H - diff --git a/src/geometry2/tf2/include/tf2/LinearMath/MinMax.h b/src/geometry2/tf2/include/tf2/LinearMath/MinMax.h deleted file mode 100644 index 1dedf09..0000000 --- a/src/geometry2/tf2/include/tf2/LinearMath/MinMax.h +++ /dev/null @@ -1,69 +0,0 @@ -/* -Copyright (c) 2003-2006 Gino van den Bergen / Erwin Coumans http://continuousphysics.com/Bullet/ - -This software is provided 'as-is', without any express or implied warranty. -In no event will the authors be held liable for any damages arising from the use of this software. -Permission is granted to anyone to use this software for any purpose, -including commercial applications, and to alter it and redistribute it freely, -subject to the following restrictions: - -1. The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment in the product documentation would be appreciated but is not required. -2. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software. -3. This notice may not be removed or altered from any source distribution. -*/ - - - -#ifndef GEN_MINMAX_H -#define GEN_MINMAX_H - -template <class T> -TF2SIMD_FORCE_INLINE const T& tf2Min(const T& a, const T& b) -{ - return a < b ? a : b ; -} - -template <class T> -TF2SIMD_FORCE_INLINE const T& tf2Max(const T& a, const T& b) -{ - return a > b ? a : b; -} - -template <class T> -TF2SIMD_FORCE_INLINE const T& GEN_clamped(const T& a, const T& lb, const T& ub) -{ - return a < lb ? lb : (ub < a ? 
ub : a); -} - -template -TF2SIMD_FORCE_INLINE void tf2SetMin(T& a, const T& b) -{ - if (b < a) - { - a = b; - } -} - -template -TF2SIMD_FORCE_INLINE void tf2SetMax(T& a, const T& b) -{ - if (a < b) - { - a = b; - } -} - -template -TF2SIMD_FORCE_INLINE void GEN_clamp(T& a, const T& lb, const T& ub) -{ - if (a < lb) - { - a = lb; - } - else if (ub < a) - { - a = ub; - } -} - -#endif diff --git a/src/geometry2/tf2/include/tf2/LinearMath/QuadWord.h b/src/geometry2/tf2/include/tf2/LinearMath/QuadWord.h deleted file mode 100644 index 0bfa8a2..0000000 --- a/src/geometry2/tf2/include/tf2/LinearMath/QuadWord.h +++ /dev/null @@ -1,183 +0,0 @@ -/* -Copyright (c) 2003-2006 Gino van den Bergen / Erwin Coumans http://continuousphysics.com/Bullet/ - -This software is provided 'as-is', without any express or implied warranty. -In no event will the authors be held liable for any damages arising from the use of this software. -Permission is granted to anyone to use this software for any purpose, -including commercial applications, and to alter it and redistribute it freely, -subject to the following restrictions: - -1. The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment in the product documentation would be appreciated but is not required. -2. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software. -3. This notice may not be removed or altered from any source distribution. -*/ - - -#ifndef TF2SIMD_QUADWORD_H -#define TF2SIMD_QUADWORD_H - -#include "Scalar.h" -#include "MinMax.h" - - -#if defined (__CELLOS_LV2) && defined (__SPU__) -#include -#endif - -namespace tf2 -{ -/**@brief The QuadWord class is base class for Vector3 and Quaternion. - * Some issues under PS3 Linux with IBM 2.1 SDK, gcc compiler prevent from using aligned quadword. 
- */ -#ifndef USE_LIBSPE2 -ATTRIBUTE_ALIGNED16(class) QuadWord -#else -class QuadWord -#endif -{ -protected: - -#if defined (__SPU__) && defined (__CELLOS_LV2__) - union { - vec_float4 mVec128; - tf2Scalar m_floats[4]; - }; -public: - vec_float4 get128() const - { - return mVec128; - } -protected: -#else //__CELLOS_LV2__ __SPU__ - tf2Scalar m_floats[4]; -#endif //__CELLOS_LV2__ __SPU__ - - public: - - - /**@brief Return the x value */ - TF2SIMD_FORCE_INLINE const tf2Scalar& getX() const { return m_floats[0]; } - /**@brief Return the y value */ - TF2SIMD_FORCE_INLINE const tf2Scalar& getY() const { return m_floats[1]; } - /**@brief Return the z value */ - TF2SIMD_FORCE_INLINE const tf2Scalar& getZ() const { return m_floats[2]; } - /**@brief Set the x value */ - TF2SIMD_FORCE_INLINE void setX(tf2Scalar x) { m_floats[0] = x;}; - /**@brief Set the y value */ - TF2SIMD_FORCE_INLINE void setY(tf2Scalar y) { m_floats[1] = y;}; - /**@brief Set the z value */ - TF2SIMD_FORCE_INLINE void setZ(tf2Scalar z) { m_floats[2] = z;}; - /**@brief Set the w value */ - TF2SIMD_FORCE_INLINE void setW(tf2Scalar w) { m_floats[3] = w;}; - /**@brief Return the x value */ - TF2SIMD_FORCE_INLINE const tf2Scalar& x() const { return m_floats[0]; } - /**@brief Return the y value */ - TF2SIMD_FORCE_INLINE const tf2Scalar& y() const { return m_floats[1]; } - /**@brief Return the z value */ - TF2SIMD_FORCE_INLINE const tf2Scalar& z() const { return m_floats[2]; } - /**@brief Return the w value */ - TF2SIMD_FORCE_INLINE const tf2Scalar& w() const { return m_floats[3]; } - - //TF2SIMD_FORCE_INLINE tf2Scalar& operator[](int i) { return (&m_floats[0])[i]; } - //TF2SIMD_FORCE_INLINE const tf2Scalar& operator[](int i) const { return (&m_floats[0])[i]; } - ///operator tf2Scalar*() replaces operator[], using implicit conversion. We added operator != and operator == to avoid pointer comparisons. 
- TF2SIMD_FORCE_INLINE operator tf2Scalar *() { return &m_floats[0]; } - TF2SIMD_FORCE_INLINE operator const tf2Scalar *() const { return &m_floats[0]; } - - TF2SIMD_FORCE_INLINE bool operator==(const QuadWord& other) const - { - return ((m_floats[3]==other.m_floats[3]) && (m_floats[2]==other.m_floats[2]) && (m_floats[1]==other.m_floats[1]) && (m_floats[0]==other.m_floats[0])); - } - - TF2SIMD_FORCE_INLINE bool operator!=(const QuadWord& other) const - { - return !(*this == other); - } - - /**@brief Set x,y,z and zero w - * @param x Value of x - * @param y Value of y - * @param z Value of z - */ - TF2SIMD_FORCE_INLINE void setValue(const tf2Scalar& x, const tf2Scalar& y, const tf2Scalar& z) - { - m_floats[0]=x; - m_floats[1]=y; - m_floats[2]=z; - m_floats[3] = 0.f; - } - -/* void getValue(tf2Scalar *m) const - { - m[0] = m_floats[0]; - m[1] = m_floats[1]; - m[2] = m_floats[2]; - } -*/ -/**@brief Set the values - * @param x Value of x - * @param y Value of y - * @param z Value of z - * @param w Value of w - */ - TF2SIMD_FORCE_INLINE void setValue(const tf2Scalar& x, const tf2Scalar& y, const tf2Scalar& z,const tf2Scalar& w) - { - m_floats[0]=x; - m_floats[1]=y; - m_floats[2]=z; - m_floats[3]=w; - } - /**@brief No initialization constructor */ - TF2SIMD_FORCE_INLINE QuadWord() - // :m_floats[0](tf2Scalar(0.)),m_floats[1](tf2Scalar(0.)),m_floats[2](tf2Scalar(0.)),m_floats[3](tf2Scalar(0.)) - { - } - - /**@brief Three argument constructor (zeros w) - * @param x Value of x - * @param y Value of y - * @param z Value of z - */ - TF2SIMD_FORCE_INLINE QuadWord(const tf2Scalar& x, const tf2Scalar& y, const tf2Scalar& z) - { - m_floats[0] = x, m_floats[1] = y, m_floats[2] = z, m_floats[3] = 0.0f; - } - -/**@brief Initializing constructor - * @param x Value of x - * @param y Value of y - * @param z Value of z - * @param w Value of w - */ - TF2SIMD_FORCE_INLINE QuadWord(const tf2Scalar& x, const tf2Scalar& y, const tf2Scalar& z,const tf2Scalar& w) - { - m_floats[0] = x, m_floats[1] = y, m_floats[2] = z, m_floats[3] = w; - } - - /**@brief Set each element to the max of the current values and the values of another QuadWord - * @param other The other QuadWord to compare with - */ - TF2SIMD_FORCE_INLINE void setMax(const QuadWord& other) - { - tf2SetMax(m_floats[0], other.m_floats[0]); - tf2SetMax(m_floats[1], other.m_floats[1]); - tf2SetMax(m_floats[2], other.m_floats[2]); - tf2SetMax(m_floats[3], other.m_floats[3]); - } - /**@brief Set each element to the min of the current values and the values of another QuadWord - * @param other The other QuadWord to compare with - */ - TF2SIMD_FORCE_INLINE void setMin(const QuadWord& other) - { - tf2SetMin(m_floats[0], other.m_floats[0]); - tf2SetMin(m_floats[1], other.m_floats[1]); - tf2SetMin(m_floats[2], other.m_floats[2]); - tf2SetMin(m_floats[3], other.m_floats[3]); - } - - - -}; - -} -#endif //TF2SIMD_QUADWORD_H diff --git a/src/geometry2/tf2/include/tf2/LinearMath/Quaternion.h b/src/geometry2/tf2/include/tf2/LinearMath/Quaternion.h deleted file mode 100644 index 7e14584..0000000 --- a/src/geometry2/tf2/include/tf2/LinearMath/Quaternion.h +++ /dev/null @@ -1,477 +0,0 @@ -/* -Copyright (c) 2003-2006 Gino van den Bergen / Erwin Coumans http://continuousphysics.com/Bullet/ - -This software is provided 'as-is', without any express or implied warranty. -In no event will the authors be held liable for any damages arising from the use of this software. 
-Permission is granted to anyone to use this software for any purpose, -including commercial applications, and to alter it and redistribute it freely, -subject to the following restrictions: - -1. The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment in the product documentation would be appreciated but is not required. -2. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software. -3. This notice may not be removed or altered from any source distribution. -*/ - - - -#ifndef TF2_QUATERNION_H_ -#define TF2_QUATERNION_H_ - - -#include "Vector3.h" -#include "QuadWord.h" - -#include <ros/macros.h> - -namespace tf2 -{ - -/**@brief The Quaternion class implements quaternions to perform linear algebra rotations in combination with Matrix3x3, Vector3 and Transform. */ -class Quaternion : public QuadWord { -public: - /**@brief No initialization constructor */ - Quaternion() {} - - // template <typename tf2Scalar> - // explicit Quaternion(const tf2Scalar *v) : Tuple4<tf2Scalar>(v) {} - /**@brief Constructor from scalars */ - Quaternion(const tf2Scalar& x, const tf2Scalar& y, const tf2Scalar& z, const tf2Scalar& w) - : QuadWord(x, y, z, w) - {} - /**@brief Axis angle Constructor - * @param axis The axis which the rotation is around - * @param angle The magnitude of the rotation around the axis (radians) */ - Quaternion(const Vector3& axis, const tf2Scalar& angle) - { - setRotation(axis, angle); - } - /**@brief Constructor from Euler angles - * @param yaw Angle around Y unless TF2_EULER_DEFAULT_ZYX defined then Z - * @param pitch Angle around X unless TF2_EULER_DEFAULT_ZYX defined then Y - * @param roll Angle around Z unless TF2_EULER_DEFAULT_ZYX defined then X */ - ROS_DEPRECATED Quaternion(const tf2Scalar& yaw, const tf2Scalar& pitch, const tf2Scalar& roll) - { -#ifndef TF2_EULER_DEFAULT_ZYX - setEuler(yaw, pitch, roll); -#else - setRPY(roll, pitch, yaw); -#endif - } - /**@brief Set the rotation using axis angle notation - * @param axis The axis around which to rotate - * @param angle The magnitude of the rotation in Radians */ - void setRotation(const Vector3& axis, const tf2Scalar& angle) - { - tf2Scalar d = axis.length(); - tf2Assert(d != tf2Scalar(0.0)); - tf2Scalar s = tf2Sin(angle * tf2Scalar(0.5)) / d; - setValue(axis.x() * s, axis.y() * s, axis.z() * s, - tf2Cos(angle * tf2Scalar(0.5))); - } - /**@brief Set the quaternion using Euler angles - * @param yaw Angle around Y - * @param pitch Angle around X - * @param roll Angle around Z */ - void setEuler(const tf2Scalar& yaw, const tf2Scalar& pitch, const tf2Scalar& roll) - { - tf2Scalar halfYaw = tf2Scalar(yaw) * tf2Scalar(0.5); - tf2Scalar halfPitch = tf2Scalar(pitch) * tf2Scalar(0.5); - tf2Scalar halfRoll = tf2Scalar(roll) * tf2Scalar(0.5); - tf2Scalar cosYaw = tf2Cos(halfYaw); - tf2Scalar sinYaw = tf2Sin(halfYaw); - tf2Scalar cosPitch = tf2Cos(halfPitch); - tf2Scalar sinPitch = tf2Sin(halfPitch); - tf2Scalar cosRoll = tf2Cos(halfRoll); - tf2Scalar sinRoll = tf2Sin(halfRoll); - setValue(cosRoll * sinPitch * cosYaw + sinRoll * cosPitch * sinYaw, - cosRoll * cosPitch * sinYaw - sinRoll * sinPitch * cosYaw, - sinRoll * cosPitch * cosYaw - cosRoll * sinPitch * sinYaw, - cosRoll * cosPitch * cosYaw + sinRoll * sinPitch * sinYaw); - } - /**@brief Set the quaternion using fixed axis RPY - * @param roll Angle around X - * @param pitch Angle around Y - * @param yaw Angle around Z*/ - void setRPY(const tf2Scalar& roll, const 
tf2Scalar& pitch, const tf2Scalar& yaw) - { - tf2Scalar halfYaw = tf2Scalar(yaw) * tf2Scalar(0.5); - tf2Scalar halfPitch = tf2Scalar(pitch) * tf2Scalar(0.5); - tf2Scalar halfRoll = tf2Scalar(roll) * tf2Scalar(0.5); - tf2Scalar cosYaw = tf2Cos(halfYaw); - tf2Scalar sinYaw = tf2Sin(halfYaw); - tf2Scalar cosPitch = tf2Cos(halfPitch); - tf2Scalar sinPitch = tf2Sin(halfPitch); - tf2Scalar cosRoll = tf2Cos(halfRoll); - tf2Scalar sinRoll = tf2Sin(halfRoll); - setValue(sinRoll * cosPitch * cosYaw - cosRoll * sinPitch * sinYaw, //x - cosRoll * sinPitch * cosYaw + sinRoll * cosPitch * sinYaw, //y - cosRoll * cosPitch * sinYaw - sinRoll * sinPitch * cosYaw, //z - cosRoll * cosPitch * cosYaw + sinRoll * sinPitch * sinYaw); //formerly yzx - } - /**@brief Set the quaternion using euler angles - * @param yaw Angle around Z - * @param pitch Angle around Y - * @param roll Angle around X */ - ROS_DEPRECATED void setEulerZYX(const tf2Scalar& yaw, const tf2Scalar& pitch, const tf2Scalar& roll) - { - setRPY(roll, pitch, yaw); - } - /**@brief Add two quaternions - * @param q The quaternion to add to this one */ - TF2SIMD_FORCE_INLINE Quaternion& operator+=(const Quaternion& q) - { - m_floats[0] += q.x(); m_floats[1] += q.y(); m_floats[2] += q.z(); m_floats[3] += q.m_floats[3]; - return *this; - } - - /**@brief Subtract out a quaternion - * @param q The quaternion to subtract from this one */ - Quaternion& operator-=(const Quaternion& q) - { - m_floats[0] -= q.x(); m_floats[1] -= q.y(); m_floats[2] -= q.z(); m_floats[3] -= q.m_floats[3]; - return *this; - } - - /**@brief Scale this quaternion - * @param s The scalar to scale by */ - Quaternion& operator*=(const tf2Scalar& s) - { - m_floats[0] *= s; m_floats[1] *= s; m_floats[2] *= s; m_floats[3] *= s; - return *this; - } - - /**@brief Multiply this quaternion by q on the right - * @param q The other quaternion - * Equivalent to this = this * q */ - Quaternion& operator*=(const Quaternion& q) - { - setValue(m_floats[3] * q.x() + m_floats[0] * q.m_floats[3] + m_floats[1] * q.z() - m_floats[2] * q.y(), - m_floats[3] * q.y() + m_floats[1] * q.m_floats[3] + m_floats[2] * q.x() - m_floats[0] * q.z(), - m_floats[3] * q.z() + m_floats[2] * q.m_floats[3] + m_floats[0] * q.y() - m_floats[1] * q.x(), - m_floats[3] * q.m_floats[3] - m_floats[0] * q.x() - m_floats[1] * q.y() - m_floats[2] * q.z()); - return *this; - } - /**@brief Return the dot product between this quaternion and another - * @param q The other quaternion */ - tf2Scalar dot(const Quaternion& q) const - { - return m_floats[0] * q.x() + m_floats[1] * q.y() + m_floats[2] * q.z() + m_floats[3] * q.m_floats[3]; - } - - /**@brief Return the length squared of the quaternion */ - tf2Scalar length2() const - { - return dot(*this); - } - - /**@brief Return the length of the quaternion */ - tf2Scalar length() const - { - return tf2Sqrt(length2()); - } - - /**@brief Normalize the quaternion - * Such that x^2 + y^2 + z^2 + w^2 = 1 */ - Quaternion& normalize() - { - return *this /= length(); - } - - /**@brief Return a scaled version of this quaternion - * @param s The scale factor */ - TF2SIMD_FORCE_INLINE Quaternion - operator*(const tf2Scalar& s) const - { - return Quaternion(x() * s, y() * s, z() * s, m_floats[3] * s); - } - - - /**@brief Return an inversely scaled version of this quaternion - * @param s The inverse scale factor */ - Quaternion operator/(const tf2Scalar& s) const - { - tf2Assert(s != tf2Scalar(0.0)); - return *this * (tf2Scalar(1.0) / s); - } - - /**@brief Inversely scale this quaternion - * @param s 
The scale factor */ - Quaternion& operator/=(const tf2Scalar& s) - { - tf2Assert(s != tf2Scalar(0.0)); - return *this *= tf2Scalar(1.0) / s; - } - - /**@brief Return a normalized version of this quaternion */ - Quaternion normalized() const - { - return *this / length(); - } - /**@brief Return the ***half*** angle between this quaternion and the other - * @param q The other quaternion */ - tf2Scalar angle(const Quaternion& q) const - { - tf2Scalar s = tf2Sqrt(length2() * q.length2()); - tf2Assert(s != tf2Scalar(0.0)); - return tf2Acos(dot(q) / s); - } - /**@brief Return the angle between this quaternion and the other along the shortest path - * @param q The other quaternion */ - tf2Scalar angleShortestPath(const Quaternion& q) const - { - tf2Scalar s = tf2Sqrt(length2() * q.length2()); - tf2Assert(s != tf2Scalar(0.0)); - if (dot(q) < 0) // Take care of long angle case see http://en.wikipedia.org/wiki/Slerp - return tf2Acos(dot(-q) / s) * tf2Scalar(2.0); - else - return tf2Acos(dot(q) / s) * tf2Scalar(2.0); - } - /**@brief Return the angle [0, 2Pi] of rotation represented by this quaternion */ - tf2Scalar getAngle() const - { - tf2Scalar s = tf2Scalar(2.) * tf2Acos(m_floats[3]); - return s; - } - - /**@brief Return the angle [0, Pi] of rotation represented by this quaternion along the shortest path */ - tf2Scalar getAngleShortestPath() const - { - tf2Scalar s; - if (m_floats[3] >= 0) - s = tf2Scalar(2.) * tf2Acos(m_floats[3]); - else - s = tf2Scalar(2.) * tf2Acos(-m_floats[3]); - - return s; - } - - /**@brief Return the axis of the rotation represented by this quaternion */ - Vector3 getAxis() const - { - tf2Scalar s_squared = tf2Scalar(1.) - tf2Pow(m_floats[3], tf2Scalar(2.)); - if (s_squared < tf2Scalar(10.) * TF2SIMD_EPSILON) //Check for divide by zero - return Vector3(1.0, 0.0, 0.0); // Arbitrary - tf2Scalar s = tf2Sqrt(s_squared); - return Vector3(m_floats[0] / s, m_floats[1] / s, m_floats[2] / s); - } - - /**@brief Return the inverse of this quaternion */ - Quaternion inverse() const - { - return Quaternion(-m_floats[0], -m_floats[1], -m_floats[2], m_floats[3]); - } - - /**@brief Return the sum of this quaternion and the other - * @param q2 The other quaternion */ - TF2SIMD_FORCE_INLINE Quaternion - operator+(const Quaternion& q2) const - { - const Quaternion& q1 = *this; - return Quaternion(q1.x() + q2.x(), q1.y() + q2.y(), q1.z() + q2.z(), q1.m_floats[3] + q2.m_floats[3]); - } - - /**@brief Return the difference between this quaternion and the other - * @param q2 The other quaternion */ - TF2SIMD_FORCE_INLINE Quaternion - operator-(const Quaternion& q2) const - { - const Quaternion& q1 = *this; - return Quaternion(q1.x() - q2.x(), q1.y() - q2.y(), q1.z() - q2.z(), q1.m_floats[3] - q2.m_floats[3]); - } - - /**@brief Return the negative of this quaternion - * This simply negates each element */ - TF2SIMD_FORCE_INLINE Quaternion operator-() const - { - const Quaternion& q2 = *this; - return Quaternion( - q2.x(), - q2.y(), - q2.z(), - q2.m_floats[3]); - } - /**@todo document this and its use */ - TF2SIMD_FORCE_INLINE Quaternion farthest( const Quaternion& qd) const - { - Quaternion diff,sum; - diff = *this - qd; - sum = *this + qd; - if( diff.dot(diff) > sum.dot(sum) ) - return qd; - return (-qd); - } - - /**@todo document this and its use */ - TF2SIMD_FORCE_INLINE Quaternion nearest( const Quaternion& qd) const - { - Quaternion diff,sum; - diff = *this - qd; - sum = *this + qd; - if( diff.dot(diff) < sum.dot(sum) ) - return qd; - return (-qd); - } - - - /**@brief Return the 
quaternion which is the result of Spherical Linear Interpolation between this and the other quaternion - * @param q The other quaternion to interpolate with - * @param t The ratio between this and q to interpolate. If t = 0 the result is this, if t=1 the result is q. - * Slerp interpolates assuming constant velocity. */ - Quaternion slerp(const Quaternion& q, const tf2Scalar& t) const - { - tf2Scalar theta = angleShortestPath(q) / tf2Scalar(2.0); - if (theta != tf2Scalar(0.0)) - { - tf2Scalar d = tf2Scalar(1.0) / tf2Sin(theta); - tf2Scalar s0 = tf2Sin((tf2Scalar(1.0) - t) * theta); - tf2Scalar s1 = tf2Sin(t * theta); - if (dot(q) < 0) // Take care of long angle case see http://en.wikipedia.org/wiki/Slerp - return Quaternion((m_floats[0] * s0 + -q.x() * s1) * d, - (m_floats[1] * s0 + -q.y() * s1) * d, - (m_floats[2] * s0 + -q.z() * s1) * d, - (m_floats[3] * s0 + -q.m_floats[3] * s1) * d); - else - return Quaternion((m_floats[0] * s0 + q.x() * s1) * d, - (m_floats[1] * s0 + q.y() * s1) * d, - (m_floats[2] * s0 + q.z() * s1) * d, - (m_floats[3] * s0 + q.m_floats[3] * s1) * d); - - } - else - { - return *this; - } - } - - static const Quaternion& getIdentity() - { - static const Quaternion identityQuat(tf2Scalar(0.),tf2Scalar(0.),tf2Scalar(0.),tf2Scalar(1.)); - return identityQuat; - } - - TF2SIMD_FORCE_INLINE const tf2Scalar& getW() const { return m_floats[3]; } - - -}; - - -/**@brief Return the negative of a quaternion */ -TF2SIMD_FORCE_INLINE Quaternion -operator-(const Quaternion& q) -{ - return Quaternion(-q.x(), -q.y(), -q.z(), -q.w()); -} - - - -/**@brief Return the product of two quaternions */ -TF2SIMD_FORCE_INLINE Quaternion -operator*(const Quaternion& q1, const Quaternion& q2) { - return Quaternion(q1.w() * q2.x() + q1.x() * q2.w() + q1.y() * q2.z() - q1.z() * q2.y(), - q1.w() * q2.y() + q1.y() * q2.w() + q1.z() * q2.x() - q1.x() * q2.z(), - q1.w() * q2.z() + q1.z() * q2.w() + q1.x() * q2.y() - q1.y() * q2.x(), - q1.w() * q2.w() - q1.x() * q2.x() - q1.y() * q2.y() - q1.z() * q2.z()); -} - -TF2SIMD_FORCE_INLINE Quaternion -operator*(const Quaternion& q, const Vector3& w) -{ - return Quaternion( q.w() * w.x() + q.y() * w.z() - q.z() * w.y(), - q.w() * w.y() + q.z() * w.x() - q.x() * w.z(), - q.w() * w.z() + q.x() * w.y() - q.y() * w.x(), - -q.x() * w.x() - q.y() * w.y() - q.z() * w.z()); -} - -TF2SIMD_FORCE_INLINE Quaternion -operator*(const Vector3& w, const Quaternion& q) -{ - return Quaternion( w.x() * q.w() + w.y() * q.z() - w.z() * q.y(), - w.y() * q.w() + w.z() * q.x() - w.x() * q.z(), - w.z() * q.w() + w.x() * q.y() - w.y() * q.x(), - -w.x() * q.x() - w.y() * q.y() - w.z() * q.z()); -} - -/**@brief Calculate the dot product between two quaternions */ -TF2SIMD_FORCE_INLINE tf2Scalar -dot(const Quaternion& q1, const Quaternion& q2) -{ - return q1.dot(q2); -} - - -/**@brief Return the length of a quaternion */ -TF2SIMD_FORCE_INLINE tf2Scalar -length(const Quaternion& q) -{ - return q.length(); -} - -/**@brief Return the ***half*** angle between two quaternions*/ -TF2SIMD_FORCE_INLINE tf2Scalar -angle(const Quaternion& q1, const Quaternion& q2) -{ - return q1.angle(q2); -} - -/**@brief Return the shortest angle between two quaternions*/ -TF2SIMD_FORCE_INLINE tf2Scalar -angleShortestPath(const Quaternion& q1, const Quaternion& q2) -{ - return q1.angleShortestPath(q2); -} - -/**@brief Return the inverse of a quaternion*/ -TF2SIMD_FORCE_INLINE Quaternion -inverse(const Quaternion& q) -{ - return q.inverse(); -} - -/**@brief Return the result of spherical linear interpolation between 
two quaternions - * @param q1 The first quaternion - * @param q2 The second quaternion - * @param t The ratio between q1 and q2. t = 0 returns q1, t=1 returns q2 - * Slerp assumes constant velocity between positions. */ -TF2SIMD_FORCE_INLINE Quaternion -slerp(const Quaternion& q1, const Quaternion& q2, const tf2Scalar& t) -{ - return q1.slerp(q2, t); -} - -TF2SIMD_FORCE_INLINE Vector3 -quatRotate(const Quaternion& rotation, const Vector3& v) -{ - Quaternion q = rotation * v; - q *= rotation.inverse(); - return Vector3(q.getX(),q.getY(),q.getZ()); -} - -TF2SIMD_FORCE_INLINE Quaternion -shortestArcQuat(const Vector3& v0, const Vector3& v1) // Game Programming Gems 2.10. make sure v0,v1 are normalized -{ - Vector3 c = v0.cross(v1); - tf2Scalar d = v0.dot(v1); - - if (d < -1.0 + TF2SIMD_EPSILON) - { - Vector3 n,unused; - tf2PlaneSpace1(v0,n,unused); - return Quaternion(n.x(),n.y(),n.z(),0.0f); // just pick any vector that is orthogonal to v0 - } - - tf2Scalar s = tf2Sqrt((1.0f + d) * 2.0f); - tf2Scalar rs = 1.0f / s; - - return Quaternion(c.getX()*rs,c.getY()*rs,c.getZ()*rs,s * 0.5f); -} - -TF2SIMD_FORCE_INLINE Quaternion -shortestArcQuatNormalize2(Vector3& v0,Vector3& v1) -{ - v0.normalize(); - v1.normalize(); - return shortestArcQuat(v0,v1); -} - -} -#endif - - - - diff --git a/src/geometry2/tf2/include/tf2/LinearMath/Scalar.h b/src/geometry2/tf2/include/tf2/LinearMath/Scalar.h deleted file mode 100644 index 39c1477..0000000 --- a/src/geometry2/tf2/include/tf2/LinearMath/Scalar.h +++ /dev/null @@ -1,417 +0,0 @@ -/* -Copyright (c) 2003-2009 Erwin Coumans http://bullet.googlecode.com - -This software is provided 'as-is', without any express or implied warranty. -In no event will the authors be held liable for any damages arising from the use of this software. -Permission is granted to anyone to use this software for any purpose, -including commercial applications, and to alter it and redistribute it freely, -subject to the following restrictions: - -1. The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment in the product documentation would be appreciated but is not required. -2. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software. -3. This notice may not be removed or altered from any source distribution. -*/ - - - -#ifndef TF2_SCALAR_H -#define TF2_SCALAR_H - -#ifdef TF2_MANAGED_CODE -//Aligned data types not supported in managed code -#pragma unmanaged -#endif - - -#include <math.h> -#include <stdlib.h> //size_t for MSVC 6.0 -#include <cstdlib> -#include <cfloat> -#include <float.h> - -#if defined(DEBUG) || defined (_DEBUG) -#define TF2_DEBUG -#endif - - -#ifdef _WIN32 - - #if defined(__MINGW32__) || defined(__CYGWIN__) || (defined (_MSC_VER) && _MSC_VER < 1300) - - #define TF2SIMD_FORCE_INLINE inline - #define ATTRIBUTE_ALIGNED16(a) a - #define ATTRIBUTE_ALIGNED64(a) a - #define ATTRIBUTE_ALIGNED128(a) a - #else - //#define TF2_HAS_ALIGNED_ALLOCATOR - #pragma warning(disable : 4324) // disable padding warning -// #pragma warning(disable:4530) // Disable the exception disable but used in MSVC STL warning. 
-// #pragma warning(disable:4996) //Turn off warnings about deprecated C routines -// #pragma warning(disable:4786) // Disable the "debug name too long" warning - - #define TF2SIMD_FORCE_INLINE __forceinline - #define ATTRIBUTE_ALIGNED16(a) __declspec(align(16)) a - #define ATTRIBUTE_ALIGNED64(a) __declspec(align(64)) a - #define ATTRIBUTE_ALIGNED128(a) __declspec (align(128)) a - #ifdef _XBOX - #define TF2_USE_VMX128 - - #include <ppcintrinsics.h> - #define TF2_HAVE_NATIVE_FSEL - #define tf2Fsel(a,b,c) __fsel((a),(b),(c)) - #else - - - #endif//_XBOX - - #endif //__MINGW32__ - - #include <assert.h> -#ifdef TF2_DEBUG - #define tf2Assert assert -#else - #define tf2Assert(x) -#endif - //tf2FullAssert is optional, slows down a lot - #define tf2FullAssert(x) - - #define tf2Likely(_c) _c - #define tf2Unlikely(_c) _c - -#else - -#if defined (__CELLOS_LV2__) - #define TF2SIMD_FORCE_INLINE inline - #define ATTRIBUTE_ALIGNED16(a) a __attribute__ ((aligned (16))) - #define ATTRIBUTE_ALIGNED64(a) a __attribute__ ((aligned (64))) - #define ATTRIBUTE_ALIGNED128(a) a __attribute__ ((aligned (128))) - #ifndef assert - #include <assert.h> - #endif -#ifdef TF2_DEBUG - #define tf2Assert assert -#else - #define tf2Assert(x) -#endif - //tf2FullAssert is optional, slows down a lot - #define tf2FullAssert(x) - - #define tf2Likely(_c) _c - #define tf2Unlikely(_c) _c - -#else - -#ifdef USE_LIBSPE2 - - #define TF2SIMD_FORCE_INLINE __inline - #define ATTRIBUTE_ALIGNED16(a) a __attribute__ ((aligned (16))) - #define ATTRIBUTE_ALIGNED64(a) a __attribute__ ((aligned (64))) - #define ATTRIBUTE_ALIGNED128(a) a __attribute__ ((aligned (128))) - #ifndef assert - #include <assert.h> - #endif -#ifdef TF2_DEBUG - #define tf2Assert assert -#else - #define tf2Assert(x) -#endif - //tf2FullAssert is optional, slows down a lot - #define tf2FullAssert(x) - - - #define tf2Likely(_c) __builtin_expect((_c), 1) - #define tf2Unlikely(_c) __builtin_expect((_c), 0) - - -#else - //non-windows systems - - #define TF2SIMD_FORCE_INLINE inline - ///@todo: check out alignment methods for other platforms/compilers - ///#define ATTRIBUTE_ALIGNED16(a) a __attribute__ ((aligned (16))) - ///#define ATTRIBUTE_ALIGNED64(a) a __attribute__ ((aligned (64))) - ///#define ATTRIBUTE_ALIGNED128(a) a __attribute__ ((aligned (128))) - #define ATTRIBUTE_ALIGNED16(a) a - #define ATTRIBUTE_ALIGNED64(a) a - #define ATTRIBUTE_ALIGNED128(a) a - #ifndef assert - #include <assert.h> - #endif - -#if defined(DEBUG) || defined (_DEBUG) - #define tf2Assert assert -#else - #define tf2Assert(x) -#endif - - //tf2FullAssert is optional, slows down a lot - #define tf2FullAssert(x) - #define tf2Likely(_c) _c - #define tf2Unlikely(_c) _c - -#endif // LIBSPE2 - -#endif //__CELLOS_LV2__ -#endif - - -///The tf2Scalar type abstracts floating point numbers, to easily switch between double and single floating point precision.
-typedef double tf2Scalar; -//this number could be bigger in double precision -#define TF2_LARGE_FLOAT 1e30 - - -#define TF2_DECLARE_ALIGNED_ALLOCATOR() \ - TF2SIMD_FORCE_INLINE void* operator new(size_t sizeInBytes) { return tf2AlignedAlloc(sizeInBytes,16); } \ - TF2SIMD_FORCE_INLINE void operator delete(void* ptr) { tf2AlignedFree(ptr); } \ - TF2SIMD_FORCE_INLINE void* operator new(size_t, void* ptr) { return ptr; } \ - TF2SIMD_FORCE_INLINE void operator delete(void*, void*) { } \ - TF2SIMD_FORCE_INLINE void* operator new[](size_t sizeInBytes) { return tf2AlignedAlloc(sizeInBytes,16); } \ - TF2SIMD_FORCE_INLINE void operator delete[](void* ptr) { tf2AlignedFree(ptr); } \ - TF2SIMD_FORCE_INLINE void* operator new[](size_t, void* ptr) { return ptr; } \ - TF2SIMD_FORCE_INLINE void operator delete[](void*, void*) { } \ - - - - -TF2SIMD_FORCE_INLINE tf2Scalar tf2Sqrt(tf2Scalar x) { return sqrt(x); } -TF2SIMD_FORCE_INLINE tf2Scalar tf2Fabs(tf2Scalar x) { return fabs(x); } -TF2SIMD_FORCE_INLINE tf2Scalar tf2Cos(tf2Scalar x) { return cos(x); } -TF2SIMD_FORCE_INLINE tf2Scalar tf2Sin(tf2Scalar x) { return sin(x); } -TF2SIMD_FORCE_INLINE tf2Scalar tf2Tan(tf2Scalar x) { return tan(x); } -TF2SIMD_FORCE_INLINE tf2Scalar tf2Acos(tf2Scalar x) { if (x<tf2Scalar(-1)) x=tf2Scalar(-1); if (x>tf2Scalar(1)) x=tf2Scalar(1); return acos(x); } -TF2SIMD_FORCE_INLINE tf2Scalar tf2Asin(tf2Scalar x) { if (x<tf2Scalar(-1)) x=tf2Scalar(-1); if (x>tf2Scalar(1)) x=tf2Scalar(1); return asin(x); } -TF2SIMD_FORCE_INLINE tf2Scalar tf2Atan(tf2Scalar x) { return atan(x); } -TF2SIMD_FORCE_INLINE tf2Scalar tf2Atan2(tf2Scalar x, tf2Scalar y) { return atan2(x, y); } -TF2SIMD_FORCE_INLINE tf2Scalar tf2Exp(tf2Scalar x) { return exp(x); } -TF2SIMD_FORCE_INLINE tf2Scalar tf2Log(tf2Scalar x) { return log(x); } -TF2SIMD_FORCE_INLINE tf2Scalar tf2Pow(tf2Scalar x,tf2Scalar y) { return pow(x,y); } -TF2SIMD_FORCE_INLINE tf2Scalar tf2Fmod(tf2Scalar x,tf2Scalar y) { return fmod(x,y); } - - -#define TF2SIMD_2_PI tf2Scalar(6.283185307179586232) -#define TF2SIMD_PI (TF2SIMD_2_PI * tf2Scalar(0.5)) -#define TF2SIMD_HALF_PI (TF2SIMD_2_PI * tf2Scalar(0.25)) -#define TF2SIMD_RADS_PER_DEG (TF2SIMD_2_PI / tf2Scalar(360.0)) -#define TF2SIMD_DEGS_PER_RAD (tf2Scalar(360.0) / TF2SIMD_2_PI) -#define TF2SIMDSQRT12 tf2Scalar(0.7071067811865475244008443621048490) - -#define tf2RecipSqrt(x) ((tf2Scalar)(tf2Scalar(1.0)/tf2Sqrt(tf2Scalar(x)))) /* reciprocal square root */ - - -#define TF2SIMD_EPSILON DBL_EPSILON -#define TF2SIMD_INFINITY DBL_MAX - -TF2SIMD_FORCE_INLINE tf2Scalar tf2Atan2Fast(tf2Scalar y, tf2Scalar x) -{ - tf2Scalar coeff_1 = TF2SIMD_PI / 4.0f; - tf2Scalar coeff_2 = 3.0f * coeff_1; - tf2Scalar abs_y = tf2Fabs(y); - tf2Scalar angle; - if (x >= 0.0f) { - tf2Scalar r = (x - abs_y) / (x + abs_y); - angle = coeff_1 - coeff_1 * r; - } else { - tf2Scalar r = (x + abs_y) / (abs_y - x); - angle = coeff_2 - coeff_1 * r; - } - return (y < 0.0f) ? -angle : angle; -} - -TF2SIMD_FORCE_INLINE bool tf2FuzzyZero(tf2Scalar x) { return tf2Fabs(x) < TF2SIMD_EPSILON; } - -TF2SIMD_FORCE_INLINE bool tf2Equal(tf2Scalar a, tf2Scalar eps) { - return (((a) <= eps) && !((a) < -eps)); -} -TF2SIMD_FORCE_INLINE bool tf2GreaterEqual (tf2Scalar a, tf2Scalar eps) { - return (!((a) <= eps)); -} - - -TF2SIMD_FORCE_INLINE int tf2IsNegative(tf2Scalar x) { - return x < tf2Scalar(0.0) ?
1 : 0; -} - -TF2SIMD_FORCE_INLINE tf2Scalar tf2Radians(tf2Scalar x) { return x * TF2SIMD_RADS_PER_DEG; } -TF2SIMD_FORCE_INLINE tf2Scalar tf2Degrees(tf2Scalar x) { return x * TF2SIMD_DEGS_PER_RAD; } - -#define TF2_DECLARE_HANDLE(name) typedef struct name##__ { int unused; } *name - -#ifndef tf2Fsel -TF2SIMD_FORCE_INLINE tf2Scalar tf2Fsel(tf2Scalar a, tf2Scalar b, tf2Scalar c) -{ - return a >= 0 ? b : c; -} -#endif -#define tf2Fsels(a,b,c) (tf2Scalar)tf2Fsel(a,b,c) - - -TF2SIMD_FORCE_INLINE bool tf2MachineIsLittleEndian() -{ - long int i = 1; - const char *p = (const char *) &i; - if (p[0] == 1) // Lowest address contains the least significant byte - return true; - else - return false; -} - - - -///tf2Select avoids branches, which makes performance much better for consoles like Playstation 3 and XBox 360 -///Thanks Phil Knight. See also http://www.cellperformance.com/articles/2006/04/more_techniques_for_eliminatin_1.html -TF2SIMD_FORCE_INLINE unsigned tf2Select(unsigned condition, unsigned valueIfConditionNonZero, unsigned valueIfConditionZero) -{ - // Set testNz to 0xFFFFFFFF if condition is nonzero, 0x00000000 if condition is zero - // Rely on positive value or'ed with its negative having sign bit on - // and zero value or'ed with its negative (which is still zero) having sign bit off - // Use arithmetic shift right, shifting the sign bit through all 32 bits - unsigned testNz = (unsigned)(((int)condition | -(int)condition) >> 31); - unsigned testEqz = ~testNz; - return ((valueIfConditionNonZero & testNz) | (valueIfConditionZero & testEqz)); -} -TF2SIMD_FORCE_INLINE int tf2Select(unsigned condition, int valueIfConditionNonZero, int valueIfConditionZero) -{ - unsigned testNz = (unsigned)(((int)condition | -(int)condition) >> 31); - unsigned testEqz = ~testNz; - return static_cast<int>((valueIfConditionNonZero & testNz) | (valueIfConditionZero & testEqz)); -} -TF2SIMD_FORCE_INLINE float tf2Select(unsigned condition, float valueIfConditionNonZero, float valueIfConditionZero) -{ -#ifdef TF2_HAVE_NATIVE_FSEL - return (float)tf2Fsel((tf2Scalar)condition - tf2Scalar(1.0f), valueIfConditionNonZero, valueIfConditionZero); -#else - return (condition != 0) ? valueIfConditionNonZero : valueIfConditionZero; -#endif -} - -template <class T> TF2SIMD_FORCE_INLINE void tf2Swap(T& a, T& b) -{ - T tmp = a; - a = b; - b = tmp; -} - - -//PCK: endian swapping functions -TF2SIMD_FORCE_INLINE unsigned tf2SwapEndian(unsigned val) -{ - return (((val & 0xff000000) >> 24) | ((val & 0x00ff0000) >> 8) | ((val & 0x0000ff00) << 8) | ((val & 0x000000ff) << 24)); -} - -TF2SIMD_FORCE_INLINE unsigned short tf2SwapEndian(unsigned short val) -{ - return static_cast<unsigned short>(((val & 0xff00) >> 8) | ((val & 0x00ff) << 8)); -} - -TF2SIMD_FORCE_INLINE unsigned tf2SwapEndian(int val) -{ - return tf2SwapEndian((unsigned)val); -} - -TF2SIMD_FORCE_INLINE unsigned short tf2SwapEndian(short val) -{ - return tf2SwapEndian((unsigned short) val); -} - -///tf2SwapFloat uses char pointers to swap the endianness -////tf2SwapFloat/tf2SwapDouble will NOT return a float, because the machine might 'correct' invalid floating point values -///Not all values of sign/exponent/mantissa are valid floating point numbers according to IEEE 754. -///When a floating point unit is faced with an invalid value, it may actually change the value, or worse, throw an exception. -///In most systems, running user mode code, you wouldn't get an exception, but instead the hardware/os/runtime will 'fix' the number for you.
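The branchless trick in tf2Select above is worth restating in isolation. A stand-alone sketch of the same idea (plain C++, no tf2 headers); note that, as in the original, right-shifting a negative int is implementation-defined before C++20, so this assumes the arithmetic shift that mainstream compilers perform:

#include <cassert>

// (cond | -cond) has its sign bit set iff cond != 0; an arithmetic shift
// right by 31 then smears that bit into an all-ones or all-zeros mask.
static unsigned selectBranchless(unsigned condition, unsigned ifNonZero, unsigned ifZero)
{
  unsigned testNz = (unsigned)(((int)condition | -(int)condition) >> 31);
  return (ifNonZero & testNz) | (ifZero & ~testNz);
}

int main()
{
  assert(selectBranchless(0u, 11u, 22u) == 22u); // zero condition picks ifZero
  assert(selectBranchless(7u, 11u, 22u) == 11u); // nonzero picks ifNonZero
  return 0;
}
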
-///so instead of returning a float/double, we return integer/long long integer -TF2SIMD_FORCE_INLINE unsigned int tf2SwapEndianFloat(float d) -{ - unsigned int a = 0; - unsigned char *dst = (unsigned char *)&a; - unsigned char *src = (unsigned char *)&d; - - dst[0] = src[3]; - dst[1] = src[2]; - dst[2] = src[1]; - dst[3] = src[0]; - return a; -} - -// unswap using char pointers -TF2SIMD_FORCE_INLINE float tf2UnswapEndianFloat(unsigned int a) -{ - float d = 0.0f; - unsigned char *src = (unsigned char *)&a; - unsigned char *dst = (unsigned char *)&d; - - dst[0] = src[3]; - dst[1] = src[2]; - dst[2] = src[1]; - dst[3] = src[0]; - - return d; -} - - -// swap using char pointers -TF2SIMD_FORCE_INLINE void tf2SwapEndianDouble(double d, unsigned char* dst) -{ - unsigned char *src = (unsigned char *)&d; - - dst[0] = src[7]; - dst[1] = src[6]; - dst[2] = src[5]; - dst[3] = src[4]; - dst[4] = src[3]; - dst[5] = src[2]; - dst[6] = src[1]; - dst[7] = src[0]; - -} - -// unswap using char pointers -TF2SIMD_FORCE_INLINE double tf2UnswapEndianDouble(const unsigned char *src) -{ - double d = 0.0; - unsigned char *dst = (unsigned char *)&d; - - dst[0] = src[7]; - dst[1] = src[6]; - dst[2] = src[5]; - dst[3] = src[4]; - dst[4] = src[3]; - dst[5] = src[2]; - dst[6] = src[1]; - dst[7] = src[0]; - - return d; -} - -// returns normalized value in range [-TF2SIMD_PI, TF2SIMD_PI] -TF2SIMD_FORCE_INLINE tf2Scalar tf2NormalizeAngle(tf2Scalar angleInRadians) -{ - angleInRadians = tf2Fmod(angleInRadians, TF2SIMD_2_PI); - if(angleInRadians < -TF2SIMD_PI) - { - return angleInRadians + TF2SIMD_2_PI; - } - else if(angleInRadians > TF2SIMD_PI) - { - return angleInRadians - TF2SIMD_2_PI; - } - else - { - return angleInRadians; - } -} - -///rudimentary class to provide type info -struct tf2TypedObject -{ - tf2TypedObject(int objectType) - :m_objectType(objectType) - { - } - int m_objectType; - inline int getObjectType() const - { - return m_objectType; - } -}; -#endif //TF2SIMD___SCALAR_H diff --git a/src/geometry2/tf2/include/tf2/LinearMath/Transform.h b/src/geometry2/tf2/include/tf2/LinearMath/Transform.h deleted file mode 100644 index 19cc68f..0000000 --- a/src/geometry2/tf2/include/tf2/LinearMath/Transform.h +++ /dev/null @@ -1,305 +0,0 @@ -/* -Copyright (c) 2003-2006 Gino van den Bergen / Erwin Coumans http://continuousphysics.com/Bullet/ - -This software is provided 'as-is', without any express or implied warranty. -In no event will the authors be held liable for any damages arising from the use of this software. -Permission is granted to anyone to use this software for any purpose, -including commercial applications, and to alter it and redistribute it freely, -subject to the following restrictions: - -1. The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment in the product documentation would be appreciated but is not required. -2. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software. -3. This notice may not be removed or altered from any source distribution. -*/ - - - -#ifndef tf2_Transform_H -#define tf2_Transform_H - - -#include "Matrix3x3.h" - - -namespace tf2 -{ - -#define TransformData TransformDoubleData - - -/**@brief The Transform class supports rigid transforms with only translation and rotation and no scaling/shear. - *It can be used in combination with Vector3, Quaternion and Matrix3x3 linear algebra classes. 
*/ -class Transform { - - ///Storage for the rotation - Matrix3x3 m_basis; - ///Storage for the translation - Vector3 m_origin; - -public: - - /**@brief No initialization constructor */ - Transform() {} - /**@brief Constructor from Quaternion (optional Vector3 ) - * @param q Rotation from quaternion - * @param c Translation from Vector (default 0,0,0) */ - explicit TF2SIMD_FORCE_INLINE Transform(const Quaternion& q, - const Vector3& c = Vector3(tf2Scalar(0), tf2Scalar(0), tf2Scalar(0))) - : m_basis(q), - m_origin(c) - {} - - /**@brief Constructor from Matrix3x3 (optional Vector3) - * @param b Rotation from Matrix - * @param c Translation from Vector default (0,0,0)*/ - explicit TF2SIMD_FORCE_INLINE Transform(const Matrix3x3& b, - const Vector3& c = Vector3(tf2Scalar(0), tf2Scalar(0), tf2Scalar(0))) - : m_basis(b), - m_origin(c) - {} - /**@brief Copy constructor */ - TF2SIMD_FORCE_INLINE Transform (const Transform& other) - : m_basis(other.m_basis), - m_origin(other.m_origin) - { - } - /**@brief Assignment Operator */ - TF2SIMD_FORCE_INLINE Transform& operator=(const Transform& other) - { - m_basis = other.m_basis; - m_origin = other.m_origin; - return *this; - } - - /**@brief Set the current transform as the value of the product of two transforms - * @param t1 Transform 1 - * @param t2 Transform 2 - * This = Transform1 * Transform2 */ - TF2SIMD_FORCE_INLINE void mult(const Transform& t1, const Transform& t2) { - m_basis = t1.m_basis * t2.m_basis; - m_origin = t1(t2.m_origin); - } - -/* void multInverseLeft(const Transform& t1, const Transform& t2) { - Vector3 v = t2.m_origin - t1.m_origin; - m_basis = tf2MultTransposeLeft(t1.m_basis, t2.m_basis); - m_origin = v * t1.m_basis; - } - */ - -/**@brief Return the transform of the vector */ - TF2SIMD_FORCE_INLINE Vector3 operator()(const Vector3& x) const - { - return Vector3(m_basis[0].dot(x) + m_origin.x(), - m_basis[1].dot(x) + m_origin.y(), - m_basis[2].dot(x) + m_origin.z()); - } - - /**@brief Return the transform of the vector */ - TF2SIMD_FORCE_INLINE Vector3 operator*(const Vector3& x) const - { - return (*this)(x); - } - - /**@brief Return the transform of the Quaternion */ - TF2SIMD_FORCE_INLINE Quaternion operator*(const Quaternion& q) const - { - return getRotation() * q; - } - - /**@brief Return the basis matrix for the rotation */ - TF2SIMD_FORCE_INLINE Matrix3x3& getBasis() { return m_basis; } - /**@brief Return the basis matrix for the rotation */ - TF2SIMD_FORCE_INLINE const Matrix3x3& getBasis() const { return m_basis; } - - /**@brief Return the origin vector translation */ - TF2SIMD_FORCE_INLINE Vector3& getOrigin() { return m_origin; } - /**@brief Return the origin vector translation */ - TF2SIMD_FORCE_INLINE const Vector3& getOrigin() const { return m_origin; } - - /**@brief Return a quaternion representing the rotation */ - Quaternion getRotation() const { - Quaternion q; - m_basis.getRotation(q); - return q; - } - - - /**@brief Set from an array - * @param m A pointer to a 15 element array (12 rotation(row major padded on the right by 1), and 3 translation */ - void setFromOpenGLMatrix(const tf2Scalar *m) - { - m_basis.setFromOpenGLSubMatrix(m); - m_origin.setValue(m[12],m[13],m[14]); - } - - /**@brief Fill an array representation - * @param m A pointer to a 15 element array (12 rotation(row major padded on the right by 1), and 3 translation */ - void getOpenGLMatrix(tf2Scalar *m) const - { - m_basis.getOpenGLSubMatrix(m); - m[12] = m_origin.x(); - m[13] = m_origin.y(); - m[14] = m_origin.z(); - m[15] = tf2Scalar(1.0); - } 
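A minimal sketch of how this Transform class composes and inverts, under the same header assumptions as the earlier examples; the frame names are hypothetical:

#include <tf2/LinearMath/Transform.h>

int main()
{
  // map -> base: one metre along x; base -> sensor: a 90-degree yaw.
  tf2::Transform map_T_base(tf2::Quaternion::getIdentity(),
                            tf2::Vector3(1, 0, 0));
  tf2::Transform base_T_sensor(tf2::Quaternion(tf2::Vector3(0, 0, 1),
                                               TF2SIMD_HALF_PI));

  // operator* multiplies the bases and rotates-then-offsets the origin.
  tf2::Transform map_T_sensor = map_T_base * base_T_sensor;

  // Re-express a point measured in the sensor frame in the map frame.
  tf2::Vector3 p_map = map_T_sensor * tf2::Vector3(0, 1, 0);

  // inverse() transposes the basis, so T^-1 * T is (numerically) identity.
  tf2::Transform round_trip = map_T_sensor.inverse() * map_T_sensor;
  (void)p_map; (void)round_trip;
  return 0;
}
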
- - /**@brief Set the translational element - * @param origin The vector to set the translation to */ - TF2SIMD_FORCE_INLINE void setOrigin(const Vector3& origin) - { - m_origin = origin; - } - - TF2SIMD_FORCE_INLINE Vector3 invXform(const Vector3& inVec) const; - - - /**@brief Set the rotational element by Matrix3x3 */ - TF2SIMD_FORCE_INLINE void setBasis(const Matrix3x3& basis) - { - m_basis = basis; - } - - /**@brief Set the rotational element by Quaternion */ - TF2SIMD_FORCE_INLINE void setRotation(const Quaternion& q) - { - m_basis.setRotation(q); - } - - - /**@brief Set this transformation to the identity */ - void setIdentity() - { - m_basis.setIdentity(); - m_origin.setValue(tf2Scalar(0.0), tf2Scalar(0.0), tf2Scalar(0.0)); - } - - /**@brief Multiply this Transform by another(this = this * another) - * @param t The other transform */ - Transform& operator*=(const Transform& t) - { - m_origin += m_basis * t.m_origin; - m_basis *= t.m_basis; - return *this; - } - - /**@brief Return the inverse of this transform */ - Transform inverse() const - { - Matrix3x3 inv = m_basis.transpose(); - return Transform(inv, inv * -m_origin); - } - - /**@brief Return the inverse of this transform times the other transform - * @param t The other transform - * return this.inverse() * the other */ - Transform inverseTimes(const Transform& t) const; - - /**@brief Return the product of this transform and the other */ - Transform operator*(const Transform& t) const; - - /**@brief Return an identity transform */ - static const Transform& getIdentity() - { - static const Transform identityTransform(Matrix3x3::getIdentity()); - return identityTransform; - } - - void serialize(struct TransformData& dataOut) const; - - void serializeFloat(struct TransformFloatData& dataOut) const; - - void deSerialize(const struct TransformData& dataIn); - - void deSerializeDouble(const struct TransformDoubleData& dataIn); - - void deSerializeFloat(const struct TransformFloatData& dataIn); - -}; - - -TF2SIMD_FORCE_INLINE Vector3 -Transform::invXform(const Vector3& inVec) const -{ - Vector3 v = inVec - m_origin; - return (m_basis.transpose() * v); -} - -TF2SIMD_FORCE_INLINE Transform -Transform::inverseTimes(const Transform& t) const -{ - Vector3 v = t.getOrigin() - m_origin; - return Transform(m_basis.transposeTimes(t.m_basis), - v * m_basis); -} - -TF2SIMD_FORCE_INLINE Transform -Transform::operator*(const Transform& t) const -{ - return Transform(m_basis * t.m_basis, - (*this)(t.m_origin)); -} - -/**@brief Test if two transforms have all elements equal */ -TF2SIMD_FORCE_INLINE bool operator==(const Transform& t1, const Transform& t2) -{ - return ( t1.getBasis() == t2.getBasis() && - t1.getOrigin() == t2.getOrigin() ); -} - - -///for serialization -struct TransformFloatData -{ - Matrix3x3FloatData m_basis; - Vector3FloatData m_origin; -}; - -struct TransformDoubleData -{ - Matrix3x3DoubleData m_basis; - Vector3DoubleData m_origin; -}; - - - -TF2SIMD_FORCE_INLINE void Transform::serialize(TransformData& dataOut) const -{ - m_basis.serialize(dataOut.m_basis); - m_origin.serialize(dataOut.m_origin); -} - -TF2SIMD_FORCE_INLINE void Transform::serializeFloat(TransformFloatData& dataOut) const -{ - m_basis.serializeFloat(dataOut.m_basis); - m_origin.serializeFloat(dataOut.m_origin); -} - - -TF2SIMD_FORCE_INLINE void Transform::deSerialize(const TransformData& dataIn) -{ - m_basis.deSerialize(dataIn.m_basis); - m_origin.deSerialize(dataIn.m_origin); -} - -TF2SIMD_FORCE_INLINE void Transform::deSerializeFloat(const TransformFloatData& 
dataIn) -{ - m_basis.deSerializeFloat(dataIn.m_basis); - m_origin.deSerializeFloat(dataIn.m_origin); -} - -TF2SIMD_FORCE_INLINE void Transform::deSerializeDouble(const TransformDoubleData& dataIn) -{ - m_basis.deSerializeDouble(dataIn.m_basis); - m_origin.deSerializeDouble(dataIn.m_origin); -} - -} - -#endif - - - - - - diff --git a/src/geometry2/tf2/include/tf2/LinearMath/Vector3.h b/src/geometry2/tf2/include/tf2/LinearMath/Vector3.h deleted file mode 100644 index 96650cd..0000000 --- a/src/geometry2/tf2/include/tf2/LinearMath/Vector3.h +++ /dev/null @@ -1,731 +0,0 @@ -/* -Copyright (c) 2003-2006 Gino van den Bergen / Erwin Coumans http://continuousphysics.com/Bullet/ - -This software is provided 'as-is', without any express or implied warranty. -In no event will the authors be held liable for any damages arising from the use of this software. -Permission is granted to anyone to use this software for any purpose, -including commercial applications, and to alter it and redistribute it freely, -subject to the following restrictions: - -1. The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment in the product documentation would be appreciated but is not required. -2. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software. -3. This notice may not be removed or altered from any source distribution. -*/ - - - -#ifndef TF2_VECTOR3_H -#define TF2_VECTOR3_H - - -#include "Scalar.h" -#include "MinMax.h" - -namespace tf2 -{ - -#define Vector3Data Vector3DoubleData -#define Vector3DataName "Vector3DoubleData" - - - - -/**@brief tf2::Vector3 can be used to represent 3D points and vectors. - * It has an un-used w component to suit 16-byte alignment when tf2::Vector3 is stored in containers. This extra component can be used by derived classes (Quaternion?) 
or by user - * Ideally, this class should be replaced by a platform optimized TF2SIMD version that keeps the data in registers - */ -ATTRIBUTE_ALIGNED16(class) Vector3 -{ -public: - -#if defined (__SPU__) && defined (__CELLOS_LV2__) - tf2Scalar m_floats[4]; -public: - TF2SIMD_FORCE_INLINE const vec_float4& get128() const - { - return *((const vec_float4*)&m_floats[0]); - } -public: -#else //__CELLOS_LV2__ __SPU__ -#ifdef TF2_USE_SSE // _WIN32 - union { - __m128 mVec128; - tf2Scalar m_floats[4]; - }; - TF2SIMD_FORCE_INLINE __m128 get128() const - { - return mVec128; - } - TF2SIMD_FORCE_INLINE void set128(__m128 v128) - { - mVec128 = v128; - } -#else - tf2Scalar m_floats[4]; -#endif -#endif //__CELLOS_LV2__ __SPU__ - - public: - - /**@brief No initialization constructor */ - TF2SIMD_FORCE_INLINE Vector3() {} - - - - /**@brief Constructor from scalars - * @param x X value - * @param y Y value - * @param z Z value - */ - TF2SIMD_FORCE_INLINE Vector3(const tf2Scalar& x, const tf2Scalar& y, const tf2Scalar& z) - { - m_floats[0] = x; - m_floats[1] = y; - m_floats[2] = z; - m_floats[3] = tf2Scalar(0.); - } - -/**@brief Add a vector to this one - * @param The vector to add to this one */ - TF2SIMD_FORCE_INLINE Vector3& operator+=(const Vector3& v) - { - - m_floats[0] += v.m_floats[0]; m_floats[1] += v.m_floats[1];m_floats[2] += v.m_floats[2]; - return *this; - } - - - /**@brief Sutf2ract a vector from this one - * @param The vector to sutf2ract */ - TF2SIMD_FORCE_INLINE Vector3& operator-=(const Vector3& v) - { - m_floats[0] -= v.m_floats[0]; m_floats[1] -= v.m_floats[1];m_floats[2] -= v.m_floats[2]; - return *this; - } - /**@brief Scale the vector - * @param s Scale factor */ - TF2SIMD_FORCE_INLINE Vector3& operator*=(const tf2Scalar& s) - { - m_floats[0] *= s; m_floats[1] *= s;m_floats[2] *= s; - return *this; - } - - /**@brief Inversely scale the vector - * @param s Scale factor to divide by */ - TF2SIMD_FORCE_INLINE Vector3& operator/=(const tf2Scalar& s) - { - tf2FullAssert(s != tf2Scalar(0.0)); - return *this *= tf2Scalar(1.0) / s; - } - - /**@brief Return the dot product - * @param v The other vector in the dot product */ - TF2SIMD_FORCE_INLINE tf2Scalar dot(const Vector3& v) const - { - return m_floats[0] * v.m_floats[0] + m_floats[1] * v.m_floats[1] +m_floats[2] * v.m_floats[2]; - } - - /**@brief Return the length of the vector squared */ - TF2SIMD_FORCE_INLINE tf2Scalar length2() const - { - return dot(*this); - } - - /**@brief Return the length of the vector */ - TF2SIMD_FORCE_INLINE tf2Scalar length() const - { - return tf2Sqrt(length2()); - } - - /**@brief Return the distance squared between the ends of this and another vector - * This is symantically treating the vector like a point */ - TF2SIMD_FORCE_INLINE tf2Scalar distance2(const Vector3& v) const; - - /**@brief Return the distance between the ends of this and another vector - * This is symantically treating the vector like a point */ - TF2SIMD_FORCE_INLINE tf2Scalar distance(const Vector3& v) const; - - /**@brief Normalize this vector - * x^2 + y^2 + z^2 = 1 */ - TF2SIMD_FORCE_INLINE Vector3& normalize() - { - return *this /= length(); - } - - /**@brief Return a normalized version of this vector */ - TF2SIMD_FORCE_INLINE Vector3 normalized() const; - - /**@brief Rotate this vector - * @param wAxis The axis to rotate about - * @param angle The angle to rotate by */ - TF2SIMD_FORCE_INLINE Vector3 rotate( const Vector3& wAxis, const tf2Scalar angle ) const; - - /**@brief Return the angle between this and another vector - * @param v 
The other vector */ - TF2SIMD_FORCE_INLINE tf2Scalar angle(const Vector3& v) const - { - tf2Scalar s = tf2Sqrt(length2() * v.length2()); - tf2FullAssert(s != tf2Scalar(0.0)); - return tf2Acos(dot(v) / s); - } - /**@brief Return a vector with the absolute values of each element */ - TF2SIMD_FORCE_INLINE Vector3 absolute() const - { - return Vector3( - tf2Fabs(m_floats[0]), - tf2Fabs(m_floats[1]), - tf2Fabs(m_floats[2])); - } - /**@brief Return the cross product between this and another vector - * @param v The other vector */ - TF2SIMD_FORCE_INLINE Vector3 cross(const Vector3& v) const - { - return Vector3( - m_floats[1] * v.m_floats[2] -m_floats[2] * v.m_floats[1], - m_floats[2] * v.m_floats[0] - m_floats[0] * v.m_floats[2], - m_floats[0] * v.m_floats[1] - m_floats[1] * v.m_floats[0]); - } - - TF2SIMD_FORCE_INLINE tf2Scalar triple(const Vector3& v1, const Vector3& v2) const - { - return m_floats[0] * (v1.m_floats[1] * v2.m_floats[2] - v1.m_floats[2] * v2.m_floats[1]) + - m_floats[1] * (v1.m_floats[2] * v2.m_floats[0] - v1.m_floats[0] * v2.m_floats[2]) + - m_floats[2] * (v1.m_floats[0] * v2.m_floats[1] - v1.m_floats[1] * v2.m_floats[0]); - } - - /**@brief Return the axis with the smallest value - * Note return values are 0,1,2 for x, y, or z */ - TF2SIMD_FORCE_INLINE int minAxis() const - { - return m_floats[0] < m_floats[1] ? (m_floats[0] < m_floats[2] ? 0 : 2) : (m_floats[1] < m_floats[2] ? 1 : 2); - } - - /**@brief Return the axis with the largest value - * Note return values are 0,1,2 for x, y, or z */ - TF2SIMD_FORCE_INLINE int maxAxis() const - { - return m_floats[0] < m_floats[1] ? (m_floats[1] < m_floats[2] ? 2 : 1) : (m_floats[0] < m_floats[2] ? 2 : 0); - } - - TF2SIMD_FORCE_INLINE int furthestAxis() const - { - return absolute().minAxis(); - } - - TF2SIMD_FORCE_INLINE int closestAxis() const - { - return absolute().maxAxis(); - } - - TF2SIMD_FORCE_INLINE void setInterpolate3(const Vector3& v0, const Vector3& v1, tf2Scalar rt) - { - tf2Scalar s = tf2Scalar(1.0) - rt; - m_floats[0] = s * v0.m_floats[0] + rt * v1.m_floats[0]; - m_floats[1] = s * v0.m_floats[1] + rt * v1.m_floats[1]; - m_floats[2] = s * v0.m_floats[2] + rt * v1.m_floats[2]; - //don't do the unused w component - // m_co[3] = s * v0[3] + rt * v1[3]; - } - - /**@brief Return the linear interpolation between this and another vector - * @param v The other vector - * @param t The ratio of this to v (t = 0 => return this, t=1 => return other) */ - TF2SIMD_FORCE_INLINE Vector3 lerp(const Vector3& v, const tf2Scalar& t) const - { - return Vector3(m_floats[0] + (v.m_floats[0] - m_floats[0]) * t, - m_floats[1] + (v.m_floats[1] - m_floats[1]) * t, - m_floats[2] + (v.m_floats[2] -m_floats[2]) * t); - } - - /**@brief Elementwise multiply this vector by the other - * @param v The other vector */ - TF2SIMD_FORCE_INLINE Vector3& operator*=(const Vector3& v) - { - m_floats[0] *= v.m_floats[0]; m_floats[1] *= v.m_floats[1];m_floats[2] *= v.m_floats[2]; - return *this; - } - - /**@brief Return the x value */ - TF2SIMD_FORCE_INLINE const tf2Scalar& getX() const { return m_floats[0]; } - /**@brief Return the y value */ - TF2SIMD_FORCE_INLINE const tf2Scalar& getY() const { return m_floats[1]; } - /**@brief Return the z value */ - TF2SIMD_FORCE_INLINE const tf2Scalar& getZ() const { return m_floats[2]; } - /**@brief Set the x value */ - TF2SIMD_FORCE_INLINE void setX(tf2Scalar x) { m_floats[0] = x;}; - /**@brief Set the y value */ - TF2SIMD_FORCE_INLINE void setY(tf2Scalar y) { m_floats[1] = y;}; - /**@brief Set the z value */ - TF2SIMD_FORCE_INLINE void setZ(tf2Scalar z) {m_floats[2] = z;}; - /**@brief Set the w value */ - TF2SIMD_FORCE_INLINE void setW(tf2Scalar w) { m_floats[3] = w;}; - /**@brief Return the x value */ - TF2SIMD_FORCE_INLINE const tf2Scalar& x() const { return m_floats[0]; } - /**@brief Return the y value */ - TF2SIMD_FORCE_INLINE const tf2Scalar& y() const { return m_floats[1]; } - /**@brief Return the z value */ - TF2SIMD_FORCE_INLINE const tf2Scalar& z() const { return m_floats[2]; } - /**@brief Return the w value */ - TF2SIMD_FORCE_INLINE const tf2Scalar& w() const { return m_floats[3]; } - - //TF2SIMD_FORCE_INLINE tf2Scalar& operator[](int i) { return (&m_floats[0])[i]; } - //TF2SIMD_FORCE_INLINE const tf2Scalar& operator[](int i) const { return (&m_floats[0])[i]; } - ///operator tf2Scalar*() replaces operator[], using implicit conversion. We added operator != and operator == to avoid pointer comparisons.
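A small usage sketch of the interpolation and product helpers restored above, under the same header assumptions as the previous examples:

#include <tf2/LinearMath/Vector3.h>

int main()
{
  tf2::Vector3 a(1, 0, 0), b(0, 1, 0);

  tf2::Vector3 c = a.cross(b);                // (0, 0, 1)
  tf2Scalar d = a.dot(b);                     // 0
  tf2::Vector3 m = a.lerp(b, tf2Scalar(0.5)); // (0.5, 0.5, 0)

  // lerp interpolates componentwise, so the midpoint of two unit vectors
  // is generally not unit length; renormalize before using it as a direction.
  tf2::Vector3 dir = m.normalized();
  (void)c; (void)d; (void)dir;
  return 0;
}
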
- TF2SIMD_FORCE_INLINE operator tf2Scalar *() { return &m_floats[0]; } - TF2SIMD_FORCE_INLINE operator const tf2Scalar *() const { return &m_floats[0]; } - - TF2SIMD_FORCE_INLINE bool operator==(const Vector3& other) const - { - return ((m_floats[3]==other.m_floats[3]) && (m_floats[2]==other.m_floats[2]) && (m_floats[1]==other.m_floats[1]) && (m_floats[0]==other.m_floats[0])); - } - - TF2SIMD_FORCE_INLINE bool operator!=(const Vector3& other) const - { - return !(*this == other); - } - - /**@brief Set each element to the max of the current values and the values of another Vector3 - * @param other The other Vector3 to compare with - */ - TF2SIMD_FORCE_INLINE void setMax(const Vector3& other) - { - tf2SetMax(m_floats[0], other.m_floats[0]); - tf2SetMax(m_floats[1], other.m_floats[1]); - tf2SetMax(m_floats[2], other.m_floats[2]); - tf2SetMax(m_floats[3], other.w()); - } - /**@brief Set each element to the min of the current values and the values of another Vector3 - * @param other The other Vector3 to compare with - */ - TF2SIMD_FORCE_INLINE void setMin(const Vector3& other) - { - tf2SetMin(m_floats[0], other.m_floats[0]); - tf2SetMin(m_floats[1], other.m_floats[1]); - tf2SetMin(m_floats[2], other.m_floats[2]); - tf2SetMin(m_floats[3], other.w()); - } - - TF2SIMD_FORCE_INLINE void setValue(const tf2Scalar& x, const tf2Scalar& y, const tf2Scalar& z) - { - m_floats[0]=x; - m_floats[1]=y; - m_floats[2]=z; - m_floats[3] = tf2Scalar(0.); - } - - void getSkewSymmetricMatrix(Vector3* v0,Vector3* v1,Vector3* v2) const - { - v0->setValue(0. ,-z() ,y()); - v1->setValue(z() ,0. ,-x()); - v2->setValue(-y() ,x() ,0.); - } - - void setZero() - { - setValue(tf2Scalar(0.),tf2Scalar(0.),tf2Scalar(0.)); - } - - TF2SIMD_FORCE_INLINE bool isZero() const - { - return m_floats[0] == tf2Scalar(0) && m_floats[1] == tf2Scalar(0) && m_floats[2] == tf2Scalar(0); - } - - TF2SIMD_FORCE_INLINE bool fuzzyZero() const - { - return length2() < TF2SIMD_EPSILON; - } - - TF2SIMD_FORCE_INLINE void serialize(struct Vector3Data& dataOut) const; - - TF2SIMD_FORCE_INLINE void deSerialize(const struct Vector3Data& dataIn); - - TF2SIMD_FORCE_INLINE void serializeFloat(struct Vector3FloatData& dataOut) const; - - TF2SIMD_FORCE_INLINE void deSerializeFloat(const struct Vector3FloatData& dataIn); - - TF2SIMD_FORCE_INLINE void serializeDouble(struct Vector3DoubleData& dataOut) const; - - TF2SIMD_FORCE_INLINE void deSerializeDouble(const struct Vector3DoubleData& dataIn); - -}; - -/**@brief Return the sum of two vectors (Point symantics)*/ -TF2SIMD_FORCE_INLINE Vector3 -operator+(const Vector3& v1, const Vector3& v2) -{ - return Vector3(v1.m_floats[0] + v2.m_floats[0], v1.m_floats[1] + v2.m_floats[1], v1.m_floats[2] + v2.m_floats[2]); -} - -/**@brief Return the elementwise product of two vectors */ -TF2SIMD_FORCE_INLINE Vector3 -operator*(const Vector3& v1, const Vector3& v2) -{ - return Vector3(v1.m_floats[0] * v2.m_floats[0], v1.m_floats[1] * v2.m_floats[1], v1.m_floats[2] * v2.m_floats[2]); -} - -/**@brief Return the difference between two vectors */ -TF2SIMD_FORCE_INLINE Vector3 -operator-(const Vector3& v1, const Vector3& v2) -{ - return Vector3(v1.m_floats[0] - v2.m_floats[0], v1.m_floats[1] - v2.m_floats[1], v1.m_floats[2] - v2.m_floats[2]); -} -/**@brief Return the negative of the vector */ -TF2SIMD_FORCE_INLINE Vector3 -operator-(const Vector3& v) -{ - return Vector3(-v.m_floats[0], -v.m_floats[1], -v.m_floats[2]); -} - -/**@brief Return the vector scaled by s */ -TF2SIMD_FORCE_INLINE Vector3 -operator*(const Vector3& v, const 
tf2Scalar& s) -{ - return Vector3(v.m_floats[0] * s, v.m_floats[1] * s, v.m_floats[2] * s); -} - -/**@brief Return the vector scaled by s */ -TF2SIMD_FORCE_INLINE Vector3 -operator*(const tf2Scalar& s, const Vector3& v) -{ - return v * s; -} - -/**@brief Return the vector inversely scaled by s */ -TF2SIMD_FORCE_INLINE Vector3 -operator/(const Vector3& v, const tf2Scalar& s) -{ - tf2FullAssert(s != tf2Scalar(0.0)); - return v * (tf2Scalar(1.0) / s); -} - -/**@brief Return the vector inversely scaled by s */ -TF2SIMD_FORCE_INLINE Vector3 -operator/(const Vector3& v1, const Vector3& v2) -{ - return Vector3(v1.m_floats[0] / v2.m_floats[0],v1.m_floats[1] / v2.m_floats[1],v1.m_floats[2] / v2.m_floats[2]); -} - -/**@brief Return the dot product between two vectors */ -TF2SIMD_FORCE_INLINE tf2Scalar -tf2Dot(const Vector3& v1, const Vector3& v2) -{ - return v1.dot(v2); -} - - -/**@brief Return the distance squared between two vectors */ -TF2SIMD_FORCE_INLINE tf2Scalar -tf2Distance2(const Vector3& v1, const Vector3& v2) -{ - return v1.distance2(v2); -} - - -/**@brief Return the distance between two vectors */ -TF2SIMD_FORCE_INLINE tf2Scalar -tf2Distance(const Vector3& v1, const Vector3& v2) -{ - return v1.distance(v2); -} - -/**@brief Return the angle between two vectors */ -TF2SIMD_FORCE_INLINE tf2Scalar -tf2Angle(const Vector3& v1, const Vector3& v2) -{ - return v1.angle(v2); -} - -/**@brief Return the cross product of two vectors */ -TF2SIMD_FORCE_INLINE Vector3 -tf2Cross(const Vector3& v1, const Vector3& v2) -{ - return v1.cross(v2); -} - -TF2SIMD_FORCE_INLINE tf2Scalar -tf2Triple(const Vector3& v1, const Vector3& v2, const Vector3& v3) -{ - return v1.triple(v2, v3); -} - -/**@brief Return the linear interpolation between two vectors - * @param v1 One vector - * @param v2 The other vector - * @param t The ration of this to v (t = 0 => return v1, t=1 => return v2) */ -TF2SIMD_FORCE_INLINE Vector3 -lerp(const Vector3& v1, const Vector3& v2, const tf2Scalar& t) -{ - return v1.lerp(v2, t); -} - - - -TF2SIMD_FORCE_INLINE tf2Scalar Vector3::distance2(const Vector3& v) const -{ - return (v - *this).length2(); -} - -TF2SIMD_FORCE_INLINE tf2Scalar Vector3::distance(const Vector3& v) const -{ - return (v - *this).length(); -} - -TF2SIMD_FORCE_INLINE Vector3 Vector3::normalized() const -{ - return *this / length(); -} - -TF2SIMD_FORCE_INLINE Vector3 Vector3::rotate( const Vector3& wAxis, const tf2Scalar angle ) const -{ - // wAxis must be a unit lenght vector - - Vector3 o = wAxis * wAxis.dot( *this ); - Vector3 x = *this - o; - Vector3 y; - - y = wAxis.cross( *this ); - - return ( o + x * tf2Cos( angle ) + y * tf2Sin( angle ) ); -} - -class tf2Vector4 : public Vector3 -{ -public: - - TF2SIMD_FORCE_INLINE tf2Vector4() {} - - - TF2SIMD_FORCE_INLINE tf2Vector4(const tf2Scalar& x, const tf2Scalar& y, const tf2Scalar& z,const tf2Scalar& w) - : Vector3(x,y,z) - { - m_floats[3] = w; - } - - - TF2SIMD_FORCE_INLINE tf2Vector4 absolute4() const - { - return tf2Vector4( - tf2Fabs(m_floats[0]), - tf2Fabs(m_floats[1]), - tf2Fabs(m_floats[2]), - tf2Fabs(m_floats[3])); - } - - - - tf2Scalar getW() const { return m_floats[3];} - - - TF2SIMD_FORCE_INLINE int maxAxis4() const - { - int maxIndex = -1; - tf2Scalar maxVal = tf2Scalar(-TF2_LARGE_FLOAT); - if (m_floats[0] > maxVal) - { - maxIndex = 0; - maxVal = m_floats[0]; - } - if (m_floats[1] > maxVal) - { - maxIndex = 1; - maxVal = m_floats[1]; - } - if (m_floats[2] > maxVal) - { - maxIndex = 2; - maxVal =m_floats[2]; - } - if (m_floats[3] > maxVal) - { - maxIndex = 3; - } 
- - - - - return maxIndex; - - } - - - TF2SIMD_FORCE_INLINE int minAxis4() const - { - int minIndex = -1; - tf2Scalar minVal = tf2Scalar(TF2_LARGE_FLOAT); - if (m_floats[0] < minVal) - { - minIndex = 0; - minVal = m_floats[0]; - } - if (m_floats[1] < minVal) - { - minIndex = 1; - minVal = m_floats[1]; - } - if (m_floats[2] < minVal) - { - minIndex = 2; - minVal =m_floats[2]; - } - if (m_floats[3] < minVal) - { - minIndex = 3; - } - - return minIndex; - - } - - - TF2SIMD_FORCE_INLINE int closestAxis4() const - { - return absolute4().maxAxis4(); - } - - - - - /**@brief Set x,y,z and zero w - * @param x Value of x - * @param y Value of y - * @param z Value of z - */ - - -/* void getValue(tf2Scalar *m) const - { - m[0] = m_floats[0]; - m[1] = m_floats[1]; - m[2] =m_floats[2]; - } -*/ -/**@brief Set the values - * @param x Value of x - * @param y Value of y - * @param z Value of z - * @param w Value of w - */ - TF2SIMD_FORCE_INLINE void setValue(const tf2Scalar& x, const tf2Scalar& y, const tf2Scalar& z,const tf2Scalar& w) - { - m_floats[0]=x; - m_floats[1]=y; - m_floats[2]=z; - m_floats[3]=w; - } - - -}; - - -///tf2SwapVector3Endian swaps vector endianness, useful for network and cross-platform serialization -TF2SIMD_FORCE_INLINE void tf2SwapScalarEndian(const tf2Scalar& sourceVal, tf2Scalar& destVal) -{ - unsigned char* dest = (unsigned char*) &destVal; - const unsigned char* src = (const unsigned char*) &sourceVal; - dest[0] = src[7]; - dest[1] = src[6]; - dest[2] = src[5]; - dest[3] = src[4]; - dest[4] = src[3]; - dest[5] = src[2]; - dest[6] = src[1]; - dest[7] = src[0]; -} -///tf2SwapVector3Endian swaps vector endianness, useful for network and cross-platform serialization -TF2SIMD_FORCE_INLINE void tf2SwapVector3Endian(const Vector3& sourceVec, Vector3& destVec) -{ - for (int i=0;i<4;i++) - { - tf2SwapScalarEndian(sourceVec[i],destVec[i]); - } - -} - -///tf2UnSwapVector3Endian swaps vector endianness, useful for network and cross-platform serialization -TF2SIMD_FORCE_INLINE void tf2UnSwapVector3Endian(Vector3& vector) -{ - - Vector3 swappedVec; - for (int i=0;i<4;i++) - { - tf2SwapScalarEndian(vector[i],swappedVec[i]); - } - vector = swappedVec; -} - -TF2SIMD_FORCE_INLINE void tf2PlaneSpace1 (const Vector3& n, Vector3& p, Vector3& q) -{ - if (tf2Fabs(n.z()) > TF2SIMDSQRT12) { - // choose p in y-z plane - tf2Scalar a = n[1]*n[1] + n[2]*n[2]; - tf2Scalar k = tf2RecipSqrt (a); - p.setValue(0,-n[2]*k,n[1]*k); - // set q = n x p - q.setValue(a*k,-n[0]*p[2],n[0]*p[1]); - } - else { - // choose p in x-y plane - tf2Scalar a = n.x()*n.x() + n.y()*n.y(); - tf2Scalar k = tf2RecipSqrt (a); - p.setValue(-n.y()*k,n.x()*k,0); - // set q = n x p - q.setValue(-n.z()*p.y(),n.z()*p.x(),a*k); - } -} - - -struct Vector3FloatData -{ - float m_floats[4]; -}; - -struct Vector3DoubleData -{ - double m_floats[4]; - -}; - -TF2SIMD_FORCE_INLINE void Vector3::serializeFloat(struct Vector3FloatData& dataOut) const -{ - ///could also do a memcpy, check if it is worth it - for (int i=0;i<4;i++) - dataOut.m_floats[i] = float(m_floats[i]); -} - -TF2SIMD_FORCE_INLINE void Vector3::deSerializeFloat(const struct Vector3FloatData& dataIn) -{ - for (int i=0;i<4;i++) - m_floats[i] = tf2Scalar(dataIn.m_floats[i]); -} - - -TF2SIMD_FORCE_INLINE void Vector3::serializeDouble(struct Vector3DoubleData& dataOut) const -{ - ///could also do a memcpy, check if it is worth it - for (int i=0;i<4;i++) - dataOut.m_floats[i] = double(m_floats[i]); -} - -TF2SIMD_FORCE_INLINE void Vector3::deSerializeDouble(const struct Vector3DoubleData& 
dataIn) -{ - for (int i=0;i<4;i++) - m_floats[i] = tf2Scalar(dataIn.m_floats[i]); -} - - -TF2SIMD_FORCE_INLINE void Vector3::serialize(struct Vector3Data& dataOut) const -{ - ///could also do a memcpy, check if it is worth it - for (int i=0;i<4;i++) - dataOut.m_floats[i] = m_floats[i]; -} - -TF2SIMD_FORCE_INLINE void Vector3::deSerialize(const struct Vector3Data& dataIn) -{ - for (int i=0;i<4;i++) - m_floats[i] = dataIn.m_floats[i]; -} - -} - -#endif //TF2_VECTOR3_H diff --git a/src/geometry2/tf2/include/tf2/buffer_core.h b/src/geometry2/tf2/include/tf2/buffer_core.h deleted file mode 100644 index 58ca1a5..0000000 --- a/src/geometry2/tf2/include/tf2/buffer_core.h +++ /dev/null @@ -1,433 +0,0 @@ -/* - * Copyright (c) 2008, Willow Garage, Inc. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the Willow Garage, Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ - -/** \author Tully Foote */ - -#ifndef TF2_BUFFER_CORE_H -#define TF2_BUFFER_CORE_H - -#include "transform_storage.h" - -#include - -#include - -#include "ros/duration.h" -#include "ros/time.h" -//#include "geometry_msgs/TwistStamped.h" -#include "geometry_msgs/TransformStamped.h" - -//////////////////////////backwards startup for porting -//#include "tf/tf.h" - -#include -#include -#include -#include - -namespace tf2 -{ - -typedef std::pair P_TimeAndFrameID; -typedef uint32_t TransformableCallbackHandle; -typedef uint64_t TransformableRequestHandle; - -class TimeCacheInterface; -typedef boost::shared_ptr TimeCacheInterfacePtr; - -enum TransformableResult -{ - TransformAvailable, - TransformFailure, -}; - -/** \brief A Class which provides coordinate transforms between any two frames in a system. - * - * This class provides a simple interface to allow recording and lookup of - * relationships between arbitrary frames of the system. - * - * libTF assumes that there is a tree of coordinate frame transforms which define the relationship between all coordinate frames. - * For example your typical robot would have a transform from global to real world. And then from base to hand, and from base to head. 
- * But Base to Hand really is composed of base to shoulder to elbow to wrist to hand. - * libTF is designed to take care of all the intermediate steps for you. - * - * Internal Representation - * libTF will store frames with the parameters necessary for generating the transform into that frame from it's parent and a reference to the parent frame. - * Frames are designated using an std::string - * 0 is a frame without a parent (the top of a tree) - * The positions of frames over time must be pushed in. - * - * All function calls which pass frame ids can potentially throw the exception tf::LookupException - */ -class BufferCore -{ -public: - /************* Constants ***********************/ - static const int DEFAULT_CACHE_TIME = 10; //!< The default amount of time to cache data in seconds - static const uint32_t MAX_GRAPH_DEPTH = 1000UL; //!< Maximum graph search depth (deeper graphs will be assumed to have loops) - - /** Constructor - * \param interpolating Whether to interpolate, if this is false the closest value will be returned - * \param cache_time How long to keep a history of transforms in nanoseconds - * - */ - BufferCore(ros::Duration cache_time_ = ros::Duration(DEFAULT_CACHE_TIME)); - virtual ~BufferCore(void); - - /** \brief Clear all data */ - void clear(); - - /** \brief Add transform information to the tf data structure - * \param transform The transform to store - * \param authority The source of the information for this transform - * \param is_static Record this transform as a static transform. It will be good across all time. (This cannot be changed after the first call.) - * \return True unless an error occured - */ - bool setTransform(const geometry_msgs::TransformStamped& transform, const std::string & authority, bool is_static = false); - - /*********** Accessors *************/ - - /** \brief Get the transform between two frames by frame ID. - * \param target_frame The frame to which data should be transformed - * \param source_frame The frame where the data originated - * \param time The time at which the value of the transform is desired. (0 will get the latest) - * \return The transform between the frames - * - * Possible exceptions tf2::LookupException, tf2::ConnectivityException, - * tf2::ExtrapolationException, tf2::InvalidArgumentException - */ - geometry_msgs::TransformStamped - lookupTransform(const std::string& target_frame, const std::string& source_frame, - const ros::Time& time) const; - - /** \brief Get the transform between two frames by frame ID assuming fixed frame. - * \param target_frame The frame to which data should be transformed - * \param target_time The time to which the data should be transformed. (0 will get the latest) - * \param source_frame The frame where the data originated - * \param source_time The time at which the source_frame should be evaluated. (0 will get the latest) - * \param fixed_frame The frame in which to assume the transform is constant in time. 
- * \return The transform between the frames - * - * Possible exceptions tf2::LookupException, tf2::ConnectivityException, - * tf2::ExtrapolationException, tf2::InvalidArgumentException - */ - - geometry_msgs::TransformStamped - lookupTransform(const std::string& target_frame, const ros::Time& target_time, - const std::string& source_frame, const ros::Time& source_time, - const std::string& fixed_frame) const; - - /* \brief Lookup the twist of the tracking_frame with respect to the observation frame in the reference_frame using the reference point - * \param tracking_frame The frame to track - * \param observation_frame The frame from which to measure the twist - * \param reference_frame The reference frame in which to express the twist - * \param reference_point The reference point with which to express the twist - * \param reference_point_frame The frame_id in which the reference point is expressed - * \param time The time at which to get the velocity - * \param duration The period over which to average - * \return twist The twist output - * - * This will compute the average velocity on the interval - * (time - duration/2, time+duration/2). If that is too close to the most - * recent reading, in which case it will shift the interval up to - * duration/2 to prevent extrapolation. - * - * Possible exceptions tf2::LookupException, tf2::ConnectivityException, - * tf2::ExtrapolationException, tf2::InvalidArgumentException - * - * New in geometry 1.1 - */ - /* - geometry_msgs::Twist - lookupTwist(const std::string& tracking_frame, const std::string& observation_frame, const std::string& reference_frame, - const tf::Point & reference_point, const std::string& reference_point_frame, - const ros::Time& time, const ros::Duration& averaging_interval) const; - */ - /* \brief lookup the twist of the tracking frame with respect to the observational frame - * - * This is a simplified version of - * lookupTwist with it assumed that the reference point is the - * origin of the tracking frame, and the reference frame is the - * observation frame. 
- * - * Possible exceptions tf2::LookupException, tf2::ConnectivityException, - * tf2::ExtrapolationException, tf2::InvalidArgumentException - * - * New in geometry 1.1 - */ - /* - geometry_msgs::Twist - lookupTwist(const std::string& tracking_frame, const std::string& observation_frame, - const ros::Time& time, const ros::Duration& averaging_interval) const; - */ - /** \brief Test if a transform is possible - * \param target_frame The frame into which to transform - * \param source_frame The frame from which to transform - * \param time The time at which to transform - * \param error_msg A pointer to a string which will be filled with why the transform failed, if not NULL - * \return True if the transform is possible, false otherwise - */ - bool canTransform(const std::string& target_frame, const std::string& source_frame, - const ros::Time& time, std::string* error_msg = NULL) const; - - /** \brief Test if a transform is possible - * \param target_frame The frame into which to transform - * \param target_time The time into which to transform - * \param source_frame The frame from which to transform - * \param source_time The time from which to transform - * \param fixed_frame The frame in which to treat the transform as constant in time - * \param error_msg A pointer to a string which will be filled with why the transform failed, if not NULL - * \return True if the transform is possible, false otherwise - */ - bool canTransform(const std::string& target_frame, const ros::Time& target_time, - const std::string& source_frame, const ros::Time& source_time, - const std::string& fixed_frame, std::string* error_msg = NULL) const; - - /** \brief A way to see what frames have been cached in yaml format - * Useful for debugging tools - */ - std::string allFramesAsYAML(double current_time) const; - - /** Backwards compatibility for #84 - */ - std::string allFramesAsYAML() const; - - /** \brief A way to see what frames have been cached - * Useful for debugging - */ - std::string allFramesAsString() const; - - typedef boost::function TransformableCallback; - - /// \brief Internal use only - TransformableCallbackHandle addTransformableCallback(const TransformableCallback& cb); - /// \brief Internal use only - void removeTransformableCallback(TransformableCallbackHandle handle); - /// \brief Internal use only - TransformableRequestHandle addTransformableRequest(TransformableCallbackHandle handle, const std::string& target_frame, const std::string& source_frame, ros::Time time); - /// \brief Internal use only - void cancelTransformableRequest(TransformableRequestHandle handle); - - - - - // Tell the buffer that there are multiple threads serviciing it. - // This is useful for derived classes to know if they can block or not. 
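The accessors declared above are easiest to see end-to-end. A minimal sketch, assuming a ROS Melodic environment; the frame names and authority string are illustrative, and ros::Time is initialized by hand because no node is running:

#include <tf2/buffer_core.h>
#include <geometry_msgs/TransformStamped.h>

int main()
{
  ros::Time::init();      // stand-alone use; a ros::init'd node does this for you
  tf2::BufferCore buffer; // default cache length per DEFAULT_CACHE_TIME above

  geometry_msgs::TransformStamped t;
  t.header.stamp = ros::Time(1.0);
  t.header.frame_id = "map";
  t.child_frame_id = "base_link";
  t.transform.translation.x = 2.0;
  t.transform.rotation.w = 1.0; // identity rotation
  buffer.setTransform(t, "example_authority", true /* static */);

  if (buffer.canTransform("map", "base_link", ros::Time(0)))
  {
    // ros::Time(0) asks for the latest available transform.
    geometry_msgs::TransformStamped out =
        buffer.lookupTransform("map", "base_link", ros::Time(0));
    (void)out;
  }
  return 0;
}
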
-  void setUsingDedicatedThread(bool value) { using_dedicated_thread_ = value;};
-  // Get the state of using_dedicated_thread_
-  bool isUsingDedicatedThread() const { return using_dedicated_thread_;};
-
-
-
-
-  /* Backwards compatibility section for tf::Transformer; you should not use these
-   */
-
-  /**
-   * \brief Add a callback that happens when a new transform has arrived
-   *
-   * \param callback The callback, of the form void func();
-   * \return A boost::signals2::connection object that can be used to remove this
-   * listener
-   */
-  boost::signals2::connection _addTransformsChangedListener(boost::function<void(void)> callback);
-  void _removeTransformsChangedListener(boost::signals2::connection c);
-
-
-  /**@brief Check if a frame exists in the tree
-   * @param frame_id_str The frame id in question  */
-  bool _frameExists(const std::string& frame_id_str) const;
-
-  /**@brief Fill the parent of a frame.
-   * @param frame_id The frame id of the frame in question
-   * @param parent The reference to the string to fill the parent
-   * Returns true unless "NO_PARENT" */
-  bool _getParent(const std::string& frame_id, ros::Time time, std::string& parent) const;
-
-  /** \brief A way to get a std::vector of available frame ids */
-  void _getFrameStrings(std::vector<std::string>& ids) const;
-
-
-  CompactFrameID _lookupFrameNumber(const std::string& frameid_str) const {
-    return lookupFrameNumber(frameid_str);
-  }
-  CompactFrameID _lookupOrInsertFrameNumber(const std::string& frameid_str) {
-    return lookupOrInsertFrameNumber(frameid_str);
-  }
-
-  int _getLatestCommonTime(CompactFrameID target_frame, CompactFrameID source_frame, ros::Time& time, std::string* error_string) const {
-    boost::mutex::scoped_lock lock(frame_mutex_);
-    return getLatestCommonTime(target_frame, source_frame, time, error_string);
-  }
-
-  CompactFrameID _validateFrameId(const char* function_name_arg, const std::string& frame_id) const {
-    return validateFrameId(function_name_arg, frame_id);
-  }
-
-  /**@brief Get the duration over which this transformer will cache */
-  ros::Duration getCacheLength() { return cache_time_;}
-
-  /** \brief Backwards compatibility. A way to see what frames have been cached.
-   * Useful for debugging
-   */
-  std::string _allFramesAsDot(double current_time) const;
-  std::string _allFramesAsDot() const;
-
-  /** \brief Backwards compatibility. A way to see what frames are in a chain.
-   * Useful for debugging
-   */
-  void _chainAsVector(const std::string & target_frame, ros::Time target_time, const std::string & source_frame, ros::Time source_time, const std::string & fixed_frame, std::vector<std::string>& output) const;
-
-private:
-
-  /** \brief A way to see what frames have been cached
-   * Useful for debugging. Use this call internally.
-   */
-  std::string allFramesAsStringNoLock() const;
-
-
-  /******************** Internal Storage ****************/
-
-  /** \brief The pointers to potential frames that the tree can be made of.
-   * The frames will be dynamically allocated at run time when set the first time. */
-  typedef std::vector<TimeCacheInterfacePtr> V_TimeCacheInterface;
-  V_TimeCacheInterface frames_;
-
-  /** \brief A mutex to protect testing and allocating new frames on the above vector. */
-  mutable boost::mutex frame_mutex_;
-
-  /** \brief A map from string frame ids to CompactFrameID */
-  typedef boost::unordered_map<std::string, CompactFrameID> M_StringToCompactFrameID;
-  M_StringToCompactFrameID frameIDs_;
-  /** \brief A map from CompactFrameID frame_id_numbers to string for debugging and output */
-  std::vector<std::string> frameIDs_reverse;
-  /** \brief A map to lookup the most recent authority for a given frame */
-  std::map<CompactFrameID, std::string> frame_authority_;
-
-
-  /// How long to cache transform history
-  ros::Duration cache_time_;
-
-  typedef boost::unordered_map<TransformableCallbackHandle, TransformableCallback> M_TransformableCallback;
-  M_TransformableCallback transformable_callbacks_;
-  uint32_t transformable_callbacks_counter_;
-  boost::mutex transformable_callbacks_mutex_;
-
-  struct TransformableRequest
-  {
-    ros::Time time;
-    TransformableRequestHandle request_handle;
-    TransformableCallbackHandle cb_handle;
-    CompactFrameID target_id;
-    CompactFrameID source_id;
-    std::string target_string;
-    std::string source_string;
-  };
-  typedef std::vector<TransformableRequest> V_TransformableRequest;
-  V_TransformableRequest transformable_requests_;
-  boost::mutex transformable_requests_mutex_;
-  uint64_t transformable_requests_counter_;
-
-  struct RemoveRequestByCallback;
-  struct RemoveRequestByID;
-
-  // Backwards compatibility for tf message_filter
-  typedef boost::signals2::signal<void(void)> TransformsChangedSignal;
-  /// Signal which is fired whenever new transform data has arrived, from the thread the data arrived in
-  TransformsChangedSignal _transforms_changed_;
-
-
-  /************************* Internal Functions ****************************/
-
-  /** \brief An accessor to get a frame, which will throw an exception if the frame is not there.
-   * \param frame_number The frameID of the desired Reference Frame
-   *
-   * This is an internal function which will get the pointer to the frame associated with the frame id
-   * Possible Exception: tf::LookupException
-   */
-  TimeCacheInterfacePtr getFrame(CompactFrameID c_frame_id) const;
-
-  TimeCacheInterfacePtr allocateFrame(CompactFrameID cfid, bool is_static);
-
-
-  bool warnFrameId(const char* function_name_arg, const std::string& frame_id) const;
-  CompactFrameID validateFrameId(const char* function_name_arg, const std::string& frame_id) const;
-
-  /// String to number for frame lookup; returns 0 if the frame is unknown
-  CompactFrameID lookupFrameNumber(const std::string& frameid_str) const;
-
-  /// String to number for frame lookup with dynamic allocation of new frames
-  CompactFrameID lookupOrInsertFrameNumber(const std::string& frameid_str);
-
-  /// Number to string frame lookup; may throw LookupException if number invalid
-  const std::string& lookupFrameString(CompactFrameID frame_id_num) const;
-
-  void createConnectivityErrorString(CompactFrameID source_frame, CompactFrameID target_frame, std::string* out) const;
-
-  /**@brief Return the latest rostime which is common across the spanning set
-   * zero if fails to cross */
-  int getLatestCommonTime(CompactFrameID target_frame, CompactFrameID source_frame, ros::Time& time, std::string* error_string) const;
-
-  template<typename F>
-  int walkToTopParent(F& f, ros::Time time, CompactFrameID target_id, CompactFrameID source_id, std::string* error_string) const;
-
-  /**@brief Traverse the transform tree. If frame_chain is not NULL, store the traversed frame tree in vector frame_chain.
-   * */
-  template<typename F>
-  int walkToTopParent(F& f, ros::Time time, CompactFrameID target_id, CompactFrameID source_id, std::string* error_string, std::vector<CompactFrameID>* frame_chain) const;
-
-  void testTransformableRequests();
-  bool canTransformInternal(CompactFrameID target_id, CompactFrameID source_id,
-                            const ros::Time& time, std::string* error_msg) const;
-  bool canTransformNoLock(CompactFrameID target_id, CompactFrameID source_id,
-                          const ros::Time& time, std::string* error_msg) const;
-
-
-  // Whether it is safe to use canTransform with a timeout. (If another thread is not provided it will always timeout.)
-  bool using_dedicated_thread_;
-
-public:
-  friend class TestBufferCore; // For unit testing
-
-};
-
-/** A helper class for testing internal APIs */
-class TestBufferCore
-{
-public:
-  int _walkToTopParent(BufferCore& buffer, ros::Time time, CompactFrameID target_id, CompactFrameID source_id, std::string* error_string, std::vector<CompactFrameID>* frame_chain) const;
-  const std::string& _lookupFrameString(BufferCore& buffer, CompactFrameID frame_id_num) const
-  {
-    return buffer.lookupFrameString(frame_id_num);
-  }
-};
-}
-
-#endif //TF2_CORE_H
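Before the next deleted header, a quick orientation on how the BufferCore API above is typically driven. This is a hedged sketch, not code from this repository; the frame names and the authority string are invented:

    #include <tf2/buffer_core.h>
    #include <geometry_msgs/TransformStamped.h>

    int main()
    {
      tf2::BufferCore buffer(ros::Duration(10.0));  // keep 10s of history

      geometry_msgs::TransformStamped t;
      t.header.stamp = ros::Time(1.0);
      t.header.frame_id = "base_link";              // parent frame (made up)
      t.child_frame_id = "laser";                   // child frame (made up)
      t.transform.translation.x = 0.1;
      t.transform.rotation.w = 1.0;                 // identity rotation
      buffer.setTransform(t, "example_authority");

      // ros::Time() asks for the latest common time between the two frames
      geometry_msgs::TransformStamped out =
          buffer.lookupTransform("base_link", "laser", ros::Time());
      return 0;
    }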
diff --git a/src/geometry2/tf2/include/tf2/convert.h b/src/geometry2/tf2/include/tf2/convert.h
deleted file mode 100644
index 36efd5b..0000000
--- a/src/geometry2/tf2/include/tf2/convert.h
+++ /dev/null
@@ -1,130 +0,0 @@
-/*
- * Copyright (c) 2013, Open Source Robotics Foundation
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- *    * Redistributions of source code must retain the above copyright
- *      notice, this list of conditions and the following disclaimer.
- *    * Redistributions in binary form must reproduce the above copyright
- *      notice, this list of conditions and the following disclaimer in the
- *      documentation and/or other materials provided with the distribution.
- *    * Neither the name of the Willow Garage, Inc. nor the names of its
- *      contributors may be used to endorse or promote products derived from
- *      this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-/** \author Tully Foote */
-
-#ifndef TF2_CONVERT_H
-#define TF2_CONVERT_H
-
-
-#include <tf2/transform_datatypes.h>
-#include <tf2/exceptions.h>
-#include <geometry_msgs/TransformStamped.h>
-#include <tf2/impl/convert.h>
-
-namespace tf2 {
-
-/**\brief The templated function expected to be able to do a transform
- *
- * This is the method which tf2 will use to try to apply a transform for any given datatype.
- * \param data_in The data to be transformed.
- * \param data_out A reference to the output data. Note this can point to data in and the method should be mutation safe.
- * \param transform The transform to apply to data_in to fill data_out.
- *
- * This method needs to be implemented by client library developers
- */
-template <class T>
-  void doTransform(const T& data_in, T& data_out, const geometry_msgs::TransformStamped& transform);
-
-/**\brief Get the timestamp from data
- * \param t The data input.
- * \return The timestamp associated with the data. The lifetime of the returned
- * reference is bound to the lifetime of the argument.
- */
-template <class T>
-  const ros::Time& getTimestamp(const T& t);
-
-/**\brief Get the frame_id from data
- * \param t The data input.
- * \return The frame_id associated with the data. The lifetime of the returned
- * reference is bound to the lifetime of the argument.
- */
-template <class T>
-  const std::string& getFrameId(const T& t);
-
-
-
-/* An implementation for Stamped<P> datatypes */
-template <class P>
-  const ros::Time& getTimestamp(const tf2::Stamped<P>& t)
-  {
-    return t.stamp_;
-  }
-
-/* An implementation for Stamped<P> datatypes */
-template <class P>
-  const std::string& getFrameId(const tf2::Stamped<P>& t)
-  {
-    return t.frame_id_;
-  }
-
-/** Function that converts from one type to a ROS message type. It has to be
- * implemented by each data type in tf2_* (except ROS messages) as it is
- * used in the "convert" function.
- * \param a an object of whatever type
- * \return the conversion as a ROS message
- */
-template<typename A, typename B>
-  B toMsg(const A& a);
-
-/** Function that converts from a ROS message type to another type. It has to be
- * implemented by each data type in tf2_* (except ROS messages) as it is used
- * in the "convert" function.
- * \param a a ROS message to convert from
- * \param b the object to convert to
- */
-template<typename A, typename B>
-  void fromMsg(const A&, B& b);
-
-/** Function that converts any type to any type (messages or not).
- * Matching toMsg and fromMsg conversion functions need to exist.
- * If they don't exist or do not apply (for example, if your two
- * classes are ROS messages), just write a specialization of the function.
- * \param a an object to convert from
- * \param b the object to convert to
- */
-template <class A, class B>
-  void convert(const A& a, B& b)
-  {
-    //printf("In double type convert\n");
-    impl::Converter<ros::message_traits::IsMessage<A>::value, ros::message_traits::IsMessage<B>::value>::convert(a, b);
-  }
-
-template <class A>
-  void convert(const A& a1, A& a2)
-  {
-    //printf("In single type convert\n");
-    if(&a1 != &a2)
-      a2 = a1;
-  }
-
-
-}
-
-#endif //TF2_CONVERT_H
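The toMsg/fromMsg/convert trio above is the entire extension surface of the conversion interface. A hedged sketch of the caller's view, assuming the conversions from tf2_geometry_msgs are available:

    #include <tf2/convert.h>
    #include <tf2_geometry_msgs/tf2_geometry_msgs.h>

    void demo()
    {
      tf2::Stamped<tf2::Transform> pose;
      geometry_msgs::TransformStamped msg;

      tf2::convert(pose, msg);  // non-message -> message: dispatches to toMsg()
      tf2::convert(msg, pose);  // message -> non-message: dispatches to fromMsg()
    }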
diff --git a/src/geometry2/tf2/include/tf2/exceptions.h b/src/geometry2/tf2/include/tf2/exceptions.h
deleted file mode 100644
index 662fa38..0000000
--- a/src/geometry2/tf2/include/tf2/exceptions.h
+++ /dev/null
@@ -1,110 +0,0 @@
-/*
- * Copyright (c) 2008, Willow Garage, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- *    * Redistributions of source code must retain the above copyright
- *      notice, this list of conditions and the following disclaimer.
- *    * Redistributions in binary form must reproduce the above copyright
- *      notice, this list of conditions and the following disclaimer in the
- *      documentation and/or other materials provided with the distribution.
- *    * Neither the name of the Willow Garage, Inc. nor the names of its
- *      contributors may be used to endorse or promote products derived from
- *      this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-/** \author Tully Foote */
-
-#ifndef TF2_EXCEPTIONS_H
-#define TF2_EXCEPTIONS_H
-
-#include <stdexcept>
-
-namespace tf2{
-
-/** \brief A base class for all tf2 exceptions
- * This inherits from std::runtime_error
- */
-class TransformException: public std::runtime_error
-{
-public:
-  TransformException(const std::string errorDescription) : std::runtime_error(errorDescription) { ; };
-};
-
-
-/** \brief An exception class to notify of no connection
- *
- * This is an exception class to be thrown in the case
- * that the Reference Frame tree is not connected between
- * the frames requested. */
-class ConnectivityException: public TransformException
-{
-public:
-  ConnectivityException(const std::string errorDescription) : tf2::TransformException(errorDescription) { ; };
-};
-
-
-/** \brief An exception class to notify of bad frame number
- *
- * This is an exception class to be thrown in the case that
- * a frame not in the graph has been attempted to be accessed.
- * The most common reason for this is that the frame is not
- * being published, or a parent frame was not set correctly
- * causing the tree to be broken.
- */
-class LookupException: public TransformException
-{
-public:
-  LookupException(const std::string errorDescription) : tf2::TransformException(errorDescription) { ; };
-};
-
-/** \brief An exception class to notify that the requested value would have required extrapolation beyond current limits.
- *
- */
-class ExtrapolationException: public TransformException
-{
-public:
-  ExtrapolationException(const std::string errorDescription) : tf2::TransformException(errorDescription) { ; };
-};
-
-/** \brief An exception class to notify that one of the arguments is invalid
- *
- * Usually it's an uninitialized Quaternion (0,0,0,0)
- *
- */
-class InvalidArgumentException: public TransformException
-{
-public:
-  InvalidArgumentException(const std::string errorDescription) : tf2::TransformException(errorDescription) { ; };
-};
-
-/** \brief An exception class to notify that a timeout has occurred
- *
- *
- */
-class TimeoutException: public TransformException
-{
-public:
-  TimeoutException(const std::string errorDescription) : tf2::TransformException(errorDescription) { ; };
-};
-
-
-}
-
-
-#endif //TF2_EXCEPTIONS_H
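Callers usually catch the specific subclasses first and fall back on TransformException, which is the base of everything declared above. A sketch, assuming a BufferCore named buffer as in the earlier example:

    try
    {
      geometry_msgs::TransformStamped t =
          buffer.lookupTransform("map", "base_link", ros::Time(0));
    }
    catch (const tf2::ExtrapolationException& ex)
    {
      // requested time lies outside the cached history
    }
    catch (const tf2::TransformException& ex)
    {
      // connectivity, lookup, invalid argument, timeout, ...
    }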
diff --git a/src/geometry2/tf2/include/tf2/impl/convert.h b/src/geometry2/tf2/include/tf2/impl/convert.h
deleted file mode 100644
index 6b68ccd..0000000
--- a/src/geometry2/tf2/include/tf2/impl/convert.h
+++ /dev/null
@@ -1,90 +0,0 @@
-/*
- * Copyright (c) 2013, Open Source Robotics Foundation
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- *    * Redistributions of source code must retain the above copyright
- *      notice, this list of conditions and the following disclaimer.
- *    * Redistributions in binary form must reproduce the above copyright
- *      notice, this list of conditions and the following disclaimer in the
- *      documentation and/or other materials provided with the distribution.
- *    * Neither the name of the Willow Garage, Inc. nor the names of its
- *      contributors may be used to endorse or promote products derived from
- *      this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef TF2_IMPL_CONVERT_H
-#define TF2_IMPL_CONVERT_H
-
-namespace tf2 {
-namespace impl {
-
-template <bool IS_MESSAGE_A, bool IS_MESSAGE_B>
-class Converter {
-public:
-  template<typename A, typename B>
-  static void convert(const A& a, B& b);
-};
-
-// The case where both A and B are messages should not happen: if you have two
-// messages that are interchangeable, well, that's against the ROS purpose:
-// only use one type. Worst comes to worst, specialize the original convert
-// function for your types.
-// if B == A, the templated version of convert with only one argument will be
-// used.
-//
-//template <>
-//template <typename A, typename B>
-//inline void Converter<true, true>::convert(const A& a, B& b);
-
-template <>
-template <typename A, typename B>
-inline void Converter<true, false>::convert(const A& a, B& b)
-{
-#ifdef _MSC_VER
-  tf2::fromMsg(a, b);
-#else
-  fromMsg(a, b);
-#endif
-}
-
-template <>
-template <typename A, typename B>
-inline void Converter<false, true>::convert(const A& a, B& b)
-{
-#ifdef _MSC_VER
-  b = tf2::toMsg(a);
-#else
-  b = toMsg(a);
-#endif
-}
-
-template <>
-template <typename A, typename B>
-inline void Converter<false, false>::convert(const A& a, B& b)
-{
-#ifdef _MSC_VER
-  tf2::fromMsg(tf2::toMsg(a), b);
-#else
-  fromMsg(toMsg(a), b);
-#endif
-}
-
-}
-}
-
-#endif //TF2_IMPL_CONVERT_H
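How a library plugs a custom type into this machinery: provide toMsg/fromMsg overloads that the Converter specializations above can find. A hedged sketch; MyPoint is a made-up type, not part of tf2:

    struct MyPoint { double x, y, z; };

    namespace tf2
    {
    inline geometry_msgs::Point toMsg(const MyPoint& p)
    {
      geometry_msgs::Point m;
      m.x = p.x; m.y = p.y; m.z = p.z;
      return m;
    }

    inline void fromMsg(const geometry_msgs::Point& m, MyPoint& p)
    {
      p.x = m.x; p.y = m.y; p.z = m.z;
    }
    }

    // tf2::convert(my_point, point_msg) now resolves through
    // Converter<false, true>::convert, which calls the toMsg() above.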
diff --git a/src/geometry2/tf2/include/tf2/impl/utils.h b/src/geometry2/tf2/include/tf2/impl/utils.h
deleted file mode 100644
index 5b545e5..0000000
--- a/src/geometry2/tf2/include/tf2/impl/utils.h
+++ /dev/null
@@ -1,153 +0,0 @@
-// Copyright 2014 Open Source Robotics Foundation, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef TF2_IMPL_UTILS_H
-#define TF2_IMPL_UTILS_H
-
-#include <tf2/transform_datatypes.h>
-#include <tf2/impl/convert.h>
-#include <geometry_msgs/QuaternionStamped.h>
-
-namespace tf2 {
-namespace impl {
-
-/** Function needed for the generalization of toQuaternion
- * \param q a tf2::Quaternion
- * \return a copy of the same quaternion
- */
-inline
-tf2::Quaternion toQuaternion(const tf2::Quaternion& q) {
-  return q;
-}
-
-/** Function needed for the generalization of toQuaternion
- * \param q a geometry_msgs::Quaternion
- * \return a copy of the same quaternion as a tf2::Quaternion
- */
-inline
-tf2::Quaternion toQuaternion(const geometry_msgs::Quaternion& q) {
-  tf2::Quaternion res;
-  fromMsg(q, res);
-  return res;
-}
-
-/** Function needed for the generalization of toQuaternion
- * \param q a geometry_msgs::QuaternionStamped
- * \return a copy of the same quaternion as a tf2::Quaternion
- */
-inline
-tf2::Quaternion toQuaternion(const geometry_msgs::QuaternionStamped& q) {
-  tf2::Quaternion res;
-  fromMsg(q.quaternion, res);
-  return res;
-}
-
-/** Function needed for the generalization of toQuaternion
- * \param t some tf2::Stamped object
- * \return a copy of the same quaternion as a tf2::Quaternion
- */
-template<typename T>
-tf2::Quaternion toQuaternion(const tf2::Stamped<T>& t) {
-  geometry_msgs::QuaternionStamped q = toMsg(t);
-  return toQuaternion(q);
-}
-
-/** Generic version of toQuaternion. It tries to convert the argument
- * to a geometry_msgs::Quaternion
- * \param t some object
- * \return a copy of the same quaternion as a tf2::Quaternion
- */
-template<typename T>
-tf2::Quaternion toQuaternion(const T& t) {
-  geometry_msgs::Quaternion q = toMsg(t);
-  return toQuaternion(q);
-}
-
-/** The code below is blatantly copied from urdfdom_headers;
- * only the normalization has been added.
- * It computes the Euler roll, pitch and yaw from a tf2::Quaternion.
- * It is equivalent to tf2::Matrix3x3(q).getEulerYPR(yaw, pitch, roll);
- * \param q a tf2::Quaternion
- * \param yaw the computed yaw
- * \param pitch the computed pitch
- * \param roll the computed roll
- */
-inline
-void getEulerYPR(const tf2::Quaternion& q, double &yaw, double &pitch, double &roll)
-{
-  double sqw;
-  double sqx;
-  double sqy;
-  double sqz;
-
-  sqx = q.x() * q.x();
-  sqy = q.y() * q.y();
-  sqz = q.z() * q.z();
-  sqw = q.w() * q.w();
-
-  // Cases derived from https://orbitalstation.wordpress.com/tag/quaternion/
-  double sarg = -2 * (q.x()*q.z() - q.w()*q.y()) / (sqx + sqy + sqz + sqw); /* normalization added from urdfdom_headers */
-  if (sarg <= -0.99999) {
-    pitch = -0.5*M_PI;
-    roll  = 0;
-    yaw   = -2 * atan2(q.y(), q.x());
-  } else if (sarg >= 0.99999) {
-    pitch = 0.5*M_PI;
-    roll  = 0;
-    yaw   = 2 * atan2(q.y(), q.x());
-  } else {
-    pitch = asin(sarg);
-    roll  = atan2(2 * (q.y()*q.z() + q.w()*q.x()), sqw - sqx - sqy + sqz);
-    yaw   = atan2(2 * (q.x()*q.y() + q.w()*q.z()), sqw + sqx - sqy - sqz);
-  }
-}
-
-/** The code below is a simplified version of getEulerYPR that only
- * returns the yaw. It is mostly useful in navigation, where only yaw
- * matters.
- * \param q a tf2::Quaternion
- * \return the computed yaw
- */
-inline
-double getYaw(const tf2::Quaternion& q)
-{
-  double yaw;
-
-  double sqw;
-  double sqx;
-  double sqy;
-  double sqz;
-
-  sqx = q.x() * q.x();
-  sqy = q.y() * q.y();
-  sqz = q.z() * q.z();
-  sqw = q.w() * q.w();
-
-  // Cases derived from https://orbitalstation.wordpress.com/tag/quaternion/
-  double sarg = -2 * (q.x()*q.z() - q.w()*q.y()) / (sqx + sqy + sqz + sqw); /* normalization added from urdfdom_headers */
-
-  if (sarg <= -0.99999) {
-    yaw = -2 * atan2(q.y(), q.x());
-  } else if (sarg >= 0.99999) {
-    yaw = 2 * atan2(q.y(), q.x());
-  } else {
-    yaw = atan2(2 * (q.x()*q.y() + q.w()*q.z()), sqw + sqx - sqy - sqz);
-  }
-  return yaw;
-}
-
-}
-}
-
-#endif //TF2_IMPL_UTILS_H
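For reference, the conversion implemented by getEulerYPR above, in the notation of the code: with $s = -2\,(q_x q_z - q_w q_y)/(q_x^2 + q_y^2 + q_z^2 + q_w^2)$ (the division being the added normalization), the non-degenerate branch computes

$$\mathrm{pitch} = \arcsin(s), \quad
\mathrm{roll} = \operatorname{atan2}\big(2(q_y q_z + q_w q_x),\; q_w^2 - q_x^2 - q_y^2 + q_z^2\big), \quad
\mathrm{yaw} = \operatorname{atan2}\big(2(q_x q_y + q_w q_z),\; q_w^2 + q_x^2 - q_y^2 - q_z^2\big),$$

while near gimbal lock ($|s| \ge 0.99999$) pitch is clamped to $\pm\pi/2$, roll is set to 0, and yaw degenerates to $\mp 2\,\operatorname{atan2}(q_y, q_x)$.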
diff --git a/src/geometry2/tf2/include/tf2/time_cache.h b/src/geometry2/tf2/include/tf2/time_cache.h
deleted file mode 100644
index 8ce9258..0000000
--- a/src/geometry2/tf2/include/tf2/time_cache.h
+++ /dev/null
@@ -1,162 +0,0 @@
-/*
- * Copyright (c) 2008, Willow Garage, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- *    * Redistributions of source code must retain the above copyright
- *      notice, this list of conditions and the following disclaimer.
- *    * Redistributions in binary form must reproduce the above copyright
- *      notice, this list of conditions and the following disclaimer in the
- *      documentation and/or other materials provided with the distribution.
- *    * Neither the name of the Willow Garage, Inc. nor the names of its
- *      contributors may be used to endorse or promote products derived from
- *      this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-/** \author Tully Foote */
-
-#ifndef TF2_TIME_CACHE_H
-#define TF2_TIME_CACHE_H
-
-#include "transform_storage.h"
-
-#include <deque>
-
-#include <sstream>
-
-#include <ros/message_forward.h>
-#include <ros/time.h>
-
-#include <boost/shared_ptr.hpp>
-
-namespace geometry_msgs
-{
-ROS_DECLARE_MESSAGE(TransformStamped);
-}
-
-namespace tf2
-{
-
-typedef std::pair<ros::Time, CompactFrameID> P_TimeAndFrameID;
-
-class TimeCacheInterface
-{
-public:
-  /** \brief Access data from the cache */
-  virtual bool getData(ros::Time time, TransformStorage & data_out, std::string* error_str = 0)=0; // returns false if data unavailable (should be thrown as a LookupException)
-
-  /** \brief Insert data into the cache */
-  virtual bool insertData(const TransformStorage& new_data)=0;
-
-  /** @brief Clear the list of stored values */
-  virtual void clearList()=0;
-
-  /** \brief Retrieve the parent at a specific time */
-  virtual CompactFrameID getParent(ros::Time time, std::string* error_str) = 0;
-
-  /**
-   * \brief Get the latest time stored in this cache, and the parent associated with it. Returns parent = 0 if no data.
-   */
-  virtual P_TimeAndFrameID getLatestTimeAndParent() = 0;
-
-
-  /// Debugging information methods
-  /** @brief Get the length of the stored list */
-  virtual unsigned int getListLength()=0;
-
-  /** @brief Get the latest timestamp cached */
-  virtual ros::Time getLatestTimestamp()=0;
-
-  /** @brief Get the oldest timestamp cached */
-  virtual ros::Time getOldestTimestamp()=0;
-};
-
-typedef boost::shared_ptr<TimeCacheInterface> TimeCacheInterfacePtr;
-
-/** \brief A class to keep a sorted linked list in time
- * This builds and maintains a list of timestamped
- * data. And provides lookup functions to get
- * data out as a function of time. */
-class TimeCache : public TimeCacheInterface
-{
- public:
-  static const int MIN_INTERPOLATION_DISTANCE = 5; //!< Number of nano-seconds to not interpolate below.
-  static const unsigned int MAX_LENGTH_LINKED_LIST = 1000000; //!< Maximum length of linked list, to make sure not to be able to use unlimited memory.
-  static const int64_t DEFAULT_MAX_STORAGE_TIME = 10ULL * 1000000000LL; //!< default value of 10 seconds storage
-
-  TimeCache(ros::Duration max_storage_time = ros::Duration().fromNSec(DEFAULT_MAX_STORAGE_TIME));
-
-
-  /// Virtual methods
-
-  virtual bool getData(ros::Time time, TransformStorage & data_out, std::string* error_str = 0);
-  virtual bool insertData(const TransformStorage& new_data);
-  virtual void clearList();
-  virtual CompactFrameID getParent(ros::Time time, std::string* error_str);
-  virtual P_TimeAndFrameID getLatestTimeAndParent();
-
-  /// Debugging information methods
-  virtual unsigned int getListLength();
-  virtual ros::Time getLatestTimestamp();
-  virtual ros::Time getOldestTimestamp();
-
-
-private:
-  typedef std::deque<TransformStorage> L_TransformStorage;
-  L_TransformStorage storage_;
-
-  ros::Duration max_storage_time_;
-
-
-  /// A helper function for getData
-  // Assumes storage is already locked for it
-  inline uint8_t findClosest(TransformStorage*& one, TransformStorage*& two, ros::Time target_time, std::string* error_str);
-
-  inline void interpolate(const TransformStorage& one, const TransformStorage& two, ros::Time time, TransformStorage& output);
-
-
-  void pruneList();
-
-
-
-};
-
-class StaticCache : public TimeCacheInterface
-{
- public:
-  /// Virtual methods
-
-  virtual bool getData(ros::Time time, TransformStorage & data_out, std::string* error_str = 0); // returns false if data unavailable (should be thrown as a LookupException)
-  virtual bool insertData(const TransformStorage& new_data);
-  virtual void clearList();
-  virtual CompactFrameID getParent(ros::Time time, std::string* error_str);
-  virtual P_TimeAndFrameID getLatestTimeAndParent();
-
-
-  /// Debugging information methods
-  virtual unsigned int getListLength();
-  virtual ros::Time getLatestTimestamp();
-  virtual ros::Time getOldestTimestamp();
-
-
-private:
-  TransformStorage storage_;
-};
-
-}
-
-#endif // TF2_TIME_CACHE_H
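TimeCache::getData relies on the findClosest and interpolate helpers declared above. A hedged sketch of what the interpolation amounts to (the real implementation lives in time_cache.cpp, not in this header): translation is linearly interpolated and rotation is slerped between the two cached entries that bracket the requested time.

    // Sketch only; mirrors the declared interface, not copied from the source.
    void interpolateSketch(const tf2::TransformStorage& one,
                           const tf2::TransformStorage& two,
                           ros::Time time, tf2::TransformStorage& output)
    {
      if (two.stamp_ == one.stamp_) { output = two; return; }
      double ratio = (time - one.stamp_).toSec() /
                     (two.stamp_ - one.stamp_).toSec();
      // lerp the translation, slerp the rotation
      output.translation_.setInterpolate3(one.translation_, two.translation_, ratio);
      output.rotation_ = tf2::slerp(one.rotation_, two.rotation_, ratio);
      output.stamp_ = time;
      output.frame_id_ = one.frame_id_;
      output.child_frame_id_ = one.child_frame_id_;
    }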
diff --git a/src/geometry2/tf2/include/tf2/transform_datatypes.h b/src/geometry2/tf2/include/tf2/transform_datatypes.h
deleted file mode 100644
index e5ea9f9..0000000
--- a/src/geometry2/tf2/include/tf2/transform_datatypes.h
+++ /dev/null
@@ -1,74 +0,0 @@
-/*
- * Copyright (c) 2008, Willow Garage, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- *    * Redistributions of source code must retain the above copyright
- *      notice, this list of conditions and the following disclaimer.
- *    * Redistributions in binary form must reproduce the above copyright
- *      notice, this list of conditions and the following disclaimer in the
- *      documentation and/or other materials provided with the distribution.
- *    * Neither the name of the Willow Garage, Inc. nor the names of its
- *      contributors may be used to endorse or promote products derived from
- *      this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-/** \author Tully Foote */
-
-#ifndef TF2_TRANSFORM_DATATYPES_H
-#define TF2_TRANSFORM_DATATYPES_H
-
-#include <string>
-#include "ros/time.h"
-
-namespace tf2
-{
-
-/** \brief The data type which will be cross compatible with geometry_msgs
- * This is the tf2 datatype equivalent of a MessageStamped */
-template <typename T>
-class Stamped : public T{
- public:
-  ros::Time stamp_; ///< The timestamp associated with this data
-  std::string frame_id_; ///< The frame_id associated with this data
-
-  /** Default constructor */
-  Stamped() :frame_id_ ("NO_ID_STAMPED_DEFAULT_CONSTRUCTION"){}; // Default constructor used only for preallocation
-
-  /** Full constructor */
-  Stamped(const T& input, const ros::Time& timestamp, const std::string & frame_id) :
-    T (input), stamp_ ( timestamp ), frame_id_ (frame_id){ } ;
-
-  /** Copy Constructor */
-  Stamped(const Stamped<T>& s):
-    T (s),
-    stamp_(s.stamp_),
-    frame_id_(s.frame_id_) {}
-
-  /** Set the data element */
-  void setData(const T& input){*static_cast<T*>(this) = input;};
-};
-
-/** \brief Comparison operator for Stamped datatypes */
-template <typename T>
-bool operator==(const Stamped<T> &a, const Stamped<T> &b) {
-  return a.frame_id_ == b.frame_id_ && a.stamp_ == b.stamp_ && static_cast<const T&>(a) == static_cast<const T&>(b);
-}
-
-
-}
-#endif //TF2_TRANSFORM_DATATYPES_H
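A hedged usage sketch for the Stamped template above; the frame name is invented:

    tf2::Stamped<tf2::Vector3> pt(tf2::Vector3(1.0, 2.0, 0.0),
                                  ros::Time(10.0),   // stamp_
                                  "base_link");      // frame_id_
    pt.setData(tf2::Vector3(0.0, 0.0, 0.0));         // replaces the payload,
                                                     // keeps stamp and frame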
diff --git a/src/geometry2/tf2/include/tf2/transform_storage.h b/src/geometry2/tf2/include/tf2/transform_storage.h
deleted file mode 100644
index 7856bb8..0000000
--- a/src/geometry2/tf2/include/tf2/transform_storage.h
+++ /dev/null
@@ -1,86 +0,0 @@
-/*
- * Copyright (c) 2010, Willow Garage, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- *    * Redistributions of source code must retain the above copyright
- *      notice, this list of conditions and the following disclaimer.
- *    * Redistributions in binary form must reproduce the above copyright
- *      notice, this list of conditions and the following disclaimer in the
- *      documentation and/or other materials provided with the distribution.
- *    * Neither the name of the Willow Garage, Inc. nor the names of its
- *      contributors may be used to endorse or promote products derived from
- *      this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-/** \author Tully Foote */
-
-#ifndef TF2_TRANSFORM_STORAGE_H
-#define TF2_TRANSFORM_STORAGE_H
-
-#include <tf2/LinearMath/Vector3.h>
-#include <tf2/LinearMath/Quaternion.h>
-
-#include <ros/message_forward.h>
-#include <ros/time.h>
-#include <ros/types.h>
-
-namespace geometry_msgs
-{
-ROS_DECLARE_MESSAGE(TransformStamped)
-}
-
-namespace tf2
-{
-
-typedef uint32_t CompactFrameID;
-
-/** \brief Storage for transforms and their parent */
-class TransformStorage
-{
-public:
-  TransformStorage();
-  TransformStorage(const geometry_msgs::TransformStamped& data, CompactFrameID frame_id, CompactFrameID child_frame_id);
-
-  TransformStorage(const TransformStorage& rhs)
-  {
-    *this = rhs;
-  }
-
-  TransformStorage& operator=(const TransformStorage& rhs)
-  {
-    rotation_ = rhs.rotation_;
-    translation_ = rhs.translation_;
-    stamp_ = rhs.stamp_;
-    frame_id_ = rhs.frame_id_;
-    child_frame_id_ = rhs.child_frame_id_;
-    return *this;
-  }
-
-  tf2::Quaternion rotation_;
-  tf2::Vector3 translation_;
-  ros::Time stamp_;
-  CompactFrameID frame_id_;
-  CompactFrameID child_frame_id_;
-};
-
-}
-
-#endif // TF2_TRANSFORM_STORAGE_H
-
diff --git a/src/geometry2/tf2/include/tf2/utils.h b/src/geometry2/tf2/include/tf2/utils.h
deleted file mode 100644
index 54805cc..0000000
--- a/src/geometry2/tf2/include/tf2/utils.h
+++ /dev/null
@@ -1,66 +0,0 @@
-// Copyright 2014 Open Source Robotics Foundation, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef TF2_UTILS_H
-#define TF2_UTILS_H
-
-#include <tf2/LinearMath/Quaternion.h>
-#include <tf2/LinearMath/Transform.h>
-#include <tf2/impl/utils.h>
-
-namespace tf2 {
-/** Return the yaw, pitch, roll of anything that can be converted to a tf2::Quaternion
- * The conventions are the usual ROS ones defined in tf2/LinearMath/Matrix3x3.h
- * \param a the object to get data from (it represents a rotation/quaternion)
- * \param yaw yaw
- * \param pitch pitch
- * \param roll roll
- */
-template <class A>
-  void getEulerYPR(const A& a, double& yaw, double& pitch, double& roll)
-  {
-    tf2::Quaternion q = impl::toQuaternion(a);
-    impl::getEulerYPR(q, yaw, pitch, roll);
-  }
-
-/** Return the yaw of anything that can be converted to a tf2::Quaternion
- * The conventions are the usual ROS ones defined in tf2/LinearMath/Matrix3x3.h
- * This function is a specialization of getEulerYPR and is useful for its
- * wide-spread use in navigation
- * \param a the object to get data from (it represents a rotation/quaternion)
- * \param yaw yaw
- */
-template <class A>
-  double getYaw(const A& a)
-  {
-    tf2::Quaternion q = impl::toQuaternion(a);
-    return impl::getYaw(q);
-  }
-
-/** Return the identity for any type that can be converted to a tf2::Transform
- * \return an object of class A that is an identity transform
- */
-template <class A>
-  A getTransformIdentity()
-  {
-    tf2::Transform t;
-    t.setIdentity();
-    A a;
-    convert(t, a);
-    return a;
-  }
-
-}
-
-#endif //TF2_UTILS_H
diff --git a/src/geometry2/tf2/index.rst b/src/geometry2/tf2/index.rst
deleted file mode 100644
index 5001a33..0000000
--- a/src/geometry2/tf2/index.rst
+++ /dev/null
@@ -1,15 +0,0 @@
-tf2
-=====
-
-This is the Python API reference of the tf2 package.
-
-.. toctree::
-    :maxdepth: 2
-
-    tf2
-
-Indices and tables
-==================
-
-* :ref:`genindex`
-* :ref:`search`
diff --git a/src/geometry2/tf2/mainpage.dox b/src/geometry2/tf2/mainpage.dox
deleted file mode 100644
index 2cba60c..0000000
--- a/src/geometry2/tf2/mainpage.dox
+++ /dev/null
@@ -1,36 +0,0 @@
-/**
-\mainpage
-\htmlinclude manifest.html
-
-\b tf2 is the second generation of the tf library.
-
-This library implements the interface defined by tf2::BufferCore.
-
-There is also a Python wrapper with the same API that calls this library using CPython bindings.
-
-\section codeapi Code API
-
-The main interface is through the tf2::BufferCore interface.
-
-It uses the exceptions in exceptions.h and the Stamped datatype
-in transform_datatypes.h.
-
-\section conversions Conversion Interface
-
-tf2 offers a templated conversion interface for external libraries to specify conversions between
-tf2-specific data types and user-defined data types. Various templated functions in tf2_ros use the
-conversion interface to apply transformations from the tf server to these custom datatypes.
-
-The conversion interface is defined in tf2/convert.h.
-
-Some packages that implement this interface:
-
-- tf2_bullet
-- tf2_eigen
-- tf2_geometry_msgs
-- tf2_kdl
-- tf2_sensor_msgs
-
-More documentation for the conversion interface is available on the ROS Wiki.
-
-*/
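A hedged sketch of the three tf2/utils.h helpers above in use; getTransformIdentity additionally assumes a convert() specialization such as the one provided by tf2_geometry_msgs:

    geometry_msgs::Quaternion q;
    q.w = 1.0;                                   // identity rotation

    double yaw = tf2::getYaw(q);                 // 0.0
    double y, p, r;
    tf2::getEulerYPR(q, y, p, r);                // all 0.0

    geometry_msgs::Transform ident =
        tf2::getTransformIdentity<geometry_msgs::Transform>();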
diff --git a/src/geometry2/tf2/package.xml b/src/geometry2/tf2/package.xml
deleted file mode 100644
index 65bc8b7..0000000
--- a/src/geometry2/tf2/package.xml
+++ /dev/null
@@ -1,34 +0,0 @@
-<package>
-  <name>tf2</name>
-  <version>0.6.7</version>
-  <description>
-    tf2 is the second generation of the transform library, which lets
-    the user keep track of multiple coordinate frames over time. tf2
-    maintains the relationship between coordinate frames in a tree
-    structure buffered in time, and lets the user transform points,
-    vectors, etc between any two coordinate frames at any desired
-    point in time.
-  </description>
-  <author>Tully Foote</author>
-  <author>Eitan Marder-Eppstein</author>
-  <author>Wim Meeussen</author>
-  <maintainer>Tully Foote</maintainer>
-  <license>BSD</license>
-
-  <url>http://www.ros.org/wiki/tf2</url>
-
-  <buildtool_depend>catkin</buildtool_depend>
-
-  <build_depend>libconsole-bridge-dev</build_depend>
-  <build_depend>geometry_msgs</build_depend>
-  <build_depend>rostime</build_depend>
-  <build_depend>tf2_msgs</build_depend>
-
-  <run_depend>libconsole-bridge-dev</run_depend>
-  <run_depend>geometry_msgs</run_depend>
-  <run_depend>rostime</run_depend>
-  <run_depend>tf2_msgs</run_depend>
-
-
-
-</package>
diff --git a/src/geometry2/tf2/src/buffer_core.cpp b/src/geometry2/tf2/src/buffer_core.cpp
deleted file mode 100644
index 75b827d..0000000
--- a/src/geometry2/tf2/src/buffer_core.cpp
+++ /dev/null
@@ -1,1656 +0,0 @@
-/*
- * Copyright (c) 2010, Willow Garage, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- *    * Redistributions of source code must retain the above copyright
- *      notice, this list of conditions and the following disclaimer.
- *    * Redistributions in binary form must reproduce the above copyright
- *      notice, this list of conditions and the following disclaimer in the
- *      documentation and/or other materials provided with the distribution.
- *    * Neither the name of the Willow Garage, Inc. nor the names of its
- *      contributors may be used to endorse or promote products derived from
- *      this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-/** \author Tully Foote */
-
-#include "tf2/buffer_core.h"
-#include "tf2/time_cache.h"
-#include "tf2/exceptions.h"
-#include "tf2_msgs/TF2Error.h"
-
-#include <assert.h>
-#include <console_bridge/console.h>
-#include "tf2/LinearMath/Transform.h"
-#include <boost/foreach.hpp>
-
-namespace tf2
-{
-
-// Tolerance for acceptable quaternion normalization
-static double QUATERNION_NORMALIZATION_TOLERANCE = 10e-3;
-
-/** \brief convert Transform msg to Transform */
-void transformMsgToTF2(const geometry_msgs::Transform& msg, tf2::Transform& tf2)
-{tf2 = tf2::Transform(tf2::Quaternion(msg.rotation.x, msg.rotation.y, msg.rotation.z, msg.rotation.w), tf2::Vector3(msg.translation.x, msg.translation.y, msg.translation.z));}
-
-/** \brief convert Transform to Transform msg*/
-void transformTF2ToMsg(const tf2::Transform& tf2, geometry_msgs::Transform& msg)
-{
-  msg.translation.x = tf2.getOrigin().x();
-  msg.translation.y = tf2.getOrigin().y();
-  msg.translation.z = tf2.getOrigin().z();
-  msg.rotation.x = tf2.getRotation().x();
-  msg.rotation.y = tf2.getRotation().y();
-  msg.rotation.z = tf2.getRotation().z();
-  msg.rotation.w = tf2.getRotation().w();
-}
-
-/** \brief convert Transform to Transform msg*/
-void transformTF2ToMsg(const tf2::Transform& tf2, geometry_msgs::TransformStamped& msg, ros::Time stamp, const std::string& frame_id, const std::string& child_frame_id)
-{
-  transformTF2ToMsg(tf2, msg.transform);
-  msg.header.stamp = stamp;
-  msg.header.frame_id = frame_id;
-  msg.child_frame_id = child_frame_id;
-}
-
-void transformTF2ToMsg(const tf2::Quaternion& orient, const tf2::Vector3& pos, geometry_msgs::Transform& msg)
-{
-  msg.translation.x = pos.x();
-  msg.translation.y = pos.y();
-  msg.translation.z = pos.z();
-  msg.rotation.x = orient.x();
-  msg.rotation.y = orient.y();
-  msg.rotation.z = orient.z();
-  msg.rotation.w = orient.w();
-}
-
-void transformTF2ToMsg(const tf2::Quaternion& orient, const tf2::Vector3& pos, geometry_msgs::TransformStamped& msg, ros::Time stamp, const std::string& frame_id, const std::string& child_frame_id)
-{
-  transformTF2ToMsg(orient, pos, msg.transform);
-  msg.header.stamp = stamp;
-  msg.header.frame_id = frame_id;
-  msg.child_frame_id = child_frame_id;
-}
-
-void setIdentity(geometry_msgs::Transform& tx)
-{
-  tx.translation.x = 0;
-  tx.translation.y = 0;
-  tx.translation.z = 0;
-  tx.rotation.x = 0;
-  tx.rotation.y = 0;
-  tx.rotation.z = 0;
-  tx.rotation.w = 1;
-}
-
-bool startsWithSlash(const std::string& frame_id)
-{
-  if (frame_id.size() > 0)
-    if (frame_id[0] == '/')
-      return true;
-  return false;
-}
-
-std::string stripSlash(const std::string& in)
-{
-  std::string out = in;
-  if (startsWithSlash(in))
-    out.erase(0,1);
-  return out;
-}
-
-
-bool BufferCore::warnFrameId(const char* function_name_arg, const std::string& frame_id) const
-{
-  if (frame_id.size() == 0)
-  {
-    std::stringstream ss;
-    ss << "Invalid argument passed to "<< function_name_arg <<" in tf2: frame_ids cannot be empty";
-    CONSOLE_BRIDGE_logWarn("%s",ss.str().c_str());
-    return true;
-  }
-
-  if (startsWithSlash(frame_id))
-  {
-    std::stringstream ss;
-    ss << "Invalid argument \"" << frame_id << "\" passed to "<< function_name_arg <<" in tf2: frame_ids cannot start with a '/'";
-    CONSOLE_BRIDGE_logWarn("%s",ss.str().c_str());
-    return true;
-  }
-
-  return false;
-}
-CompactFrameID BufferCore::validateFrameId(const char* function_name_arg, const std::string& frame_id) const
-{
-  if (frame_id.empty())
-  {
-    std::stringstream ss;
-    ss << "Invalid argument passed to "<< function_name_arg <<" in tf2: frame_ids cannot be empty";
-    throw tf2::InvalidArgumentException(ss.str().c_str());
-  }
-
-  if (startsWithSlash(frame_id))
-  {
-    std::stringstream ss;
-    ss << "Invalid argument \"" << frame_id << "\" passed to "<< function_name_arg <<" in tf2: frame_ids cannot start with a '/'";
-    throw tf2::InvalidArgumentException(ss.str().c_str());
-  }
-
-  CompactFrameID id = lookupFrameNumber(frame_id);
-  if (id == 0)
-  {
-    std::stringstream ss;
-    ss << "\"" << frame_id << "\" passed to "<< function_name_arg <<" does not exist. ";
-    throw tf2::LookupException(ss.str().c_str());
-  }
-
-  return id;
-}
-
-BufferCore::BufferCore(ros::Duration cache_time)
-: cache_time_(cache_time)
-, transformable_callbacks_counter_(0)
-, transformable_requests_counter_(0)
-, using_dedicated_thread_(false)
-{
-  frameIDs_["NO_PARENT"] = 0;
-  frames_.push_back(TimeCacheInterfacePtr());
-  frameIDs_reverse.push_back("NO_PARENT");
-}
-
-BufferCore::~BufferCore()
-{
-
-}
-
-void BufferCore::clear()
-{
-  //old_tf_.clear();
-
-
-  boost::mutex::scoped_lock lock(frame_mutex_);
-  if ( frames_.size() > 1 )
-  {
-    for (std::vector<TimeCacheInterfacePtr>::iterator cache_it = frames_.begin() + 1; cache_it != frames_.end(); ++cache_it)
-    {
-      if (*cache_it)
-        (*cache_it)->clearList();
-    }
-  }
-
-}
-
-bool BufferCore::setTransform(const geometry_msgs::TransformStamped& transform_in, const std::string& authority, bool is_static)
-{
-
-  ///// BACKWARDS COMPATIBILITY
-  /* tf::StampedTransform tf_transform;
-  tf::transformStampedMsgToTF(transform_in, tf_transform);
-  if (!old_tf_.setTransform(tf_transform, authority))
-  {
-    printf("Warning old setTransform Failed but was not caught\n");
-  }*/
-
-  /////// New implementation
-  geometry_msgs::TransformStamped stripped = transform_in;
-  stripped.header.frame_id = stripSlash(stripped.header.frame_id);
-  stripped.child_frame_id = stripSlash(stripped.child_frame_id);
-
-
-  bool error_exists = false;
-  if (stripped.child_frame_id == stripped.header.frame_id)
-  {
-    CONSOLE_BRIDGE_logError("TF_SELF_TRANSFORM: Ignoring transform from authority \"%s\" with frame_id and child_frame_id \"%s\" because they are the same", authority.c_str(), stripped.child_frame_id.c_str());
-    error_exists = true;
-  }
-
-  if (stripped.child_frame_id == "")
-  {
-    CONSOLE_BRIDGE_logError("TF_NO_CHILD_FRAME_ID: Ignoring transform from authority \"%s\" because child_frame_id not set ", authority.c_str());
-    error_exists = true;
-  }
-
-  if (stripped.header.frame_id == "")
-  {
-    CONSOLE_BRIDGE_logError("TF_NO_FRAME_ID: Ignoring transform with child_frame_id \"%s\" from authority \"%s\" because frame_id not set", stripped.child_frame_id.c_str(), authority.c_str());
-    error_exists = true;
-  }
-
-  if (std::isnan(stripped.transform.translation.x) || std::isnan(stripped.transform.translation.y) || std::isnan(stripped.transform.translation.z) ||
-      std::isnan(stripped.transform.rotation.x) || std::isnan(stripped.transform.rotation.y) || std::isnan(stripped.transform.rotation.z) || std::isnan(stripped.transform.rotation.w))
-  {
-    CONSOLE_BRIDGE_logError("TF_NAN_INPUT: Ignoring transform for child_frame_id \"%s\" from authority \"%s\" because of a nan value in the transform (%f %f %f) (%f %f %f %f)",
-                            stripped.child_frame_id.c_str(), authority.c_str(),
-                            stripped.transform.translation.x, stripped.transform.translation.y, stripped.transform.translation.z,
-                            stripped.transform.rotation.x, stripped.transform.rotation.y, stripped.transform.rotation.z, stripped.transform.rotation.w
-                            );
-    error_exists = true;
-  }
-  bool valid = std::abs((stripped.transform.rotation.w * stripped.transform.rotation.w
-                         + stripped.transform.rotation.x * stripped.transform.rotation.x
-                         + stripped.transform.rotation.y * stripped.transform.rotation.y
-                         + stripped.transform.rotation.z * stripped.transform.rotation.z) - 1.0f) < QUATERNION_NORMALIZATION_TOLERANCE;
-
-  if (!valid)
-  {
-    CONSOLE_BRIDGE_logError("TF_DENORMALIZED_QUATERNION: Ignoring transform for child_frame_id \"%s\" from authority \"%s\" because of an invalid quaternion in the transform (%f %f %f %f)",
-                            stripped.child_frame_id.c_str(), authority.c_str(),
-                            stripped.transform.rotation.x, stripped.transform.rotation.y, stripped.transform.rotation.z, stripped.transform.rotation.w);
-    error_exists = true;
-  }
-
-  if (error_exists)
-    return false;
-
-  {
-    boost::mutex::scoped_lock lock(frame_mutex_);
-    CompactFrameID frame_number = lookupOrInsertFrameNumber(stripped.child_frame_id);
-    TimeCacheInterfacePtr frame = getFrame(frame_number);
-    if (frame == NULL)
-      frame = allocateFrame(frame_number, is_static);
-
-    if (frame->insertData(TransformStorage(stripped, lookupOrInsertFrameNumber(stripped.header.frame_id), frame_number)))
-    {
-      frame_authority_[frame_number] = authority;
-    }
-    else
-    {
-      CONSOLE_BRIDGE_logWarn("TF_OLD_DATA ignoring data from the past for frame %s at time %g according to authority %s\nPossible reasons are listed at http://wiki.ros.org/tf/Errors%%20explained", stripped.child_frame_id.c_str(), stripped.header.stamp.toSec(), authority.c_str());
-      return false;
-    }
-  }
-
-  testTransformableRequests();
-
-  return true;
-}
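The quaternion check in setTransform above accepts a transform only when

$$\left|\, q_w^2 + q_x^2 + q_y^2 + q_z^2 - 1 \,\right| < \mathrm{QUATERNION\_NORMALIZATION\_TOLERANCE},$$

i.e. the squared norm must be within 10e-3 = $10^{-2}$ of unity. Note the test is on the squared norm, so no square root is taken.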
-TimeCacheInterfacePtr BufferCore::allocateFrame(CompactFrameID cfid, bool is_static)
-{
-  TimeCacheInterfacePtr frame_ptr = frames_[cfid];
-  if (is_static) {
-    frames_[cfid] = TimeCacheInterfacePtr(new StaticCache());
-  } else {
-    frames_[cfid] = TimeCacheInterfacePtr(new TimeCache(cache_time_));
-  }
-
-  return frames_[cfid];
-}
-
-enum WalkEnding
-{
-  Identity,
-  TargetParentOfSource,
-  SourceParentOfTarget,
-  FullPath,
-};
-
-// TODO for Jade: Merge walkToTopParent functions; this is now a stub to preserve ABI
-template<typename F>
-int BufferCore::walkToTopParent(F& f, ros::Time time, CompactFrameID target_id, CompactFrameID source_id, std::string* error_string) const
-{
-  return walkToTopParent(f, time, target_id, source_id, error_string, NULL);
-}
-
-template<typename F>
-int BufferCore::walkToTopParent(F& f, ros::Time time, CompactFrameID target_id,
-    CompactFrameID source_id, std::string* error_string, std::vector<CompactFrameID>* frame_chain) const
-{
-  if (frame_chain)
-    frame_chain->clear();
-
-  // Short circuit if zero length transform to allow lookups on non existent links
-  if (source_id == target_id)
-  {
-    f.finalize(Identity, time);
-    return tf2_msgs::TF2Error::NO_ERROR;
-  }
-
-  // If getting the latest, get the latest common time
-  if (time == ros::Time())
-  {
-    int retval = getLatestCommonTime(target_id, source_id, time, error_string);
-    if (retval != tf2_msgs::TF2Error::NO_ERROR)
-    {
-      return retval;
-    }
-  }
-
-  // Walk the tree to its root from the source frame, accumulating the transform
-  CompactFrameID frame = source_id;
-  CompactFrameID top_parent = frame;
-  uint32_t depth = 0;
-
-  std::string extrapolation_error_string;
-  bool extrapolation_might_have_occurred = false;
-
-  while (frame != 0)
-  {
-    TimeCacheInterfacePtr cache = getFrame(frame);
-    if (frame_chain)
-      frame_chain->push_back(frame);
-
-    if (!cache)
-    {
-      // There will be no cache for the very root of the tree
-      top_parent = frame;
-      break;
-    }
-
-    CompactFrameID parent = f.gather(cache, time, &extrapolation_error_string);
-    if (parent == 0)
-    {
-      // Just break out here... there may still be a path from source -> target
-      top_parent = frame;
-      extrapolation_might_have_occurred = true;
-      break;
-    }
-
-    // Early out... target frame is a direct parent of the source frame
-    if (frame == target_id)
-    {
-      f.finalize(TargetParentOfSource, time);
-      return tf2_msgs::TF2Error::NO_ERROR;
-    }
-
-    f.accum(true);
-
-    top_parent = frame;
-    frame = parent;
-
-    ++depth;
-    if (depth > MAX_GRAPH_DEPTH)
-    {
-      if (error_string)
-      {
-        std::stringstream ss;
-        ss << "The tf tree is invalid because it contains a loop." << std::endl
-           << allFramesAsStringNoLock() << std::endl;
-        *error_string = ss.str();
-      }
-      return tf2_msgs::TF2Error::LOOKUP_ERROR;
-    }
-  }
-
-  // Now walk to the top parent from the target frame, accumulating its transform
-  frame = target_id;
-  depth = 0;
-  std::vector<CompactFrameID> reverse_frame_chain;
-
-  while (frame != top_parent)
-  {
-    TimeCacheInterfacePtr cache = getFrame(frame);
-    if (frame_chain)
-      reverse_frame_chain.push_back(frame);
-
-    if (!cache)
-    {
-      break;
-    }
-
-    CompactFrameID parent = f.gather(cache, time, error_string);
-    if (parent == 0)
-    {
-      if (error_string)
-      {
-        std::stringstream ss;
-        ss << *error_string << ", when looking up transform from frame [" << lookupFrameString(source_id) << "] to frame [" << lookupFrameString(target_id) << "]";
-        *error_string = ss.str();
-      }
-
-      return tf2_msgs::TF2Error::EXTRAPOLATION_ERROR;
-    }
-
-    // Early out... source frame is a direct parent of the target frame
-    if (frame == source_id)
-    {
-      f.finalize(SourceParentOfTarget, time);
-      if (frame_chain)
-      {
-        // Use the walk we just did
-        frame_chain->swap(reverse_frame_chain);
-        // Reverse it before returning because this is the reverse walk.
-        std::reverse(frame_chain->begin(), frame_chain->end());
-      }
-      return tf2_msgs::TF2Error::NO_ERROR;
-    }
-
-    f.accum(false);
-
-    frame = parent;
-
-    ++depth;
-    if (depth > MAX_GRAPH_DEPTH)
-    {
-      if (error_string)
-      {
-        std::stringstream ss;
-        ss << "The tf tree is invalid because it contains a loop." << std::endl
-           << allFramesAsStringNoLock() << std::endl;
-        *error_string = ss.str();
-      }
-      return tf2_msgs::TF2Error::LOOKUP_ERROR;
-    }
-  }
-
-  if (frame != top_parent)
-  {
-    if (extrapolation_might_have_occurred)
-    {
-      if (error_string)
-      {
-        std::stringstream ss;
-        ss << extrapolation_error_string << ", when looking up transform from frame [" << lookupFrameString(source_id) << "] to frame [" << lookupFrameString(target_id) << "]";
-        *error_string = ss.str();
-      }
-
-      return tf2_msgs::TF2Error::EXTRAPOLATION_ERROR;
-
-    }
-
-    createConnectivityErrorString(source_id, target_id, error_string);
-    return tf2_msgs::TF2Error::CONNECTIVITY_ERROR;
-  }
-  else if (frame_chain){
-    // append top_parent to reverse_frame_chain for easier matching/trimming
-    reverse_frame_chain.push_back(frame);
-  }
-
-  f.finalize(FullPath, time);
-  if (frame_chain)
-  {
-    // Pruning: Compare the chains starting at the parent (end) until they differ
-    int m = reverse_frame_chain.size()-1;
-    int n = frame_chain->size()-1;
-    for (; m >= 0 && n >= 0; --m, --n)
-    {
-      if ((*frame_chain)[n] != reverse_frame_chain[m])
-      {
-        break;
-      }
-    }
-    // Erase all duplicate items from frame_chain
-    if (n > 0)
-    {
-      // N is offset by 1 and leave the common parent for this result
-      frame_chain->erase(frame_chain->begin() + (n + 2), frame_chain->end());
-    }
-    if (m < reverse_frame_chain.size())
-    {
-      for (int i = m; i >= 0; --i)
-      {
-        frame_chain->push_back(reverse_frame_chain[i]);
-      }
-    }
-  }
-
-  return tf2_msgs::TF2Error::NO_ERROR;
-}
-
-
-
-struct TransformAccum
-{
-  TransformAccum()
-  : source_to_top_quat(0.0, 0.0, 0.0, 1.0)
-  , source_to_top_vec(0.0, 0.0, 0.0)
-  , target_to_top_quat(0.0, 0.0, 0.0, 1.0)
-  , target_to_top_vec(0.0, 0.0, 0.0)
-  , result_quat(0.0, 0.0, 0.0, 1.0)
-  , result_vec(0.0, 0.0, 0.0)
-  {
-  }
-
-  CompactFrameID gather(TimeCacheInterfacePtr cache, ros::Time time, std::string* error_string)
-  {
-    if (!cache->getData(time, st, error_string))
-    {
-      return 0;
-    }
-
-    return st.frame_id_;
-  }
-
-  void accum(bool source)
-  {
-    if (source)
-    {
-      source_to_top_vec = quatRotate(st.rotation_, source_to_top_vec) + st.translation_;
-      source_to_top_quat = st.rotation_ * source_to_top_quat;
-    }
-    else
-    {
-      target_to_top_vec = quatRotate(st.rotation_, target_to_top_vec) + st.translation_;
-      target_to_top_quat = st.rotation_ * target_to_top_quat;
-    }
-  }
-
-  void finalize(WalkEnding end, ros::Time _time)
-  {
-    switch (end)
-    {
-    case Identity:
-      break;
-    case TargetParentOfSource:
-      result_vec = source_to_top_vec;
-      result_quat = source_to_top_quat;
-      break;
-    case SourceParentOfTarget:
-      {
-        tf2::Quaternion inv_target_quat = target_to_top_quat.inverse();
-        tf2::Vector3 inv_target_vec = quatRotate(inv_target_quat, -target_to_top_vec);
-        result_vec = inv_target_vec;
-        result_quat = inv_target_quat;
-        break;
-      }
-    case FullPath:
-      {
-        tf2::Quaternion inv_target_quat = target_to_top_quat.inverse();
-        tf2::Vector3 inv_target_vec = quatRotate(inv_target_quat, -target_to_top_vec);
-
-        result_vec = quatRotate(inv_target_quat, source_to_top_vec) + inv_target_vec;
-        result_quat = inv_target_quat * source_to_top_quat;
-      }
-      break;
-    };
-
-    time = _time;
-  }
-
-  TransformStorage st;
-  ros::Time time;
-  tf2::Quaternion source_to_top_quat;
-  tf2::Vector3 source_to_top_vec;
-  tf2::Quaternion target_to_top_quat;
-  tf2::Vector3 target_to_top_vec;
-
-  tf2::Quaternion result_quat;
-  tf2::Vector3 result_vec;
-};
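TransformAccum composes the two chains that walkToTopParent accumulates. In the FullPath case of finalize above, with $T^{top}_{source}$ and $T^{top}_{target}$ the accumulated source-to-top and target-to-top transforms, the result is

$$T^{target}_{source} = \left(T^{top}_{target}\right)^{-1} \, T^{top}_{source},$$

which is exactly the invert-then-compose sequence carried out on (result_quat, result_vec).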
-geometry_msgs::TransformStamped BufferCore::lookupTransform(const std::string& target_frame,
-                                                            const std::string& source_frame,
-                                                            const ros::Time& time) const
-{
-  boost::mutex::scoped_lock lock(frame_mutex_);
-
-  if (target_frame == source_frame) {
-    geometry_msgs::TransformStamped identity;
-    identity.header.frame_id = target_frame;
-    identity.child_frame_id = source_frame;
-    identity.transform.rotation.w = 1;
-
-    if (time == ros::Time())
-    {
-      CompactFrameID target_id = lookupFrameNumber(target_frame);
-      TimeCacheInterfacePtr cache = getFrame(target_id);
-      if (cache)
-        identity.header.stamp = cache->getLatestTimestamp();
-      else
-        identity.header.stamp = time;
-    }
-    else
-      identity.header.stamp = time;
-
-    return identity;
-  }
-
-  // Identity case does not need to be validated above
-  CompactFrameID target_id = validateFrameId("lookupTransform argument target_frame", target_frame);
-  CompactFrameID source_id = validateFrameId("lookupTransform argument source_frame", source_frame);
-
-  std::string error_string;
-  TransformAccum accum;
-  int retval = walkToTopParent(accum, time, target_id, source_id, &error_string);
-  if (retval != tf2_msgs::TF2Error::NO_ERROR)
-  {
-    switch (retval)
-    {
-    case tf2_msgs::TF2Error::CONNECTIVITY_ERROR:
-      throw ConnectivityException(error_string);
-    case tf2_msgs::TF2Error::EXTRAPOLATION_ERROR:
-      throw ExtrapolationException(error_string);
-    case tf2_msgs::TF2Error::LOOKUP_ERROR:
-      throw LookupException(error_string);
-    default:
-      CONSOLE_BRIDGE_logError("Unknown error code: %d", retval);
-      assert(0);
-    }
-  }
-
-  geometry_msgs::TransformStamped output_transform;
-  transformTF2ToMsg(accum.result_quat, accum.result_vec, output_transform, accum.time, target_frame, source_frame);
-  return output_transform;
-}
-
-
-geometry_msgs::TransformStamped BufferCore::lookupTransform(const std::string& target_frame,
-                                                            const ros::Time& target_time,
-                                                            const std::string& source_frame,
-                                                            const ros::Time& source_time,
-                                                            const std::string& fixed_frame) const
-{
-  validateFrameId("lookupTransform argument target_frame", target_frame);
-  validateFrameId("lookupTransform argument source_frame", source_frame);
-  validateFrameId("lookupTransform argument fixed_frame", fixed_frame);
-
-  geometry_msgs::TransformStamped output;
-  geometry_msgs::TransformStamped temp1 = lookupTransform(fixed_frame, source_frame, source_time);
-  geometry_msgs::TransformStamped temp2 = lookupTransform(target_frame, fixed_frame, target_time);
-
-  tf2::Transform tf1, tf2;
-  transformMsgToTF2(temp1.transform, tf1);
-  transformMsgToTF2(temp2.transform, tf2);
-  transformTF2ToMsg(tf2*tf1, output.transform);
-  output.header.stamp = temp2.header.stamp;
-  output.header.frame_id = target_frame;
-  output.child_frame_id = source_frame;
-  return output;
-}
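The time-advanced overload above chains two simple lookups through fixed_frame: with $t_t$ the target time and $t_s$ the source time,

$$T^{target(t_t)}_{source(t_s)} = T^{target(t_t)}_{fixed} \cdot T^{fixed}_{source(t_s)},$$

which is the tf2*tf1 product in the code; the output is stamped with the target-side time, temp2.header.stamp.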
-
-
-
-/*
-geometry_msgs::Twist BufferCore::lookupTwist(const std::string& tracking_frame,
-                                             const std::string& observation_frame,
-                                             const ros::Time& time,
-                                             const ros::Duration& averaging_interval) const
-{
-  try
-  {
-    geometry_msgs::Twist t;
-    old_tf_.lookupTwist(tracking_frame, observation_frame,
-                        time, averaging_interval, t);
-    return t;
-  }
-  catch (tf::LookupException& ex)
-  {
-    throw tf2::LookupException(ex.what());
-  }
-  catch (tf::ConnectivityException& ex)
-  {
-    throw tf2::ConnectivityException(ex.what());
-  }
-  catch (tf::ExtrapolationException& ex)
-  {
-    throw tf2::ExtrapolationException(ex.what());
-  }
-  catch (tf::InvalidArgument& ex)
-  {
-    throw tf2::InvalidArgumentException(ex.what());
-  }
-}
-
-geometry_msgs::Twist BufferCore::lookupTwist(const std::string& tracking_frame,
-                                             const std::string& observation_frame,
-                                             const std::string& reference_frame,
-                                             const tf2::Point & reference_point,
-                                             const std::string& reference_point_frame,
-                                             const ros::Time& time,
-                                             const ros::Duration& averaging_interval) const
-{
-  try{
-    geometry_msgs::Twist t;
-    old_tf_.lookupTwist(tracking_frame, observation_frame, reference_frame, reference_point, reference_point_frame,
-                        time, averaging_interval, t);
-    return t;
-  }
-  catch (tf::LookupException& ex)
-  {
-    throw tf2::LookupException(ex.what());
-  }
-  catch (tf::ConnectivityException& ex)
-  {
-    throw tf2::ConnectivityException(ex.what());
-  }
-  catch (tf::ExtrapolationException& ex)
-  {
-    throw tf2::ExtrapolationException(ex.what());
-  }
-  catch (tf::InvalidArgument& ex)
-  {
-    throw tf2::InvalidArgumentException(ex.what());
-  }
-}
-*/
-
-struct CanTransformAccum
-{
-  CompactFrameID gather(TimeCacheInterfacePtr cache, ros::Time time, std::string* error_string)
-  {
-    return cache->getParent(time, error_string);
-  }
-
-  void accum(bool source)
-  {
-  }
-
-  void finalize(WalkEnding end, ros::Time _time)
-  {
-  }
-
-  TransformStorage st;
-};
-
-bool BufferCore::canTransformNoLock(CompactFrameID target_id, CompactFrameID source_id,
-                                    const ros::Time& time, std::string* error_msg) const
-{
-  if (target_id == 0 || source_id == 0)
-  {
-    if (error_msg)
-    {
-      if (target_id == 0)
-      {
-        *error_msg += std::string("target_frame: " + lookupFrameString(target_id) + " does not exist.");
-      }
-      if (source_id == 0)
-      {
-        if (target_id == 0)
-        {
-          *error_msg += std::string(" ");
-        }
-        *error_msg += std::string("source_frame: " + lookupFrameString(source_id) + " does not exist.");
-      }
-    }
-    return false;
-  }
-
-  if (target_id == source_id)
-  {
-    return true;
-  }
-
-  CanTransformAccum accum;
-  if (walkToTopParent(accum, time, target_id, source_id, error_msg) == tf2_msgs::TF2Error::NO_ERROR)
-  {
-    return true;
-  }
-
-  return false;
-}
-
-bool BufferCore::canTransformInternal(CompactFrameID target_id, CompactFrameID source_id,
-                                      const ros::Time& time, std::string* error_msg) const
-{
-  boost::mutex::scoped_lock lock(frame_mutex_);
-  return canTransformNoLock(target_id, source_id, time, error_msg);
-}
-
-bool BufferCore::canTransform(const std::string& target_frame, const std::string& source_frame,
-                              const ros::Time& time, std::string* error_msg) const
-{
-  // Short circuit if target_frame == source_frame
-  if (target_frame == source_frame)
-    return true;
-
-  if (warnFrameId("canTransform argument target_frame", target_frame))
-    return false;
-  if (warnFrameId("canTransform argument source_frame", source_frame))
-    return false;
-
-  boost::mutex::scoped_lock lock(frame_mutex_);
-
-  CompactFrameID target_id = lookupFrameNumber(target_frame);
-  CompactFrameID source_id = lookupFrameNumber(source_frame);
-
-  if (target_id == 0 || source_id == 0)
-  {
-    if (error_msg)
-    {
-      if (target_id == 0)
-      {
-        *error_msg += std::string("canTransform: target_frame " + target_frame + " does not exist.");
-      }
-      if (source_id == 0)
-      {
-        if (target_id == 0)
-        {
-          *error_msg += std::string(" ");
-        }
-        *error_msg += std::string("canTransform: source_frame " + source_frame + " does not exist.");
-      }
-    }
-    return false;
-  }
-  return canTransformNoLock(target_id, source_id, time, error_msg);
-}
argument source_frame", source_frame)) - return false; - if (warnFrameId("canTransform argument fixed_frame", fixed_frame)) - return false; - - boost::mutex::scoped_lock lock(frame_mutex_); - CompactFrameID target_id = lookupFrameNumber(target_frame); - CompactFrameID source_id = lookupFrameNumber(source_frame); - CompactFrameID fixed_id = lookupFrameNumber(fixed_frame); - - if (target_id == 0 || source_id == 0 || fixed_id == 0) - { - if (error_msg) - { - if (target_id == 0) - { - *error_msg += std::string("canTransform: target_frame " + target_frame + " does not exist."); - } - if (source_id == 0) - { - if (target_id == 0) - { - *error_msg += std::string(" "); - } - *error_msg += std::string("canTransform: source_frame " + source_frame + " does not exist."); - } - if (source_id == 0) - { - if (target_id == 0 || source_id == 0) - { - *error_msg += std::string(" "); - } - *error_msg += std::string("fixed_frame: " + fixed_frame + "does not exist."); - } - } - return false; - } - return canTransformNoLock(target_id, fixed_id, target_time, error_msg) && canTransformNoLock(fixed_id, source_id, source_time, error_msg); -} - - -tf2::TimeCacheInterfacePtr BufferCore::getFrame(CompactFrameID frame_id) const -{ - if (frame_id >= frames_.size()) - return TimeCacheInterfacePtr(); - else - { - return frames_[frame_id]; - } -} - -CompactFrameID BufferCore::lookupFrameNumber(const std::string& frameid_str) const -{ - CompactFrameID retval; - M_StringToCompactFrameID::const_iterator map_it = frameIDs_.find(frameid_str); - if (map_it == frameIDs_.end()) - { - retval = CompactFrameID(0); - } - else - retval = map_it->second; - return retval; -} - -CompactFrameID BufferCore::lookupOrInsertFrameNumber(const std::string& frameid_str) -{ - CompactFrameID retval = 0; - M_StringToCompactFrameID::iterator map_it = frameIDs_.find(frameid_str); - if (map_it == frameIDs_.end()) - { - retval = CompactFrameID(frames_.size()); - frames_.push_back(TimeCacheInterfacePtr());//Just a place holder for iteration - frameIDs_[frameid_str] = retval; - frameIDs_reverse.push_back(frameid_str); - } - else - retval = frameIDs_[frameid_str]; - - return retval; -} - -const std::string& BufferCore::lookupFrameString(CompactFrameID frame_id_num) const -{ - if (frame_id_num >= frameIDs_reverse.size()) - { - std::stringstream ss; - ss << "Reverse lookup of frame id " << frame_id_num << " failed!"; - throw tf2::LookupException(ss.str()); - } - else - return frameIDs_reverse[frame_id_num]; -} - -void BufferCore::createConnectivityErrorString(CompactFrameID source_frame, CompactFrameID target_frame, std::string* out) const -{ - if (!out) - { - return; - } - *out = std::string("Could not find a connection between '"+lookupFrameString(target_frame)+"' and '"+ - lookupFrameString(source_frame)+"' because they are not part of the same tree."+ - "Tf has two or more unconnected trees."); -} - -std::string BufferCore::allFramesAsString() const -{ - boost::mutex::scoped_lock lock(frame_mutex_); - return this->allFramesAsStringNoLock(); -} - -std::string BufferCore::allFramesAsStringNoLock() const -{ - std::stringstream mstream; - - TransformStorage temp; - - // for (std::vector< TimeCache*>::iterator it = frames_.begin(); it != frames_.end(); ++it) - - ///regular transforms - for (unsigned int counter = 1; counter < frames_.size(); counter ++) - { - TimeCacheInterfacePtr frame_ptr = getFrame(CompactFrameID(counter)); - if (frame_ptr == NULL) - continue; - CompactFrameID frame_id_num; - if( frame_ptr->getData(ros::Time(), temp)) - frame_id_num = 
-
-std::string BufferCore::allFramesAsStringNoLock() const
-{
-  std::stringstream mstream;
-
-  TransformStorage temp;
-
-  // for (std::vector< TimeCache*>::iterator it = frames_.begin(); it != frames_.end(); ++it)
-
-  ///regular transforms
-  for (unsigned int counter = 1; counter < frames_.size(); counter ++)
-  {
-    TimeCacheInterfacePtr frame_ptr = getFrame(CompactFrameID(counter));
-    if (frame_ptr == NULL)
-      continue;
-    CompactFrameID frame_id_num;
-    if (frame_ptr->getData(ros::Time(), temp))
-      frame_id_num = temp.frame_id_;
-    else
-    {
-      frame_id_num = 0;
-    }
-    mstream << "Frame " << frameIDs_reverse[counter] << " exists with parent " << frameIDs_reverse[frame_id_num] << "." << std::endl;
-  }
-
-  return mstream.str();
-}
-
-struct TimeAndFrameIDFrameComparator
-{
-  TimeAndFrameIDFrameComparator(CompactFrameID id)
-  : id(id)
-  {}
-
-  bool operator()(const P_TimeAndFrameID& rhs) const
-  {
-    return rhs.second == id;
-  }
-
-  CompactFrameID id;
-};
-
-int BufferCore::getLatestCommonTime(CompactFrameID target_id, CompactFrameID source_id, ros::Time & time, std::string * error_string) const
-{
-  // Error if one of the frames doesn't exist.
-  if (source_id == 0 || target_id == 0) return tf2_msgs::TF2Error::LOOKUP_ERROR;
-
-  if (source_id == target_id)
-  {
-    TimeCacheInterfacePtr cache = getFrame(source_id);
-    //Set time to the latest timestamp of the frame id in case target and source are the same
-    if (cache)
-      time = cache->getLatestTimestamp();
-    else
-      time = ros::Time();
-    return tf2_msgs::TF2Error::NO_ERROR;
-  }
-
-  std::vector<P_TimeAndFrameID> lct_cache;
-
-  // Walk the tree to its root from the source frame, accumulating the list of parent/time as well as the latest time,
-  // in case the target is a direct parent
-  CompactFrameID frame = source_id;
-  P_TimeAndFrameID temp;
-  uint32_t depth = 0;
-  ros::Time common_time = ros::TIME_MAX;
-  while (frame != 0)
-  {
-    TimeCacheInterfacePtr cache = getFrame(frame);
-
-    if (!cache)
-    {
-      // There will be no cache for the very root of the tree
-      break;
-    }
-
-    P_TimeAndFrameID latest = cache->getLatestTimeAndParent();
-
-    if (latest.second == 0)
-    {
-      // Just break out here... there may still be a path from source -> target
-      break;
-    }
-
-    if (!latest.first.isZero())
-    {
-      common_time = std::min(latest.first, common_time);
-    }
-
-    lct_cache.push_back(latest);
-
-    frame = latest.second;
-
-    // Early out... target frame is a direct parent of the source frame
-    if (frame == target_id)
-    {
-      time = common_time;
-      if (time == ros::TIME_MAX)
-      {
-        time = ros::Time();
-      }
-      return tf2_msgs::TF2Error::NO_ERROR;
-    }
-
-    ++depth;
-    if (depth > MAX_GRAPH_DEPTH)
-    {
-      if (error_string)
-      {
-        std::stringstream ss;
-        ss << "The tf tree is invalid because it contains a loop." << std::endl
-           << allFramesAsStringNoLock() << std::endl;
-        *error_string = ss.str();
-      }
-      return tf2_msgs::TF2Error::LOOKUP_ERROR;
-    }
-  }
-
-  // Now walk to the top parent from the target frame, accumulating the latest time and looking for a common parent
-  frame = target_id;
-  depth = 0;
-  common_time = ros::TIME_MAX;
-  CompactFrameID common_parent = 0;
-  while (true)
-  {
-    TimeCacheInterfacePtr cache = getFrame(frame);
-
-    if (!cache)
-    {
-      break;
-    }
-
-    P_TimeAndFrameID latest = cache->getLatestTimeAndParent();
-
-    if (latest.second == 0)
-    {
-      break;
-    }
-
-    if (!latest.first.isZero())
-    {
-      common_time = std::min(latest.first, common_time);
-    }
-
-    std::vector<P_TimeAndFrameID>::iterator it = std::find_if(lct_cache.begin(), lct_cache.end(), TimeAndFrameIDFrameComparator(latest.second));
-    if (it != lct_cache.end()) // found a common parent
-    {
-      common_parent = it->second;
-      break;
-    }
-
-    frame = latest.second;
-
-    // Early out... source frame is a direct parent of the target frame
-    if (frame == source_id)
-    {
-      time = common_time;
-      if (time == ros::TIME_MAX)
-      {
-        time = ros::Time();
-      }
-      return tf2_msgs::TF2Error::NO_ERROR;
-    }
-
-    ++depth;
-    if (depth > MAX_GRAPH_DEPTH)
-    {
-      if (error_string)
-      {
-        std::stringstream ss;
-        ss << "The tf tree is invalid because it contains a loop."
<< std::endl - << allFramesAsStringNoLock() << std::endl; - *error_string = ss.str(); - } - return tf2_msgs::TF2Error::LOOKUP_ERROR; - } - } - - if (common_parent == 0) - { - createConnectivityErrorString(source_id, target_id, error_string); - return tf2_msgs::TF2Error::CONNECTIVITY_ERROR; - } - - // Loop through the source -> root list until we hit the common parent - { - std::vector::iterator it = lct_cache.begin(); - std::vector::iterator end = lct_cache.end(); - for (; it != end; ++it) - { - if (!it->first.isZero()) - { - common_time = std::min(common_time, it->first); - } - - if (it->second == common_parent) - { - break; - } - } - } - - if (common_time == ros::TIME_MAX) - { - common_time = ros::Time(); - } - - time = common_time; - return tf2_msgs::TF2Error::NO_ERROR; -} - -std::string BufferCore::allFramesAsYAML(double current_time) const -{ - std::stringstream mstream; - boost::mutex::scoped_lock lock(frame_mutex_); - - TransformStorage temp; - - if (frames_.size() ==1) - mstream <<"{}"; - - mstream.precision(3); - mstream.setf(std::ios::fixed,std::ios::floatfield); - - // for (std::vector< TimeCache*>::iterator it = frames_.begin(); it != frames_.end(); ++it) - for (unsigned int counter = 1; counter < frames_.size(); counter ++)//one referenced for 0 is no frame - { - CompactFrameID cfid = CompactFrameID(counter); - CompactFrameID frame_id_num; - TimeCacheInterfacePtr cache = getFrame(cfid); - if (!cache) - { - continue; - } - - if(!cache->getData(ros::Time(), temp)) - { - continue; - } - - frame_id_num = temp.frame_id_; - - std::string authority = "no recorded authority"; - std::map::const_iterator it = frame_authority_.find(cfid); - if (it != frame_authority_.end()) { - authority = it->second; - } - - double rate = cache->getListLength() / std::max((cache->getLatestTimestamp().toSec() - - cache->getOldestTimestamp().toSec() ), 0.0001); - - mstream << std::fixed; //fixed point notation - mstream.precision(3); //3 decimal places - mstream << frameIDs_reverse[cfid] << ": " << std::endl; - mstream << " parent: '" << frameIDs_reverse[frame_id_num] << "'" << std::endl; - mstream << " broadcaster: '" << authority << "'" << std::endl; - mstream << " rate: " << rate << std::endl; - mstream << " most_recent_transform: " << (cache->getLatestTimestamp()).toSec() << std::endl; - mstream << " oldest_transform: " << (cache->getOldestTimestamp()).toSec() << std::endl; - if ( current_time > 0 ) { - mstream << " transform_delay: " << current_time - cache->getLatestTimestamp().toSec() << std::endl; - } - mstream << " buffer_length: " << (cache->getLatestTimestamp() - cache->getOldestTimestamp()).toSec() << std::endl; - } - - return mstream.str(); -} - -std::string BufferCore::allFramesAsYAML() const -{ - return this->allFramesAsYAML(0.0); -} - -TransformableCallbackHandle BufferCore::addTransformableCallback(const TransformableCallback& cb) -{ - boost::mutex::scoped_lock lock(transformable_callbacks_mutex_); - TransformableCallbackHandle handle = ++transformable_callbacks_counter_; - while (!transformable_callbacks_.insert(std::make_pair(handle, cb)).second) - { - handle = ++transformable_callbacks_counter_; - } - - return handle; -} - -struct BufferCore::RemoveRequestByCallback -{ - RemoveRequestByCallback(TransformableCallbackHandle handle) - : handle_(handle) - {} - - bool operator()(const TransformableRequest& req) - { - return req.cb_handle == handle_; - } - - TransformableCallbackHandle handle_; -}; - -void BufferCore::removeTransformableCallback(TransformableCallbackHandle handle) -{ - { - 
boost::mutex::scoped_lock lock(transformable_callbacks_mutex_); - transformable_callbacks_.erase(handle); - } - - { - boost::mutex::scoped_lock lock(transformable_requests_mutex_); - V_TransformableRequest::iterator it = std::remove_if(transformable_requests_.begin(), transformable_requests_.end(), RemoveRequestByCallback(handle)); - transformable_requests_.erase(it, transformable_requests_.end()); - } -} - -TransformableRequestHandle BufferCore::addTransformableRequest(TransformableCallbackHandle handle, const std::string& target_frame, const std::string& source_frame, ros::Time time) -{ - // shortcut if target == source - if (target_frame == source_frame) - { - return 0; - } - - TransformableRequest req; - req.target_id = lookupFrameNumber(target_frame); - req.source_id = lookupFrameNumber(source_frame); - - // First check if the request is already transformable. If it is, return immediately - if (canTransformInternal(req.target_id, req.source_id, time, 0)) - { - return 0; - } - - // Might not be transformable at all, ever (if it's too far in the past) - if (req.target_id && req.source_id) - { - ros::Time latest_time; - // TODO: This is incorrect, but better than nothing. Really we want the latest time for - // any of the frames - getLatestCommonTime(req.target_id, req.source_id, latest_time, 0); - if (!latest_time.isZero() && time + cache_time_ < latest_time) - { - return 0xffffffffffffffffULL; - } - } - - req.cb_handle = handle; - req.time = time; - req.request_handle = ++transformable_requests_counter_; - if (req.request_handle == 0 || req.request_handle == 0xffffffffffffffffULL) - { - req.request_handle = 1; - } - - if (req.target_id == 0) - { - req.target_string = target_frame; - } - - if (req.source_id == 0) - { - req.source_string = source_frame; - } - - boost::mutex::scoped_lock lock(transformable_requests_mutex_); - transformable_requests_.push_back(req); - - return req.request_handle; -} - -struct BufferCore::RemoveRequestByID -{ - RemoveRequestByID(TransformableRequestHandle handle) - : handle_(handle) - {} - - bool operator()(const TransformableRequest& req) - { - return req.request_handle == handle_; - } - - TransformableCallbackHandle handle_; -}; - -void BufferCore::cancelTransformableRequest(TransformableRequestHandle handle) -{ - boost::mutex::scoped_lock lock(transformable_requests_mutex_); - V_TransformableRequest::iterator it = std::remove_if(transformable_requests_.begin(), transformable_requests_.end(), RemoveRequestByID(handle)); - - if (it != transformable_requests_.end()) - { - transformable_requests_.erase(it, transformable_requests_.end()); - } -} - - - -// backwards compability for tf methods -boost::signals2::connection BufferCore::_addTransformsChangedListener(boost::function callback) -{ - boost::mutex::scoped_lock lock(transformable_requests_mutex_); - return _transforms_changed_.connect(callback); -} - -void BufferCore::_removeTransformsChangedListener(boost::signals2::connection c) -{ - boost::mutex::scoped_lock lock(transformable_requests_mutex_); - c.disconnect(); -} - - -bool BufferCore::_frameExists(const std::string& frame_id_str) const -{ - boost::mutex::scoped_lock lock(frame_mutex_); - return frameIDs_.count(frame_id_str); -} - -bool BufferCore::_getParent(const std::string& frame_id, ros::Time time, std::string& parent) const -{ - - boost::mutex::scoped_lock lock(frame_mutex_); - CompactFrameID frame_number = lookupFrameNumber(frame_id); - TimeCacheInterfacePtr frame = getFrame(frame_number); - - if (! 
frame) - return false; - - CompactFrameID parent_id = frame->getParent(time, NULL); - if (parent_id == 0) - return false; - - parent = lookupFrameString(parent_id); - return true; -}; - -void BufferCore::_getFrameStrings(std::vector & vec) const -{ - vec.clear(); - - boost::mutex::scoped_lock lock(frame_mutex_); - - TransformStorage temp; - - // for (std::vector< TimeCache*>::iterator it = frames_.begin(); it != frames_.end(); ++it) - for (unsigned int counter = 1; counter < frameIDs_reverse.size(); counter ++) - { - vec.push_back(frameIDs_reverse[counter]); - } - return; -} - - - - -void BufferCore::testTransformableRequests() -{ - boost::mutex::scoped_lock lock(transformable_requests_mutex_); - V_TransformableRequest::iterator it = transformable_requests_.begin(); - - typedef boost::tuple TransformableTuple; - std::vector transformables; - - for (; it != transformable_requests_.end();) - { - TransformableRequest& req = *it; - - // One or both of the frames may not have existed when the request was originally made. - if (req.target_id == 0) - { - req.target_id = lookupFrameNumber(req.target_string); - } - - if (req.source_id == 0) - { - req.source_id = lookupFrameNumber(req.source_string); - } - - ros::Time latest_time; - bool do_cb = false; - TransformableResult result = TransformAvailable; - // TODO: This is incorrect, but better than nothing. Really we want the latest time for - // any of the frames - getLatestCommonTime(req.target_id, req.source_id, latest_time, 0); - if (!latest_time.isZero() && req.time + cache_time_ < latest_time) - { - do_cb = true; - result = TransformFailure; - } - else if (canTransformInternal(req.target_id, req.source_id, req.time, 0)) - { - do_cb = true; - result = TransformAvailable; - } - - if (do_cb) - { - { - boost::mutex::scoped_lock lock2(transformable_callbacks_mutex_); - M_TransformableCallback::iterator it = transformable_callbacks_.find(req.cb_handle); - if (it != transformable_callbacks_.end()) - { - transformables.push_back(boost::make_tuple(boost::ref(it->second), - req.request_handle, - lookupFrameString(req.target_id), - lookupFrameString(req.source_id), - boost::ref(req.time), - boost::ref(result))); - } - } - - if (transformable_requests_.size() > 1) - { - transformable_requests_[it - transformable_requests_.begin()] = transformable_requests_.back(); - } - - transformable_requests_.erase(transformable_requests_.end() - 1); - } - else - { - ++it; - } - } - - // unlock before allowing possible user callbacks to avoid potential deadlock (#91) - lock.unlock(); - - BOOST_FOREACH (TransformableTuple tt, transformables) - { - tt.get<0>()(tt.get<1>(), tt.get<2>(), tt.get<3>(), tt.get<4>(), tt.get<5>()); - } - - // Backwards compatability callback for tf - _transforms_changed_(); -} - - -std::string BufferCore::_allFramesAsDot(double current_time) const -{ - std::stringstream mstream; - mstream << "digraph G {" << std::endl; - boost::mutex::scoped_lock lock(frame_mutex_); - - TransformStorage temp; - - if (frames_.size() == 1) { - mstream <<"\"no tf data recieved\""; - } - mstream.precision(3); - mstream.setf(std::ios::fixed,std::ios::floatfield); - - for (unsigned int counter = 1; counter < frames_.size(); counter ++) // one referenced for 0 is no frame - { - unsigned int frame_id_num; - TimeCacheInterfacePtr counter_frame = getFrame(counter); - if (!counter_frame) { - continue; - } - if(!counter_frame->getData(ros::Time(), temp)) { - continue; - } else { - frame_id_num = temp.frame_id_; - } - std::string authority = "no recorded authority"; - 
std::map::const_iterator it = frame_authority_.find(counter); - if (it != frame_authority_.end()) - authority = it->second; - - double rate = counter_frame->getListLength() / std::max((counter_frame->getLatestTimestamp().toSec() - - counter_frame->getOldestTimestamp().toSec()), 0.0001); - - mstream << std::fixed; //fixed point notation - mstream.precision(3); //3 decimal places - mstream << "\"" << frameIDs_reverse[frame_id_num] << "\"" << " -> " - << "\"" << frameIDs_reverse[counter] << "\"" << "[label=\"" - //<< "Time: " << current_time.toSec() << "\\n" - << "Broadcaster: " << authority << "\\n" - << "Average rate: " << rate << " Hz\\n" - << "Most recent transform: " << (counter_frame->getLatestTimestamp()).toSec() <<" "; - if (current_time > 0) - mstream << "( "<< current_time - counter_frame->getLatestTimestamp().toSec() << " sec old)"; - mstream << "\\n" - // << "(time: " << getFrame(counter)->getLatestTimestamp().toSec() << ")\\n" - // << "Oldest transform: " << (current_time - getFrame(counter)->getOldestTimestamp()).toSec() << " sec old \\n" - // << "(time: " << (getFrame(counter)->getOldestTimestamp()).toSec() << ")\\n" - << "Buffer length: " << (counter_frame->getLatestTimestamp()-counter_frame->getOldestTimestamp()).toSec() << " sec\\n" - <<"\"];" < 0) { - mstream << "edge [style=invis];" <" << "\"" << frameIDs_reverse[counter] << "\";" << std::endl; - } - continue; - } - if (counter_frame->getData(ros::Time(), temp)) { - frame_id_num = temp.frame_id_; - } else { - frame_id_num = 0; - } - - if(frameIDs_reverse[frame_id_num]=="NO_PARENT") - { - mstream << "edge [style=invis];" < 0) - mstream << "\"Recorded at time: " << current_time << "\"[ shape=plaintext ] ;\n "; - mstream << "}" << "->" << "\"" << frameIDs_reverse[counter] << "\";" << std::endl; - } - } - mstream << "}"; - return mstream.str(); -} - -std::string BufferCore::_allFramesAsDot() const -{ - return _allFramesAsDot(0.0); -} - -void BufferCore::_chainAsVector(const std::string & target_frame, ros::Time target_time, const std::string & source_frame, ros::Time source_time, const std::string& fixed_frame, std::vector& output) const -{ - std::string error_string; - - output.clear(); //empty vector - - std::stringstream mstream; - boost::mutex::scoped_lock lock(frame_mutex_); - - TransformAccum accum; - - // Get source frame/time using getFrame - CompactFrameID source_id = lookupFrameNumber(source_frame); - CompactFrameID fixed_id = lookupFrameNumber(fixed_frame); - CompactFrameID target_id = lookupFrameNumber(target_frame); - - std::vector source_frame_chain; - int retval = walkToTopParent(accum, source_time, fixed_id, source_id, &error_string, &source_frame_chain); - - if (retval != tf2_msgs::TF2Error::NO_ERROR) - { - switch (retval) - { - case tf2_msgs::TF2Error::CONNECTIVITY_ERROR: - throw ConnectivityException(error_string); - case tf2_msgs::TF2Error::EXTRAPOLATION_ERROR: - throw ExtrapolationException(error_string); - case tf2_msgs::TF2Error::LOOKUP_ERROR: - throw LookupException(error_string); - default: - CONSOLE_BRIDGE_logError("Unknown error code: %d", retval); - assert(0); - } - } - - std::vector target_frame_chain; - retval = walkToTopParent(accum, target_time, target_id, fixed_id, &error_string, &target_frame_chain); - - if (retval != tf2_msgs::TF2Error::NO_ERROR) - { - switch (retval) - { - case tf2_msgs::TF2Error::CONNECTIVITY_ERROR: - throw ConnectivityException(error_string); - case tf2_msgs::TF2Error::EXTRAPOLATION_ERROR: - throw ExtrapolationException(error_string); - case 
tf2_msgs::TF2Error::LOOKUP_ERROR: - throw LookupException(error_string); - default: - CONSOLE_BRIDGE_logError("Unknown error code: %d", retval); - assert(0); - } - } - // If the two chains overlap clear the overlap - if (source_frame_chain.size() > 0 && target_frame_chain.size() > 0 && - source_frame_chain.back() == target_frame_chain.front()) - { - source_frame_chain.pop_back(); - } - // Join the two walks - for (unsigned int i = 0; i < target_frame_chain.size(); ++i) - { - source_frame_chain.push_back(target_frame_chain[i]); - } - - - // Write each element of source_frame_chain as string - for (unsigned int i = 0; i < source_frame_chain.size(); ++i) - { - output.push_back(lookupFrameString(source_frame_chain[i])); - } -} - -int TestBufferCore::_walkToTopParent(BufferCore& buffer, ros::Time time, CompactFrameID target_id, CompactFrameID source_id, std::string* error_string, std::vector *frame_chain) const -{ - TransformAccum accum; - return buffer.walkToTopParent(accum, time, target_id, source_id, error_string, frame_chain); -} - -} // namespace tf2 diff --git a/src/geometry2/tf2/src/cache.cpp b/src/geometry2/tf2/src/cache.cpp deleted file mode 100644 index edbbc76..0000000 --- a/src/geometry2/tf2/src/cache.cpp +++ /dev/null @@ -1,313 +0,0 @@ -/* - * Copyright (c) 2008, Willow Garage, Inc. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the Willow Garage, Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. 
- */ - -/** \author Tully Foote */ - -#include "tf2/time_cache.h" -#include "tf2/exceptions.h" - -#include -#include -#include -#include -#include - -namespace tf2 { - -TransformStorage::TransformStorage() -{ -} - -TransformStorage::TransformStorage(const geometry_msgs::TransformStamped& data, CompactFrameID frame_id, - CompactFrameID child_frame_id) -: stamp_(data.header.stamp) -, frame_id_(frame_id) -, child_frame_id_(child_frame_id) -{ - const geometry_msgs::Quaternion& o = data.transform.rotation; - rotation_ = tf2::Quaternion(o.x, o.y, o.z, o.w); - const geometry_msgs::Vector3& v = data.transform.translation; - translation_ = tf2::Vector3(v.x, v.y, v.z); -} - -TimeCache::TimeCache(ros::Duration max_storage_time) -: max_storage_time_(max_storage_time) -{} - -namespace cache { // Avoid ODR collisions https://github.com/ros/geometry2/issues/175 -// hoisting these into separate functions causes an ~8% speedup. Removing calling them altogether adds another ~10% -void createExtrapolationException1(ros::Time t0, ros::Time t1, std::string* error_str) -{ - if (error_str) - { - std::stringstream ss; - ss << "Lookup would require extrapolation at time " << t0 << ", but only time " << t1 << " is in the buffer"; - *error_str = ss.str(); - } -} - -void createExtrapolationException2(ros::Time t0, ros::Time t1, std::string* error_str) -{ - if (error_str) - { - std::stringstream ss; - ss << "Lookup would require extrapolation into the future. Requested time " << t0 << " but the latest data is at time " << t1; - *error_str = ss.str(); - } -} - -void createExtrapolationException3(ros::Time t0, ros::Time t1, std::string* error_str) -{ - if (error_str) - { - std::stringstream ss; - ss << "Lookup would require extrapolation into the past. Requested time " << t0 << " but the earliest data is at time " << t1; - *error_str = ss.str(); - } -} -} // namespace cache - -bool operator>(const TransformStorage& lhs, const TransformStorage& rhs) -{ - return lhs.stamp_ > rhs.stamp_; -} - -uint8_t TimeCache::findClosest(TransformStorage*& one, TransformStorage*& two, ros::Time target_time, std::string* error_str) -{ - //No values stored - if (storage_.empty()) - { - return 0; - } - - //If time == 0 return the latest - if (target_time.isZero()) - { - one = &storage_.front(); - return 1; - } - - // One value stored - if (++storage_.begin() == storage_.end()) - { - TransformStorage& ts = *storage_.begin(); - if (ts.stamp_ == target_time) - { - one = &ts; - return 1; - } - else - { - cache::createExtrapolationException1(target_time, ts.stamp_, error_str); - return 0; - } - } - - ros::Time latest_time = (*storage_.begin()).stamp_; - ros::Time earliest_time = (*(storage_.rbegin())).stamp_; - - if (target_time == latest_time) - { - one = &(*storage_.begin()); - return 1; - } - else if (target_time == earliest_time) - { - one = &(*storage_.rbegin()); - return 1; - } - // Catch cases that would require extrapolation - else if (target_time > latest_time) - { - cache::createExtrapolationException2(target_time, latest_time, error_str); - return 0; - } - else if (target_time < earliest_time) - { - cache::createExtrapolationException3(target_time, earliest_time, error_str); - return 0; - } - - //At least 2 values stored - //Find the first value less than the target value - L_TransformStorage::iterator storage_it; - TransformStorage storage_target_time; - storage_target_time.stamp_ = target_time; - - storage_it = std::lower_bound( - storage_.begin(), - storage_.end(), - storage_target_time, std::greater()); - - //Finally the case 
were somewhere in the middle Guarenteed no extrapolation :-) - one = &*(storage_it); //Older - two = &*(--storage_it); //Newer - return 2; - - -} - -void TimeCache::interpolate(const TransformStorage& one, const TransformStorage& two, ros::Time time, TransformStorage& output) -{ - // Check for zero distance case - if( two.stamp_ == one.stamp_ ) - { - output = two; - return; - } - //Calculate the ratio - tf2Scalar ratio = (time - one.stamp_).toSec() / (two.stamp_ - one.stamp_).toSec(); - - //Interpolate translation - output.translation_.setInterpolate3(one.translation_, two.translation_, ratio); - - //Interpolate rotation - output.rotation_ = slerp( one.rotation_, two.rotation_, ratio); - - output.stamp_ = time; - output.frame_id_ = one.frame_id_; - output.child_frame_id_ = one.child_frame_id_; -} - -bool TimeCache::getData(ros::Time time, TransformStorage & data_out, std::string* error_str) //returns false if data not available -{ - TransformStorage* p_temp_1; - TransformStorage* p_temp_2; - - int num_nodes = findClosest(p_temp_1, p_temp_2, time, error_str); - if (num_nodes == 0) - { - return false; - } - else if (num_nodes == 1) - { - data_out = *p_temp_1; - } - else if (num_nodes == 2) - { - if( p_temp_1->frame_id_ == p_temp_2->frame_id_) - { - interpolate(*p_temp_1, *p_temp_2, time, data_out); - } - else - { - data_out = *p_temp_1; - } - } - else - { - assert(0); - } - - return true; -} - -CompactFrameID TimeCache::getParent(ros::Time time, std::string* error_str) -{ - TransformStorage* p_temp_1; - TransformStorage* p_temp_2; - - int num_nodes = findClosest(p_temp_1, p_temp_2, time, error_str); - if (num_nodes == 0) - { - return 0; - } - - return p_temp_1->frame_id_; -} - -bool TimeCache::insertData(const TransformStorage& new_data) -{ - L_TransformStorage::iterator storage_it = storage_.begin(); - - if(storage_it != storage_.end()) - { - if (storage_it->stamp_ > new_data.stamp_ + max_storage_time_) - { - return false; - } - } - - - while(storage_it != storage_.end()) - { - if (storage_it->stamp_ <= new_data.stamp_) - break; - storage_it++; - } - storage_.insert(storage_it, new_data); - - pruneList(); - return true; -} - -void TimeCache::clearList() -{ - storage_.clear(); -} - -unsigned int TimeCache::getListLength() -{ - return storage_.size(); -} - -P_TimeAndFrameID TimeCache::getLatestTimeAndParent() -{ - if (storage_.empty()) - { - return std::make_pair(ros::Time(), 0); - } - - const TransformStorage& ts = storage_.front(); - return std::make_pair(ts.stamp_, ts.frame_id_); -} - -ros::Time TimeCache::getLatestTimestamp() -{ - if (storage_.empty()) return ros::Time(); //empty list case - return storage_.front().stamp_; -} - -ros::Time TimeCache::getOldestTimestamp() -{ - if (storage_.empty()) return ros::Time(); //empty list case - return storage_.back().stamp_; -} - -void TimeCache::pruneList() -{ - ros::Time latest_time = storage_.begin()->stamp_; - - while(!storage_.empty() && storage_.back().stamp_ + max_storage_time_ < latest_time) - { - storage_.pop_back(); - } - -} // namespace tf2 -} diff --git a/src/geometry2/tf2/src/static_cache.cpp b/src/geometry2/tf2/src/static_cache.cpp deleted file mode 100644 index cb588c5..0000000 --- a/src/geometry2/tf2/src/static_cache.cpp +++ /dev/null @@ -1,80 +0,0 @@ -/* - * Copyright (c) 2008, Willow Garage, Inc. - * All rights reserved. 
- * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the Willow Garage, Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ - -/** \author Tully Foote */ - -#include "tf2/time_cache.h" -#include "tf2/exceptions.h" - -#include "tf2/LinearMath/Transform.h" - - -using namespace tf2; - - -bool StaticCache::getData(ros::Time time, TransformStorage & data_out, std::string* error_str) //returns false if data not available -{ - data_out = storage_; - data_out.stamp_ = time; - return true; -}; - -bool StaticCache::insertData(const TransformStorage& new_data) -{ - storage_ = new_data; - return true; -}; - - - - -void StaticCache::clearList() { return; }; - -unsigned int StaticCache::getListLength() { return 1; }; - -CompactFrameID StaticCache::getParent(ros::Time time, std::string* error_str) -{ - return storage_.frame_id_; -} - -P_TimeAndFrameID StaticCache::getLatestTimeAndParent() -{ - return std::make_pair(ros::Time(), storage_.frame_id_); -} - -ros::Time StaticCache::getLatestTimestamp() -{ - return ros::Time(); -}; - -ros::Time StaticCache::getOldestTimestamp() -{ - return ros::Time(); -}; - diff --git a/src/geometry2/tf2/test/cache_unittest.cpp b/src/geometry2/tf2/test/cache_unittest.cpp deleted file mode 100644 index 2077791..0000000 --- a/src/geometry2/tf2/test/cache_unittest.cpp +++ /dev/null @@ -1,414 +0,0 @@ -/* - * Copyright (c) 2008, Willow Garage, Inc. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the Willow Garage, Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. 
- * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ - -#include -#include -#include "tf2/LinearMath/Quaternion.h" -#include - -#include - -#include - -std::vector values; -unsigned int step = 0; - -void seed_rand() -{ - values.clear(); - for (unsigned int i = 0; i < 1000; i++) - { - int pseudo_rand = std::floor(i * M_PI); - values.push_back(( pseudo_rand % 100)/50.0 - 1.0); - //printf("Seeding with %f\n", values.back()); - } -}; - - -double get_rand() -{ - if (values.size() == 0) throw std::runtime_error("you need to call seed_rand first"); - if (step >= values.size()) - step = 0; - else - step++; - return values[step]; -} - -using namespace tf2; - - -void setIdentity(TransformStorage& stor) -{ - stor.translation_.setValue(0.0, 0.0, 0.0); - stor.rotation_.setValue(0.0, 0.0, 0.0, 1.0); -} - -TEST(TimeCache, Repeatability) -{ - unsigned int runs = 100; - - tf2::TimeCache cache; - - TransformStorage stor; - setIdentity(stor); - - for ( uint64_t i = 1; i < runs ; i++ ) - { - stor.frame_id_ = i; - stor.stamp_ = ros::Time().fromNSec(i); - - cache.insertData(stor); - } - - for ( uint64_t i = 1; i < runs ; i++ ) - - { - cache.getData(ros::Time().fromNSec(i), stor); - EXPECT_EQ(stor.frame_id_, i); - EXPECT_EQ(stor.stamp_, ros::Time().fromNSec(i)); - } - -} - -TEST(TimeCache, RepeatabilityReverseInsertOrder) -{ - unsigned int runs = 100; - - tf2::TimeCache cache; - - TransformStorage stor; - setIdentity(stor); - - for ( int i = runs -1; i >= 0 ; i-- ) - { - stor.frame_id_ = i; - stor.stamp_ = ros::Time().fromNSec(i); - - cache.insertData(stor); - } - for ( uint64_t i = 1; i < runs ; i++ ) - - { - cache.getData(ros::Time().fromNSec(i), stor); - EXPECT_EQ(stor.frame_id_, i); - EXPECT_EQ(stor.stamp_, ros::Time().fromNSec(i)); - } - -} - -#if 0 // jfaust: this doesn't seem to actually be testing random insertion? 
-TEST(TimeCache, RepeatabilityRandomInsertOrder) -{ - - seed_rand(); - - tf2::TimeCache cache; - double my_vals[] = {13,2,5,4,9,7,3,11,15,14,12,1,6,10,0,8}; - std::vector values (my_vals, my_vals + sizeof(my_vals)/sizeof(double)); - unsigned int runs = values.size(); - - TransformStorage stor; - setIdentity(stor); - for ( uint64_t i = 0; i xvalues(2); - std::vector yvalues(2); - std::vector zvalues(2); - - uint64_t offset = 200; - - TransformStorage stor; - setIdentity(stor); - - for ( uint64_t i = 1; i < runs ; i++ ) - { - - for (uint64_t step = 0; step < 2 ; step++) - { - xvalues[step] = 10.0 * get_rand(); - yvalues[step] = 10.0 * get_rand(); - zvalues[step] = 10.0 * get_rand(); - - stor.translation_.setValue(xvalues[step], yvalues[step], zvalues[step]); - stor.frame_id_ = 2; - stor.stamp_ = ros::Time().fromNSec(step * 100 + offset); - cache.insertData(stor); - } - - for (int pos = 0; pos < 100 ; pos ++) - { - uint64_t time = offset + pos; - cache.getData(ros::Time().fromNSec(time), stor); - uint64_t time_out = stor.stamp_.toNSec(); - double x_out = stor.translation_.x(); - double y_out = stor.translation_.y(); - double z_out = stor.translation_.z(); - // printf("pose %d, %f %f %f, expected %f %f %f\n", pos, x_out, y_out, z_out, - // xvalues[0] + (xvalues[1] - xvalues[0]) * (double)pos/100., - // yvalues[0] + (yvalues[1] - yvalues[0]) * (double)pos/100.0, - // zvalues[0] + (xvalues[1] - zvalues[0]) * (double)pos/100.0); - EXPECT_EQ(time, time_out); - EXPECT_NEAR(xvalues[0] + (xvalues[1] - xvalues[0]) * (double)pos/100.0, x_out, epsilon); - EXPECT_NEAR(yvalues[0] + (yvalues[1] - yvalues[0]) * (double)pos/100.0, y_out, epsilon); - EXPECT_NEAR(zvalues[0] + (zvalues[1] - zvalues[0]) * (double)pos/100.0, z_out, epsilon); - } - - - cache.clearList(); - } - - -} - -/** \brief Make sure we dont' interpolate across reparented data */ -TEST(TimeCache, ReparentingInterpolationProtection) -{ - double epsilon = 1e-6; - uint64_t offset = 555; - - seed_rand(); - - tf2::TimeCache cache; - std::vector xvalues(2); - std::vector yvalues(2); - std::vector zvalues(2); - - TransformStorage stor; - setIdentity(stor); - - for (uint64_t step = 0; step < 2 ; step++) - { - xvalues[step] = 10.0 * get_rand(); - yvalues[step] = 10.0 * get_rand(); - zvalues[step] = 10.0 * get_rand(); - - stor.translation_.setValue(xvalues[step], yvalues[step], zvalues[step]); - stor.frame_id_ = step + 4; - stor.stamp_ = ros::Time().fromNSec(step * 100 + offset); - cache.insertData(stor); - } - - for (int pos = 0; pos < 100 ; pos ++) - { - EXPECT_TRUE(cache.getData(ros::Time().fromNSec(offset + pos), stor)); - double x_out = stor.translation_.x(); - double y_out = stor.translation_.y(); - double z_out = stor.translation_.z(); - EXPECT_NEAR(xvalues[0], x_out, epsilon); - EXPECT_NEAR(yvalues[0], y_out, epsilon); - EXPECT_NEAR(zvalues[0], z_out, epsilon); - } -} - -TEST(Bullet, Slerp) -{ - - uint64_t runs = 100; - seed_rand(); - - tf2::Quaternion q1, q2; - q1.setEuler(0,0,0); - - for (uint64_t i = 0 ; i < runs ; i++) - { - q2.setEuler(1.0 * get_rand(), - 1.0 * get_rand(), - 1.0 * get_rand()); - - - tf2::Quaternion q3 = slerp(q1,q2,0.5); - - EXPECT_NEAR(q3.angle(q1), q2.angle(q3), 1e-5); - } - -} - - -TEST(TimeCache, AngularInterpolation) -{ - uint64_t runs = 100; - double epsilon = 1e-6; - seed_rand(); - - tf2::TimeCache cache; - std::vector yawvalues(2); - std::vector pitchvalues(2); - std::vector rollvalues(2); - uint64_t offset = 200; - - std::vector quats(2); - - TransformStorage stor; - setIdentity(stor); - - for ( uint64_t i = 1; i 
< runs ; i++ ) - { - - for (uint64_t step = 0; step < 2 ; step++) - { - yawvalues[step] = 10.0 * get_rand() / 100.0; - pitchvalues[step] = 0;//10.0 * get_rand(); - rollvalues[step] = 0;//10.0 * get_rand(); - quats[step].setRPY(yawvalues[step], pitchvalues[step], rollvalues[step]); - stor.rotation_ = quats[step]; - stor.frame_id_ = 3; - stor.stamp_ = ros::Time().fromNSec(offset + (step * 100)); //step = 0 or 1 - cache.insertData(stor); - } - - for (int pos = 0; pos < 100 ; pos ++) - { - uint64_t time = offset + pos; - cache.getData(ros::Time().fromNSec(time), stor); - uint64_t time_out = stor.stamp_.toNSec(); - tf2::Quaternion quat (stor.rotation_); - - //Generate a ground truth quaternion directly calling slerp - tf2::Quaternion ground_truth = quats[0].slerp(quats[1], pos/100.0); - - //Make sure the transformed one and the direct call match - EXPECT_EQ(time, time_out); - EXPECT_NEAR(0, angle(ground_truth, quat), epsilon); - - } - - cache.clearList(); - } - - -} - -TEST(TimeCache, DuplicateEntries) -{ - - TimeCache cache; - - TransformStorage stor; - setIdentity(stor); - stor.frame_id_ = 3; - stor.stamp_ = ros::Time().fromNSec(1); - - cache.insertData(stor); - - cache.insertData(stor); - - - cache.getData(ros::Time().fromNSec(1), stor); - - //printf(" stor is %f\n", stor.translation_.x()); - EXPECT_TRUE(!std::isnan(stor.translation_.x())); - EXPECT_TRUE(!std::isnan(stor.translation_.y())); - EXPECT_TRUE(!std::isnan(stor.translation_.z())); - EXPECT_TRUE(!std::isnan(stor.rotation_.x())); - EXPECT_TRUE(!std::isnan(stor.rotation_.y())); - EXPECT_TRUE(!std::isnan(stor.rotation_.z())); - EXPECT_TRUE(!std::isnan(stor.rotation_.w())); -} - -int main(int argc, char **argv){ - testing::InitGoogleTest(&argc, argv); - return RUN_ALL_TESTS(); -} - diff --git a/src/geometry2/tf2/test/simple_tf2_core.cpp b/src/geometry2/tf2/test/simple_tf2_core.cpp deleted file mode 100644 index 79ea4d7..0000000 --- a/src/geometry2/tf2/test/simple_tf2_core.cpp +++ /dev/null @@ -1,320 +0,0 @@ -/* - * Copyright (c) 2008, Willow Garage, Inc. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the Willow Garage, Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ - -#include -#include -#include -#include "tf2/LinearMath/Vector3.h" -#include "tf2/exceptions.h" - -TEST(tf2, setTransformFail) -{ - tf2::BufferCore tfc; - geometry_msgs::TransformStamped st; - EXPECT_FALSE(tfc.setTransform(st, "authority1")); - -} - -TEST(tf2, setTransformValid) -{ - tf2::BufferCore tfc; - geometry_msgs::TransformStamped st; - st.header.frame_id = "foo"; - st.header.stamp = ros::Time(1.0); - st.child_frame_id = "child"; - st.transform.rotation.w = 1; - EXPECT_TRUE(tfc.setTransform(st, "authority1")); - -} - -TEST(tf2, setTransformInvalidQuaternion) -{ - tf2::BufferCore tfc; - geometry_msgs::TransformStamped st; - st.header.frame_id = "foo"; - st.header.stamp = ros::Time(1.0); - st.child_frame_id = "child"; - st.transform.rotation.w = 0; - EXPECT_FALSE(tfc.setTransform(st, "authority1")); - -} - -TEST(tf2_lookupTransform, LookupException_Nothing_Exists) -{ - tf2::BufferCore tfc; - EXPECT_THROW(tfc.lookupTransform("a", "b", ros::Time().fromSec(1.0)), tf2::LookupException); - -} - -TEST(tf2_canTransform, Nothing_Exists) -{ - tf2::BufferCore tfc; - EXPECT_FALSE(tfc.canTransform("a", "b", ros::Time().fromSec(1.0))); - - std::string error_msg = std::string(); - EXPECT_FALSE(tfc.canTransform("a", "b", ros::Time().fromSec(1.0), &error_msg)); - ASSERT_STREQ(error_msg.c_str(), "canTransform: target_frame a does not exist. 
canTransform: source_frame b does not exist."); - -} - -TEST(tf2_lookupTransform, LookupException_One_Exists) -{ - tf2::BufferCore tfc; - geometry_msgs::TransformStamped st; - st.header.frame_id = "foo"; - st.header.stamp = ros::Time(1.0); - st.child_frame_id = "child"; - st.transform.rotation.w = 1; - EXPECT_TRUE(tfc.setTransform(st, "authority1")); - EXPECT_THROW(tfc.lookupTransform("foo", "bar", ros::Time().fromSec(1.0)), tf2::LookupException); - -} - -TEST(tf2_canTransform, One_Exists) -{ - tf2::BufferCore tfc; - geometry_msgs::TransformStamped st; - st.header.frame_id = "foo"; - st.header.stamp = ros::Time(1.0); - st.child_frame_id = "child"; - st.transform.rotation.w = 1; - EXPECT_TRUE(tfc.setTransform(st, "authority1")); - EXPECT_FALSE(tfc.canTransform("foo", "bar", ros::Time().fromSec(1.0))); -} - -TEST(tf2_chainAsVector, chain_v_configuration) -{ - tf2::BufferCore mBC; - tf2::TestBufferCore tBC; - - geometry_msgs::TransformStamped st; - st.header.stamp = ros::Time(0); - st.transform.rotation.w = 1; - - st.header.frame_id = "a"; - st.child_frame_id = "b"; - mBC.setTransform(st, "authority1"); - - st.header.frame_id = "b"; - st.child_frame_id = "c"; - mBC.setTransform(st, "authority1"); - - st.header.frame_id = "a"; - st.child_frame_id = "d"; - mBC.setTransform(st, "authority1"); - - st.header.frame_id = "d"; - st.child_frame_id = "e"; - mBC.setTransform(st, "authority1"); - - std::vector chain; - - - mBC._chainAsVector( "c", ros::Time(), "c", ros::Time(), "c", chain); - EXPECT_EQ( 0, chain.size()); - - mBC._chainAsVector( "a", ros::Time(), "c", ros::Time(), "c", chain); - EXPECT_EQ( 3, chain.size()); - if( chain.size() >= 1 ) EXPECT_EQ( chain[0], "c" ); - if( chain.size() >= 2 ) EXPECT_EQ( chain[1], "b" ); - if( chain.size() >= 3 ) EXPECT_EQ( chain[2], "a" ); - - mBC._chainAsVector( "c", ros::Time(), "a", ros::Time(), "c", chain); - EXPECT_EQ( 3, chain.size()); - if( chain.size() >= 1 ) EXPECT_EQ( chain[0], "a" ); - if( chain.size() >= 2 ) EXPECT_EQ( chain[1], "b" ); - if( chain.size() >= 3 ) EXPECT_EQ( chain[2], "c" ); - - mBC._chainAsVector( "a", ros::Time(), "c", ros::Time(), "a", chain); - EXPECT_EQ( 3, chain.size()); - if( chain.size() >= 1 ) EXPECT_EQ( chain[0], "c" ); - if( chain.size() >= 2 ) EXPECT_EQ( chain[1], "b" ); - if( chain.size() >= 3 ) EXPECT_EQ( chain[2], "a" ); - - mBC._chainAsVector( "c", ros::Time(), "a", ros::Time(), "a", chain); - EXPECT_EQ( 3, chain.size()); - if( chain.size() >= 1 ) EXPECT_EQ( chain[0], "a" ); - if( chain.size() >= 2 ) EXPECT_EQ( chain[1], "b" ); - if( chain.size() >= 3 ) EXPECT_EQ( chain[2], "c" ); - - mBC._chainAsVector( "c", ros::Time(), "e", ros::Time(), "c", chain); - - EXPECT_EQ( 5, chain.size()); - if( chain.size() >= 1 ) EXPECT_EQ( chain[0], "e" ); - if( chain.size() >= 2 ) EXPECT_EQ( chain[1], "d" ); - if( chain.size() >= 3 ) EXPECT_EQ( chain[2], "a" ); - if( chain.size() >= 4 ) EXPECT_EQ( chain[3], "b" ); - if( chain.size() >= 5 ) EXPECT_EQ( chain[4], "c" ); - - mBC._chainAsVector( "c", ros::Time(), "e", ros::Time(), "a", chain); - - EXPECT_EQ( 5, chain.size()); - if( chain.size() >= 1 ) EXPECT_EQ( chain[0], "e" ); - if( chain.size() >= 2 ) EXPECT_EQ( chain[1], "d" ); - if( chain.size() >= 3 ) EXPECT_EQ( chain[2], "a" ); - if( chain.size() >= 4 ) EXPECT_EQ( chain[3], "b" ); - if( chain.size() >= 5 ) EXPECT_EQ( chain[4], "c" ); - - mBC._chainAsVector( "c", ros::Time(), "e", ros::Time(), "e", chain); - - EXPECT_EQ( 5, chain.size()); - if( chain.size() >= 1 ) EXPECT_EQ( chain[0], "e" ); - if( chain.size() >= 2 ) EXPECT_EQ( chain[1], 
"d" ); - if( chain.size() >= 3 ) EXPECT_EQ( chain[2], "a" ); - if( chain.size() >= 4 ) EXPECT_EQ( chain[3], "b" ); - if( chain.size() >= 5 ) EXPECT_EQ( chain[4], "c" ); -} - -TEST(tf2_walkToTopParent, walk_i_configuration) -{ - tf2::BufferCore mBC; - tf2::TestBufferCore tBC; - - geometry_msgs::TransformStamped st; - st.header.stamp = ros::Time(0); - st.transform.rotation.w = 1; - - st.header.frame_id = "a"; - st.child_frame_id = "b"; - mBC.setTransform(st, "authority1"); - - st.header.frame_id = "b"; - st.child_frame_id = "c"; - mBC.setTransform(st, "authority1"); - - st.header.frame_id = "c"; - st.child_frame_id = "d"; - mBC.setTransform(st, "authority1"); - - st.header.frame_id = "d"; - st.child_frame_id = "e"; - mBC.setTransform(st, "authority1"); - - std::vector id_chain; - tBC._walkToTopParent(mBC, ros::Time(), mBC._lookupFrameNumber("a"), mBC._lookupFrameNumber("e"), 0, &id_chain); - - EXPECT_EQ(5, id_chain.size() ); - if( id_chain.size() >= 1 ) EXPECT_EQ("e", tBC._lookupFrameString(mBC, id_chain[0])); - if( id_chain.size() >= 2 ) EXPECT_EQ("d", tBC._lookupFrameString(mBC, id_chain[1])); - if( id_chain.size() >= 3 ) EXPECT_EQ("c", tBC._lookupFrameString(mBC, id_chain[2])); - if( id_chain.size() >= 4 ) EXPECT_EQ("b", tBC._lookupFrameString(mBC, id_chain[3])); - if( id_chain.size() >= 5 ) EXPECT_EQ("a", tBC._lookupFrameString(mBC, id_chain[4])); - - id_chain.clear(); - tBC._walkToTopParent(mBC, ros::Time(), mBC._lookupFrameNumber("e"), mBC._lookupFrameNumber("a"), 0, &id_chain); - - EXPECT_EQ(5, id_chain.size() ); - if( id_chain.size() >= 1 ) EXPECT_EQ("a", tBC._lookupFrameString(mBC, id_chain[0])); - if( id_chain.size() >= 2 ) EXPECT_EQ("b", tBC._lookupFrameString(mBC, id_chain[1])); - if( id_chain.size() >= 3 ) EXPECT_EQ("c", tBC._lookupFrameString(mBC, id_chain[2])); - if( id_chain.size() >= 4 ) EXPECT_EQ("d", tBC._lookupFrameString(mBC, id_chain[3])); - if( id_chain.size() >= 5 ) EXPECT_EQ("e", tBC._lookupFrameString(mBC, id_chain[4])); - -} - -TEST(tf2_walkToTopParent, walk_v_configuration) -{ - tf2::BufferCore mBC; - tf2::TestBufferCore tBC; - - geometry_msgs::TransformStamped st; - st.header.stamp = ros::Time(0); - st.transform.rotation.w = 1; - - // st.header.frame_id = "aaa"; - // st.child_frame_id = "aa"; - // mBC.setTransform(st, "authority1"); - // - // st.header.frame_id = "aa"; - // st.child_frame_id = "a"; - // mBC.setTransform(st, "authority1"); - - st.header.frame_id = "a"; - st.child_frame_id = "b"; - mBC.setTransform(st, "authority1"); - - st.header.frame_id = "b"; - st.child_frame_id = "c"; - mBC.setTransform(st, "authority1"); - - st.header.frame_id = "a"; - st.child_frame_id = "d"; - mBC.setTransform(st, "authority1"); - - st.header.frame_id = "d"; - st.child_frame_id = "e"; - mBC.setTransform(st, "authority1"); - - std::vector id_chain; - tBC._walkToTopParent(mBC, ros::Time(), mBC._lookupFrameNumber("e"), mBC._lookupFrameNumber("c"), 0, &id_chain); - - EXPECT_EQ(5, id_chain.size()); - if( id_chain.size() >= 1 ) EXPECT_EQ("c", tBC._lookupFrameString(mBC, id_chain[0])); - if( id_chain.size() >= 2 ) EXPECT_EQ("b", tBC._lookupFrameString(mBC, id_chain[1])); - if( id_chain.size() >= 3 ) EXPECT_EQ("a", tBC._lookupFrameString(mBC, id_chain[2])); - if( id_chain.size() >= 4 ) EXPECT_EQ("d", tBC._lookupFrameString(mBC, id_chain[3])); - if( id_chain.size() >= 5 ) EXPECT_EQ("e", tBC._lookupFrameString(mBC, id_chain[4])); - - tBC._walkToTopParent(mBC, ros::Time(), mBC._lookupFrameNumber("c"), mBC._lookupFrameNumber("e"), 0, &id_chain); - EXPECT_EQ(5, id_chain.size()); - if( 
id_chain.size() >= 1 ) EXPECT_EQ("e", tBC._lookupFrameString(mBC, id_chain[0])); - if( id_chain.size() >= 2 ) EXPECT_EQ("d", tBC._lookupFrameString(mBC, id_chain[1])); - if( id_chain.size() >= 3 ) EXPECT_EQ("a", tBC._lookupFrameString(mBC, id_chain[2])); - if( id_chain.size() >= 4 ) EXPECT_EQ("b", tBC._lookupFrameString(mBC, id_chain[3])); - if( id_chain.size() >= 5 ) EXPECT_EQ("c", tBC._lookupFrameString(mBC, id_chain[4])); - - tBC._walkToTopParent(mBC, ros::Time(), mBC._lookupFrameNumber("a"), mBC._lookupFrameNumber("e"), 0, &id_chain); - EXPECT_EQ( id_chain.size(), 3 ); - if( id_chain.size() >= 1 ) EXPECT_EQ("e", tBC._lookupFrameString(mBC, id_chain[0])); - if( id_chain.size() >= 2 ) EXPECT_EQ("d", tBC._lookupFrameString(mBC, id_chain[1])); - if( id_chain.size() >= 3 ) EXPECT_EQ("a", tBC._lookupFrameString(mBC, id_chain[2])); - - tBC._walkToTopParent(mBC, ros::Time(), mBC._lookupFrameNumber("e"), mBC._lookupFrameNumber("a"), 0, &id_chain); - EXPECT_EQ( id_chain.size(), 3 ); - if( id_chain.size() >= 1 ) EXPECT_EQ("a", tBC._lookupFrameString(mBC, id_chain[0])); - if( id_chain.size() >= 2 ) EXPECT_EQ("d", tBC._lookupFrameString(mBC, id_chain[1])); - if( id_chain.size() >= 3 ) EXPECT_EQ("e", tBC._lookupFrameString(mBC, id_chain[2])); - - tBC._walkToTopParent(mBC, ros::Time(), mBC._lookupFrameNumber("e"), mBC._lookupFrameNumber("d"), 0, &id_chain); - EXPECT_EQ( id_chain.size(), 2 ); - if( id_chain.size() >= 1 ) EXPECT_EQ("d", tBC._lookupFrameString(mBC, id_chain[0])); - if( id_chain.size() >= 2 ) EXPECT_EQ("e", tBC._lookupFrameString(mBC, id_chain[1])); - - tBC._walkToTopParent(mBC, ros::Time(), mBC._lookupFrameNumber("d"), mBC._lookupFrameNumber("e"), 0, &id_chain); - EXPECT_EQ( id_chain.size(), 2 ); - if( id_chain.size() >= 1 ) EXPECT_EQ("e", tBC._lookupFrameString(mBC, id_chain[0])); - if( id_chain.size() >= 2 ) EXPECT_EQ("d", tBC._lookupFrameString(mBC, id_chain[1])); -} - - -int main(int argc, char **argv){ - testing::InitGoogleTest(&argc, argv); - ros::Time::init(); //needed for ros::TIme::now() - return RUN_ALL_TESTS(); -} diff --git a/src/geometry2/tf2/test/speed_test.cpp b/src/geometry2/tf2/test/speed_test.cpp deleted file mode 100644 index c63ca18..0000000 --- a/src/geometry2/tf2/test/speed_test.cpp +++ /dev/null @@ -1,225 +0,0 @@ -/* - * Copyright (c) 2010, Willow Garage, Inc. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the Willow Garage, Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ - -#include - -#include -#include - -#include - -int main(int argc, char** argv) -{ - uint32_t num_levels = 10; - if (argc > 1) - { - num_levels = boost::lexical_cast(argv[1]); - } - double time_interval = 1.0; - if (argc > 2) - { - time_interval = boost::lexical_cast(argv[2]); - } - - console_bridge::setLogLevel(console_bridge::CONSOLE_BRIDGE_LOG_INFO); - - tf2::BufferCore bc; - geometry_msgs::TransformStamped t; - t.header.stamp = ros::Time(1); - t.header.frame_id = "root"; - t.child_frame_id = "0"; - t.transform.translation.x = 1; - t.transform.rotation.w = 1.0; - bc.setTransform(t, "me"); - t.header.stamp = ros::Time(2); - bc.setTransform(t, "me"); - - for (uint32_t i = 1; i < num_levels / 2; ++i) - { - for (double j = time_interval; j < 2.0 + time_interval; j += time_interval) - { - std::stringstream parent_ss; - parent_ss << (i - 1); - std::stringstream child_ss; - child_ss << i; - - t.header.stamp = ros::Time(j); - t.header.frame_id = parent_ss.str(); - t.child_frame_id = child_ss.str(); - bc.setTransform(t, "me"); - } - } - - t.header.frame_id = "root"; - std::stringstream ss; - ss << num_levels/2; - t.header.stamp = ros::Time(1); - t.child_frame_id = ss.str(); - bc.setTransform(t, "me"); - t.header.stamp = ros::Time(2); - bc.setTransform(t, "me"); - - for (uint32_t i = num_levels/2 + 1; i < num_levels; ++i) - { - for (double j = time_interval; j < 2.0 + time_interval; j += time_interval) - { - std::stringstream parent_ss; - parent_ss << (i - 1); - std::stringstream child_ss; - child_ss << i; - - t.header.stamp = ros::Time(j); - t.header.frame_id = parent_ss.str(); - t.child_frame_id = child_ss.str(); - bc.setTransform(t, "me"); - } - } - - //logInfo_STREAM(bc.allFramesAsYAML()); - - std::string v_frame0 = boost::lexical_cast(num_levels - 1); - std::string v_frame1 = boost::lexical_cast(num_levels/2 - 1); - CONSOLE_BRIDGE_logInform("%s to %s", v_frame0.c_str(), v_frame1.c_str()); - geometry_msgs::TransformStamped out_t; - - const uint32_t count = 1000000; - CONSOLE_BRIDGE_logInform("Doing %d %d-level %lf-interval tests", count, num_levels, time_interval); - -#if 01 - { - ros::WallTime start = ros::WallTime::now(); - for (uint32_t i = 0; i < count; ++i) - { - out_t = bc.lookupTransform(v_frame1, v_frame0, ros::Time(0)); - } - ros::WallTime end = ros::WallTime::now(); - ros::WallDuration dur = end - start; - //ROS_INFO_STREAM(out_t); - CONSOLE_BRIDGE_logInform("lookupTransform at Time(0) took %f for an average of %.9f", dur.toSec(), dur.toSec() / (double)count); - } -#endif - -#if 01 - { - ros::WallTime start = ros::WallTime::now(); - for (uint32_t i = 0; i < count; ++i) - { - out_t = bc.lookupTransform(v_frame1, v_frame0, ros::Time(1)); - } - ros::WallTime end = ros::WallTime::now(); - ros::WallDuration dur = end - start; - //ROS_INFO_STREAM(out_t); - CONSOLE_BRIDGE_logInform("lookupTransform at Time(1) took %f for an average of %.9f", dur.toSec(), dur.toSec() / (double)count); - } -#endif - -#if 01 - { - ros::WallTime start = 
ros::WallTime::now(); - for (uint32_t i = 0; i < count; ++i) - { - out_t = bc.lookupTransform(v_frame1, v_frame0, ros::Time(1.5)); - } - ros::WallTime end = ros::WallTime::now(); - ros::WallDuration dur = end - start; - //ROS_INFO_STREAM(out_t); - CONSOLE_BRIDGE_logInform("lookupTransform at Time(1.5) took %f for an average of %.9f", dur.toSec(), dur.toSec() / (double)count); - } -#endif - -#if 01 - { - ros::WallTime start = ros::WallTime::now(); - for (uint32_t i = 0; i < count; ++i) - { - out_t = bc.lookupTransform(v_frame1, v_frame0, ros::Time(2)); - } - ros::WallTime end = ros::WallTime::now(); - ros::WallDuration dur = end - start; - //ROS_INFO_STREAM(out_t); - CONSOLE_BRIDGE_logInform("lookupTransform at Time(2) took %f for an average of %.9f", dur.toSec(), dur.toSec() / (double)count); - } -#endif - -#if 01 - { - ros::WallTime start = ros::WallTime::now(); - for (uint32_t i = 0; i < count; ++i) - { - bc.canTransform(v_frame1, v_frame0, ros::Time(0)); - } - ros::WallTime end = ros::WallTime::now(); - ros::WallDuration dur = end - start; - //ROS_INFO_STREAM(out_t); - CONSOLE_BRIDGE_logInform("canTransform at Time(0) took %f for an average of %.9f", dur.toSec(), dur.toSec() / (double)count); - } -#endif - -#if 01 - { - ros::WallTime start = ros::WallTime::now(); - for (uint32_t i = 0; i < count; ++i) - { - bc.canTransform(v_frame1, v_frame0, ros::Time(1)); - } - ros::WallTime end = ros::WallTime::now(); - ros::WallDuration dur = end - start; - //ROS_INFO_STREAM(out_t); - CONSOLE_BRIDGE_logInform("canTransform at Time(1) took %f for an average of %.9f", dur.toSec(), dur.toSec() / (double)count); - } -#endif - -#if 01 - { - ros::WallTime start = ros::WallTime::now(); - for (uint32_t i = 0; i < count; ++i) - { - bc.canTransform(v_frame1, v_frame0, ros::Time(1.5)); - } - ros::WallTime end = ros::WallTime::now(); - ros::WallDuration dur = end - start; - //ROS_INFO_STREAM(out_t); - CONSOLE_BRIDGE_logInform("canTransform at Time(1.5) took %f for an average of %.9f", dur.toSec(), dur.toSec() / (double)count); - } -#endif - -#if 01 - { - ros::WallTime start = ros::WallTime::now(); - for (uint32_t i = 0; i < count; ++i) - { - bc.canTransform(v_frame1, v_frame0, ros::Time(2)); - } - ros::WallTime end = ros::WallTime::now(); - ros::WallDuration dur = end - start; - //ROS_INFO_STREAM(out_t); - CONSOLE_BRIDGE_logInform("canTransform at Time(2) took %f for an average of %.9f", dur.toSec(), dur.toSec() / (double)count); - } -#endif -} diff --git a/src/geometry2/tf2/test/static_cache_test.cpp b/src/geometry2/tf2/test/static_cache_test.cpp deleted file mode 100644 index f7a4622..0000000 --- a/src/geometry2/tf2/test/static_cache_test.cpp +++ /dev/null @@ -1,101 +0,0 @@ -/* - * Copyright (c) 2008, Willow Garage, Inc. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the Willow Garage, Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. 
- * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ - -#include -#include -#include - -#include - -#include - -using namespace tf2; - - -void setIdentity(TransformStorage& stor) -{ - stor.translation_.setValue(0.0, 0.0, 0.0); - stor.rotation_.setValue(0.0, 0.0, 0.0, 1.0); -} - -TEST(StaticCache, Repeatability) -{ - unsigned int runs = 100; - - tf2::StaticCache cache; - - TransformStorage stor; - setIdentity(stor); - - for ( uint64_t i = 1; i < runs ; i++ ) - { - stor.frame_id_ = CompactFrameID(i); - stor.stamp_ = ros::Time().fromNSec(i); - - cache.insertData(stor); - - - cache.getData(ros::Time().fromNSec(i), stor); - EXPECT_EQ(stor.frame_id_, i); - EXPECT_EQ(stor.stamp_, ros::Time().fromNSec(i)); - - } -} - -TEST(StaticCache, DuplicateEntries) -{ - - tf2::StaticCache cache; - - TransformStorage stor; - setIdentity(stor); - stor.frame_id_ = CompactFrameID(3); - stor.stamp_ = ros::Time().fromNSec(1); - - cache.insertData(stor); - - cache.insertData(stor); - - - cache.getData(ros::Time().fromNSec(1), stor); - - //printf(" stor is %f\n", stor.transform.translation.x); - EXPECT_TRUE(!std::isnan(stor.translation_.x())); - EXPECT_TRUE(!std::isnan(stor.translation_.y())); - EXPECT_TRUE(!std::isnan(stor.translation_.z())); - EXPECT_TRUE(!std::isnan(stor.rotation_.x())); - EXPECT_TRUE(!std::isnan(stor.rotation_.y())); - EXPECT_TRUE(!std::isnan(stor.rotation_.z())); - EXPECT_TRUE(!std::isnan(stor.rotation_.w())); -} - -int main(int argc, char **argv){ - testing::InitGoogleTest(&argc, argv); - return RUN_ALL_TESTS(); -} diff --git a/src/geometry2/tf2_bullet/CHANGELOG.rst b/src/geometry2/tf2_bullet/CHANGELOG.rst deleted file mode 100644 index 2a47cf5..0000000 --- a/src/geometry2/tf2_bullet/CHANGELOG.rst +++ /dev/null @@ -1,181 +0,0 @@ -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -Changelog for package tf2_bullet -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -0.6.7 (2020-03-09) ------------------- -* [windows][melodic] more portable fixes. 
(`#443 `_) -* Contributors: Sean Yen - -0.6.6 (2020-01-09) ------------------- -* Fix compile error missing ros/ros.h (`#400 `_) - * ros/ros.h -> ros/time.h - * tf2_bullet doesn't need ros.h - * tf2_eigen doesn't need ros/ros.h -* use find_package when pkg_check_modules doesn't work (`#364 `_) -* Contributors: James Xu, Shane Loretz - -0.6.5 (2018-11-16) ------------------- - -0.6.4 (2018-11-06) ------------------- - -0.6.3 (2018-07-09) ------------------- - -0.6.2 (2018-05-02) ------------------- - -0.6.1 (2018-03-21) ------------------- - -0.6.0 (2018-03-21) ------------------- - -0.5.17 (2018-01-01) -------------------- - -0.5.16 (2017-07-14) -------------------- -* store gtest return value as int (`#229 `_) -* Contributors: dhood - -0.5.15 (2017-01-24) -------------------- - -0.5.14 (2017-01-16) -------------------- -* Improve documentation -* Contributors: Jackie Kay - -0.5.13 (2016-03-04) -------------------- -* Don't export catkin includes - They only point to the temporary include in the build directory. -* Contributors: Jochen Sprickerhof - -0.5.12 (2015-08-05) -------------------- - -0.5.11 (2015-04-22) -------------------- - -0.5.10 (2015-04-21) -------------------- - -0.5.9 (2015-03-25) ------------------- - -0.5.8 (2015-03-17) ------------------- -* remove useless Makefile files -* fix ODR violations -* Contributors: Vincent Rabaud - -0.5.7 (2014-12-23) ------------------- -* fixing install rules and adding backwards compatible include with #warning -* Contributors: Tully Foote - -0.5.6 (2014-09-18) ------------------- - -0.5.5 (2014-06-23) ------------------- - -0.5.4 (2014-05-07) ------------------- - -0.5.3 (2014-02-21) ------------------- - -0.5.2 (2014-02-20) ------------------- - -0.5.1 (2014-02-14) ------------------- - -0.5.0 (2014-02-14) ------------------- - -0.4.10 (2013-12-26) -------------------- - -0.4.9 (2013-11-06) ------------------- -* adding missing buildtool dependency on pkg-config - -0.4.8 (2013-11-06) ------------------- - -0.4.7 (2013-08-28) ------------------- - -0.4.6 (2013-08-28) ------------------- - -0.4.5 (2013-07-11) ------------------- - -0.4.4 (2013-07-09) ------------------- -* making repo use CATKIN_ENABLE_TESTING correctly and switching rostest to be a test_depend with that change. - -0.4.3 (2013-07-05) ------------------- - -0.4.2 (2013-07-05) ------------------- -* removing unused dependency on rostest - -0.4.1 (2013-07-05) ------------------- -* stripping tf2_ros dependency from tf2_bullet. Test was moved to test_tf2 - -0.4.0 (2013-06-27) ------------------- -* moving convert methods back into tf2 because it does not have any ros dependencies beyond ros::Time which is already a dependency of tf2 -* Cleaning up unnecessary dependency on roscpp -* converting contents of tf2_ros to be properly namespaced in the tf2_ros namespace -* Cleaning up packaging of tf2 including: - removing unused nodehandle - cleaning up a few dependencies and linking - removing old backup of package.xml - making diff minimally different from tf version of library -* Restoring test packages and bullet packages. 
- reverting 3570e8c42f9b394ecbfd9db076b920b41300ad55 to get back more of the packages previously implemented - reverting 04cf29d1b58c660fdc999ab83563a5d4b76ab331 to fix `#7 `_ - -0.3.6 (2013-03-03) ------------------- - -0.3.5 (2013-02-15 14:46) ------------------------- - -0.3.4 (2013-02-15 13:14) ------------------------- - -0.3.3 (2013-02-15 11:30) ------------------------- - -0.3.2 (2013-02-15 00:42) ------------------------- - -0.3.1 (2013-02-14) ------------------- - -0.3.0 (2013-02-13) ------------------- -* fixing groovy-devel -* removing bullet and kdl related packages -* catkinizing geometry-experimental -* catkinizing tf2_bullet -* merge tf2_cpp and tf2_py into tf2_ros -* A working first version of transforming and converting between different types -* Fixing tests now that Buffer creates a NodeHandle -* add frame unit tests to kdl and bullet -* add first regression tests for kdl and bullet tf -* add btTransform transform -* add bullet transforms, and create tests for bullet and kdl diff --git a/src/geometry2/tf2_bullet/CMakeLists.txt b/src/geometry2/tf2_bullet/CMakeLists.txt deleted file mode 100644 index 15a8110..0000000 --- a/src/geometry2/tf2_bullet/CMakeLists.txt +++ /dev/null @@ -1,32 +0,0 @@ -cmake_minimum_required(VERSION 2.8.3) -project(tf2_bullet) - -find_package(PkgConfig REQUIRED) - -set(bullet_FOUND 0) -pkg_check_modules(bullet bullet) -if(NOT bullet_FOUND) - # windows build bullet3 doesn't come with pkg-config by default and it only comes with CMake config files - # so pkg_check_modules will fail - find_package(bullet REQUIRED) - - # https://github.com/bulletphysics/bullet3/blob/master/BulletConfig.cmake.in - set(bullet_INCLUDE_DIRS "${BULLET_INCLUDE_DIRS}") -endif() - -find_package(catkin REQUIRED COMPONENTS geometry_msgs tf2) - -include_directories(include ${bullet_INCLUDE_DIRS} ${catkin_INCLUDE_DIRS}) - -catkin_package(INCLUDE_DIRS include ${bullet_INCLUDE_DIRS}) - -install(DIRECTORY include/${PROJECT_NAME}/ - DESTINATION ${CATKIN_PACKAGE_INCLUDE_DESTINATION}) - - -if(CATKIN_ENABLE_TESTING) - -catkin_add_gtest(test_bullet test/test_tf2_bullet.cpp) -target_link_libraries(test_bullet ${catkin_LIBRARIES} ${GTEST_LIBRARIES}) - -endif() diff --git a/src/geometry2/tf2_bullet/include/tf2_bullet/tf2_bullet.h b/src/geometry2/tf2_bullet/include/tf2_bullet/tf2_bullet.h deleted file mode 100644 index 63e88f5..0000000 --- a/src/geometry2/tf2_bullet/include/tf2_bullet/tf2_bullet.h +++ /dev/null @@ -1,117 +0,0 @@ -/* - * Copyright (c) 2008, Willow Garage, Inc. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the Willow Garage, Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-/** \author Wim Meeussen */
-
-#ifndef TF2_BULLET_H
-#define TF2_BULLET_H
-
-#include <tf2/convert.h>
-#include <LinearMath/btTransform.h>
-#include <geometry_msgs/PointStamped.h>
-
-
-namespace tf2
-{
-/** \brief Convert a timestamped transform to the equivalent Bullet data type.
- * \param t The transform to convert, as a geometry_msgs TransformStamped message.
- * \return The transform message converted to a Bullet btTransform.
- */
-inline
-btTransform transformToBullet(const geometry_msgs::TransformStamped& t)
-  {
-    return btTransform(btQuaternion(t.transform.rotation.x, t.transform.rotation.y,
-                                    t.transform.rotation.z, t.transform.rotation.w),
-                       btVector3(t.transform.translation.x, t.transform.translation.y, t.transform.translation.z));
-  }
-
-
-/** \brief Apply a geometry_msgs TransformStamped to a Bullet-specific Vector3 type.
- * This function is a specialization of the doTransform template defined in tf2/convert.h
- * \param t_in The vector to transform, as a timestamped Bullet btVector3 data type.
- * \param t_out The transformed vector, as a timestamped Bullet btVector3 data type.
- * \param transform The timestamped transform to apply, as a TransformStamped message.
- */
-template <>
-inline
-  void doTransform(const tf2::Stamped<btVector3>& t_in, tf2::Stamped<btVector3>& t_out, const geometry_msgs::TransformStamped& transform)
-  {
-    t_out = tf2::Stamped<btVector3>(transformToBullet(transform) * t_in, transform.header.stamp, transform.header.frame_id);
-  }
-
-/** \brief Convert a stamped Bullet Vector3 type to a PointStamped message.
- * This function is a specialization of the toMsg template defined in tf2/convert.h
- * \param in The timestamped Bullet btVector3 to convert.
- * \return The vector converted to a PointStamped message.
- */
-inline
-geometry_msgs::PointStamped toMsg(const tf2::Stamped<btVector3>& in)
-{
-  geometry_msgs::PointStamped msg;
-  msg.header.stamp = in.stamp_;
-  msg.header.frame_id = in.frame_id_;
-  msg.point.x = in[0];
-  msg.point.y = in[1];
-  msg.point.z = in[2];
-  return msg;
-}
-
-/** \brief Convert a PointStamped message type to a stamped Bullet-specific Vector3 type.
- * This function is a specialization of the fromMsg template defined in tf2/convert.h
- * \param msg The PointStamped message to convert.
- * \param out The point converted to a timestamped Bullet Vector3.
- */
-inline
-void fromMsg(const geometry_msgs::PointStamped& msg, tf2::Stamped<btVector3>& out)
-{
-  out.stamp_ = msg.header.stamp;
-  out.frame_id_ = msg.header.frame_id;
-  out[0] = msg.point.x;
-  out[1] = msg.point.y;
-  out[2] = msg.point.z;
-}
-
-
-/** \brief Apply a geometry_msgs TransformStamped to a Bullet-specific Transform data type.
- * This function is a specialization of the doTransform template defined in tf2/convert.h
- * \param t_in The frame to transform, as a timestamped Bullet btTransform.
- * \param t_out The transformed frame, as a timestamped Bullet btTransform.
- * \param transform The timestamped transform to apply, as a TransformStamped message.
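- *
- * A minimal usage sketch (illustrative only, not part of the original header;
- * the frame names and values below are hypothetical):
- * \code
- * geometry_msgs::TransformStamped tfs;   // e.g. from a tf2_ros::Buffer lookup
- * tfs.header.frame_id = "map";
- * tfs.transform.rotation.w = 1.0;        // identity rotation
- * tf2::Stamped<btTransform> in(btTransform::getIdentity(), ros::Time(0), "base_link");
- * tf2::Stamped<btTransform> out;
- * tf2::doTransform(in, out, tfs);        // out now carries tfs' stamp and frame_id
- * \endcode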
- */ -template <> -inline - void doTransform(const tf2::Stamped& t_in, tf2::Stamped& t_out, const geometry_msgs::TransformStamped& transform) - { - t_out = tf2::Stamped(transformToBullet(transform) * t_in, transform.header.stamp, transform.header.frame_id); - } - - -} // namespace - -#endif // TF2_BULLET_H diff --git a/src/geometry2/tf2_bullet/include/tf2_bullet/tf2_bullet/tf2_bullet.h b/src/geometry2/tf2_bullet/include/tf2_bullet/tf2_bullet/tf2_bullet.h deleted file mode 100644 index 2b257ee..0000000 --- a/src/geometry2/tf2_bullet/include/tf2_bullet/tf2_bullet/tf2_bullet.h +++ /dev/null @@ -1,3 +0,0 @@ -#warning This header is at the wrong path you should include - -#include diff --git a/src/geometry2/tf2_bullet/mainpage.dox b/src/geometry2/tf2_bullet/mainpage.dox deleted file mode 100644 index a2ec58c..0000000 --- a/src/geometry2/tf2_bullet/mainpage.dox +++ /dev/null @@ -1,19 +0,0 @@ -/** -\mainpage -\htmlinclude manifest.html - -\b tf2_bullet contains functions for converting between geometry_msgs and Bullet data types. - -This library is an implementation of the templated conversion interface specified in tf/convert.h. -It enables easy conversion from geometry_msgs Transform and Point types to the types specified -by the Bullet physics engine API (see http://bulletphysics.org). - -See the Conversions overview -wiki page for more information about datatype conversion in tf2. - -\section codeapi Code API - -This library consists of one header only, tf2_bullet/tf2_bullet.h, which consists mostly of -specializations of template functions defined in tf2/convert.h. - -*/ diff --git a/src/geometry2/tf2_bullet/package.xml b/src/geometry2/tf2_bullet/package.xml deleted file mode 100644 index 236db56..0000000 --- a/src/geometry2/tf2_bullet/package.xml +++ /dev/null @@ -1,25 +0,0 @@ - - tf2_bullet - 0.6.7 - - tf2_bullet - - Wim Meeussen - Tully Foote - BSD - - http://www.ros.org/wiki/tf2_bullet - - catkin - pkg-config - - tf2 - bullet - geometry_msgs - - tf2 - bullet - geometry_msgs - - - diff --git a/src/geometry2/tf2_bullet/test/test_tf2_bullet.cpp b/src/geometry2/tf2_bullet/test/test_tf2_bullet.cpp deleted file mode 100644 index 9ecda51..0000000 --- a/src/geometry2/tf2_bullet/test/test_tf2_bullet.cpp +++ /dev/null @@ -1,63 +0,0 @@ -/* - * Copyright (c) 2008, Willow Garage, Inc. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the Willow Garage, Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ - -/** \author Wim Meeussen */ - - -#include -#include -#include - -static const double EPS = 1e-3; - - -TEST(TfBullet, ConvertVector) -{ - btVector3 v(1,2,3); - - btVector3 v1 = v; - tf2::convert(v1, v1); - - EXPECT_EQ(v, v1); - - btVector3 v2(3,4,5); - tf2::convert(v1, v2); - - EXPECT_EQ(v, v2); - EXPECT_EQ(v1, v2); -} - - - -int main(int argc, char **argv){ - testing::InitGoogleTest(&argc, argv); - - int ret = RUN_ALL_TESTS(); - return ret; -} diff --git a/src/geometry2/tf2_eigen/CHANGELOG.rst b/src/geometry2/tf2_eigen/CHANGELOG.rst deleted file mode 100644 index 338ca76..0000000 --- a/src/geometry2/tf2_eigen/CHANGELOG.rst +++ /dev/null @@ -1,107 +0,0 @@ -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -Changelog for package tf2_eigen -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -0.6.7 (2020-03-09) ------------------- -* Revert "rework Eigen functions namespace hack" (`#436 `_) -* Contributors: Tully Foote - -0.6.6 (2020-01-09) ------------------- -* Fix compile error missing ros/ros.h (`#400 `_) - * ros/ros.h -> ros/time.h - * tf2_bullet doesn't need ros.h - * tf2_eigen doesn't need ros/ros.h -* rework Eigen functions namespace hack -* separate transform function declarations into transform_functions.h -* Contributors: James Xu, Shane Loretz, Tully Foote - -0.6.5 (2018-11-16) ------------------- - -0.6.4 (2018-11-06) ------------------- -* improve comments -* add Eigen::Isometry3d conversions -* normalize quaternions to be in half-space w >= 0 as in tf1 -* improve computation efficiency -* Contributors: Robert Haschke - -0.6.3 (2018-07-09) ------------------- - -0.6.2 (2018-05-02) ------------------- -* Adds toMsg & fromMsg for Eigen Vector3 (`#294 `_) - - Adds toMsg for geometry_msgs::Vector3& with dual argument syntax to - avoid an overload conflict with - geometry_msgs::Point& toMsg(contst Eigen::Vector3d& in) - - Adds corresponding fromMsg for Eigen Vector3d and - geometry_msgs::Vector3 - - Fixed typos in description of fromMsg for Twist and Eigen 6x1 Matrix -* Adds additional conversions for tf2, KDL, Eigen (`#292 `_) - - adds non-stamped Eigen to Transform function - - converts Eigen Matrix Vectors to and from geometry_msgs::Twist - - adds to/from message for geometry_msgs::Pose and KDL::Frame -* Contributors: Ian McMahon - -0.6.1 (2018-03-21) ------------------- - -0.6.0 (2018-03-21) ------------------- - -0.5.17 (2018-01-01) -------------------- - -0.5.16 (2017-07-14) -------------------- -* fix return value to prevent warnings on windows (`#237 `_) -* fixing include directory order to support overlays (`#231 `_) -* tf2_eigen: added support for Quaternion and QuaternionStamped (`#230 `_) -* Remove an unused variable from the tf2_eigen test. (`#215 `_) -* Find eigen in a much nicer way. -* Switch tf2_eigen to use package.xml format 2. 
(`#216 `_) -* Contributors: Chris Lalancette, Mikael Arguedas, Tully Foote, cwecht - -0.5.15 (2017-01-24) -------------------- -* fixup `#186 `_: inline template specializations (`#200 `_) -* Contributors: Robert Haschke - -0.5.14 (2017-01-16) -------------------- -* Add tf2_eigen conversions for Pose and Point (not stamped) (`#186 `_) - * tf2_eigen: added conversions for Point msg type (not timestamped) to Eigen::Vector3d - * tf2_eigen: added conversions for Pose msg type (not timestamped) to Eigen::Affine3d - * tf2_eigen: new functions are inline now - * tf2_eigen test compiling again - * tf2_eigen: added tests for Affine3d and Vector3d conversion - * tf2_eigen: added redefinitions of non-stamped conversion function to make usage in tf2::convert() possible - * tf2_eigen: reduced redundancy by reusing non-stamped conversion-functions in their stamped counterparts - * tf2_eigen: added notes at doTransform-implementations which can not work with tf2_ros::BufferInterface::transform - * tf2_eigen: fixed typos -* Don't export local include dirs (`#180 `_) -* Improve documentation. -* Contributors: Jackie Kay, Jochen Sprickerhof, cwecht - -0.5.13 (2016-03-04) -------------------- -* Added missing inline -* Added unit test - - Testing conversion to msg forward/backward -* Added eigenTotransform function -* Contributors: Davide Tateo, boris-il-forte - -0.5.12 (2015-08-05) -------------------- - -0.5.11 (2015-04-22) -------------------- - -0.5.10 (2015-04-21) -------------------- -* fixing CMakeLists.txt from `#97 `_ -* create tf2_eigen. -* Contributors: Tully Foote, koji diff --git a/src/geometry2/tf2_eigen/CMakeLists.txt b/src/geometry2/tf2_eigen/CMakeLists.txt deleted file mode 100644 index a2b4c40..0000000 --- a/src/geometry2/tf2_eigen/CMakeLists.txt +++ /dev/null @@ -1,45 +0,0 @@ -cmake_minimum_required(VERSION 2.8.3) -project(tf2_eigen) - -find_package(catkin REQUIRED COMPONENTS - cmake_modules - geometry_msgs - tf2 -) - -# Finding Eigen is somewhat complicated because of our need to support Ubuntu -# all the way back to saucy. First we look for the Eigen3 cmake module -# provided by the libeigen3-dev on newer Ubuntu. If that fails, then we -# fall-back to the version provided by cmake_modules, which is a stand-in. -find_package(Eigen3 QUIET) -if(NOT EIGEN3_FOUND) - find_package(cmake_modules REQUIRED) - find_package(Eigen REQUIRED) - set(EIGEN3_INCLUDE_DIRS ${EIGEN_INCLUDE_DIRS}) -endif() - -# Note that eigen 3.2 (on Ubuntu Wily) only provides EIGEN3_INCLUDE_DIR, -# not EIGEN3_INCLUDE_DIRS, so we have to set the latter from the former. -if(NOT EIGEN3_INCLUDE_DIRS) - set(EIGEN3_INCLUDE_DIRS ${EIGEN3_INCLUDE_DIR}) -endif() - -include_directories(include - ${EIGEN3_INCLUDE_DIRS} - ${catkin_INCLUDE_DIRS}) - -catkin_package( - INCLUDE_DIRS include - CATKIN_DEPENDS tf2 - DEPENDS EIGEN3) - -install(DIRECTORY include/${PROJECT_NAME}/ - DESTINATION ${CATKIN_PACKAGE_INCLUDE_DESTINATION}) - - -if(CATKIN_ENABLE_TESTING) - - catkin_add_gtest(tf2_eigen-test test/tf2_eigen-test.cpp) - target_link_libraries(tf2_eigen-test ${catkin_LIBRARIES} ${GTEST_LIBRARIES}) - -endif() diff --git a/src/geometry2/tf2_eigen/include/tf2_eigen/tf2_eigen.h b/src/geometry2/tf2_eigen/include/tf2_eigen/tf2_eigen.h deleted file mode 100644 index 00c789b..0000000 --- a/src/geometry2/tf2_eigen/include/tf2_eigen/tf2_eigen.h +++ /dev/null @@ -1,585 +0,0 @@ -/* - * Copyright (c) Koji Terada - * All rights reserved. 
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- *     * Redistributions of source code must retain the above copyright
- *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *       notice, this list of conditions and the following disclaimer in the
- *       documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-/** \author Koji Terada */
-
-#ifndef TF2_EIGEN_H
-#define TF2_EIGEN_H
-
-#include <tf2/convert.h>
-#include <Eigen/Geometry>
-#include <geometry_msgs/PointStamped.h>
-#include <geometry_msgs/PoseStamped.h>
-#include <geometry_msgs/QuaternionStamped.h>
-#include <geometry_msgs/Twist.h>
-
-
-namespace tf2
-{
-
-/** \brief Convert a transform to the equivalent Eigen data type.
- * \param t The transform to convert, as a geometry_msgs Transform message.
- * \return The transform message converted to an Eigen Isometry3d transform.
- */
-inline
-Eigen::Isometry3d transformToEigen(const geometry_msgs::Transform& t) {
-  return Eigen::Isometry3d(Eigen::Translation3d(t.translation.x, t.translation.y, t.translation.z)
-      * Eigen::Quaterniond(t.rotation.w, t.rotation.x, t.rotation.y, t.rotation.z));
-}
-
-/** \brief Convert a timestamped transform to the equivalent Eigen data type.
- * \param t The transform to convert, as a geometry_msgs TransformStamped message.
- * \return The transform message converted to an Eigen Isometry3d transform.
- */
-inline
-Eigen::Isometry3d transformToEigen(const geometry_msgs::TransformStamped& t) {
-  return transformToEigen(t.transform);
-}
-
-/** \brief Convert an Eigen Affine3d transform to the equivalent geometry_msgs message type.
- * \param T The transform to convert, as an Eigen Affine3d transform.
- * \return The transform converted to a TransformStamped message.
- */
-inline
-geometry_msgs::TransformStamped eigenToTransform(const Eigen::Affine3d& T)
-{
-  geometry_msgs::TransformStamped t;
-  t.transform.translation.x = T.translation().x();
-  t.transform.translation.y = T.translation().y();
-  t.transform.translation.z = T.translation().z();
-
-  Eigen::Quaterniond q(T.linear()); // assuming that upper 3x3 matrix is orthonormal
-  t.transform.rotation.x = q.x();
-  t.transform.rotation.y = q.y();
-  t.transform.rotation.z = q.z();
-  t.transform.rotation.w = q.w();
-
-  return t;
-}
-
-/** \brief Convert an Eigen Isometry3d transform to the equivalent geometry_msgs message type.
- * \param T The transform to convert, as an Eigen Isometry3d transform.
- * \return The transform converted to a TransformStamped message.
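- *
- * Round-trip sketch (illustrative only, not part of the original header):
- * \code
- * Eigen::Isometry3d T = Eigen::Isometry3d::Identity();
- * T.translation() = Eigen::Vector3d(1.0, 2.0, 3.0);
- * geometry_msgs::TransformStamped msg = tf2::eigenToTransform(T);
- * Eigen::Isometry3d T2 = tf2::transformToEigen(msg);  // T2 matches T up to rounding
- * \endcode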
- */ -inline -geometry_msgs::TransformStamped eigenToTransform(const Eigen::Isometry3d& T) -{ - geometry_msgs::TransformStamped t; - t.transform.translation.x = T.translation().x(); - t.transform.translation.y = T.translation().y(); - t.transform.translation.z = T.translation().z(); - - Eigen::Quaterniond q(T.rotation()); - t.transform.rotation.x = q.x(); - t.transform.rotation.y = q.y(); - t.transform.rotation.z = q.z(); - t.transform.rotation.w = q.w(); - - return t; -} - -/** \brief Apply a geometry_msgs TransformStamped to an Eigen-specific Vector3d type. - * This function is a specialization of the doTransform template defined in tf2/convert.h, - * although it can not be used in tf2_ros::BufferInterface::transform because this - * functions rely on the existence of a time stamp and a frame id in the type which should - * get transformed. - * \param t_in The vector to transform, as a Eigen Vector3d data type. - * \param t_out The transformed vector, as a Eigen Vector3d data type. - * \param transform The timestamped transform to apply, as a TransformStamped message. - */ -template <> -inline -void doTransform(const Eigen::Vector3d& t_in, Eigen::Vector3d& t_out, const geometry_msgs::TransformStamped& transform) -{ - t_out = Eigen::Vector3d(transformToEigen(transform) * t_in); -} - -/** \brief Convert a Eigen Vector3d type to a Point message. - * This function is a specialization of the toMsg template defined in tf2/convert.h. - * \param in The timestamped Eigen Vector3d to convert. - * \return The vector converted to a Point message. - */ -inline -geometry_msgs::Point toMsg(const Eigen::Vector3d& in) -{ - geometry_msgs::Point msg; - msg.x = in.x(); - msg.y = in.y(); - msg.z = in.z(); - return msg; -} - -/** \brief Convert a Point message type to a Eigen-specific Vector3d type. - * This function is a specialization of the fromMsg template defined in tf2/convert.h - * \param msg The Point message to convert. - * \param out The point converted to a Eigen Vector3d. - */ -inline -void fromMsg(const geometry_msgs::Point& msg, Eigen::Vector3d& out) -{ - out.x() = msg.x; - out.y() = msg.y; - out.z() = msg.z; -} - -/** \brief Convert an Eigen Vector3d type to a Vector3 message. - * This function is a specialization of the toMsg template defined in tf2/convert.h. - * \param in The Eigen Vector3d to convert. - * \return The vector converted to a Vector3 message. - */ -inline -geometry_msgs::Vector3& toMsg(const Eigen::Vector3d& in, geometry_msgs::Vector3& out) -{ - out.x = in.x(); - out.y = in.y(); - out.z = in.z(); - return out; -} - -/** \brief Convert a Vector3 message type to a Eigen-specific Vector3d type. - * This function is a specialization of the fromMsg template defined in tf2/convert.h - * \param msg The Vector3 message to convert. - * \param out The vector converted to a Eigen Vector3d. - */ -inline -void fromMsg(const geometry_msgs::Vector3& msg, Eigen::Vector3d& out) -{ - out.x() = msg.x; - out.y() = msg.y; - out.z() = msg.z; -} - -/** \brief Apply a geometry_msgs TransformStamped to an Eigen-specific Vector3d type. - * This function is a specialization of the doTransform template defined in tf2/convert.h. - * \param t_in The vector to transform, as a timestamped Eigen Vector3d data type. - * \param t_out The transformed vector, as a timestamped Eigen Vector3d data type. - * \param transform The timestamped transform to apply, as a TransformStamped message. 
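- *
- * Usage sketch (illustrative only; the frame names are hypothetical and the
- * transform would normally come from a tf2_ros::Buffer lookup):
- * \code
- * tf2::Stamped<Eigen::Vector3d> in(Eigen::Vector3d(1.0, 0.0, 0.0), ros::Time(0), "sensor");
- * tf2::Stamped<Eigen::Vector3d> out;
- * geometry_msgs::TransformStamped tfs;
- * tfs.transform.rotation.w = 1.0;  // identity transform, for illustration
- * tf2::doTransform(in, out, tfs);
- * \endcode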
- */
-template <>
-inline
-void doTransform(const tf2::Stamped<Eigen::Vector3d>& t_in,
-                 tf2::Stamped<Eigen::Vector3d>& t_out,
-                 const geometry_msgs::TransformStamped& transform) {
-  t_out = tf2::Stamped<Eigen::Vector3d>(transformToEigen(transform) * t_in,
-                                        transform.header.stamp,
-                                        transform.header.frame_id);
-}
-
-/** \brief Convert a stamped Eigen Vector3d type to a PointStamped message.
- * This function is a specialization of the toMsg template defined in tf2/convert.h.
- * \param in The timestamped Eigen Vector3d to convert.
- * \return The vector converted to a PointStamped message.
- */
-inline
-geometry_msgs::PointStamped toMsg(const tf2::Stamped<Eigen::Vector3d>& in)
-{
-  geometry_msgs::PointStamped msg;
-  msg.header.stamp = in.stamp_;
-  msg.header.frame_id = in.frame_id_;
-  msg.point = toMsg(static_cast<Eigen::Vector3d>(in));
-  return msg;
-}
-
-/** \brief Convert a PointStamped message type to a stamped Eigen-specific Vector3d type.
- * This function is a specialization of the fromMsg template defined in tf2/convert.h
- * \param msg The PointStamped message to convert.
- * \param out The point converted to a timestamped Eigen Vector3d.
- */
-inline
-void fromMsg(const geometry_msgs::PointStamped& msg, tf2::Stamped<Eigen::Vector3d>& out) {
-  out.stamp_ = msg.header.stamp;
-  out.frame_id_ = msg.header.frame_id;
-  fromMsg(msg.point, static_cast<Eigen::Vector3d&>(out));
-}
-
-/** \brief Apply a geometry_msgs Transform to an Eigen Affine3d transform.
- * This function is a specialization of the doTransform template defined in tf2/convert.h,
- * although it can not be used in tf2_ros::BufferInterface::transform because this
- * function relies on the existence of a time stamp and a frame id in the type which should
- * get transformed.
- * \param t_in The frame to transform, as an Eigen Affine3d transform.
- * \param t_out The transformed frame, as an Eigen Affine3d transform.
- * \param transform The timestamped transform to apply, as a TransformStamped message.
- */
-template <>
-inline
-void doTransform(const Eigen::Affine3d& t_in,
-                 Eigen::Affine3d& t_out,
-                 const geometry_msgs::TransformStamped& transform) {
-  t_out = Eigen::Affine3d(transformToEigen(transform) * t_in);
-}
-
-template <>
-inline
-void doTransform(const Eigen::Isometry3d& t_in,
-                 Eigen::Isometry3d& t_out,
-                 const geometry_msgs::TransformStamped& transform) {
-  t_out = Eigen::Isometry3d(transformToEigen(transform) * t_in);
-}
-
-/** \brief Convert an Eigen Quaterniond type to a Quaternion message.
- * This function is a specialization of the toMsg template defined in tf2/convert.h.
- * \param in The Eigen Quaterniond to convert.
- * \return The quaternion converted to a Quaternion message.
- */
-inline
-geometry_msgs::Quaternion toMsg(const Eigen::Quaterniond& in) {
-  geometry_msgs::Quaternion msg;
-  msg.w = in.w();
-  msg.x = in.x();
-  msg.y = in.y();
-  msg.z = in.z();
-  return msg;
-}
-
-/** \brief Convert a Quaternion message type to an Eigen-specific Quaterniond type.
- * This function is a specialization of the fromMsg template defined in tf2/convert.h
- * \param msg The Quaternion message to convert.
- * \param out The quaternion converted to an Eigen Quaterniond.
- */
-inline
-void fromMsg(const geometry_msgs::Quaternion& msg, Eigen::Quaterniond& out) {
-  out = Eigen::Quaterniond(msg.w, msg.x, msg.y, msg.z);
-}
-
-/** \brief Apply a geometry_msgs TransformStamped to an Eigen-specific Quaterniond type.
- * This function is a specialization of the doTransform template defined in tf2/convert.h, - * although it can not be used in tf2_ros::BufferInterface::transform because this - * functions rely on the existence of a time stamp and a frame id in the type which should - * get transformed. - * \param t_in The vector to transform, as a Eigen Quaterniond data type. - * \param t_out The transformed vector, as a Eigen Quaterniond data type. - * \param transform The timestamped transform to apply, as a TransformStamped message. - */ -template<> -inline -void doTransform(const Eigen::Quaterniond& t_in, - Eigen::Quaterniond& t_out, - const geometry_msgs::TransformStamped& transform) { - Eigen::Quaterniond t; - fromMsg(transform.transform.rotation, t); - t_out = t.inverse() * t_in * t; -} - -/** \brief Convert a stamped Eigen Quaterniond type to a QuaternionStamped message. - * This function is a specialization of the toMsg template defined in tf2/convert.h. - * \param in The timestamped Eigen Quaterniond to convert. - * \return The quaternion converted to a QuaternionStamped message. - */ -inline -geometry_msgs::QuaternionStamped toMsg(const Stamped& in) { - geometry_msgs::QuaternionStamped msg; - msg.header.stamp = in.stamp_; - msg.header.frame_id = in.frame_id_; - msg.quaternion = toMsg(static_cast(in)); - return msg; -} - -/** \brief Convert a QuaternionStamped message type to a stamped Eigen-specific Quaterniond type. - * This function is a specialization of the fromMsg template defined in tf2/convert.h - * \param msg The QuaternionStamped message to convert. - * \param out The quaternion converted to a timestamped Eigen Quaterniond. - */ -inline -void fromMsg(const geometry_msgs::QuaternionStamped& msg, Stamped& out) { - out.frame_id_ = msg.header.frame_id; - out.stamp_ = msg.header.stamp; - fromMsg(msg.quaternion, static_cast(out)); -} - -/** \brief Apply a geometry_msgs TransformStamped to an Eigen-specific Quaterniond type. - * This function is a specialization of the doTransform template defined in tf2/convert.h. - * \param t_in The vector to transform, as a timestamped Eigen Quaterniond data type. - * \param t_out The transformed vector, as a timestamped Eigen Quaterniond data type. - * \param transform The timestamped transform to apply, as a TransformStamped message. - */ -template <> -inline -void doTransform(const tf2::Stamped& t_in, - tf2::Stamped& t_out, - const geometry_msgs::TransformStamped& transform) { - t_out.frame_id_ = transform.header.frame_id; - t_out.stamp_ = transform.header.stamp; - doTransform(static_cast(t_in), static_cast(t_out), transform); -} - -/** \brief Convert a Eigen Affine3d transform type to a Pose message. - * This function is a specialization of the toMsg template defined in tf2/convert.h. - * \param in The Eigen Affine3d to convert. - * \return The Eigen transform converted to a Pose message. - */ -inline -geometry_msgs::Pose toMsg(const Eigen::Affine3d& in) { - geometry_msgs::Pose msg; - msg.position.x = in.translation().x(); - msg.position.y = in.translation().y(); - msg.position.z = in.translation().z(); - Eigen::Quaterniond q(in.linear()); - msg.orientation.x = q.x(); - msg.orientation.y = q.y(); - msg.orientation.z = q.z(); - msg.orientation.w = q.w(); - if (msg.orientation.w < 0) { - msg.orientation.x *= -1; - msg.orientation.y *= -1; - msg.orientation.z *= -1; - msg.orientation.w *= -1; - } - return msg; -} - -/** \brief Convert a Eigen Isometry3d transform type to a Pose message. 
- * This function is a specialization of the toMsg template defined in tf2/convert.h. - * \param in The Eigen Isometry3d to convert. - * \return The Eigen transform converted to a Pose message. - */ -inline -geometry_msgs::Pose toMsg(const Eigen::Isometry3d& in) { - geometry_msgs::Pose msg; - msg.position.x = in.translation().x(); - msg.position.y = in.translation().y(); - msg.position.z = in.translation().z(); - Eigen::Quaterniond q(in.linear()); - msg.orientation.x = q.x(); - msg.orientation.y = q.y(); - msg.orientation.z = q.z(); - msg.orientation.w = q.w(); - if (msg.orientation.w < 0) { - msg.orientation.x *= -1; - msg.orientation.y *= -1; - msg.orientation.z *= -1; - msg.orientation.w *= -1; - } - return msg; -} - -/** \brief Convert a Pose message transform type to a Eigen Affine3d. - * This function is a specialization of the toMsg template defined in tf2/convert.h. - * \param msg The Pose message to convert. - * \param out The pose converted to a Eigen Affine3d. - */ -inline -void fromMsg(const geometry_msgs::Pose& msg, Eigen::Affine3d& out) { - out = Eigen::Affine3d( - Eigen::Translation3d(msg.position.x, msg.position.y, msg.position.z) * - Eigen::Quaterniond(msg.orientation.w, - msg.orientation.x, - msg.orientation.y, - msg.orientation.z)); -} - -/** \brief Convert a Pose message transform type to a Eigen Isometry3d. - * This function is a specialization of the toMsg template defined in tf2/convert.h. - * \param msg The Pose message to convert. - * \param out The pose converted to a Eigen Isometry3d. - */ -inline -void fromMsg(const geometry_msgs::Pose& msg, Eigen::Isometry3d& out) { - out = Eigen::Isometry3d( - Eigen::Translation3d(msg.position.x, msg.position.y, msg.position.z) * - Eigen::Quaterniond(msg.orientation.w, - msg.orientation.x, - msg.orientation.y, - msg.orientation.z)); -} - -/** \brief Convert a Eigen 6x1 Matrix type to a Twist message. - * This function is a specialization of the toMsg template defined in tf2/convert.h. - * \param in The 6x1 Eigen Matrix to convert. - * \return The Eigen Matrix converted to a Twist message. - */ -inline -geometry_msgs::Twist toMsg(const Eigen::Matrix& in) { - geometry_msgs::Twist msg; - msg.linear.x = in[0]; - msg.linear.y = in[1]; - msg.linear.z = in[2]; - msg.angular.x = in[3]; - msg.angular.y = in[4]; - msg.angular.z = in[5]; - return msg; -} - -/** \brief Convert a Twist message transform type to a Eigen 6x1 Matrix. - * This function is a specialization of the toMsg template defined in tf2/convert.h. - * \param msg The Twist message to convert. - * \param out The twist converted to a Eigen 6x1 Matrix. - */ -inline -void fromMsg(const geometry_msgs::Twist &msg, Eigen::Matrix& out) { - out[0] = msg.linear.x; - out[1] = msg.linear.y; - out[2] = msg.linear.z; - out[3] = msg.angular.x; - out[4] = msg.angular.y; - out[5] = msg.angular.z; -} - -/** \brief Apply a geometry_msgs TransformStamped to an Eigen Affine3d transform. - * This function is a specialization of the doTransform template defined in tf2/convert.h, - * although it can not be used in tf2_ros::BufferInterface::transform because this - * function relies on the existence of a time stamp and a frame id in the type which should - * get transformed. - * \param t_in The frame to transform, as a timestamped Eigen Affine3d transform. - * \param t_out The transformed frame, as a timestamped Eigen Affine3d transform. - * \param transform The timestamped transform to apply, as a TransformStamped message. 
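- *
- * Usage sketch (illustrative only; the transform would normally come from a
- * tf2_ros::Buffer lookup, and the frame names are hypothetical):
- * \code
- * tf2::Stamped<Eigen::Affine3d> in(Eigen::Affine3d::Identity(), ros::Time(0), "tool");
- * tf2::Stamped<Eigen::Affine3d> out;
- * geometry_msgs::TransformStamped tfs;
- * tfs.transform.rotation.w = 1.0;  // identity transform, for illustration
- * tf2::doTransform(in, out, tfs);  // out carries tfs.header.stamp / frame_id
- * \endcode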
- */
-template <>
-inline
-void doTransform(const tf2::Stamped<Eigen::Affine3d>& t_in,
-                 tf2::Stamped<Eigen::Affine3d>& t_out,
-                 const geometry_msgs::TransformStamped& transform) {
-  t_out = tf2::Stamped<Eigen::Affine3d>(transformToEigen(transform) * t_in, transform.header.stamp, transform.header.frame_id);
-}
-
-/** \brief Apply a geometry_msgs TransformStamped to an Eigen Isometry transform.
- * This function is a specialization of the doTransform template defined in tf2/convert.h,
- * although it can not be used in tf2_ros::BufferInterface::transform because this
- * function relies on the existence of a time stamp and a frame id in the type which should
- * get transformed.
- * \param t_in The frame to transform, as a timestamped Eigen Isometry transform.
- * \param t_out The transformed frame, as a timestamped Eigen Isometry transform.
- * \param transform The timestamped transform to apply, as a TransformStamped message.
- */
-template <>
-inline
-void doTransform(const tf2::Stamped<Eigen::Isometry3d>& t_in,
-                 tf2::Stamped<Eigen::Isometry3d>& t_out,
-                 const geometry_msgs::TransformStamped& transform) {
-  t_out = tf2::Stamped<Eigen::Isometry3d>(transformToEigen(transform) * t_in, transform.header.stamp, transform.header.frame_id);
-}
-
-/** \brief Convert a stamped Eigen Affine3d transform type to a PoseStamped message.
- * This function is a specialization of the toMsg template defined in tf2/convert.h.
- * \param in The timestamped Eigen Affine3d to convert.
- * \return The Eigen transform converted to a PoseStamped message.
- */
-inline
-geometry_msgs::PoseStamped toMsg(const tf2::Stamped<Eigen::Affine3d>& in)
-{
-  geometry_msgs::PoseStamped msg;
-  msg.header.stamp = in.stamp_;
-  msg.header.frame_id = in.frame_id_;
-  msg.pose = toMsg(static_cast<Eigen::Affine3d>(in));
-  return msg;
-}
-
-inline
-geometry_msgs::PoseStamped toMsg(const tf2::Stamped<Eigen::Isometry3d>& in)
-{
-  geometry_msgs::PoseStamped msg;
-  msg.header.stamp = in.stamp_;
-  msg.header.frame_id = in.frame_id_;
-  msg.pose = toMsg(static_cast<Eigen::Isometry3d>(in));
-  return msg;
-}
-
-/** \brief Convert a PoseStamped message type to a stamped Eigen Affine3d.
- * This function is a specialization of the fromMsg template defined in tf2/convert.h.
- * \param msg The PoseStamped message to convert.
- * \param out The pose converted to a timestamped Eigen Affine3d.
- */
-inline
-void fromMsg(const geometry_msgs::PoseStamped& msg, tf2::Stamped<Eigen::Affine3d>& out)
-{
-  out.stamp_ = msg.header.stamp;
-  out.frame_id_ = msg.header.frame_id;
-  fromMsg(msg.pose, static_cast<Eigen::Affine3d&>(out));
-}
-
-inline
-void fromMsg(const geometry_msgs::PoseStamped& msg, tf2::Stamped<Eigen::Isometry3d>& out)
-{
-  out.stamp_ = msg.header.stamp;
-  out.frame_id_ = msg.header.frame_id;
-  fromMsg(msg.pose, static_cast<Eigen::Isometry3d&>(out));
-}
-
-} // namespace
-
-
-namespace Eigen {
-// This is needed to make the usage of the following conversion functions usable in tf2::convert().
-// According to clang's error note, 'fromMsg'/'toMsg' should be declared prior to the call site or
-// in an associated namespace of one of its arguments. The stamped versions of these conversion
-// functions work because they have tf2::Stamped as an argument, which lives in the same namespace
-// in which 'fromMsg'/'toMsg' is defined. The non-stamped versions have no argument defined in tf2,
-// so it takes the following definitions in the Eigen namespace to make them usable in
-// tf2::convert().
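-//
-// Illustrative sketch of why these wrappers matter (not part of the original
-// header): with them in place, tf2::convert() can resolve toMsg/fromMsg for
-// plain Eigen types via argument-dependent lookup, e.g.
-//
-//   Eigen::Vector3d v(1, 2, 3);
-//   geometry_msgs::Point p;
-//   tf2::convert(v, p);  // finds Eigen::toMsg through ADL
-//   tf2::convert(p, v);  // finds Eigen::fromMsg through ADL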
- -inline -geometry_msgs::Pose toMsg(const Eigen::Affine3d& in) { - return tf2::toMsg(in); -} - -inline -geometry_msgs::Pose toMsg(const Eigen::Isometry3d& in) { - return tf2::toMsg(in); -} - -inline -void fromMsg(const geometry_msgs::Point& msg, Eigen::Vector3d& out) { - tf2::fromMsg(msg, out); -} - -inline -geometry_msgs::Point toMsg(const Eigen::Vector3d& in) { - return tf2::toMsg(in); -} - -inline -void fromMsg(const geometry_msgs::Pose& msg, Eigen::Affine3d& out) { - tf2::fromMsg(msg, out); -} - -inline -void fromMsg(const geometry_msgs::Pose& msg, Eigen::Isometry3d& out) { - tf2::fromMsg(msg, out); -} - -inline -geometry_msgs::Quaternion toMsg(const Eigen::Quaterniond& in) { - return tf2::toMsg(in); -} - -inline -void fromMsg(const geometry_msgs::Quaternion& msg, Eigen::Quaterniond& out) { - tf2::fromMsg(msg, out); -} - -inline -geometry_msgs::Twist toMsg(const Eigen::Matrix& in) { - return tf2::toMsg(in); -} - -inline -void fromMsg(const geometry_msgs::Twist &msg, Eigen::Matrix& out) { - tf2::fromMsg(msg, out); -} - -} // namespace - -#endif // TF2_EIGEN_H diff --git a/src/geometry2/tf2_eigen/mainpage.dox b/src/geometry2/tf2_eigen/mainpage.dox deleted file mode 100644 index d129175..0000000 --- a/src/geometry2/tf2_eigen/mainpage.dox +++ /dev/null @@ -1,19 +0,0 @@ -/** -\mainpage -\htmlinclude manifest.html - -\b tf2_eigen contains functions for converting between geometry_msgs and Eigen data types. - -This library is an implementation of the templated conversion interface specified in tf/convert.h. -It enables easy conversion from geometry_msgs Transform and Point types to the types specified -by the Eigen matrix algebra library (see http://eigen.tuxfamily.org). - -See the Conversions overview -wiki page for more information about datatype conversion in tf2. - -\section codeapi Code API - -This library consists of one header only, tf2_eigen/tf2_eigen.h, which consists mostly of -specializations of template functions defined in tf2/convert.h. - -*/ diff --git a/src/geometry2/tf2_eigen/package.xml b/src/geometry2/tf2_eigen/package.xml deleted file mode 100644 index 7751dfa..0000000 --- a/src/geometry2/tf2_eigen/package.xml +++ /dev/null @@ -1,20 +0,0 @@ - - - tf2_eigen - 0.6.7 - tf2_eigen - Koji Terada - Koji Terada - BSD - - catkin - - geometry_msgs - tf2 - - cmake_modules - eigen - - eigen - - diff --git a/src/geometry2/tf2_eigen/test/tf2_eigen-test.cpp b/src/geometry2/tf2_eigen/test/tf2_eigen-test.cpp deleted file mode 100644 index f175e6c..0000000 --- a/src/geometry2/tf2_eigen/test/tf2_eigen-test.cpp +++ /dev/null @@ -1,213 +0,0 @@ -/* - * Copyright (c) Koji Terada - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ - -/** \author Wim Meeussen */ - - -#include -#include -#include - -TEST(TfEigen, ConvertVector3dStamped) -{ - const tf2::Stamped v(Eigen::Vector3d(1,2,3), ros::Time(5), "test"); - - tf2::Stamped v1; - geometry_msgs::PointStamped p1; - tf2::convert(v, p1); - tf2::convert(p1, v1); - - EXPECT_EQ(v, v1); -} - -TEST(TfEigen, ConvertVector3d) -{ - const Eigen::Vector3d v(1,2,3); - - Eigen::Vector3d v1; - geometry_msgs::Point p1; - tf2::convert(v, p1); - tf2::convert(p1, v1); - - EXPECT_EQ(v, v1); -} - -TEST(TfEigen, ConvertQuaterniondStamped) -{ - const tf2::Stamped v(Eigen::Quaterniond(1,2,3,4), ros::Time(5), "test"); - - tf2::Stamped v1; - geometry_msgs::QuaternionStamped p1; - tf2::convert(v, p1); - tf2::convert(p1, v1); - - EXPECT_EQ(v.frame_id_, v1.frame_id_); - EXPECT_EQ(v.stamp_, v1.stamp_); - EXPECT_EQ(v.w(), v1.w()); - EXPECT_EQ(v.x(), v1.x()); - EXPECT_EQ(v.y(), v1.y()); - EXPECT_EQ(v.z(), v1.z()); -} - -TEST(TfEigen, ConvertQuaterniond) -{ - const Eigen::Quaterniond v(1,2,3,4); - - Eigen::Quaterniond v1; - geometry_msgs::Quaternion p1; - tf2::convert(v, p1); - tf2::convert(p1, v1); - - EXPECT_EQ(v.w(), v1.w()); - EXPECT_EQ(v.x(), v1.x()); - EXPECT_EQ(v.y(), v1.y()); - EXPECT_EQ(v.z(), v1.z()); -} - -TEST(TfEigen, TransformQuaterion) { - const tf2::Stamped in(Eigen::Quaterniond(Eigen::AngleAxisd(1, Eigen::Vector3d::UnitX())), ros::Time(5), "test"); - const Eigen::Isometry3d iso(Eigen::AngleAxisd(M_PI/2, Eigen::Vector3d::UnitY())); - const Eigen::Affine3d affine(iso); - const tf2::Stamped expected(Eigen::Quaterniond(Eigen::AngleAxisd(1, Eigen::Vector3d::UnitZ())), ros::Time(10), "expected"); - - geometry_msgs::TransformStamped trafo = tf2::eigenToTransform(affine); - trafo.header.stamp = ros::Time(10); - trafo.header.frame_id = "expected"; - - tf2::Stamped out; - tf2::doTransform(in, out, trafo); - - EXPECT_TRUE(out.isApprox(expected)); - EXPECT_EQ(expected.stamp_, out.stamp_); - EXPECT_EQ(expected.frame_id_, out.frame_id_); - - // the same using Isometry - trafo = tf2::eigenToTransform(iso); - trafo.header.stamp = ros::Time(10); - trafo.header.frame_id = "expected"; - tf2::doTransform(in, out, trafo); - - EXPECT_TRUE(out.isApprox(expected)); - EXPECT_EQ(expected.stamp_, out.stamp_); - EXPECT_EQ(expected.frame_id_, out.frame_id_); -} - -TEST(TfEigen, ConvertAffine3dStamped) -{ - const Eigen::Affine3d v_nonstamped(Eigen::Translation3d(1,2,3) * Eigen::AngleAxis(1, Eigen::Vector3d::UnitX())); - const tf2::Stamped v(v_nonstamped, ros::Time(42), "test_frame"); - - tf2::Stamped v1; - geometry_msgs::PoseStamped p1; - tf2::convert(v, p1); - tf2::convert(p1, v1); - - EXPECT_EQ(v.translation(), v1.translation()); - EXPECT_EQ(v.rotation(), v1.rotation()); - EXPECT_EQ(v.frame_id_, v1.frame_id_); - EXPECT_EQ(v.stamp_, v1.stamp_); -} - -TEST(TfEigen, ConvertIsometry3dStamped) -{ - const Eigen::Isometry3d v_nonstamped(Eigen::Translation3d(1,2,3) * Eigen::AngleAxis(1, Eigen::Vector3d::UnitX())); - const tf2::Stamped v(v_nonstamped, ros::Time(42), "test_frame"); - - 
tf2::Stamped v1; - geometry_msgs::PoseStamped p1; - tf2::convert(v, p1); - tf2::convert(p1, v1); - - EXPECT_EQ(v.translation(), v1.translation()); - EXPECT_EQ(v.rotation(), v1.rotation()); - EXPECT_EQ(v.frame_id_, v1.frame_id_); - EXPECT_EQ(v.stamp_, v1.stamp_); -} - -TEST(TfEigen, ConvertAffine3d) -{ - const Eigen::Affine3d v(Eigen::Translation3d(1,2,3) * Eigen::AngleAxis(1, Eigen::Vector3d::UnitX())); - - Eigen::Affine3d v1; - geometry_msgs::Pose p1; - tf2::convert(v, p1); - tf2::convert(p1, v1); - - EXPECT_EQ(v.translation(), v1.translation()); - EXPECT_EQ(v.rotation(), v1.rotation()); -} - -TEST(TfEigen, ConvertIsometry3d) -{ - const Eigen::Isometry3d v(Eigen::Translation3d(1,2,3) * Eigen::AngleAxis(1, Eigen::Vector3d::UnitX())); - - Eigen::Isometry3d v1; - geometry_msgs::Pose p1; - tf2::convert(v, p1); - tf2::convert(p1, v1); - - EXPECT_EQ(v.translation(), v1.translation()); - EXPECT_EQ(v.rotation(), v1.rotation()); -} - -TEST(TfEigen, ConvertTransform) -{ - Eigen::Matrix4d tm; - - double alpha = M_PI/4.0; - double theta = M_PI/6.0; - double gamma = M_PI/12.0; - - tm << cos(theta)*cos(gamma),-cos(theta)*sin(gamma),sin(theta), 1, // - cos(alpha)*sin(gamma)+sin(alpha)*sin(theta)*cos(gamma),cos(alpha)*cos(gamma)-sin(alpha)*sin(theta)*sin(gamma),-sin(alpha)*cos(theta), 2, // - sin(alpha)*sin(gamma)-cos(alpha)*sin(theta)*cos(gamma),cos(alpha)*sin(theta)*sin(gamma)+sin(alpha)*cos(gamma),cos(alpha)*cos(theta), 3, // - 0, 0, 0, 1; - - Eigen::Affine3d T(tm); - - geometry_msgs::TransformStamped msg = tf2::eigenToTransform(T); - Eigen::Affine3d Tback = tf2::transformToEigen(msg); - - EXPECT_TRUE(T.isApprox(Tback)); - EXPECT_TRUE(tm.isApprox(Tback.matrix())); - - // same for Isometry - Eigen::Isometry3d I(tm); - - msg = tf2::eigenToTransform(T); - Eigen::Isometry3d Iback = tf2::transformToEigen(msg); - - EXPECT_TRUE(I.isApprox(Iback)); - EXPECT_TRUE(tm.isApprox(Iback.matrix())); -} - - - -int main(int argc, char **argv){ - testing::InitGoogleTest(&argc, argv); - - return RUN_ALL_TESTS(); -} diff --git a/src/geometry2/tf2_geometry_msgs/CHANGELOG.rst b/src/geometry2/tf2_geometry_msgs/CHANGELOG.rst deleted file mode 100644 index e9bb12c..0000000 --- a/src/geometry2/tf2_geometry_msgs/CHANGELOG.rst +++ /dev/null @@ -1,223 +0,0 @@ -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -Changelog for package tf2_geometry_msgs -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -0.6.7 (2020-03-09) ------------------- - -0.6.6 (2020-01-09) ------------------- -* Make kdl headers available (`#419 `_) -* Fix python3 compatibility for noetic (`#416 `_) -* add from STL (`#366 `_) -* use ROS_DEPRECATED macro for portability (`#362 `_) -* Contributors: James Xu, Shane Loretz, Tully Foote - -0.6.5 (2018-11-16) ------------------- -* Fix python3 import error -* Contributors: Timon Engelke - -0.6.4 (2018-11-06) ------------------- - -0.6.3 (2018-07-09) ------------------- -* Changed access to Vector to prevent memory leak (`#305 `_) -* Added WrenchStamped transformation (`#302 `_) -* Contributors: Denis Štogl, Markus Grimm - -0.6.2 (2018-05-02) ------------------- - -0.6.1 (2018-03-21) ------------------- - -0.6.0 (2018-03-21) ------------------- -* Boilerplate for Sphinx (`#284 `_) - Fixes `#264 `_ -* tf2_geometry_msgs added doTransform implementations for not stamped types (`#262 `_) - * tf2_geometry_msgs added doTransform implementations for not stamped Point, Quaterion, Pose and Vector3 message types -* New functionality to transform PoseWithCovarianceStamped messages. 
-0.5.17 (2018-01-01)
--------------------
-
-0.5.16 (2017-07-14)
--------------------
-* remove explicit templating to standardize on overloading. But provide backwards compatibility with deprecation.
-* adding unit tests for conversions
-* Copy transform before altering it in do_transform_vector3 [issue 233] (#235)
-* store gtest return value as int (#229)
-* Document the lifetime of the returned reference for getFrameId and getTimestamp
-* tf2_geometry_msgs: using tf2::Transform in doTransform-functions, marked gmTransformToKDL as deprecated
-* Switch tf2_geometry_msgs to use package.xml format 2 (#217)
-* tf2_geometry_msgs: added missing conversion functions
-* Contributors: Christopher Wecht, Sebastian Wagner, Tully Foote, dhood, pAIgn10
-
-0.5.15 (2017-01-24)
--------------------
-
-0.5.14 (2017-01-16)
--------------------
-* Add doxygen documentation for tf2_geometry_msgs
-* Contributors: Jackie Kay
-
-0.5.13 (2016-03-04)
--------------------
-* Add missing python_orocos_kdl dependency
-* make example into unit test
-* vector3 not affected by translation
-* Contributors: Daniel Claes, chapulina
-
-0.5.12 (2015-08-05)
--------------------
-* Merge pull request #112 from vrabaud/getYaw
-  Get yaw
-* add toMsg and fromMsg for QuaternionStamped
-* Contributors: Tully Foote, Vincent Rabaud
-
-0.5.11 (2015-04-22)
--------------------
-
-0.5.10 (2015-04-21)
--------------------
-
-0.5.9 (2015-03-25)
-------------------
-
-0.5.8 (2015-03-17)
-------------------
-* remove useless Makefile files
-* tf2 optimizations
-* add conversions of type between tf2 and geometry_msgs
-* fix ODR violations
-* Contributors: Vincent Rabaud
-
-0.5.7 (2014-12-23)
-------------------
-* fixing transitive dependency for kdl. Fixes #53
-* Contributors: Tully Foote
-
-0.5.6 (2014-09-18)
-------------------
-
-0.5.5 (2014-06-23)
-------------------
-
-0.5.4 (2014-05-07)
-------------------
-
-0.5.3 (2014-02-21)
-------------------
-
-0.5.2 (2014-02-20)
-------------------
-
-0.5.1 (2014-02-14)
-------------------
-
-0.5.0 (2014-02-14)
-------------------
-
-0.4.10 (2013-12-26)
--------------------
-
-0.4.9 (2013-11-06)
-------------------
-
-0.4.8 (2013-11-06)
-------------------
-
-0.4.7 (2013-08-28)
-------------------
-
-0.4.6 (2013-08-28)
-------------------
-
-0.4.5 (2013-07-11)
-------------------
-
-0.4.4 (2013-07-09)
-------------------
-* making repo use CATKIN_ENABLE_TESTING correctly and switching rostest to be a test_depend with that change.
-
-0.4.3 (2013-07-05)
-------------------
-
-0.4.2 (2013-07-05)
-------------------
-
-0.4.1 (2013-07-05)
-------------------
-
-0.4.0 (2013-06-27)
-------------------
-* moving convert methods back into tf2 because it does not have any ros dependencies beyond ros::Time which is already a dependency of tf2
-* Cleaning up unnecessary dependency on roscpp
-* converting contents of tf2_ros to be properly namespaced in the tf2_ros namespace
-* Cleaning up packaging of tf2 including:
-  removing unused nodehandle
-  cleaning up a few dependencies and linking
-  removing old backup of package.xml
-  making diff minimally different from tf version of library
-* Restoring test packages and bullet packages.
- reverting 3570e8c42f9b394ecbfd9db076b920b41300ad55 to get back more of the packages previously implemented - reverting 04cf29d1b58c660fdc999ab83563a5d4b76ab331 to fix `#7 `_ - -0.3.6 (2013-03-03) ------------------- - -0.3.5 (2013-02-15 14:46) ------------------------- -* 0.3.4 -> 0.3.5 - -0.3.4 (2013-02-15 13:14) ------------------------- -* 0.3.3 -> 0.3.4 - -0.3.3 (2013-02-15 11:30) ------------------------- -* 0.3.2 -> 0.3.3 - -0.3.2 (2013-02-15 00:42) ------------------------- -* 0.3.1 -> 0.3.2 - -0.3.1 (2013-02-14) ------------------- -* 0.3.0 -> 0.3.1 - -0.3.0 (2013-02-13) ------------------- -* switching to version 0.3.0 -* add setup.py -* added setup.py etc to tf2_geometry_msgs -* adding tf2 dependency to tf2_geometry_msgs -* adding tf2_geometry_msgs to groovy-devel (unit tests disabled) -* fixing groovy-devel -* removing bullet and kdl related packages -* disabling tf2_geometry_msgs due to missing kdl dependency -* catkinizing geometry-experimental -* catkinizing tf2_geometry_msgs -* add twist, wrench and pose conversion to kdl, fix message to message conversion by adding specific conversion functions -* merge tf2_cpp and tf2_py into tf2_ros -* Got transform with types working in python -* A working first version of transforming and converting between different types -* Moving from camelCase to undescores to be in line with python style guides -* Fixing tests now that Buffer creates a NodeHandle -* add posestamped -* import vector3stamped -* add support for Vector3Stamped and PoseStamped -* add support for PointStamped geometry_msgs -* add regression tests for geometry_msgs point, vector and pose -* Fixing missing export, compiling version of buffer_client test -* add bullet transforms, and create tests for bullet and kdl -* working transformations of messages -* add support for PoseStamped message -* test for pointstamped -* add PointStamped message transform methods -* transform for vector3stamped message diff --git a/src/geometry2/tf2_geometry_msgs/CMakeLists.txt b/src/geometry2/tf2_geometry_msgs/CMakeLists.txt deleted file mode 100644 index 4757437..0000000 --- a/src/geometry2/tf2_geometry_msgs/CMakeLists.txt +++ /dev/null @@ -1,52 +0,0 @@ -cmake_minimum_required(VERSION 2.8.3) -project(tf2_geometry_msgs) - -find_package(orocos_kdl) -find_package(catkin REQUIRED COMPONENTS geometry_msgs tf2_ros tf2) -find_package(Boost COMPONENTS thread REQUIRED) - -# Issue #53 -find_library(KDL_LIBRARY REQUIRED NAMES orocos-kdl HINTS ${orocos_kdl_LIBRARY_DIRS}) - -catkin_package( - LIBRARIES ${KDL_LIBRARY} - INCLUDE_DIRS include - DEPENDS orocos_kdl - CATKIN_DEPENDS geometry_msgs tf2_ros tf2) - - -include_directories(include - ${catkin_INCLUDE_DIRS} -) - -link_directories(${orocos_kdl_LIBRARY_DIRS}) - - - -install(DIRECTORY include/${PROJECT_NAME}/ - DESTINATION ${CATKIN_PACKAGE_INCLUDE_DESTINATION} -) - -catkin_python_setup() - -if(CATKIN_ENABLE_TESTING) - -catkin_add_gtest(test_tomsg_frommsg test/test_tomsg_frommsg.cpp) -target_include_directories(test_tomsg_frommsg PUBLIC ${orocos_kdl_INCLUDE_DIRS}) -target_link_libraries(test_tomsg_frommsg ${catkin_LIBRARIES} ${GTEST_LIBRARIES} ${orocos_kdl_LIBRARIES}) - -find_package(catkin REQUIRED COMPONENTS geometry_msgs rostest tf2_ros tf2) - -add_executable(test_geometry_msgs EXCLUDE_FROM_ALL test/test_tf2_geometry_msgs.cpp) -target_include_directories(test_geometry_msgs PUBLIC ${orocos_kdl_INCLUDE_DIRS}) -target_link_libraries(test_geometry_msgs ${catkin_LIBRARIES} ${GTEST_LIBRARIES} ${orocos_kdl_LIBRARIES}) 
-add_rostest(${CMAKE_CURRENT_SOURCE_DIR}/test/test.launch) -add_rostest(${CMAKE_CURRENT_SOURCE_DIR}/test/test_python.launch) - - -if(TARGET tests) - add_dependencies(tests test_geometry_msgs) -endif() - - -endif() diff --git a/src/geometry2/tf2_geometry_msgs/conf.py b/src/geometry2/tf2_geometry_msgs/conf.py deleted file mode 100644 index d358e5b..0000000 --- a/src/geometry2/tf2_geometry_msgs/conf.py +++ /dev/null @@ -1,290 +0,0 @@ -# -*- coding: utf-8 -*- -# -# tf2_geometry_msgs documentation build configuration file, created by -# sphinx-quickstart on Tue Feb 13 15:34:25 2018. -# -# This file is execfile()d with the current directory set to its -# containing dir. -# -# Note that not all possible configuration values are present in this -# autogenerated file. -# -# All configuration values have a default; values that are commented out -# serve to show the default. - -import sys -import os - -# If extensions (or modules to document with autodoc) are in another directory, -# add these directories to sys.path here. If the directory is relative to the -# documentation root, use os.path.abspath to make it absolute, like shown here. -#sys.path.insert(0, os.path.abspath('.')) - -# -- General configuration ------------------------------------------------ - -# If your documentation needs a minimal Sphinx version, state it here. -#needs_sphinx = '1.0' - -# Add any Sphinx extension module names here, as strings. They can be -# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom -# ones. -extensions = [ - 'sphinx.ext.autodoc', - 'sphinx.ext.doctest', - 'sphinx.ext.intersphinx', - 'sphinx.ext.todo', - 'sphinx.ext.pngmath', - 'sphinx.ext.viewcode', -] - -# Add any paths that contain templates here, relative to this directory. -templates_path = ['_templates'] - -# The suffix(es) of source filenames. -# You can specify multiple suffix as a list of string: -# source_suffix = ['.rst', '.md'] -source_suffix = '.rst' - -# The encoding of source files. -#source_encoding = 'utf-8-sig' - -# The master toctree document. -master_doc = 'index' - -# General information about the project. -project = u'tf2_geometry_msgs' -copyright = u'2018, Open Source Robotics Foundation, Inc.' -author = u'Tully Foote' - -# The version info for the project you're documenting, acts as replacement for -# |version| and |release|, also used in various other places throughout the -# built documents. -# -# The short X.Y version. -version = u'0.1' -# The full version, including alpha/beta/rc tags. -release = u'0.1' - -# The language for content autogenerated by Sphinx. Refer to documentation -# for a list of supported languages. -# -# This is also used if you do content translation via gettext catalogs. -# Usually you set "language" from the command line for these cases. -language = None - -# There are two options for replacing |today|: either, you set today to some -# non-false value, then it is used: -#today = '' -# Else, today_fmt is used as the format for a strftime call. -#today_fmt = '%B %d, %Y' - -# List of patterns, relative to source directory, that match files and -# directories to ignore when looking for source files. -exclude_patterns = ['_build'] - -# The reST default role (used for this markup: `text`) to use for all -# documents. -#default_role = None - -# If true, '()' will be appended to :func: etc. cross-reference text. -#add_function_parentheses = True - -# If true, the current module name will be prepended to all description -# unit titles (such as .. function::). 
-#add_module_names = True - -# If true, sectionauthor and moduleauthor directives will be shown in the -# output. They are ignored by default. -#show_authors = False - -# The name of the Pygments (syntax highlighting) style to use. -pygments_style = 'sphinx' - -# A list of ignored prefixes for module index sorting. -#modindex_common_prefix = [] - -# If true, keep warnings as "system message" paragraphs in the built documents. -#keep_warnings = False - -# -- Options for HTML output --------------------------------------------------- - -# The theme to use for HTML and HTML Help pages. Major themes that come with -# Sphinx are currently 'default' and 'sphinxdoc'. -html_theme = 'default' - -# Theme options are theme-specific and customize the look and feel of a theme -# further. For a list of options available for each theme, see the -# documentation. -#html_theme_options = {} - -# Add any paths that contain custom themes here, relative to this directory. -#html_theme_path = [] - -# The name for this set of Sphinx documents. If None, it defaults to -# " v documentation". -#html_title = None - -# A shorter title for the navigation bar. Default is the same as html_title. -#html_short_title = None - -# The name of an image file (relative to this directory) to place at the top -# of the sidebar. -#html_logo = None - -# The name of an image file (relative to this directory) to use as a favicon of -# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 -# pixels large. -#html_favicon = None - -# Add any paths that contain custom static files (such as style sheets) here, -# relative to this directory. They are copied after the builtin static files, -# so a file named "default.css" will overwrite the builtin "default.css". -html_static_path = ['_static'] - -# Add any extra paths that contain custom files (such as robots.txt or -# .htaccess) here, relative to this directory. These files are copied -# directly to the root of the documentation. -#html_extra_path = [] - -# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, -# using the given strftime format. -#html_last_updated_fmt = '%b %d, %Y' - -# If true, SmartyPants will be used to convert quotes and dashes to -# typographically correct entities. -#html_use_smartypants = True - -# Custom sidebar templates, maps document names to template names. -#html_sidebars = {} - -# Additional templates that should be rendered to pages, maps page names to -# template names. -#html_additional_pages = {} - -# If false, no module index is generated. -#html_domain_indices = True - -# If false, no index is generated. -#html_use_index = True - -# If true, the index is split into individual pages for each letter. -#html_split_index = False - -# If true, links to the reST sources are added to the pages. -#html_show_sourcelink = True - -# If true, "Created using Sphinx" is shown in the HTML footer. Default is True. -#html_show_sphinx = True - -# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. -#html_show_copyright = True - -# If true, an OpenSearch description file will be output, and all pages will -# contain a tag referring to it. The value of this option must be the -# base URL from which the finished HTML is served. -#html_use_opensearch = '' - -# This is the file name suffix for HTML files (e.g. ".xhtml"). -#html_file_suffix = None - -# Language to be used for generating the HTML full-text search index. 
-# Sphinx supports the following languages: -# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja' -# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr' -#html_search_language = 'en' - -# A dictionary with options for the search language support, empty by default. -# Now only 'ja' uses this config value -#html_search_options = {'type': 'default'} - -# The name of a javascript file (relative to the configuration directory) that -# implements a search results scorer. If empty, the default will be used. -#html_search_scorer = 'scorer.js' - -# Output file base name for HTML help builder. -htmlhelp_basename = 'tf2_geometry_msgsdoc' - -# -- Options for LaTeX output --------------------------------------------- - -latex_elements = { -# The paper size ('letterpaper' or 'a4paper'). -#'papersize': 'letterpaper', - -# The font size ('10pt', '11pt' or '12pt'). -#'pointsize': '10pt', - -# Additional stuff for the LaTeX preamble. -#'preamble': '', - -# Latex figure (float) alignment -#'figure_align': 'htbp', -} - -# Grouping the document tree into LaTeX files. List of tuples -# (source start file, target name, title, -# author, documentclass [howto, manual, or own class]). -latex_documents = [ - (master_doc, 'tf2_geometry_msgs.tex', u'tf2\\_geometry\\_msgs Documentation', - u'Tully Foote', 'manual'), -] - -# The name of an image file (relative to this directory) to place at the top of -# the title page. -#latex_logo = None - -# For "manual" documents, if this is true, then toplevel headings are parts, -# not chapters. -#latex_use_parts = False - -# If true, show page references after internal links. -#latex_show_pagerefs = False - -# If true, show URL addresses after external links. -#latex_show_urls = False - -# Documents to append as an appendix to all manuals. -#latex_appendices = [] - -# If false, no module index is generated. -#latex_domain_indices = True - - -# -- Options for manual page output --------------------------------------- - -# One entry per manual page. List of tuples -# (source start file, name, description, authors, manual section). -man_pages = [ - (master_doc, 'tf2_geometry_msgs', u'tf2_geometry_msgs Documentation', - [author], 1) -] - -# If true, show URL addresses after external links. -#man_show_urls = False - - -# -- Options for Texinfo output ------------------------------------------- - -# Grouping the document tree into Texinfo files. List of tuples -# (source start file, target name, title, author, -# dir menu entry, description, category) -texinfo_documents = [ - (master_doc, 'tf2_geometry_msgs', u'tf2_geometry_msgs Documentation', - author, 'tf2_geometry_msgs', 'One line description of project.', - 'Miscellaneous'), -] - -# Documents to append as an appendix to all manuals. -#texinfo_appendices = [] - -# If false, no module index is generated. -#texinfo_domain_indices = True - -# How to display URL addresses: 'footnote', 'no', or 'inline'. -#texinfo_show_urls = 'footnote' - -# If true, do not generate a @detailmenu in the "Top" node's menu. -#texinfo_no_detailmenu = False - - -# Example configuration for intersphinx: refer to the Python standard library. 
-intersphinx_mapping = {'https://docs.python.org/': None} diff --git a/src/geometry2/tf2_geometry_msgs/include/tf2_geometry_msgs/tf2_geometry_msgs.h b/src/geometry2/tf2_geometry_msgs/include/tf2_geometry_msgs/tf2_geometry_msgs.h deleted file mode 100644 index 717b5eb..0000000 --- a/src/geometry2/tf2_geometry_msgs/include/tf2_geometry_msgs/tf2_geometry_msgs.h +++ /dev/null @@ -1,1058 +0,0 @@ -/* - * Copyright (c) 2008, Willow Garage, Inc. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the Willow Garage, Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ - -/** \author Wim Meeussen */ - -#ifndef TF2_GEOMETRY_MSGS_H -#define TF2_GEOMETRY_MSGS_H - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include - -#include "ros/macros.h" - -namespace tf2 -{ - -/** \brief Convert a TransformStamped message to a KDL frame. - * \param t TransformStamped message to convert. - * \return The converted KDL Frame. - * \deprecated - */ -inline -ROS_DEPRECATED KDL::Frame gmTransformToKDL(const geometry_msgs::TransformStamped& t); -inline -KDL::Frame gmTransformToKDL(const geometry_msgs::TransformStamped& t) - { - return KDL::Frame(KDL::Rotation::Quaternion(t.transform.rotation.x, t.transform.rotation.y, - t.transform.rotation.z, t.transform.rotation.w), - KDL::Vector(t.transform.translation.x, t.transform.translation.y, t.transform.translation.z)); - } - - -/*************/ -/** Vector3 **/ -/*************/ - -/** \brief Convert a tf2 Vector3 type to its equivalent geometry_msgs representation. - * This function is a specialization of the toMsg template defined in tf2/convert.h. - * \param in A tf2 Vector3 object. - * \return The Vector3 converted to a geometry_msgs message type. - */ -inline -geometry_msgs::Vector3 toMsg(const tf2::Vector3& in) -{ - geometry_msgs::Vector3 out; - out.x = in.getX(); - out.y = in.getY(); - out.z = in.getZ(); - return out; -} - -/** \brief Convert a Vector3 message to its equivalent tf2 representation. - * This function is a specialization of the fromMsg template defined in tf2/convert.h. 
- * \param in A Vector3 message type. - * \param out The Vector3 converted to a tf2 type. - */ -inline -void fromMsg(const geometry_msgs::Vector3& in, tf2::Vector3& out) -{ - out = tf2::Vector3(in.x, in.y, in.z); -} - - -/********************/ -/** Vector3Stamped **/ -/********************/ - -/** \brief Extract a timestamp from the header of a Vector message. - * This function is a specialization of the getTimestamp template defined in tf2/convert.h. - * \param t VectorStamped message to extract the timestamp from. - * \return The timestamp of the message. The lifetime of the returned reference - * is bound to the lifetime of the argument. - */ -template <> -inline - const ros::Time& getTimestamp(const geometry_msgs::Vector3Stamped& t) {return t.header.stamp;} - -/** \brief Extract a frame ID from the header of a Vector message. - * This function is a specialization of the getFrameId template defined in tf2/convert.h. - * \param t VectorStamped message to extract the frame ID from. - * \return A string containing the frame ID of the message. The lifetime of the - * returned reference is bound to the lifetime of the argument. - */ -template <> -inline - const std::string& getFrameId(const geometry_msgs::Vector3Stamped& t) {return t.header.frame_id;} - - -/** \brief Trivial "conversion" function for Vector3 message type. - * This function is a specialization of the toMsg template defined in tf2/convert.h. - * \param in A Vector3Stamped message. - * \return The input argument. - */ -inline -geometry_msgs::Vector3Stamped toMsg(const geometry_msgs::Vector3Stamped& in) -{ - return in; -} - -/** \brief Trivial "conversion" function for Vector3 message type. - * This function is a specialization of the fromMsg template defined in tf2/convert.h. - * \param msg A Vector3Stamped message. - * \param out The input argument. - */ -inline -void fromMsg(const geometry_msgs::Vector3Stamped& msg, geometry_msgs::Vector3Stamped& out) -{ - out = msg; -} - -/** \brief Convert as stamped tf2 Vector3 type to its equivalent geometry_msgs representation. - * This function is a specialization of the toMsg template defined in tf2/convert.h. - * \param in An instance of the tf2::Vector3 specialization of the tf2::Stamped template. - * \return The Vector3Stamped converted to a geometry_msgs Vector3Stamped message type. - */ -inline -geometry_msgs::Vector3Stamped toMsg(const tf2::Stamped& in) -{ - geometry_msgs::Vector3Stamped out; - out.header.stamp = in.stamp_; - out.header.frame_id = in.frame_id_; - out.vector.x = in.getX(); - out.vector.y = in.getY(); - out.vector.z = in.getZ(); - return out; -} - -/** \brief Convert a Vector3Stamped message to its equivalent tf2 representation. - * This function is a specialization of the fromMsg template defined in tf2/convert.h. - * \param msg A Vector3Stamped message. - * \param out The Vector3Stamped converted to the equivalent tf2 type. - */ -inline -void fromMsg(const geometry_msgs::Vector3Stamped& msg, tf2::Stamped& out) -{ - out.stamp_ = msg.header.stamp; - out.frame_id_ = msg.header.frame_id; - out.setData(tf2::Vector3(msg.vector.x, msg.vector.y, msg.vector.z)); -} - - -/***********/ -/** Point **/ -/***********/ - -/** \brief Convert a tf2 Vector3 type to its equivalent geometry_msgs representation. - * This function is a specialization of the toMsg template defined in tf2/convert.h. - * \param in A tf2 Vector3 object. - * \return The Vector3 converted to a geometry_msgs message type. 
- */ -inline -geometry_msgs::Point& toMsg(const tf2::Vector3& in, geometry_msgs::Point& out) -{ - out.x = in.getX(); - out.y = in.getY(); - out.z = in.getZ(); - return out; -} - -/** \brief Convert a Vector3 message to its equivalent tf2 representation. - * This function is a specialization of the fromMsg template defined in tf2/convert.h. - * \param in A Vector3 message type. - * \param out The Vector3 converted to a tf2 type. - */ -inline -void fromMsg(const geometry_msgs::Point& in, tf2::Vector3& out) -{ - out = tf2::Vector3(in.x, in.y, in.z); -} - - -/******************/ -/** PointStamped **/ -/******************/ - -/** \brief Extract a timestamp from the header of a Point message. - * This function is a specialization of the getTimestamp template defined in tf2/convert.h. - * \param t PointStamped message to extract the timestamp from. - * \return The timestamp of the message. The lifetime of the returned reference - * is bound to the lifetime of the argument. - */ -template <> -inline - const ros::Time& getTimestamp(const geometry_msgs::PointStamped& t) {return t.header.stamp;} - -/** \brief Extract a frame ID from the header of a Point message. - * This function is a specialization of the getFrameId template defined in tf2/convert.h. - * \param t PointStamped message to extract the frame ID from. - * \return A string containing the frame ID of the message. The lifetime of the - * returned reference is bound to the lifetime of the argument. - */ -template <> -inline - const std::string& getFrameId(const geometry_msgs::PointStamped& t) {return t.header.frame_id;} - -/** \brief Trivial "conversion" function for Point message type. - * This function is a specialization of the toMsg template defined in tf2/convert.h. - * \param in A PointStamped message. - * \return The input argument. - */ -inline -geometry_msgs::PointStamped toMsg(const geometry_msgs::PointStamped& in) -{ - return in; -} - -/** \brief Trivial "conversion" function for Point message type. - * This function is a specialization of the fromMsg template defined in tf2/convert.h. - * \param msg A PointStamped message. - * \param out The input argument. - */ -inline -void fromMsg(const geometry_msgs::PointStamped& msg, geometry_msgs::PointStamped& out) -{ - out = msg; -} - -/** \brief Convert as stamped tf2 Vector3 type to its equivalent geometry_msgs representation. - * This function is a specialization of the toMsg template defined in tf2/convert.h. - * \param in An instance of the tf2::Vector3 specialization of the tf2::Stamped template. - * \return The Vector3Stamped converted to a geometry_msgs PointStamped message type. - */ -inline -geometry_msgs::PointStamped toMsg(const tf2::Stamped& in, geometry_msgs::PointStamped & out) -{ - out.header.stamp = in.stamp_; - out.header.frame_id = in.frame_id_; - out.point.x = in.getX(); - out.point.y = in.getY(); - out.point.z = in.getZ(); - return out; -} - -/** \brief Convert a PointStamped message to its equivalent tf2 representation. - * This function is a specialization of the fromMsg template defined in tf2/convert.h. - * \param msg A PointStamped message. - * \param out The PointStamped converted to the equivalent tf2 type. 
- */ -inline -void fromMsg(const geometry_msgs::PointStamped& msg, tf2::Stamped& out) -{ - out.stamp_ = msg.header.stamp; - out.frame_id_ = msg.header.frame_id; - out.setData(tf2::Vector3(msg.point.x, msg.point.y, msg.point.z)); -} - - -/****************/ -/** Quaternion **/ -/****************/ - -/** \brief Convert a tf2 Quaternion type to its equivalent geometry_msgs representation. - * This function is a specialization of the toMsg template defined in tf2/convert.h. - * \param in A tf2 Quaternion object. - * \return The Quaternion converted to a geometry_msgs message type. - */ -inline -geometry_msgs::Quaternion toMsg(const tf2::Quaternion& in) -{ - geometry_msgs::Quaternion out; - out.w = in.getW(); - out.x = in.getX(); - out.y = in.getY(); - out.z = in.getZ(); - return out; -} - -/** \brief Convert a Quaternion message to its equivalent tf2 representation. - * This function is a specialization of the fromMsg template defined in tf2/convert.h. - * \param in A Quaternion message type. - * \param out The Quaternion converted to a tf2 type. - */ -inline -void fromMsg(const geometry_msgs::Quaternion& in, tf2::Quaternion& out) -{ - // w at the end in the constructor - out = tf2::Quaternion(in.x, in.y, in.z, in.w); -} - - -/***********************/ -/** QuaternionStamped **/ -/***********************/ - -/** \brief Extract a timestamp from the header of a Quaternion message. - * This function is a specialization of the getTimestamp template defined in tf2/convert.h. - * \param t QuaternionStamped message to extract the timestamp from. - * \return The timestamp of the message. The lifetime of the returned reference - * is bound to the lifetime of the argument. - */ -template <> -inline -const ros::Time& getTimestamp(const geometry_msgs::QuaternionStamped& t) {return t.header.stamp;} - -/** \brief Extract a frame ID from the header of a Quaternion message. - * This function is a specialization of the getFrameId template defined in tf2/convert.h. - * \param t QuaternionStamped message to extract the frame ID from. - * \return A string containing the frame ID of the message. The lifetime of the - * returned reference is bound to the lifetime of the argument. - */ -template <> -inline -const std::string& getFrameId(const geometry_msgs::QuaternionStamped& t) {return t.header.frame_id;} - -/** \brief Trivial "conversion" function for Quaternion message type. - * This function is a specialization of the toMsg template defined in tf2/convert.h. - * \param in A QuaternionStamped message. - * \return The input argument. - */ -inline -geometry_msgs::QuaternionStamped toMsg(const geometry_msgs::QuaternionStamped& in) -{ - return in; -} - -/** \brief Trivial "conversion" function for Quaternion message type. - * This function is a specialization of the fromMsg template defined in tf2/convert.h. - * \param msg A QuaternionStamped message. - * \param out The input argument. - */ -inline -void fromMsg(const geometry_msgs::QuaternionStamped& msg, geometry_msgs::QuaternionStamped& out) -{ - out = msg; -} - -/** \brief Convert as stamped tf2 Quaternion type to its equivalent geometry_msgs representation. - * This function is a specialization of the toMsg template defined in tf2/convert.h. - * \param in An instance of the tf2::Quaternion specialization of the tf2::Stamped template. - * \return The QuaternionStamped converted to a geometry_msgs QuaternionStamped message type. 
- */ -inline -geometry_msgs::QuaternionStamped toMsg(const tf2::Stamped& in) -{ - geometry_msgs::QuaternionStamped out; - out.header.stamp = in.stamp_; - out.header.frame_id = in.frame_id_; - out.quaternion.w = in.getW(); - out.quaternion.x = in.getX(); - out.quaternion.y = in.getY(); - out.quaternion.z = in.getZ(); - return out; -} - -template <> -inline -ROS_DEPRECATED geometry_msgs::QuaternionStamped toMsg(const tf2::Stamped& in); - - -//Backwards compatibility remove when forked for Lunar or newer -template <> -inline -geometry_msgs::QuaternionStamped toMsg(const tf2::Stamped& in) -{ - return toMsg(in); -} - -/** \brief Convert a QuaternionStamped message to its equivalent tf2 representation. - * This function is a specialization of the fromMsg template defined in tf2/convert.h. - * \param in A QuaternionStamped message type. - * \param out The QuaternionStamped converted to the equivalent tf2 type. - */ -inline -void fromMsg(const geometry_msgs::QuaternionStamped& in, tf2::Stamped& out) -{ - out.stamp_ = in.header.stamp; - out.frame_id_ = in.header.frame_id; - tf2::Quaternion tmp; - fromMsg(in.quaternion, tmp); - out.setData(tmp); -} - -template<> -inline -ROS_DEPRECATED void fromMsg(const geometry_msgs::QuaternionStamped& in, tf2::Stamped& out); - -//Backwards compatibility remove when forked for Lunar or newer -template<> -inline -void fromMsg(const geometry_msgs::QuaternionStamped& in, tf2::Stamped& out) -{ - fromMsg(in, out); -} - -/**********/ -/** Pose **/ -/**********/ - -/** \brief Convert a tf2 Transform type to an equivalent geometry_msgs Pose message. - * \param in A tf2 Transform object. - * \param out The Transform converted to a geometry_msgs Pose message type. - */ -inline -geometry_msgs::Pose& toMsg(const tf2::Transform& in, geometry_msgs::Pose& out) -{ - toMsg(in.getOrigin(), out.position); - out.orientation = toMsg(in.getRotation()); - return out; -} - -/** \brief Convert a geometry_msgs Pose message to an equivalent tf2 Transform type. - * \param in A Pose message. - * \param out The Pose converted to a tf2 Transform type. - */ -inline -void fromMsg(const geometry_msgs::Pose& in, tf2::Transform& out) -{ - out.setOrigin(tf2::Vector3(in.position.x, in.position.y, in.position.z)); - // w at the end in the constructor - out.setRotation(tf2::Quaternion(in.orientation.x, in.orientation.y, in.orientation.z, in.orientation.w)); -} - - - -/*****************/ -/** PoseStamped **/ -/*****************/ - -/** \brief Extract a timestamp from the header of a Pose message. - * This function is a specialization of the getTimestamp template defined in tf2/convert.h. - * \param t PoseStamped message to extract the timestamp from. - * \return The timestamp of the message. - */ -template <> -inline - const ros::Time& getTimestamp(const geometry_msgs::PoseStamped& t) {return t.header.stamp;} - -/** \brief Extract a frame ID from the header of a Pose message. - * This function is a specialization of the getFrameId template defined in tf2/convert.h. - * \param t PoseStamped message to extract the frame ID from. - * \return A string containing the frame ID of the message. - */ -template <> -inline - const std::string& getFrameId(const geometry_msgs::PoseStamped& t) {return t.header.frame_id;} - -/** \brief Trivial "conversion" function for Pose message type. - * This function is a specialization of the toMsg template defined in tf2/convert.h. - * \param in A PoseStamped message. - * \return The input argument. 
- */ -inline -geometry_msgs::PoseStamped toMsg(const geometry_msgs::PoseStamped& in) -{ - return in; -} - -/** \brief Trivial "conversion" function for Pose message type. - * This function is a specialization of the toMsg template defined in tf2/convert.h. - * \param msg A PoseStamped message. - * \param out The input argument. - */ -inline -void fromMsg(const geometry_msgs::PoseStamped& msg, geometry_msgs::PoseStamped& out) -{ - out = msg; -} - -/** \brief Convert as stamped tf2 Pose type to its equivalent geometry_msgs representation. - * This function is a specialization of the toMsg template defined in tf2/convert.h. - * \param in An instance of the tf2::Pose specialization of the tf2::Stamped template. - * \return The PoseStamped converted to a geometry_msgs PoseStamped message type. - */ -inline -geometry_msgs::PoseStamped toMsg(const tf2::Stamped& in, geometry_msgs::PoseStamped & out) -{ - out.header.stamp = in.stamp_; - out.header.frame_id = in.frame_id_; - toMsg(in.getOrigin(), out.pose.position); - out.pose.orientation = toMsg(in.getRotation()); - return out; -} - -/** \brief Convert a PoseStamped message to its equivalent tf2 representation. - * This function is a specialization of the fromMsg template defined in tf2/convert.h. - * \param msg A PoseStamped message. - * \param out The PoseStamped converted to the equivalent tf2 type. - */ -inline -void fromMsg(const geometry_msgs::PoseStamped& msg, tf2::Stamped& out) -{ - out.stamp_ = msg.header.stamp; - out.frame_id_ = msg.header.frame_id; - tf2::Transform tmp; - fromMsg(msg.pose, tmp); - out.setData(tmp); -} - -/*******************************/ -/** PoseWithCovarianceStamped **/ -/*******************************/ - -/** \brief Extract a timestamp from the header of a PoseWithCovarianceStamped message. - * This function is a specialization of the getTimestamp template defined in tf2/convert.h. - * \param t PoseWithCovarianceStamped message to extract the timestamp from. - * \return The timestamp of the message. - */ -template <> -inline - const ros::Time& getTimestamp(const geometry_msgs::PoseWithCovarianceStamped& t) {return t.header.stamp;} - -/** \brief Extract a frame ID from the header of a PoseWithCovarianceStamped message. - * This function is a specialization of the getFrameId template defined in tf2/convert.h. - * \param t PoseWithCovarianceStamped message to extract the frame ID from. - * \return A string containing the frame ID of the message. - */ -template <> -inline - const std::string& getFrameId(const geometry_msgs::PoseWithCovarianceStamped& t) {return t.header.frame_id;} - -/** \brief Trivial "conversion" function for PoseWithCovarianceStamped message type. - * This function is a specialization of the toMsg template defined in tf2/convert.h. - * \param in A PoseWithCovarianceStamped message. - * \return The input argument. - */ -inline -geometry_msgs::PoseWithCovarianceStamped toMsg(const geometry_msgs::PoseWithCovarianceStamped& in) -{ - return in; -} - -/** \brief Trivial "conversion" function for PoseWithCovarianceStamped message type. - * This function is a specialization of the toMsg template defined in tf2/convert.h. - * \param msg A PoseWithCovarianceStamped message. - * \param out The input argument. - */ -inline -void fromMsg(const geometry_msgs::PoseWithCovarianceStamped& msg, geometry_msgs::PoseWithCovarianceStamped& out) -{ - out = msg; -} - -/** \brief Convert as stamped tf2 PoseWithCovarianceStamped type to its equivalent geometry_msgs representation. 
- * This function is a specialization of the toMsg template defined in tf2/convert.h. - * \param in An instance of the tf2::Pose specialization of the tf2::Stamped template. - * \return The PoseWithCovarianceStamped converted to a geometry_msgs PoseWithCovarianceStamped message type. - */ -inline -geometry_msgs::PoseWithCovarianceStamped toMsg(const tf2::Stamped& in, geometry_msgs::PoseWithCovarianceStamped & out) -{ - out.header.stamp = in.stamp_; - out.header.frame_id = in.frame_id_; - toMsg(in.getOrigin(), out.pose.pose.position); - out.pose.pose.orientation = toMsg(in.getRotation()); - return out; -} - -/** \brief Convert a PoseWithCovarianceStamped message to its equivalent tf2 representation. - * This function is a specialization of the fromMsg template defined in tf2/convert.h. - * \param msg A PoseWithCovarianceStamped message. - * \param out The PoseWithCovarianceStamped converted to the equivalent tf2 type. - */ -inline -void fromMsg(const geometry_msgs::PoseWithCovarianceStamped& msg, tf2::Stamped& out) -{ - out.stamp_ = msg.header.stamp; - out.frame_id_ = msg.header.frame_id; - tf2::Transform tmp; - fromMsg(msg.pose, tmp); - out.setData(tmp); -} - -/***************/ -/** Transform **/ -/***************/ - -/** \brief Convert a tf2 Transform type to its equivalent geometry_msgs representation. - * This function is a specialization of the toMsg template defined in tf2/convert.h. - * \param in A tf2 Transform object. - * \return The Transform converted to a geometry_msgs message type. - */ -inline -geometry_msgs::Transform toMsg(const tf2::Transform& in) -{ - geometry_msgs::Transform out; - out.translation = toMsg(in.getOrigin()); - out.rotation = toMsg(in.getRotation()); - return out; -} - -/** \brief Convert a Transform message to its equivalent tf2 representation. - * This function is a specialization of the toMsg template defined in tf2/convert.h. - * \param in A Transform message type. - * \param out The Transform converted to a tf2 type. - */ -inline -void fromMsg(const geometry_msgs::Transform& in, tf2::Transform& out) -{ - tf2::Vector3 v; - fromMsg(in.translation, v); - out.setOrigin(v); - // w at the end in the constructor - tf2::Quaternion q; - fromMsg(in.rotation, q); - out.setRotation(q); -} - - -/**********************/ -/** TransformStamped **/ -/**********************/ - -/** \brief Extract a timestamp from the header of a Transform message. - * This function is a specialization of the getTimestamp template defined in tf2/convert.h. - * \param t TransformStamped message to extract the timestamp from. - * \return The timestamp of the message. - */ -template <> -inline -const ros::Time& getTimestamp(const geometry_msgs::TransformStamped& t) {return t.header.stamp;} - -/** \brief Extract a frame ID from the header of a Transform message. - * This function is a specialization of the getFrameId template defined in tf2/convert.h. - * \param t TransformStamped message to extract the frame ID from. - * \return A string containing the frame ID of the message. - */ -template <> -inline -const std::string& getFrameId(const geometry_msgs::TransformStamped& t) {return t.header.frame_id;} - -/** \brief Trivial "conversion" function for Transform message type. - * This function is a specialization of the toMsg template defined in tf2/convert.h. - * \param in A TransformStamped message. - * \return The input argument. 
- */ -inline -geometry_msgs::TransformStamped toMsg(const geometry_msgs::TransformStamped& in) -{ - return in; -} - -/** \brief Convert a TransformStamped message to its equivalent tf2 representation. - * This function is a specialization of the toMsg template defined in tf2/convert.h. - * \param msg A TransformStamped message type. - * \param out The TransformStamped converted to the equivalent tf2 type. - */ -inline -void fromMsg(const geometry_msgs::TransformStamped& msg, geometry_msgs::TransformStamped& out) -{ - out = msg; -} - -/** \brief Convert as stamped tf2 Transform type to its equivalent geometry_msgs representation. - * This function is a specialization of the toMsg template defined in tf2/convert.h. - * \param in An instance of the tf2::Transform specialization of the tf2::Stamped template. - * \return The tf2::Stamped converted to a geometry_msgs TransformStamped message type. - */ -inline -geometry_msgs::TransformStamped toMsg(const tf2::Stamped& in) -{ - geometry_msgs::TransformStamped out; - out.header.stamp = in.stamp_; - out.header.frame_id = in.frame_id_; - out.transform.translation = toMsg(in.getOrigin()); - out.transform.rotation = toMsg(in.getRotation()); - return out; -} - - -/** \brief Convert a TransformStamped message to its equivalent tf2 representation. - * This function is a specialization of the fromMsg template defined in tf2/convert.h. - * \param msg A TransformStamped message. - * \param out The TransformStamped converted to the equivalent tf2 type. - */ -inline -void fromMsg(const geometry_msgs::TransformStamped& msg, tf2::Stamped& out) -{ - out.stamp_ = msg.header.stamp; - out.frame_id_ = msg.header.frame_id; - tf2::Transform tmp; - fromMsg(msg.transform, tmp); - out.setData(tmp); -} - -/** \brief Apply a geometry_msgs TransformStamped to an geometry_msgs Point type. - * This function is a specialization of the doTransform template defined in tf2/convert.h. - * \param t_in The point to transform, as a Point3 message. - * \param t_out The transformed point, as a Point3 message. - * \param transform The timestamped transform to apply, as a TransformStamped message. - */ -template <> -inline - void doTransform(const geometry_msgs::Point& t_in, geometry_msgs::Point& t_out, const geometry_msgs::TransformStamped& transform) - { - tf2::Transform t; - fromMsg(transform.transform, t); - tf2::Vector3 v_in; - fromMsg(t_in, v_in); - tf2::Vector3 v_out = t * v_in; - toMsg(v_out, t_out); - } - -/** \brief Apply a geometry_msgs TransformStamped to an stamped geometry_msgs Point type. - * This function is a specialization of the doTransform template defined in tf2/convert.h. - * \param t_in The point to transform, as a timestamped Point3 message. - * \param t_out The transformed point, as a timestamped Point3 message. - * \param transform The timestamped transform to apply, as a TransformStamped message. - */ -template <> -inline - void doTransform(const geometry_msgs::PointStamped& t_in, geometry_msgs::PointStamped& t_out, const geometry_msgs::TransformStamped& transform) - { - doTransform(t_in.point, t_out.point, transform); - t_out.header.stamp = transform.header.stamp; - t_out.header.frame_id = transform.header.frame_id; - } - -/** \brief Apply a geometry_msgs TransformStamped to an geometry_msgs Quaternion type. - * This function is a specialization of the doTransform template defined in tf2/convert.h. - * \param t_in The quaternion to transform, as a Quaternion3 message. - * \param t_out The transformed quaternion, as a Quaternion3 message. 
- * \param transform The timestamped transform to apply, as a TransformStamped message. - */ -template <> -inline -void doTransform(const geometry_msgs::Quaternion& t_in, geometry_msgs::Quaternion& t_out, const geometry_msgs::TransformStamped& transform) -{ - tf2::Quaternion t, q_in; - fromMsg(transform.transform.rotation, t); - fromMsg(t_in, q_in); - - tf2::Quaternion q_out = t * q_in; - t_out = toMsg(q_out); -} - -/** \brief Apply a geometry_msgs TransformStamped to an stamped geometry_msgs Quaternion type. - * This function is a specialization of the doTransform template defined in tf2/convert.h. - * \param t_in The quaternion to transform, as a timestamped Quaternion3 message. - * \param t_out The transformed quaternion, as a timestamped Quaternion3 message. - * \param transform The timestamped transform to apply, as a TransformStamped message. - */ -template <> -inline -void doTransform(const geometry_msgs::QuaternionStamped& t_in, geometry_msgs::QuaternionStamped& t_out, const geometry_msgs::TransformStamped& transform) -{ - doTransform(t_in.quaternion, t_out.quaternion, transform); - t_out.header.stamp = transform.header.stamp; - t_out.header.frame_id = transform.header.frame_id; -} - - -/** \brief Apply a geometry_msgs TransformStamped to an geometry_msgs Pose type. -* This function is a specialization of the doTransform template defined in tf2/convert.h. -* \param t_in The pose to transform, as a Pose3 message. -* \param t_out The transformed pose, as a Pose3 message. -* \param transform The timestamped transform to apply, as a TransformStamped message. -*/ -template <> -inline -void doTransform(const geometry_msgs::Pose& t_in, geometry_msgs::Pose& t_out, const geometry_msgs::TransformStamped& transform) -{ - tf2::Vector3 v; - fromMsg(t_in.position, v); - tf2::Quaternion r; - fromMsg(t_in.orientation, r); - - tf2::Transform t; - fromMsg(transform.transform, t); - tf2::Transform v_out = t * tf2::Transform(r, v); - toMsg(v_out, t_out); -} - -/** \brief Apply a geometry_msgs TransformStamped to an stamped geometry_msgs Pose type. -* This function is a specialization of the doTransform template defined in tf2/convert.h. -* \param t_in The pose to transform, as a timestamped Pose3 message. -* \param t_out The transformed pose, as a timestamped Pose3 message. -* \param transform The timestamped transform to apply, as a TransformStamped message. -*/ -template <> -inline -void doTransform(const geometry_msgs::PoseStamped& t_in, geometry_msgs::PoseStamped& t_out, const geometry_msgs::TransformStamped& transform) -{ - doTransform(t_in.pose, t_out.pose, transform); - t_out.header.stamp = transform.header.stamp; - t_out.header.frame_id = transform.header.frame_id; -} - -/** \brief Transform the covariance matrix of a PoseWithCovarianceStamped message to a new frame. -* \param t_in The covariance matrix to transform. -* \param transform The timestamped transform to apply, as a TransformStamped message. -* \return The transformed covariance matrix. -*/ -inline -geometry_msgs::PoseWithCovariance::_covariance_type transformCovariance(const geometry_msgs::PoseWithCovariance::_covariance_type& cov_in, const tf2::Transform& transform) -{ - /** - * To transform a covariance matrix: - * - * [R 0] COVARIANCE [R' 0 ] - * [0 R] [0 R'] - * - * Where: - * R is the rotation matrix (3x3). - * R' is the transpose of the rotation matrix. - * COVARIANCE is the 6x6 covariance matrix to be transformed. 
- */ - - // get rotation matrix transpose - const tf2::Matrix3x3 R_transpose = transform.getBasis().transpose(); - - // convert the covariance matrix into four 3x3 blocks - const tf2::Matrix3x3 cov_11(cov_in[0], cov_in[1], cov_in[2], - cov_in[6], cov_in[7], cov_in[8], - cov_in[12], cov_in[13], cov_in[14]); - const tf2::Matrix3x3 cov_12(cov_in[3], cov_in[4], cov_in[5], - cov_in[9], cov_in[10], cov_in[11], - cov_in[15], cov_in[16], cov_in[17]); - const tf2::Matrix3x3 cov_21(cov_in[18], cov_in[19], cov_in[20], - cov_in[24], cov_in[25], cov_in[26], - cov_in[30], cov_in[31], cov_in[32]); - const tf2::Matrix3x3 cov_22(cov_in[21], cov_in[22], cov_in[23], - cov_in[27], cov_in[28], cov_in[29], - cov_in[33], cov_in[34], cov_in[35]); - - // perform blockwise matrix multiplication - const tf2::Matrix3x3 result_11 = transform.getBasis()*cov_11*R_transpose; - const tf2::Matrix3x3 result_12 = transform.getBasis()*cov_12*R_transpose; - const tf2::Matrix3x3 result_21 = transform.getBasis()*cov_21*R_transpose; - const tf2::Matrix3x3 result_22 = transform.getBasis()*cov_22*R_transpose; - - // form the output - geometry_msgs::PoseWithCovariance::_covariance_type output; - output[0] = result_11[0][0]; - output[1] = result_11[0][1]; - output[2] = result_11[0][2]; - output[6] = result_11[1][0]; - output[7] = result_11[1][1]; - output[8] = result_11[1][2]; - output[12] = result_11[2][0]; - output[13] = result_11[2][1]; - output[14] = result_11[2][2]; - - output[3] = result_12[0][0]; - output[4] = result_12[0][1]; - output[5] = result_12[0][2]; - output[9] = result_12[1][0]; - output[10] = result_12[1][1]; - output[11] = result_12[1][2]; - output[15] = result_12[2][0]; - output[16] = result_12[2][1]; - output[17] = result_12[2][2]; - - output[18] = result_21[0][0]; - output[19] = result_21[0][1]; - output[20] = result_21[0][2]; - output[24] = result_21[1][0]; - output[25] = result_21[1][1]; - output[26] = result_21[1][2]; - output[30] = result_21[2][0]; - output[31] = result_21[2][1]; - output[32] = result_21[2][2]; - - output[21] = result_22[0][0]; - output[22] = result_22[0][1]; - output[23] = result_22[0][2]; - output[27] = result_22[1][0]; - output[28] = result_22[1][1]; - output[29] = result_22[1][2]; - output[33] = result_22[2][0]; - output[34] = result_22[2][1]; - output[35] = result_22[2][2]; - - return output; -} - -/** \brief Apply a geometry_msgs TransformStamped to an geometry_msgs PoseWithCovarianceStamped type. -* This function is a specialization of the doTransform template defined in tf2/convert.h. -* \param t_in The pose to transform, as a timestamped PoseWithCovarianceStamped message. -* \param t_out The transformed pose, as a timestamped PoseWithCovarianceStamped message. -* \param transform The timestamped transform to apply, as a TransformStamped message. -*/ -template <> -inline -void doTransform(const geometry_msgs::PoseWithCovarianceStamped& t_in, geometry_msgs::PoseWithCovarianceStamped& t_out, const geometry_msgs::TransformStamped& transform) -{ - tf2::Vector3 v; - fromMsg(t_in.pose.pose.position, v); - tf2::Quaternion r; - fromMsg(t_in.pose.pose.orientation, r); - - tf2::Transform t; - fromMsg(transform.transform, t); - tf2::Transform v_out = t * tf2::Transform(r, v); - toMsg(v_out, t_out.pose.pose); - t_out.header.stamp = transform.header.stamp; - t_out.header.frame_id = transform.header.frame_id; - - t_out.pose.covariance = transformCovariance(t_in.pose.covariance, t); -} - -/** \brief Apply a geometry_msgs TransformStamped to an geometry_msgs Transform type. 
- * This function is a specialization of the doTransform template defined in tf2/convert.h. - * \param t_in The frame to transform, as a timestamped Transform3 message. - * \param t_out The frame transform, as a timestamped Transform3 message. - * \param transform The timestamped transform to apply, as a TransformStamped message. - */ -template <> -inline -void doTransform(const geometry_msgs::TransformStamped& t_in, geometry_msgs::TransformStamped& t_out, const geometry_msgs::TransformStamped& transform) - { - tf2::Transform input; - fromMsg(t_in.transform, input); - - tf2::Transform t; - fromMsg(transform.transform, t); - tf2::Transform v_out = t * input; - - t_out.transform = toMsg(v_out); - t_out.header.stamp = transform.header.stamp; - t_out.header.frame_id = transform.header.frame_id; - } - -/** \brief Apply a geometry_msgs TransformStamped to an geometry_msgs Vector type. - * This function is a specialization of the doTransform template defined in tf2/convert.h. - * \param t_in The vector to transform, as a Vector3 message. - * \param t_out The transformed vector, as a Vector3 message. - * \param transform The timestamped transform to apply, as a TransformStamped message. - */ -template <> -inline - void doTransform(const geometry_msgs::Vector3& t_in, geometry_msgs::Vector3& t_out, const geometry_msgs::TransformStamped& transform) - { - tf2::Transform t; - fromMsg(transform.transform, t); - tf2::Vector3 v_out = t.getBasis() * tf2::Vector3(t_in.x, t_in.y, t_in.z); - t_out.x = v_out[0]; - t_out.y = v_out[1]; - t_out.z = v_out[2]; - } - -/** \brief Apply a geometry_msgs TransformStamped to an stamped geometry_msgs Vector type. - * This function is a specialization of the doTransform template defined in tf2/convert.h. - * \param t_in The vector to transform, as a timestamped Vector3 message. - * \param t_out The transformed vector, as a timestamped Vector3 message. - * \param transform The timestamped transform to apply, as a TransformStamped message. 
- */ -template <> -inline - void doTransform(const geometry_msgs::Vector3Stamped& t_in, geometry_msgs::Vector3Stamped& t_out, const geometry_msgs::TransformStamped& transform) - { - doTransform(t_in.vector, t_out.vector, transform); - t_out.header.stamp = transform.header.stamp; - t_out.header.frame_id = transform.header.frame_id; - } - - -/**********************/ -/*** WrenchStamped ****/ -/**********************/ -template <> -inline -const ros::Time& getTimestamp(const geometry_msgs::WrenchStamped& t) {return t.header.stamp;} - - -template <> -inline -const std::string& getFrameId(const geometry_msgs::WrenchStamped& t) {return t.header.frame_id;} - - -inline -geometry_msgs::WrenchStamped toMsg(const geometry_msgs::WrenchStamped& in) -{ - return in; -} - -inline -void fromMsg(const geometry_msgs::WrenchStamped& msg, geometry_msgs::WrenchStamped& out) -{ - out = msg; -} - - -inline -geometry_msgs::WrenchStamped toMsg(const tf2::Stamped>& in, geometry_msgs::WrenchStamped & out) -{ - out.header.stamp = in.stamp_; - out.header.frame_id = in.frame_id_; - out.wrench.force = toMsg(in[0]); - out.wrench.torque = toMsg(in[1]); - return out; -} - - -inline -void fromMsg(const geometry_msgs::WrenchStamped& msg, tf2::Stamped>& out) -{ - out.stamp_ = msg.header.stamp; - out.frame_id_ = msg.header.frame_id; - tf2::Vector3 tmp; - fromMsg(msg.wrench.force, tmp); - tf2::Vector3 tmp1; - fromMsg(msg.wrench.torque, tmp1); - std::array tmp_array; - tmp_array[0] = tmp; - tmp_array[1] = tmp1; - out.setData(tmp_array); -} - -template<> -inline -void doTransform(const geometry_msgs::Wrench& t_in, geometry_msgs::Wrench& t_out, const geometry_msgs::TransformStamped& transform) -{ - doTransform(t_in.force, t_out.force, transform); - doTransform(t_in.torque, t_out.torque, transform); -} - - -template<> -inline -void doTransform(const geometry_msgs::WrenchStamped& t_in, geometry_msgs::WrenchStamped& t_out, const geometry_msgs::TransformStamped& transform) -{ - doTransform(t_in.wrench, t_out.wrench, transform); - t_out.header.stamp = transform.header.stamp; - t_out.header.frame_id = transform.header.frame_id; -} - -} // namespace - -#endif // TF2_GEOMETRY_MSGS_H diff --git a/src/geometry2/tf2_geometry_msgs/index.rst b/src/geometry2/tf2_geometry_msgs/index.rst deleted file mode 100644 index b2cbcad..0000000 --- a/src/geometry2/tf2_geometry_msgs/index.rst +++ /dev/null @@ -1,17 +0,0 @@ -Welcome to tf2_geometry_msgs's documentation! -============================================= - -Contents: - -.. toctree:: - :maxdepth: 2 - - - -Indices and tables -================== - -* :ref:`genindex` -* :ref:`modindex` -* :ref:`search` - diff --git a/src/geometry2/tf2_geometry_msgs/mainpage.dox b/src/geometry2/tf2_geometry_msgs/mainpage.dox deleted file mode 100644 index 3641532..0000000 --- a/src/geometry2/tf2_geometry_msgs/mainpage.dox +++ /dev/null @@ -1,19 +0,0 @@ -/** -\mainpage -\htmlinclude manifest.html - -\b tf2_geometry_msgs contains functions for converting between various geometry_msgs data types. - -This library is an implementation of the templated conversion interface specified in tf/convert.h. -It offers conversion and transform convenience functions between various geometry_msgs data types, -such as Vector, Point, Pose, Transform, Quaternion, etc. - -See the Conversions overview -wiki page for more information about datatype conversion in tf2. 
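To make the header's usage concrete: a short sketch of transforming a stamped pose with the doTransform and fromMsg specializations defined above. A hand-built TransformStamped stands in for one normally obtained from a tf2_ros::Buffer lookup, and the frame names are illustrative:

    #include <tf2_geometry_msgs/tf2_geometry_msgs.h>

    int main()
    {
      // A pose expressed in frame "a" (identity orientation).
      geometry_msgs::PoseStamped pose;
      pose.header.frame_id = "a";
      pose.pose.position.x = 1.0;
      pose.pose.orientation.w = 1.0;

      // A transform taking data from frame "a" into frame "b".
      geometry_msgs::TransformStamped a_to_b;
      a_to_b.header.frame_id = "b";
      a_to_b.child_frame_id = "a";
      a_to_b.transform.translation.y = 2.0;
      a_to_b.transform.rotation.w = 1.0;

      // doTransform re-stamps the output with the transform's frame and time.
      geometry_msgs::PoseStamped pose_in_b;
      tf2::doTransform(pose, pose_in_b, a_to_b);   // position becomes (1, 2, 0)

      // The same header also converts between message and tf2 types.
      tf2::Stamped<tf2::Transform> as_tf2;
      tf2::fromMsg(pose, as_tf2);
      return 0;
    }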
- -\section codeapi Code API - -This library consists of one header only, tf2_geometry_msgs/tf2_geometry_msgs.h, which consists mostly of -specializations of template functions defined in tf2/convert.h. - -*/ diff --git a/src/geometry2/tf2_geometry_msgs/package.xml b/src/geometry2/tf2_geometry_msgs/package.xml deleted file mode 100644 index 43704ab..0000000 --- a/src/geometry2/tf2_geometry_msgs/package.xml +++ /dev/null @@ -1,27 +0,0 @@ - - tf2_geometry_msgs - 0.6.7 - - tf2_geometry_msgs - - Wim Meeussen - Tully Foote - BSD - - http://www.ros.org/wiki/tf2_ros - - catkin - - geometry_msgs - orocos_kdl - tf2 - tf2_ros - - python_orocos_kdl - - python_orocos_kdl - - ros_environment - rostest - - diff --git a/src/geometry2/tf2_geometry_msgs/rosdoc.yaml b/src/geometry2/tf2_geometry_msgs/rosdoc.yaml deleted file mode 100644 index a1d78b9..0000000 --- a/src/geometry2/tf2_geometry_msgs/rosdoc.yaml +++ /dev/null @@ -1,7 +0,0 @@ - - builder: doxygen - name: C++ API - output_dir: c++ - file_patterns: '*.c *.cpp *.h *.cc *.hh *.dox' - - builder: sphinx - name: Python API - output_dir: python diff --git a/src/geometry2/tf2_geometry_msgs/scripts/test.py b/src/geometry2/tf2_geometry_msgs/scripts/test.py deleted file mode 100755 index 25555da..0000000 --- a/src/geometry2/tf2_geometry_msgs/scripts/test.py +++ /dev/null @@ -1,100 +0,0 @@ -#!/usr/bin/env python - -import unittest -import rospy -import PyKDL -import tf2_ros -import tf2_geometry_msgs -from geometry_msgs.msg import TransformStamped, PointStamped, Vector3Stamped, PoseStamped, WrenchStamped - -class GeometryMsgs(unittest.TestCase): - def test_transform(self): - b = tf2_ros.Buffer() - t = TransformStamped() - t.transform.translation.x = 1 - t.transform.rotation.x = 1 - t.header.stamp = rospy.Time(2.0) - t.header.frame_id = 'a' - t.child_frame_id = 'b' - b.set_transform(t, 'eitan_rocks') - out = b.lookup_transform('a','b', rospy.Time(2.0), rospy.Duration(2.0)) - self.assertEqual(out.transform.translation.x, 1) - self.assertEqual(out.transform.rotation.x, 1) - self.assertEqual(out.header.frame_id, 'a') - self.assertEqual(out.child_frame_id, 'b') - - v = PointStamped() - v.header.stamp = rospy.Time(2) - v.header.frame_id = 'a' - v.point.x = 1 - v.point.y = 2 - v.point.z = 3 - out = b.transform(v, 'b') - self.assertEqual(out.point.x, 0) - self.assertEqual(out.point.y, -2) - self.assertEqual(out.point.z, -3) - - v = PoseStamped() - v.header.stamp = rospy.Time(2) - v.header.frame_id = 'a' - v.pose.position.x = 1 - v.pose.position.y = 2 - v.pose.position.z = 3 - v.pose.orientation.x = 1 - out = b.transform(v, 'b') - self.assertEqual(out.pose.position.x, 0) - self.assertEqual(out.pose.position.y, -2) - self.assertEqual(out.pose.position.z, -3) - - # Translation shouldn't affect Vector3 - t = TransformStamped() - t.transform.translation.x = 1 - t.transform.translation.y = 2 - t.transform.translation.z = 3 - t.transform.rotation.w = 1 - v = Vector3Stamped() - v.vector.x = 1 - v.vector.y = 0 - v.vector.z = 0 - out = tf2_geometry_msgs.do_transform_vector3(v, t) - self.assertEqual(out.vector.x, 1) - self.assertEqual(out.vector.y, 0) - self.assertEqual(out.vector.z, 0) - - # Rotate Vector3 180 deg about y - t = TransformStamped() - t.transform.translation.x = 1 - t.transform.translation.y = 2 - t.transform.translation.z = 3 - t.transform.rotation.y = 1 - - v = Vector3Stamped() - v.vector.x = 1 - v.vector.y = 0 - v.vector.z = 0 - - out = tf2_geometry_msgs.do_transform_vector3(v, t) - self.assertEqual(out.vector.x, -1) - self.assertEqual(out.vector.y, 0) - 
self.assertEqual(out.vector.z, 0) - - v = WrenchStamped() - v.wrench.force.x = 1 - v.wrench.force.y = 0 - v.wrench.force.z = 0 - v.wrench.torque.x = 1 - v.wrench.torque.y = 0 - v.wrench.torque.z = 0 - - out = tf2_geometry_msgs.do_transform_wrench(v, t) - self.assertEqual(out.wrench.force.x, -1) - self.assertEqual(out.wrench.force.y, 0) - self.assertEqual(out.wrench.force.z, 0) - self.assertEqual(out.wrench.torque.x, -1) - self.assertEqual(out.wrench.torque.y, 0) - self.assertEqual(out.wrench.torque.z, 0) - -if __name__ == '__main__': - import rosunit - rospy.init_node('test_tf2_geometry_msgs_python') - rosunit.unitrun("test_tf2_geometry_msgs", "test_tf2_geometry_msgs_python", GeometryMsgs) diff --git a/src/geometry2/tf2_geometry_msgs/setup.py b/src/geometry2/tf2_geometry_msgs/setup.py deleted file mode 100644 index debf796..0000000 --- a/src/geometry2/tf2_geometry_msgs/setup.py +++ /dev/null @@ -1,13 +0,0 @@ -#!/usr/bin/env python - -from distutils.core import setup -from catkin_pkg.python_setup import generate_distutils_setup - -d = generate_distutils_setup( - packages=['tf2_geometry_msgs'], - package_dir={'': 'src'}, - requires={'rospy','geometry_msgs','tf2_ros','orocos_kdl'} -) - -setup(**d) - diff --git a/src/geometry2/tf2_geometry_msgs/src/tf2_geometry_msgs/__init__.py b/src/geometry2/tf2_geometry_msgs/src/tf2_geometry_msgs/__init__.py deleted file mode 100644 index e072bac..0000000 --- a/src/geometry2/tf2_geometry_msgs/src/tf2_geometry_msgs/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from .tf2_geometry_msgs import * diff --git a/src/geometry2/tf2_geometry_msgs/src/tf2_geometry_msgs/tf2_geometry_msgs.py b/src/geometry2/tf2_geometry_msgs/src/tf2_geometry_msgs/tf2_geometry_msgs.py deleted file mode 100644 index f0edee2..0000000 --- a/src/geometry2/tf2_geometry_msgs/src/tf2_geometry_msgs/tf2_geometry_msgs.py +++ /dev/null @@ -1,110 +0,0 @@ -# Copyright (c) 2008, Willow Garage, Inc. -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in the -# documentation and/or other materials provided with the distribution. -# * Neither the name of the Willow Garage, Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE -# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -# POSSIBILITY OF SUCH DAMAGE. 
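The scripts/test.py above seeds a tf2_ros.Buffer by hand and transforms stamped messages through it. For comparison, the same flow on the C++ side of the package, as a minimal sketch; the frame names and expected values mirror the Python assertions, and none of this code is from the deleted sources:

```cpp
#include <tf2_ros/buffer.h>
#include <tf2_geometry_msgs/tf2_geometry_msgs.h>
#include <geometry_msgs/PointStamped.h>

void example()
{
  tf2_ros::Buffer buffer;

  geometry_msgs::TransformStamped t;
  t.header.stamp = ros::Time(2.0);
  t.header.frame_id = "a";
  t.child_frame_id = "b";
  t.transform.translation.x = 1;
  t.transform.rotation.x = 1;  // 180 degrees about x
  buffer.setTransform(t, "test_authority");

  geometry_msgs::PointStamped p;
  p.header.stamp = ros::Time(2.0);
  p.header.frame_id = "a";
  p.point.x = 1; p.point.y = 2; p.point.z = 3;

  // Buffer::transform picks up the doTransform specialization from the header.
  geometry_msgs::PointStamped q = buffer.transform(p, "b");
  // Expect q.point == (0, -2, -3), matching the Python assertions above.
}
```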
- -# author: Wim Meeussen - -from geometry_msgs.msg import PoseStamped, Vector3Stamped, PointStamped, WrenchStamped -import PyKDL -import rospy -import tf2_ros -import copy - -def to_msg_msg(msg): - return msg - -tf2_ros.ConvertRegistration().add_to_msg(Vector3Stamped, to_msg_msg) -tf2_ros.ConvertRegistration().add_to_msg(PoseStamped, to_msg_msg) -tf2_ros.ConvertRegistration().add_to_msg(PointStamped, to_msg_msg) - -def from_msg_msg(msg): - return msg - -tf2_ros.ConvertRegistration().add_from_msg(Vector3Stamped, from_msg_msg) -tf2_ros.ConvertRegistration().add_from_msg(PoseStamped, from_msg_msg) -tf2_ros.ConvertRegistration().add_from_msg(PointStamped, from_msg_msg) - -def transform_to_kdl(t): - return PyKDL.Frame(PyKDL.Rotation.Quaternion(t.transform.rotation.x, t.transform.rotation.y, - t.transform.rotation.z, t.transform.rotation.w), - PyKDL.Vector(t.transform.translation.x, - t.transform.translation.y, - t.transform.translation.z)) - - -# PointStamped -def do_transform_point(point, transform): - p = transform_to_kdl(transform) * PyKDL.Vector(point.point.x, point.point.y, point.point.z) - res = PointStamped() - res.point.x = p[0] - res.point.y = p[1] - res.point.z = p[2] - res.header = transform.header - return res -tf2_ros.TransformRegistration().add(PointStamped, do_transform_point) - - -# Vector3Stamped -def do_transform_vector3(vector3, transform): - transform = copy.deepcopy(transform) - transform.transform.translation.x = 0; - transform.transform.translation.y = 0; - transform.transform.translation.z = 0; - p = transform_to_kdl(transform) * PyKDL.Vector(vector3.vector.x, vector3.vector.y, vector3.vector.z) - res = Vector3Stamped() - res.vector.x = p[0] - res.vector.y = p[1] - res.vector.z = p[2] - res.header = transform.header - return res -tf2_ros.TransformRegistration().add(Vector3Stamped, do_transform_vector3) - -# PoseStamped -def do_transform_pose(pose, transform): - f = transform_to_kdl(transform) * PyKDL.Frame(PyKDL.Rotation.Quaternion(pose.pose.orientation.x, pose.pose.orientation.y, - pose.pose.orientation.z, pose.pose.orientation.w), - PyKDL.Vector(pose.pose.position.x, pose.pose.position.y, pose.pose.position.z)) - res = PoseStamped() - res.pose.position.x = f[(0, 3)] - res.pose.position.y = f[(1, 3)] - res.pose.position.z = f[(2, 3)] - (res.pose.orientation.x, res.pose.orientation.y, res.pose.orientation.z, res.pose.orientation.w) = f.M.GetQuaternion() - res.header = transform.header - return res -tf2_ros.TransformRegistration().add(PoseStamped, do_transform_pose) - -# WrenchStamped -def do_transform_wrench(wrench, transform): - force = Vector3Stamped() - torque = Vector3Stamped() - force.vector = wrench.wrench.force - torque.vector = wrench.wrench.torque - res = WrenchStamped() - res.wrench.force = do_transform_vector3(force, transform).vector - res.wrench.torque = do_transform_vector3(torque, transform).vector - res.header = transform.header - return res -tf2_ros.TransformRegistration().add(WrenchStamped, do_transform_wrench) diff --git a/src/geometry2/tf2_geometry_msgs/test/test.launch b/src/geometry2/tf2_geometry_msgs/test/test.launch deleted file mode 100644 index 53bab78..0000000 --- a/src/geometry2/tf2_geometry_msgs/test/test.launch +++ /dev/null @@ -1,3 +0,0 @@ - - - \ No newline at end of file diff --git a/src/geometry2/tf2_geometry_msgs/test/test_python.launch b/src/geometry2/tf2_geometry_msgs/test/test_python.launch deleted file mode 100644 index b6bc793..0000000 --- a/src/geometry2/tf2_geometry_msgs/test/test_python.launch +++ /dev/null @@ -1,3 +0,0 @@ - - 
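tf2_geometry_msgs.py above wires Python types into tf2_ros through ConvertRegistration and TransformRegistration. The C++ analogue is specializing the templates declared in tf2/convert.h, as the package's header does. A sketch for a hypothetical user-defined MyPoint; the type and all of its members are illustrative only:

```cpp
#include <string>
#include <ros/time.h>
#include <tf2/convert.h>
#include <tf2_geometry_msgs/tf2_geometry_msgs.h>

// Hypothetical user type, not part of any package in this diff.
struct MyPoint
{
  double x, y, z;
  ros::Time stamp;
  std::string frame_id;
};

namespace tf2
{
template <>
inline const ros::Time& getTimestamp(const MyPoint& p) { return p.stamp; }

template <>
inline const std::string& getFrameId(const MyPoint& p) { return p.frame_id; }

template <>
inline void doTransform(const MyPoint& in, MyPoint& out,
                        const geometry_msgs::TransformStamped& transform)
{
  // Reuse the PointStamped specialization rather than redoing the math.
  geometry_msgs::PointStamped tmp_in, tmp_out;
  tmp_in.point.x = in.x; tmp_in.point.y = in.y; tmp_in.point.z = in.z;
  doTransform(tmp_in, tmp_out, transform);
  out.x = tmp_out.point.x; out.y = tmp_out.point.y; out.z = tmp_out.point.z;
  out.stamp = transform.header.stamp;
  out.frame_id = transform.header.frame_id;
}
} // namespace tf2
```

With those three specializations in place, tf2_ros::Buffer::transform() accepts MyPoint just as it accepts the message types registered on the Python side.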
- diff --git a/src/geometry2/tf2_geometry_msgs/test/test_tf2_geometry_msgs.cpp b/src/geometry2/tf2_geometry_msgs/test/test_tf2_geometry_msgs.cpp deleted file mode 100644 index 860db53..0000000 --- a/src/geometry2/tf2_geometry_msgs/test/test_tf2_geometry_msgs.cpp +++ /dev/null @@ -1,380 +0,0 @@ -/* - * Copyright (c) 2008, Willow Garage, Inc. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the Willow Garage, Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. 
- */ - -/** \author Wim Meeussen */ - - -#include -#include -#include -#include -#include -#include - -tf2_ros::Buffer* tf_buffer; -geometry_msgs::TransformStamped t; -static const double EPS = 1e-3; - - -TEST(TfGeometry, Frame) -{ - geometry_msgs::PoseStamped v1; - v1.pose.position.x = 1; - v1.pose.position.y = 2; - v1.pose.position.z = 3; - v1.pose.orientation.x = 1; - v1.header.stamp = ros::Time(2); - v1.header.frame_id = "A"; - - // simple api - geometry_msgs::PoseStamped v_simple = tf_buffer->transform(v1, "B", ros::Duration(2.0)); - EXPECT_NEAR(v_simple.pose.position.x, -9, EPS); - EXPECT_NEAR(v_simple.pose.position.y, 18, EPS); - EXPECT_NEAR(v_simple.pose.position.z, 27, EPS); - EXPECT_NEAR(v_simple.pose.orientation.x, 0.0, EPS); - EXPECT_NEAR(v_simple.pose.orientation.y, 0.0, EPS); - EXPECT_NEAR(v_simple.pose.orientation.z, 0.0, EPS); - EXPECT_NEAR(v_simple.pose.orientation.w, 1.0, EPS); - - // advanced api - geometry_msgs::PoseStamped v_advanced = tf_buffer->transform(v1, "B", ros::Time(2.0), - "A", ros::Duration(3.0)); - EXPECT_NEAR(v_advanced.pose.position.x, -9, EPS); - EXPECT_NEAR(v_advanced.pose.position.y, 18, EPS); - EXPECT_NEAR(v_advanced.pose.position.z, 27, EPS); - EXPECT_NEAR(v_advanced.pose.orientation.x, 0.0, EPS); - EXPECT_NEAR(v_advanced.pose.orientation.y, 0.0, EPS); - EXPECT_NEAR(v_advanced.pose.orientation.z, 0.0, EPS); - EXPECT_NEAR(v_advanced.pose.orientation.w, 1.0, EPS); -} - -TEST(TfGeometry, PoseWithCovarianceStamped) -{ - geometry_msgs::PoseWithCovarianceStamped v1; - v1.pose.pose.position.x = 1; - v1.pose.pose.position.y = 2; - v1.pose.pose.position.z = 3; - v1.pose.pose.orientation.x = 1; - v1.header.stamp = ros::Time(2); - v1.header.frame_id = "A"; - v1.pose.covariance[0] = 1; - v1.pose.covariance[7] = 1; - v1.pose.covariance[14] = 1; - v1.pose.covariance[21] = 1; - v1.pose.covariance[28] = 1; - v1.pose.covariance[35] = 1; - - // simple api - const geometry_msgs::PoseWithCovarianceStamped v_simple = tf_buffer->transform(v1, "B", ros::Duration(2.0)); - EXPECT_NEAR(v_simple.pose.pose.position.x, -9, EPS); - EXPECT_NEAR(v_simple.pose.pose.position.y, 18, EPS); - EXPECT_NEAR(v_simple.pose.pose.position.z, 27, EPS); - EXPECT_NEAR(v_simple.pose.pose.orientation.x, 0.0, EPS); - EXPECT_NEAR(v_simple.pose.pose.orientation.y, 0.0, EPS); - EXPECT_NEAR(v_simple.pose.pose.orientation.z, 0.0, EPS); - EXPECT_NEAR(v_simple.pose.pose.orientation.w, 1.0, EPS); - - // no rotation in this transformation, so no change to covariance - EXPECT_NEAR(v_simple.pose.covariance[0], 1.0, EPS); - EXPECT_NEAR(v_simple.pose.covariance[7], 1.0, EPS); - EXPECT_NEAR(v_simple.pose.covariance[14], 1.0, EPS); - EXPECT_NEAR(v_simple.pose.covariance[21], 1.0, EPS); - EXPECT_NEAR(v_simple.pose.covariance[28], 1.0, EPS); - EXPECT_NEAR(v_simple.pose.covariance[35], 1.0, EPS); - - // advanced api - const geometry_msgs::PoseWithCovarianceStamped v_advanced = tf_buffer->transform(v1, "B", ros::Time(2.0), - "A", ros::Duration(3.0)); - EXPECT_NEAR(v_advanced.pose.pose.position.x, -9, EPS); - EXPECT_NEAR(v_advanced.pose.pose.position.y, 18, EPS); - EXPECT_NEAR(v_advanced.pose.pose.position.z, 27, EPS); - EXPECT_NEAR(v_advanced.pose.pose.orientation.x, 0.0, EPS); - EXPECT_NEAR(v_advanced.pose.pose.orientation.y, 0.0, EPS); - EXPECT_NEAR(v_advanced.pose.pose.orientation.z, 0.0, EPS); - EXPECT_NEAR(v_advanced.pose.pose.orientation.w, 1.0, EPS); - - // no rotation in this transformation, so no change to covariance - EXPECT_NEAR(v_advanced.pose.covariance[0], 1.0, EPS); - 
EXPECT_NEAR(v_advanced.pose.covariance[7], 1.0, EPS); - EXPECT_NEAR(v_advanced.pose.covariance[14], 1.0, EPS); - EXPECT_NEAR(v_advanced.pose.covariance[21], 1.0, EPS); - EXPECT_NEAR(v_advanced.pose.covariance[28], 1.0, EPS); - EXPECT_NEAR(v_advanced.pose.covariance[35], 1.0, EPS); - - /** now add rotation to transform to test the effect on covariance **/ - - // rotate pi/2 radians about x-axis - geometry_msgs::TransformStamped t_rot; - t_rot.transform.rotation = tf2::toMsg(tf2::Quaternion(tf2::Vector3(1,0,0), M_PI/2)); - t_rot.header.stamp = ros::Time(2.0); - t_rot.header.frame_id = "A"; - t_rot.child_frame_id = "rotated"; - tf_buffer->setTransform(t_rot, "rotation_test"); - - // need to put some covariance in the matrix - v1.pose.covariance[1] = 1; - v1.pose.covariance[6] = 1; - v1.pose.covariance[12] = 1; - - // perform rotation - const geometry_msgs::PoseWithCovarianceStamped v_rotated = tf_buffer->transform(v1, "rotated", ros::Duration(2.0)); - - // the covariance matrix should now be transformed - EXPECT_NEAR(v_rotated.pose.covariance[0], 1.0, EPS); - EXPECT_NEAR(v_rotated.pose.covariance[1], 0.0, EPS); - EXPECT_NEAR(v_rotated.pose.covariance[2],-1.0, EPS); - EXPECT_NEAR(v_rotated.pose.covariance[6], 1.0, EPS); - EXPECT_NEAR(v_rotated.pose.covariance[7], 1.0, EPS); - EXPECT_NEAR(v_rotated.pose.covariance[8], 0.0, EPS); - EXPECT_NEAR(v_rotated.pose.covariance[12],-1.0, EPS); - EXPECT_NEAR(v_rotated.pose.covariance[13], 0.0, EPS); - EXPECT_NEAR(v_rotated.pose.covariance[14], 1.0, EPS); - - // set buffer back to original transform - tf_buffer->setTransform(t, "test"); -} - -TEST(TfGeometry, Transform) -{ - geometry_msgs::TransformStamped v1; - v1.transform.translation.x = 1; - v1.transform.translation.y = 2; - v1.transform.translation.z = 3; - v1.transform.rotation.x = 1; - v1.header.stamp = ros::Time(2); - v1.header.frame_id = "A"; - - // simple api - geometry_msgs::TransformStamped v_simple = tf_buffer->transform(v1, "B", ros::Duration(2.0)); - EXPECT_NEAR(v_simple.transform.translation.x, -9, EPS); - EXPECT_NEAR(v_simple.transform.translation.y, 18, EPS); - EXPECT_NEAR(v_simple.transform.translation.z, 27, EPS); - EXPECT_NEAR(v_simple.transform.rotation.x, 0.0, EPS); - EXPECT_NEAR(v_simple.transform.rotation.y, 0.0, EPS); - EXPECT_NEAR(v_simple.transform.rotation.z, 0.0, EPS); - EXPECT_NEAR(v_simple.transform.rotation.w, 1.0, EPS); - - - // advanced api - geometry_msgs::TransformStamped v_advanced = tf_buffer->transform(v1, "B", ros::Time(2.0), - "A", ros::Duration(3.0)); - EXPECT_NEAR(v_advanced.transform.translation.x, -9, EPS); - EXPECT_NEAR(v_advanced.transform.translation.y, 18, EPS); - EXPECT_NEAR(v_advanced.transform.translation.z, 27, EPS); - EXPECT_NEAR(v_advanced.transform.rotation.x, 0.0, EPS); - EXPECT_NEAR(v_advanced.transform.rotation.y, 0.0, EPS); - EXPECT_NEAR(v_advanced.transform.rotation.z, 0.0, EPS); - EXPECT_NEAR(v_advanced.transform.rotation.w, 1.0, EPS); -} - -TEST(TfGeometry, Vector) -{ - geometry_msgs::Vector3Stamped v1, res; - v1.vector.x = 1; - v1.vector.y = 2; - v1.vector.z = 3; - v1.header.stamp = ros::Time(2.0); - v1.header.frame_id = "A"; - - // simple api - geometry_msgs::Vector3Stamped v_simple = tf_buffer->transform(v1, "B", ros::Duration(2.0)); - EXPECT_NEAR(v_simple.vector.x, 1, EPS); - EXPECT_NEAR(v_simple.vector.y, -2, EPS); - EXPECT_NEAR(v_simple.vector.z, -3, EPS); - - // advanced api - geometry_msgs::Vector3Stamped v_advanced = tf_buffer->transform(v1, "B", ros::Time(2.0), - "A", ros::Duration(3.0)); - EXPECT_NEAR(v_advanced.vector.x, 1, EPS); - 
EXPECT_NEAR(v_advanced.vector.y, -2, EPS); - EXPECT_NEAR(v_advanced.vector.z, -3, EPS); -} - - -TEST(TfGeometry, Point) -{ - geometry_msgs::PointStamped v1, res; - v1.point.x = 1; - v1.point.y = 2; - v1.point.z = 3; - v1.header.stamp = ros::Time(2.0); - v1.header.frame_id = "A"; - - // simple api - geometry_msgs::PointStamped v_simple = tf_buffer->transform(v1, "B", ros::Duration(2.0)); - EXPECT_NEAR(v_simple.point.x, -9, EPS); - EXPECT_NEAR(v_simple.point.y, 18, EPS); - EXPECT_NEAR(v_simple.point.z, 27, EPS); - - // advanced api - geometry_msgs::PointStamped v_advanced = tf_buffer->transform(v1, "B", ros::Time(2.0), - "A", ros::Duration(3.0)); - EXPECT_NEAR(v_advanced.point.x, -9, EPS); - EXPECT_NEAR(v_advanced.point.y, 18, EPS); - EXPECT_NEAR(v_advanced.point.z, 27, EPS); -} - -TEST(TfGeometry, doTransformPoint) -{ - geometry_msgs::Point v1, res; - v1.x = 2; - v1.y = 1; - v1.z = 3; - - geometry_msgs::TransformStamped trafo; - trafo.transform.translation.x = -1; - trafo.transform.translation.y = 2; - trafo.transform.translation.z = -3; - trafo.transform.rotation = tf2::toMsg(tf2::Quaternion(tf2::Vector3(0,0,1), -M_PI / 2.0)); - - tf2::doTransform(v1, res, trafo); - - EXPECT_NEAR(res.x, 0, EPS); - EXPECT_NEAR(res.y, 0, EPS); - EXPECT_NEAR(res.z, 0, EPS); -} - -TEST(TfGeometry, doTransformQuaterion) -{ - geometry_msgs::Quaternion v1, res; - v1.w = 1; - - geometry_msgs::TransformStamped trafo; - trafo.transform.translation.x = -1; - trafo.transform.translation.y = 2; - trafo.transform.translation.z = -3; - trafo.transform.rotation = tf2::toMsg(tf2::Quaternion(tf2::Vector3(0,0,1), -M_PI / 2.0)); - - tf2::doTransform(v1, res, trafo); - - EXPECT_NEAR(res.x, trafo.transform.rotation.x, EPS); - EXPECT_NEAR(res.y, trafo.transform.rotation.y, EPS); - EXPECT_NEAR(res.z, trafo.transform.rotation.z, EPS); - EXPECT_NEAR(res.w, trafo.transform.rotation.w, EPS); -} - -TEST(TfGeometry, doTransformPose) -{ - geometry_msgs::Pose v1, res; - v1.position.x = 2; - v1.position.y = 1; - v1.position.z = 3; - v1.orientation.w = 1; - - geometry_msgs::TransformStamped trafo; - trafo.transform.translation.x = -1; - trafo.transform.translation.y = 2; - trafo.transform.translation.z = -3; - trafo.transform.rotation = tf2::toMsg(tf2::Quaternion(tf2::Vector3(0,0,1), -M_PI / 2.0)); - - tf2::doTransform(v1, res, trafo); - - EXPECT_NEAR(res.position.x, 0, EPS); - EXPECT_NEAR(res.position.y, 0, EPS); - EXPECT_NEAR(res.position.z, 0, EPS); - - EXPECT_NEAR(res.orientation.x, trafo.transform.rotation.x, EPS); - EXPECT_NEAR(res.orientation.y, trafo.transform.rotation.y, EPS); - EXPECT_NEAR(res.orientation.z, trafo.transform.rotation.z, EPS); - EXPECT_NEAR(res.orientation.w, trafo.transform.rotation.w, EPS); -} - -TEST(TfGeometry, doTransformVector3) -{ - geometry_msgs::Vector3 v1, res; - v1.x = 2; - v1.y = 1; - v1.z = 3; - - geometry_msgs::TransformStamped trafo; - trafo.transform.translation.x = -1; - trafo.transform.translation.y = 2; - trafo.transform.translation.z = -3; - trafo.transform.rotation = tf2::toMsg(tf2::Quaternion(tf2::Vector3(0,0,1), -M_PI / 2.0)); - - tf2::doTransform(v1, res, trafo); - - EXPECT_NEAR(res.x, 1, EPS); - EXPECT_NEAR(res.y, -2, EPS); - EXPECT_NEAR(res.z, 3, EPS); -} - -TEST(TfGeometry, doTransformWrench) -{ - geometry_msgs::Wrench v1, res; - v1.force.x = 2; - v1.force.y = 1; - v1.force.z = 3; - v1.torque.x = 2; - v1.torque.y = 1; - v1.torque.z = 3; - - geometry_msgs::TransformStamped trafo; - trafo.transform.translation.x = -1; - trafo.transform.translation.y = 2; - trafo.transform.translation.z = 
-3; - trafo.transform.rotation = tf2::toMsg(tf2::Quaternion(tf2::Vector3(0,0,1), -M_PI / 2.0)); - - tf2::doTransform(v1, res, trafo); - EXPECT_NEAR(res.force.x, 1, EPS); - EXPECT_NEAR(res.force.y, -2, EPS); - EXPECT_NEAR(res.force.z, 3, EPS); - - EXPECT_NEAR(res.torque.x, 1, EPS); - EXPECT_NEAR(res.torque.y, -2, EPS); - EXPECT_NEAR(res.torque.z, 3, EPS); -} - -int main(int argc, char **argv){ - testing::InitGoogleTest(&argc, argv); - ros::init(argc, argv, "test"); - ros::NodeHandle n; - - tf_buffer = new tf2_ros::Buffer(); - tf_buffer->setUsingDedicatedThread(true); - - // populate buffer - t.transform.translation.x = 10; - t.transform.translation.y = 20; - t.transform.translation.z = 30; - t.transform.rotation.x = 1; - t.header.stamp = ros::Time(2.0); - t.header.frame_id = "A"; - t.child_frame_id = "B"; - tf_buffer->setTransform(t, "test"); - - int ret = RUN_ALL_TESTS(); - delete tf_buffer; - return ret; -} - - - - - diff --git a/src/geometry2/tf2_geometry_msgs/test/test_tomsg_frommsg.cpp b/src/geometry2/tf2_geometry_msgs/test/test_tomsg_frommsg.cpp deleted file mode 100644 index 3139f23..0000000 --- a/src/geometry2/tf2_geometry_msgs/test/test_tomsg_frommsg.cpp +++ /dev/null @@ -1,405 +0,0 @@ -/* - * Copyright (c) 2008, Willow Garage, Inc. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the Willow Garage, Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. 
- */ - -/** \author Wim Meeussen */ - - -#include -#include - -static const double EPS = 1e-6; - -tf2::Vector3 get_tf2_vector() -{ - return tf2::Vector3(1.0, 2.0, 3.0); -} - -geometry_msgs::Vector3& value_initialize(geometry_msgs::Vector3 &m1) -{ - m1.x = 1; - m1.y = 2; - m1.z = 3; - return m1; -} - -std_msgs::Header& value_initialize(std_msgs::Header & h) -{ - h.stamp = ros::Time(10); - h.frame_id = "foobar"; - return h; -} - -geometry_msgs::Vector3Stamped& value_initialize(geometry_msgs::Vector3Stamped &m1) -{ - value_initialize(m1.header); - value_initialize(m1.vector); - return m1; -} - -geometry_msgs::Point& value_initialize(geometry_msgs::Point &m1) -{ - m1.x = 1; - m1.y = 2; - m1.z = 3; - return m1; -} - -geometry_msgs::PointStamped& value_initialize(geometry_msgs::PointStamped &m1) -{ - value_initialize(m1.header); - value_initialize(m1.point); - return m1; -} - -geometry_msgs::Quaternion & value_initialize(geometry_msgs::Quaternion &m1) -{ - m1.x = 0; - m1.y = 0; - m1.z = 0.7071067811; - m1.w = 0.7071067811; - return m1; -} - -geometry_msgs::QuaternionStamped& value_initialize(geometry_msgs::QuaternionStamped &m1) -{ - value_initialize(m1.header); - value_initialize(m1.quaternion); - return m1; -} - -geometry_msgs::Pose & value_initialize(geometry_msgs::Pose & m1) -{ - value_initialize(m1.position); - value_initialize(m1.orientation); - return m1; -} - -geometry_msgs::PoseStamped& value_initialize(geometry_msgs::PoseStamped &m1) -{ - value_initialize(m1.header); - value_initialize(m1.pose); - return m1; -} - -geometry_msgs::Transform & value_initialize(geometry_msgs::Transform & m1) -{ - value_initialize(m1.translation); - value_initialize(m1.rotation); - return m1; -} - -geometry_msgs::TransformStamped& value_initialize(geometry_msgs::TransformStamped &m1) -{ - value_initialize(m1.header); - value_initialize(m1.transform); - return m1; -} - -void expect_near(const std_msgs::Header & h1, const std_msgs::Header & h2) -{ - EXPECT_NEAR(h1.stamp.toSec(), h2.stamp.toSec(), EPS); - EXPECT_STREQ(h1.frame_id.c_str(), h2.frame_id.c_str()); -} - -/* - * Vector3 - */ -void expect_near(const geometry_msgs::Vector3 & v1, const tf2::Vector3 & v2) -{ - EXPECT_NEAR(v1.x, v2.x(), EPS); - EXPECT_NEAR(v1.y, v2.y(), EPS); - EXPECT_NEAR(v1.z, v2.z(), EPS); -} - -void expect_near(const geometry_msgs::Vector3 & v1, const geometry_msgs::Vector3 & v2) -{ - EXPECT_NEAR(v1.x, v2.x, EPS); - EXPECT_NEAR(v1.y, v2.y, EPS); - EXPECT_NEAR(v1.z, v2.z, EPS); -} - -void expect_near(const tf2::Vector3 & v1, const tf2::Vector3 & v2) -{ - EXPECT_NEAR(v1.x(), v2.x(), EPS); - EXPECT_NEAR(v1.y(), v2.y(), EPS); - EXPECT_NEAR(v1.z(), v2.z(), EPS); -} - -void expect_near(const geometry_msgs::Vector3Stamped & p1, const geometry_msgs::Vector3Stamped & p2) -{ - expect_near(p1.header, p2.header); - expect_near(p1.vector, p2.vector); -} - -/* - * Point - */ -void expect_near(const geometry_msgs::Point & p1, const tf2::Vector3 & v2) -{ - EXPECT_NEAR(p1.x, v2.x(), EPS); - EXPECT_NEAR(p1.y, v2.y(), EPS); - EXPECT_NEAR(p1.z, v2.z(), EPS); -} - -void expect_near(const geometry_msgs::Point & p1, const geometry_msgs::Point & v2) -{ - EXPECT_NEAR(p1.x, v2.x, EPS); - EXPECT_NEAR(p1.y, v2.y, EPS); - EXPECT_NEAR(p1.z, v2.z, EPS); -} - -void expect_near(const geometry_msgs::PointStamped & p1, const geometry_msgs::PointStamped & p2) -{ - expect_near(p1.header, p2.header); - expect_near(p1.point, p2.point); -} - - -/* - * Quaternion - */ -void expect_near(const geometry_msgs::Quaternion & q1, const tf2::Quaternion & v2) -{ - EXPECT_NEAR(q1.x, 
v2.x(), EPS); - EXPECT_NEAR(q1.y, v2.y(), EPS); - EXPECT_NEAR(q1.z, v2.z(), EPS); -} - -void expect_near(const geometry_msgs::Quaternion & q1, const geometry_msgs::Quaternion & v2) -{ - EXPECT_NEAR(q1.x, v2.x, EPS); - EXPECT_NEAR(q1.y, v2.y, EPS); - EXPECT_NEAR(q1.z, v2.z, EPS); -} - -void expect_near(const geometry_msgs::QuaternionStamped & p1, const geometry_msgs::QuaternionStamped & p2) -{ - expect_near(p1.header, p2.header); - expect_near(p1.quaternion, p2.quaternion); -} - -/* - * Pose - */ -void expect_near(const geometry_msgs::Pose & p, const tf2::Transform & t) -{ - expect_near(p.position, t.getOrigin()); - expect_near(p.orientation, t.getRotation()); -} - -void expect_near(const geometry_msgs::Pose & p1, const geometry_msgs::Pose & p2) -{ - expect_near(p1.position, p2.position); - expect_near(p1.orientation, p2.orientation); -} - -void expect_near(const geometry_msgs::PoseStamped & p1, const geometry_msgs::PoseStamped & p2) -{ - expect_near(p1.header, p2.header); - expect_near(p1.pose, p2.pose); -} - -/* - * Transform - */ -void expect_near(const geometry_msgs::Transform & p, const tf2::Transform & t) -{ - expect_near(p.translation, t.getOrigin()); - expect_near(p.rotation, t.getRotation()); -} - -void expect_near(const geometry_msgs::Transform & p1, const geometry_msgs::Transform & p2) -{ - expect_near(p1.translation, p2.translation); - expect_near(p1.rotation, p2.rotation); -} - -void expect_near(const geometry_msgs::TransformStamped & p1, const geometry_msgs::TransformStamped & p2) -{ - expect_near(p1.header, p2.header); - expect_near(p1.transform, p2.transform); -} - -/* - * Stamped templated expect_near - */ - -template <typename T> -void expect_near(const tf2::Stamped<T>& s1, const tf2::Stamped<T>& s2) -{ - expect_near((T)s1, (T)s2); -} - -/********************* - * Tests - *********************/ - -TEST(tf2_geometry_msgs, Vector3) -{ - geometry_msgs::Vector3 m1; - value_initialize(m1); - tf2::Vector3 v1; - fromMsg(m1, v1); - SCOPED_TRACE("m1 v1"); - expect_near(m1, v1); - geometry_msgs::Vector3 m2 = toMsg(v1); - SCOPED_TRACE("m1 m2"); - expect_near(m1, m2); -} - -TEST(tf2_geometry_msgs, Point) -{ - geometry_msgs::Point m1; - value_initialize(m1); - tf2::Vector3 v1; - SCOPED_TRACE("m1 v1"); - fromMsg(m1, v1); - expect_near(m1, v1); - geometry_msgs::Point m2 = toMsg(v1, m2); - SCOPED_TRACE("m1 m2"); - expect_near(m1, m2); -} - -TEST(tf2_geometry_msgs, Quaternion) -{ - geometry_msgs::Quaternion m1; - value_initialize(m1); - tf2::Quaternion q1; - SCOPED_TRACE("m1 q1"); - fromMsg(m1, q1); - expect_near(m1, q1); - geometry_msgs::Quaternion m2 = toMsg(q1); - SCOPED_TRACE("m1 m2"); - expect_near(m1, m2); -} - -TEST(tf2_geometry_msgs, Pose) -{ - geometry_msgs::Pose m1; - value_initialize(m1); - tf2::Transform t1; - fromMsg(m1, t1); - SCOPED_TRACE("m1 t1"); - expect_near(m1, t1); - geometry_msgs::Pose m2 = toMsg(t1, m2); - SCOPED_TRACE("m1 m2"); - expect_near(m1, m2); -} - -TEST(tf2_geometry_msgs, Transform) -{ - geometry_msgs::Transform m1; - value_initialize(m1); - tf2::Transform t1; - fromMsg(m1, t1); - SCOPED_TRACE("m1 t1"); - expect_near(m1, t1); - geometry_msgs::Transform m2 = toMsg(t1); - SCOPED_TRACE("m1 m2"); - expect_near(m1, m2); -} - -TEST(tf2_geometry_msgs, Vector3Stamped) -{ - geometry_msgs::Vector3Stamped m1; - value_initialize(m1); - tf2::Stamped<tf2::Vector3> v1; - fromMsg(m1, v1); - SCOPED_TRACE("m1 v1"); - // expect_near(m1, v1); - geometry_msgs::Vector3Stamped m2; - m2 = toMsg(v1); - SCOPED_TRACE("m1 m2"); - expect_near(m1, m2); -} - -TEST(tf2_geometry_msgs, PointStamped) -{ - 
geometry_msgs::PointStamped m1; - value_initialize(m1); - tf2::Stamped<tf2::Vector3> v1; - fromMsg(m1, v1); - SCOPED_TRACE("m1 v1"); - // expect_near(m1, v1); //TODO implement cross verification explicitly - geometry_msgs::PointStamped m2; - m2 = toMsg(v1, m2); - SCOPED_TRACE("m1 m2"); - expect_near(m1, m2); -} - -TEST(tf2_geometry_msgs, QuaternionStamped) -{ - geometry_msgs::QuaternionStamped m1; - value_initialize(m1); - tf2::Stamped<tf2::Quaternion> v1; - fromMsg(m1, v1); - SCOPED_TRACE("m1 v1"); - // expect_near(m1, v1); //TODO implement cross verification explicitly - geometry_msgs::QuaternionStamped m2; - m2 = tf2::toMsg(v1); - SCOPED_TRACE("m1 m2"); - expect_near(m1, m2); -} - -TEST(tf2_geometry_msgs, PoseStamped) -{ - geometry_msgs::PoseStamped m1; - value_initialize(m1); - tf2::Stamped<tf2::Transform> v1; - SCOPED_TRACE("m1 v1"); - fromMsg(m1, v1); - // expect_near(m1, v1); //TODO implement cross verification explicitly - geometry_msgs::PoseStamped m2; - m2 = tf2::toMsg(v1, m2); - SCOPED_TRACE("m1 m2"); - expect_near(m1, m2); -} - -TEST(tf2_geometry_msgs, TransformStamped) -{ - geometry_msgs::TransformStamped m1; - value_initialize(m1); - tf2::Stamped<tf2::Transform> v1; - fromMsg(m1, v1); - SCOPED_TRACE("m1 v1"); - // expect_near(m1, v1); - geometry_msgs::TransformStamped m2; - m2 = tf2::toMsg(v1); - SCOPED_TRACE("m1 m2"); - expect_near(m1, m2); -} - - - - -int main(int argc, char **argv){ - testing::InitGoogleTest(&argc, argv); - int ret = RUN_ALL_TESTS(); - return ret; -} diff --git a/src/geometry2/tf2_kdl/CHANGELOG.rst b/src/geometry2/tf2_kdl/CHANGELOG.rst deleted file mode 100644 index a99b498..0000000 --- a/src/geometry2/tf2_kdl/CHANGELOG.rst +++ /dev/null @@ -1,221 +0,0 @@ -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -Changelog for package tf2_kdl -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -0.6.7 (2020-03-09) ------------------- - -0.6.6 (2020-01-09) ------------------- -* Make kdl headers available (`#419 `_) -* Fix python3 compatibility for noetic (`#416 `_) -* Remove roslib.load_manifest `#404 `_ from otamachan/remove-load-manifest -* Python 3 compatibility: relative imports and print statement -* Contributors: Shane Loretz, Tamaki Nishino, Timon Engelke, Tully Foote - -0.6.5 (2018-11-16) ------------------- - -0.6.4 (2018-11-06) ------------------- - -0.6.3 (2018-07-09) ------------------- - -0.6.2 (2018-05-02) ------------------- -* Adds additional conversions for tf2, KDL, Eigen (`#292 `_) - - adds non-stamped Eigen to Transform function - - converts Eigen Matrix Vectors to and from geometry_msgs::Twist - - adds to/from message for geometry_msgs::Pose and KDL::Frame -* Contributors: Ian McMahon - -0.6.1 (2018-03-21) ------------------- - -0.6.0 (2018-03-21) ------------------- - -0.5.17 (2018-01-01) -------------------- -* Merge pull request `#257 `_ from delftrobotics-forks/python3 - Make tf2_py python3 compatible again -* Use python3 print function. -* Contributors: Maarten de Vries, Tully Foote - -0.5.16 (2017-07-14) -------------------- -* store gtest return value as int (`#229 `_) -* Find eigen in a much nicer way. -* Switch tf2_kdl over to package.xml format 2. 
-* Contributors: Chris Lalancette, dhood - -0.5.15 (2017-01-24) -------------------- - -0.5.14 (2017-01-16) -------------------- -* Add Python documentation for tf2_kdl -* Document kdl -* Contributors: Jackie Kay - -0.5.13 (2016-03-04) -------------------- -* converting python test script into unit test -* Don't export catkin includes -* Contributors: Jochen Sprickerhof, Tully Foote - -0.5.12 (2015-08-05) -------------------- -* Add kdl::Frame to TransformStamped conversion -* Contributors: Paul Bovbel - -0.5.11 (2015-04-22) -------------------- - -0.5.10 (2015-04-21) -------------------- - -0.5.9 (2015-03-25) ------------------- - -0.5.8 (2015-03-17) ------------------- -* remove useless Makefile files -* fix ODR violations -* Contributors: Vincent Rabaud - -0.5.7 (2014-12-23) ------------------- -* fixing install rules and adding backwards compatible include with #warning -* Contributors: Tully Foote - -0.5.6 (2014-09-18) ------------------- - -0.5.5 (2014-06-23) ------------------- - -0.5.4 (2014-05-07) ------------------- - -0.5.3 (2014-02-21) ------------------- -* finding eigen from cmake_modules instead of from catkin -* Contributors: Tully Foote - -0.5.2 (2014-02-20) ------------------- -* add cmake_modules dependency for eigen find_package rules -* Contributors: Tully Foote - -0.5.1 (2014-02-14) ------------------- - -0.5.0 (2014-02-14) ------------------- - -0.4.10 (2013-12-26) -------------------- - -0.4.9 (2013-11-06) ------------------- - -0.4.8 (2013-11-06) ------------------- - -0.4.7 (2013-08-28) ------------------- - -0.4.6 (2013-08-28) ------------------- - -0.4.5 (2013-07-11) ------------------- - -0.4.4 (2013-07-09) ------------------- -* making repo use CATKIN_ENABLE_TESTING correctly and switching rostest to be a test_depend with that change. - -0.4.3 (2013-07-05) ------------------- - -0.4.2 (2013-07-05) ------------------- - -0.4.1 (2013-07-05) ------------------- - -0.4.0 (2013-06-27) ------------------- -* moving convert methods back into tf2 because it does not have any ros dependencies beyond ros::Time which is already a dependency of tf2 -* Cleaning up unnecessary dependency on roscpp -* converting contents of tf2_ros to be properly namespaced in the tf2_ros namespace -* Cleaning up packaging of tf2 including: - removing unused nodehandle - cleaning up a few dependencies and linking - removing old backup of package.xml - making diff minimally different from tf version of library -* Restoring test packages and bullet packages. 
- reverting 3570e8c42f9b394ecbfd9db076b920b41300ad55 to get back more of the packages previously implemented - reverting 04cf29d1b58c660fdc999ab83563a5d4b76ab331 to fix `#7 `_ -* passing unit tests - -0.3.6 (2013-03-03) ------------------- -* fix compilation under Oneiric - -0.3.5 (2013-02-15 14:46) ------------------------- -* 0.3.4 -> 0.3.5 - -0.3.4 (2013-02-15 13:14) ------------------------- -* 0.3.3 -> 0.3.4 - -0.3.3 (2013-02-15 11:30) ------------------------- -* 0.3.2 -> 0.3.3 - -0.3.2 (2013-02-15 00:42) ------------------------- -* 0.3.1 -> 0.3.2 -* fixed missing include export & tf2_ros dependecy - -0.3.1 (2013-02-14) ------------------- -* fixing version number in tf2_kdl -* catkinized tf2_kdl - -0.3.0 (2013-02-13) ------------------- -* fixing groovy-devel -* removing bullet and kdl related packages -* catkinizing geometry-experimental -* catkinizing tf2_kdl -* fix for kdl rotaiton constrition -* add twist, wrench and pose conversion to kdl, fix message to message conversion by adding specific conversion functions -* merge tf2_cpp and tf2_py into tf2_ros -* Got transform with types working in python -* A working first version of transforming and converting between different types -* Moving from camelCase to undescores to be in line with python style guides -* kdl unittest passing -* whitespace test -* add support for PointStamped geometry_msgs -* Fixing script -* set transform for test -* add advanced api -* working to transform kdl objects with dummy buffer_core -* plugin for py kdl -* add regression tests for geometry_msgs point, vector and pose -* add frame unit tests to kdl and bullet -* add first regression tests for kdl and bullet tf -* add bullet transforms, and create tests for bullet and kdl -* transform for vector3stamped message -* move implementation into library -* add advanced api -* compiling again with new design -* renaming classes -* compiling now -* almost compiling version of template code -* add test to start compiling diff --git a/src/geometry2/tf2_kdl/CMakeLists.txt b/src/geometry2/tf2_kdl/CMakeLists.txt deleted file mode 100644 index 5bd4e50..0000000 --- a/src/geometry2/tf2_kdl/CMakeLists.txt +++ /dev/null @@ -1,61 +0,0 @@ -cmake_minimum_required(VERSION 2.8.3) -project(tf2_kdl) - -find_package(orocos_kdl) -find_package(catkin REQUIRED COMPONENTS cmake_modules tf2 tf2_ros tf2_msgs) - -# Finding Eigen is somewhat complicated because of our need to support Ubuntu -# all the way back to saucy. First we look for the Eigen3 cmake module -# provided by the libeigen3-dev on newer Ubuntu. If that fails, then we -# fall-back to the version provided by cmake_modules, which is a stand-in. -find_package(Eigen3 QUIET) -if(NOT EIGEN3_FOUND) - find_package(cmake_modules REQUIRED) - find_package(Eigen REQUIRED) - set(EIGEN3_INCLUDE_DIRS ${EIGEN_INCLUDE_DIRS}) -endif() - -# Note that eigen 3.2 (on Ubuntu Wily) only provides EIGEN3_INCLUDE_DIR, -# not EIGEN3_INCLUDE_DIRS, so we have to set the latter from the former. 
-if(NOT EIGEN3_INCLUDE_DIRS) - set(EIGEN3_INCLUDE_DIRS ${EIGEN3_INCLUDE_DIR}) -endif() - - -catkin_package( - INCLUDE_DIRS include ${EIGEN3_INCLUDE_DIRS} - DEPENDS EIGEN3 orocos_kdl -) - - -catkin_python_setup() -link_directories(${orocos_kdl_LIBRARY_DIRS}) - -include_directories(include ${catkin_INCLUDE_DIRS} ${EIGEN3_INCLUDE_DIRS} ${GTEST_INCLUDE_DIRS}) - - -install(DIRECTORY include/${PROJECT_NAME}/ - DESTINATION ${CATKIN_PACKAGE_INCLUDE_DESTINATION}) - -install(PROGRAMS scripts/test.py - DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION} -) - - -if(CATKIN_ENABLE_TESTING) - - find_package(catkin REQUIRED COMPONENTS rostest tf2 tf2_ros tf2_msgs) - - add_executable(test_kdl EXCLUDE_FROM_ALL test/test_tf2_kdl.cpp) - find_package(Threads) - target_include_directories(test_kdl PUBLIC ${orocos_kdl_INCLUDE_DIRS}) - target_link_libraries(test_kdl ${catkin_LIBRARIES} ${GTEST_LIBRARIES} ${orocos_kdl_LIBRARIES} ${GTEST_LIBRARIES} ${CMAKE_THREAD_LIBS_INIT}) - - add_rostest(${CMAKE_CURRENT_SOURCE_DIR}/test/test.launch) - add_rostest(${CMAKE_CURRENT_SOURCE_DIR}/test/test_python.launch) - - if(TARGET tests) - add_dependencies(tests test_kdl) - endif() - -endif() diff --git a/src/geometry2/tf2_kdl/conf.py b/src/geometry2/tf2_kdl/conf.py deleted file mode 100644 index fb9bed5..0000000 --- a/src/geometry2/tf2_kdl/conf.py +++ /dev/null @@ -1,206 +0,0 @@ -# -*- coding: utf-8 -*- -# -# tf2 documentation build configuration file, created by -# sphinx-quickstart on Mon Jun 1 14:21:53 2009. -# -# This file is execfile()d with the current directory set to its containing dir. -# -# Note that not all possible configuration values are present in this -# autogenerated file. -# -# All configuration values have a default; values that are commented out -# serve to show the default. - -import roslib -#roslib.load_manifest('tf2_kdl') -import sys, os - -# If extensions (or modules to document with autodoc) are in another directory, -# add these directories to sys.path here. If the directory is relative to the -# documentation root, use os.path.abspath to make it absolute, like shown here. -# sys.path.append(os.path.abspath('./src/tf2_kdl')) - -# -- General configuration ----------------------------------------------------- - -# Add any Sphinx extension module names here, as strings. They can be extensions -# coming with Sphinx (named 'sphinx.ext.*') or your custom ones. -extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.intersphinx', 'sphinx.ext.pngmath'] - -# Add any paths that contain templates here, relative to this directory. -templates_path = ['_templates'] - -# The suffix of source filenames. -source_suffix = '.rst' - -# The encoding of source files. -#source_encoding = 'utf-8' - -# The master toctree document. -master_doc = 'index' - -# General information about the project. -project = u'tf2_kdl' -copyright = u'2016, Open Source Robotics Foundation' - -# The version info for the project you're documenting, acts as replacement for -# |version| and |release|, also used in various other places throughout the -# built documents. -# -# The short X.Y version. -version = '0.5' -# The full version, including alpha/beta/rc tags. -release = '0.5.13' - -# The language for content autogenerated by Sphinx. Refer to documentation -# for a list of supported languages. -#language = None - -# There are two options for replacing |today|: either, you set today to some -# non-false value, then it is used: -#today = '' -# Else, today_fmt is used as the format for a strftime call. 
-#today_fmt = '%B %d, %Y' - -# List of documents that shouldn't be included in the build. -#unused_docs = [] - -# List of directories, relative to source directory, that shouldn't be searched -# for source files. -exclude_trees = ['_build'] - -exclude_patterns = ['_CHANGELOG.rst'] - -# The reST default role (used for this markup: `text`) to use for all documents. -#default_role = None - -# If true, '()' will be appended to :func: etc. cross-reference text. -#add_function_parentheses = True - -# If true, the current module name will be prepended to all description -# unit titles (such as .. function::). -#add_module_names = True - -# If true, sectionauthor and moduleauthor directives will be shown in the -# output. They are ignored by default. -#show_authors = False - -# The name of the Pygments (syntax highlighting) style to use. -pygments_style = 'sphinx' - -# A list of ignored prefixes for module index sorting. -#modindex_common_prefix = [] - - -# -- Options for HTML output --------------------------------------------------- - -# The theme to use for HTML and HTML Help pages. Major themes that come with -# Sphinx are currently 'default' and 'sphinxdoc'. -html_theme = 'default' - -# Theme options are theme-specific and customize the look and feel of a theme -# further. For a list of options available for each theme, see the -# documentation. -#html_theme_options = {} - -# Add any paths that contain custom themes here, relative to this directory. -#html_theme_path = [] - -# The name for this set of Sphinx documents. If None, it defaults to -# " v documentation". -#html_title = None - -# A shorter title for the navigation bar. Default is the same as html_title. -#html_short_title = None - -# The name of an image file (relative to this directory) to place at the top -# of the sidebar. -#html_logo = None - -# The name of an image file (within the static path) to use as favicon of the -# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 -# pixels large. -#html_favicon = None - -# Add any paths that contain custom static files (such as style sheets) here, -# relative to this directory. They are copied after the builtin static files, -# so a file named "default.css" will overwrite the builtin "default.css". -#html_static_path = ['_static'] - -# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, -# using the given strftime format. -#html_last_updated_fmt = '%b %d, %Y' - -# If true, SmartyPants will be used to convert quotes and dashes to -# typographically correct entities. -#html_use_smartypants = True - -# Custom sidebar templates, maps document names to template names. -#html_sidebars = {} - -# Additional templates that should be rendered to pages, maps page names to -# template names. -#html_additional_pages = {} - -# If false, no module index is generated. -#html_use_modindex = True - -# If false, no index is generated. -#html_use_index = True - -# If true, the index is split into individual pages for each letter. -#html_split_index = False - -# If true, links to the reST sources are added to the pages. -#html_show_sourcelink = True - -# If true, an OpenSearch description file will be output, and all pages will -# contain a tag referring to it. The value of this option must be the -# base URL from which the finished HTML is served. -#html_use_opensearch = '' - -# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml"). -#html_file_suffix = '' - -# Output file base name for HTML help builder. 
-htmlhelp_basename = 'tfdoc' - - -# -- Options for LaTeX output -------------------------------------------------- - -# The paper size ('letter' or 'a4'). -#latex_paper_size = 'letter' - -# The font size ('10pt', '11pt' or '12pt'). -#latex_font_size = '10pt' - -# Grouping the document tree into LaTeX files. List of tuples -# (source start file, target name, title, author, documentclass [howto/manual]). -latex_documents = [ - ('index', 'tf.tex', u'stereo\\_utils Documentation', - u'Tully Foote and Eitan Marder-Eppstein', 'manual'), -] - -# The name of an image file (relative to this directory) to place at the top of -# the title page. -#latex_logo = None - -# For "manual" documents, if this is true, then toplevel headings are parts, -# not chapters. -#latex_use_parts = False - -# Additional stuff for the LaTeX preamble. -#latex_preamble = '' - -# Documents to append as an appendix to all manuals. -#latex_appendices = [] - -# If false, no module index is generated. -#latex_use_modindex = True - - -# Example configuration for intersphinx: refer to the Python standard library. -intersphinx_mapping = { - 'http://docs.python.org/': None, - 'http://docs.opencv.org/3.0-last-rst/': None, - 'http://docs.scipy.org/doc/numpy' : None - } diff --git a/src/geometry2/tf2_kdl/include/tf2_kdl/tf2_kdl.h b/src/geometry2/tf2_kdl/include/tf2_kdl/tf2_kdl.h deleted file mode 100644 index e1f28bf..0000000 --- a/src/geometry2/tf2_kdl/include/tf2_kdl/tf2_kdl.h +++ /dev/null @@ -1,309 +0,0 @@ -/* - * Copyright (c) 2008, Willow Garage, Inc. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the Willow Garage, Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ - -/** \author Wim Meeussen */ - -#ifndef TF2_KDL_H -#define TF2_KDL_H - -#include -#include -#include -#include -#include -#include -#include - - -namespace tf2 -{ -/** \brief Convert a timestamped transform to the equivalent KDL data type. - * \param t The transform to convert, as a geometry_msgs TransformedStamped message. - * \return The transform message converted to an KDL Frame. 
- */ -inline -KDL::Frame transformToKDL(const geometry_msgs::TransformStamped& t) - { - return KDL::Frame(KDL::Rotation::Quaternion(t.transform.rotation.x, t.transform.rotation.y, - t.transform.rotation.z, t.transform.rotation.w), - KDL::Vector(t.transform.translation.x, t.transform.translation.y, t.transform.translation.z)); - } - -/** \brief Convert a KDL Frame to the equivalent geometry_msgs message type. - * \param k The transform to convert, as a KDL Frame. - * \return The transform converted to a TransformStamped message. - */ -inline -geometry_msgs::TransformStamped kdlToTransform(const KDL::Frame& k) -{ - geometry_msgs::TransformStamped t; - t.transform.translation.x = k.p.x(); - t.transform.translation.y = k.p.y(); - t.transform.translation.z = k.p.z(); - k.M.GetQuaternion(t.transform.rotation.x, t.transform.rotation.y, t.transform.rotation.z, t.transform.rotation.w); - return t; -} - -// --------------------- -// Vector -// --------------------- -/** \brief Apply a geometry_msgs TransformStamped to a KDL-specific Vector type. - * This function is a specialization of the doTransform template defined in tf2/convert.h. - * \param t_in The vector to transform, as a timestamped KDL Vector data type. - * \param t_out The transformed vector, as a timestamped KDL Vector data type. - * \param transform The timestamped transform to apply, as a TransformStamped message. - */ -template <> -inline - void doTransform(const tf2::Stamped<KDL::Vector>& t_in, tf2::Stamped<KDL::Vector>& t_out, const geometry_msgs::TransformStamped& transform) - { - t_out = tf2::Stamped<KDL::Vector>(transformToKDL(transform) * t_in, transform.header.stamp, transform.header.frame_id); - } - -/** \brief Convert a stamped KDL Vector type to a PointStamped message. - * This function is a specialization of the toMsg template defined in tf2/convert.h. - * \param in The timestamped KDL Vector to convert. - * \return The vector converted to a PointStamped message. - */ -inline -geometry_msgs::PointStamped toMsg(const tf2::Stamped<KDL::Vector>& in) -{ - geometry_msgs::PointStamped msg; - msg.header.stamp = in.stamp_; - msg.header.frame_id = in.frame_id_; - msg.point.x = in[0]; - msg.point.y = in[1]; - msg.point.z = in[2]; - return msg; -} - -/** \brief Convert a PointStamped message type to a stamped KDL-specific Vector type. - * This function is a specialization of the fromMsg template defined in tf2/convert.h - * \param msg The PointStamped message to convert. - * \param out The point converted to a timestamped KDL Vector. - */ -inline -void fromMsg(const geometry_msgs::PointStamped& msg, tf2::Stamped<KDL::Vector>& out) -{ - out.stamp_ = msg.header.stamp; - out.frame_id_ = msg.header.frame_id; - out[0] = msg.point.x; - out[1] = msg.point.y; - out[2] = msg.point.z; -} - -// --------------------- -// Twist -// --------------------- -/** \brief Apply a geometry_msgs TransformStamped to a KDL-specific Twist type. - * This function is a specialization of the doTransform template defined in tf2/convert.h. - * \param t_in The twist to transform, as a timestamped KDL Twist data type. - * \param t_out The transformed twist, as a timestamped KDL Twist data type. - * \param transform The timestamped transform to apply, as a TransformStamped message. - */ -template <> -inline - void doTransform(const tf2::Stamped<KDL::Twist>& t_in, tf2::Stamped<KDL::Twist>& t_out, const geometry_msgs::TransformStamped& transform) - { - t_out = tf2::Stamped<KDL::Twist>(transformToKDL(transform) * t_in, transform.header.stamp, transform.header.frame_id); - } - -/** \brief Convert a stamped KDL Twist type to a TwistStamped message. 
- * This function is a specialization of the toMsg template defined in tf2/convert.h. - * \param in The timestamped KDL Twist to convert. - * \return The twist converted to a TwistStamped message. - */ -inline -geometry_msgs::TwistStamped toMsg(const tf2::Stamped<KDL::Twist>& in) -{ - geometry_msgs::TwistStamped msg; - msg.header.stamp = in.stamp_; - msg.header.frame_id = in.frame_id_; - msg.twist.linear.x = in.vel[0]; - msg.twist.linear.y = in.vel[1]; - msg.twist.linear.z = in.vel[2]; - msg.twist.angular.x = in.rot[0]; - msg.twist.angular.y = in.rot[1]; - msg.twist.angular.z = in.rot[2]; - return msg; -} - -/** \brief Convert a TwistStamped message type to a stamped KDL-specific Twist type. - * This function is a specialization of the fromMsg template defined in tf2/convert.h - * \param msg The TwistStamped message to convert. - * \param out The twist converted to a timestamped KDL Twist. - */ -inline -void fromMsg(const geometry_msgs::TwistStamped& msg, tf2::Stamped<KDL::Twist>& out) -{ - out.stamp_ = msg.header.stamp; - out.frame_id_ = msg.header.frame_id; - out.vel[0] = msg.twist.linear.x; - out.vel[1] = msg.twist.linear.y; - out.vel[2] = msg.twist.linear.z; - out.rot[0] = msg.twist.angular.x; - out.rot[1] = msg.twist.angular.y; - out.rot[2] = msg.twist.angular.z; -} - - -// --------------------- -// Wrench -// --------------------- -/** \brief Apply a geometry_msgs TransformStamped to a KDL-specific Wrench type. - * This function is a specialization of the doTransform template defined in tf2/convert.h. - * \param t_in The wrench to transform, as a timestamped KDL Wrench data type. - * \param t_out The transformed wrench, as a timestamped KDL Wrench data type. - * \param transform The timestamped transform to apply, as a TransformStamped message. - */ -template <> -inline - void doTransform(const tf2::Stamped<KDL::Wrench>& t_in, tf2::Stamped<KDL::Wrench>& t_out, const geometry_msgs::TransformStamped& transform) - { - t_out = tf2::Stamped<KDL::Wrench>(transformToKDL(transform) * t_in, transform.header.stamp, transform.header.frame_id); - } - -/** \brief Convert a stamped KDL Wrench type to a WrenchStamped message. - * This function is a specialization of the toMsg template defined in tf2/convert.h. - * \param in The timestamped KDL Wrench to convert. - * \return The wrench converted to a WrenchStamped message. - */ -inline -geometry_msgs::WrenchStamped toMsg(const tf2::Stamped<KDL::Wrench>& in) -{ - geometry_msgs::WrenchStamped msg; - msg.header.stamp = in.stamp_; - msg.header.frame_id = in.frame_id_; - msg.wrench.force.x = in.force[0]; - msg.wrench.force.y = in.force[1]; - msg.wrench.force.z = in.force[2]; - msg.wrench.torque.x = in.torque[0]; - msg.wrench.torque.y = in.torque[1]; - msg.wrench.torque.z = in.torque[2]; - return msg; -} - -/** \brief Convert a WrenchStamped message type to a stamped KDL-specific Wrench type. - * This function is a specialization of the fromMsg template defined in tf2/convert.h - * \param msg The WrenchStamped message to convert. - * \param out The wrench converted to a timestamped KDL Wrench. 
- */ -inline -void fromMsg(const geometry_msgs::WrenchStamped& msg, tf2::Stamped<KDL::Wrench>& out) -{ - out.stamp_ = msg.header.stamp; - out.frame_id_ = msg.header.frame_id; - out.force[0] = msg.wrench.force.x; - out.force[1] = msg.wrench.force.y; - out.force[2] = msg.wrench.force.z; - out.torque[0] = msg.wrench.torque.x; - out.torque[1] = msg.wrench.torque.y; - out.torque[2] = msg.wrench.torque.z; -} - - - - -// --------------------- -// Frame -// --------------------- -/** \brief Apply a geometry_msgs TransformStamped to a KDL-specific Frame data type. - * This function is a specialization of the doTransform template defined in tf2/convert.h. - * \param t_in The frame to transform, as a timestamped KDL Frame. - * \param t_out The transformed frame, as a timestamped KDL Frame. - * \param transform The timestamped transform to apply, as a TransformStamped message. - */ -template <> -inline - void doTransform(const tf2::Stamped<KDL::Frame>& t_in, tf2::Stamped<KDL::Frame>& t_out, const geometry_msgs::TransformStamped& transform) - { - t_out = tf2::Stamped<KDL::Frame>(transformToKDL(transform) * t_in, transform.header.stamp, transform.header.frame_id); - } - -/** \brief Convert a KDL Frame type to a Pose message. - * This function is a specialization of the toMsg template defined in tf2/convert.h. - * \param in The KDL Frame to convert. - * \return The frame converted to a Pose message. - */ -inline -geometry_msgs::Pose toMsg(const KDL::Frame& in) -{ - geometry_msgs::Pose msg; - msg.position.x = in.p[0]; - msg.position.y = in.p[1]; - msg.position.z = in.p[2]; - in.M.GetQuaternion(msg.orientation.x, msg.orientation.y, msg.orientation.z, msg.orientation.w); - return msg; -} - -/** \brief Convert a Pose message type to a KDL Frame. - * This function is a specialization of the fromMsg template defined in tf2/convert.h. - * \param msg The Pose message to convert. - * \param out The pose converted to a KDL Frame. - */ -inline -void fromMsg(const geometry_msgs::Pose& msg, KDL::Frame& out) -{ - out.p[0] = msg.position.x; - out.p[1] = msg.position.y; - out.p[2] = msg.position.z; - out.M = KDL::Rotation::Quaternion(msg.orientation.x, msg.orientation.y, msg.orientation.z, msg.orientation.w); -} - -/** \brief Convert a stamped KDL Frame type to a PoseStamped message. - * This function is a specialization of the toMsg template defined in tf2/convert.h. - * \param in The timestamped KDL Frame to convert. - * \return The frame converted to a PoseStamped message. - */ -inline -geometry_msgs::PoseStamped toMsg(const tf2::Stamped<KDL::Frame>& in) -{ - geometry_msgs::PoseStamped msg; - msg.header.stamp = in.stamp_; - msg.header.frame_id = in.frame_id_; - msg.pose = toMsg(static_cast<const KDL::Frame&>(in)); - return msg; -} - -/** \brief Convert a PoseStamped message type to a stamped KDL Frame. - * This function is a specialization of the fromMsg template defined in tf2/convert.h. - * \param msg The PoseStamped message to convert. - * \param out The pose converted to a timestamped KDL Frame. 
- */
-inline
-void fromMsg(const geometry_msgs::PoseStamped& msg, tf2::Stamped<KDL::Frame>& out)
-{
-  out.stamp_ = msg.header.stamp;
-  out.frame_id_ = msg.header.frame_id;
-  fromMsg(msg.pose, static_cast<KDL::Frame&>(out));
-}
-
-} // namespace
-
-#endif // TF2_KDL_H
diff --git a/src/geometry2/tf2_kdl/include/tf2_kdl/tf2_kdl/tf2_kdl.h b/src/geometry2/tf2_kdl/include/tf2_kdl/tf2_kdl/tf2_kdl.h deleted file mode 100644 index 8fa45b2..0000000 --- a/src/geometry2/tf2_kdl/include/tf2_kdl/tf2_kdl/tf2_kdl.h +++ /dev/null @@ -1,3 +0,0 @@
-#warning This header is at the wrong path, you should include <tf2_kdl/tf2_kdl.h>
-
-#include <tf2_kdl/tf2_kdl.h>
diff --git a/src/geometry2/tf2_kdl/index.rst b/src/geometry2/tf2_kdl/index.rst deleted file mode 100644 index 73d0ff9..0000000 --- a/src/geometry2/tf2_kdl/index.rst +++ /dev/null @@ -1,14 +0,0 @@
-tf2_kdl documentation
-=====================
-
-This is the Python API reference of the tf2_kdl package.
-
-.. automodule:: tf2_kdl.tf2_kdl
-    :members:
-    :undoc-members:
-
-Indices and tables
-==================
-
-* :ref:`genindex`
-* :ref:`search`
diff --git a/src/geometry2/tf2_kdl/mainpage.dox b/src/geometry2/tf2_kdl/mainpage.dox deleted file mode 100644 index a3bbc8b..0000000 --- a/src/geometry2/tf2_kdl/mainpage.dox +++ /dev/null @@ -1,19 +0,0 @@
-/**
-\mainpage
-\htmlinclude manifest.html
-
-\b tf2_kdl contains functions for converting between geometry_msgs and KDL data types.
-
-This library is an implementation of the templated conversion interface specified in tf2/convert.h.
-It enables easy conversion from geometry_msgs Transform and Point types to the types specified
-by the Orocos KDL (Kinematics and Dynamics Library) API (see http://www.orocos.org/kdl).
-
-See the Conversions overview
-wiki page for more information about datatype conversion in tf2.
-
-\section codeapi Code API
-
-This library consists of one header only, tf2_kdl/tf2_kdl.h, which contains mostly
-specializations of template functions defined in tf2/convert.h.
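\section example Example

A minimal usage sketch (the frame ids "base_link" and "odom" are illustrative,
and transforms between them are assumed to already be available in the buffer,
as in the package's own tests):

\code
#include <tf2_kdl/tf2_kdl.h>
#include <tf2_ros/buffer.h>
#include <tf2_ros/transform_listener.h>

tf2_ros::Buffer buffer;
tf2_ros::TransformListener listener(buffer);  // feeds the buffer from /tf

// Transform a stamped KDL vector into another frame.
tf2::Stamped<KDL::Vector> v_in(KDL::Vector(1, 2, 3), ros::Time(0), "base_link");
KDL::Vector v_out = buffer.transform(v_in, "odom", ros::Duration(1.0));
\endcode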
-
-*/
diff --git a/src/geometry2/tf2_kdl/package.xml b/src/geometry2/tf2_kdl/package.xml deleted file mode 100644 index 44b2637..0000000 --- a/src/geometry2/tf2_kdl/package.xml +++ /dev/null @@ -1,27 +0,0 @@
-<package format="2">
-  <name>tf2_kdl</name>
-  <version>0.6.7</version>
-
-  <description>KDL binding for tf2</description>
-
-  <maintainer>Tully Foote</maintainer>
-  <author>Wim Meeussen</author>
-  <license>BSD</license>
-
-  <url>http://ros.org/wiki/tf2</url>
-
-  <buildtool_depend>catkin</buildtool_depend>
-
-  <build_depend>cmake_modules</build_depend>
-  <build_depend>eigen</build_depend>
-
-  <build_export_depend>eigen</build_export_depend>
-
-  <depend>orocos_kdl</depend>
-  <depend>tf2</depend>
-  <depend>tf2_ros</depend>
-
-  <test_depend>ros_environment</test_depend>
-  <test_depend>rostest</test_depend>
-</package>
diff --git a/src/geometry2/tf2_kdl/rosdoc.yaml b/src/geometry2/tf2_kdl/rosdoc.yaml deleted file mode 100644 index a1d78b9..0000000 --- a/src/geometry2/tf2_kdl/rosdoc.yaml +++ /dev/null @@ -1,7 +0,0 @@
- - builder: doxygen
-   name: C++ API
-   output_dir: c++
-   file_patterns: '*.c *.cpp *.h *.cc *.hh *.dox'
- - builder: sphinx
-   name: Python API
-   output_dir: python
diff --git a/src/geometry2/tf2_kdl/scripts/test.py b/src/geometry2/tf2_kdl/scripts/test.py deleted file mode 100755 index e37579b..0000000 --- a/src/geometry2/tf2_kdl/scripts/test.py +++ /dev/null @@ -1,76 +0,0 @@
-#!/usr/bin/env python
-
-from __future__ import print_function
-
-import unittest
-import rospy
-import PyKDL
-import tf2_ros
-import tf2_kdl
-from geometry_msgs.msg import TransformStamped
-from copy import deepcopy
-
-class KDLConversions(unittest.TestCase):
-    def test_transform(self):
-        b = tf2_ros.Buffer()
-        t = TransformStamped()
-        t.transform.translation.x = 1
-        t.transform.rotation.x = 1
-        t.header.stamp = rospy.Time(2.0)
-        t.header.frame_id = 'a'
-        t.child_frame_id = 'b'
-        b.set_transform(t, 'eitan_rocks')
-        out = b.lookup_transform('a','b', rospy.Time(2.0), rospy.Duration(2.0))
-        self.assertEqual(out.transform.translation.x, 1)
-        self.assertEqual(out.transform.rotation.x, 1)
-        self.assertEqual(out.header.frame_id, 'a')
-        self.assertEqual(out.child_frame_id, 'b')
-
-        v = PyKDL.Vector(1,2,3)
-        out = b.transform(tf2_ros.Stamped(v, rospy.Time(2), 'a'), 'b')
-        self.assertEqual(out.x(), 0)
-        self.assertEqual(out.y(), -2)
-        self.assertEqual(out.z(), -3)
-
-        f = PyKDL.Frame(PyKDL.Rotation.RPY(1,2,3), PyKDL.Vector(1,2,3))
-        out = b.transform(tf2_ros.Stamped(f, rospy.Time(2), 'a'), 'b')
-        print(out)
-        self.assertEqual(out.p.x(), 0)
-        self.assertEqual(out.p.y(), -2)
-        self.assertEqual(out.p.z(), -3)
-        # TODO(tfoote) check values of rotation
-
-        t = PyKDL.Twist(PyKDL.Vector(1,2,3), PyKDL.Vector(4,5,6))
-        out = b.transform(tf2_ros.Stamped(t, rospy.Time(2), 'a'), 'b')
-        self.assertEqual(out.vel.x(), 1)
-        self.assertEqual(out.vel.y(), -8)
-        self.assertEqual(out.vel.z(), 2)
-        self.assertEqual(out.rot.x(), 4)
-        self.assertEqual(out.rot.y(), -5)
-        self.assertEqual(out.rot.z(), -6)
-
-        w = PyKDL.Wrench(PyKDL.Vector(1,2,3), PyKDL.Vector(4,5,6))
-        out = b.transform(tf2_ros.Stamped(w, rospy.Time(2), 'a'), 'b')
-        self.assertEqual(out.force.x(), 1)
-        self.assertEqual(out.force.y(), -2)
-        self.assertEqual(out.force.z(), -3)
-        self.assertEqual(out.torque.x(), 4)
-        self.assertEqual(out.torque.y(), -8)
-        self.assertEqual(out.torque.z(), -4)
-
-    def test_convert(self):
-        v = PyKDL.Vector(1,2,3)
-        vs = tf2_ros.Stamped(v, rospy.Time(2), 'a')
-        vs2 = tf2_ros.convert(vs, PyKDL.Vector)
-        self.assertEqual(vs.x(), 1)
-        self.assertEqual(vs.y(), 2)
-        self.assertEqual(vs.z(), 3)
-        self.assertEqual(vs2.x(), 1)
-        self.assertEqual(vs2.y(), 2)
-        self.assertEqual(vs2.z(), 3)
-
-
-if __name__ == '__main__':
-    import rosunit
-    rospy.init_node('test_tf2_kdl_python')
-    rosunit.unitrun("test_tf2_kdl", "test_tf2_kdl_python", KDLConversions)
diff --git a/src/geometry2/tf2_kdl/setup.py b/src/geometry2/tf2_kdl/setup.py deleted file mode 100644
index cdb0706..0000000 --- a/src/geometry2/tf2_kdl/setup.py +++ /dev/null @@ -1,13 +0,0 @@ -#!/usr/bin/env python - -from distutils.core import setup -from catkin_pkg.python_setup import generate_distutils_setup - -d = generate_distutils_setup( - ## don't do this unless you want a globally visible script - # scripts=['script/test.py'], - packages=['tf2_kdl'], - package_dir={'': 'src'} -) - -setup(**d) diff --git a/src/geometry2/tf2_kdl/src/tf2_kdl/__init__.py b/src/geometry2/tf2_kdl/src/tf2_kdl/__init__.py deleted file mode 100644 index 97eef01..0000000 --- a/src/geometry2/tf2_kdl/src/tf2_kdl/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from .tf2_kdl import * diff --git a/src/geometry2/tf2_kdl/src/tf2_kdl/tf2_kdl.py b/src/geometry2/tf2_kdl/src/tf2_kdl/tf2_kdl.py deleted file mode 100644 index d01205a..0000000 --- a/src/geometry2/tf2_kdl/src/tf2_kdl/tf2_kdl.py +++ /dev/null @@ -1,153 +0,0 @@ -# Copyright (c) 2008, Willow Garage, Inc. -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in the -# documentation and/or other materials provided with the distribution. -# * Neither the name of the Willow Garage, Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE -# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -# POSSIBILITY OF SUCH DAMAGE. - -# author: Wim Meeussen - -import PyKDL -import rospy -import tf2_ros -from geometry_msgs.msg import PointStamped - -def transform_to_kdl(t): - """Convert a geometry_msgs Transform message to a PyKDL Frame. - - :param t: The Transform message to convert. - :type t: geometry_msgs.msg.TransformStamped - :return: The converted PyKDL frame. - :rtype: PyKDL.Frame - """ - - return PyKDL.Frame(PyKDL.Rotation.Quaternion(t.transform.rotation.x, t.transform.rotation.y, - t.transform.rotation.z, t.transform.rotation.w), - PyKDL.Vector(t.transform.translation.x, - t.transform.translation.y, - t.transform.translation.z)) - - -def do_transform_vector(vector, transform): - """Apply a transform in the form of a geometry_msgs message to a PyKDL vector. - - :param vector: The PyKDL vector to transform. - :type vector: PyKDL.Vector - :param transform: The transform to apply. - :type transform: geometry_msgs.msg.TransformStamped - :return: The transformed vector. 
- :rtype: PyKDL.Vector - """ - res = transform_to_kdl(transform) * vector - res.header = transform.header - return res - -tf2_ros.TransformRegistration().add(PyKDL.Vector, do_transform_vector) - -def to_msg_vector(vector): - """Convert a PyKDL Vector to a geometry_msgs PointStamped message. - - :param vector: The vector to convert. - :type vector: PyKDL.Vector - :return: The converted vector/point. - :rtype: geometry_msgs.msg.PointStamped - """ - msg = PointStamped() - msg.header = vector.header - msg.point.x = vector[0] - msg.point.y = vector[1] - msg.point.z = vector[2] - return msg - -tf2_ros.ConvertRegistration().add_to_msg(PyKDL.Vector, to_msg_vector) - -def from_msg_vector(msg): - """Convert a PointStamped message to a stamped PyKDL Vector. - - :param msg: The PointStamped message to convert. - :type msg: geometry_msgs.msg.PointStamped - :return: The timestamped converted PyKDL vector. - :rtype: PyKDL.Vector - """ - vector = PyKDL.Vector(msg.point.x, msg.point.y, msg.point.z) - return tf2_ros.Stamped(vector, msg.header.stamp, msg.header.frame_id) - -tf2_ros.ConvertRegistration().add_from_msg(PyKDL.Vector, from_msg_vector) - -def convert_vector(vector): - """Convert a generic stamped triplet message to a stamped PyKDL Vector. - - :param vector: The message to convert. - :return: The timestamped converted PyKDL vector. - :rtype: PyKDL.Vector - """ - return tf2_ros.Stamped(PyKDL.Vector(vector), vector.header.stamp, vector.header.frame_id) - -tf2_ros.ConvertRegistration().add_convert((PyKDL.Vector, PyKDL.Vector), convert_vector) - -def do_transform_frame(frame, transform): - """Apply a transform in the form of a geometry_msgs message to a PyKDL Frame. - - :param frame: The PyKDL frame to transform. - :type frame: PyKDL.Frame - :param transform: The transform to apply. - :type transform: geometry_msgs.msg.TransformStamped - :return: The transformed PyKDL frame. - :rtype: PyKDL.Frame - """ - res = transform_to_kdl(transform) * frame - res.header = transform.header - return res -tf2_ros.TransformRegistration().add(PyKDL.Frame, do_transform_frame) - -def do_transform_twist(twist, transform): - """Apply a transform in the form of a geometry_msgs message to a PyKDL Twist. - - :param twist: The PyKDL twist to transform. - :type twist: PyKDL.Twist - :param transform: The transform to apply. - :type transform: geometry_msgs.msg.TransformStamped - :return: The transformed PyKDL twist. - :rtype: PyKDL.Twist - """ - res = transform_to_kdl(transform) * twist - res.header = transform.header - return res -tf2_ros.TransformRegistration().add(PyKDL.Twist, do_transform_twist) - - -# Wrench -def do_transform_wrench(wrench, transform): - """Apply a transform in the form of a geometry_msgs message to a PyKDL Wrench. - - :param wrench: The PyKDL wrench to transform. - :type wrench: PyKDL.Wrench - :param transform: The transform to apply. - :type transform: geometry_msgs.msg.TransformStamped - :return: The transformed PyKDL wrench. 
- :rtype: PyKDL.Wrench - """ - res = transform_to_kdl(transform) * wrench - res.header = transform.header - return res -tf2_ros.TransformRegistration().add(PyKDL.Wrench, do_transform_wrench) diff --git a/src/geometry2/tf2_kdl/test/test.launch b/src/geometry2/tf2_kdl/test/test.launch deleted file mode 100644 index bfad1de..0000000 --- a/src/geometry2/tf2_kdl/test/test.launch +++ /dev/null @@ -1,3 +0,0 @@ - - - \ No newline at end of file diff --git a/src/geometry2/tf2_kdl/test/test_python.launch b/src/geometry2/tf2_kdl/test/test_python.launch deleted file mode 100644 index f06b403..0000000 --- a/src/geometry2/tf2_kdl/test/test_python.launch +++ /dev/null @@ -1,3 +0,0 @@ - - - diff --git a/src/geometry2/tf2_kdl/test/test_tf2_kdl.cpp b/src/geometry2/tf2_kdl/test/test_tf2_kdl.cpp deleted file mode 100644 index a0fdfd1..0000000 --- a/src/geometry2/tf2_kdl/test/test_tf2_kdl.cpp +++ /dev/null @@ -1,131 +0,0 @@ -/* - * Copyright (c) 2008, Willow Garage, Inc. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the Willow Garage, Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. 
- */
-
-/** \author Wim Meeussen */
-
-
-#include <tf2_kdl/tf2_kdl.h>
-#include <ros/ros.h>
-#include <gtest/gtest.h>
-#include "tf2_ros/buffer.h"
-
-
-tf2_ros::Buffer* tf_buffer;
-static const double EPS = 1e-3;
-
-TEST(TfKDL, Frame)
-{
-  tf2::Stamped<KDL::Frame> v1(KDL::Frame(KDL::Rotation::RPY(M_PI, 0, 0), KDL::Vector(1,2,3)), ros::Time(2.0), "A");
-
-
-  // simple api
-  KDL::Frame v_simple = tf_buffer->transform(v1, "B", ros::Duration(2.0));
-  EXPECT_NEAR(v_simple.p[0], -9, EPS);
-  EXPECT_NEAR(v_simple.p[1], 18, EPS);
-  EXPECT_NEAR(v_simple.p[2], 27, EPS);
-  double r, p, y;
-  v_simple.M.GetRPY(r, p, y);
-  EXPECT_NEAR(r, 0.0, EPS);
-  EXPECT_NEAR(p, 0.0, EPS);
-  EXPECT_NEAR(y, 0.0, EPS);
-
-
-  // advanced api
-  KDL::Frame v_advanced = tf_buffer->transform(v1, "B", ros::Time(2.0),
-                                               "A", ros::Duration(3.0));
-  EXPECT_NEAR(v_advanced.p[0], -9, EPS);
-  EXPECT_NEAR(v_advanced.p[1], 18, EPS);
-  EXPECT_NEAR(v_advanced.p[2], 27, EPS);
-  v_advanced.M.GetRPY(r, p, y);
-  EXPECT_NEAR(r, 0.0, EPS);
-  EXPECT_NEAR(p, 0.0, EPS);
-  EXPECT_NEAR(y, 0.0, EPS);
-
-}
-
-
-
-TEST(TfKDL, Vector)
-{
-  tf2::Stamped<KDL::Vector> v1(KDL::Vector(1,2,3), ros::Time(2.0), "A");
-
-
-  // simple api
-  KDL::Vector v_simple = tf_buffer->transform(v1, "B", ros::Duration(2.0));
-  EXPECT_NEAR(v_simple[0], -9, EPS);
-  EXPECT_NEAR(v_simple[1], 18, EPS);
-  EXPECT_NEAR(v_simple[2], 27, EPS);
-
-  // advanced api
-  KDL::Vector v_advanced = tf_buffer->transform(v1, "B", ros::Time(2.0),
-                                                "A", ros::Duration(3.0));
-  EXPECT_NEAR(v_advanced[0], -9, EPS);
-  EXPECT_NEAR(v_advanced[1], 18, EPS);
-  EXPECT_NEAR(v_advanced[2], 27, EPS);
-}
-
-TEST(TfKDL, ConvertVector)
-{
-  tf2::Stamped<KDL::Vector> v(KDL::Vector(1,2,3), ros::Time(), "my_frame");
-
-  tf2::Stamped<KDL::Vector> v1 = v;
-  tf2::convert(v1, v1);
-
-  EXPECT_EQ(v, v1);
-
-  tf2::Stamped<KDL::Vector> v2(KDL::Vector(3,4,5), ros::Time(), "my_frame2");
-  tf2::convert(v1, v2);
-
-  EXPECT_EQ(v, v2);
-  EXPECT_EQ(v1, v2);
-}
-
-
-int main(int argc, char **argv){
-  testing::InitGoogleTest(&argc, argv);
-  ros::init(argc, argv, "test");
-  ros::NodeHandle n;
-
-  tf_buffer = new tf2_ros::Buffer();
-
-  // populate buffer
-  geometry_msgs::TransformStamped t;
-  t.transform.translation.x = 10;
-  t.transform.translation.y = 20;
-  t.transform.translation.z = 30;
-  t.transform.rotation.x = 1;
-  t.header.stamp = ros::Time(2.0);
-  t.header.frame_id = "A";
-  t.child_frame_id = "B";
-  tf_buffer->setTransform(t, "test");
-
-  int retval = RUN_ALL_TESTS();
-  delete tf_buffer;
-  return retval;
-}
diff --git a/src/geometry2/tf2_msgs/CHANGELOG.rst b/src/geometry2/tf2_msgs/CHANGELOG.rst deleted file mode 100644 index 078e365..0000000 --- a/src/geometry2/tf2_msgs/CHANGELOG.rst +++ /dev/null @@ -1,165 +0,0 @@
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-Changelog for package tf2_msgs
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-0.6.7 (2020-03-09)
-------------------
-
-0.6.6 (2020-01-09)
-------------------
-
-0.6.5 (2018-11-16)
-------------------
-
-0.6.4 (2018-11-06)
-------------------
-
-0.6.3 (2018-07-09)
-------------------
-
-0.6.2 (2018-05-02)
-------------------
-
-0.6.1 (2018-03-21)
-------------------
-
-0.6.0 (2018-03-21)
-------------------
-
-0.5.17 (2018-01-01)
--------------------
-
-0.5.16 (2017-07-14)
--------------------
-
-0.5.15 (2017-01-24)
--------------------
-
-0.5.14 (2017-01-16)
--------------------
-
-0.5.13 (2016-03-04)
--------------------
-
-0.5.12 (2015-08-05)
--------------------
-
-0.5.11 (2015-04-22)
--------------------
-
-0.5.10 (2015-04-21)
--------------------
-
-0.5.9 (2015-03-25)
-------------------
-
-0.5.8 (2015-03-17)
-------------------
-* remove useless Makefile files
-* Contributors: Vincent Rabaud
- -0.5.7 (2014-12-23) ------------------- - -0.5.6 (2014-09-18) ------------------- - -0.5.5 (2014-06-23) ------------------- - -0.5.4 (2014-05-07) ------------------- - -0.5.3 (2014-02-21) ------------------- - -0.5.2 (2014-02-20) ------------------- - -0.5.1 (2014-02-14) ------------------- - -0.5.0 (2014-02-14) ------------------- - -0.4.10 (2013-12-26) -------------------- - -0.4.9 (2013-11-06) ------------------- - -0.4.8 (2013-11-06) ------------------- - -0.4.7 (2013-08-28) ------------------- - -0.4.6 (2013-08-28) ------------------- - -0.4.5 (2013-07-11) ------------------- - -0.4.4 (2013-07-09) ------------------- - -0.4.3 (2013-07-05) ------------------- - -0.4.2 (2013-07-05) ------------------- - -0.4.1 (2013-07-05) ------------------- - -0.4.0 (2013-06-27) ------------------- -* Restoring test packages and bullet packages. - reverting 3570e8c42f9b394ecbfd9db076b920b41300ad55 to get back more of the packages previously implemented - reverting 04cf29d1b58c660fdc999ab83563a5d4b76ab331 to fix `#7 `_ - -0.3.6 (2013-03-03) ------------------- - -0.3.5 (2013-02-15 14:46) ------------------------- -* 0.3.4 -> 0.3.5 - -0.3.4 (2013-02-15 13:14) ------------------------- -* 0.3.3 -> 0.3.4 - -0.3.3 (2013-02-15 11:30) ------------------------- -* 0.3.2 -> 0.3.3 - -0.3.2 (2013-02-15 00:42) ------------------------- -* 0.3.1 -> 0.3.2 - -0.3.1 (2013-02-14) ------------------- -* 0.3.0 -> 0.3.1 - -0.3.0 (2013-02-13) ------------------- -* switching to version 0.3.0 -* removing packages with missing deps -* adding include folder -* adding tf2_msgs/srv/FrameGraph.srv -* catkin fixes -* catkinizing geometry-experimental -* catkinizing tf2_msgs -* Adding ROS service interface to cpp Buffer -* fix tf messages dependency and name -* add python transform listener -* Compiling version of the buffer server -* Compiling version of the buffer client -* Adding a message that encapsulates errors that can be returned by tf -* A fully specified version of the LookupTransform.action -* Commiting so I can merge -* Adding action for LookupTransform -* Updating CMake to call genaction -* Moving tfMessage to TFMessage to adhere to naming conventions -* Copying tfMessage from tf to new tf2_msgs package -* Creating a package for new tf messages diff --git a/src/geometry2/tf2_msgs/CMakeLists.txt b/src/geometry2/tf2_msgs/CMakeLists.txt deleted file mode 100644 index 7c06a2a..0000000 --- a/src/geometry2/tf2_msgs/CMakeLists.txt +++ /dev/null @@ -1,20 +0,0 @@ -cmake_minimum_required(VERSION 2.8.3) -project(tf2_msgs) - -find_package(catkin REQUIRED COMPONENTS message_generation geometry_msgs actionlib_msgs) -find_package(Boost COMPONENTS thread REQUIRED) - -add_message_files(DIRECTORY msg FILES TF2Error.msg TFMessage.msg) -add_service_files(DIRECTORY srv FILES FrameGraph.srv) - -add_action_files(DIRECTORY action FILES LookupTransform.action) -generate_messages( - DEPENDENCIES actionlib_msgs std_msgs geometry_msgs -) - -catkin_package( - INCLUDE_DIRS include - CATKIN_DEPENDS message_generation geometry_msgs actionlib_msgs) - - - diff --git a/src/geometry2/tf2_msgs/action/LookupTransform.action b/src/geometry2/tf2_msgs/action/LookupTransform.action deleted file mode 100644 index 126e169..0000000 --- a/src/geometry2/tf2_msgs/action/LookupTransform.action +++ /dev/null @@ -1,17 +0,0 @@ -#Simple API -string target_frame -string source_frame -time source_time -duration timeout - -#Advanced API -time target_time -string fixed_frame - -#Whether or not to use the advanced API -bool advanced - ---- -geometry_msgs/TransformStamped 
transform
-tf2_msgs/TF2Error error
----
diff --git a/src/geometry2/tf2_msgs/include/foo b/src/geometry2/tf2_msgs/include/foo deleted file mode 100644 index e69de29..0000000 diff --git a/src/geometry2/tf2_msgs/mainpage.dox b/src/geometry2/tf2_msgs/mainpage.dox deleted file mode 100644 index 19b1334..0000000 --- a/src/geometry2/tf2_msgs/mainpage.dox +++ /dev/null @@ -1,26 +0,0 @@
-/**
-\mainpage
-\htmlinclude manifest.html
-
-\b tf2_msgs is ...
-
-
-
-\section codeapi Code API
-
-
-
-*/
diff --git a/src/geometry2/tf2_msgs/msg/TF2Error.msg b/src/geometry2/tf2_msgs/msg/TF2Error.msg deleted file mode 100644 index e737b64..0000000 --- a/src/geometry2/tf2_msgs/msg/TF2Error.msg +++ /dev/null @@ -1,10 +0,0 @@
-uint8 NO_ERROR = 0
-uint8 LOOKUP_ERROR = 1
-uint8 CONNECTIVITY_ERROR = 2
-uint8 EXTRAPOLATION_ERROR = 3
-uint8 INVALID_ARGUMENT_ERROR = 4
-uint8 TIMEOUT_ERROR = 5
-uint8 TRANSFORM_ERROR = 6
-
-uint8 error
-string error_string
diff --git a/src/geometry2/tf2_msgs/msg/TFMessage.msg b/src/geometry2/tf2_msgs/msg/TFMessage.msg deleted file mode 100644 index fda1e4d..0000000 --- a/src/geometry2/tf2_msgs/msg/TFMessage.msg +++ /dev/null @@ -1 +0,0 @@
-geometry_msgs/TransformStamped[] transforms
diff --git a/src/geometry2/tf2_msgs/package.xml b/src/geometry2/tf2_msgs/package.xml deleted file mode 100644 index ad5b51a..0000000 --- a/src/geometry2/tf2_msgs/package.xml +++ /dev/null @@ -1,26 +0,0 @@
-<package>
-  <name>tf2_msgs</name>
-  <version>0.6.7</version>
-
-  <description>tf2_msgs</description>
-
-  <author>Eitan Marder-Eppstein</author>
-  <maintainer>Tully Foote</maintainer>
-  <license>BSD</license>
-
-  <url>http://www.ros.org/wiki/tf2_msgs</url>
-
-  <buildtool_depend>catkin</buildtool_depend>
-
-  <build_depend>actionlib_msgs</build_depend>
-  <build_depend>geometry_msgs</build_depend>
-  <build_depend>message_generation</build_depend>
-
-  <run_depend>actionlib_msgs</run_depend>
-  <run_depend>geometry_msgs</run_depend>
-  <run_depend>message_generation</run_depend>
-
-</package>
diff --git a/src/geometry2/tf2_msgs/srv/FrameGraph.srv b/src/geometry2/tf2_msgs/srv/FrameGraph.srv deleted file mode 100755 index 568d196..0000000 --- a/src/geometry2/tf2_msgs/srv/FrameGraph.srv +++ /dev/null @@ -1,2 +0,0 @@
----
-string frame_yaml
diff --git a/src/geometry2/tf2_py/CHANGELOG.rst b/src/geometry2/tf2_py/CHANGELOG.rst deleted file mode 100644 index be05691..0000000 --- a/src/geometry2/tf2_py/CHANGELOG.rst +++ /dev/null @@ -1,152 +0,0 @@
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-Changelog for package tf2_py
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-0.6.7 (2020-03-09)
-------------------
-* [Windows][melodic-devel] Fix install locations (`#442 `_)
-* Contributors: Sean Yen
-
-0.6.6 (2020-01-09)
-------------------
-* use .pyd instead of .so on Windows and export symbols `#363 `_ from kejxu/fix_tf2_py_export
-* limit MSVC-only change to MSVC scope (`#10 `_)
-* Fix the pyd extension and export the init function.
-* use windows counterpart for .so extension
-* Contributors: James Xu, Sean Yen, Tully Foote
-
-0.6.5 (2018-11-16)
-------------------
-
-0.6.4 (2018-11-06)
-------------------
-* fix translation vs rotation typo
-  Fixes `#324 `_
-* Add python3.7 compatibility.
-* Contributors: Hans Gaiser, Tully Foote
-
-0.6.3 (2018-07-09)
-------------------
-
-0.6.2 (2018-05-02)
-------------------
-
-0.6.1 (2018-03-21)
-------------------
-
-0.6.0 (2018-03-21)
-------------------
-
-0.5.17 (2018-01-01)
--------------------
-* Merge pull request `#266 `_ from randoms/indigo-devel
-  fix METH_OLDARGS is no longer supported error in python3
-* Merge pull request `#260 `_ from randoms/indigo-devel
-  fix python3 import error
-* Merge pull request `#257 `_ from delftrobotics-forks/python3
-  Make tf2_py python3 compatible again
-* Use string conversion from python_compat.h.
-* Contributors: Maarten de Vries, Tully Foote, randoms - -0.5.16 (2017-07-14) -------------------- -* fix memory leak calling Py_DECREF for all created PyObject -* replaced dependencies on tf2_msgs_gencpp by exported dependencies -* Relax strict type checks at setTransform to only check for members (`#221 `_) -* expose deprecated methods in tf2_py API to support better backwards compatibility. Fixes `#206 `_ -* Contributors: Christopher Wecht, Sergio Ramos, Tully Foote, alex - -0.5.15 (2017-01-24) -------------------- - -0.5.14 (2017-01-16) -------------------- -* Improve tf compatibility (`#192 `_) - getLatestCommonTime() is needed to implement the TF API. - See `ros/geometry#134 `_ -* Add missing type checks at Python/C++ tf2 transform interface `#159 `_ (`#197 `_) -* Make tf2_py compatible with python3. (`#173 `_) - * tf2_py: Use PyUnicode objects for text in python3. - * tf2_py: Make module initialization python3 compatible. - * tf2_py: Fix type definition for python3. - * tf2_py: Move and rename PyObject_BorrowAttrString. -* Contributors: Maarten de Vries, Timo Röhling, alex - -0.5.13 (2016-03-04) -------------------- - -0.5.12 (2015-08-05) -------------------- - -0.5.11 (2015-04-22) -------------------- - -0.5.10 (2015-04-21) -------------------- - -0.5.9 (2015-03-25) ------------------- - -0.5.8 (2015-03-17) ------------------- - -0.5.7 (2014-12-23) ------------------- - -0.5.6 (2014-09-18) ------------------- - -0.5.5 (2014-06-23) ------------------- - -0.5.4 (2014-05-07) ------------------- - -0.5.3 (2014-02-21) ------------------- - -0.5.2 (2014-02-20) ------------------- - -0.5.1 (2014-02-14) ------------------- - -0.5.0 (2014-02-14) ------------------- - -0.4.10 (2013-12-26) -------------------- -* adding support for static transforms in python listener. Fixes `#46 `_ -* Contributors: Tully Foote - -0.4.9 (2013-11-06) ------------------- - -0.4.8 (2013-11-06) ------------------- - -0.4.7 (2013-08-28) ------------------- - -0.4.6 (2013-08-28) ------------------- - -0.4.5 (2013-07-11) ------------------- - -0.4.4 (2013-07-09) ------------------- -* tf2_py: Fixes warning, implicit conversion of NULL - -0.4.3 (2013-07-05) ------------------- - -0.4.2 (2013-07-05) ------------------- - -0.4.1 (2013-07-05) ------------------- - -0.4.0 (2013-06-27) ------------------- -* splitting rospy dependency into tf2_py so tf2 is pure c++ library. diff --git a/src/geometry2/tf2_py/CMakeLists.txt b/src/geometry2/tf2_py/CMakeLists.txt deleted file mode 100644 index 4b5d835..0000000 --- a/src/geometry2/tf2_py/CMakeLists.txt +++ /dev/null @@ -1,162 +0,0 @@ -cmake_minimum_required(VERSION 2.8.3) -project(tf2_py) - -## Find catkin macros and libraries -## if COMPONENTS list like find_package(catkin REQUIRED COMPONENTS xyz) -## is used, also find other catkin packages -find_package(catkin REQUIRED COMPONENTS rospy tf2) - -## System dependencies are found with CMake's conventions -# find_package(Boost REQUIRED COMPONENTS system) - - -find_package(PythonLibs 2 REQUIRED) -include_directories(${PYTHON_INCLUDE_PATH} ${catkin_INCLUDE_DIRS}) - -## Uncomment this if the package has a setup.py. 
This macro ensures -## modules and global scripts declared therein get installed -## See http://ros.org/doc/api/catkin/html/user_guide/setup_dot_py.html -catkin_python_setup() - -####################################### -## Declare ROS messages and services ## -####################################### - -## Generate messages in the 'msg' folder -# add_message_files( -# FILES -# Message1.msg -# Message2.msg -# ) - -## Generate services in the 'srv' folder -# add_service_files( -# FILES -# Service1.srv -# Service2.srv -# ) - -## Generate added messages and services with any dependencies listed here -# generate_messages( -# DEPENDENCIES -# std_msgs # Or other packages containing msgs -# ) - -################################### -## catkin specific configuration ## -################################### -## The catkin_package macro generates cmake config files for your package -## Declare things to be passed to dependent projects -## LIBRARIES: libraries you create in this project that dependent projects also need -## CATKIN_DEPENDS: catkin_packages dependent projects also need -## DEPENDS: system dependencies of this project that dependent projects also need -catkin_package( -# INCLUDE_DIRS include -# LIBRARIES tf2_py - CATKIN_DEPENDS rospy tf2 -# DEPENDS system_lib -) - -########### -## Build ## -########### - -## Specify additional locations of header files -## Your package locations should be listed before other locations -# include_directories(include ${catkin_INCLUDE_DIRS} ${Boost_INCLUDE_DIRS}) - -## Declare a cpp library -# add_library(tf2_py -# src/${PROJECT_NAME}/tf2_py.cpp -# ) - -## Declare a cpp executable -# add_executable(tf2_py_node src/tf2_py_node.cpp) - -## Add cmake target dependencies of the executable/library -## as an example, message headers may need to be generated before nodes -# add_dependencies(tf2_py_node tf2_py_generate_messages_cpp) - -## Specify libraries to link a library or executable target against -# target_link_libraries(tf2_py_node -# ${catkin_LIBRARIES} -# ) - - -# Check for SSE -#!!! rosbuild_check_for_sse() - -# Dynamic linking with tf worked OK, except for exception propagation, which failed in the unit test. -# so build with the objects directly instead. - -link_libraries(${PYTHON_LIBRARIES}) -add_library(tf2_py src/tf2_py.cpp) -target_link_libraries(tf2_py ${catkin_LIBRARIES}) -add_dependencies(tf2_py ${catkin_EXPORTED_TARGETS}) - -if(WIN32) - # use .pyd extension on Windows - set_target_properties(tf2_py PROPERTIES OUTPUT_NAME "_tf2" SUFFIX ".pyd") -else() - set_target_properties(tf2_py PROPERTIES COMPILE_FLAGS "-g -Wno-missing-field-initializers") - set_target_properties(tf2_py PROPERTIES OUTPUT_NAME tf2 PREFIX "_" SUFFIX ".so") -endif() -set_target_properties(tf2_py PROPERTIES - ARCHIVE_OUTPUT_DIRECTORY ${CATKIN_DEVEL_PREFIX}/${CATKIN_PACKAGE_PYTHON_DESTINATION} - LIBRARY_OUTPUT_DIRECTORY ${CATKIN_DEVEL_PREFIX}/${CATKIN_PACKAGE_PYTHON_DESTINATION} - RUNTIME_OUTPUT_DIRECTORY ${CATKIN_DEVEL_PREFIX}/${CATKIN_PACKAGE_PYTHON_DESTINATION} -) -#!! rosbuild_add_compile_flags(tf2_py ${SSE_FLAGS}) #conditionally adds sse flags if available - - -############# -## Install ## -############# - -# all install targets should use catkin DESTINATION variables -# See http://ros.org/doc/api/catkin/html/adv_user_guide/variables.html - -## Mark executable scripts (Python etc.) 
for installation
-## in contrast to setup.py, you can choose the destination
-# install(PROGRAMS
-#   scripts/my_python_script
-#   DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION}
-# )
-
-## Mark executables and/or libraries for installation
-# install(TARGETS tf2_py tf2_py_node
-#   ARCHIVE DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION}
-#   LIBRARY DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION}
-#   RUNTIME DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION}
-# )
-
-## Mark cpp header files for installation
-# install(DIRECTORY include/${PROJECT_NAME}/
-#   DESTINATION ${CATKIN_PACKAGE_INCLUDE_DESTINATION}
-#   FILES_MATCHING PATTERN "*.h"
-#   PATTERN ".svn" EXCLUDE
-# )
-
-## Mark other files for installation (e.g. launch and bag files, etc.)
-# install(FILES
-#   # myfile1
-#   # myfile2
-#   DESTINATION ${CATKIN_PACKAGE_SHARE_DESTINATION}
-# )
-
-install(FILES $<TARGET_FILE:tf2_py>
-  DESTINATION ${CATKIN_PACKAGE_PYTHON_DESTINATION}
-)
-
-#############
-## Testing ##
-#############
-
-## Add gtest based cpp test target and link libraries
-# catkin_add_gtest(${PROJECT_NAME}-test test/test_tf2_py.cpp)
-# if(TARGET ${PROJECT_NAME}-test)
-#   target_link_libraries(${PROJECT_NAME}-test ${PROJECT_NAME})
-# endif()
-
-## Add folders to be run by python nosetests
-# catkin_add_nosetests(test)
diff --git a/src/geometry2/tf2_py/package.xml b/src/geometry2/tf2_py/package.xml deleted file mode 100644 index 3413010..0000000 --- a/src/geometry2/tf2_py/package.xml +++ /dev/null @@ -1,55 +0,0 @@
-<package>
-  <name>tf2_py</name>
-  <version>0.6.7</version>
-  <description>The tf2_py package</description>
-
-  <maintainer>Tully Foote</maintainer>
-
-  <license>BSD</license>
-
-  <url>http://ros.org/wiki/tf2_py</url>
-
-  <buildtool_depend>catkin</buildtool_depend>
-  <build_depend>tf2</build_depend>
-  <build_depend>rospy</build_depend>
-  <run_depend>tf2</run_depend>
-  <run_depend>rospy</run_depend>
-
-</package>
\ No newline at end of file
diff --git a/src/geometry2/tf2_py/setup.py b/src/geometry2/tf2_py/setup.py deleted file mode 100644 index a21d885..0000000 --- a/src/geometry2/tf2_py/setup.py +++ /dev/null @@ -1,12 +0,0 @@
-#!/usr/bin/env python
-
-from distutils.core import setup
-from catkin_pkg.python_setup import generate_distutils_setup
-
-d = generate_distutils_setup(
-    packages=['tf2_py'],
-    package_dir={'': 'src'},
-    requires=['rospy', 'geometry_msgs', 'tf2_msgs']
-)
-
-setup(**d)
diff --git a/src/geometry2/tf2_py/src/python_compat.h b/src/geometry2/tf2_py/src/python_compat.h deleted file mode 100644 index cb4c58c..0000000 --- a/src/geometry2/tf2_py/src/python_compat.h +++ /dev/null @@ -1,54 +0,0 @@
-#ifndef TF2_PY_PYTHON_COMPAT_H
-#define TF2_PY_PYTHON_COMPAT_H
-
-#include <Python.h>
-
-#include <string>
-
-inline PyObject *stringToPython(const std::string &input)
-{
-#if PY_MAJOR_VERSION >= 3
-  return PyUnicode_FromStringAndSize(input.c_str(), input.size());
-#else
-  return PyString_FromStringAndSize(input.c_str(), input.size());
-#endif
-}
-
-inline PyObject *stringToPython(const char *input)
-{
-#if PY_MAJOR_VERSION >= 3
-  return PyUnicode_FromString(input);
-#else
-  return PyString_FromString(input);
-#endif
-}
-
-inline std::string stringFromPython(PyObject * input)
-{
-  Py_ssize_t size;
-#if PY_MAJOR_VERSION >= 3
-  const char * data;
-  data = PyUnicode_AsUTF8AndSize(input, &size);
-#else
-  char * data;
-  PyString_AsStringAndSize(input, &data, &size);
-#endif
-  return std::string(data, size);
-}
-
-inline PyObject *pythonImport(const std::string & name)
-{
-  PyObject *py_name = stringToPython(name);
-  PyObject *module = PyImport_Import(py_name);
-  Py_XDECREF(py_name);
-  return module;
-}
-
-inline PyObject *pythonBorrowAttrString(PyObject* o, const char *name)
-{
-  PyObject *r = PyObject_GetAttrString(o, name);
-  Py_XDECREF(r);
-  return r;
-}
-
-#endif
diff --git
a/src/geometry2/tf2_py/src/tf2_py.cpp b/src/geometry2/tf2_py/src/tf2_py.cpp deleted file mode 100644 index e6c2741..0000000 --- a/src/geometry2/tf2_py/src/tf2_py.cpp +++ /dev/null @@ -1,644 +0,0 @@
-#include <Python.h>
-
-#include <tf2/buffer_core.h>
-#include <tf2/exceptions.h>
-
-#include "python_compat.h"
-
-// Run x (a tf method, catching TF's exceptions and reraising them as Python exceptions)
-//
-#define WRAP(x) \
-  do { \
-  try \
-  { \
-    x; \
-  } \
-  catch (const tf2::ConnectivityException &e) \
-  { \
-    PyErr_SetString(tf2_connectivityexception, e.what()); \
-    return NULL; \
-  } \
-  catch (const tf2::LookupException &e) \
-  { \
-    PyErr_SetString(tf2_lookupexception, e.what()); \
-    return NULL; \
-  } \
-  catch (const tf2::ExtrapolationException &e) \
-  { \
-    PyErr_SetString(tf2_extrapolationexception, e.what()); \
-    return NULL; \
-  } \
-  catch (const tf2::InvalidArgumentException &e) \
-  { \
-    PyErr_SetString(tf2_invalidargumentexception, e.what()); \
-    return NULL; \
-  } \
-  catch (const tf2::TimeoutException &e) \
-  { \
-    PyErr_SetString(tf2_timeoutexception, e.what()); \
-    return NULL; \
-  } \
-  catch (const tf2::TransformException &e) \
-  { \
-    PyErr_SetString(tf2_exception, e.what()); \
-    return NULL; \
-  } \
-  } while (0)
-
-static PyObject *pModulerospy = NULL;
-static PyObject *pModulegeometrymsgs = NULL;
-static PyObject *tf2_exception = NULL;
-static PyObject *tf2_connectivityexception = NULL, *tf2_lookupexception = NULL, *tf2_extrapolationexception = NULL,
-  *tf2_invalidargumentexception = NULL, *tf2_timeoutexception = NULL;
-
-struct buffer_core_t {
-  PyObject_HEAD
-  tf2::BufferCore *bc;
-};
-
-
-static PyTypeObject buffer_core_Type = {
-#if PY_MAJOR_VERSION < 3
-  PyObject_HEAD_INIT(NULL)
-  0,                          /*size*/
-# else
-  PyVarObject_HEAD_INIT(NULL, 0)
-#endif
-  "_tf2.BufferCore",          /*name*/
-  sizeof(buffer_core_t),      /*basicsize*/
-};
-
-static PyObject *transform_converter(const geometry_msgs::TransformStamped* transform)
-{
-  PyObject *pclass, *pargs, *pinst = NULL;
-  pclass = PyObject_GetAttrString(pModulegeometrymsgs, "TransformStamped");
-  if(pclass == NULL)
-  {
-    printf("Can't get geometry_msgs.msg.TransformStamped\n");
-    return NULL;
-  }
-
-  pargs = Py_BuildValue("()");
-  if(pargs == NULL)
-  {
-    printf("Can't build argument list\n");
-    return NULL;
-  }
-
-  pinst = PyEval_CallObject(pclass, pargs);
-  Py_DECREF(pclass);
-  Py_DECREF(pargs);
-  if(pinst == NULL)
-  {
-    printf("Can't create class\n");
-    return NULL;
-  }
-
-  //we need to convert the time to python
-  PyObject *rospy_time = PyObject_GetAttrString(pModulerospy, "Time");
-  PyObject *args = Py_BuildValue("ii", transform->header.stamp.sec, transform->header.stamp.nsec);
-  PyObject *time_obj = PyObject_CallObject(rospy_time, args);
-  Py_DECREF(args);
-  Py_DECREF(rospy_time);
-
-  PyObject* pheader = PyObject_GetAttrString(pinst, "header");
-  PyObject_SetAttrString(pheader, "stamp", time_obj);
-  Py_DECREF(time_obj);
-
-  PyObject *frame_id = stringToPython(transform->header.frame_id);
-  PyObject_SetAttrString(pheader, "frame_id", frame_id);
-  Py_DECREF(frame_id);
-  Py_DECREF(pheader);
-
-  PyObject *ptransform = PyObject_GetAttrString(pinst, "transform");
-  PyObject *ptranslation = PyObject_GetAttrString(ptransform, "translation");
-  PyObject *protation = PyObject_GetAttrString(ptransform, "rotation");
-  Py_DECREF(ptransform);
-
-  PyObject *child_frame_id = stringToPython(transform->child_frame_id);
-  PyObject_SetAttrString(pinst, "child_frame_id", child_frame_id);
-  Py_DECREF(child_frame_id);
-
-  PyObject *trans_x = PyFloat_FromDouble(transform->transform.translation.x);
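  // PyFloat_FromDouble returns a new reference for each component, and
  // PyObject_SetAttrString does not steal it, so every temporary created
  // here is released with a matching Py_DECREF once it has been attached.
-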
PyObject *trans_y = PyFloat_FromDouble(transform->transform.translation.y); - PyObject *trans_z = PyFloat_FromDouble(transform->transform.translation.z); - PyObject_SetAttrString(ptranslation, "x", trans_x); - PyObject_SetAttrString(ptranslation, "y", trans_y); - PyObject_SetAttrString(ptranslation, "z", trans_z); - Py_DECREF(trans_x); - Py_DECREF(trans_y); - Py_DECREF(trans_z); - Py_DECREF(ptranslation); - - PyObject *rot_x = PyFloat_FromDouble(transform->transform.rotation.x); - PyObject *rot_y = PyFloat_FromDouble(transform->transform.rotation.y); - PyObject *rot_z = PyFloat_FromDouble(transform->transform.rotation.z); - PyObject *rot_w = PyFloat_FromDouble(transform->transform.rotation.w); - PyObject_SetAttrString(protation, "x", rot_x); - PyObject_SetAttrString(protation, "y", rot_y); - PyObject_SetAttrString(protation, "z", rot_z); - PyObject_SetAttrString(protation, "w", rot_w); - Py_DECREF(rot_x); - Py_DECREF(rot_y); - Py_DECREF(rot_z); - Py_DECREF(rot_w); - Py_DECREF(protation); - - return pinst; -} - -static int rostime_converter(PyObject *obj, ros::Time *rt) -{ - PyObject *tsr = PyObject_CallMethod(obj, (char*)"to_sec", NULL); - if (tsr == NULL) { - PyErr_SetString(PyExc_TypeError, "time must have a to_sec method, e.g. rospy.Time or rospy.Duration"); - return 0; - } else { - (*rt).fromSec(PyFloat_AsDouble(tsr)); - Py_DECREF(tsr); - return 1; - } -} - -static int rosduration_converter(PyObject *obj, ros::Duration *rt) -{ - PyObject *tsr = PyObject_CallMethod(obj, (char*)"to_sec", NULL); - if (tsr == NULL) { - PyErr_SetString(PyExc_TypeError, "time must have a to_sec method, e.g. rospy.Time or rospy.Duration"); - return 0; - } else { - (*rt).fromSec(PyFloat_AsDouble(tsr)); - Py_DECREF(tsr); - return 1; - } -} - -static int BufferCore_init(PyObject *self, PyObject *args, PyObject *kw) -{ - ros::Duration cache_time; - - cache_time.fromSec(tf2::BufferCore::DEFAULT_CACHE_TIME); - - if (!PyArg_ParseTuple(args, "|O&", rosduration_converter, &cache_time)) - return -1; - - ((buffer_core_t*)self)->bc = new tf2::BufferCore(cache_time); - - return 0; -} - -/* This may need to be implemented later if we decide to have it in the core -static PyObject *getTFPrefix(PyObject *self, PyObject *args) -{ - if (!PyArg_ParseTuple(args, "")) - return NULL; - tf::Transformer *t = ((transformer_t*)self)->t; - return stringToPython(t->getTFPrefix()); -} -*/ - -static PyObject *allFramesAsYAML(PyObject *self, PyObject *args) -{ - tf2::BufferCore *bc = ((buffer_core_t*)self)->bc; - return stringToPython(bc->allFramesAsYAML()); -} - -static PyObject *allFramesAsString(PyObject *self, PyObject *args) -{ - tf2::BufferCore *bc = ((buffer_core_t*)self)->bc; - return stringToPython(bc->allFramesAsString()); -} - -static PyObject *canTransformCore(PyObject *self, PyObject *args, PyObject *kw) -{ - tf2::BufferCore *bc = ((buffer_core_t*)self)->bc; - char *target_frame, *source_frame; - ros::Time time; - static const char *keywords[] = { "target_frame", "source_frame", "time", NULL }; - - if (!PyArg_ParseTupleAndKeywords(args, kw, "ssO&", (char**)keywords, &target_frame, &source_frame, rostime_converter, &time)) - return NULL; - std::string error_msg; - bool can_transform = bc->canTransform(target_frame, source_frame, time, &error_msg); - //return PyBool_FromLong(t->canTransform(target_frame, source_frame, time)); - return Py_BuildValue("bs", can_transform, error_msg.c_str()); -} - -static PyObject *canTransformFullCore(PyObject *self, PyObject *args, PyObject *kw) -{ - tf2::BufferCore *bc = 
((buffer_core_t*)self)->bc; - char *target_frame, *source_frame, *fixed_frame; - ros::Time target_time, source_time; - static const char *keywords[] = { "target_frame", "target_time", "source_frame", "source_time", "fixed_frame", NULL }; - - if (!PyArg_ParseTupleAndKeywords(args, kw, "sO&sO&s", (char**)keywords, - &target_frame, - rostime_converter, - &target_time, - &source_frame, - rostime_converter, - &source_time, - &fixed_frame)) - return NULL; - std::string error_msg; - bool can_transform = bc->canTransform(target_frame, target_time, source_frame, source_time, fixed_frame, &error_msg); - //return PyBool_FromLong(t->canTransform(target_frame, target_time, source_frame, source_time, fixed_frame)); - return Py_BuildValue("bs", can_transform, error_msg.c_str()); -} - -static PyObject *asListOfStrings(std::vector< std::string > los) -{ - PyObject *r = PyList_New(los.size()); - size_t i; - for (i = 0; i < los.size(); i++) { - PyList_SetItem(r, i, stringToPython(los[i])); - } - return r; -} - -static PyObject *_chain(PyObject *self, PyObject *args, PyObject *kw) -{ - tf2::BufferCore *bc = ((buffer_core_t*)self)->bc; - char *target_frame, *source_frame, *fixed_frame; - ros::Time target_time, source_time; - std::vector< std::string > output; - static const char *keywords[] = { "target_frame", "target_time", "source_frame", "source_time", "fixed_frame", NULL }; - - if (!PyArg_ParseTupleAndKeywords(args, kw, "sO&sO&s", (char**)keywords, - &target_frame, - rostime_converter, - &target_time, - &source_frame, - rostime_converter, - &source_time, - &fixed_frame)) - return NULL; - - WRAP(bc->_chainAsVector(target_frame, target_time, source_frame, source_time, fixed_frame, output)); - return asListOfStrings(output); -} - -static PyObject *getLatestCommonTime(PyObject *self, PyObject *args) -{ - tf2::BufferCore *bc = ((buffer_core_t*)self)->bc; - char *target_frame, *source_frame; - tf2::CompactFrameID target_id, source_id; - ros::Time time; - std::string error_string; - - if (!PyArg_ParseTuple(args, "ss", &target_frame, &source_frame)) - return NULL; - WRAP(target_id = bc->_validateFrameId("get_latest_common_time", target_frame)); - WRAP(source_id = bc->_validateFrameId("get_latest_common_time", source_frame)); - int r = bc->_getLatestCommonTime(target_id, source_id, time, &error_string); - if (r == 0) { - PyObject *rospy_time = PyObject_GetAttrString(pModulerospy, "Time"); - PyObject *args = Py_BuildValue("ii", time.sec, time.nsec); - PyObject *ob = PyObject_CallObject(rospy_time, args); - Py_DECREF(args); - Py_DECREF(rospy_time); - return ob; - } else { - PyErr_SetString(tf2_exception, error_string.c_str()); - return NULL; - } -} - -static PyObject *lookupTransformCore(PyObject *self, PyObject *args, PyObject *kw) -{ - tf2::BufferCore *bc = ((buffer_core_t*)self)->bc; - char *target_frame, *source_frame; - ros::Time time; - static const char *keywords[] = { "target_frame", "source_frame", "time", NULL }; - - if (!PyArg_ParseTupleAndKeywords(args, kw, "ssO&", (char**)keywords, &target_frame, &source_frame, rostime_converter, &time)) - return NULL; - geometry_msgs::TransformStamped transform; - WRAP(transform = bc->lookupTransform(target_frame, source_frame, time)); - geometry_msgs::Vector3 origin = transform.transform.translation; - geometry_msgs::Quaternion rotation = transform.transform.rotation; - //TODO: Create a converter that will actually return a python message - return Py_BuildValue("O&", transform_converter, &transform); - //return Py_BuildValue("(ddd)(dddd)", - // origin.x, origin.y, 
origin.z, - // rotation.x, rotation.y, rotation.z, rotation.w); -} - -static PyObject *lookupTransformFullCore(PyObject *self, PyObject *args, PyObject *kw) -{ - tf2::BufferCore *bc = ((buffer_core_t*)self)->bc; - char *target_frame, *source_frame, *fixed_frame; - ros::Time target_time, source_time; - static const char *keywords[] = { "target_frame", "target_time", "source_frame", "source_time", "fixed_frame", NULL }; - - if (!PyArg_ParseTupleAndKeywords(args, kw, "sO&sO&s", (char**)keywords, - &target_frame, - rostime_converter, - &target_time, - &source_frame, - rostime_converter, - &source_time, - &fixed_frame)) - return NULL; - geometry_msgs::TransformStamped transform; - WRAP(transform = bc->lookupTransform(target_frame, target_time, source_frame, source_time, fixed_frame)); - geometry_msgs::Vector3 origin = transform.transform.translation; - geometry_msgs::Quaternion rotation = transform.transform.rotation; - //TODO: Create a converter that will actually return a python message - return Py_BuildValue("O&", transform_converter, &transform); -} -/* -static PyObject *lookupTwistCore(PyObject *self, PyObject *args, PyObject *kw) -{ - tf2::BufferCore *bc = ((buffer_core_t*)self)->bc; - char *tracking_frame, *observation_frame; - ros::Time time; - ros::Duration averaging_interval; - static const char *keywords[] = { "tracking_frame", "observation_frame", "time", "averaging_interval", NULL }; - - if (!PyArg_ParseTupleAndKeywords(args, kw, "ssO&O&", (char**)keywords, &tracking_frame, &observation_frame, rostime_converter, &time, rosduration_converter, &averaging_interval)) - return NULL; - geometry_msgs::Twist twist; - WRAP(twist = bc->lookupTwist(tracking_frame, observation_frame, time, averaging_interval)); - - return Py_BuildValue("(ddd)(ddd)", - twist.linear.x, twist.linear.y, twist.linear.z, - twist.angular.x, twist.angular.y, twist.angular.z); -} - -static PyObject *lookupTwistFullCore(PyObject *self, PyObject *args) -{ - tf2::BufferCore *bc = ((buffer_core_t*)self)->bc; - char *tracking_frame, *observation_frame, *reference_frame, *reference_point_frame; - ros::Time time; - ros::Duration averaging_interval; - double px, py, pz; - - if (!PyArg_ParseTuple(args, "sss(ddd)sO&O&", - &tracking_frame, - &observation_frame, - &reference_frame, - &px, &py, &pz, - &reference_point_frame, - rostime_converter, &time, - rosduration_converter, &averaging_interval)) - return NULL; - geometry_msgs::Twist twist; - tf::Point pt(px, py, pz); - WRAP(twist = bc->lookupTwist(tracking_frame, observation_frame, reference_frame, pt, reference_point_frame, time, averaging_interval)); - - return Py_BuildValue("(ddd)(ddd)", - twist.linear.x, twist.linear.y, twist.linear.z, - twist.angular.x, twist.angular.y, twist.angular.z); -} -*/ -static inline int checkTranslationType(PyObject* o) -{ - PyTypeObject *translation_type = (PyTypeObject*) PyObject_GetAttrString(pModulegeometrymsgs, "Vector3"); - int type_check = PyObject_TypeCheck(o, translation_type); - int attr_check = PyObject_HasAttrString(o, "x") && - PyObject_HasAttrString(o, "y") && - PyObject_HasAttrString(o, "z"); - if (!type_check) { - PyErr_WarnEx(PyExc_UserWarning, "translation should be of type Vector3", 1); - } - return attr_check; -} - -static inline int checkRotationType(PyObject* o) -{ - PyTypeObject *rotation_type = (PyTypeObject*) PyObject_GetAttrString(pModulegeometrymsgs, "Quaternion"); - int type_check = PyObject_TypeCheck(o, rotation_type); - int attr_check = PyObject_HasAttrString(o, "w") && - PyObject_HasAttrString(o, "x") && - 
PyObject_HasAttrString(o, "y") && - PyObject_HasAttrString(o, "z"); - if (!type_check) { - PyErr_WarnEx(PyExc_UserWarning, "rotation should be of type Quaternion", 1); - } - return attr_check; -} - -static PyObject *setTransform(PyObject *self, PyObject *args) -{ - tf2::BufferCore *bc = ((buffer_core_t*)self)->bc; - PyObject *py_transform; - char *authority; - - if (!PyArg_ParseTuple(args, "Os", &py_transform, &authority)) - return NULL; - - geometry_msgs::TransformStamped transform; - PyObject *header = pythonBorrowAttrString(py_transform, "header"); - transform.child_frame_id = stringFromPython(pythonBorrowAttrString(py_transform, "child_frame_id")); - transform.header.frame_id = stringFromPython(pythonBorrowAttrString(header, "frame_id")); - if (rostime_converter(pythonBorrowAttrString(header, "stamp"), &transform.header.stamp) != 1) - return NULL; - - PyObject *mtransform = pythonBorrowAttrString(py_transform, "transform"); - - PyObject *translation = pythonBorrowAttrString(mtransform, "translation"); - if (!checkTranslationType(translation)) { - PyErr_SetString(PyExc_TypeError, "transform.translation must have members x, y, z"); - return NULL; - } - - transform.transform.translation.x = PyFloat_AsDouble(pythonBorrowAttrString(translation, "x")); - transform.transform.translation.y = PyFloat_AsDouble(pythonBorrowAttrString(translation, "y")); - transform.transform.translation.z = PyFloat_AsDouble(pythonBorrowAttrString(translation, "z")); - - PyObject *rotation = pythonBorrowAttrString(mtransform, "rotation"); - if (!checkRotationType(rotation)) { - PyErr_SetString(PyExc_TypeError, "transform.rotation must have members w, x, y, z"); - return NULL; - } - - transform.transform.rotation.x = PyFloat_AsDouble(pythonBorrowAttrString(rotation, "x")); - transform.transform.rotation.y = PyFloat_AsDouble(pythonBorrowAttrString(rotation, "y")); - transform.transform.rotation.z = PyFloat_AsDouble(pythonBorrowAttrString(rotation, "z")); - transform.transform.rotation.w = PyFloat_AsDouble(pythonBorrowAttrString(rotation, "w")); - - bc->setTransform(transform, authority); - Py_RETURN_NONE; -} - -static PyObject *setTransformStatic(PyObject *self, PyObject *args) -{ - tf2::BufferCore *bc = ((buffer_core_t*)self)->bc; - PyObject *py_transform; - char *authority; - - if (!PyArg_ParseTuple(args, "Os", &py_transform, &authority)) - return NULL; - - geometry_msgs::TransformStamped transform; - PyObject *header = pythonBorrowAttrString(py_transform, "header"); - transform.child_frame_id = stringFromPython(pythonBorrowAttrString(py_transform, "child_frame_id")); - transform.header.frame_id = stringFromPython(pythonBorrowAttrString(header, "frame_id")); - if (rostime_converter(pythonBorrowAttrString(header, "stamp"), &transform.header.stamp) != 1) - return NULL; - - PyObject *mtransform = pythonBorrowAttrString(py_transform, "transform"); - PyObject *translation = pythonBorrowAttrString(mtransform, "translation"); - if (!checkTranslationType(translation)) { - PyErr_SetString(PyExc_TypeError, "transform.translation must be of type Vector3"); - return NULL; - } - - transform.transform.translation.x = PyFloat_AsDouble(pythonBorrowAttrString(translation, "x")); - transform.transform.translation.y = PyFloat_AsDouble(pythonBorrowAttrString(translation, "y")); - transform.transform.translation.z = PyFloat_AsDouble(pythonBorrowAttrString(translation, "z")); - - PyObject *rotation = pythonBorrowAttrString(mtransform, "rotation"); - if (!checkRotationType(rotation)) { - PyErr_SetString(PyExc_TypeError, 
"transform.rotation must be of type Quaternion"); - return NULL; - } - - transform.transform.rotation.x = PyFloat_AsDouble(pythonBorrowAttrString(rotation, "x")); - transform.transform.rotation.y = PyFloat_AsDouble(pythonBorrowAttrString(rotation, "y")); - transform.transform.rotation.z = PyFloat_AsDouble(pythonBorrowAttrString(rotation, "z")); - transform.transform.rotation.w = PyFloat_AsDouble(pythonBorrowAttrString(rotation, "w")); - - // only difference to above is is_static == True - bc->setTransform(transform, authority, true); - Py_RETURN_NONE; -} - -static PyObject *clear(PyObject *self, PyObject *args) -{ - tf2::BufferCore *bc = ((buffer_core_t*)self)->bc; - bc->clear(); - Py_RETURN_NONE; -} - -static PyObject *_frameExists(PyObject *self, PyObject *args) -{ - tf2::BufferCore *bc = ((buffer_core_t*)self)->bc; - char *frame_id_str; - if (!PyArg_ParseTuple(args, "s", &frame_id_str)) - return NULL; - return PyBool_FromLong(bc->_frameExists(frame_id_str)); -} - -static PyObject *_getFrameStrings(PyObject *self, PyObject *args) -{ - tf2::BufferCore *bc = ((buffer_core_t*)self)->bc; - std::vector< std::string > ids; - bc->_getFrameStrings(ids); - return asListOfStrings(ids); -} - -static PyObject *_allFramesAsDot(PyObject *self, PyObject *args, PyObject *kw) -{ - tf2::BufferCore *bc = ((buffer_core_t*)self)->bc; - static const char *keywords[] = { "time", NULL }; - ros::Time time; - if (!PyArg_ParseTupleAndKeywords(args, kw, "|O&", (char**)keywords, rostime_converter, &time)) - return NULL; - return stringToPython(bc->_allFramesAsDot(time.toSec())); -} - - -static struct PyMethodDef buffer_core_methods[] = -{ - {"all_frames_as_yaml", allFramesAsYAML, METH_VARARGS}, - {"all_frames_as_string", allFramesAsString, METH_VARARGS}, - {"set_transform", setTransform, METH_VARARGS}, - {"set_transform_static", setTransformStatic, METH_VARARGS}, - {"can_transform_core", (PyCFunction)canTransformCore, METH_VARARGS | METH_KEYWORDS}, - {"can_transform_full_core", (PyCFunction)canTransformFullCore, METH_VARARGS | METH_KEYWORDS}, - {"_chain", (PyCFunction)_chain, METH_VARARGS | METH_KEYWORDS}, - {"clear", (PyCFunction)clear, METH_VARARGS | METH_KEYWORDS}, - {"_frameExists", (PyCFunction)_frameExists, METH_VARARGS}, - {"_getFrameStrings", (PyCFunction)_getFrameStrings, METH_VARARGS}, - {"_allFramesAsDot", (PyCFunction)_allFramesAsDot, METH_VARARGS | METH_KEYWORDS}, - {"get_latest_common_time", (PyCFunction)getLatestCommonTime, METH_VARARGS}, - {"lookup_transform_core", (PyCFunction)lookupTransformCore, METH_VARARGS | METH_KEYWORDS}, - {"lookup_transform_full_core", (PyCFunction)lookupTransformFullCore, METH_VARARGS | METH_KEYWORDS}, - //{"lookupTwistCore", (PyCFunction)lookupTwistCore, METH_VARARGS | METH_KEYWORDS}, - //{"lookupTwistFullCore", lookupTwistFullCore, METH_VARARGS}, - //{"getTFPrefix", (PyCFunction)getTFPrefix, METH_VARARGS}, - {NULL, NULL} -}; - -static PyMethodDef module_methods[] = { - // {"Transformer", mkTransformer, METH_VARARGS}, - {0, 0, 0}, -}; - -bool staticInit() { -#if PYTHON_API_VERSION >= 1007 - tf2_exception = PyErr_NewException((char*)"tf2.TransformException", NULL, NULL); - tf2_connectivityexception = PyErr_NewException((char*)"tf2.ConnectivityException", tf2_exception, NULL); - tf2_lookupexception = PyErr_NewException((char*)"tf2.LookupException", tf2_exception, NULL); - tf2_extrapolationexception = PyErr_NewException((char*)"tf2.ExtrapolationException", tf2_exception, NULL); - tf2_invalidargumentexception = PyErr_NewException((char*)"tf2.InvalidArgumentException", 
tf2_exception, NULL); - tf2_timeoutexception = PyErr_NewException((char*)"tf2.TimeoutException", tf2_exception, NULL); -#else - tf2_exception = stringToPython("tf2.error"); - tf2_connectivityexception = stringToPython("tf2.ConnectivityException"); - tf2_lookupexception = stringToPython("tf2.LookupException"); - tf2_extrapolationexception = stringToPython("tf2.ExtrapolationException"); - tf2_invalidargumentexception = stringToPython("tf2.InvalidArgumentException"); - tf2_timeoutexception = stringToPython("tf2.TimeoutException"); -#endif - - pModulerospy = pythonImport("rospy"); - pModulegeometrymsgs = pythonImport("geometry_msgs.msg"); - - if(pModulegeometrymsgs == NULL) - { - printf("Cannot load geometry_msgs module"); - return false; - } - - buffer_core_Type.tp_alloc = PyType_GenericAlloc; - buffer_core_Type.tp_new = PyType_GenericNew; - buffer_core_Type.tp_init = BufferCore_init; - buffer_core_Type.tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE; - buffer_core_Type.tp_methods = buffer_core_methods; - if (PyType_Ready(&buffer_core_Type) != 0) - return false; - return true; -} - -PyObject *moduleInit(PyObject *m) { - PyModule_AddObject(m, "BufferCore", (PyObject *)&buffer_core_Type); - PyObject *d = PyModule_GetDict(m); - PyDict_SetItemString(d, "TransformException", tf2_exception); - PyDict_SetItemString(d, "ConnectivityException", tf2_connectivityexception); - PyDict_SetItemString(d, "LookupException", tf2_lookupexception); - PyDict_SetItemString(d, "ExtrapolationException", tf2_extrapolationexception); - PyDict_SetItemString(d, "InvalidArgumentException", tf2_invalidargumentexception); - PyDict_SetItemString(d, "TimeoutException", tf2_timeoutexception); - return m; -} - -#if PY_MAJOR_VERSION < 3 -extern "C" -{ - ROS_HELPER_EXPORT void init_tf2() - { - if (!staticInit()) - return; - moduleInit(Py_InitModule("_tf2", module_methods)); - } -} - -#else -struct PyModuleDef tf_module = { - PyModuleDef_HEAD_INIT, // base - "_tf2", // name - NULL, // docstring - -1, // state size (but we're using globals) - module_methods // methods -}; - -PyMODINIT_FUNC PyInit__tf2() -{ - if (!staticInit()) - return NULL; - return moduleInit(PyModule_Create(&tf_module)); -} -#endif diff --git a/src/geometry2/tf2_py/src/tf2_py/__init__.py b/src/geometry2/tf2_py/src/tf2_py/__init__.py deleted file mode 100644 index 5a38341..0000000 --- a/src/geometry2/tf2_py/src/tf2_py/__init__.py +++ /dev/null @@ -1,38 +0,0 @@ -#! /usr/bin/python -#*********************************************************** -#* Software License Agreement (BSD License) -#* -#* Copyright (c) 2009, Willow Garage, Inc. -#* All rights reserved. -#* -#* Redistribution and use in source and binary forms, with or without -#* modification, are permitted provided that the following conditions -#* are met: -#* -#* * Redistributions of source code must retain the above copyright -#* notice, this list of conditions and the following disclaimer. -#* * Redistributions in binary form must reproduce the above -#* copyright notice, this list of conditions and the following -#* disclaimer in the documentation and/or other materials provided -#* with the distribution. -#* * Neither the name of Willow Garage, Inc. nor the names of its -#* contributors may be used to endorse or promote products derived -#* from this software without specific prior written permission. 
-#* -#* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -#* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -#* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS -#* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE -#* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, -#* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, -#* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -#* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -#* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT -#* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN -#* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -#* POSSIBILITY OF SUCH DAMAGE. -#* -#* Author: Eitan Marder-Eppstein -#*********************************************************** -from __future__ import absolute_import -from ._tf2 import * diff --git a/src/geometry2/tf2_ros/CHANGELOG.rst b/src/geometry2/tf2_ros/CHANGELOG.rst deleted file mode 100644 index f2f3e4f..0000000 --- a/src/geometry2/tf2_ros/CHANGELOG.rst +++ /dev/null @@ -1,327 +0,0 @@ -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -Changelog for package tf2_ros -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -0.6.7 (2020-03-09) ------------------- -* [windows][melodic] more portable fixes. (`#443 `_) -* [Windows][melodic-devel] Fix install locations (`#442 `_) -* Fixed warnings in message_filter.h (`#434 `_) - the variables are not used in function body and caused -Wunused-parameter to trigger with -Wall -* Contributors: Sean Yen, moooeeeep - -0.6.6 (2020-01-09) ------------------- -* Remove roslib.load_manifest `#404 `_ -* Fix message filter `#402 `_ -* resolve virtual function call in destructor -* remove pending callbacks in clear() -* spelling fix: seperate -> separate `#372 `_ -* Fix dangling iterator references in buffer_server.cpp `#369 `_ -* Remove some useless code from buffer_server_main.cpp `#368 `_ -* Mark check_frequency as deprecated in docstring. -* Follow `#337 `_: use actionlib API in BufferClient::processGoal() -* Test for equality to None with 'is' instead of '==' `#355 `_ -* added parameter to advertise tf2-frames as a service, if needed -* Contributors: Daniel Ingram, Emre Sahin, JonasTietz, Lucas Walter, Michael Grupp, Robert Haschke, Tamaki Nishino, Tully Foote - -0.6.5 (2018-11-16) ------------------- -* Protect the time reset logic from a race condition. - Fixes `#341 `_ - This could incorrectly trigger a buffer clear if two concurrent callbacks were invoked. -* Contributors: Tully Foote - -0.6.4 (2018-11-06) ------------------- -* fix(buffer-client): Use actionlib api for obtaining result - Use the API provided by actionlib for waiting for result. This will improve the response time and prevent problems with custom solutions (see `#178 `_). This change makes constructor parameter check_frequency obsolete and deprecates it. -* Add check to buffer_client.py to make sure result is available - Related issue: `#178 `_ -* Add check to reset buffer when rostime goes backwards -* Fixed the value of expected_success_count\_ -* Added a tf2_ros message filter unittest with multiple target frames and non-zero time tolerance -* Contributors: Ewoud Pool, Jørgen Borgesen, Stephen Williams - -0.6.3 (2018-07-09) ------------------- - -0.6.2 (2018-05-02) ------------------- -* update buffer_server_name (`#296 `_) - * use nodename as namespace - * Update `#209 `_ to provide backwards compatibility.
-* Contributors: Jihoon Lee, Tully Foote - -0.6.1 (2018-03-21) ------------------- - -0.6.0 (2018-03-21) ------------------- -* tf2_ros::Buffer: canTransform can now deal with timeouts smaller than 10ms by using the hundredth of the timeout for sleeping (`#286 `_) -* More spinning to make sure the message gets through for `#129 `_ `#283 `_ -* Contributors: Tully Foote, cwecht - -0.5.17 (2018-01-01) -------------------- -* Merge pull request `#260 `_ from randoms/indigo-devel - fix python3 import error -* Merge pull request `#257 `_ from delftrobotics-forks/python3 - Make tf2_py python3 compatible again -* Use python3 print function. -* Contributors: Maarten de Vries, Tully Foote, randoms - -0.5.16 (2017-07-14) -------------------- -* Merge pull request `#144 `_ from clearpathrobotics/dead_lock_fix - Solve a bug that causes a deadlock in MessageFilter -* Clear error string if it exists from the external entry points. - Fixes `#117 `_ -* Make buff_size and tcp_nodelay and subscriber queue size mutable. -* Remove generate_rand_vectors() from a number of tests. (`#227 `_) - * Remove generate_rand_vectors() from a number of tests. -* Log jump duration on backwards time jump detection. (`#234 `_) -* replaced dependencies on tf2_msgs_gencpp by exported dependencies -* Use new-style objects in python 2 -* Solve a bug that causes a deadlock in MessageFilter -* Contributors: Adel Fakih, Chris Lalancette, Christopher Wecht, Eric Wieser, Koji Terada, Stephan, Tully Foote, koji_terada - -0.5.15 (2017-01-24) -------------------- -* tf2_ros: add option to unregister TransformListener (`#201 `_) -* Contributors: Hans-Joachim Krauch - -0.5.14 (2017-01-16) -------------------- -* Drop roslib.load_manifest (`#191 `_) -* Adds ability to load TF from the ROS parameter server. -* Code linting & reorganization -* Fix indexing beyond end of array -* added a static transform broadcaster in python -* lots more documentation -* remove BufferCore doc, add BufferClient/BufferServer doc for C++, add Buffer/BufferInterface Python documentation -* Better overview for Python -* Contributors: Eric Wieser, Felix Duvallet, Jackie Kay, Mikael Arguedas, Mike Purvis - -0.5.13 (2016-03-04) -------------------- -* fix documentation warnings -* Adding tests to package -* Contributors: Laurent GEORGE, Vincent Rabaud - -0.5.12 (2015-08-05) -------------------- -* remove annoying gcc warning - This is because the roslog macro cannot have two arguments that are - formatting strings: we need to concatenate them first. -* break canTransform loop only for non-tiny negative time deltas - (At least) with Python 2 ros.Time.now() is not necessarily monotonic - and one can experience negative time deltas (usually well below 1s) - on real hardware under full load. This check was originally introduced - to allow for backjumps with rosbag replays, and only there it makes sense. - So we'll add a small duration threshold to ignore backjumps due to - non-monotonic clocks. -* Contributors: Vincent Rabaud, v4hn - -0.5.11 (2015-04-22) -------------------- -* do not short circuit waitForTransform timeout when running inside pytf. Fixes `#102 `_ - roscpp is not initialized inside pytf which means that ros::ok is not - valid. This was causing the timer to abort immediately. - This breaks support for pytf with respect to early breaking out of a loop re `#26 `_. - This is conceptually broken in pytf, and is fixed in the tf2_ros python implementation. - If you want this behavior I recommend switching to the tf2 python bindings.
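As a concrete illustration of the recommended pattern, here is a minimal tf2_ros listener sketch (frame and node names are illustrative); unlike pytf, can_transform honors its timeout without depending on roscpp state::

    import rospy
    import tf2_ros

    rospy.init_node("listener_sketch")
    buf = tf2_ros.Buffer()
    listener = tf2_ros.TransformListener(buf)  # keep a reference, or the subscriptions are dropped

    # Blocks for up to 1 s, then returns False instead of raising.
    if buf.can_transform("base_link", "odom", rospy.Time(0), rospy.Duration(1.0)):
        t = buf.lookup_transform("base_link", "odom", rospy.Time(0))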
-* inject timeout information into error string for canTransform with timeout -* Contributors: Tully Foote - -0.5.10 (2015-04-21) -------------------- -* switch to use a shared lock with upgrade instead of only a unique lock. For `#91 `_ -* Update message_filter.h -* filters: fix unsupported old messages with frame_id starting with '/' -* Enabled tf2 documentation -* make sure the messages get processed before testing the effects. Fixes `#88 `_ -* allowing to use message filters with PCL types -* Contributors: Brice Rebsamen, Jackie Kay, Tully Foote, Vincent Rabaud, jmtatsch - -0.5.9 (2015-03-25) ------------------- -* changed queue_size in Python transform broadcaster to match that in c++ -* Contributors: mrath - -0.5.8 (2015-03-17) ------------------- -* fix deadlock `#79 `_ -* break out of loop if ros is shutdown. Fixes `#26 `_ -* remove useless Makefile files -* Fix static broadcaster with rpy args -* Contributors: Paul Bovbel, Tully Foote, Vincent Rabaud - -0.5.7 (2014-12-23) ------------------- -* Added 6 param transform again - Yes, using Euler angles is a bad habit. But it is much more convenient if you just need a rotation by 90° somewhere to set it up in Euler angles. So I added the option to supply only the 3 angles. -* Remove tf2_py dependency for Android -* Contributors: Achim Königs, Gary Servin - -0.5.6 (2014-09-18) ------------------- -* support if canTransform(...): in python `#57 `_ -* Support clearing the cache when time jumps backwards `#68 `_ -* Contributors: Tully Foote - -0.5.5 (2014-06-23) ------------------- - -0.5.4 (2014-05-07) ------------------- -* suppressing autostart on the server objects to not incur warnings -* switch to boost signals2 following `ros/ros_comm#267 `_, blocking `ros/geometry#23 `_ -* fix compilation with gcc 4.9 -* make can_transform correctly wait -* explicitly set the publish queue size for rospy -* Contributors: Tully Foote, Vincent Rabaud, v4hn - -0.5.3 (2014-02-21) ------------------- - -0.5.2 (2014-02-20) ------------------- - -0.5.1 (2014-02-14) ------------------- -* adding const to MessageEvent data -* Contributors: Tully Foote - -0.5.0 (2014-02-14) ------------------- -* TF2 uses message events to get connection header information -* Contributors: Kevin Watts - -0.4.10 (2013-12-26) -------------------- -* adding support for static transforms in python listener. Fixes `#46 `_ -* Contributors: Tully Foote - -0.4.9 (2013-11-06) ------------------- - -0.4.8 (2013-11-06) ------------------- -* fixing pytf failing to sleep https://github.com/ros/geometry/issues/30 -* moving python documentation to tf2_ros from tf2 to follow the code -* Fixed static_transform_publisher duplicate check, added rostest. - -0.4.7 (2013-08-28) ------------------- -* fixing new conditional to cover the case that time has not progressed yet port forward of `ros/geometry#35 `_ in the python implementation -* fixing new conditional to cover the case that time has not progressed yet port forward of `ros/geometry#35 `_ - -0.4.6 (2013-08-28) ------------------- -* patching python implementation for `#24 `_ as well -* Stop waiting if time jumps backwards. fixes `#24 `_ -* patch to work around uninitialized time. `#30 <https://github.com/ros/geometry/issues/30>`_ -* Removing unnecessary CATKIN_DEPENDS `#18 `_ - -0.4.5 (2013-07-11) ------------------- -* Revert "cherrypicking groovy patch for `#10 `_ into hydro" - This reverts commit 296d4916706d64f719b8c1592ab60d3686f994e1. - It was not starting up correctly.
-* fixing usage string to show quaternions and using quaternions in the test app -* cherrypicking groovy patch for `#10 `_ into hydro - -0.4.4 (2013-07-09) ------------------- -* making repo use CATKIN_ENABLE_TESTING correctly and switching rostest to be a test_depend with that change. -* reviving unrun unittest and adding CATKIN_ENABLE_TESTING guards - -0.4.3 (2013-07-05) ------------------- - -0.4.2 (2013-07-05) ------------------- - -0.4.1 (2013-07-05) ------------------- -* adding queue accessors lost in the new API -* exposing dedicated thread logic in BufferCore and checking in Buffer -* adding methods to enable backwards compatibility for passing through to tf::Transformer - -0.4.0 (2013-06-27) ------------------- -* splitting rospy dependency into tf2_py so tf2 is pure c++ library. -* moving convert methods back into tf2 because it does not have any ros dependencies beyond ros::Time which is already a dependency of tf2 -* Cleaning up unnecessary dependency on roscpp -* converting contents of tf2_ros to be properly namespaced in the tf2_ros namespace -* fixing return by value for transform method without preallocation -* Cleaning up packaging of tf2 including: - removing unused nodehandle - cleaning up a few dependencies and linking - removing old backup of package.xml - making diff minimally different from tf version of library -* Restoring test packages and bullet packages. - reverting 3570e8c42f9b394ecbfd9db076b920b41300ad55 to get back more of the packages previously implemented - reverting 04cf29d1b58c660fdc999ab83563a5d4b76ab331 to fix `#7 `_ -* Added link against catkin_LIBRARIES for tf2_ros lib, also CMakeLists.txt clean up - -0.3.6 (2013-03-03) ------------------- - -0.3.5 (2013-02-15 14:46) ------------------------- -* 0.3.4 -> 0.3.5 - -0.3.4 (2013-02-15 13:14) ------------------------- -* 0.3.3 -> 0.3.4 - -0.3.3 (2013-02-15 11:30) ------------------------- -* 0.3.2 -> 0.3.3 - -0.3.2 (2013-02-15 00:42) ------------------------- -* 0.3.1 -> 0.3.2 - -0.3.1 (2013-02-14) ------------------- -* 0.3.0 -> 0.3.1 - -0.3.0 (2013-02-13) ------------------- -* switching to version 0.3.0 -* Merge pull request `#2 `_ from KaijenHsiao/groovy-devel - added setup.py and catkin_python_setup() to tf2_ros -* added setup.py and catkin_python_setup() to tf2_ros -* fixing cmake target collisions -* fixing catkin message dependencies -* removing packages with missing deps -* catkin fixes -* catkinizing geometry-experimental -* catkinizing tf2_ros -* catching None result in buffer client before it becomes an AttributeError, raising tf2.TransformException instead (see the client sketch below) -* oneiric linker fixes, bump version to 0.2.3 -* fix deprecated use of Header -* merged faust's changes 864 and 865 into non_optimized branch: BufferCore instead of Buffer in TransformListener, and added a constructor that takes a NodeHandle.
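The buffer client referenced above is the Python counterpart of the C++ BufferClient; a minimal sketch of using it (the server namespace is an assumption and must match a running BufferServer; frame names are illustrative)::

    import rospy
    import tf2_ros

    rospy.init_node("buffer_client_sketch")
    client = tf2_ros.BufferClient("tf2_buffer_server")  # assumed namespace of a BufferServer
    client.wait_for_server(rospy.Duration(5.0))

    try:
        t = client.lookup_transform("base_link", "odom", rospy.Time(0), rospy.Duration(1.0))
    except tf2_ros.TransformException as e:  # raised instead of returning None on failure
        rospy.logwarn("lookup failed: %s", e)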
-* add buffer server binary -* fix compilation on 32bit -* add missing file -* build buffer server -* TransformListener only needs a BufferCore -* Add TransformListener constructor that takes a NodeHandle so you can specify a callback queue to use -* Add option to use a callback queue in the message filter -* move the message filter to tf2_ros -* add missing std_msgs dependency -* missed 2 lines in last commit -* removing auto clearing from listener, as it's unexpected behavior for a library -* static transform tested and working -* subscriptions to tf_static unshelved -* static transform publisher executable running -* latching static transform publisher -* cleaning out old commented code -* Only query rospy.Time.now() when the timeout is greater than 0 -* debug comments removed -* move to tf2_ros completed. tests pass again -* merge tf2_cpp and tf2_py into tf2_ros diff --git a/src/geometry2/tf2_ros/CMakeLists.txt b/src/geometry2/tf2_ros/CMakeLists.txt deleted file mode 100644 index 201cb1c..0000000 --- a/src/geometry2/tf2_ros/CMakeLists.txt +++ /dev/null @@ -1,153 +0,0 @@ -cmake_minimum_required(VERSION 2.8.3) -project(tf2_ros) - -if(NOT ANDROID) -set(TF2_PY tf2_py) -endif() - -find_package(catkin REQUIRED COMPONENTS - actionlib - actionlib_msgs - geometry_msgs - message_filters - roscpp - rosgraph - rospy - tf2 - tf2_msgs - ${TF2_PY} -) -find_package(Boost REQUIRED COMPONENTS thread) - -catkin_python_setup() - -catkin_package( - INCLUDE_DIRS include - LIBRARIES ${PROJECT_NAME} - CATKIN_DEPENDS - actionlib - actionlib_msgs - geometry_msgs - message_filters - roscpp - rosgraph - tf2 - tf2_msgs - ${TF2_PY} -) - -include_directories(include ${catkin_INCLUDE_DIRS}) - -# tf2_ros library -add_library(${PROJECT_NAME} - src/buffer.cpp - src/transform_listener.cpp - src/buffer_client.cpp - src/buffer_server.cpp - src/transform_broadcaster.cpp - src/static_transform_broadcaster.cpp -) -add_dependencies(${PROJECT_NAME} ${catkin_EXPORTED_TARGETS}) -target_link_libraries(${PROJECT_NAME} ${catkin_LIBRARIES}) - -# buffer_server executable -add_executable(${PROJECT_NAME}_buffer_server src/buffer_server_main.cpp) -add_dependencies(${PROJECT_NAME}_buffer_server ${catkin_EXPORTED_TARGETS}) -target_link_libraries(${PROJECT_NAME}_buffer_server - ${PROJECT_NAME} - ${catkin_LIBRARIES} - ${Boost_LIBRARIES} -) -set_target_properties(${PROJECT_NAME}_buffer_server - PROPERTIES OUTPUT_NAME buffer_server -) - -# static_transform_publisher -add_executable(${PROJECT_NAME}_static_transform_publisher - src/static_transform_broadcaster_program.cpp -) -add_dependencies(${PROJECT_NAME}_static_transform_publisher ${catkin_EXPORTED_TARGETS}) -target_link_libraries(${PROJECT_NAME}_static_transform_publisher - ${PROJECT_NAME} - ${catkin_LIBRARIES} -) -set_target_properties(${PROJECT_NAME}_static_transform_publisher - PROPERTIES OUTPUT_NAME static_transform_publisher -) - -# Install library -install(TARGETS - ${PROJECT_NAME} - ARCHIVE DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION} - LIBRARY DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION} - RUNTIME DESTINATION ${CATKIN_GLOBAL_BIN_DESTINATION} -) - -# Install executable -install(TARGETS - ${PROJECT_NAME}_buffer_server ${PROJECT_NAME}_static_transform_publisher - ARCHIVE DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION} - LIBRARY DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION} - RUNTIME DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION} -) - -install(DIRECTORY include/${PROJECT_NAME}/ - DESTINATION ${CATKIN_PACKAGE_INCLUDE_DESTINATION} -) - - -# Tests -if(CATKIN_ENABLE_TESTING) - -# new requirements
for testing -find_package(catkin REQUIRED COMPONENTS - actionlib - actionlib_msgs - geometry_msgs - message_filters - roscpp - rosgraph - rospy - rostest - tf2 - tf2_msgs - ${TF2_PY} -) - - - -# tf2_ros_test_listener -add_executable(${PROJECT_NAME}_test_listener EXCLUDE_FROM_ALL test/listener_unittest.cpp) -add_dependencies(${PROJECT_NAME}_test_listener ${catkin_EXPORTED_TARGETS}) -add_executable(${PROJECT_NAME}_test_time_reset EXCLUDE_FROM_ALL test/time_reset_test.cpp) -add_dependencies(${PROJECT_NAME}_test_time_reset ${catkin_EXPORTED_TARGETS}) -add_executable(${PROJECT_NAME}_test_message_filter EXCLUDE_FROM_ALL test/message_filter_test.cpp) -add_dependencies(${PROJECT_NAME}_test_message_filter ${catkin_EXPORTED_TARGETS}) - -target_link_libraries(${PROJECT_NAME}_test_listener - ${PROJECT_NAME} - ${catkin_LIBRARIES} - ${GTEST_LIBRARIES} -) - -target_link_libraries(${PROJECT_NAME}_test_time_reset - ${PROJECT_NAME} - ${catkin_LIBRARIES} - ${GTEST_LIBRARIES} -) - -target_link_libraries(${PROJECT_NAME}_test_message_filter - ${PROJECT_NAME} - ${catkin_LIBRARIES} - ${GTEST_LIBRARIES} -) - -add_dependencies(tests ${PROJECT_NAME}_test_listener) -add_dependencies(tests ${PROJECT_NAME}_test_time_reset) -add_dependencies(tests ${PROJECT_NAME}_test_message_filter) - -add_rostest(test/transform_listener_unittest.launch) -add_rostest(test/transform_listener_time_reset_test.launch) -add_rostest(test/message_filter_test.launch) - -endif() diff --git a/src/geometry2/tf2_ros/doc/conf.py b/src/geometry2/tf2_ros/doc/conf.py deleted file mode 100644 index cee1a41..0000000 --- a/src/geometry2/tf2_ros/doc/conf.py +++ /dev/null @@ -1,204 +0,0 @@ -# -*- coding: utf-8 -*- -# -# tf documentation build configuration file, created by -# sphinx-quickstart on Mon Jun 1 14:21:53 2009. -# -# This file is execfile()d with the current directory set to its containing dir. -# -# Note that not all possible configuration values are present in this -# autogenerated file. -# -# All configuration values have a default; values that are commented out -# serve to show the default. - -import sys, os - -# If extensions (or modules to document with autodoc) are in another directory, -# add these directories to sys.path here. If the directory is relative to the -# documentation root, use os.path.abspath to make it absolute, like shown here. -#sys.path.append(os.path.abspath('.')) - -# -- General configuration ----------------------------------------------------- - -# Add any Sphinx extension module names here, as strings. They can be extensions -# coming with Sphinx (named 'sphinx.ext.*') or your custom ones. -extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.intersphinx', 'sphinx.ext.pngmath'] - -# Add any paths that contain templates here, relative to this directory. -templates_path = ['_templates'] - -# The suffix of source filenames. -source_suffix = '.rst' - -# The encoding of source files. -#source_encoding = 'utf-8' - -# The master toctree document. -master_doc = 'index' - -# General information about the project. -project = u'tf' -copyright = u'2009, Willow Garage, Inc.' - -# The version info for the project you're documenting, acts as replacement for -# |version| and |release|, also used in various other places throughout the -# built documents. -# -# The short X.Y version. -version = '0.1' -# The full version, including alpha/beta/rc tags. -release = '0.1.0' - -# The language for content autogenerated by Sphinx. Refer to documentation -# for a list of supported languages. 
-#language = None - -# There are two options for replacing |today|: either, you set today to some -# non-false value, then it is used: -#today = '' -# Else, today_fmt is used as the format for a strftime call. -#today_fmt = '%B %d, %Y' - -# List of documents that shouldn't be included in the build. -#unused_docs = [] - -# List of directories, relative to source directory, that shouldn't be searched -# for source files. -exclude_trees = ['_build'] - -# The reST default role (used for this markup: `text`) to use for all documents. -#default_role = None - -# If true, '()' will be appended to :func: etc. cross-reference text. -#add_function_parentheses = True - -# If true, the current module name will be prepended to all description -# unit titles (such as .. function::). -#add_module_names = True - -# If true, sectionauthor and moduleauthor directives will be shown in the -# output. They are ignored by default. -#show_authors = False - -# The name of the Pygments (syntax highlighting) style to use. -pygments_style = 'sphinx' - -# A list of ignored prefixes for module index sorting. -#modindex_common_prefix = [] - - -# -- Options for HTML output --------------------------------------------------- - -# The theme to use for HTML and HTML Help pages. Major themes that come with -# Sphinx are currently 'default' and 'sphinxdoc'. -html_theme = 'default' - -# Theme options are theme-specific and customize the look and feel of a theme -# further. For a list of options available for each theme, see the -# documentation. -#html_theme_options = {} - -# Add any paths that contain custom themes here, relative to this directory. -#html_theme_path = [] - -# The name for this set of Sphinx documents. If None, it defaults to -# " v documentation". -#html_title = None - -# A shorter title for the navigation bar. Default is the same as html_title. -#html_short_title = None - -# The name of an image file (relative to this directory) to place at the top -# of the sidebar. -#html_logo = None - -# The name of an image file (within the static path) to use as favicon of the -# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 -# pixels large. -#html_favicon = None - -# Add any paths that contain custom static files (such as style sheets) here, -# relative to this directory. They are copied after the builtin static files, -# so a file named "default.css" will overwrite the builtin "default.css". -#html_static_path = ['_static'] - -# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, -# using the given strftime format. -#html_last_updated_fmt = '%b %d, %Y' - -# If true, SmartyPants will be used to convert quotes and dashes to -# typographically correct entities. -#html_use_smartypants = True - -# Custom sidebar templates, maps document names to template names. -#html_sidebars = {} - -# Additional templates that should be rendered to pages, maps page names to -# template names. -#html_additional_pages = {} - -# If false, no module index is generated. -#html_use_modindex = True - -# If false, no index is generated. -#html_use_index = True - -# If true, the index is split into individual pages for each letter. -#html_split_index = False - -# If true, links to the reST sources are added to the pages. -#html_show_sourcelink = True - -# If true, an OpenSearch description file will be output, and all pages will -# contain a tag referring to it. The value of this option must be the -# base URL from which the finished HTML is served. 
-#html_use_opensearch = '' - -# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml"). -#html_file_suffix = '' - -# Output file base name for HTML help builder. -htmlhelp_basename = 'tfdoc' - - -# -- Options for LaTeX output -------------------------------------------------- - -# The paper size ('letter' or 'a4'). -#latex_paper_size = 'letter' - -# The font size ('10pt', '11pt' or '12pt'). -#latex_font_size = '10pt' - -# Grouping the document tree into LaTeX files. List of tuples -# (source start file, target name, title, author, documentclass [howto/manual]). -latex_documents = [ - ('index', 'tf.tex', u'stereo\\_utils Documentation', - u'Tully Foote and Eitan Marder-Eppstein', 'manual'), -] - -# The name of an image file (relative to this directory) to place at the top of -# the title page. -#latex_logo = None - -# For "manual" documents, if this is true, then toplevel headings are parts, -# not chapters. -#latex_use_parts = False - -# Additional stuff for the LaTeX preamble. -#latex_preamble = '' - -# Documents to append as an appendix to all manuals. -#latex_appendices = [] - -# If false, no module index is generated. -#latex_use_modindex = True - - -# Example configuration for intersphinx: refer to the Python standard library. -intersphinx_mapping = { - 'http://docs.python.org/': None, - 'http://docs.opencv.org/3.0-last-rst/': None, - 'http://docs.scipy.org/doc/numpy' : None - } - -autoclass_content = "both" diff --git a/src/geometry2/tf2_ros/doc/index.rst b/src/geometry2/tf2_ros/doc/index.rst deleted file mode 100644 index 083feb1..0000000 --- a/src/geometry2/tf2_ros/doc/index.rst +++ /dev/null @@ -1,48 +0,0 @@ -tf2_ros Overview -================ - -This is the Python API reference for the tf2_ros package. - -To broadcast transforms using ROS (a minimal sketch follows below): -- Call :meth:`rospy.init_node` to initialize a node. -- Construct a :class:`tf2_ros.TransformBroadcaster`. -- Pass a :class:`geometry_msgs.TransformStamped` message to :meth:`tf2_ros.TransformBroadcaster.sendTransform`. - - - Alternatively, pass a vector of :class:`geometry_msgs.TransformStamped` messages. - -To listen for transforms using ROS: -- Construct an instance of a class that implements :class:`tf2_ros.BufferInterface`. - - - :class:`tf2_ros.Buffer` is the standard implementation which offers a tf2_frames service that can respond to requests with a :class:`tf2_msgs.FrameGraph`. - - :class:`tf2_ros.BufferClient` uses an :class:`actionlib.SimpleActionClient` to wait for the requested transform to become available. - -- Pass the :class:`tf2_ros.Buffer` to the constructor of :class:`tf2_ros.TransformListener`. - - Optionally, pass a :class:`ros.NodeHandle` (otherwise TransformListener will connect to the node for the process). - - Optionally, specify if the TransformListener runs in its own thread or not. - -- Use :meth:`tf2_ros.BufferInterface.transform` to apply a transform on the tf server to an input frame. - - Or, check if a transform is available with :meth:`tf2_ros.BufferInterface.can_transform`. - - Then, call :meth:`tf2_ros.BufferInterface.lookup_transform` to get the transform between two frames. - -For more information, see the tf2 tutorials: http://wiki.ros.org/tf2/Tutorials - -Or, get an `overview`_ of data type conversion methods in geometry_experimental packages. - -See http://wiki.ros.org/tf2/Tutorials for more detailed usage. - -.. _overview: http://wiki.ros.org/tf2/Tutorials/Migration/DataConversions -
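A minimal broadcaster sketch corresponding to the steps above (node and frame names are illustrative)::

    import rospy
    import tf2_ros
    from geometry_msgs.msg import TransformStamped

    rospy.init_node("broadcaster_sketch")
    br = tf2_ros.TransformBroadcaster()

    t = TransformStamped()
    t.header.frame_id = "world"
    t.child_frame_id = "robot"
    t.transform.rotation.w = 1.0  # identity rotation

    rate = rospy.Rate(10)
    while not rospy.is_shutdown():
        t.header.stamp = rospy.Time.now()
        br.sendTransform(t)  # rebroadcast at 10 Hz with a fresh stamp
        rate.sleep()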
-Classes and Exceptions -====================== - -.. toctree:: - :maxdepth: 2 - - tf2_ros - - -Indices and tables -================== - -* :ref:`genindex` -* :ref:`search` diff --git a/src/geometry2/tf2_ros/doc/mainpage.dox b/src/geometry2/tf2_ros/doc/mainpage.dox deleted file mode 100644 index 420a674..0000000 --- a/src/geometry2/tf2_ros/doc/mainpage.dox +++ /dev/null @@ -1,41 +0,0 @@ -/** -\mainpage -\htmlinclude manifest.html - -\b tf2_ros is the C++ ROS wrapper around the tf2 transform library. - -\section codeapi Code API - -To broadcast transforms using ROS: -- Call ros::init() to initialize a node. -- Construct a tf2_ros::TransformBroadcaster. -- Pass a geometry_msgs::TransformStamped message to tf2_ros::TransformBroadcaster::sendTransform(). - - Alternatively, pass a vector of geometry_msgs::TransformStamped messages. -- Use tf2_ros::StaticTransformBroadcaster for "latching" behavior for transforms that are not expected to change. - -To listen for transforms using ROS: -- Construct an instance of a class that implements tf2_ros::BufferInterface. - - tf2_ros::Buffer is the standard implementation which offers a tf2_frames service that can respond to requests with a tf2_msgs::FrameGraph. - - tf2_ros::BufferClient uses an actionlib::SimpleActionClient to wait for the requested transform to become available. - - It should be used with a tf2_ros::BufferServer, which offers the corresponding actionlib::ActionServer. -- Pass the tf2_ros::Buffer to the constructor of tf2_ros::TransformListener. - - Optionally, pass a ros::NodeHandle (otherwise TransformListener will connect to the node for the process). - - Optionally, specify if the TransformListener runs in its own thread or not. -- Use tf2_ros::BufferInterface::transform() to apply a transform on the tf server to an input frame. - - Or, check if a transform is available with tf2_ros::BufferInterface::canTransform(). - - Then, call tf2_ros::BufferInterface::lookupTransform() to get the transform between two frames. -- Construct a tf2_ros::MessageFilter with the TransformListener to apply a transformation to incoming frames. - - This is especially useful when streaming sensor data. - -List of exceptions thrown in this library: -- tf2::LookupException -- tf2::ConnectivityException -- tf2::ExtrapolationException -- tf2::InvalidArgumentException -- tf2::TimeoutException -- tf2::TransformException - -For more information, see the tf2 tutorials: http://wiki.ros.org/tf2/Tutorials - -Or, get an overview of data type conversion methods in geometry_experimental packages. -*/ diff --git a/src/geometry2/tf2_ros/doc/tf2_ros.rst b/src/geometry2/tf2_ros/doc/tf2_ros.rst deleted file mode 100644 index 10845ed..0000000 --- a/src/geometry2/tf2_ros/doc/tf2_ros.rst +++ /dev/null @@ -1,73 +0,0 @@ -tf2_ros Python API -================== - -Exceptions ---------- - -.. exception:: tf2.TransformException - - base class for tf exceptions. Because :exc:`tf2.TransformException` is the - base class for other exceptions, you can catch all tf exceptions - by writing:: - - try: - # do some tf2 work - except tf2.TransformException: - print "some tf2 exception happened" - - -.. exception:: tf2.ConnectivityException - - subclass of :exc:`TransformException`. - Raised when the fixed_frame tree is not connected between the frames requested. - -.. exception:: tf2.LookupException - - subclass of :exc:`TransformException`. - Raised when a tf method has attempted to access a frame, but - the frame is not in the graph. - The most common reason for this is that the frame is not - being published, or a parent frame was not set correctly - causing the tree to be broken. - -.. exception:: tf2.ExtrapolationException - - subclass of :exc:`TransformException`. - Raised when a tf method would have required extrapolation beyond current limits. - - -.. exception:: tf2.InvalidArgumentException - - subclass of :exc:`TransformException`. - Raised when the arguments to the method are improperly formed. An example of why this might be raised is if an argument is nan. - -.. autoexception:: tf2_ros.buffer_interface.TypeException - -.. autoexception:: tf2_ros.buffer_interface.NotImplementedException - - -BufferInterface ---------------- -.. autoclass:: tf2_ros.buffer_interface.BufferInterface - :members: - -Buffer ------- -.. autoclass:: tf2_ros.buffer.Buffer - :members: - -BufferClient ------------- -.. autoclass:: tf2_ros.buffer_client.BufferClient - :members: - - -TransformBroadcaster --------------------- -.. autoclass:: tf2_ros.transform_broadcaster.TransformBroadcaster - :members: - -TransformListener ------------------ -.. autoclass:: tf2_ros.transform_listener.TransformListener - :members: diff --git a/src/geometry2/tf2_ros/include/tf2_ros/buffer.h b/src/geometry2/tf2_ros/include/tf2_ros/buffer.h deleted file mode 100644 index 88ac1be..0000000 --- a/src/geometry2/tf2_ros/include/tf2_ros/buffer.h +++ /dev/null @@ -1,144 +0,0 @@ -/* - * Copyright (c) 2008, Willow Garage, Inc. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the Willow Garage, Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ - -/** \author Wim Meeussen */ - -#ifndef TF2_ROS_BUFFER_H -#define TF2_ROS_BUFFER_H - -#include -#include -#include -#include -#include - - -namespace tf2_ros -{ - - /** \brief Standard implementation of the tf2_ros::BufferInterface abstract data type. - * - * Inherits tf2_ros::BufferInterface and tf2::BufferCore. - * Stores known frames and offers a ROS service, "tf2_frames", which responds to client requests - * with a response containing a tf2_msgs::FrameGraph representing the relationship of known frames.
- */ - class Buffer: public BufferInterface, public tf2::BufferCore - { - public: - using tf2::BufferCore::lookupTransform; - using tf2::BufferCore::canTransform; - - /** - * @brief Constructor for a Buffer object - * @param cache_time How long to keep a history of transforms - * @param debug Whether to advertise the view_frames service that exposes debugging information from the buffer - * @return - */ - Buffer(ros::Duration cache_time = ros::Duration(BufferCore::DEFAULT_CACHE_TIME), bool debug = false); - - /** \brief Get the transform between two frames by frame ID. - * \param target_frame The frame to which data should be transformed - * \param source_frame The frame where the data originated - * \param time The time at which the value of the transform is desired. (0 will get the latest) - * \param timeout How long to block before failing - * \return The transform between the frames - * - * Possible exceptions tf2::LookupException, tf2::ConnectivityException, - * tf2::ExtrapolationException, tf2::InvalidArgumentException - */ - virtual geometry_msgs::TransformStamped - lookupTransform(const std::string& target_frame, const std::string& source_frame, - const ros::Time& time, const ros::Duration timeout) const; - - /** \brief Get the transform between two frames by frame ID assuming fixed frame. - * \param target_frame The frame to which data should be transformed - * \param target_time The time to which the data should be transformed. (0 will get the latest) - * \param source_frame The frame where the data originated - * \param source_time The time at which the source_frame should be evaluated. (0 will get the latest) - * \param fixed_frame The frame in which to assume the transform is constant in time. - * \param timeout How long to block before failing - * \return The transform between the frames - * - * Possible exceptions tf2::LookupException, tf2::ConnectivityException, - * tf2::ExtrapolationException, tf2::InvalidArgumentException - */ - virtual geometry_msgs::TransformStamped - lookupTransform(const std::string& target_frame, const ros::Time& target_time, - const std::string& source_frame, const ros::Time& source_time, - const std::string& fixed_frame, const ros::Duration timeout) const; - - - /** \brief Test if a transform is possible - * \param target_frame The frame into which to transform - * \param source_frame The frame from which to transform - * \param target_time The time at which to transform - * \param timeout How long to block before failing - * \param errstr A pointer to a string which will be filled with why the transform failed, if not NULL - * \return True if the transform is possible, false otherwise - */ - virtual bool - canTransform(const std::string& target_frame, const std::string& source_frame, - const ros::Time& target_time, const ros::Duration timeout, std::string* errstr = NULL) const; - - /** \brief Test if a transform is possible - * \param target_frame The frame into which to transform - * \param target_time The time into which to transform - * \param source_frame The frame from which to transform - * \param source_time The time from which to transform - * \param fixed_frame The frame in which to treat the transform as constant in time - * \param timeout How long to block before failing - * \param errstr A pointer to a string which will be filled with why the transform failed, if not NULL - * \return True if the transform is possible, false otherwise - */ - virtual bool - canTransform(const std::string& target_frame, const ros::Time& target_time, - 
const std::string& source_frame, const ros::Time& source_time, - const std::string& fixed_frame, const ros::Duration timeout, std::string* errstr = NULL) const; - - - - - private: - bool getFrames(tf2_msgs::FrameGraph::Request& req, tf2_msgs::FrameGraph::Response& res) ; - - - // conditionally error if dedicated_thread unset. - bool checkAndErrorDedicatedThreadPresent(std::string* errstr) const; - - ros::ServiceServer frames_server_; - - - }; // class - -static const std::string threading_error = "Do not call canTransform or lookupTransform with a timeout unless you are using another thread for populating data. Without a dedicated thread it will always timeout. If you have a separate thread servicing tf messages, call setUsingDedicatedThread(true) on your Buffer instance."; - - -} // namespace - -#endif // TF2_ROS_BUFFER_H diff --git a/src/geometry2/tf2_ros/include/tf2_ros/buffer_client.h b/src/geometry2/tf2_ros/include/tf2_ros/buffer_client.h deleted file mode 100644 index a5b0c08..0000000 --- a/src/geometry2/tf2_ros/include/tf2_ros/buffer_client.h +++ /dev/null @@ -1,139 +0,0 @@ -/********************************************************************* -* -* Software License Agreement (BSD License) -* -* Copyright (c) 2009, Willow Garage, Inc. -* All rights reserved. -* -* Redistribution and use in source and binary forms, with or without -* modification, are permitted provided that the following conditions -* are met: -* -* * Redistributions of source code must retain the above copyright -* notice, this list of conditions and the following disclaimer. -* * Redistributions in binary form must reproduce the above -* copyright notice, this list of conditions and the following -* disclaimer in the documentation and/or other materials provided -* with the distribution. -* * Neither the name of Willow Garage, Inc. nor the names of its -* contributors may be used to endorse or promote products derived -* from this software without specific prior written permission. -* -* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS -* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE -* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, -* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, -* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT -* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN -* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -* POSSIBILITY OF SUCH DAMAGE. -* -* Author: Eitan Marder-Eppstein -*********************************************************************/ -#ifndef TF2_ROS_BUFFER_CLIENT_H_ -#define TF2_ROS_BUFFER_CLIENT_H_ - -#include -#include -#include - -namespace tf2_ros -{ - /** \brief Action client-based implementation of the tf2_ros::BufferInterface abstract data type. - * - * BufferClient uses actionlib to coordinate waiting for available transforms. - * - * You can use this class with a tf2_ros::BufferServer and tf2_ros::TransformListener in a separate process. 
- */ - class BufferClient : public BufferInterface - { - public: - typedef actionlib::SimpleActionClient<tf2_msgs::LookupTransformAction> LookupActionClient; - - /** \brief BufferClient constructor - * \param ns The namespace in which to look for a BufferServer - * \param check_frequency Deprecated, not used anymore - * \param timeout_padding The amount of time to allow past the desired timeout on the client side for communication lag - */ - BufferClient(std::string ns, double check_frequency = 10.0, ros::Duration timeout_padding = ros::Duration(2.0)); - - /** \brief Get the transform between two frames by frame ID. - * \param target_frame The frame to which data should be transformed - * \param source_frame The frame where the data originated - * \param time The time at which the value of the transform is desired. (0 will get the latest) - * \param timeout How long to block before failing - * \return The transform between the frames - * - * Possible exceptions tf2::LookupException, tf2::ConnectivityException, - * tf2::ExtrapolationException, tf2::InvalidArgumentException - */ - virtual geometry_msgs::TransformStamped - lookupTransform(const std::string& target_frame, const std::string& source_frame, - const ros::Time& time, const ros::Duration timeout = ros::Duration(0.0)) const; - - /** \brief Get the transform between two frames by frame ID assuming fixed frame. - * \param target_frame The frame to which data should be transformed - * \param target_time The time to which the data should be transformed. (0 will get the latest) - * \param source_frame The frame where the data originated - * \param source_time The time at which the source_frame should be evaluated. (0 will get the latest) - * \param fixed_frame The frame in which to assume the transform is constant in time. - * \param timeout How long to block before failing - * \return The transform between the frames - * - * Possible exceptions tf2::LookupException, tf2::ConnectivityException, - * tf2::ExtrapolationException, tf2::InvalidArgumentException - */ - virtual geometry_msgs::TransformStamped - lookupTransform(const std::string& target_frame, const ros::Time& target_time, - const std::string& source_frame, const ros::Time& source_time, - const std::string& fixed_frame, const ros::Duration timeout = ros::Duration(0.0)) const; - - /** \brief Test if a transform is possible - * \param target_frame The frame into which to transform - * \param source_frame The frame from which to transform - * \param time The time at which to transform - * \param timeout How long to block before failing - * \param errstr A pointer to a string which will be filled with why the transform failed, if not NULL - * \return True if the transform is possible, false otherwise - */ - virtual bool - canTransform(const std::string& target_frame, const std::string& source_frame, - const ros::Time& time, const ros::Duration timeout = ros::Duration(0.0), std::string* errstr = NULL) const; - - /** \brief Test if a transform is possible - * \param target_frame The frame into which to transform - * \param target_time The time into which to transform - * \param source_frame The frame from which to transform - * \param source_time The time from which to transform - * \param fixed_frame The frame in which to treat the transform as constant in time - * \param timeout How long to block before failing - * \param errstr A pointer to a string which will be filled with why the transform failed, if not NULL - * \return True if the transform is possible, false otherwise - */ - virtual bool -
canTransform(const std::string& target_frame, const ros::Time& target_time, - const std::string& source_frame, const ros::Time& source_time, - const std::string& fixed_frame, const ros::Duration timeout = ros::Duration(0.0), std::string* errstr = NULL) const; - - /** \brief Block until the action server is ready to respond to requests. - * \param timeout Time to wait for the server. - * \return True if the server is ready, false otherwise. - */ - bool waitForServer(const ros::Duration& timeout = ros::Duration(0)) - { - return client_.waitForServer(timeout); - } - - private: - geometry_msgs::TransformStamped processGoal(const tf2_msgs::LookupTransformGoal& goal) const; - geometry_msgs::TransformStamped processResult(const tf2_msgs::LookupTransformResult& result) const; - mutable LookupActionClient client_; - double check_frequency_; - ros::Duration timeout_padding_; - }; -}; -#endif diff --git a/src/geometry2/tf2_ros/include/tf2_ros/buffer_interface.h b/src/geometry2/tf2_ros/include/tf2_ros/buffer_interface.h deleted file mode 100644 index 9215008..0000000 --- a/src/geometry2/tf2_ros/include/tf2_ros/buffer_interface.h +++ /dev/null @@ -1,267 +0,0 @@ -/* - * Copyright (c) 2008, Willow Garage, Inc. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the Willow Garage, Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ - -/** \author Wim Meeussen */ - -#ifndef TF2_ROS_BUFFER_INTERFACE_H -#define TF2_ROS_BUFFER_INTERFACE_H - -#include -#include -#include -#include -#include -#include - -namespace tf2_ros -{ - -/** \brief Abstract interface for wrapping tf2::BufferCore in a ROS-based API. - * Implementations include tf2_ros::Buffer and tf2_ros::BufferClient. - */ -class BufferInterface -{ -public: - - /** \brief Get the transform between two frames by frame ID. - * \param target_frame The frame to which data should be transformed - * \param source_frame The frame where the data originated - * \param time The time at which the value of the transform is desired. 
(0 will get the latest) - * \param timeout How long to block before failing - * \return The transform between the frames - * - * Possible exceptions tf2::LookupException, tf2::ConnectivityException, - * tf2::ExtrapolationException, tf2::InvalidArgumentException - */ - virtual geometry_msgs::TransformStamped - lookupTransform(const std::string& target_frame, const std::string& source_frame, - const ros::Time& time, const ros::Duration timeout) const = 0; - - /** \brief Get the transform between two frames by frame ID assuming fixed frame. - * \param target_frame The frame to which data should be transformed - * \param target_time The time to which the data should be transformed. (0 will get the latest) - * \param source_frame The frame where the data originated - * \param source_time The time at which the source_frame should be evaluated. (0 will get the latest) - * \param fixed_frame The frame in which to assume the transform is constant in time. - * \param timeout How long to block before failing - * \return The transform between the frames - * - * Possible exceptions tf2::LookupException, tf2::ConnectivityException, - * tf2::ExtrapolationException, tf2::InvalidArgumentException - */ - virtual geometry_msgs::TransformStamped - lookupTransform(const std::string& target_frame, const ros::Time& target_time, - const std::string& source_frame, const ros::Time& source_time, - const std::string& fixed_frame, const ros::Duration timeout) const = 0; - - - /** \brief Test if a transform is possible - * \param target_frame The frame into which to transform - * \param source_frame The frame from which to transform - * \param time The time at which to transform - * \param timeout How long to block before failing - * \param errstr A pointer to a string which will be filled with why the transform failed, if not NULL - * \return True if the transform is possible, false otherwise - */ - virtual bool - canTransform(const std::string& target_frame, const std::string& source_frame, - const ros::Time& time, const ros::Duration timeout, std::string* errstr = NULL) const = 0; - - /** \brief Test if a transform is possible - * \param target_frame The frame into which to transform - * \param target_time The time into which to transform - * \param source_frame The frame from which to transform - * \param source_time The time from which to transform - * \param fixed_frame The frame in which to treat the transform as constant in time - * \param timeout How long to block before failing - * \param errstr A pointer to a string which will be filled with why the transform failed, if not NULL - * \return True if the transform is possible, false otherwise - */ - virtual bool - canTransform(const std::string& target_frame, const ros::Time& target_time, - const std::string& source_frame, const ros::Time& source_time, - const std::string& fixed_frame, const ros::Duration timeout, std::string* errstr = NULL) const = 0; - - /** \brief Transform an input into the target frame. - * This function is templated and can take as input any valid mathematical object that tf knows - * how to apply a transform to, by way of the templated math conversions interface. - * For example, the template type could be a Transform, Pose, Vector, or Quaternion message - * type (as defined in geometry_msgs). - * \tparam T The type of the object to transform. - * \param in The object to transform - * \param out The transformed output, preallocated by the caller. - * \param target_frame The string identifer for the frame to transform into. 
- * \param timeout How long to wait for the target frame. Default value is zero (no blocking). - */ - template - T& transform(const T& in, T& out, - const std::string& target_frame, ros::Duration timeout=ros::Duration(0.0)) const - { - // do the transform - tf2::doTransform(in, out, lookupTransform(target_frame, tf2::getFrameId(in), tf2::getTimestamp(in), timeout)); - return out; - } - - /** \brief Transform an input into the target frame. - * This function is templated and can take as input any valid mathematical object that tf knows - * how to apply a transform to, by way of the templated math conversions interface. - * For example, the template type could be a Transform, Pose, Vector, or Quaternion message - * type (as defined in geometry_msgs). - * \tparam T The type of the object to transform. - * \param in The object to transform. - * \param target_frame The string identifer for the frame to transform into. - * \param timeout How long to wait for the target frame. Default value is zero (no blocking). - * \return The transformed output. - */ - template - T transform(const T& in, - const std::string& target_frame, ros::Duration timeout=ros::Duration(0.0)) const - { - T out; - return transform(in, out, target_frame, timeout); - } - - /** \brief Transform an input into the target frame and convert to a specified output type. - * It is templated on two types: the type of the input object and the type of the - * transformed output. - * For example, the template types could be Transform, Pose, Vector, or Quaternion messages - * type (as defined in geometry_msgs). - * The function will calculate the transformation and then convert the result into the - * specified output type. - * Compilation will fail if a known conversion does not exist bewteen the two template - * parameters. - * \tparam A The type of the object to transform. - * \tparam B The type of the transformed output. - * \param in The object to transform - * \param out The transformed output, converted to the specified type. - * \param target_frame The string identifer for the frame to transform into. - * \param timeout How long to wait for the target frame. Default value is zero (no blocking). - * \return The transformed output, converted to the specified type. - */ - template - B& transform(const A& in, B& out, - const std::string& target_frame, ros::Duration timeout=ros::Duration(0.0)) const - { - A copy = transform(in, target_frame, timeout); - tf2::convert(copy, out); - return out; - } - - /** \brief Transform an input into the target frame (advanced). - * This function is templated and can take as input any valid mathematical object that tf knows - * how to apply a transform to, by way of the templated math conversions interface. - * For example, the template type could be a Transform, Pose, Vector, or Quaternion message - * type (as defined in geometry_msgs). - * This function follows the advanced API, which allows transforming between different time - * points, and specifying a fixed frame that does not varying in time. - * \tparam T The type of the object to transform. - * \param in The object to transform - * \param out The transformed output, preallocated by the caller. - * \param target_frame The string identifer for the frame to transform into. - * \param target_time The time into which to transform - * \param fixed_frame The frame in which to treat the transform as constant in time. - * \param timeout How long to wait for the target frame. Default value is zero (no blocking). 
- */ - template - T& transform(const T& in, T& out, - const std::string& target_frame, const ros::Time& target_time, - const std::string& fixed_frame, ros::Duration timeout=ros::Duration(0.0)) const - { - // do the transform - tf2::doTransform(in, out, lookupTransform(target_frame, target_time, - tf2::getFrameId(in), tf2::getTimestamp(in), - fixed_frame, timeout)); - return out; - } - - - /** \brief Transform an input into the target frame (advanced). - * This function is templated and can take as input any valid mathematical object that tf knows - * how to apply a transform to, by way of the templated math conversions interface. - * For example, the template type could be a Transform, Pose, Vector, or Quaternion message - * type (as defined in geometry_msgs). - * This function follows the advanced API, which allows transforming between different time - * points, and specifying a fixed frame that does not varying in time. - * \tparam T The type of the object to transform. - * \param in The object to transform - * \param target_frame The string identifer for the frame to transform into. - * \param target_time The time into which to transform - * \param fixed_frame The frame in which to treat the transform as constant in time. - * \param timeout How long to wait for the target frame. Default value is zero (no blocking). - * \return The transformed output. - */ - template - T transform(const T& in, - const std::string& target_frame, const ros::Time& target_time, - const std::string& fixed_frame, ros::Duration timeout=ros::Duration(0.0)) const - { - T out; - return transform(in, out, target_frame, target_time, fixed_frame, timeout); - } - - - /** \brief Transform an input into the target frame and convert to a specified output type (advanced). - * It is templated on two types: the type of the input object and the type of the - * transformed output. - * For example, the template type could be a Transform, Pose, Vector, or Quaternion message - * type (as defined in geometry_msgs). - * The function will calculate the transformation and then convert the result into the - * specified output type. - * Compilation will fail if a known conversion does not exist bewteen the two template - * parameters. - * This function follows the advanced API, which allows transforming between different time - * points, and specifying a fixed frame that does not varying in time. - * \tparam A The type of the object to transform. - * \tparam B The type of the transformed output. - * \param in The object to transform - * \param out The transformed output, converted to the specified output type. - * \param target_frame The string identifer for the frame to transform into. - * \param target_time The time into which to transform - * \param fixed_frame The frame in which to treat the transform as constant in time. - * \param timeout How long to wait for the target frame. Default value is zero (no blocking). - * \return The transformed output, converted to the specified output type. 
-
-
-  /** \brief Transform an input into the target frame and convert to a specified output type (advanced).
-   * It is templated on two types: the type of the input object and the type of the
-   * transformed output.
-   * For example, the template type could be a Transform, Pose, Vector, or Quaternion message
-   * type (as defined in geometry_msgs).
-   * The function will calculate the transformation and then convert the result into the
-   * specified output type.
-   * Compilation will fail if a known conversion does not exist between the two template
-   * parameters.
-   * This function follows the advanced API, which allows transforming between different time
-   * points, and specifying a fixed frame that does not vary in time.
-   * \tparam A The type of the object to transform.
-   * \tparam B The type of the transformed output.
-   * \param in The object to transform.
-   * \param out The transformed output, converted to the specified output type.
-   * \param target_frame The string identifier for the frame to transform into.
-   * \param target_time The time into which to transform.
-   * \param fixed_frame The frame in which to treat the transform as constant in time.
-   * \param timeout How long to wait for the target frame. Default value is zero (no blocking).
-   * \return The transformed output, converted to the specified output type.
-   */
-  template <class A, class B>
-    B& transform(const A& in, B& out,
-                 const std::string& target_frame, const ros::Time& target_time,
-                 const std::string& fixed_frame, ros::Duration timeout=ros::Duration(0.0)) const
-  {
-    // do the transform
-    A copy = transform(in, target_frame, target_time, fixed_frame, timeout);
-    tf2::convert(copy, out);
-    return out;
-  }
-
-
- }; // class
-
-
-} // namespace
-
-#endif // TF2_ROS_BUFFER_INTERFACE_H
diff --git a/src/geometry2/tf2_ros/include/tf2_ros/buffer_server.h b/src/geometry2/tf2_ros/include/tf2_ros/buffer_server.h
deleted file mode 100644
index 7481466..0000000
--- a/src/geometry2/tf2_ros/include/tf2_ros/buffer_server.h
+++ /dev/null
@@ -1,92 +0,0 @@
-/*********************************************************************
-*
-* Software License Agreement (BSD License)
-*
-* Copyright (c) 2009, Willow Garage, Inc.
-* All rights reserved.
-*
-* Redistribution and use in source and binary forms, with or without
-* modification, are permitted provided that the following conditions
-* are met:
-*
-*  * Redistributions of source code must retain the above copyright
-*    notice, this list of conditions and the following disclaimer.
-*  * Redistributions in binary form must reproduce the above
-*    copyright notice, this list of conditions and the following
-*    disclaimer in the documentation and/or other materials provided
-*    with the distribution.
-*  * Neither the name of Willow Garage, Inc. nor the names of its
-*    contributors may be used to endorse or promote products derived
-*    from this software without specific prior written permission.
-*
-* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
-* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
-* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
-* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
-* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
-* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-* POSSIBILITY OF SUCH DAMAGE.
-*
-* Author: Eitan Marder-Eppstein
-*********************************************************************/
-#ifndef TF2_ROS_BUFFER_SERVER_H_
-#define TF2_ROS_BUFFER_SERVER_H_
-
-#include <actionlib/server/action_server.h>
-#include <tf2_msgs/LookupTransformAction.h>
-#include <geometry_msgs/TransformStamped.h>
-#include <tf2_ros/buffer.h>
-
-namespace tf2_ros
-{
-  /** \brief Action server for the actionlib-based implementation of tf2_ros::BufferInterface.
-   *
-   * Use this class with a tf2_ros::TransformListener in the same process.
-   * You can use this class with a tf2_ros::BufferClient in a different process.
-   */
-  class BufferServer
-  {
-    private:
-      typedef actionlib::ActionServer<tf2_msgs::LookupTransformAction> LookupTransformServer;
-      typedef LookupTransformServer::GoalHandle GoalHandle;
-
-      struct GoalInfo
-      {
-        GoalHandle handle;
-        ros::Time end_time;
-      };
-
-    public:
-      /** \brief Constructor
-       * \param buffer The Buffer that this BufferServer will wrap.
-       * \param ns The namespace in which to look for action clients.
-       * \param auto_start Pass argument to the constructor of the ActionServer.
-       * \param check_period How often to check for changes to known transforms (via a timer event).
-       */
-      BufferServer(const Buffer& buffer, const std::string& ns,
-          bool auto_start = true, ros::Duration check_period = ros::Duration(0.01));
-
-      /** \brief Start the action server.
-       */
-      void start();
-
-    private:
-      void goalCB(GoalHandle gh);
-      void cancelCB(GoalHandle gh);
-      void checkTransforms(const ros::TimerEvent& e);
-      bool canTransform(GoalHandle gh);
-      geometry_msgs::TransformStamped lookupTransform(GoalHandle gh);
-
-      const Buffer& buffer_;
-      LookupTransformServer server_;
-      std::list<GoalInfo> active_goals_;
-      boost::mutex mutex_;
-      ros::Timer check_timer_;
-  };
-}
-#endif
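As a usage sketch (not part of the deleted sources): a process that owns a Buffer can expose it to other processes through this action server. Names and durations below are illustrative; the buffer_server_main.cpp deleted later in this diff follows the same pattern.

```cpp
#include <ros/ros.h>
#include <tf2_ros/buffer.h>
#include <tf2_ros/transform_listener.h>
#include <tf2_ros/buffer_server.h>

int main(int argc, char** argv)
{
  ros::init(argc, argv, "example_buffer_server");

  tf2_ros::Buffer buffer(ros::Duration(30.0));   // 30 s cache, illustrative
  tf2_ros::TransformListener listener(buffer);   // keep the buffer filled
  tf2_ros::BufferServer server(buffer, "tf2_buffer_server", false);
  server.start();                                // needed since auto_start was false

  ros::spin();
  return 0;
}
```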
diff --git a/src/geometry2/tf2_ros/include/tf2_ros/message_filter.h b/src/geometry2/tf2_ros/include/tf2_ros/message_filter.h
deleted file mode 100644
index d741042..0000000
--- a/src/geometry2/tf2_ros/include/tf2_ros/message_filter.h
+++ /dev/null
@@ -1,716 +0,0 @@
-/*
- * Copyright (c) 2010, Willow Garage, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- *     * Redistributions of source code must retain the above copyright
- *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *       notice, this list of conditions and the following disclaimer in the
- *       documentation and/or other materials provided with the distribution.
- *     * Neither the name of the Willow Garage, Inc. nor the names of its
- *       contributors may be used to endorse or promote products derived from
- *       this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-/** \author Josh Faust */
-
-#ifndef TF2_ROS_MESSAGE_FILTER_H
-#define TF2_ROS_MESSAGE_FILTER_H
-
-#include
-
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-
-#include
-#include
-
-#include
-#include
-#include
-
-#define TF2_ROS_MESSAGEFILTER_DEBUG(fmt, ...) \
-  ROS_DEBUG_NAMED("message_filter", std::string(std::string("MessageFilter [target=%s]: ") + std::string(fmt)).c_str(), getTargetFramesString().c_str(), __VA_ARGS__)
-
-#define TF2_ROS_MESSAGEFILTER_WARN(fmt, ...) \
-  ROS_WARN_NAMED("message_filter", std::string(std::string("MessageFilter [target=%s]: ") + std::string(fmt)).c_str(), getTargetFramesString().c_str(), __VA_ARGS__)
-
-namespace tf2_ros
-{
-
-namespace filter_failure_reasons
-{
-enum FilterFailureReason
-{
-  /// The message buffer overflowed, and this message was pushed off the back of the queue, but the reason it was unable to be transformed is unknown.
- Unknown, - /// The timestamp on the message is more than the cache length earlier than the newest data in the transform cache - OutTheBack, - /// The frame_id on the message is empty - EmptyFrameID, -}; -} -typedef filter_failure_reasons::FilterFailureReason FilterFailureReason; - -class MessageFilterBase -{ -public: - typedef std::vector V_string; - - virtual ~MessageFilterBase(){} - virtual void clear() = 0; - virtual void setTargetFrame(const std::string& target_frame) = 0; - virtual void setTargetFrames(const V_string& target_frames) = 0; - virtual void setTolerance(const ros::Duration& tolerance) = 0; -}; - -/** - * \brief Follows the patterns set by the message_filters package to implement a filter which only passes messages through once there is transform data available - * - * The callbacks used in this class are of the same form as those used by roscpp's message callbacks. - * - * MessageFilter is templated on a message type. - * - * \section example_usage Example Usage - * - * If you want to hook a MessageFilter into a ROS topic: -\verbatim -message_filters::Subscriber sub(node_handle_, "topic", 10); -tf::MessageFilter tf_filter(sub, tf_listener_, "/map", 10); -tf_filter.registerCallback(&MyClass::myCallback, this); -\endverbatim - */ -template -class MessageFilter : public MessageFilterBase, public message_filters::SimpleFilter -{ -public: - typedef boost::shared_ptr MConstPtr; - typedef ros::MessageEvent MEvent; - typedef boost::function FailureCallback; - typedef boost::signals2::signal FailureSignal; - - // If you hit this assert your message does not have a header, or does not have the HasHeader trait defined for it - // Actually, we need to check that the message has a header, or that it - // has the FrameId and Stamp traits. However I don't know how to do that - // so simply commenting out for now. - //ROS_STATIC_ASSERT(ros::message_traits::HasHeader::value); - - /** - * \brief Constructor - * - * \param bc The tf2::BufferCore this filter should use - * \param target_frame The frame this filter should attempt to transform to. To use multiple frames, pass an empty string here and use the setTargetFrames() function. - * \param queue_size The number of messages to queue up before throwing away old ones. 0 means infinite (dangerous). - * \param nh The NodeHandle whose callback queue we should add callbacks to - */ - MessageFilter(tf2::BufferCore& bc, const std::string& target_frame, uint32_t queue_size, const ros::NodeHandle& nh) - : bc_(bc) - , queue_size_(queue_size) - , callback_queue_(nh.getCallbackQueue()) - { - init(); - - setTargetFrame(target_frame); - } - - /** - * \brief Constructor - * - * \param f The filter to connect this filter's input to. Often will be a message_filters::Subscriber. - * \param bc The tf2::BufferCore this filter should use - * \param target_frame The frame this filter should attempt to transform to. To use multiple frames, pass an empty string here and use the setTargetFrames() function. - * \param queue_size The number of messages to queue up before throwing away old ones. 0 means infinite (dangerous). 
- * \param nh The NodeHandle whose callback queue we should add callbacks to - */ - template - MessageFilter(F& f, tf2::BufferCore& bc, const std::string& target_frame, uint32_t queue_size, const ros::NodeHandle& nh) - : bc_(bc) - , queue_size_(queue_size) - , callback_queue_(nh.getCallbackQueue()) - { - init(); - - setTargetFrame(target_frame); - - connectInput(f); - } - - /** - * \brief Constructor - * - * \param bc The tf2::BufferCore this filter should use - * \param target_frame The frame this filter should attempt to transform to. To use multiple frames, pass an empty string here and use the setTargetFrames() function. - * \param queue_size The number of messages to queue up before throwing away old ones. 0 means infinite (dangerous). - * \param cbqueue The callback queue to add callbacks to. If NULL, callbacks will happen from whatever thread either - * a) add() is called, which will generally be when the previous filter in the chain outputs a message, or - * b) tf2::BufferCore::setTransform() is called - */ - MessageFilter(tf2::BufferCore& bc, const std::string& target_frame, uint32_t queue_size, ros::CallbackQueueInterface* cbqueue) - : bc_(bc) - , queue_size_(queue_size) - , callback_queue_(cbqueue) - { - init(); - - setTargetFrame(target_frame); - } - - /** - * \brief Constructor - * - * \param f The filter to connect this filter's input to. Often will be a message_filters::Subscriber. - * \param bc The tf2::BufferCore this filter should use - * \param target_frame The frame this filter should attempt to transform to. To use multiple frames, pass an empty string here and use the setTargetFrames() function. - * \param queue_size The number of messages to queue up before throwing away old ones. 0 means infinite (dangerous). - * \param cbqueue The callback queue to add callbacks to. If NULL, callbacks will happen from whatever thread either - * a) add() is called, which will generally be when the previous filter in the chain outputs a message, or - * b) tf2::BufferCore::setTransform() is called - */ - template - MessageFilter(F& f, tf2::BufferCore& bc, const std::string& target_frame, uint32_t queue_size, ros::CallbackQueueInterface* cbqueue) - : bc_(bc) - , queue_size_(queue_size) - , callback_queue_(cbqueue) - { - init(); - - setTargetFrame(target_frame); - - connectInput(f); - } - - /** - * \brief Connect this filter's input to another filter's output. If this filter is already connected, disconnects first. 
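As a usage sketch (not part of the deleted sources), here is the tf2_ros counterpart of the tf-era \verbatim example in the class docs above, using the subscriber-plus-NodeHandle constructor: messages are held until the transform into "map" at each message's stamp becomes available. Topic, frame, and message type are illustrative.

```cpp
#include <ros/ros.h>
#include <tf2_ros/buffer.h>
#include <tf2_ros/transform_listener.h>
#include <tf2_ros/message_filter.h>
#include <message_filters/subscriber.h>
#include <geometry_msgs/PointStamped.h>

class PointDrawer
{
public:
  explicit PointDrawer(ros::NodeHandle& nh)
    : listener_(buffer_),
      sub_(nh, "point", 10),
      filter_(sub_, buffer_, "map", 10, nh)  // hold messages until map <- msg frame exists
  {
    filter_.registerCallback(&PointDrawer::callback, this);
  }

private:
  void callback(const geometry_msgs::PointStamped::ConstPtr& msg)
  {
    // Invoked only once the transform to "map" at msg->header.stamp is available.
    (void)msg;
  }

  tf2_ros::Buffer buffer_;
  tf2_ros::TransformListener listener_;
  message_filters::Subscriber<geometry_msgs::PointStamped> sub_;
  tf2_ros::MessageFilter<geometry_msgs::PointStamped> filter_;
};
```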
- */ - template - void connectInput(F& f) - { - message_connection_.disconnect(); - message_connection_ = f.registerCallback(&MessageFilter::incomingMessage, this); - } - - /** - * \brief Destructor - */ - ~MessageFilter() - { - message_connection_.disconnect(); - - MessageFilter::clear(); - - TF2_ROS_MESSAGEFILTER_DEBUG("Successful Transforms: %llu, Discarded due to age: %llu, Transform messages received: %llu, Messages received: %llu, Total dropped: %llu", - (long long unsigned int)successful_transform_count_, - (long long unsigned int)failed_out_the_back_count_, (long long unsigned int)transform_message_count_, - (long long unsigned int)incoming_message_count_, (long long unsigned int)dropped_message_count_); - - } - - /** - * \brief Set the frame you need to be able to transform to before getting a message callback - */ - void setTargetFrame(const std::string& target_frame) - { - V_string frames; - frames.push_back(target_frame); - setTargetFrames(frames); - } - - /** - * \brief Set the frames you need to be able to transform to before getting a message callback - */ - void setTargetFrames(const V_string& target_frames) - { - boost::mutex::scoped_lock frames_lock(target_frames_mutex_); - - target_frames_.resize(target_frames.size()); - std::transform(target_frames.begin(), target_frames.end(), target_frames_.begin(), this->stripSlash); - expected_success_count_ = target_frames_.size() * (time_tolerance_.isZero() ? 1 : 2); - - std::stringstream ss; - for (V_string::iterator it = target_frames_.begin(); it != target_frames_.end(); ++it) - { - ss << *it << " "; - } - target_frames_string_ = ss.str(); - } - /** - * \brief Get the target frames as a string for debugging - */ - std::string getTargetFramesString() - { - boost::mutex::scoped_lock lock(target_frames_mutex_); - return target_frames_string_; - }; - - /** - * \brief Set the required tolerance for the notifier to return true - */ - void setTolerance(const ros::Duration& tolerance) - { - boost::mutex::scoped_lock lock(target_frames_mutex_); - time_tolerance_ = tolerance; - expected_success_count_ = target_frames_.size() * (time_tolerance_.isZero() ? 
1 : 2); - } - - /** - * \brief Clear any messages currently in the queue - */ - void clear() - { - boost::unique_lock< boost::shared_mutex > unique_lock(messages_mutex_); - - TF2_ROS_MESSAGEFILTER_DEBUG("%s", "Cleared"); - - bc_.removeTransformableCallback(callback_handle_); - callback_handle_ = bc_.addTransformableCallback(boost::bind(&MessageFilter::transformable, this, _1, _2, _3, _4, _5)); - - messages_.clear(); - message_count_ = 0; - - // remove pending callbacks in callback queue as well - if (callback_queue_) - callback_queue_->removeByID((uint64_t)this); - - warned_about_empty_frame_id_ = false; - } - - void add(const MEvent& evt) - { - if (target_frames_.empty()) - { - return; - } - - namespace mt = ros::message_traits; - const MConstPtr& message = evt.getMessage(); - std::string frame_id = stripSlash(mt::FrameId::value(*message)); - ros::Time stamp = mt::TimeStamp::value(*message); - - if (frame_id.empty()) - { - messageDropped(evt, filter_failure_reasons::EmptyFrameID); - return; - } - - // iterate through the target frames and add requests for each of them - MessageInfo info; - info.handles.reserve(expected_success_count_); - { - V_string target_frames_copy; - // Copy target_frames_ to avoid deadlock from #79 - { - boost::mutex::scoped_lock frames_lock(target_frames_mutex_); - target_frames_copy = target_frames_; - } - - V_string::iterator it = target_frames_copy.begin(); - V_string::iterator end = target_frames_copy.end(); - for (; it != end; ++it) - { - const std::string& target_frame = *it; - tf2::TransformableRequestHandle handle = bc_.addTransformableRequest(callback_handle_, target_frame, frame_id, stamp); - if (handle == 0xffffffffffffffffULL) // never transformable - { - messageDropped(evt, filter_failure_reasons::OutTheBack); - return; - } - else if (handle == 0) - { - ++info.success_count; - } - else - { - info.handles.push_back(handle); - } - - if (!time_tolerance_.isZero()) - { - handle = bc_.addTransformableRequest(callback_handle_, target_frame, frame_id, stamp + time_tolerance_); - if (handle == 0xffffffffffffffffULL) // never transformable - { - messageDropped(evt, filter_failure_reasons::OutTheBack); - return; - } - else if (handle == 0) - { - ++info.success_count; - } - else - { - info.handles.push_back(handle); - } - } - } - } - - - // We can transform already - if (info.success_count == expected_success_count_) - { - messageReady(evt); - } - else - { - boost::unique_lock< boost::shared_mutex > unique_lock(messages_mutex_); - // If this message is about to push us past our queue size, erase the oldest message - if (queue_size_ != 0 && message_count_ + 1 > queue_size_) - { - ++dropped_message_count_; - const MessageInfo& front = messages_.front(); - TF2_ROS_MESSAGEFILTER_DEBUG("Removed oldest message because buffer is full, count now %d (frame_id=%s, stamp=%f)", message_count_, - (mt::FrameId::value(*front.event.getMessage())).c_str(), mt::TimeStamp::value(*front.event.getMessage()).toSec()); - - V_TransformableRequestHandle::const_iterator it = front.handles.begin(); - V_TransformableRequestHandle::const_iterator end = front.handles.end(); - - for (; it != end; ++it) - { - bc_.cancelTransformableRequest(*it); - } - - messageDropped(front.event, filter_failure_reasons::Unknown); - messages_.pop_front(); - --message_count_; - } - - // Add the message to our list - info.event = evt; - messages_.push_back(info); - ++message_count_; - } - - TF2_ROS_MESSAGEFILTER_DEBUG("Added message in frame %s at time %.3f, count now %d", frame_id.c_str(), stamp.toSec(), 
message_count_); - - ++incoming_message_count_; - } - - /** - * \brief Manually add a message into this filter. - * \note If the message (or any other messages in the queue) are immediately transformable this will immediately call through to the output callback, possibly - * multiple times - */ - void add(const MConstPtr& message) - { - boost::shared_ptr > header(new std::map); - (*header)["callerid"] = "unknown"; - ros::WallTime n = ros::WallTime::now(); - ros::Time t(n.sec, n.nsec); - add(MEvent(message, header, t)); - } - - /** - * \brief Register a callback to be called when a message is about to be dropped - * \param callback The callback to call - */ - message_filters::Connection registerFailureCallback(const FailureCallback& callback) - { - boost::mutex::scoped_lock lock(failure_signal_mutex_); - return message_filters::Connection(boost::bind(&MessageFilter::disconnectFailure, this, _1), failure_signal_.connect(callback)); - } - - virtual void setQueueSize( uint32_t new_queue_size ) - { - queue_size_ = new_queue_size; - } - - virtual uint32_t getQueueSize() - { - return queue_size_; - } - - -private: - - void init() - { - message_count_ = 0; - successful_transform_count_ = 0; - failed_out_the_back_count_ = 0; - transform_message_count_ = 0; - incoming_message_count_ = 0; - dropped_message_count_ = 0; - time_tolerance_ = ros::Duration(0.0); - warned_about_empty_frame_id_ = false; - expected_success_count_ = 1; - - callback_handle_ = bc_.addTransformableCallback(boost::bind(&MessageFilter::transformable, this, _1, _2, _3, _4, _5)); - } - - void transformable(tf2::TransformableRequestHandle request_handle, const std::string& /* target_frame */, const std::string& /* source_frame */, - ros::Time /* time */, tf2::TransformableResult result) - { - namespace mt = ros::message_traits; - - boost::upgrade_lock< boost::shared_mutex > lock(messages_mutex_); - - // find the message this request is associated with - typename L_MessageInfo::iterator msg_it = messages_.begin(); - typename L_MessageInfo::iterator msg_end = messages_.end(); - for (; msg_it != msg_end; ++msg_it) - { - MessageInfo& info = *msg_it; - V_TransformableRequestHandle::const_iterator handle_it = std::find(info.handles.begin(), info.handles.end(), request_handle); - if (handle_it != info.handles.end()) - { - // found msg_it - ++info.success_count; - break; - } - } - - if (msg_it == msg_end) - { - return; - } - - const MessageInfo& info = *msg_it; - if (info.success_count < expected_success_count_) - { - return; - } - - bool can_transform = true; - const MConstPtr& message = info.event.getMessage(); - std::string frame_id = stripSlash(mt::FrameId::value(*message)); - ros::Time stamp = mt::TimeStamp::value(*message); - - if (result == tf2::TransformAvailable) - { - boost::mutex::scoped_lock frames_lock(target_frames_mutex_); - // make sure we can still perform all the necessary transforms - typename V_string::iterator it = target_frames_.begin(); - typename V_string::iterator end = target_frames_.end(); - for (; it != end; ++it) - { - const std::string& target = *it; - if (!bc_.canTransform(target, frame_id, stamp)) - { - can_transform = false; - break; - } - - if (!time_tolerance_.isZero()) - { - if (!bc_.canTransform(target, frame_id, stamp + time_tolerance_)) - { - can_transform = false; - break; - } - } - } - } - else - { - can_transform = false; - } - - // We will be mutating messages now, require unique lock - boost::upgrade_to_unique_lock< boost::shared_mutex > uniqueLock(lock); - if (can_transform) - { - 
TF2_ROS_MESSAGEFILTER_DEBUG("Message ready in frame %s at time %.3f, count now %d", frame_id.c_str(), stamp.toSec(), message_count_ - 1); - - ++successful_transform_count_; - - messageReady(info.event); - - } - else - { - ++dropped_message_count_; - - TF2_ROS_MESSAGEFILTER_DEBUG("Discarding message in frame %s at time %.3f, count now %d", frame_id.c_str(), stamp.toSec(), message_count_ - 1); - messageDropped(info.event, filter_failure_reasons::Unknown); - } - - messages_.erase(msg_it); - --message_count_; - } - - /** - * \brief Callback that happens when we receive a message on the message topic - */ - void incomingMessage(const ros::MessageEvent& evt) - { - add(evt); - } - - void checkFailures() - { - if (next_failure_warning_.isZero()) - { - next_failure_warning_ = ros::WallTime::now() + ros::WallDuration(15); - } - - if (ros::WallTime::now() >= next_failure_warning_) - { - if (incoming_message_count_ - message_count_ == 0) - { - return; - } - - double dropped_pct = (double)dropped_message_count_ / (double)(incoming_message_count_ - message_count_); - if (dropped_pct > 0.95) - { - TF2_ROS_MESSAGEFILTER_WARN("Dropped %.2f%% of messages so far. Please turn the [%s.message_notifier] rosconsole logger to DEBUG for more information.", dropped_pct*100, ROSCONSOLE_DEFAULT_NAME); - next_failure_warning_ = ros::WallTime::now() + ros::WallDuration(60); - - if ((double)failed_out_the_back_count_ / (double)dropped_message_count_ > 0.5) - { - TF2_ROS_MESSAGEFILTER_WARN(" The majority of dropped messages were due to messages growing older than the TF cache time. The last message's timestamp was: %f, and the last frame_id was: %s", last_out_the_back_stamp_.toSec(), last_out_the_back_frame_.c_str()); - } - } - } - } - - struct CBQueueCallback : public ros::CallbackInterface - { - CBQueueCallback(MessageFilter* filter, const MEvent& event, bool success, FilterFailureReason reason) - : event_(event) - , filter_(filter) - , reason_(reason) - , success_(success) - {} - - - virtual CallResult call() - { - if (success_) - { - filter_->signalMessage(event_); - } - else - { - filter_->signalFailure(event_, reason_); - } - - return Success; - } - - private: - MEvent event_; - MessageFilter* filter_; - FilterFailureReason reason_; - bool success_; - }; - - void messageDropped(const MEvent& evt, FilterFailureReason reason) - { - if (callback_queue_) - { - ros::CallbackInterfacePtr cb(new CBQueueCallback(this, evt, false, reason)); - callback_queue_->addCallback(cb, (uint64_t)this); - } - else - { - signalFailure(evt, reason); - } - } - - void messageReady(const MEvent& evt) - { - if (callback_queue_) - { - ros::CallbackInterfacePtr cb(new CBQueueCallback(this, evt, true, filter_failure_reasons::Unknown)); - callback_queue_->addCallback(cb, (uint64_t)this); - } - else - { - this->signalMessage(evt); - } - } - - void disconnectFailure(const message_filters::Connection& c) - { - boost::mutex::scoped_lock lock(failure_signal_mutex_); - c.getBoostConnection().disconnect(); - } - - void signalFailure(const MEvent& evt, FilterFailureReason reason) - { - boost::mutex::scoped_lock lock(failure_signal_mutex_); - failure_signal_(evt.getMessage(), reason); - } - - static - std::string stripSlash(const std::string& in) - { - if ( !in.empty() && (in[0] == '/')) - { - std::string out = in; - out.erase(0, 1); - return out; - } - return in; - } - - tf2::BufferCore& bc_; ///< The Transformer used to determine if transformation data is available - V_string target_frames_; ///< The frames we need to be able to transform to before a 
message is ready - std::string target_frames_string_; - boost::mutex target_frames_mutex_; ///< A mutex to protect access to the target_frames_ list and target_frames_string. - uint32_t queue_size_; ///< The maximum number of messages we queue up - tf2::TransformableCallbackHandle callback_handle_; - - typedef std::vector V_TransformableRequestHandle; - struct MessageInfo - { - MessageInfo() - : success_count(0) - {} - - MEvent event; - V_TransformableRequestHandle handles; - uint32_t success_count; - }; - typedef std::list L_MessageInfo; - L_MessageInfo messages_; - uint32_t message_count_; ///< The number of messages in the list. Used because \.size() may have linear cost - boost::shared_mutex messages_mutex_; ///< The mutex used for locking message list operations - uint32_t expected_success_count_; - - bool warned_about_empty_frame_id_; - - uint64_t successful_transform_count_; - uint64_t failed_out_the_back_count_; - uint64_t transform_message_count_; - uint64_t incoming_message_count_; - uint64_t dropped_message_count_; - - ros::Time last_out_the_back_stamp_; - std::string last_out_the_back_frame_; - - ros::WallTime next_failure_warning_; - - ros::Duration time_tolerance_; ///< Provide additional tolerance on time for messages which are stamped but can have associated duration - - message_filters::Connection message_connection_; - - FailureSignal failure_signal_; - boost::mutex failure_signal_mutex_; - - ros::CallbackQueueInterface* callback_queue_; -}; - -} // namespace tf2 - -#endif diff --git a/src/geometry2/tf2_ros/include/tf2_ros/static_transform_broadcaster.h b/src/geometry2/tf2_ros/include/tf2_ros/static_transform_broadcaster.h deleted file mode 100644 index 823062e..0000000 --- a/src/geometry2/tf2_ros/include/tf2_ros/static_transform_broadcaster.h +++ /dev/null @@ -1,73 +0,0 @@ -/* - * Copyright (c) 2008, Willow Garage, Inc. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the Willow Garage, Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. 
- */
-
-
-/** \author Tully Foote */
-
-#ifndef TF2_ROS_STATICTRANSFORMBROADCASTER_H
-#define TF2_ROS_STATICTRANSFORMBROADCASTER_H
-
-
-
-#include "ros/ros.h"
-#include "geometry_msgs/TransformStamped.h"
-#include "tf2_msgs/TFMessage.h"
-
-namespace tf2_ros
-{
-
-
-/** \brief This class provides an easy way to publish coordinate frame transform information.
- * It handles all the messaging and stuffing of messages; the function prototypes lay out all the
- * necessary data needed for each message. */
-
-class StaticTransformBroadcaster{
-public:
-  /** \brief Constructor (needs a ros::Node reference) */
-  StaticTransformBroadcaster();
-
-  /** \brief Send a TransformStamped message
-   * The stamped data structure includes frame_id, and time, and parent_id already. */
-  void sendTransform(const geometry_msgs::TransformStamped & transform);
-
-  /** \brief Send a vector of TransformStamped messages
-   * The stamped data structure includes frame_id, and time, and parent_id already. */
-  void sendTransform(const std::vector<geometry_msgs::TransformStamped> & transforms);
-
-private:
-  /// Internal reference to ros::Node
-  ros::NodeHandle node_;
-  ros::Publisher publisher_;
-  tf2_msgs::TFMessage net_message_;
-
-};
-
-}
-
-#endif //TF_STATICTRANSFORMBROADCASTER_H
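A usage sketch for the broadcaster above (not part of the deleted sources): because /tf_static is latched, a static transform only needs to be sent once. Frame names and offsets are illustrative.

```cpp
#include <ros/ros.h>
#include <tf2_ros/static_transform_broadcaster.h>
#include <geometry_msgs/TransformStamped.h>

int main(int argc, char** argv)
{
  ros::init(argc, argv, "example_static_broadcaster");
  ros::NodeHandle nh;

  tf2_ros::StaticTransformBroadcaster broadcaster;

  geometry_msgs::TransformStamped t;
  t.header.stamp = ros::Time::now();
  t.header.frame_id = "base_link";   // illustrative parent
  t.child_frame_id = "camera_link";  // illustrative child
  t.transform.translation.x = 0.1;
  t.transform.rotation.w = 1.0;      // identity rotation

  broadcaster.sendTransform(t);      // latched: sending once is enough
  ros::spin();
  return 0;
}
```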
diff --git a/src/geometry2/tf2_ros/include/tf2_ros/transform_broadcaster.h b/src/geometry2/tf2_ros/include/tf2_ros/transform_broadcaster.h
deleted file mode 100644
index 4f5f35c..0000000
--- a/src/geometry2/tf2_ros/include/tf2_ros/transform_broadcaster.h
+++ /dev/null
@@ -1,78 +0,0 @@
-/*
- * Copyright (c) 2008, Willow Garage, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- *     * Redistributions of source code must retain the above copyright
- *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *       notice, this list of conditions and the following disclaimer in the
- *       documentation and/or other materials provided with the distribution.
- *     * Neither the name of the Willow Garage, Inc. nor the names of its
- *       contributors may be used to endorse or promote products derived from
- *       this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-
-/** \author Tully Foote */
-
-#ifndef TF2_ROS_TRANSFORMBROADCASTER_H
-#define TF2_ROS_TRANSFORMBROADCASTER_H
-
-
-
-#include "ros/ros.h"
-#include "geometry_msgs/TransformStamped.h"
-namespace tf2_ros
-{
-
-
-/** \brief This class provides an easy way to publish coordinate frame transform information.
- * It handles all the messaging and stuffing of messages; the function prototypes lay out all the
- * necessary data needed for each message. */
-
-class TransformBroadcaster{
-public:
-  /** \brief Constructor (needs a ros::Node reference) */
-  TransformBroadcaster();
-
-  /** \brief Send a StampedTransform
-   * The stamped data structure includes frame_id, and time, and parent_id already. */
-  // void sendTransform(const StampedTransform & transform);
-
-  /** \brief Send a vector of StampedTransforms
-   * The stamped data structure includes frame_id, and time, and parent_id already. */
-  //void sendTransform(const std::vector<StampedTransform> & transforms);
-
-  /** \brief Send a TransformStamped message
-   * The stamped data structure includes frame_id, and time, and parent_id already. */
-  void sendTransform(const geometry_msgs::TransformStamped & transform);
-
-  /** \brief Send a vector of TransformStamped messages
-   * The stamped data structure includes frame_id, and time, and parent_id already. */
-  void sendTransform(const std::vector<geometry_msgs::TransformStamped> & transforms);
-
-private:
-  /// Internal reference to ros::Node
-  ros::NodeHandle node_;
-  ros::Publisher publisher_;
-
-};
-
-}
-
-#endif //TF_TRANSFORMBROADCASTER_H
diff --git a/src/geometry2/tf2_ros/include/tf2_ros/transform_listener.h b/src/geometry2/tf2_ros/include/tf2_ros/transform_listener.h
deleted file mode 100644
index d96864f..0000000
--- a/src/geometry2/tf2_ros/include/tf2_ros/transform_listener.h
+++ /dev/null
@@ -1,89 +0,0 @@
-/*
- * Copyright (c) 2008, Willow Garage, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- *     * Redistributions of source code must retain the above copyright
- *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *       notice, this list of conditions and the following disclaimer in the
- *       documentation and/or other materials provided with the distribution.
- *     * Neither the name of the Willow Garage, Inc. nor the names of its
- *       contributors may be used to endorse or promote products derived from
- *       this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-/** \author Tully Foote */
-
-#ifndef TF2_ROS_TRANSFORMLISTENER_H
-#define TF2_ROS_TRANSFORMLISTENER_H
-
-#include "std_msgs/Empty.h"
-#include "tf2_msgs/TFMessage.h"
-#include "ros/ros.h"
-#include "ros/callback_queue.h"
-
-#include "tf2_ros/buffer.h"
-
-#include "boost/thread.hpp"
-
-namespace tf2_ros{
-
-/** \brief This class provides an easy way to request and receive coordinate frame transform information.
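A usage sketch for this listener (not part of the deleted sources): keep a TransformListener alive for the node's lifetime so its Buffer stays populated, then query the buffer. Frames and rate are illustrative.

```cpp
#include <ros/ros.h>
#include <tf2/exceptions.h>
#include <tf2_ros/buffer.h>
#include <tf2_ros/transform_listener.h>
#include <geometry_msgs/TransformStamped.h>

int main(int argc, char** argv)
{
  ros::init(argc, argv, "example_listener");
  ros::NodeHandle nh;

  tf2_ros::Buffer buffer;
  tf2_ros::TransformListener listener(buffer);  // subscribes to /tf and /tf_static

  ros::Rate rate(10.0);
  while (nh.ok())
  {
    try
    {
      geometry_msgs::TransformStamped tf =
          buffer.lookupTransform("map", "base_link", ros::Time(0));
      (void)tf;  // use the transform
    }
    catch (const tf2::TransformException& ex)
    {
      ROS_WARN_THROTTLE(1.0, "%s", ex.what());
    }
    rate.sleep();
  }
  return 0;
}
```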
- */ -class TransformListener -{ - -public: - /**@brief Constructor for transform listener */ - TransformListener(tf2::BufferCore& buffer, bool spin_thread = true); - TransformListener(tf2::BufferCore& buffer, const ros::NodeHandle& nh, bool spin_thread = true); - - ~TransformListener(); - -private: - - /// Initialize this transform listener, subscribing, advertising services, etc. - void init(); - void initWithThread(); - - /// Callback function for ros message subscription - void subscription_callback(const ros::MessageEvent& msg_evt); - void static_subscription_callback(const ros::MessageEvent& msg_evt); - void subscription_callback_impl(const ros::MessageEvent& msg_evt, bool is_static); - - ros::CallbackQueue tf_message_callback_queue_; - boost::thread* dedicated_listener_thread_; - ros::NodeHandle node_; - ros::Subscriber message_subscriber_tf_; - ros::Subscriber message_subscriber_tf_static_; - tf2::BufferCore& buffer_; - bool using_dedicated_thread_; - ros::Time last_update_; - - void dedicatedListenerThread() - { - while (using_dedicated_thread_) - { - tf_message_callback_queue_.callAvailable(ros::WallDuration(0.01)); - } - }; - -}; -} - -#endif //TF_TRANSFORMLISTENER_H diff --git a/src/geometry2/tf2_ros/package.xml b/src/geometry2/tf2_ros/package.xml deleted file mode 100644 index 8133349..0000000 --- a/src/geometry2/tf2_ros/package.xml +++ /dev/null @@ -1,43 +0,0 @@ - - tf2_ros - 0.6.7 - - This package contains the ROS bindings for the tf2 library, for both Python and C++. - - Eitan Marder-Eppstein - Wim Meeussen - Tully Foote - BSD - - http://www.ros.org/wiki/tf2_ros - - catkin - - actionlib - actionlib_msgs - geometry_msgs - message_filters - roscpp - rosgraph - rospy - std_msgs - tf2 - tf2_msgs - tf2_py - xmlrpcpp - - actionlib - actionlib_msgs - geometry_msgs - message_filters - roscpp - rosgraph - rospy - std_msgs - tf2 - tf2_msgs - tf2_py - xmlrpcpp - - rostest - diff --git a/src/geometry2/tf2_ros/rosdoc.yaml b/src/geometry2/tf2_ros/rosdoc.yaml deleted file mode 100644 index 0efc7fd..0000000 --- a/src/geometry2/tf2_ros/rosdoc.yaml +++ /dev/null @@ -1,8 +0,0 @@ - - builder: doxygen - name: C++ API - output_dir: c++ - file_patterns: '*.c *.cpp *.h *.cc *.hh *.dox' - - builder: sphinx - name: Python API - output_dir: python - sphinx_root_dir: doc diff --git a/src/geometry2/tf2_ros/setup.py b/src/geometry2/tf2_ros/setup.py deleted file mode 100644 index 4473aa4..0000000 --- a/src/geometry2/tf2_ros/setup.py +++ /dev/null @@ -1,13 +0,0 @@ -#!/usr/bin/env python - -from distutils.core import setup -from catkin_pkg.python_setup import generate_distutils_setup - -d = generate_distutils_setup( - packages=['tf2_ros'], - package_dir={'': 'src'}, - requires=['rospy', 'actionlib', 'actionlib_msgs', 'tf2_msgs', - 'tf2_py', 'geometry_msgs'] -) - -setup(**d) diff --git a/src/geometry2/tf2_ros/src/buffer.cpp b/src/geometry2/tf2_ros/src/buffer.cpp deleted file mode 100644 index 8030301..0000000 --- a/src/geometry2/tf2_ros/src/buffer.cpp +++ /dev/null @@ -1,201 +0,0 @@ -/* - * Copyright (c) 2008, Willow Garage, Inc. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. 
- * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the Willow Garage, Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ - -/** \author Wim Meeussen */ - - -#include "tf2_ros/buffer.h" - -#include -#include - -namespace tf2_ros -{ - -static const double CAN_TRANSFORM_POLLING_SCALE = 0.01; - -Buffer::Buffer(ros::Duration cache_time, bool debug) : - BufferCore(cache_time) -{ - if(debug && !ros::service::exists("~tf2_frames", false)) - { - ros::NodeHandle n("~"); - frames_server_ = n.advertiseService("tf2_frames", &Buffer::getFrames, this); - } -} - -geometry_msgs::TransformStamped -Buffer::lookupTransform(const std::string& target_frame, const std::string& source_frame, - const ros::Time& time, const ros::Duration timeout) const -{ - canTransform(target_frame, source_frame, time, timeout); - return lookupTransform(target_frame, source_frame, time); -} - - -geometry_msgs::TransformStamped -Buffer::lookupTransform(const std::string& target_frame, const ros::Time& target_time, - const std::string& source_frame, const ros::Time& source_time, - const std::string& fixed_frame, const ros::Duration timeout) const -{ - canTransform(target_frame, target_time, source_frame, source_time, fixed_frame, timeout); - return lookupTransform(target_frame, target_time, source_frame, source_time, fixed_frame); -} - -/** This is a workaround for the case that we're running inside of - rospy and ros::Time is not initialized inside the c++ instance. - This makes the system fall back to Wall time if not initialized. -*/ -ros::Time now_fallback_to_wall() -{ - try - { - return ros::Time::now(); - } - catch (ros::TimeNotInitializedException ex) - { - ros::WallTime wt = ros::WallTime::now(); - return ros::Time(wt.sec, wt.nsec); - } -} - -/** This is a workaround for the case that we're running inside of - rospy and ros::Time is not initialized inside the c++ instance. - This makes the system fall back to Wall time if not initialized. - https://github.com/ros/geometry/issues/30 -*/ -void sleep_fallback_to_wall(const ros::Duration& d) -{ - try - { - d.sleep(); - } - catch (ros::TimeNotInitializedException ex) - { - ros::WallDuration wd = ros::WallDuration(d.sec, d.nsec); - wd.sleep(); - } -} - -void conditionally_append_timeout_info(std::string * errstr, const ros::Time& start_time, - const ros::Duration& timeout) -{ - if (errstr) - { - std::stringstream ss; - ss << ". 
canTransform returned after "<< (now_fallback_to_wall() - start_time).toSec() \ - <<" timeout was " << timeout.toSec() << "."; - (*errstr) += ss.str(); - } -} - -bool -Buffer::canTransform(const std::string& target_frame, const std::string& source_frame, - const ros::Time& time, const ros::Duration timeout, std::string* errstr) const -{ - // Clear the errstr before populating it if it's valid. - if (errstr) - { - errstr->clear(); - } - - if (!checkAndErrorDedicatedThreadPresent(errstr)) - return false; - - // poll for transform if timeout is set - ros::Time start_time = now_fallback_to_wall(); - const ros::Duration sleep_duration = timeout * CAN_TRANSFORM_POLLING_SCALE; - while (now_fallback_to_wall() < start_time + timeout && - !canTransform(target_frame, source_frame, time) && - (now_fallback_to_wall()+ros::Duration(3.0) >= start_time) && //don't wait when we detect a bag loop - (ros::ok() || !ros::isInitialized())) // Make sure we haven't been stopped (won't work for pytf) - { - sleep_fallback_to_wall(sleep_duration); - } - bool retval = canTransform(target_frame, source_frame, time, errstr); - conditionally_append_timeout_info(errstr, start_time, timeout); - return retval; -} - - -bool -Buffer::canTransform(const std::string& target_frame, const ros::Time& target_time, - const std::string& source_frame, const ros::Time& source_time, - const std::string& fixed_frame, const ros::Duration timeout, std::string* errstr) const -{ - // Clear the errstr before populating it if it's valid. - if (errstr) - { - errstr->clear(); - } - - if (!checkAndErrorDedicatedThreadPresent(errstr)) - return false; - - // poll for transform if timeout is set - ros::Time start_time = now_fallback_to_wall(); - const ros::Duration sleep_duration = timeout * CAN_TRANSFORM_POLLING_SCALE; - while (now_fallback_to_wall() < start_time + timeout && - !canTransform(target_frame, target_time, source_frame, source_time, fixed_frame) && - (now_fallback_to_wall()+ros::Duration(3.0) >= start_time) && //don't wait when we detect a bag loop - (ros::ok() || !ros::isInitialized())) // Make sure we haven't been stopped (won't work for pytf) - { - sleep_fallback_to_wall(sleep_duration); - } - bool retval = canTransform(target_frame, target_time, source_frame, source_time, fixed_frame, errstr); - conditionally_append_timeout_info(errstr, start_time, timeout); - return retval; -} - - -bool Buffer::getFrames(tf2_msgs::FrameGraph::Request& req, tf2_msgs::FrameGraph::Response& res) -{ - res.frame_yaml = allFramesAsYAML(); - return true; -} - - - -bool Buffer::checkAndErrorDedicatedThreadPresent(std::string* error_str) const -{ - if (isUsingDedicatedThread()) - return true; - - - - if (error_str) - *error_str = tf2_ros::threading_error; - - ROS_ERROR("%s", tf2_ros::threading_error.c_str()); - return false; -} - - - -} diff --git a/src/geometry2/tf2_ros/src/buffer_client.cpp b/src/geometry2/tf2_ros/src/buffer_client.cpp deleted file mode 100644 index 98c56dd..0000000 --- a/src/geometry2/tf2_ros/src/buffer_client.cpp +++ /dev/null @@ -1,162 +0,0 @@ -/********************************************************************* -* -* Software License Agreement (BSD License) -* -* Copyright (c) 2009, Willow Garage, Inc. -* All rights reserved. -* -* Redistribution and use in source and binary forms, with or without -* modification, are permitted provided that the following conditions -* are met: -* -* * Redistributions of source code must retain the above copyright -* notice, this list of conditions and the following disclaimer. 
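The polling canTransform implementations above sleep in slices of CAN_TRANSFORM_POLLING_SCALE times the timeout (about 1% of the requested wait per poll) and bail out early on detected bag loops or node shutdown. A caller-side sketch (not part of the deleted sources; frames illustrative, `buffer` assumed to be fed by a TransformListener):

```cpp
#include <ros/ros.h>
#include <tf2_ros/buffer.h>
#include <geometry_msgs/TransformStamped.h>

void pollForTransform(tf2_ros::Buffer& buffer)
{
  std::string err;
  // Polls internally for up to 0.5 s; on failure, err explains why and
  // how long canTransform actually waited.
  if (buffer.canTransform("map", "base_link", ros::Time(0), ros::Duration(0.5), &err))
  {
    geometry_msgs::TransformStamped tf =
        buffer.lookupTransform("map", "base_link", ros::Time(0));
    (void)tf;  // use the transform
  }
  else
  {
    ROS_WARN_THROTTLE(1.0, "map <- base_link unavailable: %s", err.c_str());
  }
}
```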
-* * Redistributions in binary form must reproduce the above -* copyright notice, this list of conditions and the following -* disclaimer in the documentation and/or other materials provided -* with the distribution. -* * Neither the name of Willow Garage, Inc. nor the names of its -* contributors may be used to endorse or promote products derived -* from this software without specific prior written permission. -* -* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS -* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE -* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, -* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, -* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT -* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN -* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -* POSSIBILITY OF SUCH DAMAGE. -* -* Author: Eitan Marder-Eppstein -*********************************************************************/ -#include - -namespace tf2_ros -{ - BufferClient::BufferClient(std::string ns, double check_frequency, ros::Duration timeout_padding): - client_(ns), - check_frequency_(check_frequency), - timeout_padding_(timeout_padding) - { - } - - geometry_msgs::TransformStamped BufferClient::lookupTransform(const std::string& target_frame, const std::string& source_frame, - const ros::Time& time, const ros::Duration timeout) const - { - //populate the goal message - tf2_msgs::LookupTransformGoal goal; - goal.target_frame = target_frame; - goal.source_frame = source_frame; - goal.source_time = time; - goal.timeout = timeout; - goal.advanced = false; - - return processGoal(goal); - } - - geometry_msgs::TransformStamped BufferClient::lookupTransform(const std::string& target_frame, const ros::Time& target_time, - const std::string& source_frame, const ros::Time& source_time, - const std::string& fixed_frame, const ros::Duration timeout) const - { - //populate the goal message - tf2_msgs::LookupTransformGoal goal; - goal.target_frame = target_frame; - goal.source_frame = source_frame; - goal.source_time = source_time; - goal.timeout = timeout; - goal.target_time = target_time; - goal.fixed_frame = fixed_frame; - goal.advanced = true; - - return processGoal(goal); - } - - geometry_msgs::TransformStamped BufferClient::processGoal(const tf2_msgs::LookupTransformGoal& goal) const - { - client_.sendGoal(goal); - - //this shouldn't happen, but could in rare cases where the server hangs - if(!client_.waitForResult(goal.timeout + timeout_padding_)) - { - //make sure to cancel the goal the server is pursuing - client_.cancelGoal(); - throw tf2::TimeoutException("The LookupTransform goal sent to the BufferServer did not come back in the specified time. Something is likely wrong with the server."); - } - - if(client_.getState() != actionlib::SimpleClientGoalState::SUCCEEDED) - throw tf2::TimeoutException("The LookupTransform goal sent to the BufferServer did not come back with SUCCEEDED status. 
Something is likely wrong with the server."); - - //process the result for errors and return it - return processResult(*client_.getResult()); - } - - geometry_msgs::TransformStamped BufferClient::processResult(const tf2_msgs::LookupTransformResult& result) const - { - //if there's no error, then we'll just return the transform - if(result.error.error != result.error.NO_ERROR){ - //otherwise, we'll have to throw the appropriate exception - if(result.error.error == result.error.LOOKUP_ERROR) - throw tf2::LookupException(result.error.error_string); - - if(result.error.error == result.error.CONNECTIVITY_ERROR) - throw tf2::ConnectivityException(result.error.error_string); - - if(result.error.error == result.error.EXTRAPOLATION_ERROR) - throw tf2::ExtrapolationException(result.error.error_string); - - if(result.error.error == result.error.INVALID_ARGUMENT_ERROR) - throw tf2::InvalidArgumentException(result.error.error_string); - - if(result.error.error == result.error.TIMEOUT_ERROR) - throw tf2::TimeoutException(result.error.error_string); - - throw tf2::TransformException(result.error.error_string); - } - - return result.transform; - } - - bool BufferClient::canTransform(const std::string& target_frame, const std::string& source_frame, - const ros::Time& time, const ros::Duration timeout, std::string* errstr) const - { - try - { - lookupTransform(target_frame, source_frame, time, timeout); - return true; - } - catch(tf2::TransformException& ex) - { - if(errstr) - { - errstr->clear(); - *errstr = ex.what(); - } - return false; - } - } - - bool BufferClient::canTransform(const std::string& target_frame, const ros::Time& target_time, - const std::string& source_frame, const ros::Time& source_time, - const std::string& fixed_frame, const ros::Duration timeout, std::string* errstr) const - { - try - { - lookupTransform(target_frame, target_time, source_frame, source_time, fixed_frame, timeout); - return true; - } - catch(tf2::TransformException& ex) - { - if(errstr) - { - errstr->clear(); - *errstr = ex.what(); - } - return false; - } - } -}; diff --git a/src/geometry2/tf2_ros/src/buffer_server.cpp b/src/geometry2/tf2_ros/src/buffer_server.cpp deleted file mode 100644 index ebfdd2f..0000000 --- a/src/geometry2/tf2_ros/src/buffer_server.cpp +++ /dev/null @@ -1,222 +0,0 @@ -/********************************************************************* -* -* Software License Agreement (BSD License) -* -* Copyright (c) 2009, Willow Garage, Inc. -* All rights reserved. -* -* Redistribution and use in source and binary forms, with or without -* modification, are permitted provided that the following conditions -* are met: -* -* * Redistributions of source code must retain the above copyright -* notice, this list of conditions and the following disclaimer. -* * Redistributions in binary form must reproduce the above -* copyright notice, this list of conditions and the following -* disclaimer in the documentation and/or other materials provided -* with the distribution. -* * Neither the name of Willow Garage, Inc. nor the names of its -* contributors may be used to endorse or promote products derived -* from this software without specific prior written permission. -* -* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS -* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
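processResult() above maps the action result's error codes back onto the corresponding tf2 exception types, so a remote lookup throws just like a local one. A client-side sketch (not part of the deleted sources; the action namespace must match the BufferServer's, the constructor arguments mirror the signature shown above, and `waitForServer` is assumed to be the client's standard wait helper):

```cpp
#include <ros/ros.h>
#include <tf2/exceptions.h>
#include <tf2_ros/buffer_client.h>
#include <geometry_msgs/TransformStamped.h>

void queryRemoteBuffer()
{
  // ns, check_frequency, timeout_padding -- values illustrative.
  tf2_ros::BufferClient client("tf2_buffer_server", 10.0, ros::Duration(2.0));
  client.waitForServer(ros::Duration(5.0));  // wait for the action server to come up

  try
  {
    geometry_msgs::TransformStamped tf =
        client.lookupTransform("map", "base_link", ros::Time(0), ros::Duration(1.0));
    (void)tf;  // use the transform
  }
  catch (const tf2::TransformException& ex)
  {
    ROS_WARN("remote lookup failed: %s", ex.what());
  }
}
```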
IN NO EVENT SHALL THE -* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, -* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, -* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT -* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN -* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -* POSSIBILITY OF SUCH DAMAGE. -* -* Author: Eitan Marder-Eppstein -*********************************************************************/ -#include - -namespace tf2_ros -{ - BufferServer::BufferServer(const Buffer& buffer, const std::string& ns, bool auto_start, ros::Duration check_period): - buffer_(buffer), - server_(ros::NodeHandle(), - ns, - boost::bind(&BufferServer::goalCB, this, _1), - boost::bind(&BufferServer::cancelCB, this, _1), - auto_start) - { - ros::NodeHandle n; - check_timer_ = n.createTimer(check_period, boost::bind(&BufferServer::checkTransforms, this, _1)); - } - - void BufferServer::checkTransforms(const ros::TimerEvent& e) - { - (void) e; //Unused - boost::mutex::scoped_lock l(mutex_); - for(std::list::iterator it = active_goals_.begin(); it != active_goals_.end();) - { - GoalInfo& info = *it; - - //we want to lookup a transform if the time on the goal - //has expired, or a transform is available - if(canTransform(info.handle) || info.end_time < ros::Time::now()) - { - tf2_msgs::LookupTransformResult result; - - //try to populate the result, catching exceptions if they occur - try - { - result.transform = lookupTransform(info.handle); - } - catch (tf2::ConnectivityException &ex) - { - result.error.error = result.error.CONNECTIVITY_ERROR; - result.error.error_string = ex.what(); - } - catch (tf2::LookupException &ex) - { - result.error.error = result.error.LOOKUP_ERROR; - result.error.error_string = ex.what(); - } - catch (tf2::ExtrapolationException &ex) - { - result.error.error = result.error.EXTRAPOLATION_ERROR; - result.error.error_string = ex.what(); - } - catch (tf2::InvalidArgumentException &ex) - { - result.error.error = result.error.INVALID_ARGUMENT_ERROR; - result.error.error_string = ex.what(); - } - catch (tf2::TimeoutException &ex) - { - result.error.error = result.error.TIMEOUT_ERROR; - result.error.error_string = ex.what(); - } - catch (tf2::TransformException &ex) - { - result.error.error = result.error.TRANSFORM_ERROR; - result.error.error_string = ex.what(); - } - - //make sure to pass the result to the client - //even failed transforms are considered a success - //since the request was successfully processed - info.handle.setSucceeded(result); - it = active_goals_.erase(it); - } - else - ++it; - } - } - - void BufferServer::cancelCB(GoalHandle gh) - { - boost::mutex::scoped_lock l(mutex_); - //we need to find the goal in the list and remove it... 
also setting it as canceled
-    //if it's not in the list, we won't do anything since it will have already been set
-    //as completed
-    for(std::list<GoalInfo>::iterator it = active_goals_.begin(); it != active_goals_.end();)
-    {
-      GoalInfo& info = *it;
-      if(info.handle == gh)
-      {
-        info.handle.setCanceled();
-        it = active_goals_.erase(it);
-        return;
-      }
-      else
-        ++it;
-    }
-  }
-
-  void BufferServer::goalCB(GoalHandle gh)
-  {
-    //we'll accept all goals we get
-    gh.setAccepted();
-
-    //if the transform isn't immediately available, we'll push it onto our list to check
-    //along with the time that the goal will end
-    GoalInfo goal_info;
-    goal_info.handle = gh;
-    goal_info.end_time = ros::Time::now() + gh.getGoal()->timeout;
-
-    //we can do a quick check here to see if the transform is valid
-    //we'll also do this if the end time has been reached
-    if(canTransform(gh) || goal_info.end_time <= ros::Time::now())
-    {
-      tf2_msgs::LookupTransformResult result;
-      try
-      {
-        result.transform = lookupTransform(gh);
-      }
-      catch (tf2::ConnectivityException &ex)
-      {
-        result.error.error = result.error.CONNECTIVITY_ERROR;
-        result.error.error_string = ex.what();
-      }
-      catch (tf2::LookupException &ex)
-      {
-        result.error.error = result.error.LOOKUP_ERROR;
-        result.error.error_string = ex.what();
-      }
-      catch (tf2::ExtrapolationException &ex)
-      {
-        result.error.error = result.error.EXTRAPOLATION_ERROR;
-        result.error.error_string = ex.what();
-      }
-      catch (tf2::InvalidArgumentException &ex)
-      {
-        result.error.error = result.error.INVALID_ARGUMENT_ERROR;
-        result.error.error_string = ex.what();
-      }
-      catch (tf2::TimeoutException &ex)
-      {
-        result.error.error = result.error.TIMEOUT_ERROR;
-        result.error.error_string = ex.what();
-      }
-      catch (tf2::TransformException &ex)
-      {
-        result.error.error = result.error.TRANSFORM_ERROR;
-        result.error.error_string = ex.what();
-      }
-
-      gh.setSucceeded(result);
-      return;
-    }
-
-    boost::mutex::scoped_lock l(mutex_);
-    active_goals_.push_back(goal_info);
-  }
-
-  bool BufferServer::canTransform(GoalHandle gh)
-  {
-    const tf2_msgs::LookupTransformGoal::ConstPtr& goal = gh.getGoal();
-
-    //check whether we need to use the advanced or simple api
-    if(!goal->advanced)
-      return buffer_.canTransform(goal->target_frame, goal->source_frame, goal->source_time);
-
-    return buffer_.canTransform(goal->target_frame, goal->target_time,
-        goal->source_frame, goal->source_time, goal->fixed_frame);
-  }
-
-  geometry_msgs::TransformStamped BufferServer::lookupTransform(GoalHandle gh)
-  {
-    const tf2_msgs::LookupTransformGoal::ConstPtr& goal = gh.getGoal();
-
-    //check whether we need to use the advanced or simple api
-    if(!goal->advanced)
-      return buffer_.lookupTransform(goal->target_frame, goal->source_frame, goal->source_time);
-
-    return buffer_.lookupTransform(goal->target_frame, goal->target_time,
-        goal->source_frame, goal->source_time, goal->fixed_frame);
-  }
-
-  void BufferServer::start()
-  {
-    server_.start();
-  }
-
-};
diff --git a/src/geometry2/tf2_ros/src/buffer_server_main.cpp b/src/geometry2/tf2_ros/src/buffer_server_main.cpp
deleted file mode 100644
index 85a79c2..0000000
--- a/src/geometry2/tf2_ros/src/buffer_server_main.cpp
+++ /dev/null
@@ -1,71 +0,0 @@
-/*********************************************************************
-*
-* Software License Agreement (BSD License)
-*
-* Copyright (c) 2009, Willow Garage, Inc.
-* All rights reserved.
diff --git a/src/geometry2/tf2_ros/src/buffer_server_main.cpp b/src/geometry2/tf2_ros/src/buffer_server_main.cpp deleted file mode 100644 index 85a79c2..0000000 --- a/src/geometry2/tf2_ros/src/buffer_server_main.cpp +++ /dev/null @@ -1,71 +0,0 @@ -/********************************************************************* -* -* Software License Agreement (BSD License) -* -* Copyright (c) 2009, Willow Garage, Inc. -* All rights reserved. -* -* Redistribution and use in source and binary forms, with or without -* modification, are permitted provided that the following conditions -* are met: -* -* * Redistributions of source code must retain the above copyright -* notice, this list of conditions and the following disclaimer. -* * Redistributions in binary form must reproduce the above -* copyright notice, this list of conditions and the following -* disclaimer in the documentation and/or other materials provided -* with the distribution. -* * Neither the name of Willow Garage, Inc. nor the names of its -* contributors may be used to endorse or promote products derived -* from this software without specific prior written permission. -* -* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS -* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE -* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, -* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, -* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT -* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN -* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -* POSSIBILITY OF SUCH DAMAGE. -* -* Author: Wim Meeussen -*********************************************************************/ -#include <tf2_ros/buffer_server.h> -#include <tf2_ros/transform_listener.h> -#include <tf2_ros/buffer.h> - -int main(int argc, char** argv) -{ - ros::init(argc, argv, "tf_buffer"); - ros::NodeHandle nh; - - double buffer_size; - nh.param("buffer_size", buffer_size, 120.0); - - bool publish_frame_service; - nh.param("publish_frame_service", publish_frame_service, false); - - // Legacy behavior re: #209 - bool use_node_namespace; - nh.param("use_node_namespace", use_node_namespace, false); - std::string node_name; - if (use_node_namespace) - { - node_name = ros::this_node::getName(); - } - else - { - node_name = "tf2_buffer_server"; - } - - tf2_ros::Buffer buffer_core(ros::Duration(buffer_size), publish_frame_service); - tf2_ros::TransformListener listener(buffer_core); - tf2_ros::BufferServer buffer_server(buffer_core, node_name , false); - buffer_server.start(); - - ros::spin(); -}
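In practice the node above is usually queried through the tf2_ros.BufferClient wrapper defined later in this diff rather than with raw actionlib. A minimal sketch, assuming the server runs under its default name "tf2_buffer_server" (i.e. use_node_namespace is false) and illustrative frame names:

import rospy
from tf2_ros import BufferClient

rospy.init_node("buffer_client_example")
client = BufferClient("tf2_buffer_server")
client.wait_for_server()
# Unlike the raw action interface, BufferClient converts TF2Error codes
# back into tf2 exceptions (LookupException, ExtrapolationException, ...).
t = client.lookup_transform("map", "base_link", rospy.Time(0), rospy.Duration(1.0))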
diff --git a/src/geometry2/tf2_ros/src/static_transform_broadcaster.cpp b/src/geometry2/tf2_ros/src/static_transform_broadcaster.cpp deleted file mode 100644 index df17ed9..0000000 --- a/src/geometry2/tf2_ros/src/static_transform_broadcaster.cpp +++ /dev/null @@ -1,77 +0,0 @@ -/* - * Copyright (c) 2008, Willow Garage, Inc. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the Willow Garage, Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ - - -/** \author Tully Foote */ - - -#include "ros/ros.h" -#include "tf2_msgs/TFMessage.h" -#include "tf2_ros/static_transform_broadcaster.h" - -namespace tf2_ros { - -StaticTransformBroadcaster::StaticTransformBroadcaster() -{ - publisher_ = node_.advertise<tf2_msgs::TFMessage>("/tf_static", 100, true); -}; - -void StaticTransformBroadcaster::sendTransform(const geometry_msgs::TransformStamped & msgtf) -{ - std::vector<geometry_msgs::TransformStamped> v1; - v1.push_back(msgtf); - sendTransform(v1); -} - - -void StaticTransformBroadcaster::sendTransform(const std::vector<geometry_msgs::TransformStamped> & msgtf) -{ - for (std::vector<geometry_msgs::TransformStamped>::const_iterator it_in = msgtf.begin(); it_in != msgtf.end(); ++it_in) - { - bool match_found = false; - for (std::vector<geometry_msgs::TransformStamped>::iterator it_msg = net_message_.transforms.begin(); it_msg != net_message_.transforms.end(); ++it_msg) - { - if (it_in->child_frame_id == it_msg->child_frame_id) - { - *it_msg = *it_in; - match_found = true; - break; - } - } - if (! match_found) - net_message_.transforms.push_back(*it_in); - } - - publisher_.publish(net_message_); -} - - -} - -
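The net_message_ aggregation above means repeated C++ sendTransform calls accumulate into a single latched /tf_static message, keyed by child_frame_id. A minimal usage sketch with the Python StaticTransformBroadcaster that appears later in this diff (frame names and the offset are illustrative):

import rospy
from geometry_msgs.msg import TransformStamped
from tf2_ros import StaticTransformBroadcaster

rospy.init_node("static_broadcast_example")
broadcaster = StaticTransformBroadcaster()

t = TransformStamped()
t.header.stamp = rospy.Time.now()
t.header.frame_id = "base_link"     # illustrative frames
t.child_frame_id = "camera_link"
t.transform.translation.x = 0.1
t.transform.rotation.w = 1.0        # identity rotation

broadcaster.sendTransform(t)        # latched: late subscribers still receive it
rospy.spin()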
diff --git a/src/geometry2/tf2_ros/src/static_transform_broadcaster_program.cpp b/src/geometry2/tf2_ros/src/static_transform_broadcaster_program.cpp deleted file mode 100644 index 3485d1d..0000000 --- a/src/geometry2/tf2_ros/src/static_transform_broadcaster_program.cpp +++ /dev/null @@ -1,145 +0,0 @@ -/* - * Copyright (c) 2008, Willow Garage, Inc. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the Willow Garage, Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ - -#include <cstdio> -#include <tf2/LinearMath/Quaternion.h> -#include "tf2_ros/static_transform_broadcaster.h" - - -bool validateXmlRpcTf(XmlRpc::XmlRpcValue tf_data) { - // Validate a TF stored in XML RPC format: ensures the appropriate fields - // exist. Note this does not check data types. - return tf_data.hasMember("child_frame_id") && - tf_data.hasMember("header") && - tf_data["header"].hasMember("frame_id") && - tf_data.hasMember("transform") && - tf_data["transform"].hasMember("translation") && - tf_data["transform"]["translation"].hasMember("x") && - tf_data["transform"]["translation"].hasMember("y") && - tf_data["transform"]["translation"].hasMember("z") && - tf_data["transform"].hasMember("rotation") && - tf_data["transform"]["rotation"].hasMember("x") && - tf_data["transform"]["rotation"].hasMember("y") && - tf_data["transform"]["rotation"].hasMember("z") && - tf_data["transform"]["rotation"].hasMember("w"); -};
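validateXmlRpcTf only checks that the nested fields exist; it does not verify their types. For reference, a parameter satisfying this structure could be set from Python like so (the parameter name and values are illustrative):

import rospy

rospy.set_param("/example_static_tf", {
    "header": {"frame_id": "map"},
    "child_frame_id": "odom",
    "transform": {
        "translation": {"x": 0.0, "y": 0.0, "z": 0.0},
        "rotation": {"x": 0.0, "y": 0.0, "z": 0.0, "w": 1.0},
    },
})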
 -int main(int argc, char ** argv) -{ - //Initialize ROS - ros::init(argc, argv,"static_transform_publisher", ros::init_options::AnonymousName); - tf2_ros::StaticTransformBroadcaster broadcaster; - geometry_msgs::TransformStamped msg; - - if(argc == 10) - { - msg.transform.translation.x = atof(argv[1]); - msg.transform.translation.y = atof(argv[2]); - msg.transform.translation.z = atof(argv[3]); - msg.transform.rotation.x = atof(argv[4]); - msg.transform.rotation.y = atof(argv[5]); - msg.transform.rotation.z = atof(argv[6]); - msg.transform.rotation.w = atof(argv[7]); - msg.header.stamp = ros::Time::now(); - msg.header.frame_id = argv[8]; - msg.child_frame_id = argv[9]; - } - else if (argc == 9) - { - msg.transform.translation.x = atof(argv[1]); - msg.transform.translation.y = atof(argv[2]); - msg.transform.translation.z = atof(argv[3]); - - tf2::Quaternion quat; - quat.setRPY(atof(argv[6]), atof(argv[5]), atof(argv[4])); - msg.transform.rotation.x = quat.x(); - msg.transform.rotation.y = quat.y(); - msg.transform.rotation.z = quat.z(); - msg.transform.rotation.w = quat.w(); - - msg.header.stamp = ros::Time::now(); - msg.header.frame_id = argv[7]; - msg.child_frame_id = argv[8]; - } - else if (argc == 2) { - const std::string param_name = argv[1]; - ROS_INFO_STREAM("Looking for TF in parameter: " << param_name); - XmlRpc::XmlRpcValue tf_data; - - if (!ros::param::has(param_name) || !ros::param::get(param_name, tf_data)) { - ROS_FATAL_STREAM("Could not read TF from parameter server: " << param_name); - return -1; - } - - // Check that all required members are present & of the right type. - if (!validateXmlRpcTf(tf_data)) { - ROS_FATAL_STREAM("Could not validate XmlRpc for TF data: " << tf_data); - return -1; - } - - msg.transform.translation.x = (double) tf_data["transform"]["translation"]["x"]; - msg.transform.translation.y = (double) tf_data["transform"]["translation"]["y"]; - msg.transform.translation.z = (double) tf_data["transform"]["translation"]["z"]; - msg.transform.rotation.x = (double) tf_data["transform"]["rotation"]["x"]; - msg.transform.rotation.y = (double) tf_data["transform"]["rotation"]["y"]; - msg.transform.rotation.z = (double) tf_data["transform"]["rotation"]["z"]; - msg.transform.rotation.w = (double) tf_data["transform"]["rotation"]["w"]; - msg.header.stamp = ros::Time::now(); - msg.header.frame_id = (std::string) tf_data["header"]["frame_id"]; - msg.child_frame_id = (std::string) tf_data["child_frame_id"]; - } - else - { - printf("A command line utility for manually sending a transform.\n"); - //printf("It will periodically republish the given transform. \n"); - printf("Usage: static_transform_publisher x y z qx qy qz qw frame_id child_frame_id \n"); - printf("OR \n"); - printf("Usage: static_transform_publisher x y z yaw pitch roll frame_id child_frame_id \n"); - printf("OR \n"); - printf("Usage: static_transform_publisher /param_name \n"); - printf("\nThis transform is the transform of the coordinate frame from frame_id into the coordinate frame \n"); - printf("of the child_frame_id. \n"); - ROS_ERROR("static_transform_publisher exited due to not having the right number of arguments"); - return -1; - } - - // Checks: frames should not be the same. - if (msg.header.frame_id == msg.child_frame_id) - { - ROS_FATAL("target_frame and source_frame are the same (%s, %s); this cannot work", - msg.header.frame_id.c_str(), msg.child_frame_id.c_str()); - return 1; - } - - broadcaster.sendTransform(msg); - ROS_INFO("Spinning until killed publishing %s to %s", - msg.header.frame_id.c_str(), msg.child_frame_id.c_str()); - ros::spin(); - return 0; -};
diff --git a/src/geometry2/tf2_ros/src/tf2_ros/__init__.py b/src/geometry2/tf2_ros/src/tf2_ros/__init__.py deleted file mode 100644 index af5642c..0000000 --- a/src/geometry2/tf2_ros/src/tf2_ros/__init__.py +++ /dev/null @@ -1,44 +0,0 @@ -#! /usr/bin/python -#*********************************************************** -#* Software License Agreement (BSD License) -#* -#* Copyright (c) 2009, Willow Garage, Inc. -#* All rights reserved. -#* -#* Redistribution and use in source and binary forms, with or without -#* modification, are permitted provided that the following conditions -#* are met: -#* -#* * Redistributions of source code must retain the above copyright -#* notice, this list of conditions and the following disclaimer. -#* * Redistributions in binary form must reproduce the above -#* copyright notice, this list of conditions and the following -#* disclaimer in the documentation and/or other materials provided -#* with the distribution. -#* * Neither the name of Willow Garage, Inc. nor the names of its -#* contributors may be used to endorse or promote products derived -#* from this software without specific prior written permission. -#* -#* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -#* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -#* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS -#* FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE -#* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, -#* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, -#* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -#* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -#* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT -#* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN -#* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -#* POSSIBILITY OF SUCH DAMAGE. -#* -#* Author: Eitan Marder-Eppstein -#*********************************************************** -from __future__ import absolute_import -from tf2_py import * -from .buffer_interface import * -from .buffer import * -from .buffer_client import * -from .transform_listener import * -from .transform_broadcaster import * -from .static_transform_broadcaster import *
diff --git a/src/geometry2/tf2_ros/src/tf2_ros/buffer.py b/src/geometry2/tf2_ros/src/tf2_ros/buffer.py deleted file mode 100644 index 3d14ac8..0000000 --- a/src/geometry2/tf2_ros/src/tf2_ros/buffer.py +++ /dev/null @@ -1,158 +0,0 @@ -# Copyright (c) 2008, Willow Garage, Inc. -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in the -# documentation and/or other materials provided with the distribution. -# * Neither the name of the Willow Garage, Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE -# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -# POSSIBILITY OF SUCH DAMAGE. - -# author: Wim Meeussen - -import rospy -import tf2_py as tf2 -import tf2_ros -from tf2_msgs.srv import FrameGraph, FrameGraphResponse -import rosgraph.masterapi - -class Buffer(tf2.BufferCore, tf2_ros.BufferInterface): - """ - Standard implementation of the :class:`tf2_ros.BufferInterface` abstract data type. - - Inherits from :class:`tf2_ros.buffer_interface.BufferInterface` and :class:`tf2.BufferCore`. - - Stores known frames and offers a ROS service, "tf2_frames", which responds to client requests - with a response containing a :class:`tf2_msgs.FrameGraph` representing the relationship of - known frames. - """ - - def __init__(self, cache_time = None, debug = True): - """ - .. function:: __init__(cache_time = None, debug = True) - - Constructor. - - :param cache_time: (Optional) How long to retain past information in BufferCore.
- :param debug: (Optional) If true, check if another tf2_frames service has been advertised. - """ - if cache_time != None: - tf2.BufferCore.__init__(self, cache_time) - else: - tf2.BufferCore.__init__(self) - tf2_ros.BufferInterface.__init__(self) - - if debug: - #Check to see if the service has already been advertised in this node - try: - m = rosgraph.masterapi.Master(rospy.get_name()) - m.lookupService('~tf2_frames') - except (rosgraph.masterapi.Error, rosgraph.masterapi.Failure): - self.frame_server = rospy.Service('~tf2_frames', FrameGraph, self.__get_frames) - - def __get_frames(self, req): - return FrameGraphResponse(self.all_frames_as_yaml()) - - def lookup_transform(self, target_frame, source_frame, time, timeout=rospy.Duration(0.0)): - """ - Get the transform from the source frame to the target frame. - - :param target_frame: Name of the frame to transform into. - :param source_frame: Name of the input frame. - :param time: The time at which to get the transform. (0 will get the latest) - :param timeout: (Optional) Time to wait for the target frame to become available. - :return: The transform between the frames. - :rtype: :class:`geometry_msgs.msg.TransformStamped` - """ - - self.can_transform(target_frame, source_frame, time, timeout) - return self.lookup_transform_core(target_frame, source_frame, time) - - def lookup_transform_full(self, target_frame, target_time, source_frame, source_time, fixed_frame, timeout=rospy.Duration(0.0)): - """ - Get the transform from the source frame to the target frame using the advanced API. - - :param target_frame: Name of the frame to transform into. - :param target_time: The time to transform to. (0 will get the latest) - :param source_frame: Name of the input frame. - :param source_time: The time at which source_frame will be evaluated. (0 will get the latest) - :param fixed_frame: Name of the frame to consider constant in time. - :param timeout: (Optional) Time to wait for the target frame to become available. - :return: The transform between the frames. - :rtype: :class:`geometry_msgs.msg.TransformStamped` - """ - self.can_transform_full(target_frame, target_time, source_frame, source_time, fixed_frame, timeout) - return self.lookup_transform_full_core(target_frame, target_time, source_frame, source_time, fixed_frame) - - - def can_transform(self, target_frame, source_frame, time, timeout=rospy.Duration(0.0), return_debug_tuple=False): - """ - Check if a transform from the source frame to the target frame is possible. - - :param target_frame: Name of the frame to transform into. - :param source_frame: Name of the input frame. - :param time: The time at which to get the transform. (0 will get the latest) - :param timeout: (Optional) Time to wait for the target frame to become available. - :param return_debug_type: (Optional) If true, return a tuple representing debug information. - :return: True if the transform is possible, false otherwise. 
- :rtype: bool - """ - if timeout != rospy.Duration(0.0): - start_time = rospy.Time.now() - r= rospy.Rate(20) - while (rospy.Time.now() < start_time + timeout and - not self.can_transform_core(target_frame, source_frame, time)[0] and - (rospy.Time.now()+rospy.Duration(3.0)) >= start_time): # big jumps in time are likely bag loops, so break for them - r.sleep() - core_result = self.can_transform_core(target_frame, source_frame, time) - if return_debug_tuple: - return core_result - return core_result[0] - - def can_transform_full(self, target_frame, target_time, source_frame, source_time, fixed_frame, timeout=rospy.Duration(0.0), - - return_debug_tuple=False): - """ - Check if a transform from the source frame to the target frame is possible (advanced API). - - Must be implemented by a subclass of BufferInterface. - - :param target_frame: Name of the frame to transform into. - :param target_time: The time to transform to. (0 will get the latest) - :param source_frame: Name of the input frame. - :param source_time: The time at which source_frame will be evaluated. (0 will get the latest) - :param fixed_frame: Name of the frame to consider constant in time. - :param timeout: (Optional) Time to wait for the target frame to become available. - :param return_debug_type: (Optional) If true, return a tuple representing debug information. - :return: True if the transform is possible, false otherwise. - :rtype: bool - """ - if timeout != rospy.Duration(0.0): - start_time = rospy.Time.now() - r= rospy.Rate(20) - while (rospy.Time.now() < start_time + timeout and - not self.can_transform_full_core(target_frame, target_time, source_frame, source_time, fixed_frame)[0] and - (rospy.Time.now()+rospy.Duration(3.0)) >= start_time): # big jumps in time are likely bag loops, so break for them - r.sleep() - core_result = self.can_transform_full_core(target_frame, target_time, source_frame, source_time, fixed_frame) - if return_debug_tuple: - return core_result - return core_result[0] - diff --git a/src/geometry2/tf2_ros/src/tf2_ros/buffer_client.py b/src/geometry2/tf2_ros/src/tf2_ros/buffer_client.py deleted file mode 100644 index ae4d9fe..0000000 --- a/src/geometry2/tf2_ros/src/tf2_ros/buffer_client.py +++ /dev/null @@ -1,196 +0,0 @@ -#! /usr/bin/python -#*********************************************************** -#* Software License Agreement (BSD License) -#* -#* Copyright (c) 2009, Willow Garage, Inc. -#* All rights reserved. -#* -#* Redistribution and use in source and binary forms, with or without -#* modification, are permitted provided that the following conditions -#* are met: -#* -#* * Redistributions of source code must retain the above copyright -#* notice, this list of conditions and the following disclaimer. -#* * Redistributions in binary form must reproduce the above -#* copyright notice, this list of conditions and the following -#* disclaimer in the documentation and/or other materials provided -#* with the distribution. -#* * Neither the name of Willow Garage, Inc. nor the names of its -#* contributors may be used to endorse or promote products derived -#* from this software without specific prior written permission. -#* -#* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -#* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -#* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS -#* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE -#* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, -#* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, -#* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -#* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -#* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT -#* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN -#* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -#* POSSIBILITY OF SUCH DAMAGE. -#* -#* Author: Eitan Marder-Eppstein -#*********************************************************** -import rospy -import actionlib -import tf2_py as tf2 -import tf2_ros - -from tf2_msgs.msg import LookupTransformAction, LookupTransformGoal -from actionlib_msgs.msg import GoalStatus - -class BufferClient(tf2_ros.BufferInterface): - """ - Action client-based implementation of BufferInterface. - """ - def __init__(self, ns, check_frequency = None, timeout_padding = rospy.Duration.from_sec(2.0)): - """ - .. function:: __init__(ns, check_frequency = None, timeout_padding = rospy.Duration.from_sec(2.0)) - - Constructor. - - :param ns: The namespace in which to look for a BufferServer. - :param check_frequency: How frequently to check for updates to known transforms. - :param timeout_padding: A constant timeout to add to blocking calls. - """ - tf2_ros.BufferInterface.__init__(self) - self.client = actionlib.SimpleActionClient(ns, LookupTransformAction) - self.timeout_padding = timeout_padding - - if check_frequency is not None: - rospy.logwarn('Argument check_frequency is deprecated and should not be used.') - - def wait_for_server(self, timeout = rospy.Duration()): - """ - Block until the action server is ready to respond to requests. - - :param timeout: Time to wait for the server. - :return: True if the server is ready, false otherwise. - :rtype: bool - """ - return self.client.wait_for_server(timeout) - - # lookup, simple api - def lookup_transform(self, target_frame, source_frame, time, timeout=rospy.Duration(0.0)): - """ - Get the transform from the source frame to the target frame. - - :param target_frame: Name of the frame to transform into. - :param source_frame: Name of the input frame. - :param time: The time at which to get the transform. (0 will get the latest) - :param timeout: (Optional) Time to wait for the target frame to become available. - :return: The transform between the frames. - :rtype: :class:`geometry_msgs.msg.TransformStamped` - """ - goal = LookupTransformGoal() - goal.target_frame = target_frame; - goal.source_frame = source_frame; - goal.source_time = time; - goal.timeout = timeout; - goal.advanced = False; - - return self.__process_goal(goal) - - # lookup, advanced api - def lookup_transform_full(self, target_frame, target_time, source_frame, source_time, fixed_frame, timeout=rospy.Duration(0.0)): - """ - Get the transform from the source frame to the target frame using the advanced API. - - :param target_frame: Name of the frame to transform into. - :param target_time: The time to transform to. (0 will get the latest) - :param source_frame: Name of the input frame. - :param source_time: The time at which source_frame will be evaluated. (0 will get the latest) - :param fixed_frame: Name of the frame to consider constant in time. - :param timeout: (Optional) Time to wait for the target frame to become available. - :return: The transform between the frames. 
- :rtype: :class:`geometry_msgs.msg.TransformStamped` - """ - goal = LookupTransformGoal() - goal.target_frame = target_frame; - goal.source_frame = source_frame; - goal.source_time = source_time; - goal.timeout = timeout; - goal.target_time = target_time; - goal.fixed_frame = fixed_frame; - goal.advanced = True; - - return self.__process_goal(goal) - - # can, simple api - def can_transform(self, target_frame, source_frame, time, timeout=rospy.Duration(0.0)): - """ - Check if a transform from the source frame to the target frame is possible. - - :param target_frame: Name of the frame to transform into. - :param source_frame: Name of the input frame. - :param time: The time at which to get the transform. (0 will get the latest) - :param timeout: (Optional) Time to wait for the target frame to become available. - :param return_debug_type: (Optional) If true, return a tuple representing debug information. - :return: True if the transform is possible, false otherwise. - :rtype: bool - """ - try: - self.lookup_transform(target_frame, source_frame, time, timeout) - return True - except tf2.TransformException: - return False - - - # can, advanced api - def can_transform_full(self, target_frame, target_time, source_frame, source_time, fixed_frame, timeout=rospy.Duration(0.0)): - """ - Check if a transform from the source frame to the target frame is possible (advanced API). - - Must be implemented by a subclass of BufferInterface. - - :param target_frame: Name of the frame to transform into. - :param target_time: The time to transform to. (0 will get the latest) - :param source_frame: Name of the input frame. - :param source_time: The time at which source_frame will be evaluated. (0 will get the latest) - :param fixed_frame: Name of the frame to consider constant in time. - :param timeout: (Optional) Time to wait for the target frame to become available. - :param return_debug_type: (Optional) If true, return a tuple representing debug information. - :return: True if the transform is possible, false otherwise. - :rtype: bool - """ - try: - self.lookup_transform_full(target_frame, target_time, source_frame, source_time, fixed_frame, timeout) - return True - except tf2.TransformException: - return False - - def __process_goal(self, goal): - self.client.send_goal(goal) - - if not self.client.wait_for_result(goal.timeout + self.timeout_padding): - #This shouldn't happen, but could in rare cases where the server hangs - raise tf2.TimeoutException("The LookupTransform goal sent to the BufferServer did not come back in the specified time. Something is likely wrong with the server") - - if self.client.get_state() != GoalStatus.SUCCEEDED: - raise tf2.TimeoutException("The LookupTransform goal sent to the BufferServer did not come back with SUCCEEDED status. Something is likely wrong with the server.") - - return self.__process_result(self.client.get_result()) - - def __process_result(self, result): - if not result: - raise tf2.TransformException("The BufferServer returned None for result! Something is likely wrong with the server.") - if not result.error: - raise tf2.TransformException("The BufferServer returned None for result.error! 
Something is likely wrong with the server.") - if result.error.error != result.error.NO_ERROR: - if result.error.error == result.error.LOOKUP_ERROR: - raise tf2.LookupException(result.error.error_string) - if result.error.error == result.error.CONNECTIVITY_ERROR: - raise tf2.ConnectivityException(result.error.error_string) - if result.error.error == result.error.EXTRAPOLATION_ERROR: - raise tf2.ExtrapolationException(result.error.error_string) - if result.error.error == result.error.INVALID_ARGUMENT_ERROR: - raise tf2.InvalidArgumentException(result.error.error_string) - if result.error.error == result.error.TIMEOUT_ERROR: - raise tf2.TimeoutException(result.error.error_string) - - raise tf2.TransformException(result.error.error_string) - - return result.transform diff --git a/src/geometry2/tf2_ros/src/tf2_ros/buffer_interface.py b/src/geometry2/tf2_ros/src/tf2_ros/buffer_interface.py deleted file mode 100644 index 2cb753e..0000000 --- a/src/geometry2/tf2_ros/src/tf2_ros/buffer_interface.py +++ /dev/null @@ -1,254 +0,0 @@ -# Copyright (c) 2008, Willow Garage, Inc. -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in the -# documentation and/or other materials provided with the distribution. -# * Neither the name of the Willow Garage, Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE -# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -# POSSIBILITY OF SUCH DAMAGE. - -# author: Wim Meeussen - -from __future__ import print_function - -import rospy -import tf2_py as tf2 -import tf2_ros -from copy import deepcopy -from std_msgs.msg import Header - -class BufferInterface: - """ - Abstract interface for wrapping the Python bindings for the tf2 library in - a ROS-based convenience API. - Implementations include :class:tf2_ros.buffer.Buffer and - :class:tf2_ros.buffer_client.BufferClient. - """ - def __init__(self): - self.registration = tf2_ros.TransformRegistration() - - # transform, simple api - def transform(self, object_stamped, target_frame, timeout=rospy.Duration(0.0), new_type = None): - """ - Transform an input into the target frame. - - The input must be a known transformable type (by way of the tf2 data type conversion interface). - - If new_type is not None, the type specified must have a valid conversion from the input type, - else the function will raise an exception. 
 - - :param object_stamped: The timestamped object to transform. - :param target_frame: Name of the frame to transform the input into. - :param timeout: (Optional) Time to wait for the target frame to become available. - :param new_type: (Optional) Type to convert the object to. - :return: The transformed, timestamped output, possibly converted to a new type. - """ - do_transform = self.registration.get(type(object_stamped)) - res = do_transform(object_stamped, self.lookup_transform(target_frame, object_stamped.header.frame_id, - object_stamped.header.stamp, timeout)) - if not new_type: - return res - - return convert(res, new_type) - - # transform, advanced api - def transform_full(self, object_stamped, target_frame, target_time, fixed_frame, timeout=rospy.Duration(0.0), new_type = None): - """ - Transform an input into the target frame (advanced API). - - The input must be a known transformable type (by way of the tf2 data type conversion interface). - - If new_type is not None, the type specified must have a valid conversion from the input type, - else the function will raise an exception. - - This function follows the advanced API, which allows transforming between different time points, - as well as specifying a frame to be considered fixed in time. - - :param object_stamped: The timestamped object to transform. - :param target_frame: Name of the frame to transform the input into. - :param target_time: Time to transform the input into. - :param fixed_frame: Name of the frame to consider constant in time. - :param timeout: (Optional) Time to wait for the target frame to become available. - :param new_type: (Optional) Type to convert the object to. - :return: The transformed, timestamped output, possibly converted to a new type. - """ - do_transform = self.registration.get(type(object_stamped)) - res = do_transform(object_stamped, self.lookup_transform_full(target_frame, target_time, - object_stamped.header.frame_id, object_stamped.header.stamp, - fixed_frame, timeout)) - if not new_type: - return res - - return convert(res, new_type) - - def lookup_transform(self, target_frame, source_frame, time, timeout=rospy.Duration(0.0)): - """ - Get the transform from the source frame to the target frame. - - Must be implemented by a subclass of BufferInterface. - - :param target_frame: Name of the frame to transform into. - :param source_frame: Name of the input frame. - :param time: The time at which to get the transform. (0 will get the latest) - :param timeout: (Optional) Time to wait for the target frame to become available. - :return: The transform between the frames. - :rtype: :class:`geometry_msgs.msg.TransformStamped` - """ - raise NotImplementedException() - - def lookup_transform_full(self, target_frame, target_time, source_frame, source_time, fixed_frame, timeout=rospy.Duration(0.0)): - """ - Get the transform from the source frame to the target frame using the advanced API. - - Must be implemented by a subclass of BufferInterface. - - :param target_frame: Name of the frame to transform into. - :param target_time: The time to transform to. (0 will get the latest) - :param source_frame: Name of the input frame. - :param source_time: The time at which source_frame will be evaluated. (0 will get the latest) - :param fixed_frame: Name of the frame to consider constant in time. - :param timeout: (Optional) Time to wait for the target frame to become available. - :return: The transform between the frames.
- :rtype: :class:`geometry_msgs.msg.TransformStamped` - """ - raise NotImplementedException() - - # can, simple api - def can_transform(self, target_frame, source_frame, time, timeout=rospy.Duration(0.0)): - """ - Check if a transform from the source frame to the target frame is possible. - - Must be implemented by a subclass of BufferInterface. - - :param target_frame: Name of the frame to transform into. - :param source_frame: Name of the input frame. - :param time: The time at which to get the transform. (0 will get the latest) - :param timeout: (Optional) Time to wait for the target frame to become available. - :return: True if the transform is possible, false otherwise. - :rtype: bool - """ - raise NotImplementedException() - - # can, advanced api - def can_transform_full(self, target_frame, target_time, source_frame, source_time, fixed_frame, timeout=rospy.Duration(0.0)): - """ - Check if a transform from the source frame to the target frame is possible (advanced API). - - Must be implemented by a subclass of BufferInterface. - - :param target_frame: Name of the frame to transform into. - :param target_time: The time to transform to. (0 will get the latest) - :param source_frame: Name of the input frame. - :param source_time: The time at which source_frame will be evaluated. (0 will get the latest) - :param fixed_frame: Name of the frame to consider constant in time. - :param timeout: (Optional) Time to wait for the target frame to become available. - :return: True if the transform is possible, false otherwise. - :rtype: bool - """ - raise NotImplementedException() - - -def Stamped(obj, stamp, frame_id): - obj.header = Header(frame_id=frame_id, stamp=stamp) - return obj - - - -class TypeException(Exception): - """ - Raised when an unexpected type is received while registering a transform - in :class:`tf2_ros.buffer_interface.BufferInterface`. - """ - def __init__(self, errstr): - self.errstr = errstr - -class NotImplementedException(Exception): - """ - Raised when can_transform or lookup_transform is not implemented in a - subclass of :class:`tf2_ros.buffer_interface.BufferInterface`. 
- """ - def __init__(self): - self.errstr = 'CanTransform or LookupTransform not implemented' - - -class TransformRegistration(): - __type_map = {} - - def print_me(self): - print(TransformRegistration.__type_map) - - def add(self, key, callback): - TransformRegistration.__type_map[key] = callback - - def get(self, key): - if not key in TransformRegistration.__type_map: - raise TypeException('Type %s if not loaded or supported'% str(key)) - else: - return TransformRegistration.__type_map[key] - -class ConvertRegistration(): - __to_msg_map = {} - __from_msg_map = {} - __convert_map = {} - - def add_from_msg(self, key, callback): - ConvertRegistration.__from_msg_map[key] = callback - - def add_to_msg(self, key, callback): - ConvertRegistration.__to_msg_map[key] = callback - - def add_convert(self, key, callback): - ConvertRegistration.__convert_map[key] = callback - - def get_from_msg(self, key): - if not key in ConvertRegistration.__from_msg_map: - raise TypeException('Type %s if not loaded or supported'% str(key)) - else: - return ConvertRegistration.__from_msg_map[key] - - def get_to_msg(self, key): - if not key in ConvertRegistration.__to_msg_map: - raise TypeException('Type %s if not loaded or supported'%str(key)) - else: - return ConvertRegistration.__to_msg_map[key] - - def get_convert(self, key): - if not key in ConvertRegistration.__convert_map: - raise TypeException("Type %s if not loaded or supported" % str(key)) - else: - return ConvertRegistration.__convert_map[key] - -def convert(a, b_type): - c = ConvertRegistration() - #check if an efficient conversion function between the types exists - try: - f = c.get_convert((type(a), b_type)) - print("efficient copy") - return f(a) - except TypeException: - if type(a) == b_type: - print("deep copy") - return deepcopy(a) - - f_to = c.get_to_msg(type(a)) - f_from = c.get_from_msg(b_type) - print("message copy") - return f_from(f_to(a)) diff --git a/src/geometry2/tf2_ros/src/tf2_ros/static_transform_broadcaster.py b/src/geometry2/tf2_ros/src/tf2_ros/static_transform_broadcaster.py deleted file mode 100644 index 064a687..0000000 --- a/src/geometry2/tf2_ros/src/tf2_ros/static_transform_broadcaster.py +++ /dev/null @@ -1,51 +0,0 @@ -# Software License Agreement (BSD License) -# -# Copyright (c) 2008, Willow Garage, Inc. -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions -# are met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following -# disclaimer in the documentation and/or other materials provided -# with the distribution. -# * Neither the name of the Willow Garage nor the names of its -# contributors may be used to endorse or promote products derived -# from this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS -# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
diff --git a/src/geometry2/tf2_ros/src/tf2_ros/static_transform_broadcaster.py b/src/geometry2/tf2_ros/src/tf2_ros/static_transform_broadcaster.py deleted file mode 100644 index 064a687..0000000 --- a/src/geometry2/tf2_ros/src/tf2_ros/static_transform_broadcaster.py +++ /dev/null @@ -1,51 +0,0 @@ -# Software License Agreement (BSD License) -# -# Copyright (c) 2008, Willow Garage, Inc. -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions -# are met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following -# disclaimer in the documentation and/or other materials provided -# with the distribution. -# * Neither the name of the Willow Garage nor the names of its -# contributors may be used to endorse or promote products derived -# from this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS -# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE -# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, -# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, -# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT -# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN -# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -# POSSIBILITY OF SUCH DAMAGE. - -import rospy -from tf2_msgs.msg import TFMessage -from geometry_msgs.msg import TransformStamped - - -class StaticTransformBroadcaster(object): - """ - :class:`StaticTransformBroadcaster` is a convenient way to send static transforms on the ``"/tf_static"`` message topic. - """ - - def __init__(self): - self.pub_tf = rospy.Publisher("/tf_static", TFMessage, queue_size=100, latch=True) - - def sendTransform(self, transform): - if not isinstance(transform, list): - transform = [transform] - self.pub_tf.publish(TFMessage(transform)) - -
diff --git a/src/geometry2/tf2_ros/src/tf2_ros/transform_broadcaster.py b/src/geometry2/tf2_ros/src/tf2_ros/transform_broadcaster.py deleted file mode 100644 index e86e835..0000000 --- a/src/geometry2/tf2_ros/src/tf2_ros/transform_broadcaster.py +++ /dev/null @@ -1,56 +0,0 @@ -# Software License Agreement (BSD License) -# -# Copyright (c) 2008, Willow Garage, Inc. -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions -# are met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following -# disclaimer in the documentation and/or other materials provided -# with the distribution. -# * Neither the name of the Willow Garage nor the names of its -# contributors may be used to endorse or promote products derived -# from this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS -# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE -# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, -# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, -# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT -# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN -# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -# POSSIBILITY OF SUCH DAMAGE. - -import rospy -from tf2_msgs.msg import TFMessage -from geometry_msgs.msg import TransformStamped - - -class TransformBroadcaster: - """ - :class:`TransformBroadcaster` is a convenient way to send transformation updates on the ``"/tf"`` message topic. - """ - - def __init__(self): - self.pub_tf = rospy.Publisher("/tf", TFMessage, queue_size=100) - - def sendTransform(self, transform): - """ - Send a transform, or a list of transforms, to the Buffer associated with this TransformBroadcaster. - - :param transform: A transform or list of transforms to send.
- """ - if not isinstance(transform, list): - transform = [transform] - self.pub_tf.publish(TFMessage(transform)) - - diff --git a/src/geometry2/tf2_ros/src/tf2_ros/transform_listener.py b/src/geometry2/tf2_ros/src/tf2_ros/transform_listener.py deleted file mode 100644 index 6a53c1a..0000000 --- a/src/geometry2/tf2_ros/src/tf2_ros/transform_listener.py +++ /dev/null @@ -1,89 +0,0 @@ -# Copyright (c) 2008, Willow Garage, Inc. -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in the -# documentation and/or other materials provided with the distribution. -# * Neither the name of the Willow Garage, Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE -# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -# POSSIBILITY OF SUCH DAMAGE. - -# author: Wim Meeussen - -import threading - -import rospy -import tf2_ros -from tf2_msgs.msg import TFMessage - -class TransformListener(): - """ - :class:`TransformListener` is a convenient way to listen for coordinate frame transformation info. - This class takes an object that instantiates the :class:`BufferInterface` interface, to which - it propagates changes to the tf frame graph. - """ - def __init__(self, buffer, queue_size=None, buff_size=65536, tcp_nodelay=False): - """ - .. function:: __init__(buffer) - - Constructor. - - :param buffer: The buffer to propagate changes to when tf info updates. - :param queue_size (int) - maximum number of messages to receive at a time. This will generally be 1 or None (infinite, default). buff_size should be increased if this parameter is set as incoming data still needs to sit in the incoming buffer before being discarded. Setting queue_size buff_size to a non-default value affects all subscribers to this topic in this process. - :param buff_size (int) - incoming message buffer size in bytes. If queue_size is set, this should be set to a number greater than the queue_size times the average message size. Setting buff_size to a non-default value affects all subscribers to this topic in this process. - :param tcp_nodelay (bool) - if True, request TCP_NODELAY from publisher. Use of this option is not generally recommended in most cases as it is better to rely on timestamps in message data. Setting tcp_nodelay to True enables TCP_NODELAY for all subscribers in the same python process. 
- """ - self.buffer = buffer - self.last_update = rospy.Time.now() - self.last_update_lock = threading.Lock() - self.tf_sub = rospy.Subscriber("/tf", TFMessage, self.callback, queue_size=queue_size, buff_size=buff_size, tcp_nodelay=tcp_nodelay) - self.tf_static_sub = rospy.Subscriber("/tf_static", TFMessage, self.static_callback, queue_size=queue_size, buff_size=buff_size, tcp_nodelay=tcp_nodelay) - - def __del__(self): - self.unregister() - - def unregister(self): - """ - Unregisters all tf subscribers. - """ - self.tf_sub.unregister() - self.tf_static_sub.unregister() - - def check_for_reset(self): - # Lock to prevent different threads racing on this test and update. - # https://github.com/ros/geometry2/issues/341 - with self.last_update_lock: - now = rospy.Time.now() - if now < self.last_update: - rospy.logwarn("Detected jump back in time of %fs. Clearing TF buffer." % (self.last_update - now).to_sec()) - self.buffer.clear() - self.last_update = now - - def callback(self, data): - self.check_for_reset() - who = data._connection_header.get('callerid', "default_authority") - for transform in data.transforms: - self.buffer.set_transform(transform, who) - - def static_callback(self, data): - self.check_for_reset() - who = data._connection_header.get('callerid', "default_authority") - for transform in data.transforms: - self.buffer.set_transform_static(transform, who) diff --git a/src/geometry2/tf2_ros/src/transform_broadcaster.cpp b/src/geometry2/tf2_ros/src/transform_broadcaster.cpp deleted file mode 100644 index 94cce47..0000000 --- a/src/geometry2/tf2_ros/src/transform_broadcaster.cpp +++ /dev/null @@ -1,66 +0,0 @@ -/* - * Copyright (c) 2008, Willow Garage, Inc. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the Willow Garage, Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. 
 - */ - - -/** \author Tully Foote */ - - -#include "ros/ros.h" -#include "tf2_msgs/TFMessage.h" -#include "tf2_ros/transform_broadcaster.h" - -namespace tf2_ros { - -TransformBroadcaster::TransformBroadcaster() -{ - publisher_ = node_.advertise<tf2_msgs::TFMessage>("/tf", 100); -}; - -void TransformBroadcaster::sendTransform(const geometry_msgs::TransformStamped & msgtf) -{ - std::vector<geometry_msgs::TransformStamped> v1; - v1.push_back(msgtf); - sendTransform(v1); -} - - -void TransformBroadcaster::sendTransform(const std::vector<geometry_msgs::TransformStamped> & msgtf) -{ - tf2_msgs::TFMessage message; - for (std::vector<geometry_msgs::TransformStamped>::const_iterator it = msgtf.begin(); it != msgtf.end(); ++it) - { - message.transforms.push_back(*it); - } - publisher_.publish(message); -} - - -} - -
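The Python counterpart of this class appears earlier in this diff; a minimal periodic-broadcast sketch (frame names and rate are illustrative):

import rospy
from geometry_msgs.msg import TransformStamped
from tf2_ros import TransformBroadcaster

rospy.init_node("broadcast_example")
broadcaster = TransformBroadcaster()
rate = rospy.Rate(10.0)              # illustrative rate

while not rospy.is_shutdown():
    t = TransformStamped()
    t.header.stamp = rospy.Time.now()
    t.header.frame_id = "odom"       # illustrative frames
    t.child_frame_id = "base_link"
    t.transform.rotation.w = 1.0     # identity rotation
    broadcaster.sendTransform(t)     # unlike /tf_static, must be re-sent continuously
    rate.sleep()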
diff --git a/src/geometry2/tf2_ros/src/transform_listener.cpp b/src/geometry2/tf2_ros/src/transform_listener.cpp deleted file mode 100644 index f130be6..0000000 --- a/src/geometry2/tf2_ros/src/transform_listener.cpp +++ /dev/null @@ -1,134 +0,0 @@ -/* - * Copyright (c) 2008, Willow Garage, Inc. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the Willow Garage, Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ - -/** \author Tully Foote */ - -#include "tf2_ros/transform_listener.h" - - -using namespace tf2_ros; - - -TransformListener::TransformListener(tf2::BufferCore& buffer, bool spin_thread): - dedicated_listener_thread_(NULL), buffer_(buffer), using_dedicated_thread_(false) -{ - if (spin_thread) - initWithThread(); - else - init(); -} - -TransformListener::TransformListener(tf2::BufferCore& buffer, const ros::NodeHandle& nh, bool spin_thread) -: dedicated_listener_thread_(NULL) -, node_(nh) -, buffer_(buffer) -, using_dedicated_thread_(false) -{ - if (spin_thread) - initWithThread(); - else - init(); -} - - -TransformListener::~TransformListener() -{ - using_dedicated_thread_ = false; - if (dedicated_listener_thread_) - { - dedicated_listener_thread_->join(); - delete dedicated_listener_thread_; - } -} - -void TransformListener::init() -{ - message_subscriber_tf_ = node_.subscribe<tf2_msgs::TFMessage>("/tf", 100, boost::bind(&TransformListener::subscription_callback, this, _1)); ///\todo magic number - message_subscriber_tf_static_ = node_.subscribe<tf2_msgs::TFMessage>("/tf_static", 100, boost::bind(&TransformListener::static_subscription_callback, this, _1)); ///\todo magic number -} - -void TransformListener::initWithThread() -{ - using_dedicated_thread_ = true; - ros::SubscribeOptions ops_tf = ros::SubscribeOptions::create<tf2_msgs::TFMessage>("/tf", 100, boost::bind(&TransformListener::subscription_callback, this, _1), ros::VoidPtr(), &tf_message_callback_queue_); ///\todo magic number - message_subscriber_tf_ = node_.subscribe(ops_tf); - - ros::SubscribeOptions ops_tf_static = ros::SubscribeOptions::create<tf2_msgs::TFMessage>("/tf_static", 100, boost::bind(&TransformListener::static_subscription_callback, this, _1), ros::VoidPtr(), &tf_message_callback_queue_); ///\todo magic number - message_subscriber_tf_static_ = node_.subscribe(ops_tf_static); - - dedicated_listener_thread_ = new boost::thread(boost::bind(&TransformListener::dedicatedListenerThread, this)); - - //Tell the buffer we have a dedicated thread to enable timeouts - buffer_.setUsingDedicatedThread(true); -} - - - -void TransformListener::subscription_callback(const ros::MessageEvent<tf2_msgs::TFMessage const>& msg_evt) -{ - subscription_callback_impl(msg_evt, false); -} -void TransformListener::static_subscription_callback(const ros::MessageEvent<tf2_msgs::TFMessage const>& msg_evt) -{ - subscription_callback_impl(msg_evt, true); -} -
void TransformListener::subscription_callback_impl(const ros::MessageEvent<tf2_msgs::TFMessage const>& msg_evt, bool is_static) -{ - ros::Time now = ros::Time::now(); - if(now < last_update_){ - ROS_WARN_STREAM("Detected jump back in time of " << (last_update_ - now).toSec() << "s. Clearing TF buffer."); - buffer_.clear(); - } - last_update_ = now; - - - - const tf2_msgs::TFMessage& msg_in = *(msg_evt.getConstMessage()); - std::string authority = msg_evt.getPublisherName(); // lookup the authority - for (unsigned int i = 0; i < msg_in.transforms.size(); i++) - { - try - { - buffer_.setTransform(msg_in.transforms[i], authority, is_static); - } - - catch (tf2::TransformException& ex) - { - ///\todo Use error reporting - std::string temp = ex.what(); - ROS_ERROR("Failure to set received transform from %s to %s with error: %s\n", msg_in.transforms[i].child_frame_id.c_str(), msg_in.transforms[i].header.frame_id.c_str(), temp.c_str()); - } - } -}; - - - - -
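Note the time-jump check in subscription_callback_impl above: when ROS time moves backwards (typically a looping bag file), the whole buffer is cleared. A minimal Python sketch of the same listener-plus-buffer pattern using the classes from this diff (frame names are illustrative):

import rospy
import tf2_ros

rospy.init_node("listener_example")
buffer = tf2_ros.Buffer()
listener = tf2_ros.TransformListener(buffer)  # subscribes to /tf and /tf_static

rate = rospy.Rate(10.0)
while not rospy.is_shutdown():
    try:
        t = buffer.lookup_transform("map", "base_link", rospy.Time(0))
        rospy.loginfo_throttle(1.0, t.transform.translation)
    except tf2_ros.TransformException:
        pass  # transform not available yet
    rate.sleep()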
diff --git a/src/geometry2/tf2_ros/test/listener_unittest.cpp b/src/geometry2/tf2_ros/test/listener_unittest.cpp
deleted file mode 100644
index 4b9594f..0000000
--- a/src/geometry2/tf2_ros/test/listener_unittest.cpp
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * Copyright (c) 2008, Willow Garage, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- *   * Redistributions of source code must retain the above copyright
- *     notice, this list of conditions and the following disclaimer.
- *   * Redistributions in binary form must reproduce the above copyright
- *     notice, this list of conditions and the following disclaimer in the
- *     documentation and/or other materials provided with the distribution.
- *   * Neither the name of the Willow Garage, Inc. nor the names of its
- *     contributors may be used to endorse or promote products derived from
- *     this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <gtest/gtest.h>
-#include <tf2_ros/transform_listener.h>
-
-using namespace tf2;
-
-TEST(tf2_ros_transform, transform_listener)
-{
-  tf2_ros::Buffer buffer;
-  tf2_ros::TransformListener tfl(buffer);
-}
-
-int main(int argc, char **argv){
-  testing::InitGoogleTest(&argc, argv);
-  ros::init(argc, argv, "transform_listener_unittest");
-  return RUN_ALL_TESTS();
-}
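The C++ unit test above only smoke-tests construction. A comparable Python-side check, written here as a hypothetical standalone script rather than anything present in this tree:

import unittest
import rospy
import tf2_ros

class TestListenerConstruction(unittest.TestCase):
    def test_construct(self):
        # Constructing a listener must not raise; it simply wires up subscriptions.
        buf = tf2_ros.Buffer()
        listener = tf2_ros.TransformListener(buf)
        self.assertIsNotNone(listener)

if __name__ == '__main__':
    rospy.init_node('listener_construction_test')
    unittest.main()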
diff --git a/src/geometry2/tf2_ros/test/message_filter_test.cpp b/src/geometry2/tf2_ros/test/message_filter_test.cpp
deleted file mode 100644
index b613d24..0000000
--- a/src/geometry2/tf2_ros/test/message_filter_test.cpp
+++ /dev/null
@@ -1,125 +0,0 @@
-/*
- * Copyright (c) 2014, Open Source Robotics Foundation
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- *   * Redistributions of source code must retain the above copyright
- *     notice, this list of conditions and the following disclaimer.
- *   * Redistributions in binary form must reproduce the above copyright
- *     notice, this list of conditions and the following disclaimer in the
- *     documentation and/or other materials provided with the distribution.
- *   * Neither the name of the Willow Garage, Inc. nor the names of its
- *     contributors may be used to endorse or promote products derived from
- *     this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <gtest/gtest.h>
-#include <tf2_ros/buffer.h>
-#include <tf2_ros/message_filter.h>
-#include <tf2_ros/static_transform_broadcaster.h>
-#include <tf2_ros/transform_listener.h>
-#include <geometry_msgs/PointStamped.h>
-#include <geometry_msgs/TransformStamped.h>
-#include <message_filters/subscriber.h>
-
-#include <ros/ros.h>
-#include <chrono>
-#include <thread>
-
-
-void spin_for_a_second()
-{
-  ros::spinOnce();
-  for (uint8_t i = 0; i < 10; ++i)
-  {
-    std::this_thread::sleep_for(std::chrono::microseconds(100));
-    ros::spinOnce();
-  }
-}
-
-bool filter_callback_fired = false;
-void filter_callback(const geometry_msgs::PointStamped& msg)
-{
-  filter_callback_fired = true;
-}
-
-TEST(tf2_ros_message_filter, multiple_frames_and_time_tolerance)
-{
-  ros::NodeHandle nh;
-  message_filters::Subscriber<geometry_msgs::PointStamped> sub;
-  sub.subscribe(nh, "point", 10);
-
-  tf2_ros::Buffer buffer;
-  tf2_ros::TransformListener tfl(buffer);
-  tf2_ros::MessageFilter<geometry_msgs::PointStamped> filter(buffer, "map", 10, nh);
-  filter.connectInput(sub);
-  filter.registerCallback(&filter_callback);
-  // Register multiple target frames
-  std::vector<std::string> frames;
-  frames.push_back("odom");
-  frames.push_back("map");
-  filter.setTargetFrames(frames);
-  // Set a non-zero time tolerance
-  filter.setTolerance(ros::Duration(1, 0));
-
-  // Publish static transforms so the frame transformations will always be valid
-  tf2_ros::StaticTransformBroadcaster tfb;
-  geometry_msgs::TransformStamped map_to_odom;
-  map_to_odom.header.stamp = ros::Time(0, 0);
-  map_to_odom.header.frame_id = "map";
-  map_to_odom.child_frame_id = "odom";
-  map_to_odom.transform.translation.x = 0.0;
-  map_to_odom.transform.translation.y = 0.0;
-  map_to_odom.transform.translation.z = 0.0;
-  map_to_odom.transform.rotation.x = 0.0;
-  map_to_odom.transform.rotation.y = 0.0;
-  map_to_odom.transform.rotation.z = 0.0;
-  map_to_odom.transform.rotation.w = 1.0;
-  tfb.sendTransform(map_to_odom);
-
-  geometry_msgs::TransformStamped odom_to_base;
-  odom_to_base.header.stamp = ros::Time(0, 0);
-  odom_to_base.header.frame_id = "odom";
-  odom_to_base.child_frame_id = "base";
-  odom_to_base.transform.translation.x = 0.0;
-  odom_to_base.transform.translation.y = 0.0;
-  odom_to_base.transform.translation.z = 0.0;
-  odom_to_base.transform.rotation.x = 0.0;
-  odom_to_base.transform.rotation.y = 0.0;
-  odom_to_base.transform.rotation.z = 0.0;
-  odom_to_base.transform.rotation.w = 1.0;
-  tfb.sendTransform(odom_to_base);
-
-  // Publish a Point message in the "base" frame
-  ros::Publisher pub = nh.advertise<geometry_msgs::PointStamped>("point", 10);
-  geometry_msgs::PointStamped point;
-  point.header.stamp = ros::Time::now();
-  point.header.frame_id = "base";
-  pub.publish(point);
-
-  // make sure it arrives
-  spin_for_a_second();
-
-  // The filter callback should have been fired because all required transforms are available
-  ASSERT_TRUE(filter_callback_fired);
-}
-
-int main(int argc, char **argv){
-  testing::InitGoogleTest(&argc, argv);
-  ros::init(argc, argv, "tf2_ros_message_filter");
-  return RUN_ALL_TESTS();
-}
diff --git a/src/geometry2/tf2_ros/test/message_filter_test.launch b/src/geometry2/tf2_ros/test/message_filter_test.launch
deleted file mode 100644
index b32f8dc..0000000
--- a/src/geometry2/tf2_ros/test/message_filter_test.launch
+++ /dev/null
@@
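In C++, tf2_ros::MessageFilter (exercised above) holds each message until transforms for all target frames at the message's stamp are available. On the Python side the same gating can be approximated by hand; a rough sketch, with topic and frame names assumed:

import rospy
import tf2_ros
import tf2_geometry_msgs  # registers PointStamped with buf.transform()
from geometry_msgs.msg import PointStamped

rospy.init_node("filter_sketch")
buf = tf2_ros.Buffer()
listener = tf2_ros.TransformListener(buf)

def on_point(msg):
    # Process only once the transform is available, as the C++ filter does.
    if buf.can_transform("map", msg.header.frame_id, msg.header.stamp,
                         rospy.Duration(0.1)):
        pt = buf.transform(msg, "map")
        rospy.loginfo("point in map: %s", pt.point)

rospy.Subscriber("point", PointStamped, on_point)
rospy.spin()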
-1,4 +0,0 @@ - - - - diff --git a/src/geometry2/tf2_ros/test/time_reset_test.cpp b/src/geometry2/tf2_ros/test/time_reset_test.cpp deleted file mode 100644 index 8142fd9..0000000 --- a/src/geometry2/tf2_ros/test/time_reset_test.cpp +++ /dev/null @@ -1,111 +0,0 @@ -/* - * Copyright (c) 2014, Open Source Robotics Foundation - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the Willow Garage, Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. 
- */
-
-#include <gtest/gtest.h>
-#include <rosgraph_msgs/Clock.h>
-#include <tf2_ros/transform_broadcaster.h>
-#include <tf2_ros/transform_listener.h>
-#include <chrono>
-#include <thread>
-
-using namespace tf2;
-
-void spin_for_a_second()
-{
-  ros::spinOnce();
-  for (uint8_t i = 0; i < 10; ++i)
-  {
-    std::this_thread::sleep_for(std::chrono::microseconds(100));
-    ros::spinOnce();
-  }
-}
-
-TEST(tf2_ros_transform_listener, time_backwards)
-{
-
-  tf2_ros::Buffer buffer;
-  tf2_ros::TransformListener tfl(buffer);
-  tf2_ros::TransformBroadcaster tfb;
-
-  ros::NodeHandle nh = ros::NodeHandle();
-
-  ros::Publisher clock = nh.advertise<rosgraph_msgs::Clock>("/clock", 5);
-
-  rosgraph_msgs::Clock c;
-  c.clock = ros::Time(100);
-  clock.publish(c);
-
-  // basic test
-  ASSERT_FALSE(buffer.canTransform("foo", "bar", ros::Time(101, 0)));
-
-  // set the transform
-  geometry_msgs::TransformStamped msg;
-  msg.header.stamp = ros::Time(100, 0);
-  msg.header.frame_id = "foo";
-  msg.child_frame_id = "bar";
-  msg.transform.rotation.x = 1.0;
-  tfb.sendTransform(msg);
-  msg.header.stamp = ros::Time(102, 0);
-  tfb.sendTransform(msg);
-
-
-  // make sure it arrives
-  spin_for_a_second();
-
-  // verify it's been set
-  ASSERT_TRUE(buffer.canTransform("foo", "bar", ros::Time(101, 0)));
-
-  c.clock = ros::Time(90);
-  clock.publish(c);
-
-  // make sure it arrives
-  spin_for_a_second();
-
-  //Send another message to trigger clock test on an unrelated frame
-  msg.header.stamp = ros::Time(110, 0);
-  msg.header.frame_id = "foo2";
-  msg.child_frame_id = "bar2";
-  tfb.sendTransform(msg);
-
-  // make sure it arrives
-  spin_for_a_second();
-
-  //verify the data's been cleared
-  ASSERT_FALSE(buffer.canTransform("foo", "bar", ros::Time(101, 0)));
-
-}
-
-
-
-
-int main(int argc, char **argv){
-  testing::InitGoogleTest(&argc, argv);
-  ros::init(argc, argv, "transform_listener_backwards_reset");
-  return RUN_ALL_TESTS();
-}
diff --git a/src/geometry2/tf2_ros/test/transform_listener_time_reset_test.launch b/src/geometry2/tf2_ros/test/transform_listener_time_reset_test.launch
deleted file mode 100644
index ae31a90..0000000
--- a/src/geometry2/tf2_ros/test/transform_listener_time_reset_test.launch
+++ /dev/null
@@ -1,4 +0,0 @@
-
-
-
- \ No newline at end of file
diff --git a/src/geometry2/tf2_ros/test/transform_listener_unittest.launch b/src/geometry2/tf2_ros/test/transform_listener_unittest.launch
deleted file mode 100644
index 42a8149..0000000
--- a/src/geometry2/tf2_ros/test/transform_listener_unittest.launch
+++ /dev/null
@@ -1,3 +0,0 @@
-
-
- \ No newline at end of file
diff --git a/src/geometry2/tf2_sensor_msgs/CHANGELOG.rst b/src/geometry2/tf2_sensor_msgs/CHANGELOG.rst
deleted file mode 100644
index 7de91fb..0000000
--- a/src/geometry2/tf2_sensor_msgs/CHANGELOG.rst
+++ /dev/null
@@ -1,88 +0,0 @@
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-Changelog for package tf2_sensor_msgs
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-0.6.7 (2020-03-09)
-------------------
-
-0.6.6 (2020-01-09)
-------------------
-* Affine->Isometry `#378 `_
-* Python 3 compatibility: relative imports and print statement
-* Contributors: Martin Pecka, Timon Engelke, Tully Foote
-
-0.6.5 (2018-11-16)
-------------------
-
-0.6.4 (2018-11-06)
-------------------
-
-0.6.3 (2018-07-09)
-------------------
-
-0.6.2 (2018-05-02)
-------------------
-
-0.6.1 (2018-03-21)
-------------------
-
-0.6.0 (2018-03-21)
-------------------
-
-0.5.17 (2018-01-01)
--------------------
-* Merge pull request `#257 `_ from delftrobotics-forks/python3
-  Make tf2_py python3 compatible again
-* Use python3 print function.
-* Contributors: Maarten de Vries, Tully Foote - -0.5.16 (2017-07-14) -------------------- -* Fix do_transform_cloud for multi-channelled pointcloud2. (`#241 `_) -* store gtest return value as int (`#229 `_) -* Document the lifetime of the returned reference for getFrameId and getTimestamp -* Find eigen in a much nicer way. -* Switch tf2_sensor_msgs over to package format 2. -* Contributors: Atsushi Watanabe, Chris Lalancette, dhood - -0.5.15 (2017-01-24) -------------------- - -0.5.14 (2017-01-16) -------------------- - -0.5.13 (2016-03-04) -------------------- -* add missing Python runtime dependency -* fix wrong comment -* Adding tests to package -* Fixing do_transform_cloud for python - The previous code was not used at all (it was a mistake in the __init_\_.py so - the do_transform_cloud was not available to the python users). - The python code need some little correction (e.g there is no method named - read_cloud but it's read_points for instance, and as we are in python we can't - use the same trick as in c++ when we got an immutable) -* Contributors: Laurent GEORGE, Vincent Rabaud - -0.5.12 (2015-08-05) -------------------- - -0.5.11 (2015-04-22) -------------------- - -0.5.10 (2015-04-21) -------------------- - -0.5.9 (2015-03-25) ------------------- - -0.5.8 (2015-03-17) ------------------- -* ODR violation fixes and more conversions -* Fix keeping original pointcloud header in transformed pointcloud -* Contributors: Paul Bovbel, Tully Foote, Vincent Rabaud - -0.5.7 (2014-12-23) ------------------- -* add support for transforming sensor_msgs::PointCloud2 -* Contributors: Vincent Rabaud diff --git a/src/geometry2/tf2_sensor_msgs/CMakeLists.txt b/src/geometry2/tf2_sensor_msgs/CMakeLists.txt deleted file mode 100644 index d9d2fb7..0000000 --- a/src/geometry2/tf2_sensor_msgs/CMakeLists.txt +++ /dev/null @@ -1,56 +0,0 @@ -cmake_minimum_required(VERSION 2.8.3) -project(tf2_sensor_msgs) - -find_package(catkin REQUIRED COMPONENTS cmake_modules sensor_msgs tf2_ros tf2) -find_package(Boost COMPONENTS thread REQUIRED) - -# Finding Eigen is somewhat complicated because of our need to support Ubuntu -# all the way back to saucy. First we look for the Eigen3 cmake module -# provided by the libeigen3-dev on newer Ubuntu. If that fails, then we -# fall-back to the version provided by cmake_modules, which is a stand-in. -find_package(Eigen3 QUIET) -if(NOT EIGEN3_FOUND) - find_package(cmake_modules REQUIRED) - find_package(Eigen REQUIRED) - set(EIGEN3_INCLUDE_DIRS ${EIGEN_INCLUDE_DIRS}) -endif() - -# Note that eigen 3.2 (on Ubuntu Wily) only provides EIGEN3_INCLUDE_DIR, -# not EIGEN3_INCLUDE_DIRS, so we have to set the latter from the former. 
-if(NOT EIGEN3_INCLUDE_DIRS)
-  set(EIGEN3_INCLUDE_DIRS ${EIGEN3_INCLUDE_DIR})
-endif()
-
-catkin_package(
-  INCLUDE_DIRS include
-  CATKIN_DEPENDS sensor_msgs tf2_ros tf2
-  DEPENDS EIGEN3
-)
-
-
-include_directories(include
-  ${catkin_INCLUDE_DIRS}
-  ${EIGEN3_INCLUDE_DIRS}
-)
-
-install(DIRECTORY include/${PROJECT_NAME}/
-  DESTINATION ${CATKIN_PACKAGE_INCLUDE_DESTINATION}
-)
-
-catkin_python_setup()
-
-if(CATKIN_ENABLE_TESTING)
-  catkin_add_nosetests(test/test_tf2_sensor_msgs.py)
-
-  find_package(catkin REQUIRED COMPONENTS geometry_msgs sensor_msgs rostest tf2_ros tf2)
-
-  add_executable(test_tf2_sensor_msgs_cpp EXCLUDE_FROM_ALL test/test_tf2_sensor_msgs.cpp)
-  target_link_libraries(test_tf2_sensor_msgs_cpp ${catkin_LIBRARIES} ${GTEST_LIBRARIES})
-
-
-  if(TARGET tests)
-    add_dependencies(tests test_tf2_sensor_msgs_cpp)
-  endif()
-  add_rostest(${CMAKE_CURRENT_SOURCE_DIR}/test/test.launch)
-
-endif()
diff --git a/src/geometry2/tf2_sensor_msgs/include/tf2_sensor_msgs/tf2_sensor_msgs.h b/src/geometry2/tf2_sensor_msgs/include/tf2_sensor_msgs/tf2_sensor_msgs.h
deleted file mode 100644
index 9e16e0d..0000000
--- a/src/geometry2/tf2_sensor_msgs/include/tf2_sensor_msgs/tf2_sensor_msgs.h
+++ /dev/null
@@ -1,107 +0,0 @@
-/*
- * Copyright (c) 2008, Willow Garage, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- *   * Redistributions of source code must retain the above copyright
- *     notice, this list of conditions and the following disclaimer.
- *   * Redistributions in binary form must reproduce the above copyright
- *     notice, this list of conditions and the following disclaimer in the
- *     documentation and/or other materials provided with the distribution.
- *   * Neither the name of the Willow Garage, Inc. nor the names of its
- *     contributors may be used to endorse or promote products derived from
- *     this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef TF2_SENSOR_MSGS_H
-#define TF2_SENSOR_MSGS_H
-
-#include <tf2/convert.h>
-#include <sensor_msgs/PointCloud2.h>
-#include <sensor_msgs/point_cloud2_iterator.h>
-#include <Eigen/Eigen>
-#include <Eigen/Geometry>
-
-namespace tf2
-{
-
-/********************/
-/** PointCloud2    **/
-/********************/
-
-/** \brief Extract a timestamp from the header of a PointCloud2 message.
- * This function is a specialization of the getTimestamp template defined in tf2/convert.h.
- * \param p PointCloud2 message to extract the timestamp from.
- * \return The timestamp of the message. The lifetime of the returned reference
- * is bound to the lifetime of the argument.
- */
-template <>
-inline
-const ros::Time& getTimestamp(const sensor_msgs::PointCloud2& p) {return p.header.stamp;}
-
-/** \brief Extract a frame ID from the header of a PointCloud2 message.
- * This function is a specialization of the getFrameId template defined in tf2/convert.h.
- * \param p PointCloud2 message to extract the frame ID from.
- * \return A string containing the frame ID of the message. The lifetime of the
- * returned reference is bound to the lifetime of the argument.
- */
-template <>
-inline
-const std::string& getFrameId(const sensor_msgs::PointCloud2 &p) {return p.header.frame_id;}
-
-// this method needs to be implemented by client library developers
-template <>
-inline
-void doTransform(const sensor_msgs::PointCloud2 &p_in, sensor_msgs::PointCloud2 &p_out, const geometry_msgs::TransformStamped& t_in)
-{
-  p_out = p_in;
-  p_out.header = t_in.header;
-  Eigen::Transform<float, 3, Eigen::Affine> t = Eigen::Translation3f(t_in.transform.translation.x, t_in.transform.translation.y,
-                                                                     t_in.transform.translation.z) * Eigen::Quaternion<float>(
-                                                                     t_in.transform.rotation.w, t_in.transform.rotation.x,
-                                                                     t_in.transform.rotation.y, t_in.transform.rotation.z);
-
-  sensor_msgs::PointCloud2ConstIterator<float> x_in(p_in, "x");
-  sensor_msgs::PointCloud2ConstIterator<float> y_in(p_in, "y");
-  sensor_msgs::PointCloud2ConstIterator<float> z_in(p_in, "z");
-
-  sensor_msgs::PointCloud2Iterator<float> x_out(p_out, "x");
-  sensor_msgs::PointCloud2Iterator<float> y_out(p_out, "y");
-  sensor_msgs::PointCloud2Iterator<float> z_out(p_out, "z");
-
-  Eigen::Vector3f point;
-  for(; x_in != x_in.end(); ++x_in, ++y_in, ++z_in, ++x_out, ++y_out, ++z_out) {
-    point = t * Eigen::Vector3f(*x_in, *y_in, *z_in);
-    *x_out = point.x();
-    *y_out = point.y();
-    *z_out = point.z();
-  }
-}
-inline
-sensor_msgs::PointCloud2 toMsg(const sensor_msgs::PointCloud2 &in)
-{
-  return in;
-}
-inline
-void fromMsg(const sensor_msgs::PointCloud2 &msg, sensor_msgs::PointCloud2 &out)
-{
-  out = msg;
-}
-
-} // namespace
-
-#endif // TF2_SENSOR_MSGS_H
diff --git a/src/geometry2/tf2_sensor_msgs/package.xml b/src/geometry2/tf2_sensor_msgs/package.xml
deleted file mode 100644
index c0f8686..0000000
--- a/src/geometry2/tf2_sensor_msgs/package.xml
+++ /dev/null
@@ -1,31 +0,0 @@ - - tf2_sensor_msgs - 0.6.7 - - Small lib to transform sensor_msgs with tf.
Most notably, PointCloud2 - - Vincent Rabaud - Vincent Rabaud - BSD - - http://www.ros.org/wiki/tf2_ros - - catkin - - cmake_modules - eigen - - sensor_msgs - tf2 - tf2_ros - - python_orocos_kdl - rospy - - eigen - - rostest - geometry_msgs - - - diff --git a/src/geometry2/tf2_sensor_msgs/setup.py b/src/geometry2/tf2_sensor_msgs/setup.py deleted file mode 100644 index 7ec9abe..0000000 --- a/src/geometry2/tf2_sensor_msgs/setup.py +++ /dev/null @@ -1,13 +0,0 @@ -#!/usr/bin/env python - -from distutils.core import setup -from catkin_pkg.python_setup import generate_distutils_setup - -d = generate_distutils_setup( - packages=['tf2_sensor_msgs'], - package_dir={'': 'src'}, - requires={'rospy','sensor_msgs','tf2_ros','orocos_kdl'} -) - -setup(**d) - diff --git a/src/geometry2/tf2_sensor_msgs/src/tf2_sensor_msgs/__init__.py b/src/geometry2/tf2_sensor_msgs/src/tf2_sensor_msgs/__init__.py deleted file mode 100644 index 187bd6b..0000000 --- a/src/geometry2/tf2_sensor_msgs/src/tf2_sensor_msgs/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from .tf2_sensor_msgs import * diff --git a/src/geometry2/tf2_sensor_msgs/src/tf2_sensor_msgs/tf2_sensor_msgs.py b/src/geometry2/tf2_sensor_msgs/src/tf2_sensor_msgs/tf2_sensor_msgs.py deleted file mode 100644 index 74cacd3..0000000 --- a/src/geometry2/tf2_sensor_msgs/src/tf2_sensor_msgs/tf2_sensor_msgs.py +++ /dev/null @@ -1,60 +0,0 @@ -# Copyright (c) 2008, Willow Garage, Inc. -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in the -# documentation and/or other materials provided with the distribution. -# * Neither the name of the Willow Garage, Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE -# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -# POSSIBILITY OF SUCH DAMAGE. 
-
-from sensor_msgs.msg import PointCloud2
-from sensor_msgs.point_cloud2 import read_points, create_cloud
-import PyKDL
-import rospy
-import tf2_ros
-
-def to_msg_msg(msg):
-    return msg
-
-tf2_ros.ConvertRegistration().add_to_msg(PointCloud2, to_msg_msg)
-
-def from_msg_msg(msg):
-    return msg
-
-tf2_ros.ConvertRegistration().add_from_msg(PointCloud2, from_msg_msg)
-
-def transform_to_kdl(t):
-    return PyKDL.Frame(PyKDL.Rotation.Quaternion(t.transform.rotation.x, t.transform.rotation.y,
-                                                 t.transform.rotation.z, t.transform.rotation.w),
-                       PyKDL.Vector(t.transform.translation.x,
-                                    t.transform.translation.y,
-                                    t.transform.translation.z))
-
-# PointCloud2
-def do_transform_cloud(cloud, transform):
-    t_kdl = transform_to_kdl(transform)
-    points_out = []
-    for p_in in read_points(cloud):
-        p_out = t_kdl * PyKDL.Vector(p_in[0], p_in[1], p_in[2])
-        points_out.append((p_out[0], p_out[1], p_out[2]) + p_in[3:])
-    res = create_cloud(transform.header, cloud.fields, points_out)
-    return res
-tf2_ros.TransformRegistration().add(PointCloud2, do_transform_cloud)
diff --git a/src/geometry2/tf2_sensor_msgs/test/test.launch b/src/geometry2/tf2_sensor_msgs/test/test.launch
deleted file mode 100644
index a948a06..0000000
--- a/src/geometry2/tf2_sensor_msgs/test/test.launch
+++ /dev/null
@@ -1,3 +0,0 @@
-
-
-
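do_transform_cloud above rebuilds the cloud point by point through PyKDL, carrying any channels beyond x, y, z through unchanged. A short usage sketch; the topic and target frame are illustrative assumptions:

import rospy
import tf2_ros
from tf2_sensor_msgs import do_transform_cloud
from sensor_msgs.msg import PointCloud2

rospy.init_node("cloud_transform_sketch")
buf = tf2_ros.Buffer()
listener = tf2_ros.TransformListener(buf)

def on_cloud(cloud):
    # Look up the transform at the cloud's stamp, then apply it to every point.
    transform = buf.lookup_transform("map", cloud.header.frame_id,
                                     cloud.header.stamp, rospy.Duration(0.5))
    cloud_map = do_transform_cloud(cloud, transform)
    rospy.loginfo("transformed %d points", cloud_map.width * cloud_map.height)

rospy.Subscriber("points", PointCloud2, on_cloud)
rospy.spin()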
diff --git a/src/geometry2/tf2_sensor_msgs/test/test_tf2_sensor_msgs.cpp b/src/geometry2/tf2_sensor_msgs/test/test_tf2_sensor_msgs.cpp
deleted file mode 100644
index 8607501..0000000
--- a/src/geometry2/tf2_sensor_msgs/test/test_tf2_sensor_msgs.cpp
+++ /dev/null
@@ -1,104 +0,0 @@
-/*
- * Copyright (c) 2008, Willow Garage, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- *   * Redistributions of source code must retain the above copyright
- *     notice, this list of conditions and the following disclaimer.
- *   * Redistributions in binary form must reproduce the above copyright
- *     notice, this list of conditions and the following disclaimer in the
- *     documentation and/or other materials provided with the distribution.
- *   * Neither the name of the Willow Garage, Inc. nor the names of its
- *     contributors may be used to endorse or promote products derived from
- *     this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-
-#include <gtest/gtest.h>
-#include <tf2_ros/buffer.h>
-#include <tf2_sensor_msgs/tf2_sensor_msgs.h>
-#include <sensor_msgs/point_cloud2_iterator.h>
-#include <geometry_msgs/TransformStamped.h>
-#include <ros/ros.h>
-
-tf2_ros::Buffer* tf_buffer;
-static const double EPS = 1e-3;
-
-
-TEST(Tf2Sensor, PointCloud2)
-{
-  sensor_msgs::PointCloud2 cloud;
-  sensor_msgs::PointCloud2Modifier modifier(cloud);
-  modifier.setPointCloud2FieldsByString(2, "xyz", "rgb");
-  modifier.resize(1);
-
-  sensor_msgs::PointCloud2Iterator<float> iter_x(cloud, "x");
-  sensor_msgs::PointCloud2Iterator<float> iter_y(cloud, "y");
-  sensor_msgs::PointCloud2Iterator<float> iter_z(cloud, "z");
-
-  *iter_x = 1;
-  *iter_y = 2;
-  *iter_z = 3;
-
-  cloud.header.stamp = ros::Time(2);
-  cloud.header.frame_id = "A";
-
-  // simple api
-  sensor_msgs::PointCloud2 cloud_simple = tf_buffer->transform(cloud, "B", ros::Duration(2.0));
-  sensor_msgs::PointCloud2Iterator<float> iter_x_after(cloud_simple, "x");
-  sensor_msgs::PointCloud2Iterator<float> iter_y_after(cloud_simple, "y");
-  sensor_msgs::PointCloud2Iterator<float> iter_z_after(cloud_simple, "z");
-  EXPECT_NEAR(*iter_x_after, -9, EPS);
-  EXPECT_NEAR(*iter_y_after, 18, EPS);
-  EXPECT_NEAR(*iter_z_after, 27, EPS);
-
-  // advanced api
-  sensor_msgs::PointCloud2 cloud_advanced = tf_buffer->transform(cloud, "B", ros::Time(2.0),
-                                                                 "A", ros::Duration(3.0));
-  sensor_msgs::PointCloud2Iterator<float> iter_x_advanced(cloud_advanced, "x");
-  sensor_msgs::PointCloud2Iterator<float> iter_y_advanced(cloud_advanced, "y");
-  sensor_msgs::PointCloud2Iterator<float> iter_z_advanced(cloud_advanced, "z");
-  EXPECT_NEAR(*iter_x_advanced, -9, EPS);
-  EXPECT_NEAR(*iter_y_advanced, 18, EPS);
-  EXPECT_NEAR(*iter_z_advanced, 27, EPS);
-}
-
-int main(int argc, char **argv){
-  testing::InitGoogleTest(&argc, argv);
-  ros::init(argc, argv, "test");
-  ros::NodeHandle n;
-
-  tf_buffer = new tf2_ros::Buffer();
-
-  // populate buffer
-  geometry_msgs::TransformStamped t;
-  t.transform.translation.x = 10;
-  t.transform.translation.y = 20;
-  t.transform.translation.z = 30;
-  t.transform.rotation.x = 1;
-  t.transform.rotation.y = 0;
-  t.transform.rotation.z = 0;
-  t.transform.rotation.w = 0;
-  t.header.stamp = ros::Time(2.0);
-  t.header.frame_id = "A";
-  t.child_frame_id = "B";
-  tf_buffer->setTransform(t, "test");
-
-  int ret = RUN_ALL_TESTS();
-  delete tf_buffer;
-  return ret;
-}
diff --git a/src/geometry2/tf2_sensor_msgs/test/test_tf2_sensor_msgs.py b/src/geometry2/tf2_sensor_msgs/test/test_tf2_sensor_msgs.py
deleted file mode 100755
index b797b7a..0000000
--- a/src/geometry2/tf2_sensor_msgs/test/test_tf2_sensor_msgs.py
+++ /dev/null
@@ -1,103 +0,0 @@
-#!/usr/bin/env python
-
-from __future__ import print_function
-
-import unittest
-import struct
-import tf2_sensor_msgs
-from sensor_msgs import point_cloud2
-from sensor_msgs.msg import PointField
-from tf2_ros import TransformStamped
-import copy
-
-## A sample python unit test
-class PointCloudConversions(unittest.TestCase):
-    def setUp(self):
-        self.point_cloud_in = point_cloud2.PointCloud2()
-        self.point_cloud_in.fields = [PointField('x', 0, PointField.FLOAT32, 1),
-                                      PointField('y', 4, PointField.FLOAT32, 1),
-                                      PointField('z', 8, PointField.FLOAT32, 1)]
-
-        self.point_cloud_in.point_step = 4 * 3
-        self.point_cloud_in.height = 1
-        # we add two points (with x, y, z to the cloud)
-        self.point_cloud_in.width = 2
-        self.point_cloud_in.row_step = self.point_cloud_in.point_step * self.point_cloud_in.width
-
-        points = [1, 2, 0, 10, 20, 30]
-        self.point_cloud_in.data = struct.pack('%sf' % len(points), *points)
-
-
-        self.transform_translate_xyz_300 = TransformStamped()
-        self.transform_translate_xyz_300.transform.translation.x = 300
-
self.transform_translate_xyz_300.transform.translation.y = 300 - self.transform_translate_xyz_300.transform.translation.z = 300 - self.transform_translate_xyz_300.transform.rotation.w = 1 # no rotation so we only set w - - assert(list(point_cloud2.read_points(self.point_cloud_in)) == [(1.0, 2.0, 0.0), (10.0, 20.0, 30.0)]) - - def test_simple_transform(self): - old_data = copy.deepcopy(self.point_cloud_in.data) # deepcopy is not required here because we have a str for now - point_cloud_transformed = tf2_sensor_msgs.do_transform_cloud(self.point_cloud_in, self.transform_translate_xyz_300) - - k = 300 - expected_coordinates = [(1+k, 2+k, 0+k), (10+k, 20+k, 30+k)] - new_points = list(point_cloud2.read_points(point_cloud_transformed)) - print("new_points are %s" % new_points) - assert(expected_coordinates == new_points) - assert(old_data == self.point_cloud_in.data) # checking no modification in input cloud - - -## A simple unit test for tf2_sensor_msgs.do_transform_cloud (multi channel version) -class PointCloudConversionsMultichannel(unittest.TestCase): - TRANSFORM_OFFSET_DISTANCE = 300 - - def setUp(self): - self.point_cloud_in = point_cloud2.PointCloud2() - self.point_cloud_in.fields = [PointField('x', 0, PointField.FLOAT32, 1), - PointField('y', 4, PointField.FLOAT32, 1), - PointField('z', 8, PointField.FLOAT32, 1), - PointField('index', 12, PointField.INT32, 1)] - - self.point_cloud_in.point_step = 4 * 4 - self.point_cloud_in.height = 1 - # we add two points (with x, y, z to the cloud) - self.point_cloud_in.width = 2 - self.point_cloud_in.row_step = self.point_cloud_in.point_step * self.point_cloud_in.width - - self.points = [(1.0, 2.0, 0.0, 123), (10.0, 20.0, 30.0, 456)] - for point in self.points: - self.point_cloud_in.data += struct.pack('3fi', *point) - - self.transform_translate_xyz_300 = TransformStamped() - self.transform_translate_xyz_300.transform.translation.x = self.TRANSFORM_OFFSET_DISTANCE - self.transform_translate_xyz_300.transform.translation.y = self.TRANSFORM_OFFSET_DISTANCE - self.transform_translate_xyz_300.transform.translation.z = self.TRANSFORM_OFFSET_DISTANCE - self.transform_translate_xyz_300.transform.rotation.w = 1 # no rotation so we only set w - - assert(list(point_cloud2.read_points(self.point_cloud_in)) == self.points) - - def test_simple_transform_multichannel(self): - old_data = copy.deepcopy(self.point_cloud_in.data) # deepcopy is not required here because we have a str for now - point_cloud_transformed = tf2_sensor_msgs.do_transform_cloud(self.point_cloud_in, self.transform_translate_xyz_300) - - expected_coordinates = [] - for point in self.points: - expected_coordinates += [( - point[0] + self.TRANSFORM_OFFSET_DISTANCE, - point[1] + self.TRANSFORM_OFFSET_DISTANCE, - point[2] + self.TRANSFORM_OFFSET_DISTANCE, - point[3] # index channel must be kept same - )] - - new_points = list(point_cloud2.read_points(point_cloud_transformed)) - print("new_points are %s" % new_points) - assert(expected_coordinates == new_points) - assert(old_data == self.point_cloud_in.data) # checking no modification in input cloud - - -if __name__ == '__main__': - import rosunit - rosunit.unitrun("test_tf2_sensor_msgs", "test_point_cloud_conversion", PointCloudConversions) - rosunit.unitrun("test_tf2_sensor_msgs", "test_point_cloud_conversion", PointCloudConversionsMultichannel) - diff --git a/src/geometry2/tf2_tools/CHANGELOG.rst b/src/geometry2/tf2_tools/CHANGELOG.rst deleted file mode 100644 index e1e503a..0000000 --- a/src/geometry2/tf2_tools/CHANGELOG.rst +++ /dev/null @@ 
-1,191 +0,0 @@ -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -Changelog for package tf2_tools -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -0.6.7 (2020-03-09) ------------------- - -0.6.6 (2020-01-09) ------------------- -* Allow to choose output precision in echo `#377 `_ -* use yaml.safe_load instead of deprecated yaml.load `#373 `_ -* Python 3 compatibility: relative imports and print statement -* Contributors: Mikael Arguedas, Timon Engelke, Tully Foote, Victor Lamoine - -0.6.5 (2018-11-16) ------------------- - -0.6.4 (2018-11-06) ------------------- - -0.6.3 (2018-07-09) ------------------- - -0.6.2 (2018-05-02) ------------------- -* Tf2 tools echo (`#289 `_) - * tf2_tools echo is working but not yet printing the rotation `#287 `_ - * install echo.py - * Added quaternion output but importing from tf1 for euler_from_quaternion seems wrong (`#222 `_) so not doing that yet. Also made count exit after n counts even if exceptions occurred, also printing time of lookup for exceptions `#287 `_ - * Fixed time query option, also changing message text to be more clear `#287 `_ - * Added bsd license, code from transform3d transformations.py `#287 `_ - * Get rid of tabs - * docstring for each function -* Contributors: Lucas Walter - -0.6.1 (2018-03-21) ------------------- - -0.6.0 (2018-03-21) ------------------- - -0.5.17 (2018-01-01) -------------------- -* Merge pull request `#268 `_ from smnogar/indigo-devel - Fixed for cases of non-standard python install -* Contributors: Steve Nogar, Tully Foote - -0.5.16 (2017-07-14) -------------------- - -0.5.15 (2017-01-24) -------------------- - -0.5.14 (2017-01-16) -------------------- -* Remove old load_manifest from view_frames (`#182 `_) -* Contributors: Jochen Sprickerhof - -0.5.13 (2016-03-04) -------------------- -* casted el to string in view_frames -* Contributors: g_gemignani - -0.5.12 (2015-08-05) -------------------- - -0.5.11 (2015-04-22) -------------------- - -0.5.10 (2015-04-21) -------------------- - -0.5.9 (2015-03-25) ------------------- - -0.5.8 (2015-03-17) ------------------- -* remove useless Makefile files -* Contributors: Vincent Rabaud - -0.5.7 (2014-12-23) ------------------- - -0.5.6 (2014-09-18) ------------------- - -0.5.5 (2014-06-23) ------------------- - -0.5.4 (2014-05-07) ------------------- - -0.5.3 (2014-02-21) ------------------- - -0.5.2 (2014-02-20) ------------------- - -0.5.1 (2014-02-14) ------------------- - -0.5.0 (2014-02-14) ------------------- - -0.4.10 (2013-12-26) -------------------- - -0.4.9 (2013-11-06) ------------------- - -0.4.8 (2013-11-06) ------------------- -* updating install rule for view_frames.py fixes `#44 `_ - -0.4.7 (2013-08-28) ------------------- - -0.4.6 (2013-08-28) ------------------- - -0.4.5 (2013-07-11) ------------------- - -0.4.4 (2013-07-09) ------------------- - -0.4.3 (2013-07-05) ------------------- - -0.4.2 (2013-07-05) ------------------- - -0.4.1 (2013-07-05) ------------------- - -0.4.0 (2013-06-27) ------------------- -* splitting rospy dependency into tf2_py so tf2 is pure c++ library. -* Restoring test packages and bullet packages. 
- reverting 3570e8c42f9b394ecbfd9db076b920b41300ad55 to get back more of the packages previously implemented - reverting 04cf29d1b58c660fdc999ab83563a5d4b76ab331 to fix `#7 `_ - -0.3.6 (2013-03-03) ------------------- - -0.3.5 (2013-02-15 14:46) ------------------------- -* 0.3.4 -> 0.3.5 - -0.3.4 (2013-02-15 13:14) ------------------------- -* 0.3.3 -> 0.3.4 - -0.3.3 (2013-02-15 11:30) ------------------------- -* 0.3.2 -> 0.3.3 - -0.3.2 (2013-02-15 00:42) ------------------------- -* 0.3.1 -> 0.3.2 - -0.3.1 (2013-02-14) ------------------- -* 0.3.0 -> 0.3.1 - -0.3.0 (2013-02-13) ------------------- -* switching to version 0.3.0 -* removing packages with missing deps -* catkinizing geometry-experimental -* catkinizing tf2_tools -* strip out rx dependencies -* Some fixes to make things work with rxbag -* Threading ns list -* merge tf2_cpp and tf2_py into tf2_ros -* Now catching exceptions correctly with echo -* Working version of tf echo -* Making sure to clear details when switching frames -* Changing file format to tf -* First cut at loading, saving, and exporting support -* tf frame viewer is now an rxbag plugin -* Can now connect to any node in the system that has a tf2 buffer -* Now populates namespaces as well -* Now populates a frame list on the fly -* Got the GUI set up for a bunch of features, now just have to implement the backend of them -* Persistent service call to speed things up. Also, coloring on click -* Adding a first version of frame_viewer -* Adding xdot as a dep in prep for frame_viewer -* working view frames -* call new service -* new version of view_frames in new tf2_tools package diff --git a/src/geometry2/tf2_tools/CMakeLists.txt b/src/geometry2/tf2_tools/CMakeLists.txt deleted file mode 100644 index 3f4b5e0..0000000 --- a/src/geometry2/tf2_tools/CMakeLists.txt +++ /dev/null @@ -1,16 +0,0 @@ -cmake_minimum_required(VERSION 2.8.3) -project(tf2_tools) - -find_package(catkin REQUIRED COMPONENTS tf2 - tf2_msgs - tf2_ros -) - -catkin_package( - CATKIN_DEPENDS tf2 - tf2_msgs - tf2_ros) - -install(PROGRAMS scripts/view_frames.py scripts/echo.py - DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION} -) diff --git a/src/geometry2/tf2_tools/mainpage.dox b/src/geometry2/tf2_tools/mainpage.dox deleted file mode 100644 index 4ea7a70..0000000 --- a/src/geometry2/tf2_tools/mainpage.dox +++ /dev/null @@ -1,26 +0,0 @@ -/** -\mainpage -\htmlinclude manifest.html - -\b tf2_tools is ... - - - - -\section codeapi Code API - - - - -*/ diff --git a/src/geometry2/tf2_tools/package.xml b/src/geometry2/tf2_tools/package.xml deleted file mode 100644 index 783a443..0000000 --- a/src/geometry2/tf2_tools/package.xml +++ /dev/null @@ -1,24 +0,0 @@ - - tf2_tools - 0.6.7 - - tf2_tools - - Wim Meeussen - Tully Foote - BSD - - http://www.ros.org/wiki/tf2_tools - - catkin - - tf2_msgs - tf2 - tf2_ros - - tf2_msgs - tf2 - tf2_ros - - - diff --git a/src/geometry2/tf2_tools/scripts/echo.py b/src/geometry2/tf2_tools/scripts/echo.py deleted file mode 100755 index 9bae2d5..0000000 --- a/src/geometry2/tf2_tools/scripts/echo.py +++ /dev/null @@ -1,249 +0,0 @@ -#!/usr/bin/env python - -# tf2 echo code Copyright (c) 2018, Lucas Walter -# transformations.py code Copyright (c) 2006-2017, Christoph Gohlke -# transformations.py code Copyright (c) 2006-2017, The Regents of the University of California -# Produced at the Laboratory for Fluorescence Dynamics -# All rights reserved. 
-# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in the -# documentation and/or other materials provided with the distribution. -# * Neither the name of the copyright holders nor the names of any -# contributors may be used to endorse or promote products derived -# from this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE -# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -# POSSIBILITY OF SUCH DAMAGE. - -from __future__ import print_function - -import argparse -import math -import numpy -import rospy -import sys -import tf2_py as tf2 -import tf2_ros - -from geometry_msgs.msg import TransformStamped -# https://github.com/ros/geometry2/issues/222 -# from tf import transformations - -""" -The following euler conversion functions are from https://github.com/matthew-brett/transforms3d -which adapted it from transformations.py, it is needed here until transforms3d is available -as a dependency. - -They are for internal use only. 
-""" - -# epsilon for testing whether a number is close to zero -_EPS = numpy.finfo(float).eps * 4.0 - -# axis sequences for Euler angles -_NEXT_AXIS = [1, 2, 0, 1] - -# TODO(lucasw) if sxyz works then eliminate the other possibilities -# map axes strings to/from tuples of inner axis, parity, repetition, frame -_AXES2TUPLE = { - 'sxyz': (0, 0, 0, 0), 'sxyx': (0, 0, 1, 0), 'sxzy': (0, 1, 0, 0), - 'sxzx': (0, 1, 1, 0), 'syzx': (1, 0, 0, 0), 'syzy': (1, 0, 1, 0), - 'syxz': (1, 1, 0, 0), 'syxy': (1, 1, 1, 0), 'szxy': (2, 0, 0, 0), - 'szxz': (2, 0, 1, 0), 'szyx': (2, 1, 0, 0), 'szyz': (2, 1, 1, 0), - 'rzyx': (0, 0, 0, 1), 'rxyx': (0, 0, 1, 1), 'ryzx': (0, 1, 0, 1), - 'rxzx': (0, 1, 1, 1), 'rxzy': (1, 0, 0, 1), 'ryzy': (1, 0, 1, 1), - 'rzxy': (1, 1, 0, 1), 'ryxy': (1, 1, 1, 1), 'ryxz': (2, 0, 0, 1), - 'rzxz': (2, 0, 1, 1), 'rxyz': (2, 1, 0, 1), 'rzyz': (2, 1, 1, 1)} - -def _euler_from_matrix(matrix, axes='sxyz'): - """temporaray import from https://github.com/matthew-brett/transforms3d/blob/master/transforms3d/_gohlketransforms.py for internal use only""" - try: - firstaxis, parity, repetition, frame = _AXES2TUPLE[axes.lower()] - except (AttributeError, KeyError): - _TUPLE2AXES[axes] # validation - firstaxis, parity, repetition, frame = axes - - i = firstaxis - j = _NEXT_AXIS[i+parity] - k = _NEXT_AXIS[i-parity+1] - - M = numpy.array(matrix, dtype=numpy.float64, copy=False)[:3, :3] - if repetition: - sy = math.sqrt(M[i, j]*M[i, j] + M[i, k]*M[i, k]) - if sy > _EPS: - ax = math.atan2( M[i, j], M[i, k]) - ay = math.atan2( sy, M[i, i]) - az = math.atan2( M[j, i], -M[k, i]) - else: - ax = math.atan2(-M[j, k], M[j, j]) - ay = math.atan2( sy, M[i, i]) - az = 0.0 - else: - cy = math.sqrt(M[i, i]*M[i, i] + M[j, i]*M[j, i]) - if cy > _EPS: - ax = math.atan2( M[k, j], M[k, k]) - ay = math.atan2(-M[k, i], cy) - az = math.atan2( M[j, i], M[i, i]) - else: - ax = math.atan2(-M[j, k], M[j, j]) - ay = math.atan2(-M[k, i], cy) - az = 0.0 - - if parity: - ax, ay, az = -ax, -ay, -az - if frame: - ax, az = az, ax - return ax, ay, az - -def _quaternion_matrix(quaternion): - """temporaray import from https://github.com/matthew-brett/transforms3d/blob/master/transforms3d/_gohlketransforms.py for internal use only""" - q = numpy.array(quaternion, dtype=numpy.float64, copy=True) - n = numpy.dot(q, q) - if n < _EPS: - return numpy.identity(4) - q *= math.sqrt(2.0 / n) - q = numpy.outer(q, q) - return numpy.array([ - [1.0-q[2, 2]-q[3, 3], q[1, 2]-q[3, 0], q[1, 3]+q[2, 0], 0.0], - [ q[1, 2]+q[3, 0], 1.0-q[1, 1]-q[3, 3], q[2, 3]-q[1, 0], 0.0], - [ q[1, 3]-q[2, 0], q[2, 3]+q[1, 0], 1.0-q[1, 1]-q[2, 2], 0.0], - [ 0.0, 0.0, 0.0, 1.0]]) - -def _euler_from_quaternion(quaternion, axes='sxyz'): - """temporaray import from https://github.com/matthew-brett/transforms3d/blob/master/transforms3d/_gohlketransforms.py for internal use only""" - return _euler_from_matrix(_quaternion_matrix(quaternion), axes) - -def _euler_from_quaternion_msg(quaternion): - # the above code is from transform3 which changed convention from old transformations.py - # from xyzw to wxyz - # return transformations.euler_from_quaternion([quaternion.x, quaternion.y, quaternion.z, quaternion.w]) - return _euler_from_quaternion([quaternion.w, - quaternion.x, - quaternion.y, - quaternion.z]) - -class Echo(): - def __init__(self, args): - self.tf_buffer = tf2_ros.Buffer(cache_time=args.cache_time) - self.tf_listener = tf2_ros.TransformListener(self.tf_buffer) - self.args = args - - self.count = 0 - self.timer = rospy.Timer(rospy.Duration(1.0 / self.args.rate), 
self.lookup) - - def lookup(self, event): - self.count += 1 - if self.args.limit: - if self.count > self.args.limit: - # TODO(lucasw) is there a better method to stop the spin()? - rospy.signal_shutdown("tf echo finished") - return - - cur_time = rospy.Time.now() - # If the transform is from tf_static the ts.header.stamp will be 0.0 - # when offset == 0 or lookup_time is rospy.Time() - if self.args.time: - lookup_time = rospy.Time(self.args.time) - elif self.args.offset: - # If the transform is static this will always work - lookup_time = cur_time + rospy.Duration(self.args.offset) - else: - # Get the most recent transform - lookup_time = rospy.Time() - - try: - ts = self.tf_buffer.lookup_transform(self.args.source_frame, - self.args.target_frame, - lookup_time) - except tf2.LookupException as ex: - msg = "At time {}, (current time {}) ".format(lookup_time.to_sec(), cur_time.to_sec()) - rospy.logerr(msg + str(ex)) - return - except tf2.ExtrapolationException as ex: - msg = "(current time {}) ".format(cur_time.to_sec()) - rospy.logerr(msg + str(ex)) - return - - # The old tf1 static_transform_publisher (which published into /tf, not /tf_static) - # publishes transforms 0.5 seconds into future so the cur_time and header stamp - # will be identical. - msg = "At time {}, (current time {})".format(ts.header.stamp.to_sec(), cur_time.to_sec()) - xyz = ts.transform.translation - msg += "\n- Translation: [{:.{p}f}, {:.{p}f}, {:.{p}f}]\n".format(xyz.x, xyz.y, xyz.z, p=self.args.precision) - quat = ts.transform.rotation - msg += "- Rotation: in Quaternion [{:.{p}f}, {:.{p}f}, {:.{p}f}, {:.{p}f}]\n".format(quat.x, quat.y, quat.z, quat.w, p=self.args.precision) - # TODO(lucasw) need to get quaternion to euler from somewhere, but not tf1 - # or a dependency that isn't in Ubuntu or ros repos - euler = _euler_from_quaternion_msg(quat) - msg += " in RPY (radian) " - msg += "[{:.{p}f}, {:.{p}f}, {:.{p}f}]\n".format(euler[0], euler[1], euler[2], p=self.args.precision) - msg += " in RPY (degree) " - msg += "[{:.{p}f}, {:.{p}f}, {:.{p}f}]".format(math.degrees(euler[0]), - math.degrees(euler[1]), - math.degrees(euler[2]), p=self.args.precision) - print(msg) - -def positive_float(x): - x = float(x) - if x <= 0.0: - raise argparse.ArgumentTypeError("{} must be > 0.0".format(x)) - return x - -def positive_int(x): - x = int(x) - if x <= 0: - raise argparse.ArgumentTypeError("{} must be > 0".format(x)) - return x - -if __name__ == '__main__': - rospy.init_node("echo") - - other_args = rospy.myargv(argv=sys.argv) - precision=3 - try: - precision = rospy.get_param('~precision') - rospy.loginfo("Precision default value was overriden, new value: %d", precision) - except KeyError: - pass - - parser = argparse.ArgumentParser() - parser.add_argument("source_frame") # parent - parser.add_argument("target_frame") # child - parser.add_argument("-r", "--rate", - help="update rate, must be > 0.0", - default=1.0, - type=positive_float) - parser.add_argument("-c", "--cache_time", - help="length of tf buffer cache in seconds", - type=positive_float) - parser.add_argument("-o", "--offset", - help="offset the lookup from current time, ignored if using -t", - type=float) - parser.add_argument("-t", "--time", - help="fixed time to do the lookup", - type=float) - parser.add_argument("-l", "--limit", - help="lookup fixed number of times", - type=positive_int) - parser.add_argument("-p", "--precision", - help="output precision", - default=precision, - type=positive_int) - args = parser.parse_args(other_args[1:]) # Remove first arg - echo 
= Echo(args) - rospy.spin() diff --git a/src/geometry2/tf2_tools/scripts/view_frames.py b/src/geometry2/tf2_tools/scripts/view_frames.py deleted file mode 100755 index e135e72..0000000 --- a/src/geometry2/tf2_tools/scripts/view_frames.py +++ /dev/null @@ -1,81 +0,0 @@ -#!/usr/bin/env python -# Copyright (c) 2008, Willow Garage, Inc. -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in the -# documentation and/or other materials provided with the distribution. -# * Neither the name of the Willow Garage, Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE -# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -# POSSIBILITY OF SUCH DAMAGE. 
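Taken together, the flags defined above give invocations along the lines of "rosrun tf2_tools echo.py map base_link -r 2 -p 5 -l 10", which would look up map to base_link twice a second, print five decimal places, and exit after ten lookups. The frame names in that command are purely illustrative; any pair of frames present in the running tf tree works.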
- -# author: Wim Meeussen - -import rospy -import tf2_py as tf2 -import yaml -import subprocess -from tf2_msgs.srv import FrameGraph -import tf2_ros - -def main(): - rospy.init_node('view_frames') - - # listen to tf for 5 seconds - rospy.loginfo('Listening to tf data during 5 seconds...') - rospy.sleep(0.00001) - buffer = tf2_ros.Buffer() - listener = tf2_ros.TransformListener(buffer) - rospy.sleep(5.0) - - rospy.loginfo('Generating graph in frames.pdf file...') - rospy.wait_for_service('~tf2_frames') - srv = rospy.ServiceProxy('~tf2_frames', FrameGraph) - data = yaml.safe_load(srv().frame_yaml) - with open('frames.gv', 'w') as f: - f.write(generate_dot(data)) - subprocess.Popen('dot -Tpdf frames.gv -o frames.pdf'.split(' ')).communicate() - -def generate_dot(data): - if len(data) == 0: - return 'digraph G { "No tf data received" }' - - dot = 'digraph G {\n' - for el in data: - map = data[el] - dot += '"'+map['parent']+'" -> "'+str(el)+'"' - dot += '[label=" ' - dot += 'Broadcaster: '+map['broadcaster']+'\\n' - dot += 'Average rate: '+str(map['rate'])+'\\n' - dot += 'Buffer length: '+str(map['buffer_length'])+'\\n' - dot += 'Most recent transform: '+str(map['most_recent_transform'])+'\\n' - dot += 'Oldest transform: '+str(map['oldest_transform'])+'\\n' - dot += '"];\n' - if not map['parent'] in data: - root = map['parent'] - dot += 'edge [style=invis];\n' - dot += ' subgraph cluster_legend { style=bold; color=black; label ="view_frames Result";\n' - dot += '"Recorded at time: '+str(rospy.Time.now().to_sec())+'"[ shape=plaintext ] ;\n' - dot += '}->"'+root+'";\n}' - return dot - - -if __name__ == '__main__': - main() diff --git a/src/maintain/CMakeLists.txt b/src/maintain/CMakeLists.txt deleted file mode 100644 index ffa483b..0000000 --- a/src/maintain/CMakeLists.txt +++ /dev/null @@ -1,202 +0,0 @@ -cmake_minimum_required(VERSION 3.0.2...3.26.3) -project(maintain) - -## Compile as C++11, supported in ROS Kinetic and newer -# add_compile_options(-std=c++11) - -## Find catkin macros and libraries -## if COMPONENTS list like find_package(catkin REQUIRED COMPONENTS xyz) -## is used, also find other catkin packages -find_package(catkin REQUIRED) - -## System dependencies are found with CMake's conventions -# find_package(Boost REQUIRED COMPONENTS system) - - -## Uncomment this if the package has a setup.py. This macro ensures -## modules and global scripts declared therein get installed -## See http://ros.org/doc/api/catkin/html/user_guide/setup_dot_py.html -# catkin_python_setup() - -################################################ -## Declare ROS messages, services and actions ## -################################################ - -## To declare and build messages, services or actions from within this -## package, follow these steps: -## * Let MSG_DEP_SET be the set of packages whose message types you use in -## your messages/services/actions (e.g. std_msgs, actionlib_msgs, ...). -## * In the file package.xml: -## * add a build_depend tag for "message_generation" -## * add a build_depend and a exec_depend tag for each package in MSG_DEP_SET -## * If MSG_DEP_SET isn't empty the following dependency has been pulled in -## but can be declared for certainty nonetheless: -## * add a exec_depend tag for "message_runtime" -## * In this file (CMakeLists.txt): -## * add "message_generation" and every package in MSG_DEP_SET to -## find_package(catkin REQUIRED COMPONENTS ...) -## * add "message_runtime" and every package in MSG_DEP_SET to -## catkin_package(CATKIN_DEPENDS ...) 
-## * uncomment the add_*_files sections below as needed -## and list every .msg/.srv/.action file to be processed -## * uncomment the generate_messages entry below -## * add every package in MSG_DEP_SET to generate_messages(DEPENDENCIES ...) - -## Generate messages in the 'msg' folder -# add_message_files( -# FILES -# Message1.msg -# Message2.msg -# ) - -## Generate services in the 'srv' folder -# add_service_files( -# FILES -# Service1.srv -# Service2.srv -# ) - -## Generate actions in the 'action' folder -# add_action_files( -# FILES -# Action1.action -# Action2.action -# ) - -## Generate added messages and services with any dependencies listed here -# generate_messages( -# DEPENDENCIES -# std_msgs # Or other packages containing msgs -# ) - -################################################ -## Declare ROS dynamic reconfigure parameters ## -################################################ - -## To declare and build dynamic reconfigure parameters within this -## package, follow these steps: -## * In the file package.xml: -## * add a build_depend and a exec_depend tag for "dynamic_reconfigure" -## * In this file (CMakeLists.txt): -## * add "dynamic_reconfigure" to -## find_package(catkin REQUIRED COMPONENTS ...) -## * uncomment the "generate_dynamic_reconfigure_options" section below -## and list every .cfg file to be processed - -## Generate dynamic reconfigure parameters in the 'cfg' folder -# generate_dynamic_reconfigure_options( -# cfg/DynReconf1.cfg -# cfg/DynReconf2.cfg -# ) - -################################### -## catkin specific configuration ## -################################### -## The catkin_package macro generates cmake config files for your package -## Declare things to be passed to dependent projects -## INCLUDE_DIRS: uncomment this if your package contains header files -## LIBRARIES: libraries you create in this project that dependent projects also need -## CATKIN_DEPENDS: catkin_packages dependent projects also need -## DEPENDS: system dependencies of this project that dependent projects also need -catkin_package( -# INCLUDE_DIRS include -# LIBRARIES maintain -# CATKIN_DEPENDS other_catkin_pkg -# DEPENDS system_lib -) - -########### -## Build ## -########### - -## Specify additional locations of header files -## Your package locations should be listed before other locations -include_directories( -# include -# ${catkin_INCLUDE_DIRS} -) - -## Declare a C++ library -# add_library(${PROJECT_NAME} -# src/${PROJECT_NAME}/maintain.cpp -# ) - -## Add cmake target dependencies of the library -## as an example, code may need to be generated before libraries -## either from message generation or dynamic reconfigure -# add_dependencies(${PROJECT_NAME} ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS}) - -## Declare a C++ executable -## With catkin_make all packages are built within a single CMake context -## The recommended prefix ensures that target names across packages don't collide -# add_executable(${PROJECT_NAME}_node src/maintain_node.cpp) - -## Rename C++ executable without prefix -## The above recommended prefix causes long target names, the following renames the -## target back to the shorter version for ease of user use -## e.g. 
"rosrun someones_pkg node" instead of "rosrun someones_pkg someones_pkg_node" -# set_target_properties(${PROJECT_NAME}_node PROPERTIES OUTPUT_NAME node PREFIX "") - -## Add cmake target dependencies of the executable -## same as for the library above -# add_dependencies(${PROJECT_NAME}_node ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS}) - -## Specify libraries to link a library or executable target against -# target_link_libraries(${PROJECT_NAME}_node -# ${catkin_LIBRARIES} -# ) - -############# -## Install ## -############# - -# all install targets should use catkin DESTINATION variables -# See http://ros.org/doc/api/catkin/html/adv_user_guide/variables.html - -## Mark executable scripts (Python etc.) for installation -## in contrast to setup.py, you can choose the destination -# catkin_install_python(PROGRAMS -# scripts/my_python_script -# DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION} -# ) - -## Mark executables for installation -## See http://docs.ros.org/melodic/api/catkin/html/howto/format1/building_executables.html -# install(TARGETS ${PROJECT_NAME}_node -# RUNTIME DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION} -# ) - -## Mark libraries for installation -## See http://docs.ros.org/melodic/api/catkin/html/howto/format1/building_libraries.html -# install(TARGETS ${PROJECT_NAME} -# ARCHIVE DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION} -# LIBRARY DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION} -# RUNTIME DESTINATION ${CATKIN_GLOBAL_BIN_DESTINATION} -# ) - -## Mark cpp header files for installation -# install(DIRECTORY include/${PROJECT_NAME}/ -# DESTINATION ${CATKIN_PACKAGE_INCLUDE_DESTINATION} -# FILES_MATCHING PATTERN "*.h" -# PATTERN ".svn" EXCLUDE -# ) - -## Mark other files for installation (e.g. launch and bag files, etc.) -# install(FILES -# # myfile1 -# # myfile2 -# DESTINATION ${CATKIN_PACKAGE_SHARE_DESTINATION} -# ) - -############# -## Testing ## -############# - -## Add gtest based cpp test target and link libraries -# catkin_add_gtest(${PROJECT_NAME}-test test/test_maintain.cpp) -# if(TARGET ${PROJECT_NAME}-test) -# target_link_libraries(${PROJECT_NAME}-test ${PROJECT_NAME}) -# endif() - -## Add folders to be run by python nosetests -# catkin_add_nosetests(test) diff --git a/src/maintain/launch/maintain.launch b/src/maintain/launch/maintain.launch deleted file mode 100644 index fbb70b0..0000000 --- a/src/maintain/launch/maintain.launch +++ /dev/null @@ -1,4 +0,0 @@ - - - - diff --git a/src/maintain/package.xml b/src/maintain/package.xml deleted file mode 100644 index fbfb12b..0000000 --- a/src/maintain/package.xml +++ /dev/null @@ -1,59 +0,0 @@ - - - maintain - 0.0.0 - The maintain package - - - - - wxchen - - - - - - TODO - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - catkin - - - - - - - - diff --git a/src/maintain/scripts/test copy.py b/src/maintain/scripts/test copy.py deleted file mode 100755 index 59582a1..0000000 --- a/src/maintain/scripts/test copy.py +++ /dev/null @@ -1,93 +0,0 @@ -#! 
/home/wxchen/.conda/envs/gsmini/bin/python
-
-import numpy as np
-import cv2 as cv
-from matplotlib import pyplot as plt
-import rospy
-from sensor_msgs.msg import Image
-import message_filters
-from cv_bridge import CvBridge, CvBridgeError
-import rospkg
-
-MIN_MATCH_COUNT = 10
-pkg_path = rospkg.RosPack().get_path('maintain')
-rospy.loginfo(pkg_path)
-img_template = cv.imread(pkg_path + '/scripts/tt.png', 0)
-
-def callback(rgb, depth):
-    rospy.loginfo("callback")
-    bridge = CvBridge()
-    # rospy.loginfo(rgb.header.stamp)
-    # rospy.loginfo(depth.header.stamp)
-    try:
-        rgb_image = bridge.imgmsg_to_cv2(rgb, 'bgr8')
-        depth_image = bridge.imgmsg_to_cv2(depth, '16UC1')
-
-        img_matcher = matcher(rgb_image)
-        # matcher() returns None when the template could not be located
-        if img_matcher is not None:
-            cv.imshow("img_matcher", img_matcher)
-            cv.waitKey(1000)
-
-    except CvBridgeError as e:
-        print(e)
-
-def matcher(img):
-    roi = None  # stays None unless enough good matches are found
-    try:
-        # Initiate SIFT detector
-        sift = cv.SIFT_create()
-
-        # find the keypoints and descriptors with SIFT
-        kp1, des1 = sift.detectAndCompute(img_template, None)
-        kp2, des2 = sift.detectAndCompute(img, None)
-
-        FLANN_INDEX_KDTREE = 1
-        index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
-        search_params = dict(checks=50)
-
-        flann = cv.FlannBasedMatcher(index_params, search_params)
-        matches = flann.knnMatch(des1, des2, k=2)
-
-        # store all the good matches as per Lowe's ratio test.
-        good = []
-        for m, n in matches:
-            if m.distance < 0.7*n.distance:
-                good.append(m)
-
-        if len(good) > MIN_MATCH_COUNT:
-            src_pts = np.float32([kp1[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
-            dst_pts = np.float32([kp2[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)
-
-            M, mask = cv.findHomography(src_pts, dst_pts, cv.RANSAC, 5.0)
-            matchesMask = mask.ravel().tolist()
-
-            h, w = img_template.shape
-            pts = np.float32([[0, 0], [0, h-1], [w-1, h-1], [w-1, 0]]).reshape(-1, 1, 2)
-            dst = cv.perspectiveTransform(pts, M)
-
-            roi = img[np.int32(dst)[0][0][1]:np.int32(dst)[2][0][1], np.int32(dst)[0][0][0]:np.int32(dst)[2][0][0]]
-            # roi = detect_black(roi)
-
-            # img2 = cv.polylines(img2,[np.int32(dst)],True,255,3, cv.LINE_AA)
-        else:
-            print("Not enough matches are found - {}/{}".format(len(good), MIN_MATCH_COUNT))
-
-        return roi
-    except Exception as e:
-        print(e)
-        return None
-
-
-if __name__ == "__main__":
-
-    rospy.init_node("maintain")
-    rospy.loginfo("maintain task start ......")
-
-    rgb_sub = message_filters.Subscriber("/camera/color/image_raw", Image)
-    depth_sub = message_filters.Subscriber("/camera/aligned_depth_to_color/image_raw", Image)
-
-    ts = message_filters.TimeSynchronizer([rgb_sub, depth_sub], 1)
-    ts.registerCallback(callback)
-
-    rospy.spin()
\ No newline at end of file
diff --git a/src/maintain/scripts/test.py b/src/maintain/scripts/test.py
deleted file mode 100755
index bf48f94..0000000
--- a/src/maintain/scripts/test.py
+++ /dev/null
@@ -1,182 +0,0 @@
-#! 
/home/wxchen/.conda/envs/gsmini/bin/python
-
-import numpy as np
-import cv2 as cv
-from matplotlib import pyplot as plt
-import rospy
-import tf2_ros
-import tf
-from sensor_msgs.msg import Image, CameraInfo
-from geometry_msgs.msg import PoseStamped, TransformStamped, Quaternion
-import message_filters
-from cv_bridge import CvBridge, CvBridgeError
-import rospkg
-
-import os
-import sys
-from rostopic import get_topic_type
-from detection_msgs.msg import BoundingBox, BoundingBoxes
-
-bridge = CvBridge()
-annulus_width = 10
-
-def calculate_image_edge_plane_normal(depth_roi):
-    # Get the shape of the depth_roi
-    height, width = depth_roi.shape
-
-    # Get the edges of the ROI
-    left_edge = [(0, y) for y in range(height)]
-    right_edge = [(width-1, y) for y in range(height)]
-    top_edge = [(x, 0) for x in range(width)]
-    bottom_edge = [(x, height-1) for x in range(width)]
-    edges = left_edge + right_edge + top_edge + bottom_edge
-
-    # Compute the mean depth value of the edges
-    edge_depths = []
-    for edge_point in edges:
-        edge_depths.append(depth_roi[edge_point[1], edge_point[0]])
-    mean_depth = np.mean(edge_depths)
-
-    # Create a mask to extract the points on the edges
-    mask = np.zeros_like(depth_roi, dtype=np.uint8)
-    for edge_point in edges:
-        mask[edge_point[1], edge_point[0]] = 1
-    masked_depth_roi = depth_roi * mask
-
-    # Extract the 3D coordinates of the points on the edges
-    edge_points = []
-    for edge_point in edges:
-        edge_points.append([edge_point[0], edge_point[1], masked_depth_roi[edge_point[1], edge_point[0]]])
-
-    # Convert the list of edge points to a numpy array
-    edge_points = np.array(edge_points)
-
-    # Shift the edge points so that the centroid and the mean depth value are at the origin
-    edge_points = edge_points - np.array([width/2, height/2, mean_depth])
-
-    # Compute the singular value decomposition (SVD) of the edge points
-    U, S, V = np.linalg.svd(edge_points)
-
-    # Extract the normal vector of the plane that best fits the edge points from the
-    # right-singular vector corresponding to the smallest singular value
-    normal = V[2]
-
-    return normal
-
-def filter_quaternion(quat, quat_prev, alpha):
-    if quat_prev is None:
-        quat_prev = quat
-    # Apply low-pass filter to quaternion
-    quat_filtered = np.zeros(4)
-    for i in range(4):
-        quat_filtered[i] = alpha * quat[i] + (1-alpha) * quat_prev[i]
-    # Normalize the quaternion
-    quat_filtered = quat_filtered / np.linalg.norm(quat_filtered)
-    return quat_filtered
-
-
-def box_callback(box, depth, color_info):
-    try:
-        color_intrinsics = color_info.K
-        depth_image = bridge.imgmsg_to_cv2(depth, '16UC1')
-        # get the center of the screw
-        boundingBox = box.bounding_boxes[0]
-        screw_x = (boundingBox.xmax + boundingBox.xmin) / 2
-        screw_y = (boundingBox.ymax + boundingBox.ymin) / 2
-        # print(screw_x,screw_y)
-
-        depth_array = np.array(depth_image, dtype=np.float32)
-        depth_roi = depth_array[boundingBox.ymin:boundingBox.ymax, boundingBox.xmin:boundingBox.xmax]
-
-        # back-project the box center to a 3D point with the pinhole model (depth is in mm)
-        z = np.mean(depth_roi) * 0.001
-        x = (screw_x - color_intrinsics[2]) * z / color_intrinsics[0]
-        y = (screw_y - color_intrinsics[5]) * z / color_intrinsics[4]
-        # rospy.loginfo("screw pose: x: %f, y: %f, z: %f", x, y, z)
-
-        # calculate normal direction of the screw area; clamp the annulus to the
-        # image bounds so negative slice indices cannot silently wrap around
-        ymin = max(boundingBox.ymin - annulus_width, 0)
-        xmin = max(boundingBox.xmin - annulus_width, 0)
-        annulus_roi = depth_array[ymin:boundingBox.ymax+annulus_width, xmin:boundingBox.xmax+annulus_width]
-        normal = calculate_image_edge_plane_normal(annulus_roi)
-        # print(normal)
-
-        # publish screw pose
-        # screw_pose = PoseStamped()
-        # screw_pose.header.stamp = rospy.Time.now()
-        # screw_pose.header.frame_id = "camera_color_optical_frame"
-        # screw_pose.pose.position.x = x
-        # screw_pose.pose.position.y = y
-        # screw_pose.pose.position.z = z
-        # screw_pose.pose.orientation.x = 0
-        # screw_pose.pose.orientation.y = 0
-        # screw_pose.pose.orientation.z = 0
-        # screw_pose.pose.orientation.w = 1
-
-        # pose_pub.publish(screw_pose)
-
-        # normal vector to quaternion: use the normal as the vector part of a pure
-        # quaternion, recover roll/pitch from it, and zero out the yaw
-        screw_quat = tf.transformations.quaternion_from_euler(0, 0, 0)
-        screw_quat[0] = normal[0]
-        screw_quat[1] = normal[1]
-        screw_quat[2] = normal[2]
-        screw_quat[3] = 0
-        # quaternion to euler
-        screw_euler = tf.transformations.euler_from_quaternion(screw_quat)
-        screw_quat = tf.transformations.quaternion_from_euler(screw_euler[0], screw_euler[1], 0)
-
-        # Apply low-pass filter to screw quaternion
-        alpha = 0.4
-        global screw_quat_prev
-        screw_quat_filtered = filter_quaternion(screw_quat, screw_quat_prev, alpha)
-        screw_quat_prev = screw_quat_filtered
-
-        # publish screw tf
-        screw_tf = TransformStamped()
-        screw_tf.header.stamp = rospy.Time.now()
-        screw_tf.header.frame_id = "camera_color_optical_frame"
-        screw_tf.child_frame_id = "screw"
-        screw_tf.transform.translation.x = x
-        screw_tf.transform.translation.y = y
-        screw_tf.transform.translation.z = z
-        screw_tf.transform.rotation.x = screw_quat_filtered[0]
-        screw_tf.transform.rotation.y = screw_quat_filtered[1]
-        screw_tf.transform.rotation.z = screw_quat_filtered[2]
-        screw_tf.transform.rotation.w = screw_quat_filtered[3]
-
-        tf_broadcaster.sendTransform(screw_tf)
-
-    except Exception as e:
-        print(e)
-
-
-if __name__ == "__main__":
-    screw_quat_prev = None
-
-    rospy.init_node("maintain")
-    rospy.loginfo("maintain task start ......")
-
-    box_sub = message_filters.Subscriber("/yolov5/detections", BoundingBoxes)
-    depth_sub = message_filters.Subscriber("/camera/aligned_depth_to_color/image_raw", Image)
-    color_info = message_filters.Subscriber("/camera/color/camera_info", CameraInfo)
-
-    tf_broadcaster = tf2_ros.TransformBroadcaster()
-
-    ts = message_filters.TimeSynchronizer([box_sub, depth_sub, color_info], 1)
-    ts.registerCallback(box_callback)
-
-    rospy.spin()
\ No newline at end of file
diff --git a/src/vision_opencv/README.rst b/src/vision_opencv/README.rst
deleted file mode 100644
index d9387bf..0000000
--- a/src/vision_opencv/README.rst
+++ /dev/null
@@ -1,7 +0,0 @@
-vision_opencv
-=============
-
-.. image:: https://travis-ci.org/ros-perception/vision_opencv.svg?branch=indigo
-    :target: https://travis-ci.org/ros-perception/vision_opencv
-
-Packages for interfacing ROS with OpenCV, a library of programming functions for real time computer vision.
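The cv_bridge API that the deleted sources below document is easiest to see in a minimal round-trip node. The following is a sketch only, not part of the removed tree: it assumes the same RealSense-style color topic used by the maintain scripts above, and the output topic name is made up.

    #!/usr/bin/env python
    # Minimal cv_bridge round trip: ROS Image -> OpenCV image -> ROS Image.
    import rospy
    from cv_bridge import CvBridge, CvBridgeError
    from sensor_msgs.msg import Image

    bridge = CvBridge()

    def on_image(msg):
        try:
            # Convert the incoming message to a cv::Mat-like numpy array.
            mat = bridge.imgmsg_to_cv2(msg, desired_encoding='bgr8')
        except CvBridgeError as e:
            rospy.logwarn(e)
            return
        # ... process mat with OpenCV here ...
        out = bridge.cv2_to_imgmsg(mat, encoding='bgr8')  # back to a ROS message
        out.header = msg.header  # preserve the original timestamp and frame_id
        pub.publish(out)

    rospy.init_node('cv_bridge_roundtrip')
    pub = rospy.Publisher('image_out', Image, queue_size=1)  # hypothetical topic
    rospy.Subscriber('/camera/color/image_raw', Image, on_image)
    rospy.spin()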
diff --git a/src/vision_opencv/cv_bridge/CHANGELOG.rst b/src/vision_opencv/cv_bridge/CHANGELOG.rst deleted file mode 100644 index 4e5d873..0000000 --- a/src/vision_opencv/cv_bridge/CHANGELOG.rst +++ /dev/null @@ -1,431 +0,0 @@ -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -Changelog for package cv_bridge -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -1.13.1 (2022-10-03) -------------------- -* Fix endian mismatch issue per boostorg/python PR `#218 `_ -* Update CMakeLists.txt for Windows build environment (`#265 `_) -* Remove path splash separator from 'package_dir' (`#267 `_) -* Fix travis. (`#269 `_) -* Contributors: Duan Yutong, James Xu, Kenji Brameld, Sean Yen - -1.13.0 (2018-04-30) -------------------- -* Use rosdep OpenCV and not ROS one. - We defintely don't need the whole OpenCV. - We need to clean the rosdep keys. -* Contributors: Vincent Rabaud - -1.12.8 (2018-04-17) -------------------- -* Merge pull request `#191 `_ from patrickelectric/kinetic - cv2_to_imgmsg: step must be int -* cv2_to_imgmsg: step must be int - Signed-off-by: Patrick José Pereira -* Contributors: Patrick José Pereira, Vincent Rabaud - -1.12.7 (2017-11-12) -------------------- - -1.12.6 (2017-11-11) -------------------- -* fix endianness issues -* Contributors: Vincent Rabaud - -1.12.5 (2017-11-05) -------------------- -* add version_gte for opencv3 - @vrabaud If you'll update opencv3 version as discussed in https://discourse.ros.org/t/opencv-3-3/2674/4, I think we'd better to add 'version_gte' tag so that apt-get install ros-kinetic-cv-bridge also pulls openv3.3 from repository, to avoid API breaking issue between opencv2 and opencv3. -* Simplify the dependency components of cv_bridge - Fixes `#183 `_ -* Fixes `#177 `_ - The Python bridge was wrong on OpenCV2 with mono8 (and any Mat - with only two dimensions btw). Took the official Python bridge - from OpenCV. -* Add missing test file - This fixes `#171 `_ -* Properly deal with alpha in image compression. - That fixes `#169 `_ -* Silence warnings about un-used variables -* export OpenCV variables -* Contributors: Kei Okada, Victor Lamoine, Vincent Rabaud - -1.12.4 (2017-01-29) -------------------- -* properly find Boost Python 2 or 3 - This fixes `#158 `_ -* Contributors: Vincent Rabaud - -1.12.3 (2016-12-04) -------------------- -* Use api in sensor_msgs to get byte_depth and num channels -* Implement cpp conversion of N channel image - This is cpp version of https://github.com/ros-perception/vision_opencv/pull/141, - which is one for python. -* Fill black color to depth nan region -* address gcc6 build error in cv_bridge and tune - With gcc6, compiling fails with `stdlib.h: No such file or directory`, - as including '-isystem /usr/include' breaks with gcc6, cf., - https://gcc.gnu.org/bugzilla/show_bug.cgi?id=70129 - This commit addresses this issue for cv_bridge in the same way - it was done in the commit ead421b8 [1] for image_geometry. - This issue was also addressed in various other ROS packages. 
- A list of related commits and pull requests is at: - https://github.com/ros/rosdistro/issues/12783 - [1] https://github.com/ros-perception/vision_opencv/commit/ead421b85eeb750cbf7988657015296ed6789bcf - Signed-off-by: Lukas Bulwahn -* cv_bridge: Add missing test_depend on numpy -* Contributors: Kentaro Wada, Lukas Bulwahn, Maarten de Vries - -1.12.2 (2016-09-24) -------------------- -* Specify background label when colorizing label image -* Adjust to arbitrary image channels like 32FC40 - Proper fix for `#141 `_ -* Remove unexpectedly included print statement -* Contributors: Kentaro Wada, Vincent Rabaud - -1.12.1 (2016-07-11) -------------------- -* split the conversion tests out of enumerants -* support is_bigendian in Python - Fixes `#114 `_ - Also fixes mono16 test -* Support compressed Images messages in python for indigo - - Add cv2_to_comprssed_imgmsg: Convert from cv2 image to compressed image ros msg. - - Add comprssed_imgmsg_to_cv2: Convert the compress message to a new image. - - Add compressed image tests. - - Add time to msgs (compressed and regular). - add enumerants test for compressed image. - merge the compressed tests with the regular ones. - better comment explanation. I will squash this commit. - Fix indentation - fix typo mistage: from .imgmsg_to_compressed_cv2 to .compressed_imgmsg_to_cv2. - remove cv2.CV_8UC1 - remove rospy and time depndency. - change from IMREAD_COLOR to IMREAD_ANYCOLOR. - - make indentaion of 4. - - remove space trailer. - - remove space from empty lines. - - another set of for loops, it will make things easier to track. In that new set, just have the number of channels in ([],1,3,4) (ignore two for jpg). from: https://github.com/ros-perception/vision_opencv/pull/132#discussion_r66721943 - - keep the OpenCV error message. from: https://github.com/ros-perception/vision_opencv/pull/132#discussion_r66721013 - add debug print for test. - add case for 4 channels in test. - remove 4 channels case from compressed test. - add debug print for test. - change typo of format. - fix typo in format. change from dip to dib. - change to IMREAD_ANYCOLOR as python code. (as it should). - rename TIFF to tiff - Sperate the tests one for regular images and one for compressed. - update comment -* Add CvtColorForDisplayOptions with new colormap param -* fix doc jobs -* Add python binding for cv_bridge::cvtColorForDisplay -* Don't colorize float image as label image - This is a bug and image whose encoding is other than 32SC1 should not be - colorized. (currently, depth images with 32FC1 is also colorized.) -* Fix compilation of cv_bridge with opencv3 and python3. -* Contributors: Kentaro Wada, Maarten de Vries, Vincent Rabaud, talregev - -1.12.0 (2016-03-18) -------------------- -* depend on OpenCV3 only -* Contributors: Vincent Rabaud - -1.11.12 (2016-03-10) --------------------- -* Fix my typo -* Remove another eval - Because `cvtype2_to_dtype_with_channels('8UCimport os; os.system("rm -rf /")')` should never have a chance of happening. 
-* Remove eval, and other fixes - Also, extend from object, so as not to get a python 2.2-style class, and use the new-style raise statement -* Contributors: Eric Wieser - -1.11.11 (2016-01-31) --------------------- -* clean up the doc files -* fix a few warnings in doc jobs -* Contributors: Vincent Rabaud - -1.11.10 (2016-01-16) --------------------- -* fix OpenCV3 build -* Describe about converting label to bgr image in cvtColorForDisplay -* Convert label to BGR image to display -* Add test for rgb_colors.cpp -* Add rgb_colors util -* Update doc for converting to BGR in cvtColorForDisplay -* Convert to BGR from any encoding -* Refactor: sensor_msgs::image_encodings -> enc -* Contributors: Kentaro Wada, Vincent Rabaud - -1.11.9 (2015-11-29) -------------------- -* deal with endianness -* add cvtColorForDisplay -* Improved efficiency by using toCvShare instead of toCvCopy. -* Add format enum for easy use and choose format. -* fix compilation warnings -* start to extend the cv_bridge with cvCompressedImage class, that will convert from cv::Mat opencv images to CompressedImage ros messages and vice versa -* Contributors: Carlos Costa, Vincent Rabaud, talregev - -1.11.8 (2015-07-15) -------------------- -* Simplify some OpenCV3 distinction -* fix tests -* fix test under OpenCV3 -* Remove Python for Android -* Contributors: Gary Servin, Vincent Rabaud - -1.11.7 (2014-12-14) -------------------- -* check that the type is indeed a Numpy one - This is in response to `#51 `_ -* Contributors: Vincent Rabaud - -1.11.6 (2014-11-16) -------------------- -* chnage the behavior when there is only one channel -* cleanup tests -* Contributors: Vincent Rabaud - -1.11.5 (2014-09-21) -------------------- -* get code to work with OpenCV3 - actually fixes `#46 `_ properly -* Contributors: Vincent Rabaud - -1.11.4 (2014-07-27) -------------------- -* Fix `#42 `_ -* Contributors: Libor Wagner - -1.11.3 (2014-06-08) -------------------- -* Correct dependency from non-existent package to cv_bridge -* Contributors: Isaac Isao Saito - -1.11.2 (2014-04-28) -------------------- -* Add depend on python for cv_bridge -* Contributors: Scott K Logan - -1.11.1 (2014-04-16) -------------------- -* fixes `#34 `_ -* Contributors: Vincent Rabaud - -1.11.0 (2014-02-15) -------------------- -* remove deprecated API and fixes `#33 `_ -* fix OpenCV dependencies -* Contributors: Vincent Rabaud - -1.10.15 (2014-02-07) --------------------- -* fix python 3 error at configure time -* Contributors: Dirk Thomas - -1.10.14 (2013-11-23 16:17) --------------------------- -* update changelog -* Find NumPy include directory -* Contributors: Brian Jensen, Vincent Rabaud - -1.10.13 (2013-11-23 09:19) --------------------------- -* fix compilation on older NumPy -* Contributors: Vincent Rabaud - -1.10.12 (2013-11-22) --------------------- -* bump changelog -* Fixed issue with image message step size -* fix crash for non char data -* fix `#26 `_ -* Contributors: Brian Jensen, Vincent Rabaud - -1.10.11 (2013-10-23) --------------------- -* fix bad image check and improve it too -* Contributors: Vincent Rabaud - -1.10.10 (2013-10-19) --------------------- -* fixes `#25 `_ -* Contributors: Vincent Rabaud - -1.10.9 (2013-10-07) -------------------- -* fixes `#20 `_ -* Contributors: Vincent Rabaud - -1.10.8 (2013-09-09) -------------------- -* fixes `#22 `_ -* fixes `#17 `_ -* check for CATKIN_ENABLE_TESTING -* fixes `#16 `_ -* update email address -* Contributors: Lukas Bulwahn, Vincent Rabaud - -1.10.7 (2013-07-17) -------------------- - -1.10.6 
(2013-03-01) -------------------- -* make sure conversion are applied for depth differences -* Contributors: Vincent Rabaud - -1.10.5 (2013-02-11) -------------------- - -1.10.4 (2013-02-02) -------------------- -* fix installation of the boost package -* Contributors: Vincent Rabaud - -1.10.3 (2013-01-17) -------------------- -* Link against PTYHON_LIBRARIES -* Contributors: William Woodall - -1.10.2 (2013-01-13) -------------------- -* use CATKIN_DEVEL_PREFIX instead of obsolete CATKIN_BUILD_PREFIX -* Contributors: Dirk Thomas - -1.10.1 (2013-01-10) -------------------- -* add licenses -* fixes `#5 `_ by removing the logic from Python and using wrapped C++ and adding a test for it -* fix a bug discovered when running the opencv_tests -* use some C++ logic -* add a Boost Python module to have the C++ logix used directly in Python -* Contributors: Vincent Rabaud - -1.10.0 (2013-01-03) -------------------- -* add conversion from Bayer to gray -* Contributors: Vincent Rabaud - -1.9.15 (2013-01-02) -------------------- -* use the reverted isColor behavior -* Contributors: Vincent Rabaud - -1.9.14 (2012-12-30) -------------------- - -1.9.13 (2012-12-15) -------------------- -* use the catkin macros for the setup.py -* fix `#3 `_ -* Contributors: Vincent Rabaud - -1.9.12 (2012-12-14) -------------------- -* buildtool_depend catkin fix -* CMakeLists.txt clean up. -* Contributors: William Woodall - -1.9.11 (2012-12-10) -------------------- -* fix issue `#1 `_ -* Cleanup of package.xml -* Contributors: Vincent Rabaud, William Woodall - -1.9.10 (2012-10-04) -------------------- -* fix the bad include folder -* Contributors: Vincent Rabaud - -1.9.9 (2012-10-01) ------------------- -* fix dependencies -* Contributors: Vincent Rabaud - -1.9.8 (2012-09-30) ------------------- -* fix some dependencies -* add rosconsole as a dependency -* fix missing Python at install and fix some dependencies -* Contributors: Vincent Rabaud - -1.9.7 (2012-09-28 21:07) ------------------------- -* add missing stuff -* make sure we find catkin -* Contributors: Vincent Rabaud - -1.9.6 (2012-09-28 15:17) ------------------------- -* move the test to where it belongs -* fix the tests and the API to not handle conversion from CV_TYPE to Color type (does not make sense) -* comply to the new Catkin API -* backport the YUV422 bug fix from Fuerte -* apply patch from https://code.ros.org/trac/ros-pkg/ticket/5556 -* Contributors: Vincent Rabaud - -1.9.5 (2012-09-15) ------------------- -* remove dependencies to the opencv2 ROS package -* Contributors: Vincent Rabaud - -1.9.4 (2012-09-13) ------------------- -* make sure the include folders are copied to the right place -* Contributors: Vincent Rabaud - -1.9.3 (2012-09-12) ------------------- - -1.9.2 (2012-09-07) ------------------- -* be more compliant to the latest catkin -* added catkin_project() to cv_bridge, image_geometry, and opencv_tests -* Contributors: Jonathan Binney, Vincent Rabaud - -1.9.1 (2012-08-28 22:06) ------------------------- -* remove things that were marked as ROS_DEPRECATED -* Contributors: Vincent Rabaud - -1.9.0 (2012-08-28 14:29) ------------------------- -* catkinized opencv_tests by Jon Binney -* catkinized cv_bridge package... others disable for now by Jon Binney -* remove the version check, let's trust OpenCV :) -* revert the removal of opencv2 -* vision_opencv: Export OpenCV flags in manifests for image_geometry, cv_bridge. 
-* finally get rid of opencv2 as it is a system dependency now -* bump REQUIRED version of OpenCV to 2.3.2, which is what's in ros-fuerte-opencv -* switch rosdep name to opencv2, to refer to ros-fuerte-opencv2 -* added missing header -* Added constructor to CvImage to make converting a cv::Mat to sensor_msgs::Image less verbose. -* cv_bridge: Added unit test for `#5206 `_ -* cv_bridge: Applied patch from mdesnoyer to fix handling of non-continuous OpenCV images. `#5206 `_ -* Adding opencv2 to all manifests, so that client packages may - not break when using them. -* baking in opencv debs and attempting a pre-release -* cv_bridge: Support for new 16-bit encodings. -* cv_bridge: Deprecate old C++ cv_bridge API. -* cv_bridge: Correctly scale for MONO8 <-> MONO16 conversions. -* cv_bridge: Fixed issue where pointer version to toCvCopy would ignore the requested encoding (http://answers.ros.org/question/258/converting-kinect-rgb-image-to-opencv-gives-wrong). -* fixed doc build by taking a static snapshot -* cv_bridge: Marking doc reviewed. -* cv_bridge: Tweaks to make docs look better. -* cv_bridge: Added cvtColor(). License notices. Documented that CvBridge class is obsolete. -* cv_bridge: Added redesigned C++ cv_bridge. -* Doc cleanup -* Trigger doc rebuild -* mono16 -> bgr conversion tested and fixed in C -* Added Ubuntu platform tags to manifest -* Handle mono16 properly -* Raise exception when imgMsgToCv() gets an image encoding it does not recognise, `#3489 `_ -* Remove use of deprecated rosbuild macros -* Fixed example -* cv_bridge split from opencv2 -* Contributors: Vincent Rabaud, ethanrublee, gerkey, jamesb, mihelich, vrabaud, wheeler diff --git a/src/vision_opencv/cv_bridge/CMakeLists.txt b/src/vision_opencv/cv_bridge/CMakeLists.txt deleted file mode 100644 index 997bef3..0000000 --- a/src/vision_opencv/cv_bridge/CMakeLists.txt +++ /dev/null @@ -1,48 +0,0 @@ -cmake_minimum_required(VERSION 2.8) -project(cv_bridge) - -find_package(catkin REQUIRED COMPONENTS rosconsole sensor_msgs) - -if(NOT ANDROID) - find_package(PythonLibs) - if(PYTHONLIBS_VERSION_STRING VERSION_LESS 3) - find_package(Boost REQUIRED python) - else() - find_package(Boost REQUIRED python3) - endif() -else() -find_package(Boost REQUIRED) -endif() -find_package(OpenCV 3 REQUIRED - COMPONENTS - opencv_core - opencv_imgproc - opencv_imgcodecs - CONFIG -) - -catkin_package( - INCLUDE_DIRS include - LIBRARIES ${PROJECT_NAME} - CATKIN_DEPENDS rosconsole sensor_msgs - DEPENDS OpenCV - CFG_EXTRAS cv_bridge-extras.cmake -) - -catkin_python_setup() - -include_directories(include ${Boost_INCLUDE_DIRS} ${OpenCV_INCLUDE_DIRS} ${catkin_INCLUDE_DIRS}) - -if(NOT ANDROID) -add_subdirectory(python) -endif() -add_subdirectory(src) -if(CATKIN_ENABLE_TESTING) - add_subdirectory(test) -endif() - -# install the include folder -install( - DIRECTORY include/${PROJECT_NAME}/ - DESTINATION ${CATKIN_PACKAGE_INCLUDE_DESTINATION} -) diff --git a/src/vision_opencv/cv_bridge/cmake/cv_bridge-extras.cmake.in b/src/vision_opencv/cv_bridge/cmake/cv_bridge-extras.cmake.in deleted file mode 100644 index a4beda5..0000000 --- a/src/vision_opencv/cv_bridge/cmake/cv_bridge-extras.cmake.in +++ /dev/null @@ -1,12 +0,0 @@ -set(OpenCV_VERSION @OpenCV_VERSION@) -set(OpenCV_VERSION_MAJOR @OpenCV_VERSION_MAJOR@) -set(OpenCV_VERSION_MINOR @OpenCV_VERSION_MINOR@) -set(OpenCV_VERSION_PATCH @OpenCV_VERSION_PATCH@) -set(OpenCV_SHARED @OpenCV_SHARED@) -set(OpenCV_CONFIG_PATH @OpenCV_CONFIG_PATH@) -set(OpenCV_INSTALL_PATH @OpenCV_INSTALL_PATH@) -set(OpenCV_LIB_COMPONENTS 
@OpenCV_LIB_COMPONENTS@) -set(OpenCV_USE_MANGLED_PATHS @OpenCV_USE_MANGLED_PATHS@) -set(OpenCV_MODULES_SUFFIX @OpenCV_MODULES_SUFFIX@) - - diff --git a/src/vision_opencv/cv_bridge/doc/conf.py b/src/vision_opencv/cv_bridge/doc/conf.py deleted file mode 100644 index c2af74f..0000000 --- a/src/vision_opencv/cv_bridge/doc/conf.py +++ /dev/null @@ -1,201 +0,0 @@ -# -*- coding: utf-8 -*- -# -# cv_bridge documentation build configuration file, created by -# sphinx-quickstart on Mon Jun 1 14:21:53 2009. -# -# This file is execfile()d with the current directory set to its containing dir. -# -# Note that not all possible configuration values are present in this -# autogenerated file. -# -# All configuration values have a default; values that are commented out -# serve to show the default. - -import sys, os - -# If extensions (or modules to document with autodoc) are in another directory, -# add these directories to sys.path here. If the directory is relative to the -# documentation root, use os.path.abspath to make it absolute, like shown here. -#sys.path.append(os.path.abspath('.')) - -# -- General configuration ----------------------------------------------------- - -# Add any Sphinx extension module names here, as strings. They can be extensions -# coming with Sphinx (named 'sphinx.ext.*') or your custom ones. -extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.intersphinx', 'sphinx.ext.pngmath'] - -# Add any paths that contain templates here, relative to this directory. -templates_path = ['_templates'] - -# The suffix of source filenames. -source_suffix = '.rst' - -# The encoding of source files. -#source_encoding = 'utf-8' - -# The master toctree document. -master_doc = 'index' - -# General information about the project. -project = u'cv_bridge' -copyright = u'2009, Willow Garage, Inc.' - -# The version info for the project you're documenting, acts as replacement for -# |version| and |release|, also used in various other places throughout the -# built documents. -# -# The short X.Y version. -version = '0.1' -# The full version, including alpha/beta/rc tags. -release = '0.1.0' - -# The language for content autogenerated by Sphinx. Refer to documentation -# for a list of supported languages. -#language = None - -# There are two options for replacing |today|: either, you set today to some -# non-false value, then it is used: -#today = '' -# Else, today_fmt is used as the format for a strftime call. -#today_fmt = '%B %d, %Y' - -# List of documents that shouldn't be included in the build. -#unused_docs = [] - -# List of directories, relative to source directory, that shouldn't be searched -# for source files. -exclude_trees = ['_build'] - -# The reST default role (used for this markup: `text`) to use for all documents. -#default_role = None - -# If true, '()' will be appended to :func: etc. cross-reference text. -#add_function_parentheses = True - -# If true, the current module name will be prepended to all description -# unit titles (such as .. function::). -#add_module_names = True - -# If true, sectionauthor and moduleauthor directives will be shown in the -# output. They are ignored by default. -#show_authors = False - -# The name of the Pygments (syntax highlighting) style to use. -pygments_style = 'sphinx' - -# A list of ignored prefixes for module index sorting. -#modindex_common_prefix = [] - - -# -- Options for HTML output --------------------------------------------------- - -# The theme to use for HTML and HTML Help pages. 
Major themes that come with -# Sphinx are currently 'default' and 'sphinxdoc'. -html_theme = 'default' - -# Theme options are theme-specific and customize the look and feel of a theme -# further. For a list of options available for each theme, see the -# documentation. -#html_theme_options = {} - -# Add any paths that contain custom themes here, relative to this directory. -#html_theme_path = [] - -# The name for this set of Sphinx documents. If None, it defaults to -# " v documentation". -#html_title = None - -# A shorter title for the navigation bar. Default is the same as html_title. -#html_short_title = None - -# The name of an image file (relative to this directory) to place at the top -# of the sidebar. -#html_logo = None - -# The name of an image file (within the static path) to use as favicon of the -# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 -# pixels large. -#html_favicon = None - -# Add any paths that contain custom static files (such as style sheets) here, -# relative to this directory. They are copied after the builtin static files, -# so a file named "default.css" will overwrite the builtin "default.css". -#html_static_path = ['_static'] - -# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, -# using the given strftime format. -#html_last_updated_fmt = '%b %d, %Y' - -# If true, SmartyPants will be used to convert quotes and dashes to -# typographically correct entities. -#html_use_smartypants = True - -# Custom sidebar templates, maps document names to template names. -#html_sidebars = {} - -# Additional templates that should be rendered to pages, maps page names to -# template names. -#html_additional_pages = {} - -# If false, no module index is generated. -#html_use_modindex = True - -# If false, no index is generated. -#html_use_index = True - -# If true, the index is split into individual pages for each letter. -#html_split_index = False - -# If true, links to the reST sources are added to the pages. -#html_show_sourcelink = True - -# If true, an OpenSearch description file will be output, and all pages will -# contain a tag referring to it. The value of this option must be the -# base URL from which the finished HTML is served. -#html_use_opensearch = '' - -# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml"). -#html_file_suffix = '' - -# Output file base name for HTML help builder. -htmlhelp_basename = 'cv_bridgedoc' - - -# -- Options for LaTeX output -------------------------------------------------- - -# The paper size ('letter' or 'a4'). -#latex_paper_size = 'letter' - -# The font size ('10pt', '11pt' or '12pt'). -#latex_font_size = '10pt' - -# Grouping the document tree into LaTeX files. List of tuples -# (source start file, target name, title, author, documentclass [howto/manual]). -latex_documents = [ - ('index', 'cv_bridge.tex', u'stereo\\_utils Documentation', - u'James Bowman', 'manual'), -] - -# The name of an image file (relative to this directory) to place at the top of -# the title page. -#latex_logo = None - -# For "manual" documents, if this is true, then toplevel headings are parts, -# not chapters. -#latex_use_parts = False - -# Additional stuff for the LaTeX preamble. -#latex_preamble = '' - -# Documents to append as an appendix to all manuals. -#latex_appendices = [] - -# If false, no module index is generated. -#latex_use_modindex = True - - -# Example configuration for intersphinx: refer to the Python standard library. 
-intersphinx_mapping = { - 'http://docs.python.org/': None, - 'http://docs.scipy.org/doc/numpy' : None, - } diff --git a/src/vision_opencv/cv_bridge/doc/index.rst b/src/vision_opencv/cv_bridge/doc/index.rst deleted file mode 100644 index c455220..0000000 --- a/src/vision_opencv/cv_bridge/doc/index.rst +++ /dev/null @@ -1,18 +0,0 @@ -cv_bridge -========= - -``cv_bridge`` contains a single class :class:`CvBridge` that converts ROS Image messages to -OpenCV images. - -.. module:: cv_bridge - -.. autoclass:: cv_bridge.CvBridge - :members: - -.. autoclass:: cv_bridge.CvBridgeError - -Indices and tables -================== - -* :ref:`genindex` -* :ref:`search` diff --git a/src/vision_opencv/cv_bridge/doc/mainpage.dox b/src/vision_opencv/cv_bridge/doc/mainpage.dox deleted file mode 100644 index e4ed1cb..0000000 --- a/src/vision_opencv/cv_bridge/doc/mainpage.dox +++ /dev/null @@ -1,14 +0,0 @@ -/** -\mainpage -\htmlinclude manifest.html - -\b cv_bridge contains classes for easily converting between ROS -sensor_msgs/Image messages and OpenCV images. - -\section codeapi Code API - - - cv_bridge::CvImage - - toCvCopy() - - toCvShare() - -*/ diff --git a/src/vision_opencv/cv_bridge/include/cv_bridge/cv_bridge.h b/src/vision_opencv/cv_bridge/include/cv_bridge/cv_bridge.h deleted file mode 100644 index 3e7b9aa..0000000 --- a/src/vision_opencv/cv_bridge/include/cv_bridge/cv_bridge.h +++ /dev/null @@ -1,429 +0,0 @@ -/********************************************************************* -* Software License Agreement (BSD License) -* -* Copyright (c) 2011, Willow Garage, Inc, -* Copyright (c) 2015, Tal Regev. -* All rights reserved. -* -* Redistribution and use in source and binary forms, with or without -* modification, are permitted provided that the following conditions -* are met: -* -* * Redistributions of source code must retain the above copyright -* notice, this list of conditions and the following disclaimer. -* * Redistributions in binary form must reproduce the above -* copyright notice, this list of conditions and the following -* disclaimer in the documentation and/or other materials provided -* with the distribution. -* * Neither the name of the Willow Garage nor the names of its -* contributors may be used to endorse or promote products derived -* from this software without specific prior written permission. -* -* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS -* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE -* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, -* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, -* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT -* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN -* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -* POSSIBILITY OF SUCH DAMAGE. 
-*********************************************************************/
-
-#ifndef CV_BRIDGE_CV_BRIDGE_H
-#define CV_BRIDGE_CV_BRIDGE_H
-
-#include <map>
-#include <boost/shared_ptr.hpp>
-#include <opencv2/core/core.hpp>
-#include <opencv2/imgproc/imgproc.hpp>
-#include <opencv2/imgproc/types_c.h>
-#include <sensor_msgs/Image.h>
-#include <sensor_msgs/CompressedImage.h>
-#include <ros/static_assert.h>
-
-namespace cv_bridge {
-
-class Exception : public std::runtime_error
-{
-public:
-  Exception(const std::string& description) : std::runtime_error(description) {}
-};
-
-class CvImage;
-
-typedef boost::shared_ptr<CvImage> CvImagePtr;
-typedef boost::shared_ptr<CvImage const> CvImageConstPtr;
-
-//from: http://docs.opencv.org/modules/highgui/doc/reading_and_writing_images_and_video.html#Mat imread(const string& filename, int flags)
-typedef enum {
-  BMP, DIB,
-  JPG, JPEG, JPE,
-  JP2,
-  PNG,
-  PBM, PGM, PPM,
-  SR, RAS,
-  TIFF, TIF,
-} Format;
-
-/**
- * \brief Image message class that is interoperable with sensor_msgs/Image but uses a
- * more convenient cv::Mat representation for the image data.
- */
-class CvImage
-{
-public:
-  std_msgs::Header header; //!< ROS header
-  std::string encoding;    //!< Image encoding ("mono8", "bgr8", etc.)
-  cv::Mat image;           //!< Image data for use with OpenCV
-
-  /**
-   * \brief Empty constructor.
-   */
-  CvImage() {}
-
-  /**
-   * \brief Constructor.
-   */
-  CvImage(const std_msgs::Header& header, const std::string& encoding,
-          const cv::Mat& image = cv::Mat())
-    : header(header), encoding(encoding), image(image)
-  {
-  }
-
-  /**
-   * \brief Convert this message to a ROS sensor_msgs::Image message.
-   *
-   * The returned sensor_msgs::Image message contains a copy of the image data.
-   */
-  sensor_msgs::ImagePtr toImageMsg() const;
-
-  /**
-   * dst_format specifies the format to compress the image to; it defaults to JPG.
-   * Supported values at the moment: jpg, jp2, bmp, png, tif.
-   * See the formats OpenCV supports:
-   * http://docs.opencv.org/modules/highgui/doc/reading_and_writing_images_and_video.html#Mat imread(const string& filename, int flags)
-   */
-  sensor_msgs::CompressedImagePtr toCompressedImageMsg(const Format dst_format = JPG) const;
-
-  /**
-   * \brief Copy the message data to a ROS sensor_msgs::Image message.
-   *
-   * This overload is intended mainly for aggregate messages such as stereo_msgs::DisparityImage,
-   * which contains a sensor_msgs::Image as a data member.
-   */
-  void toImageMsg(sensor_msgs::Image& ros_image) const;
-
-  /**
-   * dst_format specifies the format to compress the image to; it defaults to JPG.
-   * Supported values at the moment: jpg, jp2, bmp, png, tif.
-   * See the formats OpenCV supports:
-   * http://docs.opencv.org/modules/highgui/doc/reading_and_writing_images_and_video.html#Mat imread(const string& filename, int flags)
-   */
-  void toCompressedImageMsg(sensor_msgs::CompressedImage& ros_image, const Format dst_format = JPG) const;
-
-
-  typedef boost::shared_ptr<CvImage> Ptr;
-  typedef boost::shared_ptr<CvImage const> ConstPtr;
-
-protected:
-  boost::shared_ptr<void const> tracked_object_; // for sharing ownership
-
-  /// @cond DOXYGEN_IGNORE
-  friend
-  CvImageConstPtr toCvShare(const sensor_msgs::Image& source,
-                            const boost::shared_ptr<void const>& tracked_object,
-                            const std::string& encoding);
-  /// @endcond
-};
-
-
-/**
- * \brief Convert a sensor_msgs::Image message to an OpenCV-compatible CvImage, copying the
- * image data.
- *
- * \param source   A shared_ptr to a sensor_msgs::Image message
- * \param encoding The desired encoding of the image data, one of the following strings:
- *    - \c "mono8"
- *    - \c "bgr8"
- *    - \c "bgra8"
- *    - \c "rgb8"
- *    - \c "rgba8"
- *    - \c "mono16"
- *
- * If \a encoding is the empty string (the default), the returned CvImage has the same encoding
- * as \a source.
- */
-CvImagePtr toCvCopy(const sensor_msgs::ImageConstPtr& source,
-                    const std::string& encoding = std::string());
-
-CvImagePtr toCvCopy(const sensor_msgs::CompressedImageConstPtr& source,
-                    const std::string& encoding = std::string());
-
-/**
- * \brief Convert a sensor_msgs::Image message to an OpenCV-compatible CvImage, copying the
- * image data.
- *
- * \param source   A sensor_msgs::Image message
- * \param encoding The desired encoding of the image data, one of the following strings:
- *    - \c "mono8"
- *    - \c "bgr8"
- *    - \c "bgra8"
- *    - \c "rgb8"
- *    - \c "rgba8"
- *    - \c "mono16"
- *
- * If \a encoding is the empty string (the default), the returned CvImage has the same encoding
- * as \a source.
- * If the source is 8bit and the encoding 16 or vice-versa, a scaling is applied (65535/255 and
- * 255/65535 respectively). Otherwise, no scaling is applied and the rules from the convertTo OpenCV
- * function are applied (capping): http://docs.opencv.org/modules/core/doc/basic_structures.html#mat-convertto
- */
-CvImagePtr toCvCopy(const sensor_msgs::Image& source,
-                    const std::string& encoding = std::string());
-
-CvImagePtr toCvCopy(const sensor_msgs::CompressedImage& source,
-                    const std::string& encoding = std::string());
-
-/**
- * \brief Convert an immutable sensor_msgs::Image message to an OpenCV-compatible CvImage, sharing
- * the image data if possible.
- *
- * If the source encoding and desired encoding are the same, the returned CvImage will share
- * the image data with \a source without copying it. The returned CvImage cannot be modified, as that
- * could modify the \a source data.
- *
- * \param source   A shared_ptr to a sensor_msgs::Image message
- * \param encoding The desired encoding of the image data, one of the following strings:
- *    - \c "mono8"
- *    - \c "bgr8"
- *    - \c "bgra8"
- *    - \c "rgb8"
- *    - \c "rgba8"
- *    - \c "mono16"
- *
- * If \a encoding is the empty string (the default), the returned CvImage has the same encoding
- * as \a source.
- */
-CvImageConstPtr toCvShare(const sensor_msgs::ImageConstPtr& source,
-                          const std::string& encoding = std::string());
-
-/**
- * \brief Convert an immutable sensor_msgs::Image message to an OpenCV-compatible CvImage, sharing
- * the image data if possible.
- *
- * If the source encoding and desired encoding are the same, the returned CvImage will share
- * the image data with \a source without copying it. The returned CvImage cannot be modified, as that
- * could modify the \a source data.
- *
- * This overload is useful when you have a shared_ptr to a message that contains a
- * sensor_msgs::Image, and wish to share ownership with the containing message.
- *
- * \param source         The sensor_msgs::Image message
- * \param tracked_object A shared_ptr to an object owning the sensor_msgs::Image
- * \param encoding       The desired encoding of the image data, one of the following strings:
- *    - \c "mono8"
- *    - \c "bgr8"
- *    - \c "bgra8"
- *    - \c "rgb8"
- *    - \c "rgba8"
- *    - \c "mono16"
- *
- * If \a encoding is the empty string (the default), the returned CvImage has the same encoding
- * as \a source.
- */
-CvImageConstPtr toCvShare(const sensor_msgs::Image& source,
-                          const boost::shared_ptr<void const>& tracked_object,
-                          const std::string& encoding = std::string());
-
-/**
- * \brief Convert a CvImage to another encoding using the same rules as toCvCopy
- */
-CvImagePtr cvtColor(const CvImageConstPtr& source,
-                    const std::string& encoding);
-
-struct CvtColorForDisplayOptions {
-  CvtColorForDisplayOptions() :
-    do_dynamic_scaling(false),
-    min_image_value(0.0),
-    max_image_value(0.0),
-    colormap(-1),
-    bg_label(-1) {}
-  bool do_dynamic_scaling;
-  double min_image_value;
-  double max_image_value;
-  int colormap;
-  int bg_label;
-};
-
-
-/**
- * \brief Converts an immutable sensor_msgs::Image message to another CvImage for display purposes,
- * using practical conversion rules if needed.
- *
- * Data will be shared between input and output if possible.
- *
- * Recall: sensor_msgs::image_encodings::isColor and isMono tell whether an image contains R,G,B,A, mono
- * (or any combination/subset) with 8 or 16 bit depth.
- *
- * The following rules apply:
- * - if the output encoding is empty, whether the input image is mono or multiple-channel is
- *   preserved in the output image. The bit depth will be 8. It tries to convert to BGR no matter
- *   which encoding is passed in.
- * - if the output encoding is not empty, it must be one for which sensor_msgs::image_encodings::isColor
- *   or isMono returns true. It must also be 8 bit in depth.
- * - if the input encoding is an OpenCV format (e.g. 8UC1), and if we have 1, 3 or 4 channels, it is
- *   respectively converted to mono, BGR or BGRA.
- * - if the input encoding is 32SC1, the image is treated as a label image and converted to a BGR
- *   image with a different color for each label.
- *
- * \param source A shared_ptr to a sensor_msgs::Image message
- * \param encoding Either an encoding string that returns true in sensor_msgs::image_encodings::isColor
- *   or isMono, or the empty string as explained above.
- * \param options (cv_bridge::CvtColorForDisplayOptions) Options to convert the source image with.
- *   - do_dynamic_scaling If true, the image is dynamically scaled between its minimum and maximum value
- *     before being converted to its final encoding.
- *   - min_image_value Independently from do_dynamic_scaling, if min_image_value and max_image_value are
- *     different, the image is scaled between these two values before being converted to its final encoding.
- *   - max_image_value Maximum image value
- *   - colormap Colormap with which the source image is converted.
- */
-CvImageConstPtr cvtColorForDisplay(const CvImageConstPtr& source,
-                                   const std::string& encoding = std::string(),
-                                   const CvtColorForDisplayOptions options = CvtColorForDisplayOptions());
-
-/**
- * \brief Get the OpenCV type enum corresponding to the encoding.
- *
- * For example, "bgr8" -> CV_8UC3, "32FC1" -> CV_32FC1, and "32FC10" -> CV_32FC10.
- */
-int getCvType(const std::string& encoding);
-
-} // namespace cv_bridge
-
-
-// CvImage as a first class message type
-
-// The rest of this file hooks into the roscpp serialization API to make CvImage
-// a first-class message type you can publish and subscribe to directly.
-// Unfortunately this doesn't yet work with image_transport, so don't rewrite all
-// your callbacks to use CvImage! It might be useful for specific tasks, like
-// processing bag files.
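-//
-// For example, given the hooks below, a node can publish a CvImage directly
-// (publisher setup and variable names here are illustrative only):
-//
-//   ros::Publisher pub = nh.advertise<cv_bridge::CvImage>("image", 1);
-//   pub.publish(cv_bridge::CvImage(header, sensor_msgs::image_encodings::BGR8, mat));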
-
-/// @cond DOXYGEN_IGNORE
-namespace ros {
-
-namespace message_traits {
-
-template<> struct MD5Sum<cv_bridge::CvImage>
-{
-  static const char* value() { return MD5Sum<sensor_msgs::Image>::value(); }
-  static const char* value(const cv_bridge::CvImage&) { return value(); }
-
-  static const uint64_t static_value1 = MD5Sum<sensor_msgs::Image>::static_value1;
-  static const uint64_t static_value2 = MD5Sum<sensor_msgs::Image>::static_value2;
-
-  // If the definition of sensor_msgs/Image changes, we'll get a compile error here.
-  ROS_STATIC_ASSERT(MD5Sum<sensor_msgs::Image>::static_value1 == 0x060021388200f6f0ULL);
-  ROS_STATIC_ASSERT(MD5Sum<sensor_msgs::Image>::static_value2 == 0xf447d0fcd9c64743ULL);
-};
-
-template<> struct DataType<cv_bridge::CvImage>
-{
-  static const char* value() { return DataType<sensor_msgs::Image>::value(); }
-  static const char* value(const cv_bridge::CvImage&) { return value(); }
-};
-
-template<> struct Definition<cv_bridge::CvImage>
-{
-  static const char* value() { return Definition<sensor_msgs::Image>::value(); }
-  static const char* value(const cv_bridge::CvImage&) { return value(); }
-};
-
-template<> struct HasHeader<cv_bridge::CvImage> : TrueType {};
-
-} // namespace ros::message_traits
-
-namespace serialization {
-
-template<> struct Serializer<cv_bridge::CvImage>
-{
-  /// @todo Still ignoring endianness...
-
-  template<typename Stream>
-  inline static void write(Stream& stream, const cv_bridge::CvImage& m)
-  {
-    stream.next(m.header);
-    stream.next((uint32_t)m.image.rows); // height
-    stream.next((uint32_t)m.image.cols); // width
-    stream.next(m.encoding);
-    uint8_t is_bigendian = 0;
-    stream.next(is_bigendian);
-    stream.next((uint32_t)m.image.step);
-    size_t data_size = m.image.step*m.image.rows;
-    stream.next((uint32_t)data_size);
-    if (data_size > 0)
-      memcpy(stream.advance(data_size), m.image.data, data_size);
-  }
-
-  template<typename Stream>
-  inline static void read(Stream& stream, cv_bridge::CvImage& m)
-  {
-    stream.next(m.header);
-    uint32_t height, width;
-    stream.next(height);
-    stream.next(width);
-    stream.next(m.encoding);
-    uint8_t is_bigendian;
-    stream.next(is_bigendian);
-    uint32_t step, data_size;
-    stream.next(step);
-    stream.next(data_size);
-    int type = cv_bridge::getCvType(m.encoding);
-    // Construct matrix pointing to the stream data, then copy it to m.image
-    cv::Mat tmp((int)height, (int)width, type, stream.advance(data_size), (size_t)step);
-    tmp.copyTo(m.image);
-  }
-
-  inline static uint32_t serializedLength(const cv_bridge::CvImage& m)
-  {
-    size_t data_size = m.image.step*m.image.rows;
-    return serializationLength(m.header) + serializationLength(m.encoding) + 17 + data_size;
-  }
-};
-
-} // namespace ros::serialization
-
-namespace message_operations {
-
-template<> struct Printer<cv_bridge::CvImage>
-{
-  template<typename Stream>
-  static void stream(Stream&, const std::string&, const cv_bridge::CvImage&)
-  {
-    /// @todo Replicate printing for sensor_msgs::Image
-  }
-};
-
-} // namespace ros::message_operations
-
-} // namespace ros
-
-namespace cv_bridge {
-
-inline std::ostream& operator<<(std::ostream& s, const CvImage& m)
-{
-  ros::message_operations::Printer<CvImage>::stream(s, "", m);
-  return s;
-}
-
-} // namespace cv_bridge
-
-/// @endcond
-
-#endif
diff --git a/src/vision_opencv/cv_bridge/include/cv_bridge/rgb_colors.h b/src/vision_opencv/cv_bridge/include/cv_bridge/rgb_colors.h
deleted file mode 100644
index 1eaa88b..0000000
--- a/src/vision_opencv/cv_bridge/include/cv_bridge/rgb_colors.h
+++ /dev/null
@@ -1,211 +0,0 @@
-// -*- mode: c++ -*-
-/*********************************************************************
- * Original color definition is at scikit-image distributed with
- * following license disclaimer:
- *
- * Copyright (C) 2011, the scikit-image team
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- *
- *  1. Redistributions of source code must retain the above copyright
- *     notice, this list of conditions and the following disclaimer.
- *  2. Redistributions in binary form must reproduce the above copyright
- *     notice, this list of conditions and the following disclaimer in
- *     the documentation and/or other materials provided with the
- *     distribution.
- *  3. Neither the name of skimage nor the names of its contributors may be
- *     used to endorse or promote products derived from this software without
- *     specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
- * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
- * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- *********************************************************************/
-
-#ifndef CV_BRIDGE_RGB_COLORS_H_
-#define CV_BRIDGE_RGB_COLORS_H_
-
-#include <opencv2/opencv.hpp>
-
-
-namespace cv_bridge
-{
-
-namespace rgb_colors
-{
-
-  /**
-   * @brief
-   * 146 rgb colors
-   */
-  enum Colors {
-    ALICEBLUE,
-    ANTIQUEWHITE,
-    AQUA,
-    AQUAMARINE,
-    AZURE,
-    BEIGE,
-    BISQUE,
-    BLACK,
-    BLANCHEDALMOND,
-    BLUE,
-    BLUEVIOLET,
-    BROWN,
-    BURLYWOOD,
-    CADETBLUE,
-    CHARTREUSE,
-    CHOCOLATE,
-    CORAL,
-    CORNFLOWERBLUE,
-    CORNSILK,
-    CRIMSON,
-    CYAN,
-    DARKBLUE,
-    DARKCYAN,
-    DARKGOLDENROD,
-    DARKGRAY,
-    DARKGREEN,
-    DARKGREY,
-    DARKKHAKI,
-    DARKMAGENTA,
-    DARKOLIVEGREEN,
-    DARKORANGE,
-    DARKORCHID,
-    DARKRED,
-    DARKSALMON,
-    DARKSEAGREEN,
-    DARKSLATEBLUE,
-    DARKSLATEGRAY,
-    DARKSLATEGREY,
-    DARKTURQUOISE,
-    DARKVIOLET,
-    DEEPPINK,
-    DEEPSKYBLUE,
-    DIMGRAY,
-    DIMGREY,
-    DODGERBLUE,
-    FIREBRICK,
-    FLORALWHITE,
-    FORESTGREEN,
-    FUCHSIA,
-    GAINSBORO,
-    GHOSTWHITE,
-    GOLD,
-    GOLDENROD,
-    GRAY,
-    GREEN,
-    GREENYELLOW,
-    GREY,
-    HONEYDEW,
-    HOTPINK,
-    INDIANRED,
-    INDIGO,
-    IVORY,
-    KHAKI,
-    LAVENDER,
-    LAVENDERBLUSH,
-    LAWNGREEN,
-    LEMONCHIFFON,
-    LIGHTBLUE,
-    LIGHTCORAL,
-    LIGHTCYAN,
-    LIGHTGOLDENRODYELLOW,
-    LIGHTGRAY,
-    LIGHTGREEN,
-    LIGHTGREY,
-    LIGHTPINK,
-    LIGHTSALMON,
-    LIGHTSEAGREEN,
-    LIGHTSKYBLUE,
-    LIGHTSLATEGRAY,
-    LIGHTSLATEGREY,
-    LIGHTSTEELBLUE,
-    LIGHTYELLOW,
-    LIME,
-    LIMEGREEN,
-    LINEN,
-    MAGENTA,
-    MAROON,
-    MEDIUMAQUAMARINE,
-    MEDIUMBLUE,
-    MEDIUMORCHID,
-    MEDIUMPURPLE,
-    MEDIUMSEAGREEN,
-    MEDIUMSLATEBLUE,
-    MEDIUMSPRINGGREEN,
-    MEDIUMTURQUOISE,
-    MEDIUMVIOLETRED,
-    MIDNIGHTBLUE,
-    MINTCREAM,
-    MISTYROSE,
-    MOCCASIN,
-    NAVAJOWHITE,
-    NAVY,
-    OLDLACE,
-    OLIVE,
-    OLIVEDRAB,
-    ORANGE,
-    ORANGERED,
-    ORCHID,
-    PALEGOLDENROD,
-    PALEGREEN,
-    PALEVIOLETRED,
-    PAPAYAWHIP,
-    PEACHPUFF,
-    PERU,
-    PINK,
-    PLUM,
-    POWDERBLUE,
-    PURPLE,
-    RED,
-    ROSYBROWN,
-    ROYALBLUE,
-    SADDLEBROWN,
-    SALMON,
-    SANDYBROWN,
-    SEAGREEN,
-    SEASHELL,
-    SIENNA,
-    SILVER,
-    SKYBLUE,
-    SLATEBLUE,
-    SLATEGRAY,
-    SLATEGREY,
-    SNOW,
-    SPRINGGREEN,
-    STEELBLUE,
-    TAN,
-    TEAL,
-    THISTLE,
-    TOMATO,
-    TURQUOISE,
-    VIOLET,
-    WHEAT,
-    WHITE,
-    WHITESMOKE,
-    YELLOW,
-    YELLOWGREEN,
-  };
-
-  /**
-   * @brief
-   * get rgb color with enum.
-   */
-  cv::Vec3d getRGBColor(const int color);
-
-} // namespace rgb_colors
-
-} // namespace cv_bridge
-
-#endif
diff --git a/src/vision_opencv/cv_bridge/package.xml b/src/vision_opencv/cv_bridge/package.xml
deleted file mode 100644
index 69fc01f..0000000
--- a/src/vision_opencv/cv_bridge/package.xml
+++ /dev/null
@@ -1,41 +0,0 @@
-<package>
-  <name>cv_bridge</name>
-  <version>1.13.1</version>
-  <description>
-    This contains CvBridge, which converts between ROS
-    Image messages and OpenCV images.
-  </description>
-  <author>Patrick Mihelich</author>
-  <author>James Bowman</author>
-  <maintainer>Vincent Rabaud</maintainer>
-  <license>BSD</license>
-  <url>http://www.ros.org/wiki/cv_bridge</url>
-  <url>https://github.com/ros-perception/vision_opencv</url>
-  <url>https://github.com/ros-perception/vision_opencv/issues</url>
-
-  <buildtool_depend>catkin</buildtool_depend>
-
-  <build_depend>boost</build_depend>
-  <build_depend>libopencv-dev</build_depend>
-  <build_depend>python</build_depend>
-  <build_depend>python-opencv</build_depend>
-  <build_depend>rosconsole</build_depend>
-  <build_depend>sensor_msgs</build_depend>
-
-  <run_depend>boost</run_depend>
-  <run_depend>libopencv-dev</run_depend>
-  <run_depend>python</run_depend>
-  <run_depend>python-opencv</run_depend>
-  <run_depend>rosconsole</run_depend>
-  <run_depend>libopencv-dev</run_depend>
-  <run_depend>sensor_msgs</run_depend>
-
-  <test_depend>rostest</test_depend>
-  <test_depend>python-numpy</test_depend>
-
-  <doc_depend>dvipng</doc_depend>
-</package>
diff --git a/src/vision_opencv/cv_bridge/python/CMakeLists.txt b/src/vision_opencv/cv_bridge/python/CMakeLists.txt
deleted file mode 100644
index 1b677d3..0000000
--- a/src/vision_opencv/cv_bridge/python/CMakeLists.txt
+++ /dev/null
@@ -1,8 +0,0 @@
-configure_file(__init__.py.plain.in
-               ${CATKIN_DEVEL_PREFIX}/${CATKIN_PACKAGE_PYTHON_DESTINATION}/boost/__init__.py
-               @ONLY
-)
-
-install(FILES ${CATKIN_DEVEL_PREFIX}/${CATKIN_PACKAGE_PYTHON_DESTINATION}/boost/__init__.py
-        DESTINATION ${CATKIN_PACKAGE_PYTHON_DESTINATION}/boost/
-)
diff --git a/src/vision_opencv/cv_bridge/python/__init__.py.plain.in b/src/vision_opencv/cv_bridge/python/__init__.py.plain.in
deleted file mode 100644
index e69de29..0000000
diff --git a/src/vision_opencv/cv_bridge/python/cv_bridge/__init__.py b/src/vision_opencv/cv_bridge/python/cv_bridge/__init__.py
deleted file mode 100644
index 5189c1a..0000000
--- a/src/vision_opencv/cv_bridge/python/cv_bridge/__init__.py
+++ /dev/null
@@ -1,8 +0,0 @@
-from .core import CvBridge, CvBridgeError
-
-# python bindings
-try:
-    # This try is just to satisfy doc jobs that are built differently.
-    from cv_bridge.boost.cv_bridge_boost import cvtColorForDisplay, getCvType
-except ImportError:
-    pass
diff --git a/src/vision_opencv/cv_bridge/python/cv_bridge/__pycache__/core.cpython-38.pyc b/src/vision_opencv/cv_bridge/python/cv_bridge/__pycache__/core.cpython-38.pyc
deleted file mode 100644
index b1dd76e..0000000
Binary files a/src/vision_opencv/cv_bridge/python/cv_bridge/__pycache__/core.cpython-38.pyc and /dev/null differ
diff --git a/src/vision_opencv/cv_bridge/python/cv_bridge/core.py b/src/vision_opencv/cv_bridge/python/cv_bridge/core.py
deleted file mode 100644
index a47bbd7..0000000
--- a/src/vision_opencv/cv_bridge/python/cv_bridge/core.py
+++ /dev/null
@@ -1,266 +0,0 @@
-# Software License Agreement (BSD License)
-#
-# Copyright (c) 2011, Willow Garage, Inc.
-# Copyright (c) 2016, Tal Regev.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-#  * Redistributions of source code must retain the above copyright
-#    notice, this list of conditions and the following disclaimer.
-#  * Redistributions in binary form must reproduce the above
-#    copyright notice, this list of conditions and the following
-#    disclaimer in the documentation and/or other materials provided
-#    with the distribution.
-#  * Neither the name of Willow Garage, Inc.
nor the names of its -# contributors may be used to endorse or promote products derived -# from this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS -# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE -# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, -# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, -# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT -# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN -# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -# POSSIBILITY OF SUCH DAMAGE. - -import sensor_msgs.msg -import sys - - -class CvBridgeError(TypeError): - """ - This is the error raised by :class:`cv_bridge.CvBridge` methods when they fail. - """ - pass - - -class CvBridge(object): - """ - The CvBridge is an object that converts between OpenCV Images and ROS Image messages. - - .. doctest:: - :options: -ELLIPSIS, +NORMALIZE_WHITESPACE - - >>> import cv2 - >>> import numpy as np - >>> from cv_bridge import CvBridge - >>> br = CvBridge() - >>> dtype, n_channels = br.encoding_as_cvtype2('8UC3') - >>> im = np.ndarray(shape=(480, 640, n_channels), dtype=dtype) - >>> msg = br.cv2_to_imgmsg(im) # Convert the image to a message - >>> im2 = br.imgmsg_to_cv2(msg) # Convert the message to a new image - >>> cmprsmsg = br.cv2_to_compressed_imgmsg(im) # Convert the image to a compress message - >>> im22 = br.compressed_imgmsg_to_cv2(msg) # Convert the compress message to a new image - >>> cv2.imwrite("this_was_a_message_briefly.png", im2) - - """ - - def __init__(self): - import cv2 - self.cvtype_to_name = {} - self.cvdepth_to_numpy_depth = {cv2.CV_8U: 'uint8', cv2.CV_8S: 'int8', cv2.CV_16U: 'uint16', - cv2.CV_16S: 'int16', cv2.CV_32S:'int32', cv2.CV_32F:'float32', - cv2.CV_64F: 'float64'} - - for t in ["8U", "8S", "16U", "16S", "32S", "32F", "64F"]: - for c in [1, 2, 3, 4]: - nm = "%sC%d" % (t, c) - self.cvtype_to_name[getattr(cv2, "CV_%s" % nm)] = nm - - self.numpy_type_to_cvtype = {'uint8': '8U', 'int8': '8S', 'uint16': '16U', - 'int16': '16S', 'int32': '32S', 'float32': '32F', - 'float64': '64F'} - self.numpy_type_to_cvtype.update(dict((v, k) for (k, v) in self.numpy_type_to_cvtype.items())) - - def dtype_with_channels_to_cvtype2(self, dtype, n_channels): - return '%sC%d' % (self.numpy_type_to_cvtype[dtype.name], n_channels) - - def cvtype2_to_dtype_with_channels(self, cvtype): - from cv_bridge.boost.cv_bridge_boost import CV_MAT_CNWrap, CV_MAT_DEPTHWrap - return self.cvdepth_to_numpy_depth[CV_MAT_DEPTHWrap(cvtype)], CV_MAT_CNWrap(cvtype) - - def encoding_to_cvtype2(self, encoding): - from cv_bridge.boost.cv_bridge_boost import getCvType - - try: - return getCvType(encoding) - except RuntimeError as e: - raise CvBridgeError(e) - - def encoding_to_dtype_with_channels(self, encoding): - return self.cvtype2_to_dtype_with_channels(self.encoding_to_cvtype2(encoding)) - - def compressed_imgmsg_to_cv2(self, cmprs_img_msg, desired_encoding = "passthrough"): - """ - Convert a sensor_msgs::CompressedImage message to an OpenCV :cpp:type:`cv::Mat`. 
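The encoding tables above are plain string arithmetic and can be checked without any ROS installation. A minimal sketch (editorial illustration, not part of the deleted sources; it mirrors the `numpy_type_to_cvtype` map and `dtype_with_channels_to_cvtype2` from the class above):

    import numpy as np

    # Mirror of numpy_type_to_cvtype: numpy dtype name -> OpenCV depth token.
    NUMPY_TO_CV = {'uint8': '8U', 'int8': '8S', 'uint16': '16U',
                   'int16': '16S', 'int32': '32S', 'float32': '32F',
                   'float64': '64F'}

    def dtype_with_channels_to_cvtype2(dtype, n_channels):
        # '8UC3' for a 3-channel uint8 image, as in the class above.
        return '%sC%d' % (NUMPY_TO_CV[np.dtype(dtype).name], n_channels)

    assert dtype_with_channels_to_cvtype2(np.uint8, 3) == '8UC3'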
- - :param cmprs_img_msg: A :cpp:type:`sensor_msgs::CompressedImage` message - :param desired_encoding: The encoding of the image data, one of the following strings: - - * ``"passthrough"`` - * one of the standard strings in sensor_msgs/image_encodings.h - - :rtype: :cpp:type:`cv::Mat` - :raises CvBridgeError: when conversion is not possible. - - If desired_encoding is ``"passthrough"``, then the returned image has the same format as img_msg. - Otherwise desired_encoding must be one of the standard image encodings - - This function returns an OpenCV :cpp:type:`cv::Mat` message on success, or raises :exc:`cv_bridge.CvBridgeError` on failure. - - If the image only has one channel, the shape has size 2 (width and height) - """ - import cv2 - import numpy as np - - str_msg = cmprs_img_msg.data - buf = np.ndarray(shape=(1, len(str_msg)), - dtype=np.uint8, buffer=cmprs_img_msg.data) - im = cv2.imdecode(buf, cv2.IMREAD_ANYCOLOR) - - if desired_encoding == "passthrough": - return im - - from cv_bridge.boost.cv_bridge_boost import cvtColor2 - - try: - res = cvtColor2(im, "bgr8", desired_encoding) - except RuntimeError as e: - raise CvBridgeError(e) - - return res - - def imgmsg_to_cv2(self, img_msg, desired_encoding = "passthrough"): - """ - Convert a sensor_msgs::Image message to an OpenCV :cpp:type:`cv::Mat`. - - :param img_msg: A :cpp:type:`sensor_msgs::Image` message - :param desired_encoding: The encoding of the image data, one of the following strings: - - * ``"passthrough"`` - * one of the standard strings in sensor_msgs/image_encodings.h - - :rtype: :cpp:type:`cv::Mat` - :raises CvBridgeError: when conversion is not possible. - - If desired_encoding is ``"passthrough"``, then the returned image has the same format as img_msg. - Otherwise desired_encoding must be one of the standard image encodings - - This function returns an OpenCV :cpp:type:`cv::Mat` message on success, or raises :exc:`cv_bridge.CvBridgeError` on failure. - - If the image only has one channel, the shape has size 2 (width and height) - """ - import cv2 - import numpy as np - dtype, n_channels = self.encoding_to_dtype_with_channels(img_msg.encoding) - dtype = np.dtype(dtype) - dtype = dtype.newbyteorder('>' if img_msg.is_bigendian else '<') - if n_channels == 1: - im = np.ndarray(shape=(img_msg.height, img_msg.width), - dtype=dtype, buffer=img_msg.data) - else: - im = np.ndarray(shape=(img_msg.height, img_msg.width, n_channels), - dtype=dtype, buffer=img_msg.data) - # If the byt order is different between the message and the system. - if img_msg.is_bigendian == (sys.byteorder == 'little'): - im = im.byteswap().newbyteorder() - - if desired_encoding == "passthrough": - return im - - from cv_bridge.boost.cv_bridge_boost import cvtColor2 - - try: - res = cvtColor2(im, img_msg.encoding, desired_encoding) - except RuntimeError as e: - raise CvBridgeError(e) - - return res - - def cv2_to_compressed_imgmsg(self, cvim, dst_format = "jpg"): - """ - Convert an OpenCV :cpp:type:`cv::Mat` type to a ROS sensor_msgs::CompressedImage message. 
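The body of `imgmsg_to_cv2` above boils down to wrapping the message buffer in an ndarray of the right dtype and shape, then normalizing byte order. A standalone sketch of that step (`buffer_to_image` is a hypothetical stand-in for the method, not ROS API; it assumes numpy < 2, whose `ndarray.newbyteorder` the original code relies on):

    import sys
    import numpy as np

    def buffer_to_image(data, height, width, n_channels, dtype, is_bigendian):
        # Interpret the raw message bytes with the wire byte order first...
        dt = np.dtype(dtype).newbyteorder('>' if is_bigendian else '<')
        shape = (height, width) if n_channels == 1 else (height, width, n_channels)
        im = np.ndarray(shape=shape, dtype=dt, buffer=data)
        # ...then, exactly as core.py does, swap to native order when they differ.
        if is_bigendian == (sys.byteorder == 'little'):
            im = im.byteswap().newbyteorder()
        return im

    raw = np.arange(6, dtype=np.uint16).byteswap().tobytes()  # fake big-endian payload
    print(buffer_to_image(raw, 2, 3, 1, np.uint16, True))     # [[0 1 2] [3 4 5]]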
- - :param cvim: An OpenCV :cpp:type:`cv::Mat` - :param dst_format: The format of the image data, one of the following strings: - - * from http://docs.opencv.org/2.4/modules/highgui/doc/reading_and_writing_images_and_video.html - * from http://docs.opencv.org/2.4/modules/highgui/doc/reading_and_writing_images_and_video.html#Mat imread(const string& filename, int flags) - * bmp, dib - * jpeg, jpg, jpe - * jp2 - * png - * pbm, pgm, ppm - * sr, ras - * tiff, tif - - :rtype: A sensor_msgs.msg.CompressedImage message - :raises CvBridgeError: when the ``cvim`` has a type that is incompatible with ``format`` - - - This function returns a sensor_msgs::Image message on success, or raises :exc:`cv_bridge.CvBridgeError` on failure. - """ - import cv2 - import numpy as np - if not isinstance(cvim, (np.ndarray, np.generic)): - raise TypeError('Your input type is not a numpy array') - cmprs_img_msg = sensor_msgs.msg.CompressedImage() - cmprs_img_msg.format = dst_format - ext_format = '.' + dst_format - try: - cmprs_img_msg.data = np.array(cv2.imencode(ext_format, cvim)[1]).tostring() - except RuntimeError as e: - raise CvBridgeError(e) - - return cmprs_img_msg - - def cv2_to_imgmsg(self, cvim, encoding = "passthrough"): - """ - Convert an OpenCV :cpp:type:`cv::Mat` type to a ROS sensor_msgs::Image message. - - :param cvim: An OpenCV :cpp:type:`cv::Mat` - :param encoding: The encoding of the image data, one of the following strings: - - * ``"passthrough"`` - * one of the standard strings in sensor_msgs/image_encodings.h - - :rtype: A sensor_msgs.msg.Image message - :raises CvBridgeError: when the ``cvim`` has a type that is incompatible with ``encoding`` - - If encoding is ``"passthrough"``, then the message has the same encoding as the image's OpenCV type. - Otherwise desired_encoding must be one of the standard image encodings - - This function returns a sensor_msgs::Image message on success, or raises :exc:`cv_bridge.CvBridgeError` on failure. 
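`cv2_to_compressed_imgmsg` and `compressed_imgmsg_to_cv2` above reduce to `cv2.imencode`/`cv2.imdecode`, so the round trip can be exercised without ROS message types (sketch; PNG is lossless, so the final assertion holds, which it would not for JPEG):

    import cv2
    import numpy as np

    im = np.zeros((480, 640, 3), np.uint8)
    ok, buf = cv2.imencode('.png', im)        # 1-D array of PNG bytes
    assert ok
    data = buf.tobytes()                      # what would go into msg.data
    arr = np.frombuffer(data, dtype=np.uint8)
    im2 = cv2.imdecode(arr, cv2.IMREAD_ANYCOLOR)
    assert (im2 == im).all()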
- """ - import cv2 - import numpy as np - if not isinstance(cvim, (np.ndarray, np.generic)): - raise TypeError('Your input type is not a numpy array') - img_msg = sensor_msgs.msg.Image() - img_msg.height = cvim.shape[0] - img_msg.width = cvim.shape[1] - if len(cvim.shape) < 3: - cv_type = self.dtype_with_channels_to_cvtype2(cvim.dtype, 1) - else: - cv_type = self.dtype_with_channels_to_cvtype2(cvim.dtype, cvim.shape[2]) - if encoding == "passthrough": - img_msg.encoding = cv_type - else: - img_msg.encoding = encoding - # Verify that the supplied encoding is compatible with the type of the OpenCV image - if self.cvtype_to_name[self.encoding_to_cvtype2(encoding)] != cv_type: - raise CvBridgeError("encoding specified as %s, but image has incompatible type %s" % (encoding, cv_type)) - if cvim.dtype.byteorder == '>': - img_msg.is_bigendian = True - img_msg.data = cvim.tostring() - img_msg.step = len(img_msg.data) // img_msg.height - - return img_msg diff --git a/src/vision_opencv/cv_bridge/rosdoc.yaml b/src/vision_opencv/cv_bridge/rosdoc.yaml deleted file mode 100644 index 0efc7fd..0000000 --- a/src/vision_opencv/cv_bridge/rosdoc.yaml +++ /dev/null @@ -1,8 +0,0 @@ - - builder: doxygen - name: C++ API - output_dir: c++ - file_patterns: '*.c *.cpp *.h *.cc *.hh *.dox' - - builder: sphinx - name: Python API - output_dir: python - sphinx_root_dir: doc diff --git a/src/vision_opencv/cv_bridge/setup.py b/src/vision_opencv/cv_bridge/setup.py deleted file mode 100644 index 65ae95d..0000000 --- a/src/vision_opencv/cv_bridge/setup.py +++ /dev/null @@ -1,10 +0,0 @@ -#!/usr/bin/env python -from distutils.core import setup -from catkin_pkg.python_setup import generate_distutils_setup - -d = generate_distutils_setup() - -d['packages'] = ['cv_bridge'] -d['package_dir'] = {'' : 'python'} - -setup(**d) diff --git a/src/vision_opencv/cv_bridge/src/CMakeLists.txt b/src/vision_opencv/cv_bridge/src/CMakeLists.txt deleted file mode 100644 index d5d8ee2..0000000 --- a/src/vision_opencv/cv_bridge/src/CMakeLists.txt +++ /dev/null @@ -1,61 +0,0 @@ -# add library -include_directories(./) -add_library(${PROJECT_NAME} cv_bridge.cpp rgb_colors.cpp) -add_dependencies(${PROJECT_NAME} ${catkin_EXPORTED_TARGETS}) -target_link_libraries(${PROJECT_NAME} ${OpenCV_LIBRARIES} ${catkin_LIBRARIES}) - -install(TARGETS ${PROJECT_NAME} DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION}) - -if(NOT ANDROID) -# add a Boost Python library -find_package(PythonInterp REQUIRED) -find_package(PythonLibs "${PYTHON_VERSION_MAJOR}.${PYTHON_VERSION_MINOR}") - -#Get the numpy include directory from its python module -if(NOT PYTHON_NUMPY_INCLUDE_DIR) - execute_process(COMMAND ${PYTHON_EXECUTABLE} -c "import numpy; print(numpy.get_include())" - RESULT_VARIABLE PYTHON_NUMPY_PROCESS - OUTPUT_VARIABLE PYTHON_NUMPY_INCLUDE_DIR - OUTPUT_STRIP_TRAILING_WHITESPACE) - - if(PYTHON_NUMPY_PROCESS EQUAL 0) - file(TO_CMAKE_PATH "${PYTHON_NUMPY_INCLUDE_DIR}" PYTHON_NUMPY_INCLUDE_CMAKE_PATH) - set(PYTHON_NUMPY_INCLUDE_DIR ${PYTHON_NUMPY_INCLUDE_CMAKE_PATH} CACHE PATH "Numpy include directory") - else(PYTHON_NUMPY_PROCESS EQUAL 0) - message(SEND_ERROR "Could not determine the NumPy include directory, verify that NumPy was installed correctly.") - endif(PYTHON_NUMPY_PROCESS EQUAL 0) - endif(NOT PYTHON_NUMPY_INCLUDE_DIR) - -include_directories(${PYTHON_INCLUDE_PATH} ${Boost_INCLUDE_DIRS} ${PYTHON_NUMPY_INCLUDE_DIR}) - -if (PYTHON_VERSION_MAJOR VERSION_EQUAL 3) - add_definitions(-DPYTHON3) -endif() - -if (OpenCV_VERSION_MAJOR VERSION_EQUAL 3) 
-add_library(${PROJECT_NAME}_boost module.cpp module_opencv3.cpp) -else() -add_library(${PROJECT_NAME}_boost module.cpp module_opencv2.cpp) -endif() -target_link_libraries(${PROJECT_NAME}_boost ${Boost_LIBRARIES} - ${catkin_LIBRARIES} - ${PYTHON_LIBRARIES} - ${PROJECT_NAME} -) - -set_target_properties(${PROJECT_NAME}_boost PROPERTIES - LIBRARY_OUTPUT_DIRECTORY ${CATKIN_DEVEL_PREFIX}/${CATKIN_GLOBAL_PYTHON_DESTINATION}/${PROJECT_NAME}/boost/ - RUNTIME_OUTPUT_DIRECTORY ${CATKIN_DEVEL_PREFIX}/${CATKIN_GLOBAL_PYTHON_DESTINATION}/${PROJECT_NAME}/boost/ - PREFIX "" -) -if(APPLE) - set_target_properties(${PROJECT_NAME}_boost PROPERTIES - SUFFIX ".so") -endif() -if(MSVC) - set_target_properties(${PROJECT_NAME}_boost PROPERTIES - SUFFIX ".pyd") -endif() - -install(TARGETS ${PROJECT_NAME}_boost DESTINATION ${CATKIN_PACKAGE_PYTHON_DESTINATION}/boost/) -endif() diff --git a/src/vision_opencv/cv_bridge/src/boost/README b/src/vision_opencv/cv_bridge/src/boost/README deleted file mode 100644 index f3a3d3c..0000000 --- a/src/vision_opencv/cv_bridge/src/boost/README +++ /dev/null @@ -1,2 +0,0 @@ -This code is taken from Boost at https://github.com/boostorg/endian.git -We should remove this folder once Boost 1.58 or above is the default. diff --git a/src/vision_opencv/cv_bridge/src/boost/core/scoped_enum.hpp b/src/vision_opencv/cv_bridge/src/boost/core/scoped_enum.hpp deleted file mode 100644 index 78c548b..0000000 --- a/src/vision_opencv/cv_bridge/src/boost/core/scoped_enum.hpp +++ /dev/null @@ -1,192 +0,0 @@ -// scoped_enum.hpp ---------------------------------------------------------// - -// Copyright Beman Dawes, 2009 -// Copyright (C) 2011-2012 Vicente J. Botet Escriba -// Copyright (C) 2012 Anthony Williams - -// Distributed under the Boost Software License, Version 1.0. -// See http://www.boost.org/LICENSE_1_0.txt - -#ifndef BOOST_CORE_SCOPED_ENUM_HPP -#define BOOST_CORE_SCOPED_ENUM_HPP - -#include <boost/config.hpp> - -#ifdef BOOST_HAS_PRAGMA_ONCE -#pragma once -#endif - -namespace boost -{ - -#ifdef BOOST_NO_CXX11_SCOPED_ENUMS - - /** - * Meta-function to get the native enum type associated to an enum class or its emulation. - */ - template <typename EnumType> - struct native_type - { - /** - * The member typedef type names the native enum type associated to the scoped enum, - * which is itself if the compiler supports scoped enums, or EnumType::enum_type if it is an emulated scoped enum. - */ - typedef typename EnumType::enum_type type; - }; - - /** - * Casts a scoped enum to its underlying type. - * - * This function is useful when working with scoped enum classes, which don't implicitly convert to the underlying type. - * @param v A scoped enum. - * @returns The underlying value. - * @throws Nothing. - */ - template <typename UnderlyingType, typename EnumType> - UnderlyingType underlying_cast(EnumType v) - { - return v.get_underlying_value_(); - } - - /** - * Casts a scoped enum to its native enum type. - * - * This function is useful to make programs portable when the scoped enum emulation cannot be used where native enums can. - * - * EnumType the scoped enum type - * - * @param v A scoped enum. - * @returns The native enum value. - * @throws Nothing.
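For readers coming from Python: the emulation below exists because C++03 lacks `enum class`, and `underlying_cast`/`native_value` give uniform access whether or not the emulation is active. A loose analogue of "enum value vs. underlying value" (illustrative only, not a behavioral model of the macros):

    from enum import IntEnum

    class Order(IntEnum):      # scoped names with an integer underlying type
        big = 0
        little = 1

    def underlying_cast(e):    # analogue of boost::underlying_cast<int>(e)
        return int(e)

    assert Order.big.name == 'big' and underlying_cast(Order.little) == 1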
- */ - template - inline - typename EnumType::enum_type native_value(EnumType e) - { - return e.get_native_value_(); - } - -#else // BOOST_NO_CXX11_SCOPED_ENUMS - - template - struct native_type - { - typedef EnumType type; - }; - - template - UnderlyingType underlying_cast(EnumType v) - { - return static_cast(v); - } - - template - inline - EnumType native_value(EnumType e) - { - return e; - } - -#endif // BOOST_NO_CXX11_SCOPED_ENUMS -} - - -#ifdef BOOST_NO_CXX11_SCOPED_ENUMS - -#ifndef BOOST_NO_CXX11_EXPLICIT_CONVERSION_OPERATORS - -#define BOOST_SCOPED_ENUM_UT_DECLARE_CONVERSION_OPERATOR \ - explicit operator underlying_type() const BOOST_NOEXCEPT { return get_underlying_value_(); } - -#else - -#define BOOST_SCOPED_ENUM_UT_DECLARE_CONVERSION_OPERATOR - -#endif - -/** - * Start a declaration of a scoped enum. - * - * @param EnumType The new scoped enum. - * @param UnderlyingType The underlying type. - */ -#define BOOST_SCOPED_ENUM_UT_DECLARE_BEGIN(EnumType, UnderlyingType) \ - struct EnumType { \ - typedef void is_boost_scoped_enum_tag; \ - typedef UnderlyingType underlying_type; \ - EnumType() BOOST_NOEXCEPT {} \ - explicit EnumType(underlying_type v) BOOST_NOEXCEPT : v_(v) {} \ - underlying_type get_underlying_value_() const BOOST_NOEXCEPT { return v_; } \ - BOOST_SCOPED_ENUM_UT_DECLARE_CONVERSION_OPERATOR \ - private: \ - underlying_type v_; \ - typedef EnumType self_type; \ - public: \ - enum enum_type - -#define BOOST_SCOPED_ENUM_DECLARE_END2() \ - enum_type get_native_value_() const BOOST_NOEXCEPT { return enum_type(v_); } \ - friend bool operator ==(self_type lhs, self_type rhs) BOOST_NOEXCEPT { return enum_type(lhs.v_)==enum_type(rhs.v_); } \ - friend bool operator ==(self_type lhs, enum_type rhs) BOOST_NOEXCEPT { return enum_type(lhs.v_)==rhs; } \ - friend bool operator ==(enum_type lhs, self_type rhs) BOOST_NOEXCEPT { return lhs==enum_type(rhs.v_); } \ - friend bool operator !=(self_type lhs, self_type rhs) BOOST_NOEXCEPT { return enum_type(lhs.v_)!=enum_type(rhs.v_); } \ - friend bool operator !=(self_type lhs, enum_type rhs) BOOST_NOEXCEPT { return enum_type(lhs.v_)!=rhs; } \ - friend bool operator !=(enum_type lhs, self_type rhs) BOOST_NOEXCEPT { return lhs!=enum_type(rhs.v_); } \ - friend bool operator <(self_type lhs, self_type rhs) BOOST_NOEXCEPT { return enum_type(lhs.v_)(self_type lhs, self_type rhs) BOOST_NOEXCEPT { return enum_type(lhs.v_)>enum_type(rhs.v_); } \ - friend bool operator >(self_type lhs, enum_type rhs) BOOST_NOEXCEPT { return enum_type(lhs.v_)>rhs; } \ - friend bool operator >(enum_type lhs, self_type rhs) BOOST_NOEXCEPT { return lhs>enum_type(rhs.v_); } \ - friend bool operator >=(self_type lhs, self_type rhs) BOOST_NOEXCEPT { return enum_type(lhs.v_)>=enum_type(rhs.v_); } \ - friend bool operator >=(self_type lhs, enum_type rhs) BOOST_NOEXCEPT { return enum_type(lhs.v_)>=rhs; } \ - friend bool operator >=(enum_type lhs, self_type rhs) BOOST_NOEXCEPT { return lhs>=enum_type(rhs.v_); } \ - }; - -#define BOOST_SCOPED_ENUM_DECLARE_END(EnumType) \ - ; \ - EnumType(enum_type v) BOOST_NOEXCEPT : v_(v) {} \ - BOOST_SCOPED_ENUM_DECLARE_END2() - -/** - * Starts a declaration of a scoped enum with the default int underlying type. - * - * @param EnumType The new scoped enum. - */ -#define BOOST_SCOPED_ENUM_DECLARE_BEGIN(EnumType) \ - BOOST_SCOPED_ENUM_UT_DECLARE_BEGIN(EnumType,int) - -/** - * Name of the native enum type. - * - * @param EnumType The new scoped enum. 
- */ -#define BOOST_SCOPED_ENUM_NATIVE(EnumType) EnumType::enum_type -/** - * Forward declares an scoped enum. - * - * @param EnumType The scoped enum. - */ -#define BOOST_SCOPED_ENUM_FORWARD_DECLARE(EnumType) struct EnumType - -#else // BOOST_NO_CXX11_SCOPED_ENUMS - -#define BOOST_SCOPED_ENUM_UT_DECLARE_BEGIN(EnumType,UnderlyingType) enum class EnumType : UnderlyingType -#define BOOST_SCOPED_ENUM_DECLARE_BEGIN(EnumType) enum class EnumType -#define BOOST_SCOPED_ENUM_DECLARE_END2() -#define BOOST_SCOPED_ENUM_DECLARE_END(EnumType) ; - -#define BOOST_SCOPED_ENUM_NATIVE(EnumType) EnumType -#define BOOST_SCOPED_ENUM_FORWARD_DECLARE(EnumType) enum class EnumType - -#endif // BOOST_NO_CXX11_SCOPED_ENUMS - -// Deprecated macros -#define BOOST_SCOPED_ENUM_START(name) BOOST_SCOPED_ENUM_DECLARE_BEGIN(name) -#define BOOST_SCOPED_ENUM_END BOOST_SCOPED_ENUM_DECLARE_END2() -#define BOOST_SCOPED_ENUM(name) BOOST_SCOPED_ENUM_NATIVE(name) - -#endif // BOOST_CORE_SCOPED_ENUM_HPP diff --git a/src/vision_opencv/cv_bridge/src/boost/endian/conversion.hpp b/src/vision_opencv/cv_bridge/src/boost/endian/conversion.hpp deleted file mode 100644 index 7c145d9..0000000 --- a/src/vision_opencv/cv_bridge/src/boost/endian/conversion.hpp +++ /dev/null @@ -1,488 +0,0 @@ -// boost/endian/conversion.hpp -------------------------------------------------------// - -// Copyright Beman Dawes 2010, 2011, 2014 - -// Distributed under the Boost Software License, Version 1.0. -// http://www.boost.org/LICENSE_1_0.txt - -#ifndef BOOST_ENDIAN_CONVERSION_HPP -#define BOOST_ENDIAN_CONVERSION_HPP - -#include -#include -#include -#include -#include -#include -#include -#include // for memcpy - -//------------------------------------- synopsis ---------------------------------------// - -namespace boost -{ -namespace endian -{ - BOOST_SCOPED_ENUM_START(order) - { - big, little, -# ifdef BOOST_BIG_ENDIAN - native = big -# else - native = little -# endif - }; BOOST_SCOPED_ENUM_END - -//--------------------------------------------------------------------------------------// -// // -// return-by-value interfaces // -// suggested by Phil Endecott // -// // -// user-defined types (UDTs) // -// // -// All return-by-value conversion function templates are required to be implemented in // -// terms of an unqualified call to "endian_reverse(x)", a function returning the // -// value of x with endianness reversed. This provides a customization point for any // -// UDT that provides a "endian_reverse" free-function meeting the requirements. // -// It must be defined in the same namespace as the UDT itself so that it will be found // -// by argument dependent lookup (ADL). // -// // -//--------------------------------------------------------------------------------------// - - // customization for exact-length arithmetic types. See doc/conversion.html/#FAQ. - // Note: The omission of an overloads for the arithmetic type (typically long, or - // long long) not assigned to one of the exact length typedefs is a deliberate - // design decision. Such overloads would be non-portable and thus error prone. 
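`order::native` above is bound to `order::big` or `order::little` at preprocessing time from `BOOST_BIG_ENDIAN`/`BOOST_LITTLE_ENDIAN`; Python exposes the same fact at runtime, with `struct`'s `'>'`/`'<'` playing the role of the two orders (editorial sketch):

    import struct
    import sys

    native = 'big' if sys.byteorder == 'big' else 'little'
    # '>' / '<' correspond to order::big / order::little in the header above.
    assert struct.pack('>H', 1) == b'\x00\x01' and struct.pack('<H', 1) == b'\x01\x00'
    print('native order:', native)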
- - inline int8_t endian_reverse(int8_t x) BOOST_NOEXCEPT; - inline int16_t endian_reverse(int16_t x) BOOST_NOEXCEPT; - inline int32_t endian_reverse(int32_t x) BOOST_NOEXCEPT; - inline int64_t endian_reverse(int64_t x) BOOST_NOEXCEPT; - inline uint8_t endian_reverse(uint8_t x) BOOST_NOEXCEPT; - inline uint16_t endian_reverse(uint16_t x) BOOST_NOEXCEPT; - inline uint32_t endian_reverse(uint32_t x) BOOST_NOEXCEPT; - inline uint64_t endian_reverse(uint64_t x) BOOST_NOEXCEPT; - - // reverse byte order unless native endianness is big - template <class EndianReversible> - inline EndianReversible big_to_native(EndianReversible x) BOOST_NOEXCEPT; - // Returns: x if native endian order is big, otherwise endian_reverse(x) - template <class EndianReversible> - inline EndianReversible native_to_big(EndianReversible x) BOOST_NOEXCEPT; - // Returns: x if native endian order is big, otherwise endian_reverse(x) - - // reverse byte order unless native endianness is little - template <class EndianReversible> - inline EndianReversible little_to_native(EndianReversible x) BOOST_NOEXCEPT; - // Returns: x if native endian order is little, otherwise endian_reverse(x) - template <class EndianReversible> - inline EndianReversible native_to_little(EndianReversible x) BOOST_NOEXCEPT; - // Returns: x if native endian order is little, otherwise endian_reverse(x) - - // generic conditional reverse byte order - template <BOOST_SCOPED_ENUM(order) From, BOOST_SCOPED_ENUM(order) To, class EndianReversible> - inline EndianReversible conditional_reverse(EndianReversible from) BOOST_NOEXCEPT; - // Returns: from if From == To, otherwise endian_reverse(from). - // Remarks: The From == To test, and as a consequence which form the return takes, - // is determined at compile time. - - // runtime conditional reverse byte order - template <class EndianReversible> - inline EndianReversible conditional_reverse(EndianReversible from, - BOOST_SCOPED_ENUM(order) from_order, BOOST_SCOPED_ENUM(order) to_order) - BOOST_NOEXCEPT; - // Returns: from_order == to_order ? from : endian_reverse(from). - - //------------------------------------------------------------------------------------// - - - // Q: What happened to bswap, htobe, and the other synonym functions based on names - // popularized by BSD, OS X, and Linux? - // A: Turned out these may be implemented as macros on some systems. Ditto POSIX names - // for such functionality. Since macros would cause endless problems with functions - // of the same names, and these functions are just synonyms anyhow, they have been - // removed. - - - //------------------------------------------------------------------------------------// - // // - // reverse in place interfaces // - // // - // user-defined types (UDTs) // - // // - // All reverse in place function templates are required to be implemented in terms // - // of an unqualified call to "endian_reverse_inplace(x)", a function reversing // - // the endianness of x, which is a non-const reference. This provides a // - // customization point for any UDT that provides an "endian_reverse_inplace" // - // free-function meeting the requirements. The free-function must be declared in // - // the same namespace as the UDT itself so that it will be found by // - // argument-dependent lookup (ADL).
// - // // - //------------------------------------------------------------------------------------// - - // reverse in place - template - inline void endian_reverse_inplace(EndianReversible& x) BOOST_NOEXCEPT; - // Effects: x = endian_reverse(x) - - // reverse in place unless native endianness is big - template - inline void big_to_native_inplace(EndianReversibleInplace& x) BOOST_NOEXCEPT; - // Effects: none if native byte-order is big, otherwise endian_reverse_inplace(x) - template - inline void native_to_big_inplace(EndianReversibleInplace& x) BOOST_NOEXCEPT; - // Effects: none if native byte-order is big, otherwise endian_reverse_inplace(x) - - // reverse in place unless native endianness is little - template - inline void little_to_native_inplace(EndianReversibleInplace& x) BOOST_NOEXCEPT; - // Effects: none if native byte-order is little, otherwise endian_reverse_inplace(x); - template - inline void native_to_little_inplace(EndianReversibleInplace& x) BOOST_NOEXCEPT; - // Effects: none if native byte-order is little, otherwise endian_reverse_inplace(x); - - // generic conditional reverse in place - template - inline void conditional_reverse_inplace(EndianReversibleInplace& x) BOOST_NOEXCEPT; - - // runtime reverse in place - template - inline void conditional_reverse_inplace(EndianReversibleInplace& x, - BOOST_SCOPED_ENUM(order) from_order, BOOST_SCOPED_ENUM(order) to_order) - BOOST_NOEXCEPT; - -//----------------------------------- end synopsis -------------------------------------// - - namespace detail - { - // generic reverse function template implementation approach using std::reverse - // suggested by Mathias Gaunard. Primary motivation for inclusion is to have an - // independent implementation to test against. - - template - inline T std_endian_reverse(T x) BOOST_NOEXCEPT - { - T tmp(x); - std::reverse( - reinterpret_cast(&tmp), - reinterpret_cast(&tmp) + sizeof(T)); - return tmp; - } - - // conditional unaligned reverse copy, patterned after std::reverse_copy - template - inline void big_reverse_copy(T from, char* to) BOOST_NOEXCEPT; - template - inline void big_reverse_copy(const char* from, T& to) BOOST_NOEXCEPT; - template - inline void little_reverse_copy(T from, char* to) BOOST_NOEXCEPT; - template - inline void little_reverse_copy(const char* from, T& to) BOOST_NOEXCEPT; - } // namespace detail - -//--------------------------------------------------------------------------------------// -// // -// return-by-value implementation // -// // -// -- portable approach suggested by tymofey, with avoidance of undefined behavior // -// as suggested by Giovanni Piero Deretta, with a further refinement suggested // -// by Pyry Jahkola. 
// -// -- intrinsic approach suggested by reviewers, and by David Stone, who provided // -// his Boost licensed macro implementation (detail/intrinsic.hpp) // -// // -//--------------------------------------------------------------------------------------// - - inline int8_t endian_reverse(int8_t x) BOOST_NOEXCEPT - { - return x; - } - - inline int16_t endian_reverse(int16_t x) BOOST_NOEXCEPT - { -# ifdef BOOST_ENDIAN_NO_INTRINSICS - return (static_cast(x) << 8) - | (static_cast(x) >> 8); -# else - return BOOST_ENDIAN_INTRINSIC_BYTE_SWAP_2(static_cast(x)); -# endif - } - - inline int32_t endian_reverse(int32_t x) BOOST_NOEXCEPT - { -# ifdef BOOST_ENDIAN_NO_INTRINSICS - uint32_t step16; - step16 = static_cast(x) << 16 | static_cast(x) >> 16; - return - ((static_cast(step16) << 8) & 0xff00ff00) - | ((static_cast(step16) >> 8) & 0x00ff00ff); -# else - return BOOST_ENDIAN_INTRINSIC_BYTE_SWAP_4(static_cast(x)); -# endif - } - - inline int64_t endian_reverse(int64_t x) BOOST_NOEXCEPT - { -# ifdef BOOST_ENDIAN_NO_INTRINSICS - uint64_t step32, step16; - step32 = static_cast(x) << 32 | static_cast(x) >> 32; - step16 = (step32 & 0x0000FFFF0000FFFFULL) << 16 - | (step32 & 0xFFFF0000FFFF0000ULL) >> 16; - return static_cast((step16 & 0x00FF00FF00FF00FFULL) << 8 - | (step16 & 0xFF00FF00FF00FF00ULL) >> 8); -# else - return BOOST_ENDIAN_INTRINSIC_BYTE_SWAP_8(static_cast(x)); -# endif - } - - inline uint8_t endian_reverse(uint8_t x) BOOST_NOEXCEPT - { - return x; - } - - inline uint16_t endian_reverse(uint16_t x) BOOST_NOEXCEPT - { -# ifdef BOOST_ENDIAN_NO_INTRINSICS - return (x << 8) - | (x >> 8); -# else - return BOOST_ENDIAN_INTRINSIC_BYTE_SWAP_2(x); -# endif - } - - inline uint32_t endian_reverse(uint32_t x) BOOST_NOEXCEPT - { -# ifdef BOOST_ENDIAN_NO_INTRINSICS - uint32_t step16; - step16 = x << 16 | x >> 16; - return - ((step16 << 8) & 0xff00ff00) - | ((step16 >> 8) & 0x00ff00ff); -# else - return BOOST_ENDIAN_INTRINSIC_BYTE_SWAP_4(x); -# endif - } - - inline uint64_t endian_reverse(uint64_t x) BOOST_NOEXCEPT - { -# ifdef BOOST_ENDIAN_NO_INTRINSICS - uint64_t step32, step16; - step32 = x << 32 | x >> 32; - step16 = (step32 & 0x0000FFFF0000FFFFULL) << 16 - | (step32 & 0xFFFF0000FFFF0000ULL) >> 16; - return (step16 & 0x00FF00FF00FF00FFULL) << 8 - | (step16 & 0xFF00FF00FF00FF00ULL) >> 8; -# else - return BOOST_ENDIAN_INTRINSIC_BYTE_SWAP_8(x); -# endif - } - - template - inline EndianReversible big_to_native(EndianReversible x) BOOST_NOEXCEPT - { -# ifdef BOOST_BIG_ENDIAN - return x; -# else - return endian_reverse(x); -# endif - } - - template - inline EndianReversible native_to_big(EndianReversible x) BOOST_NOEXCEPT - { -# ifdef BOOST_BIG_ENDIAN - return x; -# else - return endian_reverse(x); -# endif - } - - template - inline EndianReversible little_to_native(EndianReversible x) BOOST_NOEXCEPT - { -# ifdef BOOST_LITTLE_ENDIAN - return x; -# else - return endian_reverse(x); -# endif - } - - template - inline EndianReversible native_to_little(EndianReversible x) BOOST_NOEXCEPT - { -# ifdef BOOST_LITTLE_ENDIAN - return x; -# else - return endian_reverse(x); -# endif - } - - namespace detail - { - // Primary template and specializations to support endian_reverse(). - // See rationale in endian_reverse() below. 
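The `BOOST_ENDIAN_NO_INTRINSICS` fallback above swaps 32 bits with one 16-bit rotate plus one masked 8-bit step. The trick checks out against plain byte reversal (editorial verification sketch):

    def swap32(x):
        step16 = ((x << 16) | (x >> 16)) & 0xFFFFFFFF   # swap the two half-words
        return (((step16 << 8) & 0xFF00FF00) |           # then swap the bytes
                ((step16 >> 8) & 0x00FF00FF))            # inside each half-word

    x = 0x11223344
    assert swap32(x) == int.from_bytes(x.to_bytes(4, 'big'), 'little') == 0x44332211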
- template - class value_converter ; // primary template - template class value_converter - {public: T operator()(T x) BOOST_NOEXCEPT {return x;}}; - template class value_converter - {public: T operator()(T x) BOOST_NOEXCEPT {return x;}}; - template class value_converter - {public: T operator()(T x) BOOST_NOEXCEPT {return endian_reverse(x);}}; - template class value_converter - {public: T operator()(T x) BOOST_NOEXCEPT {return endian_reverse(x);}}; - } - - // generic conditional reverse - template - inline EndianReversible conditional_reverse(EndianReversible from) BOOST_NOEXCEPT { - // work around lack of function template partial specialization by instantiating - // a function object of a class that is partially specialized on the two order - // template parameters, and then calling its operator(). - detail::value_converter tmp; - return tmp(from); - } - - // runtime conditional reverse - template - inline EndianReversible conditional_reverse(EndianReversible from, - BOOST_SCOPED_ENUM(order) from_order, BOOST_SCOPED_ENUM(order) to_order) BOOST_NOEXCEPT - { - return from_order == to_order ? from : endian_reverse(from); - } - -//--------------------------------------------------------------------------------------// -// reverse-in-place implementation // -//--------------------------------------------------------------------------------------// - - // reverse in place - template - inline void endian_reverse_inplace(EndianReversible& x) BOOST_NOEXCEPT - { - x = endian_reverse(x); - } - - template -# ifdef BOOST_BIG_ENDIAN - inline void big_to_native_inplace(EndianReversibleInplace&) BOOST_NOEXCEPT {} -# else - inline void big_to_native_inplace(EndianReversibleInplace& x) BOOST_NOEXCEPT - { endian_reverse_inplace(x); } -# endif - template -# ifdef BOOST_BIG_ENDIAN - inline void native_to_big_inplace(EndianReversibleInplace&) BOOST_NOEXCEPT {} -# else - inline void native_to_big_inplace(EndianReversibleInplace& x) BOOST_NOEXCEPT - { - endian_reverse_inplace(x); - } -# endif - - template -# ifdef BOOST_LITTLE_ENDIAN - inline void little_to_native_inplace(EndianReversibleInplace&) BOOST_NOEXCEPT {} -# else - inline void little_to_native_inplace(EndianReversibleInplace& x) BOOST_NOEXCEPT - { endian_reverse_inplace(x); } -# endif - template -# ifdef BOOST_LITTLE_ENDIAN - inline void native_to_little_inplace(EndianReversibleInplace&) BOOST_NOEXCEPT {} -# else - inline void native_to_little_inplace(EndianReversibleInplace& x) BOOST_NOEXCEPT - { - endian_reverse_inplace(x); - } -# endif - - namespace detail - { - // Primary template and specializations support generic - // endian_reverse_inplace(). - // See rationale in endian_reverse_inplace() below. - template - class converter; // primary template - template class converter - {public: void operator()(T&) BOOST_NOEXCEPT {/*no effect*/}}; - template class converter - {public: void operator()(T&) BOOST_NOEXCEPT {/*no effect*/}}; - template class converter - {public: void operator()(T& x) BOOST_NOEXCEPT { endian_reverse_inplace(x); }}; - template class converter - {public: void operator()(T& x) BOOST_NOEXCEPT { endian_reverse_inplace(x); }}; - } // namespace detail - - // generic conditional reverse in place - template - inline void conditional_reverse_inplace(EndianReversibleInplace& x) BOOST_NOEXCEPT - { - // work around lack of function template partial specialization by instantiating - // a function object of a class that is partially specialized on the two order - // template parameters, and then calling its operator(). 
- detail::converter tmp; - tmp(x); // call operator () - } - - // runtime reverse in place - template - inline void conditional_reverse_inplace(EndianReversibleInplace& x, - BOOST_SCOPED_ENUM(order) from_order, BOOST_SCOPED_ENUM(order) to_order) - BOOST_NOEXCEPT - { - if (from_order != to_order) - endian_reverse_inplace(x); - } - - - namespace detail - { - template - inline void big_reverse_copy(T from, char* to) BOOST_NOEXCEPT - { -# ifdef BOOST_BIG_ENDIAN - std::memcpy(to, reinterpret_cast(&from), sizeof(T)); -# else - std::reverse_copy(reinterpret_cast(&from), - reinterpret_cast(&from) + sizeof(T), to); -# endif - } - template - inline void big_reverse_copy(const char* from, T& to) BOOST_NOEXCEPT - { -# ifdef BOOST_BIG_ENDIAN - std::memcpy(reinterpret_cast(&to), from, sizeof(T)); -# else - std::reverse_copy(from, from + sizeof(T), reinterpret_cast(&to)); -# endif - } - template - inline void little_reverse_copy(T from, char* to) BOOST_NOEXCEPT - { -# ifdef BOOST_LITTLE_ENDIAN - std::memcpy(to, reinterpret_cast(&from), sizeof(T)); -# else - std::reverse_copy(reinterpret_cast(&from), - reinterpret_cast(&from) + sizeof(T), to); -# endif - } - template - inline void little_reverse_copy(const char* from, T& to) BOOST_NOEXCEPT - { -# ifdef BOOST_LITTLE_ENDIAN - std::memcpy(reinterpret_cast(&to), from, sizeof(T)); -# else - std::reverse_copy(from, from + sizeof(T), reinterpret_cast(&to)); -# endif - } - } // namespace detail -} // namespace endian -} // namespace boost - -#endif // BOOST_ENDIAN_CONVERSION_HPP diff --git a/src/vision_opencv/cv_bridge/src/boost/endian/detail/intrinsic.hpp b/src/vision_opencv/cv_bridge/src/boost/endian/detail/intrinsic.hpp deleted file mode 100644 index 6ead681..0000000 --- a/src/vision_opencv/cv_bridge/src/boost/endian/detail/intrinsic.hpp +++ /dev/null @@ -1,64 +0,0 @@ -// endian/detail/intrinsic.hpp -------------------------------------------------------// - -// Copyright (C) 2012 David Stone -// Copyright Beman Dawes 2013 - -// Distributed under the Boost Software License, Version 1.0. -// http://www.boost.org/LICENSE_1_0.txt - -#ifndef BOOST_ENDIAN_INTRINSIC_HPP -#define BOOST_ENDIAN_INTRINSIC_HPP - -// Allow user to force BOOST_ENDIAN_NO_INTRINSICS in case they aren't available for a -// particular platform/compiler combination. Please report such platform/compiler -// combinations to the Boost mailing list. -#ifndef BOOST_ENDIAN_NO_INTRINSICS - -#ifndef __has_builtin // Optional of course - #define __has_builtin(x) 0 // Compatibility with non-clang compilers -#endif - -// GCC and Clang recent versions provide intrinsic byte swaps via builtins -#if (defined(__clang__) && __has_builtin(__builtin_bswap32) && __has_builtin(__builtin_bswap64)) \ - || (defined(__GNUC__ ) && \ - (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3))) -# define BOOST_ENDIAN_INTRINSIC_MSG "__builtin_bswap16, etc." 
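`big_reverse_copy` above writes a value into a char buffer in big-endian order regardless of host (a straight `memcpy` when the native order is already big, a reversed copy otherwise). `struct` produces the same wire format in Python (sketch):

    import struct

    x = 0x0A0B0C0D
    wire = struct.pack('>I', x)          # what big_reverse_copy(x, to) produces
    assert wire == bytes((0x0A, 0x0B, 0x0C, 0x0D))
    assert struct.unpack('>I', wire)[0] == x   # the read-back direction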
-// prior to 4.8, gcc did not provide __builtin_bswap16 on some platforms so we emulate it -// see http://gcc.gnu.org/bugzilla/show_bug.cgi?id=52624 -// Clang has a similar problem, but their feature test macros make it easier to detect -# if (defined(__clang__) && __has_builtin(__builtin_bswap16)) \ - || (defined(__GNUC__) &&(__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8))) -# define BOOST_ENDIAN_INTRINSIC_BYTE_SWAP_2(x) __builtin_bswap16(x) -# else -# define BOOST_ENDIAN_INTRINSIC_BYTE_SWAP_2(x) __builtin_bswap32((x) << 16) -# endif -# define BOOST_ENDIAN_INTRINSIC_BYTE_SWAP_4(x) __builtin_bswap32(x) -# define BOOST_ENDIAN_INTRINSIC_BYTE_SWAP_8(x) __builtin_bswap64(x) - -// Linux systems provide the byteswap.h header, with -#elif defined(__linux__) -// don't check for obsolete forms defined(linux) and defined(__linux) on the theory that -// compilers that predefine only these are so old that byteswap.h probably isn't present. -# define BOOST_ENDIAN_INTRINSIC_MSG "byteswap.h bswap_16, etc." -# include <byteswap.h> -# define BOOST_ENDIAN_INTRINSIC_BYTE_SWAP_2(x) bswap_16(x) -# define BOOST_ENDIAN_INTRINSIC_BYTE_SWAP_4(x) bswap_32(x) -# define BOOST_ENDIAN_INTRINSIC_BYTE_SWAP_8(x) bswap_64(x) - -#elif defined(_MSC_VER) -// Microsoft documents these as being compatible since Windows 95 and specifically -// lists runtime library support since Visual Studio 2003 (aka 7.1). -# define BOOST_ENDIAN_INTRINSIC_MSG "cstdlib _byteswap_ushort, etc." -# include <cstdlib> -# define BOOST_ENDIAN_INTRINSIC_BYTE_SWAP_2(x) _byteswap_ushort(x) -# define BOOST_ENDIAN_INTRINSIC_BYTE_SWAP_4(x) _byteswap_ulong(x) -# define BOOST_ENDIAN_INTRINSIC_BYTE_SWAP_8(x) _byteswap_uint64(x) -#else -# define BOOST_ENDIAN_NO_INTRINSICS -# define BOOST_ENDIAN_INTRINSIC_MSG "no byte swap intrinsics" -#endif - -#elif !defined(BOOST_ENDIAN_INTRINSIC_MSG) -# define BOOST_ENDIAN_INTRINSIC_MSG "no byte swap intrinsics" -#endif // BOOST_ENDIAN_NO_INTRINSICS -#endif // BOOST_ENDIAN_INTRINSIC_HPP diff --git a/src/vision_opencv/cv_bridge/src/boost/predef/detail/_cassert.h b/src/vision_opencv/cv_bridge/src/boost/predef/detail/_cassert.h deleted file mode 100644 index 940e944..0000000 --- a/src/vision_opencv/cv_bridge/src/boost/predef/detail/_cassert.h +++ /dev/null @@ -1,17 +0,0 @@ -/* -Copyright Rene Rivera 2011-2012 -Distributed under the Boost Software License, Version 1.0. -(See accompanying file LICENSE_1_0.txt or copy at -http://www.boost.org/LICENSE_1_0.txt) -*/ - -#ifndef BOOST_PREDEF_DETAIL__CASSERT_H -#define BOOST_PREDEF_DETAIL__CASSERT_H - -#if defined(__cplusplus) -#include <cassert> -#else -#include <assert.h> -#endif - -#endif diff --git a/src/vision_opencv/cv_bridge/src/boost/predef/detail/endian_compat.h b/src/vision_opencv/cv_bridge/src/boost/predef/detail/endian_compat.h deleted file mode 100644 index 7725e68..0000000 --- a/src/vision_opencv/cv_bridge/src/boost/predef/detail/endian_compat.h +++ /dev/null @@ -1,26 +0,0 @@ -/* -Copyright Rene Rivera 2013 -Distributed under the Boost Software License, Version 1.0.
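The gcc-before-4.8 workaround above emulates `__builtin_bswap16(x)` as `__builtin_bswap32((x) << 16)`; the identity is cheap to verify exhaustively (editorial sketch):

    def bswap32(x):
        return int.from_bytes(x.to_bytes(4, 'big'), 'little')

    def bswap16(x):
        return int.from_bytes(x.to_bytes(2, 'big'), 'little')

    # bswap32(x << 16) keeps only the two interesting bytes, already swapped
    assert all(bswap32(x << 16) == bswap16(x) for x in range(1 << 16))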
-(See accompanying file LICENSE_1_0.txt or copy at -http://www.boost.org/LICENSE_1_0.txt) -*/ - -#ifndef BOOST_PREDEF_DETAIL_ENDIAN_COMPAT_H -#define BOOST_PREDEF_DETAIL_ENDIAN_COMPAT_H - -#include <boost/predef/other/endian.h> - -#if BOOST_ENDIAN_BIG_BYTE -# define BOOST_BIG_ENDIAN -# define BOOST_BYTE_ORDER 4321 -#endif -#if BOOST_ENDIAN_LITTLE_BYTE -# define BOOST_LITTLE_ENDIAN -# define BOOST_BYTE_ORDER 1234 -#endif -#if BOOST_ENDIAN_LITTLE_WORD -# define BOOST_PDP_ENDIAN -# define BOOST_BYTE_ORDER 2134 -#endif - -#endif diff --git a/src/vision_opencv/cv_bridge/src/boost/predef/detail/test.h b/src/vision_opencv/cv_bridge/src/boost/predef/detail/test.h deleted file mode 100644 index 546a9e4..0000000 --- a/src/vision_opencv/cv_bridge/src/boost/predef/detail/test.h +++ /dev/null @@ -1,17 +0,0 @@ -/* -Copyright Rene Rivera 2011-2012 -Distributed under the Boost Software License, Version 1.0. -(See accompanying file LICENSE_1_0.txt or copy at -http://www.boost.org/LICENSE_1_0.txt) -*/ - -#ifndef BOOST_PREDEF_DETAIL_TEST_H -#define BOOST_PREDEF_DETAIL_TEST_H - -#if !defined(BOOST_PREDEF_INTERNAL_GENERATE_TESTS) - -#define BOOST_PREDEF_DECLARE_TEST(x,s) - -#endif - -#endif diff --git a/src/vision_opencv/cv_bridge/src/boost/predef/library/c/_prefix.h b/src/vision_opencv/cv_bridge/src/boost/predef/library/c/_prefix.h deleted file mode 100644 index 12bcb0f..0000000 --- a/src/vision_opencv/cv_bridge/src/boost/predef/library/c/_prefix.h +++ /dev/null @@ -1,13 +0,0 @@ -/* -Copyright Rene Rivera 2008-2013 -Distributed under the Boost Software License, Version 1.0. -(See accompanying file LICENSE_1_0.txt or copy at -http://www.boost.org/LICENSE_1_0.txt) -*/ - -#ifndef BOOST_PREDEF_LIBRARY_C__PREFIX_H -#define BOOST_PREDEF_LIBRARY_C__PREFIX_H - -#include <boost/predef/detail/_cassert.h> - -#endif diff --git a/src/vision_opencv/cv_bridge/src/boost/predef/library/c/gnu.h b/src/vision_opencv/cv_bridge/src/boost/predef/library/c/gnu.h deleted file mode 100644 index 9e4ca89..0000000 --- a/src/vision_opencv/cv_bridge/src/boost/predef/library/c/gnu.h +++ /dev/null @@ -1,61 +0,0 @@ -/* -Copyright Rene Rivera 2008-2015 -Distributed under the Boost Software License, Version 1.0. -(See accompanying file LICENSE_1_0.txt or copy at -http://www.boost.org/LICENSE_1_0.txt) -*/ - -#ifndef BOOST_PREDEF_LIBRARY_C_GNU_H -#define BOOST_PREDEF_LIBRARY_C_GNU_H - -#include <boost/predef/version_number.h> -#include <boost/predef/make.h> - -#include <boost/predef/library/c/_prefix.h> - -#if defined(__STDC__) -#include <stddef.h> -#elif defined(__cplusplus) -#include <cstddef> -#endif - -/*` -[heading `BOOST_LIB_C_GNU`] - -[@http://en.wikipedia.org/wiki/Glibc GNU glibc] Standard C library. -Version number available as major and minor.
- -[table - [[__predef_symbol__] [__predef_version__]] - - [[`__GLIBC__`] [__predef_detection__]] - [[`__GNU_LIBRARY__`] [__predef_detection__]] - - [[`__GLIBC__`, `__GLIBC_MINOR__`] [V.R.0]] - [[`__GNU_LIBRARY__`, `__GNU_LIBRARY_MINOR__`] [V.R.0]] - ] - */ - -#define BOOST_LIB_C_GNU BOOST_VERSION_NUMBER_NOT_AVAILABLE - -#if defined(__GLIBC__) || defined(__GNU_LIBRARY__) -# undef BOOST_LIB_C_GNU -# if defined(__GLIBC__) -# define BOOST_LIB_C_GNU \ - BOOST_VERSION_NUMBER(__GLIBC__,__GLIBC_MINOR__,0) -# else -# define BOOST_LIB_C_GNU \ - BOOST_VERSION_NUMBER(__GNU_LIBRARY__,__GNU_LIBRARY_MINOR__,0) -# endif -#endif - -#if BOOST_LIB_C_GNU -# define BOOST_LIB_C_GNU_AVAILABLE -#endif - -#define BOOST_LIB_C_GNU_NAME "GNU" - -#endif - -#include -BOOST_PREDEF_DECLARE_TEST(BOOST_LIB_C_GNU,BOOST_LIB_C_GNU_NAME) diff --git a/src/vision_opencv/cv_bridge/src/boost/predef/make.h b/src/vision_opencv/cv_bridge/src/boost/predef/make.h deleted file mode 100644 index 4f2f9ee..0000000 --- a/src/vision_opencv/cv_bridge/src/boost/predef/make.h +++ /dev/null @@ -1,89 +0,0 @@ -/* -Copyright Rene Rivera 2008-2015 -Distributed under the Boost Software License, Version 1.0. -(See accompanying file LICENSE_1_0.txt or copy at -http://www.boost.org/LICENSE_1_0.txt) -*/ -#include - -#ifndef BOOST_PREDEF_MAKE_H -#define BOOST_PREDEF_MAKE_H - -/* -Shorthands for the common version number formats used by vendors... -*/ - -/*` -[heading `BOOST_PREDEF_MAKE_..` macros] - -These set of macros decompose common vendor version number -macros which are composed version, revision, and patch digits. -The naming convention indicates: - -* The base of the specified version number. "`BOOST_PREDEF_MAKE_0X`" for - hexadecimal digits, and "`BOOST_PREDEF_MAKE_10`" for decimal digits. -* The format of the vendor version number. Where "`V`" indicates the version digits, - "`R`" indicates the revision digits, "`P`" indicates the patch digits, and "`0`" - indicates an ignored digit. 
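`BOOST_LIB_C_GNU` above packs `__GLIBC__.__GLIBC_MINOR__.0` into a version number. The same information is available at runtime from the Python standard library (sketch; returns empty strings when the interpreter is not linked against glibc):

    import platform

    libc, version = platform.libc_ver()   # e.g. ('glibc', '2.31') on GNU systems
    print(libc, version)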
- -Macros are: -*/ -/*` `BOOST_PREDEF_MAKE_0X_VRP(V)` */ -#define BOOST_PREDEF_MAKE_0X_VRP(V) BOOST_VERSION_NUMBER((V&0xF00)>>8,(V&0xF0)>>4,(V&0xF)) -/*` `BOOST_PREDEF_MAKE_0X_VVRP(V)` */ -#define BOOST_PREDEF_MAKE_0X_VVRP(V) BOOST_VERSION_NUMBER((V&0xFF00)>>8,(V&0xF0)>>4,(V&0xF)) -/*` `BOOST_PREDEF_MAKE_0X_VRPP(V)` */ -#define BOOST_PREDEF_MAKE_0X_VRPP(V) BOOST_VERSION_NUMBER((V&0xF000)>>12,(V&0xF00)>>8,(V&0xFF)) -/*` `BOOST_PREDEF_MAKE_0X_VVRR(V)` */ -#define BOOST_PREDEF_MAKE_0X_VVRR(V) BOOST_VERSION_NUMBER((V&0xFF00)>>8,(V&0xFF),0) -/*` `BOOST_PREDEF_MAKE_0X_VRRPPPP(V)` */ -#define BOOST_PREDEF_MAKE_0X_VRRPPPP(V) BOOST_VERSION_NUMBER((V&0xF000000)>>24,(V&0xFF0000)>>16,(V&0xFFFF)) -/*` `BOOST_PREDEF_MAKE_0X_VVRRP(V)` */ -#define BOOST_PREDEF_MAKE_0X_VVRRP(V) BOOST_VERSION_NUMBER((V&0xFF000)>>12,(V&0xFF0)>>4,(V&0xF)) -/*` `BOOST_PREDEF_MAKE_0X_VRRPP000(V)` */ -#define BOOST_PREDEF_MAKE_0X_VRRPP000(V) BOOST_VERSION_NUMBER((V&0xF0000000)>>28,(V&0xFF00000)>>20,(V&0xFF000)>>12) -/*` `BOOST_PREDEF_MAKE_0X_VVRRPP(V)` */ -#define BOOST_PREDEF_MAKE_0X_VVRRPP(V) BOOST_VERSION_NUMBER((V&0xFF0000)>>16,(V&0xFF00)>>8,(V&0xFF)) -/*` `BOOST_PREDEF_MAKE_10_VPPP(V)` */ -#define BOOST_PREDEF_MAKE_10_VPPP(V) BOOST_VERSION_NUMBER(((V)/1000)%10,0,(V)%1000) -/*` `BOOST_PREDEF_MAKE_10_VRP(V)` */ -#define BOOST_PREDEF_MAKE_10_VRP(V) BOOST_VERSION_NUMBER(((V)/100)%10,((V)/10)%10,(V)%10) -/*` `BOOST_PREDEF_MAKE_10_VRP000(V)` */ -#define BOOST_PREDEF_MAKE_10_VRP000(V) BOOST_VERSION_NUMBER(((V)/100000)%10,((V)/10000)%10,((V)/1000)%10) -/*` `BOOST_PREDEF_MAKE_10_VRPP(V)` */ -#define BOOST_PREDEF_MAKE_10_VRPP(V) BOOST_VERSION_NUMBER(((V)/1000)%10,((V)/100)%10,(V)%100) -/*` `BOOST_PREDEF_MAKE_10_VRR(V)` */ -#define BOOST_PREDEF_MAKE_10_VRR(V) BOOST_VERSION_NUMBER(((V)/100)%10,(V)%100,0) -/*` `BOOST_PREDEF_MAKE_10_VRRPP(V)` */ -#define BOOST_PREDEF_MAKE_10_VRRPP(V) BOOST_VERSION_NUMBER(((V)/10000)%10,((V)/100)%100,(V)%100) -/*` `BOOST_PREDEF_MAKE_10_VRR000(V)` */ -#define BOOST_PREDEF_MAKE_10_VRR000(V) BOOST_VERSION_NUMBER(((V)/100000)%10,((V)/1000)%100,0) -/*` `BOOST_PREDEF_MAKE_10_VV00(V)` */ -#define BOOST_PREDEF_MAKE_10_VV00(V) BOOST_VERSION_NUMBER(((V)/100)%100,0,0) -/*` `BOOST_PREDEF_MAKE_10_VVRR(V)` */ -#define BOOST_PREDEF_MAKE_10_VVRR(V) BOOST_VERSION_NUMBER(((V)/100)%100,(V)%100,0) -/*` `BOOST_PREDEF_MAKE_10_VVRRPP(V)` */ -#define BOOST_PREDEF_MAKE_10_VVRRPP(V) BOOST_VERSION_NUMBER(((V)/10000)%100,((V)/100)%100,(V)%100) -/*` `BOOST_PREDEF_MAKE_10_VVRR0PP00(V)` */ -#define BOOST_PREDEF_MAKE_10_VVRR0PP00(V) BOOST_VERSION_NUMBER(((V)/10000000)%100,((V)/100000)%100,((V)/100)%100) -/*` `BOOST_PREDEF_MAKE_10_VVRR0PPPP(V)` */ -#define BOOST_PREDEF_MAKE_10_VVRR0PPPP(V) BOOST_VERSION_NUMBER(((V)/10000000)%100,((V)/100000)%100,(V)%10000) -/*` `BOOST_PREDEF_MAKE_10_VVRR00PP00(V)` */ -#define BOOST_PREDEF_MAKE_10_VVRR00PP00(V) BOOST_VERSION_NUMBER(((V)/100000000)%100,((V)/1000000)%100,((V)/100)%100) -/*` -[heading `BOOST_PREDEF_MAKE_*..` date macros] - -Date decomposition macros return a date in the relative to the 1970 -Epoch date. If the month is not available, January 1st is used as the month and day. -If the day is not available, but the month is, the 1st of the month is used as the day. 
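The decimal `BOOST_PREDEF_MAKE_10_*` macros above are pure digit-group arithmetic; for instance `BOOST_PREDEF_MAKE_10_VVRRPP` in Python (the sample value is made up for illustration):

    def make_10_vvrrpp(v):
        # BOOST_PREDEF_MAKE_10_VVRRPP(V): two decimal digits each of
        # version, revision, patch -> (V, R, P)
        return (v // 10000) % 100, (v // 100) % 100, v % 100

    assert make_10_vvrrpp(120304) == (12, 3, 4)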
-*/ -/*` `BOOST_PREDEF_MAKE_DATE(Y,M,D)` */ -#define BOOST_PREDEF_MAKE_DATE(Y,M,D) BOOST_VERSION_NUMBER((Y)%10000-1970,(M)%100,(D)%100) -/*` `BOOST_PREDEF_MAKE_YYYYMMDD(V)` */ -#define BOOST_PREDEF_MAKE_YYYYMMDD(V) BOOST_PREDEF_MAKE_DATE(((V)/10000)%10000,((V)/100)%100,(V)%100) -/*` `BOOST_PREDEF_MAKE_YYYY(V)` */ -#define BOOST_PREDEF_MAKE_YYYY(V) BOOST_PREDEF_MAKE_DATE(V,1,1) -/*` `BOOST_PREDEF_MAKE_YYYYMM(V)` */ -#define BOOST_PREDEF_MAKE_YYYYMM(V) BOOST_PREDEF_MAKE_DATE((V)/100,(V)%100,1) - -#endif diff --git a/src/vision_opencv/cv_bridge/src/boost/predef/os/android.h b/src/vision_opencv/cv_bridge/src/boost/predef/os/android.h deleted file mode 100644 index 00836e7..0000000 --- a/src/vision_opencv/cv_bridge/src/boost/predef/os/android.h +++ /dev/null @@ -1,45 +0,0 @@ -/* -Copyright Rene Rivera 2015 -Distributed under the Boost Software License, Version 1.0. -(See accompanying file LICENSE_1_0.txt or copy at -http://www.boost.org/LICENSE_1_0.txt) -*/ - -#ifndef BOOST_PREDEF_OS_ADROID_H -#define BOOST_PREDEF_OS_ADROID_H - -#include -#include - -/*` -[heading `BOOST_OS_ANDROID`] - -[@http://en.wikipedia.org/wiki/Android_%28operating_system%29 Android] operating system. - -[table - [[__predef_symbol__] [__predef_version__]] - - [[`__ANDROID__`] [__predef_detection__]] - ] - */ - -#define BOOST_OS_ANDROID BOOST_VERSION_NUMBER_NOT_AVAILABLE - -#if !defined(BOOST_PREDEF_DETAIL_OS_DETECTED) && ( \ - defined(__ANDROID__) \ - ) -# undef BOOST_OS_ANDROID -# define BOOST_OS_ANDROID BOOST_VERSION_NUMBER_AVAILABLE -#endif - -#if BOOST_OS_ANDROID -# define BOOST_OS_ANDROID_AVAILABLE -# include -#endif - -#define BOOST_OS_ANDROID_NAME "Android" - -#endif - -#include -BOOST_PREDEF_DECLARE_TEST(BOOST_OS_ANDROID,BOOST_OS_ANDROID_NAME) diff --git a/src/vision_opencv/cv_bridge/src/boost/predef/os/bsd.h b/src/vision_opencv/cv_bridge/src/boost/predef/os/bsd.h deleted file mode 100644 index fad9aed..0000000 --- a/src/vision_opencv/cv_bridge/src/boost/predef/os/bsd.h +++ /dev/null @@ -1,103 +0,0 @@ -/* -Copyright Rene Rivera 2008-2015 -Distributed under the Boost Software License, Version 1.0. -(See accompanying file LICENSE_1_0.txt or copy at -http://www.boost.org/LICENSE_1_0.txt) -*/ - -#ifndef BOOST_PREDEF_OS_BSD_H -#define BOOST_PREDEF_OS_BSD_H - -/* Special case: OSX will define BSD predefs if the sys/param.h - * header is included. We can guard against that, but only if we - * detect OSX first. Hence we will force include OSX detection - * before doing any BSD detection. - */ -#include - -#include -#include - -/*` -[heading `BOOST_OS_BSD`] - -[@http://en.wikipedia.org/wiki/Berkeley_Software_Distribution BSD] operating system. - -BSD has various branch operating systems possible and each detected -individually. This detects the following variations and sets a specific -version number macro to match: - -* `BOOST_OS_BSD_DRAGONFLY` [@http://en.wikipedia.org/wiki/DragonFly_BSD DragonFly BSD] -* `BOOST_OS_BSD_FREE` [@http://en.wikipedia.org/wiki/Freebsd FreeBSD] -* `BOOST_OS_BSD_BSDI` [@http://en.wikipedia.org/wiki/BSD/OS BSDi BSD/OS] -* `BOOST_OS_BSD_NET` [@http://en.wikipedia.org/wiki/Netbsd NetBSD] -* `BOOST_OS_BSD_OPEN` [@http://en.wikipedia.org/wiki/Openbsd OpenBSD] - -[note The general `BOOST_OS_BSD` is set in all cases to indicate some form -of BSD. If the above variants is detected the corresponding macro is also set.] 
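The date macros above encode a build date as a version number with the year counted from 1970; `BOOST_PREDEF_MAKE_YYYYMMDD` expressed in Python (sample value illustrative):

    def make_date(y, m, d):
        # BOOST_PREDEF_MAKE_DATE(Y,M,D): years since 1970, month, day
        return (y % 10000 - 1970, m % 100, d % 100)

    def make_yyyymmdd(v):
        return make_date((v // 10000) % 10000, (v // 100) % 100, v % 100)

    assert make_yyyymmdd(20150801) == (45, 8, 1)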
- -[table - [[__predef_symbol__] [__predef_version__]] - - [[`BSD`] [__predef_detection__]] - [[`_SYSTYPE_BSD`] [__predef_detection__]] - - [[`BSD4_2`] [4.2.0]] - [[`BSD4_3`] [4.3.0]] - [[`BSD4_4`] [4.4.0]] - [[`BSD`] [V.R.0]] - ] - */ - -#include -#include -#include -#include -#include - -#ifndef BOOST_OS_BSD -#define BOOST_OS_BSD BOOST_VERSION_NUMBER_NOT_AVAILABLE -#endif - -#if !defined(BOOST_PREDEF_DETAIL_OS_DETECTED) && ( \ - defined(BSD) || \ - defined(_SYSTYPE_BSD) \ - ) -# undef BOOST_OS_BSD -# include -# if !defined(BOOST_OS_BSD) && defined(BSD4_4) -# define BOOST_OS_BSD BOOST_VERSION_NUMBER(4,4,0) -# endif -# if !defined(BOOST_OS_BSD) && defined(BSD4_3) -# define BOOST_OS_BSD BOOST_VERSION_NUMBER(4,3,0) -# endif -# if !defined(BOOST_OS_BSD) && defined(BSD4_2) -# define BOOST_OS_BSD BOOST_VERSION_NUMBER(4,2,0) -# endif -# if !defined(BOOST_OS_BSD) && defined(BSD) -# define BOOST_OS_BSD BOOST_PREDEF_MAKE_10_VVRR(BSD) -# endif -# if !defined(BOOST_OS_BSD) -# define BOOST_OS_BSD BOOST_VERSION_NUMBER_AVAILABLE -# endif -#endif - -#if BOOST_OS_BSD -# define BOOST_OS_BSD_AVAILABLE -# include -#endif - -#define BOOST_OS_BSD_NAME "BSD" - -#else - -#include -#include -#include -#include -#include - -#endif - -#include -BOOST_PREDEF_DECLARE_TEST(BOOST_OS_BSD,BOOST_OS_BSD_NAME) diff --git a/src/vision_opencv/cv_bridge/src/boost/predef/os/bsd/bsdi.h b/src/vision_opencv/cv_bridge/src/boost/predef/os/bsd/bsdi.h deleted file mode 100644 index afdcd3e..0000000 --- a/src/vision_opencv/cv_bridge/src/boost/predef/os/bsd/bsdi.h +++ /dev/null @@ -1,48 +0,0 @@ -/* -Copyright Rene Rivera 2012-2015 -Distributed under the Boost Software License, Version 1.0. -(See accompanying file LICENSE_1_0.txt or copy at -http://www.boost.org/LICENSE_1_0.txt) -*/ - -#ifndef BOOST_PREDEF_OS_BSD_BSDI_H -#define BOOST_PREDEF_OS_BSD_BSDI_H - -#include - -/*` -[heading `BOOST_OS_BSD_BSDI`] - -[@http://en.wikipedia.org/wiki/BSD/OS BSDi BSD/OS] operating system. - -[table - [[__predef_symbol__] [__predef_version__]] - - [[`__bsdi__`] [__predef_detection__]] - ] - */ - -#define BOOST_OS_BSD_BSDI BOOST_VERSION_NUMBER_NOT_AVAILABLE - -#if !defined(BOOST_PREDEF_DETAIL_OS_DETECTED) && ( \ - defined(__bsdi__) \ - ) -# ifndef BOOST_OS_BSD_AVAILABLE -# define BOOST_OS_BSD BOOST_VERSION_NUMBER_AVAILABLE -# define BOOST_OS_BSD_AVAILABLE -# endif -# undef BOOST_OS_BSD_BSDI -# define BOOST_OS_BSD_BSDI BOOST_VERSION_NUMBER_AVAILABLE -#endif - -#if BOOST_OS_BSD_BSDI -# define BOOST_OS_BSD_BSDI_AVAILABLE -# include -#endif - -#define BOOST_OS_BSD_BSDI_NAME "BSDi BSD/OS" - -#endif - -#include -BOOST_PREDEF_DECLARE_TEST(BOOST_OS_BSD_BSDI,BOOST_OS_BSD_BSDI_NAME) diff --git a/src/vision_opencv/cv_bridge/src/boost/predef/os/bsd/dragonfly.h b/src/vision_opencv/cv_bridge/src/boost/predef/os/bsd/dragonfly.h deleted file mode 100644 index 1d07579..0000000 --- a/src/vision_opencv/cv_bridge/src/boost/predef/os/bsd/dragonfly.h +++ /dev/null @@ -1,50 +0,0 @@ -/* -Copyright Rene Rivera 2012-2015 -Distributed under the Boost Software License, Version 1.0. -(See accompanying file LICENSE_1_0.txt or copy at -http://www.boost.org/LICENSE_1_0.txt) -*/ - -#ifndef BOOST_PREDEF_OS_BSD_DRAGONFLY_H -#define BOOST_PREDEF_OS_BSD_DRAGONFLY_H - -#include - -/*` -[heading `BOOST_OS_BSD_DRAGONFLY`] - -[@http://en.wikipedia.org/wiki/DragonFly_BSD DragonFly BSD] operating system. 
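The predef OS headers key off compile-time macros such as `__FreeBSD__`; the closest runtime analogue in Python is `sys.platform`, whose exact values are platform- and interpreter-dependent, hence the hedged prefix checks (sketch):

    import sys

    # sys.platform is e.g. 'linux', 'freebsd13', 'openbsd7', 'netbsd', 'darwin';
    # prefix checks loosely mirror the defined(__FreeBSD__)-style tests above.
    is_bsd = sys.platform.startswith(('freebsd', 'openbsd', 'netbsd', 'dragonfly'))
    print(sys.platform, is_bsd)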
- -[table - [[__predef_symbol__] [__predef_version__]] - - [[`__DragonFly__`] [__predef_detection__]] - ] - */ - -#define BOOST_OS_BSD_DRAGONFLY BOOST_VERSION_NUMBER_NOT_AVAILABLE - -#if !defined(BOOST_PREDEF_DETAIL_OS_DETECTED) && ( \ - defined(__DragonFly__) \ - ) -# ifndef BOOST_OS_BSD_AVAILABLE -# define BOOST_OS_BSD BOOST_VERSION_NUMBER_AVAILABLE -# define BOOST_OS_BSD_AVAILABLE -# endif -# undef BOOST_OS_BSD_DRAGONFLY -# if defined(__DragonFly__) -# define BOOST_OS_BSD_DRAGONFLY BOOST_VERSION_NUMBER_AVAILABLE -# endif -#endif - -#if BOOST_OS_BSD_DRAGONFLY -# define BOOST_OS_BSD_DRAGONFLY_AVAILABLE -# include <boost/predef/detail/os_detected.h> -#endif - -#define BOOST_OS_BSD_DRAGONFLY_NAME "DragonFly BSD" - -#endif - -#include <boost/predef/detail/test.h> -BOOST_PREDEF_DECLARE_TEST(BOOST_OS_BSD_DRAGONFLY,BOOST_OS_BSD_DRAGONFLY_NAME) diff --git a/src/vision_opencv/cv_bridge/src/boost/predef/os/bsd/free.h b/src/vision_opencv/cv_bridge/src/boost/predef/os/bsd/free.h deleted file mode 100644 index 248011a..0000000 --- a/src/vision_opencv/cv_bridge/src/boost/predef/os/bsd/free.h +++ /dev/null @@ -1,60 +0,0 @@ -/* -Copyright Rene Rivera 2012-2015 -Distributed under the Boost Software License, Version 1.0. -(See accompanying file LICENSE_1_0.txt or copy at -http://www.boost.org/LICENSE_1_0.txt) -*/ - -#ifndef BOOST_PREDEF_OS_BSD_FREE_H -#define BOOST_PREDEF_OS_BSD_FREE_H - -#include <boost/predef/os/bsd.h> - -/*` -[heading `BOOST_OS_BSD_FREE`] - -[@http://en.wikipedia.org/wiki/Freebsd FreeBSD] operating system. - -[table - [[__predef_symbol__] [__predef_version__]] - - [[`__FreeBSD__`] [__predef_detection__]] - - [[`__FreeBSD_version`] [V.R.P]] - ] - */ - -#define BOOST_OS_BSD_FREE BOOST_VERSION_NUMBER_NOT_AVAILABLE - -#if !defined(BOOST_PREDEF_DETAIL_OS_DETECTED) && ( \ - defined(__FreeBSD__) \ - ) -# ifndef BOOST_OS_BSD_AVAILABLE -# define BOOST_OS_BSD BOOST_VERSION_NUMBER_AVAILABLE -# define BOOST_OS_BSD_AVAILABLE -# endif -# undef BOOST_OS_BSD_FREE -# if defined(__FreeBSD_version) -# if __FreeBSD_version < 500000 -# define BOOST_OS_BSD_FREE \ - BOOST_PREDEF_MAKE_10_VRP000(__FreeBSD_version) -# else -# define BOOST_OS_BSD_FREE \ - BOOST_PREDEF_MAKE_10_VRR000(__FreeBSD_version) -# endif -# else -# define BOOST_OS_BSD_FREE BOOST_VERSION_NUMBER_AVAILABLE -# endif -#endif - -#if BOOST_OS_BSD_FREE -# define BOOST_OS_BSD_FREE_AVAILABLE -# include <boost/predef/detail/os_detected.h> -#endif - -#define BOOST_OS_BSD_FREE_NAME "Free BSD" - -#endif - -#include <boost/predef/detail/test.h> -BOOST_PREDEF_DECLARE_TEST(BOOST_OS_BSD_FREE,BOOST_OS_BSD_FREE_NAME) diff --git a/src/vision_opencv/cv_bridge/src/boost/predef/os/bsd/net.h b/src/vision_opencv/cv_bridge/src/boost/predef/os/bsd/net.h deleted file mode 100644 index 387cbde..0000000 --- a/src/vision_opencv/cv_bridge/src/boost/predef/os/bsd/net.h +++ /dev/null @@ -1,84 +0,0 @@ -/* -Copyright Rene Rivera 2012-2015 -Distributed under the Boost Software License, Version 1.0. -(See accompanying file LICENSE_1_0.txt or copy at -http://www.boost.org/LICENSE_1_0.txt) -*/ - -#ifndef BOOST_PREDEF_OS_BSD_NET_H -#define BOOST_PREDEF_OS_BSD_NET_H - -#include <boost/predef/os/bsd.h> - -/*` -[heading `BOOST_OS_BSD_NET`] - -[@http://en.wikipedia.org/wiki/Netbsd NetBSD] operating system.
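free.h above branches on `__FreeBSD_version < 500000` because the digit layout changed (VRP000 before FreeBSD 5, VRR000 after); the decoding in Python (note the V field is a single digit, a limitation of the original macros):

    def freebsd_version(v):
        # mirrors free.h: VRP000 before 500000, VRR000 from 5.x on
        if v < 500000:
            return (v // 100000) % 10, (v // 10000) % 10, (v // 1000) % 10
        return (v // 100000) % 10, (v // 1000) % 100, 0

    assert freebsd_version(460002) == (4, 6, 0)
    assert freebsd_version(902001) == (9, 2, 0)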
- -[table - [[__predef_symbol__] [__predef_version__]] - - [[`__NETBSD__`] [__predef_detection__]] - [[`__NetBSD__`] [__predef_detection__]] - - [[`__NETBSD_version`] [V.R.P]] - [[`NetBSD0_8`] [0.8.0]] - [[`NetBSD0_9`] [0.9.0]] - [[`NetBSD1_0`] [1.0.0]] - [[`__NetBSD_Version`] [V.R.P]] - ] - */ - -#define BOOST_OS_BSD_NET BOOST_VERSION_NUMBER_NOT_AVAILABLE - -#if !defined(BOOST_PREDEF_DETAIL_OS_DETECTED) && ( \ - defined(__NETBSD__) || defined(__NetBSD__) \ - ) -# ifndef BOOST_OS_BSD_AVAILABLE -# define BOOST_OS_BSD BOOST_VERSION_NUMBER_AVAILABLE -# define BOOST_OS_BSD_AVAILABLE -# endif -# undef BOOST_OS_BSD_NET -# if defined(__NETBSD__) -# if defined(__NETBSD_version) -# if __NETBSD_version < 500000 -# define BOOST_OS_BSD_NET \ - BOOST_PREDEF_MAKE_10_VRP000(__NETBSD_version) -# else -# define BOOST_OS_BSD_NET \ - BOOST_PREDEF_MAKE_10_VRR000(__NETBSD_version) -# endif -# else -# define BOOST_OS_BSD_NET BOOST_VERSION_NUMBER_AVAILABLE -# endif -# elif defined(__NetBSD__) -# if !defined(BOOST_OS_BSD_NET) && defined(NetBSD0_8) -# define BOOST_OS_BSD_NET BOOST_VERSION_NUMBER(0,8,0) -# endif -# if !defined(BOOST_OS_BSD_NET) && defined(NetBSD0_9) -# define BOOST_OS_BSD_NET BOOST_VERSION_NUMBER(0,9,0) -# endif -# if !defined(BOOST_OS_BSD_NET) && defined(NetBSD1_0) -# define BOOST_OS_BSD_NET BOOST_VERSION_NUMBER(1,0,0) -# endif -# if !defined(BOOST_OS_BSD_NET) && defined(__NetBSD_Version) -# define BOOST_OS_BSD_NET \ - BOOST_PREDEF_MAKE_10_VVRR00PP00(__NetBSD_Version) -# endif -# if !defined(BOOST_OS_BSD_NET) -# define BOOST_OS_BSD_NET BOOST_VERSION_NUMBER_AVAILABLE -# endif -# endif -#endif - -#if BOOST_OS_BSD_NET -# define BOOST_OS_BSD_NET_AVAILABLE -# include -#endif - -#define BOOST_OS_BSD_NET_NAME "NetBSD" - -#endif - -#include -BOOST_PREDEF_DECLARE_TEST(BOOST_OS_BSD_NET,BOOST_OS_BSD_NET_NAME) diff --git a/src/vision_opencv/cv_bridge/src/boost/predef/os/bsd/open.h b/src/vision_opencv/cv_bridge/src/boost/predef/os/bsd/open.h deleted file mode 100644 index 423103a..0000000 --- a/src/vision_opencv/cv_bridge/src/boost/predef/os/bsd/open.h +++ /dev/null @@ -1,171 +0,0 @@ -/* -Copyright Rene Rivera 2012-2015 -Distributed under the Boost Software License, Version 1.0. -(See accompanying file LICENSE_1_0.txt or copy at -http://www.boost.org/LICENSE_1_0.txt) -*/ - -#ifndef BOOST_PREDEF_OS_BSD_OPEN_H -#define BOOST_PREDEF_OS_BSD_OPEN_H - -#include - -/*` -[heading `BOOST_OS_BSD_OPEN`] - -[@http://en.wikipedia.org/wiki/Openbsd OpenBSD] operating system. 
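OpenBSD publishes no single numeric version macro, so the long cascade below tests one release macro per version and the first match wins. Illustratively (my trace, assuming the usual macros from OpenBSD's sys/param.h):

    /* On an OpenBSD 4.9 system, OpenBSD4_9 is defined, so:
     *   BOOST_OS_BSD_OPEN == BOOST_VERSION_NUMBER(4,9,0) == 40900000
     */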
- -[table - [[__predef_symbol__] [__predef_version__]] - - [[`__OpenBSD__`] [__predef_detection__]] - - [[`OpenBSD2_0`] [2.0.0]] - [[`OpenBSD2_1`] [2.1.0]] - [[`OpenBSD2_2`] [2.2.0]] - [[`OpenBSD2_3`] [2.3.0]] - [[`OpenBSD2_4`] [2.4.0]] - [[`OpenBSD2_5`] [2.5.0]] - [[`OpenBSD2_6`] [2.6.0]] - [[`OpenBSD2_7`] [2.7.0]] - [[`OpenBSD2_8`] [2.8.0]] - [[`OpenBSD2_9`] [2.9.0]] - [[`OpenBSD3_0`] [3.0.0]] - [[`OpenBSD3_1`] [3.1.0]] - [[`OpenBSD3_2`] [3.2.0]] - [[`OpenBSD3_3`] [3.3.0]] - [[`OpenBSD3_4`] [3.4.0]] - [[`OpenBSD3_5`] [3.5.0]] - [[`OpenBSD3_6`] [3.6.0]] - [[`OpenBSD3_7`] [3.7.0]] - [[`OpenBSD3_8`] [3.8.0]] - [[`OpenBSD3_9`] [3.9.0]] - [[`OpenBSD4_0`] [4.0.0]] - [[`OpenBSD4_1`] [4.1.0]] - [[`OpenBSD4_2`] [4.2.0]] - [[`OpenBSD4_3`] [4.3.0]] - [[`OpenBSD4_4`] [4.4.0]] - [[`OpenBSD4_5`] [4.5.0]] - [[`OpenBSD4_6`] [4.6.0]] - [[`OpenBSD4_7`] [4.7.0]] - [[`OpenBSD4_8`] [4.8.0]] - [[`OpenBSD4_9`] [4.9.0]] - ] - */ - -#define BOOST_OS_BSD_OPEN BOOST_VERSION_NUMBER_NOT_AVAILABLE - -#if !defined(BOOST_PREDEF_DETAIL_OS_DETECTED) && ( \ - defined(__OpenBSD__) \ - ) -# ifndef BOOST_OS_BSD_AVAILABLE -# define BOOST_OS_BSD BOOST_VERSION_NUMBER_AVAILABLE -# define BOOST_OS_BSD_AVAILABLE -# endif -# undef BOOST_OS_BSD_OPEN -# if !defined(BOOST_OS_BSD_OPEN) && defined(OpenBSD2_0) -# define BOOST_OS_BSD_OPEN BOOST_VERSION_NUMBER(2,0,0) -# endif -# if !defined(BOOST_OS_BSD_OPEN) && defined(OpenBSD2_1) -# define BOOST_OS_BSD_OPEN BOOST_VERSION_NUMBER(2,1,0) -# endif -# if !defined(BOOST_OS_BSD_OPEN) && defined(OpenBSD2_2) -# define BOOST_OS_BSD_OPEN BOOST_VERSION_NUMBER(2,2,0) -# endif -# if !defined(BOOST_OS_BSD_OPEN) && defined(OpenBSD2_3) -# define BOOST_OS_BSD_OPEN BOOST_VERSION_NUMBER(2,3,0) -# endif -# if !defined(BOOST_OS_BSD_OPEN) && defined(OpenBSD2_4) -# define BOOST_OS_BSD_OPEN BOOST_VERSION_NUMBER(2,4,0) -# endif -# if !defined(BOOST_OS_BSD_OPEN) && defined(OpenBSD2_5) -# define BOOST_OS_BSD_OPEN BOOST_VERSION_NUMBER(2,5,0) -# endif -# if !defined(BOOST_OS_BSD_OPEN) && defined(OpenBSD2_6) -# define BOOST_OS_BSD_OPEN BOOST_VERSION_NUMBER(2,6,0) -# endif -# if !defined(BOOST_OS_BSD_OPEN) && defined(OpenBSD2_7) -# define BOOST_OS_BSD_OPEN BOOST_VERSION_NUMBER(2,7,0) -# endif -# if !defined(BOOST_OS_BSD_OPEN) && defined(OpenBSD2_8) -# define BOOST_OS_BSD_OPEN BOOST_VERSION_NUMBER(2,8,0) -# endif -# if !defined(BOOST_OS_BSD_OPEN) && defined(OpenBSD2_9) -# define BOOST_OS_BSD_OPEN BOOST_VERSION_NUMBER(2,9,0) -# endif -# if !defined(BOOST_OS_BSD_OPEN) && defined(OpenBSD3_0) -# define BOOST_OS_BSD_OPEN BOOST_VERSION_NUMBER(3,0,0) -# endif -# if !defined(BOOST_OS_BSD_OPEN) && defined(OpenBSD3_1) -# define BOOST_OS_BSD_OPEN BOOST_VERSION_NUMBER(3,1,0) -# endif -# if !defined(BOOST_OS_BSD_OPEN) && defined(OpenBSD3_2) -# define BOOST_OS_BSD_OPEN BOOST_VERSION_NUMBER(3,2,0) -# endif -# if !defined(BOOST_OS_BSD_OPEN) && defined(OpenBSD3_3) -# define BOOST_OS_BSD_OPEN BOOST_VERSION_NUMBER(3,3,0) -# endif -# if !defined(BOOST_OS_BSD_OPEN) && defined(OpenBSD3_4) -# define BOOST_OS_BSD_OPEN BOOST_VERSION_NUMBER(3,4,0) -# endif -# if !defined(BOOST_OS_BSD_OPEN) && defined(OpenBSD3_5) -# define BOOST_OS_BSD_OPEN BOOST_VERSION_NUMBER(3,5,0) -# endif -# if !defined(BOOST_OS_BSD_OPEN) && defined(OpenBSD3_6) -# define BOOST_OS_BSD_OPEN BOOST_VERSION_NUMBER(3,6,0) -# endif -# if !defined(BOOST_OS_BSD_OPEN) && defined(OpenBSD3_7) -# define BOOST_OS_BSD_OPEN BOOST_VERSION_NUMBER(3,7,0) -# endif -# if !defined(BOOST_OS_BSD_OPEN) && defined(OpenBSD3_8) -# define BOOST_OS_BSD_OPEN BOOST_VERSION_NUMBER(3,8,0) -# endif -# if 
!defined(BOOST_OS_BSD_OPEN) && defined(OpenBSD3_9) -# define BOOST_OS_BSD_OPEN BOOST_VERSION_NUMBER(3,9,0) -# endif -# if !defined(BOOST_OS_BSD_OPEN) && defined(OpenBSD4_0) -# define BOOST_OS_BSD_OPEN BOOST_VERSION_NUMBER(4,0,0) -# endif -# if !defined(BOOST_OS_BSD_OPEN) && defined(OpenBSD4_1) -# define BOOST_OS_BSD_OPEN BOOST_VERSION_NUMBER(4,1,0) -# endif -# if !defined(BOOST_OS_BSD_OPEN) && defined(OpenBSD4_2) -# define BOOST_OS_BSD_OPEN BOOST_VERSION_NUMBER(4,2,0) -# endif -# if !defined(BOOST_OS_BSD_OPEN) && defined(OpenBSD4_3) -# define BOOST_OS_BSD_OPEN BOOST_VERSION_NUMBER(4,3,0) -# endif -# if !defined(BOOST_OS_BSD_OPEN) && defined(OpenBSD4_4) -# define BOOST_OS_BSD_OPEN BOOST_VERSION_NUMBER(4,4,0) -# endif -# if !defined(BOOST_OS_BSD_OPEN) && defined(OpenBSD4_5) -# define BOOST_OS_BSD_OPEN BOOST_VERSION_NUMBER(4,5,0) -# endif -# if !defined(BOOST_OS_BSD_OPEN) && defined(OpenBSD4_6) -# define BOOST_OS_BSD_OPEN BOOST_VERSION_NUMBER(4,6,0) -# endif -# if !defined(BOOST_OS_BSD_OPEN) && defined(OpenBSD4_7) -# define BOOST_OS_BSD_OPEN BOOST_VERSION_NUMBER(4,7,0) -# endif -# if !defined(BOOST_OS_BSD_OPEN) && defined(OpenBSD4_8) -# define BOOST_OS_BSD_OPEN BOOST_VERSION_NUMBER(4,8,0) -# endif -# if !defined(BOOST_OS_BSD_OPEN) && defined(OpenBSD4_9) -# define BOOST_OS_BSD_OPEN BOOST_VERSION_NUMBER(4,9,0) -# endif -# if !defined(BOOST_OS_BSD_OPEN) -# define BOOST_OS_BSD_OPEN BOOST_VERSION_NUMBER_AVAILABLE -# endif -#endif - -#if BOOST_OS_BSD_OPEN -# define BOOST_OS_BSD_OPEN_AVAILABLE -# include -#endif - -#define BOOST_OS_BSD_OPEN_NAME "OpenBSD" - -#endif - -#include -BOOST_PREDEF_DECLARE_TEST(BOOST_OS_BSD_OPEN,BOOST_OS_BSD_OPEN_NAME) diff --git a/src/vision_opencv/cv_bridge/src/boost/predef/os/ios.h b/src/vision_opencv/cv_bridge/src/boost/predef/os/ios.h deleted file mode 100644 index f853815..0000000 --- a/src/vision_opencv/cv_bridge/src/boost/predef/os/ios.h +++ /dev/null @@ -1,51 +0,0 @@ -/* -Copyright Franz Detro 2014 -Copyright Rene Rivera 2015 -Distributed under the Boost Software License, Version 1.0. -(See accompanying file LICENSE_1_0.txt or copy at -http://www.boost.org/LICENSE_1_0.txt) -*/ - -#ifndef BOOST_PREDEF_OS_IOS_H -#define BOOST_PREDEF_OS_IOS_H - -#include -#include - -/*` -[heading `BOOST_OS_IOS`] - -[@http://en.wikipedia.org/wiki/iOS iOS] operating system. 
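The `*1000` in the detection below is what aligns Apple's minimum-deployment encoding (major*10000 + minor*100 + patch) with the BOOST_VERSION_NUMBER layout; a worked example (my arithmetic, with an assumed deployment target):

    /* clang -miphoneos-version-min=6.1 defines
     *   __ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__ == 60100
     *   60100 * 1000 == 60100000 == BOOST_VERSION_NUMBER(6,1,0)
     */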
- -[table - [[__predef_symbol__] [__predef_version__]] - - [[`__APPLE__`] [__predef_detection__]] - [[`__MACH__`] [__predef_detection__]] - [[`__ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__`] [__predef_detection__]] - - [[`__ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__`] [__ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__*1000]] - ] - */ - -#define BOOST_OS_IOS BOOST_VERSION_NUMBER_NOT_AVAILABLE - -#if !defined(BOOST_PREDEF_DETAIL_OS_DETECTED) && ( \ - defined(__APPLE__) && defined(__MACH__) && \ - defined(__ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__) \ - ) -# undef BOOST_OS_IOS -# define BOOST_OS_IOS (__ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__*1000) -#endif - -#if BOOST_OS_IOS -# define BOOST_OS_IOS_AVAILABLE -# include -#endif - -#define BOOST_OS_IOS_NAME "iOS" - -#endif - -#include -BOOST_PREDEF_DECLARE_TEST(BOOST_OS_IOS,BOOST_OS_IOS_NAME) diff --git a/src/vision_opencv/cv_bridge/src/boost/predef/os/macos.h b/src/vision_opencv/cv_bridge/src/boost/predef/os/macos.h deleted file mode 100644 index 4afb30d..0000000 --- a/src/vision_opencv/cv_bridge/src/boost/predef/os/macos.h +++ /dev/null @@ -1,65 +0,0 @@ -/* -Copyright Rene Rivera 2008-2015 -Copyright Franz Detro 2014 -Distributed under the Boost Software License, Version 1.0. -(See accompanying file LICENSE_1_0.txt or copy at -http://www.boost.org/LICENSE_1_0.txt) -*/ - -#ifndef BOOST_PREDEF_OS_MACOS_H -#define BOOST_PREDEF_OS_MACOS_H - -/* Special case: iOS will define the same predefs as MacOS, and additionally - '__ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__'. We can guard against that, - but only if we detect iOS first. Hence we will force include iOS detection - * before doing any MacOS detection. - */ -#include - -#include -#include - -/*` -[heading `BOOST_OS_MACOS`] - -[@http://en.wikipedia.org/wiki/Mac_OS Mac OS] operating system. - -[table - [[__predef_symbol__] [__predef_version__]] - - [[`macintosh`] [__predef_detection__]] - [[`Macintosh`] [__predef_detection__]] - [[`__APPLE__`] [__predef_detection__]] - [[`__MACH__`] [__predef_detection__]] - - [[`__APPLE__`, `__MACH__`] [10.0.0]] - [[ /otherwise/ ] [9.0.0]] - ] - */ - -#define BOOST_OS_MACOS BOOST_VERSION_NUMBER_NOT_AVAILABLE - -#if !defined(BOOST_PREDEF_DETAIL_OS_DETECTED) && ( \ - defined(macintosh) || defined(Macintosh) || \ - (defined(__APPLE__) && defined(__MACH__)) \ - ) -# undef BOOST_OS_MACOS -# if !defined(BOOST_OS_MACOS) && defined(__APPLE__) && defined(__MACH__) -# define BOOST_OS_MACOS BOOST_VERSION_NUMBER(10,0,0) -# endif -# if !defined(BOOST_OS_MACOS) -# define BOOST_OS_MACOS BOOST_VERSION_NUMBER(9,0,0) -# endif -#endif - -#if BOOST_OS_MACOS -# define BOOST_OS_MACOS_AVAILABLE -# include -#endif - -#define BOOST_OS_MACOS_NAME "Mac OS" - -#endif - -#include -BOOST_PREDEF_DECLARE_TEST(BOOST_OS_MACOS,BOOST_OS_MACOS_NAME) diff --git a/src/vision_opencv/cv_bridge/src/boost/predef/other/endian.h b/src/vision_opencv/cv_bridge/src/boost/predef/other/endian.h deleted file mode 100644 index 6d1f43f..0000000 --- a/src/vision_opencv/cv_bridge/src/boost/predef/other/endian.h +++ /dev/null @@ -1,204 +0,0 @@ -/* -Copyright Rene Rivera 2013-2015 -Distributed under the Boost Software License, Version 1.0. -(See accompanying file LICENSE_1_0.txt or copy at -http://www.boost.org/LICENSE_1_0.txt) -*/ - -#ifndef BOOST_PREDEF_ENDIAN_H -#define BOOST_PREDEF_ENDIAN_H - -#include -#include -#include -#include -#include -#include - -/*` -[heading `BOOST_ENDIAN_*`] - -Detection of endian memory ordering. 
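In consumer code the detected ordering is typically used as a compile-time switch; a minimal sketch in C (not part of the deleted sources; assumes one of the detections below fires):

    #include <boost/predef/other/endian.h>

    #if BOOST_ENDIAN_LITTLE_BYTE
    /* serialize without swapping */
    #elif BOOST_ENDIAN_BIG_BYTE
    /* byte-swap before writing a little-endian wire format */
    #else
    #  error "word-swapped or undetected endianness not handled"
    #endif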
There are four defined macros -in this header that define the various generally possible endian -memory orderings: - -* `BOOST_ENDIAN_BIG_BYTE`, byte-swapped big-endian. -* `BOOST_ENDIAN_BIG_WORD`, word-swapped big-endian. -* `BOOST_ENDIAN_LITTLE_BYTE`, byte-swapped little-endian. -* `BOOST_ENDIAN_LITTLE_WORD`, word-swapped little-endian. - -The detection is conservative in that it only identifies endianness -that it knows for certain. In particular bi-endianness is not -indicated as it is not practically possible to determine the -endianness from anything but an operating system provided -header. And the currently known headers do not define that -programmatic bi-endianness is available. - -This implementation is a compilation of various publicly available -information and acquired knowledge: - -# The indispensable documentation of "Pre-defined Compiler Macros" - [@http://sourceforge.net/p/predef/wiki/Endianness Endianness]. -# The various endian specifications available in the - [@http://wikipedia.org/ Wikipedia] computer architecture pages. -# Generally available searches for headers that define endianness. - */ - -#define BOOST_ENDIAN_BIG_BYTE BOOST_VERSION_NUMBER_NOT_AVAILABLE -#define BOOST_ENDIAN_BIG_WORD BOOST_VERSION_NUMBER_NOT_AVAILABLE -#define BOOST_ENDIAN_LITTLE_BYTE BOOST_VERSION_NUMBER_NOT_AVAILABLE -#define BOOST_ENDIAN_LITTLE_WORD BOOST_VERSION_NUMBER_NOT_AVAILABLE - -/* GNU libc provides a header defining __BYTE_ORDER, or _BYTE_ORDER. - * And some OSs provide some form of endian header also. - */ -#if !BOOST_ENDIAN_BIG_BYTE && !BOOST_ENDIAN_BIG_WORD && \ - !BOOST_ENDIAN_LITTLE_BYTE && !BOOST_ENDIAN_LITTLE_WORD -# if BOOST_LIB_C_GNU || BOOST_OS_ANDROID -# include -# else -# if BOOST_OS_MACOS -# include -# else -# if BOOST_OS_BSD -# if BOOST_OS_BSD_OPEN -# include -# else -# include -# endif -# endif -# endif -# endif -# if defined(__BYTE_ORDER) -# if defined(__BIG_ENDIAN) && (__BYTE_ORDER == __BIG_ENDIAN) -# undef BOOST_ENDIAN_BIG_BYTE -# define BOOST_ENDIAN_BIG_BYTE BOOST_VERSION_NUMBER_AVAILABLE -# endif -# if defined(__LITTLE_ENDIAN) && (__BYTE_ORDER == __LITTLE_ENDIAN) -# undef BOOST_ENDIAN_LITTLE_BYTE -# define BOOST_ENDIAN_LITTLE_BYTE BOOST_VERSION_NUMBER_AVAILABLE -# endif -# if defined(__PDP_ENDIAN) && (__BYTE_ORDER == __PDP_ENDIAN) -# undef BOOST_ENDIAN_LITTLE_WORD -# define BOOST_ENDIAN_LITTLE_WORD BOOST_VERSION_NUMBER_AVAILABLE -# endif -# endif -# if !defined(__BYTE_ORDER) && defined(_BYTE_ORDER) -# if defined(_BIG_ENDIAN) && (_BYTE_ORDER == _BIG_ENDIAN) -# undef BOOST_ENDIAN_BIG_BYTE -# define BOOST_ENDIAN_BIG_BYTE BOOST_VERSION_NUMBER_AVAILABLE -# endif -# if defined(_LITTLE_ENDIAN) && (_BYTE_ORDER == _LITTLE_ENDIAN) -# undef BOOST_ENDIAN_LITTLE_BYTE -# define BOOST_ENDIAN_LITTLE_BYTE BOOST_VERSION_NUMBER_AVAILABLE -# endif -# if defined(_PDP_ENDIAN) && (_BYTE_ORDER == _PDP_ENDIAN) -# undef BOOST_ENDIAN_LITTLE_WORD -# define BOOST_ENDIAN_LITTLE_WORD BOOST_VERSION_NUMBER_AVAILABLE -# endif -# endif -#endif - -/* Built-in byte-swapped big-endian macros. - */ -#if !BOOST_ENDIAN_BIG_BYTE && !BOOST_ENDIAN_BIG_WORD && \ - !BOOST_ENDIAN_LITTLE_BYTE && !BOOST_ENDIAN_LITTLE_WORD -# if (defined(__BIG_ENDIAN__) && !defined(__LITTLE_ENDIAN__)) || \ - (defined(_BIG_ENDIAN) && !defined(_LITTLE_ENDIAN)) || \ - defined(__ARMEB__) || \ - defined(__THUMBEB__) || \ - defined(__AARCH64EB__) || \ - defined(_MIPSEB) || \ - defined(__MIPSEB) || \ - defined(__MIPSEB__) -# undef BOOST_ENDIAN_BIG_BYTE -# define BOOST_ENDIAN_BIG_BYTE BOOST_VERSION_NUMBER_AVAILABLE -# endif -#endif - -/* Built-in byte-swapped little-endian macros. - */ -#if !BOOST_ENDIAN_BIG_BYTE && !BOOST_ENDIAN_BIG_WORD && \ - !BOOST_ENDIAN_LITTLE_BYTE && !BOOST_ENDIAN_LITTLE_WORD -# if (defined(__LITTLE_ENDIAN__) && !defined(__BIG_ENDIAN__)) || \ - (defined(_LITTLE_ENDIAN) && !defined(_BIG_ENDIAN)) || \ - defined(__ARMEL__) || \ - defined(__THUMBEL__) || \ - defined(__AARCH64EL__) || \ - defined(_MIPSEL) || \ - defined(__MIPSEL) || \ - defined(__MIPSEL__) -# undef BOOST_ENDIAN_LITTLE_BYTE -# define BOOST_ENDIAN_LITTLE_BYTE BOOST_VERSION_NUMBER_AVAILABLE -# endif -#endif - -/* Some architectures are strictly one endianness (as opposed - * to the current common bi-endianness). - */ -#if !BOOST_ENDIAN_BIG_BYTE && !BOOST_ENDIAN_BIG_WORD && \ - !BOOST_ENDIAN_LITTLE_BYTE && !BOOST_ENDIAN_LITTLE_WORD -# include -# if BOOST_ARCH_M68K || \ - BOOST_ARCH_PARISC || \ - BOOST_ARCH_SPARC || \ - BOOST_ARCH_SYS370 || \ - BOOST_ARCH_SYS390 || \ - BOOST_ARCH_Z -# undef BOOST_ENDIAN_BIG_BYTE -# define BOOST_ENDIAN_BIG_BYTE BOOST_VERSION_NUMBER_AVAILABLE -# endif -# if BOOST_ARCH_AMD64 || \ - BOOST_ARCH_IA64 || \ - BOOST_ARCH_X86 || \ - BOOST_ARCH_BLACKFIN -# undef BOOST_ENDIAN_LITTLE_BYTE -# define BOOST_ENDIAN_LITTLE_BYTE BOOST_VERSION_NUMBER_AVAILABLE -# endif -#endif - -/* Windows on ARM, if not otherwise detected/specified, is always - * byte-swapped little-endian. - */ -#if !BOOST_ENDIAN_BIG_BYTE && !BOOST_ENDIAN_BIG_WORD && \ - !BOOST_ENDIAN_LITTLE_BYTE && !BOOST_ENDIAN_LITTLE_WORD -# if BOOST_ARCH_ARM -# include -# if BOOST_OS_WINDOWS -# undef BOOST_ENDIAN_LITTLE_BYTE -# define BOOST_ENDIAN_LITTLE_BYTE BOOST_VERSION_NUMBER_AVAILABLE -# endif -# endif -#endif - -#if BOOST_ENDIAN_BIG_BYTE -# define BOOST_ENDIAN_BIG_BYTE_AVAILABLE -#endif -#if BOOST_ENDIAN_BIG_WORD -# define BOOST_ENDIAN_BIG_WORD_BYTE_AVAILABLE -#endif -#if BOOST_ENDIAN_LITTLE_BYTE -# define BOOST_ENDIAN_LITTLE_BYTE_AVAILABLE -#endif -#if BOOST_ENDIAN_LITTLE_WORD -# define BOOST_ENDIAN_LITTLE_WORD_BYTE_AVAILABLE -#endif - -#define BOOST_ENDIAN_BIG_BYTE_NAME "Byte-Swapped Big-Endian" -#define BOOST_ENDIAN_BIG_WORD_NAME "Word-Swapped Big-Endian" -#define BOOST_ENDIAN_LITTLE_BYTE_NAME "Byte-Swapped Little-Endian" -#define BOOST_ENDIAN_LITTLE_WORD_NAME "Word-Swapped Little-Endian" - -#endif - -#include -BOOST_PREDEF_DECLARE_TEST(BOOST_ENDIAN_BIG_BYTE,BOOST_ENDIAN_BIG_BYTE_NAME) - -#include -BOOST_PREDEF_DECLARE_TEST(BOOST_ENDIAN_BIG_WORD,BOOST_ENDIAN_BIG_WORD_NAME) - -#include -BOOST_PREDEF_DECLARE_TEST(BOOST_ENDIAN_LITTLE_BYTE,BOOST_ENDIAN_LITTLE_BYTE_NAME) - -#include -BOOST_PREDEF_DECLARE_TEST(BOOST_ENDIAN_LITTLE_WORD,BOOST_ENDIAN_LITTLE_WORD_NAME) diff --git a/src/vision_opencv/cv_bridge/src/boost/predef/version_number.h b/src/vision_opencv/cv_bridge/src/boost/predef/version_number.h deleted file mode 100644 index 3903a36..0000000 --- a/src/vision_opencv/cv_bridge/src/boost/predef/version_number.h +++ /dev/null @@ -1,53 +0,0 @@ -/* -Copyright Rene Rivera 2005, 2008-2013 -Distributed under the Boost Software License, Version 1.0. 
-(See accompanying file LICENSE_1_0.txt or copy at -http://www.boost.org/LICENSE_1_0.txt) -*/ - -#ifndef BOOST_PREDEF_VERSION_NUMBER_H -#define BOOST_PREDEF_VERSION_NUMBER_H - -/*` -[heading `BOOST_VERSION_NUMBER`] - -`` -BOOST_VERSION_NUMBER(major,minor,patch) -`` - -Defines standard version numbers, with these properties: - -* Decimal base whole numbers in the range \[0,1000000000). - The number range is designed to allow for a (2,2,5) triplet. - Which fits within a 32 bit value. -* The `major` number can be in the \[0,99\] range. -* The `minor` number can be in the \[0,99\] range. -* The `patch` number can be in the \[0,99999\] range. -* Values can be specified in any base. As the defined value - is an constant expression. -* Value can be directly used in both preprocessor and compiler - expressions for comparison to other similarly defined values. -* The implementation enforces the individual ranges for the - major, minor, and patch numbers. And values over the ranges - are truncated (modulo). - -*/ -#define BOOST_VERSION_NUMBER(major,minor,patch) \ - ( (((major)%100)*10000000) + (((minor)%100)*100000) + ((patch)%100000) ) - -#define BOOST_VERSION_NUMBER_MAX \ - BOOST_VERSION_NUMBER(99,99,99999) - -#define BOOST_VERSION_NUMBER_ZERO \ - BOOST_VERSION_NUMBER(0,0,0) - -#define BOOST_VERSION_NUMBER_MIN \ - BOOST_VERSION_NUMBER(0,0,1) - -#define BOOST_VERSION_NUMBER_AVAILABLE \ - BOOST_VERSION_NUMBER_MIN - -#define BOOST_VERSION_NUMBER_NOT_AVAILABLE \ - BOOST_VERSION_NUMBER_ZERO - -#endif diff --git a/src/vision_opencv/cv_bridge/src/cv_bridge.cpp b/src/vision_opencv/cv_bridge/src/cv_bridge.cpp deleted file mode 100644 index 109440e..0000000 --- a/src/vision_opencv/cv_bridge/src/cv_bridge.cpp +++ /dev/null @@ -1,699 +0,0 @@ -/********************************************************************* -* Software License Agreement (BSD License) -* -* Copyright (c) 2011, Willow Garage, Inc. -* Copyright (c) 2015, Tal Regev. -* All rights reserved. -* -* Redistribution and use in source and binary forms, with or without -* modification, are permitted provided that the following conditions -* are met: -* -* * Redistributions of source code must retain the above copyright -* notice, this list of conditions and the following disclaimer. -* * Redistributions in binary form must reproduce the above -* copyright notice, this list of conditions and the following -* disclaimer in the documentation and/or other materials provided -* with the distribution. -* * Neither the name of the Willow Garage nor the names of its -* contributors may be used to endorse or promote products derived -* from this software without specific prior written permission. -* -* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS -* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE -* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, -* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, -* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT -* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN -* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -* POSSIBILITY OF SUCH DAMAGE. 
-*********************************************************************/ - -#include "boost/endian/conversion.hpp" - -#include - -#include -#include - -#include -#include - -#include - -#include -#include - -namespace enc = sensor_msgs::image_encodings; - -namespace cv_bridge { - -static int depthStrToInt(const std::string depth) { - if (depth == "8U") { - return 0; - } else if (depth == "8S") { - return 1; - } else if (depth == "16U") { - return 2; - } else if (depth == "16S") { - return 3; - } else if (depth == "32S") { - return 4; - } else if (depth == "32F") { - return 5; - } - return 6; -} - -int getCvType(const std::string& encoding) -{ - // Check for the most common encodings first - if (encoding == enc::BGR8) return CV_8UC3; - if (encoding == enc::MONO8) return CV_8UC1; - if (encoding == enc::RGB8) return CV_8UC3; - if (encoding == enc::MONO16) return CV_16UC1; - if (encoding == enc::BGR16) return CV_16UC3; - if (encoding == enc::RGB16) return CV_16UC3; - if (encoding == enc::BGRA8) return CV_8UC4; - if (encoding == enc::RGBA8) return CV_8UC4; - if (encoding == enc::BGRA16) return CV_16UC4; - if (encoding == enc::RGBA16) return CV_16UC4; - - // For bayer, return one-channel - if (encoding == enc::BAYER_RGGB8) return CV_8UC1; - if (encoding == enc::BAYER_BGGR8) return CV_8UC1; - if (encoding == enc::BAYER_GBRG8) return CV_8UC1; - if (encoding == enc::BAYER_GRBG8) return CV_8UC1; - if (encoding == enc::BAYER_RGGB16) return CV_16UC1; - if (encoding == enc::BAYER_BGGR16) return CV_16UC1; - if (encoding == enc::BAYER_GBRG16) return CV_16UC1; - if (encoding == enc::BAYER_GRBG16) return CV_16UC1; - - // Miscellaneous - if (encoding == enc::YUV422) return CV_8UC2; - - // Check all the generic content encodings - boost::cmatch m; - - if (boost::regex_match(encoding.c_str(), m, - boost::regex("(8U|8S|16U|16S|32S|32F|64F)C([0-9]+)"))) { - return CV_MAKETYPE(depthStrToInt(m[1].str()), atoi(m[2].str().c_str())); - } - - if (boost::regex_match(encoding.c_str(), m, - boost::regex("(8U|8S|16U|16S|32S|32F|64F)"))) { - return CV_MAKETYPE(depthStrToInt(m[1].str()), 1); - } - - throw Exception("Unrecognized image encoding [" + encoding + "]"); -} - -/// @cond DOXYGEN_IGNORE - -enum Encoding { INVALID = -1, GRAY = 0, RGB, BGR, RGBA, BGRA, YUV422, BAYER_RGGB, BAYER_BGGR, BAYER_GBRG, BAYER_GRBG}; - -Encoding getEncoding(const std::string& encoding) -{ - if ((encoding == enc::MONO8) || (encoding == enc::MONO16)) return GRAY; - if ((encoding == enc::BGR8) || (encoding == enc::BGR16)) return BGR; - if ((encoding == enc::RGB8) || (encoding == enc::RGB16)) return RGB; - if ((encoding == enc::BGRA8) || (encoding == enc::BGRA16)) return BGRA; - if ((encoding == enc::RGBA8) || (encoding == enc::RGBA16)) return RGBA; - if (encoding == enc::YUV422) return YUV422; - - if ((encoding == enc::BAYER_RGGB8) || (encoding == enc::BAYER_RGGB16)) return BAYER_RGGB; - if ((encoding == enc::BAYER_BGGR8) || (encoding == enc::BAYER_BGGR16)) return BAYER_BGGR; - if ((encoding == enc::BAYER_GBRG8) || (encoding == enc::BAYER_GBRG16)) return BAYER_GBRG; - if ((encoding == enc::BAYER_GRBG8) || (encoding == enc::BAYER_GRBG16)) return BAYER_GRBG; - - // We don't support conversions to/from other types - return INVALID; -} - -static const int SAME_FORMAT = -1; - -/** Return a lit of OpenCV conversion codes to get from one Format to the other - * The key is a pair: and the value a succession of OpenCV code conversion - * It's not efficient code but it is only called once and the structure is small enough - */ -std::map, std::vector > 
getConversionCodes() { - std::map, std::vector > res; - for(int i=0; i<=5; ++i) - res[std::pair(Encoding(i),Encoding(i))].push_back(SAME_FORMAT); - - res[std::make_pair(GRAY, RGB)].push_back(cv::COLOR_GRAY2RGB); - res[std::make_pair(GRAY, BGR)].push_back(cv::COLOR_GRAY2BGR); - res[std::make_pair(GRAY, RGBA)].push_back(cv::COLOR_GRAY2RGBA); - res[std::make_pair(GRAY, BGRA)].push_back(cv::COLOR_GRAY2BGRA); - - res[std::make_pair(RGB, GRAY)].push_back(cv::COLOR_RGB2GRAY); - res[std::make_pair(RGB, BGR)].push_back(cv::COLOR_RGB2BGR); - res[std::make_pair(RGB, RGBA)].push_back(cv::COLOR_RGB2RGBA); - res[std::make_pair(RGB, BGRA)].push_back(cv::COLOR_RGB2BGRA); - - res[std::make_pair(BGR, GRAY)].push_back(cv::COLOR_BGR2GRAY); - res[std::make_pair(BGR, RGB)].push_back(cv::COLOR_BGR2RGB); - res[std::make_pair(BGR, RGBA)].push_back(cv::COLOR_BGR2RGBA); - res[std::make_pair(BGR, BGRA)].push_back(cv::COLOR_BGR2BGRA); - - res[std::make_pair(RGBA, GRAY)].push_back(cv::COLOR_RGBA2GRAY); - res[std::make_pair(RGBA, RGB)].push_back(cv::COLOR_RGBA2RGB); - res[std::make_pair(RGBA, BGR)].push_back(cv::COLOR_RGBA2BGR); - res[std::make_pair(RGBA, BGRA)].push_back(cv::COLOR_RGBA2BGRA); - - res[std::make_pair(BGRA, GRAY)].push_back(cv::COLOR_BGRA2GRAY); - res[std::make_pair(BGRA, RGB)].push_back(cv::COLOR_BGRA2RGB); - res[std::make_pair(BGRA, BGR)].push_back(cv::COLOR_BGRA2BGR); - res[std::make_pair(BGRA, RGBA)].push_back(cv::COLOR_BGRA2RGBA); - - res[std::make_pair(YUV422, GRAY)].push_back(cv::COLOR_YUV2GRAY_UYVY); - res[std::make_pair(YUV422, RGB)].push_back(cv::COLOR_YUV2RGB_UYVY); - res[std::make_pair(YUV422, BGR)].push_back(cv::COLOR_YUV2BGR_UYVY); - res[std::make_pair(YUV422, RGBA)].push_back(cv::COLOR_YUV2RGBA_UYVY); - res[std::make_pair(YUV422, BGRA)].push_back(cv::COLOR_YUV2BGRA_UYVY); - - // Deal with Bayer - res[std::make_pair(BAYER_RGGB, GRAY)].push_back(cv::COLOR_BayerBG2GRAY); - res[std::make_pair(BAYER_RGGB, RGB)].push_back(cv::COLOR_BayerBG2RGB); - res[std::make_pair(BAYER_RGGB, BGR)].push_back(cv::COLOR_BayerBG2BGR); - - res[std::make_pair(BAYER_BGGR, GRAY)].push_back(cv::COLOR_BayerRG2GRAY); - res[std::make_pair(BAYER_BGGR, RGB)].push_back(cv::COLOR_BayerRG2RGB); - res[std::make_pair(BAYER_BGGR, BGR)].push_back(cv::COLOR_BayerRG2BGR); - - res[std::make_pair(BAYER_GBRG, GRAY)].push_back(cv::COLOR_BayerGR2GRAY); - res[std::make_pair(BAYER_GBRG, RGB)].push_back(cv::COLOR_BayerGR2RGB); - res[std::make_pair(BAYER_GBRG, BGR)].push_back(cv::COLOR_BayerGR2BGR); - - res[std::make_pair(BAYER_GRBG, GRAY)].push_back(cv::COLOR_BayerGB2GRAY); - res[std::make_pair(BAYER_GRBG, RGB)].push_back(cv::COLOR_BayerGB2RGB); - res[std::make_pair(BAYER_GRBG, BGR)].push_back(cv::COLOR_BayerGB2BGR); - - return res; -} - -const std::vector getConversionCode(std::string src_encoding, std::string dst_encoding) -{ - Encoding src_encod = getEncoding(src_encoding); - Encoding dst_encod = getEncoding(dst_encoding); - bool is_src_color_format = enc::isColor(src_encoding) || enc::isMono(src_encoding) || - enc::isBayer(src_encoding) || (src_encoding == enc::YUV422); - bool is_dst_color_format = enc::isColor(dst_encoding) || enc::isMono(dst_encoding) || - enc::isBayer(dst_encoding) || (dst_encoding == enc::YUV422); - bool is_num_channels_the_same = (enc::numChannels(src_encoding) == enc::numChannels(dst_encoding)); - - // If we have no color info in the source, we can only convert to the same format which - // was resolved in the previous condition. 
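// (Illustrative trace of the table above, not additional logic: a request
// to convert enc::BAYER_RGGB8 to enc::MONO8 resolves to the single entry
// (BAYER_RGGB, GRAY) and therefore to one cv::cvtColor pass with
// cv::COLOR_BayerBG2GRAY.)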
Otherwise, fail - if (!is_src_color_format) { - if (is_dst_color_format) - throw Exception("[" + src_encoding + "] is not a color format. but [" + dst_encoding + - "] is. The conversion does not make sense"); - if (!is_num_channels_the_same) - throw Exception("[" + src_encoding + "] and [" + dst_encoding + "] do not have the same number of channel"); - return std::vector(1, SAME_FORMAT); - } - - // If we are converting from a color type to a non color type, we can only do so if we stick - // to the number of channels - if (!is_dst_color_format) { - if (!is_num_channels_the_same) - throw Exception("[" + src_encoding + "] is a color format but [" + dst_encoding + "] " + - "is not so they must have the same OpenCV type, CV_8UC3, CV16UC1 ...."); - return std::vector(1, SAME_FORMAT); - } - - // If we are converting from a color type to another type, then everything is fine - static const std::map, std::vector > CONVERSION_CODES = getConversionCodes(); - - std::pair key(src_encod, dst_encod); - std::map, std::vector >::const_iterator val = CONVERSION_CODES.find(key); - if (val == CONVERSION_CODES.end()) - throw Exception("Unsupported conversion from [" + src_encoding + - "] to [" + dst_encoding + "]"); - - // And deal with depth differences if the colors are different - std::vector res = val->second; - if ((enc::bitDepth(src_encoding) != enc::bitDepth(dst_encoding)) && (getEncoding(src_encoding) != getEncoding(dst_encoding))) - res.push_back(SAME_FORMAT); - - return res; -} - -/////////////////////////////////////// Image /////////////////////////////////////////// - -// Converts a ROS Image to a cv::Mat by sharing the data or changing its endianness if needed -cv::Mat matFromImage(const sensor_msgs::Image& source) -{ - int source_type = getCvType(source.encoding); - int byte_depth = enc::bitDepth(source.encoding) / 8; - int num_channels = enc::numChannels(source.encoding); - - if (source.step < source.width * byte_depth * num_channels) - { - std::stringstream ss; - ss << "Image is wrongly formed: step < width * byte_depth * num_channels or " << source.step << " != " << - source.width << " * " << byte_depth << " * " << num_channels; - throw Exception(ss.str()); - } - - if (source.height * source.step != source.data.size()) - { - std::stringstream ss; - ss << "Image is wrongly formed: height * step != size or " << source.height << " * " << - source.step << " != " << source.data.size(); - throw Exception(ss.str()); - } - - // If the endianness is the same as locally, share the data - cv::Mat mat(source.height, source.width, source_type, const_cast(&source.data[0]), source.step); - if ((boost::endian::order::native == boost::endian::order::big && source.is_bigendian) || - (boost::endian::order::native == boost::endian::order::little && !source.is_bigendian) || - byte_depth == 1) - return mat; - - // Otherwise, reinterpret the data as bytes and switch the channels accordingly - mat = cv::Mat(source.height, source.width, CV_MAKETYPE(CV_8U, num_channels*byte_depth), - const_cast(&source.data[0]), source.step); - cv::Mat mat_swap(source.height, source.width, mat.type()); - - std::vector fromTo; - fromTo.reserve(num_channels*byte_depth); - for(int i = 0; i < num_channels; ++i) - for(int j = 0; j < byte_depth; ++j) - { - fromTo.push_back(byte_depth*i + j); - fromTo.push_back(byte_depth*i + byte_depth - 1 - j); - } - cv::mixChannels(std::vector(1, mat), std::vector(1, mat_swap), fromTo); - - // Interpret mat_swap back as the proper type - mat_swap.reshape(num_channels); - - return mat_swap; -} - -// 
Internal, used by toCvCopy and cvtColor -CvImagePtr toCvCopyImpl(const cv::Mat& source, - const std_msgs::Header& src_header, - const std::string& src_encoding, - const std::string& dst_encoding) -{ - // Copy metadata - CvImagePtr ptr = boost::make_shared(); - ptr->header = src_header; - - // Copy to new buffer if same encoding requested - if (dst_encoding.empty() || dst_encoding == src_encoding) - { - ptr->encoding = src_encoding; - source.copyTo(ptr->image); - } - else - { - // Convert the source data to the desired encoding - const std::vector conversion_codes = getConversionCode(src_encoding, dst_encoding); - cv::Mat image1 = source; - cv::Mat image2; - for(size_t i=0; iimage = image2; - ptr->encoding = dst_encoding; - } - - return ptr; -} - -/// @endcond - -sensor_msgs::ImagePtr CvImage::toImageMsg() const -{ - sensor_msgs::ImagePtr ptr = boost::make_shared(); - toImageMsg(*ptr); - return ptr; -} - -void CvImage::toImageMsg(sensor_msgs::Image& ros_image) const -{ - ros_image.header = header; - ros_image.height = image.rows; - ros_image.width = image.cols; - ros_image.encoding = encoding; - ros_image.is_bigendian = (boost::endian::order::native == boost::endian::order::big); - ros_image.step = image.cols * image.elemSize(); - size_t size = ros_image.step * image.rows; - ros_image.data.resize(size); - - if (image.isContinuous()) - { - memcpy((char*)(&ros_image.data[0]), image.data, size); - } - else - { - // Copy by row by row - uchar* ros_data_ptr = (uchar*)(&ros_image.data[0]); - uchar* cv_data_ptr = image.data; - for (int i = 0; i < image.rows; ++i) - { - memcpy(ros_data_ptr, cv_data_ptr, ros_image.step); - ros_data_ptr += ros_image.step; - cv_data_ptr += image.step; - } - } -} - -// Deep copy data, returnee is mutable -CvImagePtr toCvCopy(const sensor_msgs::ImageConstPtr& source, - const std::string& encoding) -{ - return toCvCopy(*source, encoding); -} - -CvImagePtr toCvCopy(const sensor_msgs::Image& source, - const std::string& encoding) -{ - // Construct matrix pointing to source data - return toCvCopyImpl(matFromImage(source), source.header, source.encoding, encoding); -} - -// Share const data, returnee is immutable -CvImageConstPtr toCvShare(const sensor_msgs::ImageConstPtr& source, - const std::string& encoding) -{ - return toCvShare(*source, source, encoding); -} - -CvImageConstPtr toCvShare(const sensor_msgs::Image& source, - const boost::shared_ptr& tracked_object, - const std::string& encoding) -{ - // If the encoding different or the endianness different, you have to copy - if ((!encoding.empty() && source.encoding != encoding) || (source.is_bigendian && - (boost::endian::order::native != boost::endian::order::big))) - return toCvCopy(source, encoding); - - CvImagePtr ptr = boost::make_shared(); - ptr->header = source.header; - ptr->encoding = source.encoding; - ptr->tracked_object_ = tracked_object; - ptr->image = matFromImage(source); - return ptr; -} - -CvImagePtr cvtColor(const CvImageConstPtr& source, - const std::string& encoding) -{ - return toCvCopyImpl(source->image, source->header, source->encoding, encoding); -} - -/////////////////////////////////////// CompressedImage /////////////////////////////////////////// - -sensor_msgs::CompressedImagePtr CvImage::toCompressedImageMsg(const Format dst_format) const -{ - sensor_msgs::CompressedImagePtr ptr = boost::make_shared(); - toCompressedImageMsg(*ptr,dst_format); - return ptr; -} - -std::string getFormat(Format format) { - - switch (format) { - case DIB: - return "dib"; - case BMP: - return "bmp"; - case JPG: - 
return "jpg"; - case JPEG: - return "jpeg"; - case JPE: - return "jpe"; - case JP2: - return "jp2"; - case PNG: - return "png"; - case PBM: - return "pbm"; - case PGM: - return "pgm"; - case PPM: - return "ppm"; - case RAS: - return "ras"; - case SR: - return "sr"; - case TIF: - return "tif"; - case TIFF: - return "tiff"; - } - - throw Exception("Unrecognized image format"); -} - -void CvImage::toCompressedImageMsg(sensor_msgs::CompressedImage& ros_image, const Format dst_format) const -{ - ros_image.header = header; - cv::Mat image; - if (encoding == enc::BGR8 || encoding == enc::BGRA8) - { - image = this->image; - } - else - { - CvImagePtr tempThis = boost::make_shared(*this); - CvImagePtr temp; - if (enc::hasAlpha(encoding)) - { - temp = cvtColor(tempThis, enc::BGRA8); - } - else - { - temp = cvtColor(tempThis, enc::BGR8); - } - image = temp->image; - } - - std::string format = getFormat(dst_format); - ros_image.format = format; - cv::imencode("." + format, image, ros_image.data); -} - -// Deep copy data, returnee is mutable -CvImagePtr toCvCopy(const sensor_msgs::CompressedImageConstPtr& source, - const std::string& encoding) -{ - return toCvCopy(*source, encoding); -} - -CvImagePtr toCvCopy(const sensor_msgs::CompressedImage& source, const std::string& encoding) -{ - // Construct matrix pointing to source data - const cv::Mat_ in(1, source.data.size(), const_cast(&source.data[0])); - // Loads as BGR or BGRA. - const cv::Mat rgb_a = cv::imdecode(in, cv::IMREAD_UNCHANGED); - - switch (rgb_a.channels()) - { - case 4: - return toCvCopyImpl(rgb_a, source.header, enc::BGRA8, encoding); - break; - case 3: - return toCvCopyImpl(rgb_a, source.header, enc::BGR8, encoding); - break; - case 1: - return toCvCopyImpl(rgb_a, source.header, enc::MONO8, encoding); - break; - default: - return CvImagePtr(); - } -} - -CvImageConstPtr cvtColorForDisplay(const CvImageConstPtr& source, - const std::string& encoding_out, - const CvtColorForDisplayOptions options) -{ - double min_image_value = options.min_image_value; - double max_image_value = options.max_image_value; - - if (!source) - throw Exception("cv_bridge.cvtColorForDisplay() called with empty image."); - // let's figure out what to do with the empty encoding - std::string encoding = encoding_out; - if (encoding.empty()) - { - try - { - // Let's decide upon an output format - if (enc::numChannels(source->encoding) == 1) - { - if ((source->encoding == enc::TYPE_32SC1) || - (enc::bitDepth(source->encoding) == 8) || - (enc::bitDepth(source->encoding) == 16) || - (enc::bitDepth(source->encoding) == 32)) - encoding = enc::BGR8; - else - throw std::runtime_error("Unsupported depth of the source encoding " + encoding); - } - else - { - // We choose BGR by default here as we assume people will use OpenCV - if ((enc::bitDepth(source->encoding) == 8) || - (enc::bitDepth(source->encoding) == 16)) - encoding = enc::BGR8; - else - throw std::runtime_error("Unsupported depth of the source encoding " + encoding); - } - } - // We could have cv_bridge exception or std_runtime_error from sensor_msgs::image_codings routines - catch (const std::runtime_error& e) - { - throw Exception("cv_bridge.cvtColorForDisplay() output encoding is empty and cannot be guessed."); - } - } - else - { - if ((!enc::isColor(encoding_out) && !enc::isMono(encoding_out)) || - (enc::bitDepth(encoding) != 8)) - throw Exception("cv_bridge.cvtColorForDisplay() does not have an output encoding that is color or mono, and has is bit in depth"); - - } - - // Convert label to bgr image - if (encoding 
== sensor_msgs::image_encodings::BGR8 && - source->encoding == enc::TYPE_32SC1) - { - CvImagePtr result(new CvImage()); - result->header = source->header; - result->encoding = encoding; - result->image = cv::Mat(source->image.rows, source->image.cols, CV_8UC3); - for (size_t j = 0; j < source->image.rows; ++j) { - for (size_t i = 0; i < source->image.cols; ++i) { - int label = source->image.at(j, i); - if (label == options.bg_label) { // background label - result->image.at(j, i) = cv::Vec3b(0, 0, 0); - } - else - { - cv::Vec3d rgb = rgb_colors::getRGBColor(label); - // result image should be BGR - result->image.at(j, i) = cv::Vec3b(int(rgb[2] * 255), int(rgb[1] * 255), int(rgb[0] * 255)); - } - } - } - return result; - } - - // Perform scaling if asked for - if (options.do_dynamic_scaling) - { - cv::minMaxLoc(source->image, &min_image_value, &max_image_value); - if (min_image_value == max_image_value) - { - CvImagePtr result(new CvImage()); - result->header = source->header; - result->encoding = encoding; - if (enc::bitDepth(encoding) == 1) - { - result->image = cv::Mat(source->image.size(), CV_8UC1); - result->image.setTo(255./2.); - } else { - result->image = cv::Mat(source->image.size(), CV_8UC3); - result->image.setTo(cv::Scalar(1., 1., 1.)*255./2.); - } - return result; - } - } - - if (min_image_value != max_image_value) - { - if (enc::numChannels(source->encoding) != 1) - throw Exception("cv_bridge.cvtColorForDisplay() scaling for images with more than one channel is unsupported"); - CvImagePtr img_scaled(new CvImage()); - img_scaled->header = source->header; - if (options.colormap == -1) { - img_scaled->encoding = enc::MONO8; - cv::Mat(source->image-min_image_value).convertTo(img_scaled->image, CV_8UC1, 255.0 / - (max_image_value - min_image_value)); - } else { - img_scaled->encoding = enc::BGR8; - cv::Mat(source->image-min_image_value).convertTo(img_scaled->image, CV_8UC3, 255.0 / - (max_image_value - min_image_value)); - cv::applyColorMap(img_scaled->image, img_scaled->image, options.colormap); - // Fill black color to the nan region. 
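// (Worked example of the dynamic scaling above, with assumed values: for a
// 32FC1 depth image with min_image_value=0.5 and max_image_value=5.0, a
// pixel of 2.3 maps to (2.3-0.5)*255/(5.0-0.5) = 102 before the colormap
// is applied.)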
- if (source->encoding == enc::TYPE_32FC1) { - for (size_t j = 0; j < source->image.rows; ++j) { - for (size_t i = 0; i < source->image.cols; ++i) { - float float_value = source->image.at(j, i); - if (std::isnan(float_value)) { - img_scaled->image.at(j, i) = cv::Vec3b(0, 0, 0); - } - } - } - } - } - return cvtColor(img_scaled, encoding); - } - - // If no color conversion is possible, we must "guess" the input format - CvImagePtr source_typed(new CvImage()); - source_typed->image = source->image; - source_typed->header = source->header; - source_typed->encoding = source->encoding; - - // If we get the OpenCV format, if we have 1,3 or 4 channels, we are most likely in mono, BGR or BGRA modes - if (source->encoding == "CV_8UC1") - source_typed->encoding = enc::MONO8; - else if (source->encoding == "16UC1") - source_typed->encoding = enc::MONO16; - else if (source->encoding == "CV_8UC3") - source_typed->encoding = enc::BGR8; - else if (source->encoding == "CV_8UC4") - source_typed->encoding = enc::BGRA8; - else if (source->encoding == "CV_16UC3") - source_typed->encoding = enc::BGR8; - else if (source->encoding == "CV_16UC4") - source_typed->encoding = enc::BGRA8; - - // If no conversion is needed, don't convert - if (source_typed->encoding == encoding) - return source; - - try - { - // Now that the output is a proper color format, try to see if any conversion is possible - return cvtColor(source_typed, encoding); - } - catch (cv_bridge::Exception& e) - { - throw Exception("cv_bridge.cvtColorForDisplay() while trying to convert image from '" + source->encoding + "' to '" + encoding + "' an exception was thrown (" + e.what() + ")"); - } -} - -} //namespace cv_bridge diff --git a/src/vision_opencv/cv_bridge/src/module.cpp b/src/vision_opencv/cv_bridge/src/module.cpp deleted file mode 100644 index c123198..0000000 --- a/src/vision_opencv/cv_bridge/src/module.cpp +++ /dev/null @@ -1,110 +0,0 @@ -/********************************************************************* -* Software License Agreement (BSD License) -* -* Copyright (c) 2012, Willow Garage, Inc. -* All rights reserved. -* -* Redistribution and use in source and binary forms, with or without -* modification, are permitted provided that the following conditions -* are met: -* -* * Redistributions of source code must retain the above copyright -* notice, this list of conditions and the following disclaimer. -* * Redistributions in binary form must reproduce the above -* copyright notice, this list of conditions and the following -* disclaimer in the documentation and/or other materials provided -* with the distribution. -* * Neither the name of the Willow Garage nor the names of its -* contributors may be used to endorse or promote products derived -* from this software without specific prior written permission. -* -* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS -* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE -* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, -* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, -* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT -* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN -* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -* POSSIBILITY OF SUCH DAMAGE. -*********************************************************************/ - -#include "module.hpp" - -PyObject *mod_opencv; - -bp::object -cvtColor2Wrap(bp::object obj_in, const std::string & encoding_in, const std::string & encoding_out) { - // Convert the Python input to an image - cv::Mat mat_in; - convert_to_CvMat2(obj_in.ptr(), mat_in); - - // Call cv_bridge for color conversion - cv_bridge::CvImagePtr cv_image(new cv_bridge::CvImage(std_msgs::Header(), encoding_in, mat_in)); - - cv::Mat mat = cv_bridge::cvtColor(cv_image, encoding_out)->image; - - return bp::object(boost::python::handle<>(pyopencv_from(mat))); -} - -bp::object -cvtColorForDisplayWrap(bp::object obj_in, - const std::string & encoding_in, - const std::string & encoding_out, - bool do_dynamic_scaling = false, - double min_image_value = 0.0, - double max_image_value = 0.0) { - // Convert the Python input to an image - cv::Mat mat_in; - convert_to_CvMat2(obj_in.ptr(), mat_in); - - cv_bridge::CvImagePtr cv_image(new cv_bridge::CvImage(std_msgs::Header(), encoding_in, mat_in)); - - cv_bridge::CvtColorForDisplayOptions options; - options.do_dynamic_scaling = do_dynamic_scaling; - options.min_image_value = min_image_value; - options.max_image_value = max_image_value; - cv::Mat mat = cv_bridge::cvtColorForDisplay(/*source=*/cv_image, - /*encoding_out=*/encoding_out, - /*options=*/options)->image; - - return bp::object(boost::python::handle<>(pyopencv_from(mat))); -} - -BOOST_PYTHON_FUNCTION_OVERLOADS(cvtColorForDisplayWrap_overloads, cvtColorForDisplayWrap, 3, 6) - -int CV_MAT_CNWrap(int i) { - return CV_MAT_CN(i); -} - -int CV_MAT_DEPTHWrap(int i) { - return CV_MAT_DEPTH(i); -} - -BOOST_PYTHON_MODULE(cv_bridge_boost) -{ - do_numpy_import(); - mod_opencv = PyImport_ImportModule("cv2"); - - // Wrap the function to get encodings as OpenCV types - boost::python::def("getCvType", cv_bridge::getCvType); - boost::python::def("cvtColor2", cvtColor2Wrap); - boost::python::def("CV_MAT_CNWrap", CV_MAT_CNWrap); - boost::python::def("CV_MAT_DEPTHWrap", CV_MAT_DEPTHWrap); - boost::python::def("cvtColorForDisplay", cvtColorForDisplayWrap, - cvtColorForDisplayWrap_overloads( - boost::python::args("source", "encoding_in", "encoding_out", "do_dynamic_scaling", - "min_image_value", "max_image_value"), - "Convert image to display with specified encodings.\n\n" - "Args:\n" - " - source (numpy.ndarray): input image\n" - " - encoding_in (str): input image encoding\n" - " - encoding_out (str): encoding to which the image conveted\n" - " - do_dynamic_scaling (bool): flag to do dynamic scaling with min/max value\n" - " - min_image_value (float): minimum pixel value for dynamic scaling\n" - " - max_image_value (float): maximum pixel value for dynamic scaling\n" - )); -} diff --git a/src/vision_opencv/cv_bridge/src/module.hpp b/src/vision_opencv/cv_bridge/src/module.hpp deleted file mode 100644 index 035b88a..0000000 --- a/src/vision_opencv/cv_bridge/src/module.hpp +++ /dev/null @@ -1,49 +0,0 @@ -/* - * Copyright 2014 Open 
Source Robotics Foundation, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef CV_BRIDGE_MODULE_HPP_ -#define CV_BRIDGE_MODULE_HPP_ - -#include -#include -#include -#include - -#define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION -#include - -#include - -namespace bp = boost::python; - -int convert_to_CvMat2(const PyObject* o, cv::Mat& m); - -PyObject* pyopencv_from(const cv::Mat& m); - -#if PYTHON3 -static void * do_numpy_import( ) -{ - import_array( ); - return nullptr; -} -#else -static void do_numpy_import( ) -{ - import_array( ); -} -#endif - -#endif diff --git a/src/vision_opencv/cv_bridge/src/module_opencv2.cpp b/src/vision_opencv/cv_bridge/src/module_opencv2.cpp deleted file mode 100644 index 9f0752b..0000000 --- a/src/vision_opencv/cv_bridge/src/module_opencv2.cpp +++ /dev/null @@ -1,262 +0,0 @@ -/********************************************************************* -* Software License Agreement (BSD License) -* -* Copyright (c) 2012, Willow Garage, Inc. -* All rights reserved. -* -* Redistribution and use in source and binary forms, with or without -* modification, are permitted provided that the following conditions -* are met: -* -* * Redistributions of source code must retain the above copyright -* notice, this list of conditions and the following disclaimer. -* * Redistributions in binary form must reproduce the above -* copyright notice, this list of conditions and the following -* disclaimer in the documentation and/or other materials provided -* with the distribution. -* * Neither the name of the Willow Garage nor the names of its -* contributors may be used to endorse or promote products derived -* from this software without specific prior written permission. -* -* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS -* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE -* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, -* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, -* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT -* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN -* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -* POSSIBILITY OF SUCH DAMAGE. -*********************************************************************/ - -#include "module.hpp" - -using namespace cv; - -// These are sucky, sketchy versions of the real things in OpenCV Python, -// inferior in every way. - -static int failmsg(const char *fmt, ...) 
-{ - char str[1000]; - - va_list ap; - va_start(ap, fmt); - vsnprintf(str, sizeof(str), fmt, ap); - va_end(ap); - - PyErr_SetString(PyExc_TypeError, str); - return 0; -} - -static PyObject* opencv_error = 0; - -class PyAllowThreads -{ -public: - PyAllowThreads() : _state(PyEval_SaveThread()) {} - ~PyAllowThreads() - { - PyEval_RestoreThread(_state); - } -private: - PyThreadState* _state; -}; - -#define ERRWRAP2(expr) \ -try \ -{ \ - PyAllowThreads allowThreads; \ - expr; \ -} \ -catch (const cv::Exception &e) \ -{ \ - PyErr_SetString(opencv_error, e.what()); \ - return 0; \ -} - -// Taken from http://stackoverflow.com/questions/19136944/call-c-opencv-functions-from-python-send-a-cv-mat-to-c-dll-which-is-usi - - -static size_t REFCOUNT_OFFSET = ( size_t )&((( PyObject* )0)->ob_refcnt ) + -( 0x12345678 != *( const size_t* )"\x78\x56\x34\x12\0\0\0\0\0" )*sizeof( int ); - - -static inline PyObject* pyObjectFromRefcount( const int* refcount ) -{ -return ( PyObject* )(( size_t )refcount - REFCOUNT_OFFSET ); -} - -static inline int* refcountFromPyObject( const PyObject* obj ) -{ -return ( int* )(( size_t )obj + REFCOUNT_OFFSET ); -} - -class NumpyAllocator : public cv::MatAllocator -{ -public: -NumpyAllocator( ) { } -~NumpyAllocator( ) { } - -void allocate( int dims, const int* sizes, int type, int*& refcount, -uchar*& datastart, uchar*& data, size_t* step ); - -void deallocate( int* refcount, uchar* datastart, uchar* data ); -}; - -void NumpyAllocator::allocate( int dims, const int* sizes, int type, int*& refcount, uchar*& datastart, uchar*& data, size_t* step ) -{ - int depth = CV_MAT_DEPTH( type ); - int cn = CV_MAT_CN( type ); - const int f = ( int )( sizeof( size_t )/8 ); - int typenum = depth == CV_8U ? NPY_UBYTE : depth == CV_8S ? NPY_BYTE : - depth == CV_16U ? NPY_USHORT : depth == CV_16S ? NPY_SHORT : - depth == CV_32S ? NPY_INT : depth == CV_32F ? NPY_FLOAT : - depth == CV_64F ? NPY_DOUBLE : f*NPY_ULONGLONG + (f^1)*NPY_UINT; - int i; - npy_intp _sizes[CV_MAX_DIM+1]; - for( i = 0; i < dims; i++ ) - _sizes[i] = sizes[i]; - if( cn > 1 ) - { - /*if( _sizes[dims-1] == 1 ) - _sizes[dims-1] = cn; - else*/ - _sizes[dims++] = cn; - } - PyObject* o = PyArray_SimpleNew( dims, _sizes, typenum ); - if( !o ) - CV_Error_(CV_StsError, ("The numpy array of typenum=%d, ndims=%d can not be created", typenum, dims)); - refcount = refcountFromPyObject(o); - npy_intp* _strides = PyArray_STRIDES((PyArrayObject*) o); - for( i = 0; i < dims - (cn > 1); i++ ) - step[i] = (size_t)_strides[i]; - datastart = data = (uchar*)PyArray_DATA((PyArrayObject*)o); - -} - -void NumpyAllocator::deallocate( int* refcount, uchar* datastart, uchar* data ) -{ - if( !refcount ) - return; - PyObject* o = pyObjectFromRefcount(refcount); - Py_INCREF(o); - Py_DECREF(o); -} - -// Declare the object -NumpyAllocator g_numpyAllocator; - -int convert_to_CvMat2(const PyObject* o, cv::Mat& m) -{ - // to avoid PyArray_Check() to crash even with valid array - do_numpy_import(); - - if(!o || o == Py_None) - { - if( !m.data ) - m.allocator = &g_numpyAllocator; - return true; - } - - if( !PyArray_Check(o) ) - { - failmsg("Not a numpy array"); - return false; - } - - // NPY_LONG (64 bit) is converted to CV_32S (32 bit) - int typenum = PyArray_TYPE((PyArrayObject*) o); - int type = typenum == NPY_UBYTE ? CV_8U : typenum == NPY_BYTE ? CV_8S : - typenum == NPY_USHORT ? CV_16U : typenum == NPY_SHORT ? CV_16S : - typenum == NPY_INT || typenum == NPY_LONG ? CV_32S : - typenum == NPY_FLOAT ? CV_32F : - typenum == NPY_DOUBLE ? 
CV_64F : -1; - - if( type < 0 ) - { - failmsg("data type = %d is not supported", typenum); - return false; - } - - int ndims = PyArray_NDIM((PyArrayObject*) o); - if(ndims >= CV_MAX_DIM) - { - failmsg("dimensionality (=%d) is too high", ndims); - return false; - } - - int size[CV_MAX_DIM+1]; - size_t step[CV_MAX_DIM+1], elemsize = CV_ELEM_SIZE1(type); - const npy_intp* _sizes = PyArray_DIMS((PyArrayObject*) o); - const npy_intp* _strides = PyArray_STRIDES((PyArrayObject*) o); - bool transposed = false; - - for(int i = 0; i < ndims; i++) - { - size[i] = (int)_sizes[i]; - step[i] = (size_t)_strides[i]; - } - - if( ndims == 0 || step[ndims-1] > elemsize ) { - size[ndims] = 1; - step[ndims] = elemsize; - ndims++; - } - - if( ndims >= 2 && step[0] < step[1] ) - { - std::swap(size[0], size[1]); - std::swap(step[0], step[1]); - transposed = true; - } - - if( ndims == 3 && size[2] <= CV_CN_MAX && step[1] == elemsize*size[2] ) - { - ndims--; - type |= CV_MAKETYPE(0, size[2]); - } - - if( ndims > 2 ) - { - failmsg("more than 2 dimensions"); - return false; - } - - m = cv::Mat(ndims, size, type, PyArray_DATA((PyArrayObject*) o), step); - - if( m.data ) - { - m.refcount = refcountFromPyObject(o); - m.addref(); // protect the original numpy array from deallocation - // (since Mat destructor will decrement the reference counter) - }; - m.allocator = &g_numpyAllocator; - - if( transposed ) - { - cv::Mat tmp; - tmp.allocator = &g_numpyAllocator; - transpose(m, tmp); - m = tmp; - } - return true; -} - -PyObject* pyopencv_from(const Mat& m) -{ - if( !m.data ) - Py_RETURN_NONE; - Mat temp, *p = (Mat*)&m; - if(!p->refcount || p->allocator != &g_numpyAllocator) - { - temp.allocator = &g_numpyAllocator; - ERRWRAP2(m.copyTo(temp)); - p = &temp; - } - p->addref(); - return pyObjectFromRefcount(p->refcount); -} diff --git a/src/vision_opencv/cv_bridge/src/module_opencv3.cpp b/src/vision_opencv/cv_bridge/src/module_opencv3.cpp deleted file mode 100644 index 68c1b20..0000000 --- a/src/vision_opencv/cv_bridge/src/module_opencv3.cpp +++ /dev/null @@ -1,367 +0,0 @@ -// Taken from opencv/modules/python/src2/cv2.cpp - -#include "module.hpp" - -#include "opencv2/core/types_c.h" - -#include "opencv2/opencv_modules.hpp" - -#include "pycompat.hpp" - -static PyObject* opencv_error = 0; - -static int failmsg(const char *fmt, ...) -{ - char str[1000]; - - va_list ap; - va_start(ap, fmt); - vsnprintf(str, sizeof(str), fmt, ap); - va_end(ap); - - PyErr_SetString(PyExc_TypeError, str); - return 0; -} - -struct ArgInfo -{ - const char * name; - bool outputarg; - // more fields may be added if necessary - - ArgInfo(const char * name_, bool outputarg_) - : name(name_) - , outputarg(outputarg_) {} - - // to match with older pyopencv_to function signature - operator const char *() const { return name; } -}; - -class PyAllowThreads -{ -public: - PyAllowThreads() : _state(PyEval_SaveThread()) {} - ~PyAllowThreads() - { - PyEval_RestoreThread(_state); - } -private: - PyThreadState* _state; -}; - -class PyEnsureGIL -{ -public: - PyEnsureGIL() : _state(PyGILState_Ensure()) {} - ~PyEnsureGIL() - { - PyGILState_Release(_state); - } -private: - PyGILState_STATE _state; -}; - -#define ERRWRAP2(expr) \ -try \ -{ \ - PyAllowThreads allowThreads; \ - expr; \ -} \ -catch (const cv::Exception &e) \ -{ \ - PyErr_SetString(opencv_error, e.what()); \ - return 0; \ -} - -using namespace cv; - -static PyObject* failmsgp(const char *fmt, ...) 
-{
-    char str[1000];
-
-    va_list ap;
-    va_start(ap, fmt);
-    vsnprintf(str, sizeof(str), fmt, ap);
-    va_end(ap);
-
-    PyErr_SetString(PyExc_TypeError, str);
-    return 0;
-}
-
-class NumpyAllocator : public MatAllocator
-{
-public:
-    NumpyAllocator() { stdAllocator = Mat::getStdAllocator(); }
-    ~NumpyAllocator() {}
-
-    // Wrap an existing numpy array: record its strides and keep the owning
-    // PyObject in userdata so deallocate() can release it later.
-    UMatData* allocate(PyObject* o, int dims, const int* sizes, int type, size_t* step) const
-    {
-        UMatData* u = new UMatData(this);
-        u->data = u->origdata = (uchar*)PyArray_DATA((PyArrayObject*) o);
-        npy_intp* _strides = PyArray_STRIDES((PyArrayObject*) o);
-        for( int i = 0; i < dims - 1; i++ )
-            step[i] = (size_t)_strides[i];
-        step[dims-1] = CV_ELEM_SIZE(type);
-        u->size = sizes[0]*step[0];
-        u->userdata = o;
-        return u;
-    }
-
-    UMatData* allocate(int dims0, const int* sizes, int type, void* data, size_t* step, int flags, UMatUsageFlags usageFlags) const
-    {
-        if( data != 0 )
-        {
-            CV_Error(Error::StsAssert, "The data should normally be NULL!");
-            // probably this is safe to do in such extreme case
-            return stdAllocator->allocate(dims0, sizes, type, data, step, flags, usageFlags);
-        }
-        PyEnsureGIL gil;
-
-        int depth = CV_MAT_DEPTH(type);
-        int cn = CV_MAT_CN(type);
-        const int f = (int)(sizeof(size_t)/8);
-        int typenum = depth == CV_8U ? NPY_UBYTE : depth == CV_8S ? NPY_BYTE :
-                      depth == CV_16U ? NPY_USHORT : depth == CV_16S ? NPY_SHORT :
-                      depth == CV_32S ? NPY_INT : depth == CV_32F ? NPY_FLOAT :
-                      depth == CV_64F ? NPY_DOUBLE : f*NPY_ULONGLONG + (f^1)*NPY_UINT;
-        int i, dims = dims0;
-        cv::AutoBuffer<npy_intp> _sizes(dims + 1);
-        for( i = 0; i < dims; i++ )
-            _sizes[i] = sizes[i];
-        if( cn > 1 )
-            _sizes[dims++] = cn;
-        PyObject* o = PyArray_SimpleNew(dims, _sizes, typenum);
-        if(!o)
-            CV_Error_(Error::StsError, ("The numpy array of typenum=%d, ndims=%d can not be created", typenum, dims));
-        return allocate(o, dims0, sizes, type, step);
-    }
-
-    bool allocate(UMatData* u, int accessFlags, UMatUsageFlags usageFlags) const
-    {
-        return stdAllocator->allocate(u, accessFlags, usageFlags);
-    }
-
-    // Drop the reference to the owning numpy array; Python then frees the buffer.
-    void deallocate(UMatData* u) const
-    {
-        if(u)
-        {
-            PyEnsureGIL gil;
-            PyObject* o = (PyObject*)u->userdata;
-            Py_XDECREF(o);
-            delete u;
-        }
-    }
-
-    const MatAllocator* stdAllocator;
-};
-
-NumpyAllocator g_numpyAllocator;
-
-
-template<typename T> static
-bool pyopencv_to(PyObject* obj, T& p, const char* name = "<unknown>");
-
-template<typename T> static
-PyObject* pyopencv_from(const T& src);
-
-enum { ARG_NONE = 0, ARG_MAT = 1, ARG_SCALAR = 2 };
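The allocator above is what lets a cv::Mat view a NumPy buffer (and vice versa) without copying: deallocate() merely drops the Python reference it holds. A minimal sketch of the round trip this enables at the Python level, assuming a ROS1 environment where cv_bridge and its compiled bindings are importable:

import numpy as np
from cv_bridge import CvBridge

bridge = CvBridge()
img = np.zeros((480, 640, 3), dtype=np.uint8)

# cv2_to_imgmsg serializes the array into a sensor_msgs/Image;
# imgmsg_to_cv2 hands back a numpy array via the converters below.
msg = bridge.cv2_to_imgmsg(img, encoding='bgr8')
out = bridge.imgmsg_to_cv2(msg, desired_encoding='bgr8')
assert out.shape == img.shape and out.dtype == img.dtype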
-
-// special case, when the convertor needs full ArgInfo structure
-static bool pyopencv_to(PyObject* o, Mat& m, const ArgInfo info)
-{
-    // to avoid PyArray_Check() to crash even with valid array
-    do_numpy_import( );
-
-    bool allowND = true;
-    if(!o || o == Py_None)
-    {
-        if( !m.data )
-            m.allocator = &g_numpyAllocator;
-        return true;
-    }
-
-    if( PyInt_Check(o) )
-    {
-        double v[] = {(double)PyInt_AsLong((PyObject*)o), 0., 0., 0.};
-        m = Mat(4, 1, CV_64F, v).clone();
-        return true;
-    }
-    if( PyFloat_Check(o) )
-    {
-        double v[] = {PyFloat_AsDouble((PyObject*)o), 0., 0., 0.};
-        m = Mat(4, 1, CV_64F, v).clone();
-        return true;
-    }
-    if( PyTuple_Check(o) )
-    {
-        int i, sz = (int)PyTuple_Size((PyObject*)o);
-        m = Mat(sz, 1, CV_64F);
-        for( i = 0; i < sz; i++ )
-        {
-            PyObject* oi = PyTuple_GET_ITEM(o, i);
-            if( PyInt_Check(oi) )
-                m.at<double>(i) = (double)PyInt_AsLong(oi);
-            else if( PyFloat_Check(oi) )
-                m.at<double>(i) = (double)PyFloat_AsDouble(oi);
-            else
-            {
-                failmsg("%s is not a numerical tuple", info.name);
-                m.release();
-                return false;
-            }
-        }
-        return true;
-    }
-
-    if( !PyArray_Check(o) )
-    {
-        failmsg("%s is not a numpy array, neither a scalar", info.name);
-        return false;
-    }
-
-    PyArrayObject* oarr = (PyArrayObject*) o;
-
-    bool needcopy = false, needcast = false;
-    int typenum = PyArray_TYPE(oarr), new_typenum = typenum;
-    int type = typenum == NPY_UBYTE ? CV_8U :
-               typenum == NPY_BYTE ? CV_8S :
-               typenum == NPY_USHORT ? CV_16U :
-               typenum == NPY_SHORT ? CV_16S :
-               typenum == NPY_INT ? CV_32S :
-               typenum == NPY_INT32 ? CV_32S :
-               typenum == NPY_FLOAT ? CV_32F :
-               typenum == NPY_DOUBLE ? CV_64F : -1;
-
-    if( type < 0 )
-    {
-        if( typenum == NPY_INT64 || typenum == NPY_UINT64 || typenum == NPY_LONG )
-        {
-            needcopy = needcast = true;
-            new_typenum = NPY_INT;
-            type = CV_32S;
-        }
-        else
-        {
-            failmsg("%s data type = %d is not supported", info.name, typenum);
-            return false;
-        }
-    }
-
-#ifndef CV_MAX_DIM
-    const int CV_MAX_DIM = 32;
-#endif
-
-    int ndims = PyArray_NDIM(oarr);
-    if(ndims >= CV_MAX_DIM)
-    {
-        failmsg("%s dimensionality (=%d) is too high", info.name, ndims);
-        return false;
-    }
-
-    int size[CV_MAX_DIM+1];
-    size_t step[CV_MAX_DIM+1];
-    size_t elemsize = CV_ELEM_SIZE1(type);
-    const npy_intp* _sizes = PyArray_DIMS(oarr);
-    const npy_intp* _strides = PyArray_STRIDES(oarr);
-    bool ismultichannel = ndims == 3 && _sizes[2] <= CV_CN_MAX;
-
-    for( int i = ndims-1; i >= 0 && !needcopy; i-- )
-    {
-        // these checks handle cases of
-        //  a) multi-dimensional (ndims > 2) arrays, as well as simpler 1- and 2-dimensional cases
-        //  b) transposed arrays, where _strides[] elements go in non-descending order
-        //  c) flipped arrays, where some of _strides[] elements are negative
-        if( (i == ndims-1 && (size_t)_strides[i] != elemsize) ||
-            (i < ndims-1 && _strides[i] < _strides[i+1]) )
-            needcopy = true;
-    }
-
-    if( ismultichannel && _strides[1] != (npy_intp)elemsize*_sizes[2] )
-        needcopy = true;
-
-    if (needcopy)
-    {
-        if (info.outputarg)
-        {
-            failmsg("Layout of the output array %s is incompatible with cv::Mat (step[ndims-1] != elemsize or step[1] != elemsize*nchannels)", info.name);
-            return false;
-        }
-
-        if( needcast ) {
-            o = PyArray_Cast(oarr, new_typenum);
-            oarr = (PyArrayObject*) o;
-        }
-        else {
-            oarr = PyArray_GETCONTIGUOUS(oarr);
-            o = (PyObject*) oarr;
-        }
-
-        _strides = PyArray_STRIDES(oarr);
-    }
-
-    for(int i = 0; i < ndims; i++)
-    {
-        size[i] = (int)_sizes[i];
-        step[i] = (size_t)_strides[i];
-    }
-
-    // handle degenerate case
-    if( ndims == 0) {
-        size[ndims] = 1;
-        step[ndims] = elemsize;
-        ndims++;
-    }
-
-    if( ismultichannel )
-    {
-        ndims--;
-        type |= CV_MAKETYPE(0, size[2]);
-    }
-
-    if( ndims > 2 && !allowND )
-    {
-        failmsg("%s has more than 2 dimensions", info.name);
-        return false;
-    }
-
-    m = Mat(ndims, size, type, PyArray_DATA(oarr), step);
-    m.u = g_numpyAllocator.allocate(o, ndims, size, type, step);
-    m.addref();
-
-    if( !needcopy )
-    {
-        Py_INCREF(o);
-    }
-    m.allocator = &g_numpyAllocator;
-
-    return true;
-}
-
-template<>
-bool pyopencv_to(PyObject* o, Mat& m, const char* name)
-{
-    return pyopencv_to(o, m, ArgInfo(name, 0));
-}
-
-PyObject* pyopencv_from(const Mat& m)
-{
-    if( !m.data )
-        Py_RETURN_NONE;
-    Mat temp, *p = (Mat*)&m;
-    if(!p->u || p->allocator != &g_numpyAllocator)
-    {
-        temp.allocator = &g_numpyAllocator;
-        ERRWRAP2(m.copyTo(temp));
-        p = &temp;
-    }
-    PyObject* o = (PyObject*)p->u->userdata;
-    Py_INCREF(o);
-    return o;
-}
-
-int convert_to_CvMat2(const PyObject* o, cv::Mat& m)
-{
-    pyopencv_to(const_cast<PyObject*>(o), m, "unknown");
-    return 0;
-}
diff --git a/src/vision_opencv/cv_bridge/src/pycompat.hpp
b/src/vision_opencv/cv_bridge/src/pycompat.hpp deleted file mode 100644 index f4ebea6..0000000 --- a/src/vision_opencv/cv_bridge/src/pycompat.hpp +++ /dev/null @@ -1,70 +0,0 @@ -/*M/////////////////////////////////////////////////////////////////////////////////////// -// -// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. -// -// By downloading, copying, installing or using the software you agree to this license. -// If you do not agree to this license, do not download, install, -// copy or use the software. -// -// -// License Agreement -// For Open Source Computer Vision Library -// -// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. -// Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved. -// Third party copyrights are property of their respective owners. -// -// Redistribution and use in source and binary forms, with or without modification, -// are permitted provided that the following conditions are met: -// -// * Redistribution's of source code must retain the above copyright notice, -// this list of conditions and the following disclaimer. -// -// * Redistribution's in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// -// * The name of the copyright holders may not be used to endorse or promote products -// derived from this software without specific prior written permission. -// -// This software is provided by the copyright holders and contributors "as is" and -// any express or implied warranties, including, but not limited to, the implied -// warranties of merchantability and fitness for a particular purpose are disclaimed. -// In no event shall the Intel Corporation or contributors be liable for any direct, -// indirect, incidental, special, exemplary, or consequential damages -// (including, but not limited to, procurement of substitute goods or services; -// loss of use, data, or profits; or business interruption) however caused -// and on any theory of liability, whether in contract, strict liability, -// or tort (including negligence or otherwise) arising in any way out of -// the use of this software, even if advised of the possibility of such damage. -// -//M*/ - -// Defines for Python 2/3 compatibility. -#ifndef __PYCOMPAT_HPP__ -#define __PYCOMPAT_HPP__ - -#if PY_MAJOR_VERSION >= 3 -// Python3 treats all ints as longs, PyInt_X functions have been removed. -#define PyInt_Check PyLong_Check -#define PyInt_CheckExact PyLong_CheckExact -#define PyInt_AsLong PyLong_AsLong -#define PyInt_AS_LONG PyLong_AS_LONG -#define PyInt_FromLong PyLong_FromLong -#define PyNumber_Int PyNumber_Long - -// Python3 strings are unicode, these defines mimic the Python2 functionality. 
-#define PyString_Check PyUnicode_Check -#define PyString_FromString PyUnicode_FromString -#define PyString_FromStringAndSize PyUnicode_FromStringAndSize -#define PyString_Size PyUnicode_GET_SIZE - -// PyUnicode_AsUTF8 isn't available until Python 3.3 -#if (PY_VERSION_HEX < 0x03030000) -#define PyString_AsString _PyUnicode_AsString -#else -#define PyString_AsString PyUnicode_AsUTF8 -#endif -#endif - -#endif // END HEADER GUARD diff --git a/src/vision_opencv/cv_bridge/src/rgb_colors.cpp b/src/vision_opencv/cv_bridge/src/rgb_colors.cpp deleted file mode 100644 index ca2eaaa..0000000 --- a/src/vision_opencv/cv_bridge/src/rgb_colors.cpp +++ /dev/null @@ -1,202 +0,0 @@ -// -*- mode: c++ -*- -/********************************************************************* - * Original color definition is at scikit-image distributed with - * following license disclaimer: - * - * Copyright (C) 2011, the scikit-image team - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * 3. Neither the name of skimage nor the names of its contributors may be - * used to endorse or promote products derived from this software without - * specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR - * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, - * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR - * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, - * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING - * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. 
- *********************************************************************/
-
-#include "cv_bridge/rgb_colors.h"
-#include <opencv2/core/core.hpp>
-
-
-namespace cv_bridge
-{
-
-namespace rgb_colors
-{
-
-  cv::Vec3d getRGBColor(const int color)
-  {
-    cv::Vec3d c;
-    switch (color % 146) {
-      case ALICEBLUE: c = cv::Vec3d(0.941, 0.973, 1); break;
-      case ANTIQUEWHITE: c = cv::Vec3d(0.98, 0.922, 0.843); break;
-      case AQUA: c = cv::Vec3d(0, 1, 1); break;
-      case AQUAMARINE: c = cv::Vec3d(0.498, 1, 0.831); break;
-      case AZURE: c = cv::Vec3d(0.941, 1, 1); break;
-      case BEIGE: c = cv::Vec3d(0.961, 0.961, 0.863); break;
-      case BISQUE: c = cv::Vec3d(1, 0.894, 0.769); break;
-      case BLACK: c = cv::Vec3d(0, 0, 0); break;
-      case BLANCHEDALMOND: c = cv::Vec3d(1, 0.922, 0.804); break;
-      case BLUE: c = cv::Vec3d(0, 0, 1); break;
-      case BLUEVIOLET: c = cv::Vec3d(0.541, 0.169, 0.886); break;
-      case BROWN: c = cv::Vec3d(0.647, 0.165, 0.165); break;
-      case BURLYWOOD: c = cv::Vec3d(0.871, 0.722, 0.529); break;
-      case CADETBLUE: c = cv::Vec3d(0.373, 0.62, 0.627); break;
-      case CHARTREUSE: c = cv::Vec3d(0.498, 1, 0); break;
-      case CHOCOLATE: c = cv::Vec3d(0.824, 0.412, 0.118); break;
-      case CORAL: c = cv::Vec3d(1, 0.498, 0.314); break;
-      case CORNFLOWERBLUE: c = cv::Vec3d(0.392, 0.584, 0.929); break;
-      case CORNSILK: c = cv::Vec3d(1, 0.973, 0.863); break;
-      case CRIMSON: c = cv::Vec3d(0.863, 0.0784, 0.235); break;
-      case CYAN: c = cv::Vec3d(0, 1, 1); break;
-      case DARKBLUE: c = cv::Vec3d(0, 0, 0.545); break;
-      case DARKCYAN: c = cv::Vec3d(0, 0.545, 0.545); break;
-      case DARKGOLDENROD: c = cv::Vec3d(0.722, 0.525, 0.0431); break;
-      case DARKGRAY: c = cv::Vec3d(0.663, 0.663, 0.663); break;
-      case DARKGREEN: c = cv::Vec3d(0, 0.392, 0); break;
-      case DARKGREY: c = cv::Vec3d(0.663, 0.663, 0.663); break;
-      case DARKKHAKI: c = cv::Vec3d(0.741, 0.718, 0.42); break;
-      case DARKMAGENTA: c = cv::Vec3d(0.545, 0, 0.545); break;
-      case DARKOLIVEGREEN: c = cv::Vec3d(0.333, 0.42, 0.184); break;
-      case DARKORANGE: c = cv::Vec3d(1, 0.549, 0); break;
-      case DARKORCHID: c = cv::Vec3d(0.6, 0.196, 0.8); break;
-      case DARKRED: c = cv::Vec3d(0.545, 0, 0); break;
-      case DARKSALMON: c = cv::Vec3d(0.914, 0.588, 0.478); break;
-      case DARKSEAGREEN: c = cv::Vec3d(0.561, 0.737, 0.561); break;
-      case DARKSLATEBLUE: c = cv::Vec3d(0.282, 0.239, 0.545); break;
-      case DARKSLATEGRAY: c = cv::Vec3d(0.184, 0.31, 0.31); break;
-      case DARKSLATEGREY: c = cv::Vec3d(0.184, 0.31, 0.31); break;
-      case DARKTURQUOISE: c = cv::Vec3d(0, 0.808, 0.82); break;
-      case DARKVIOLET: c = cv::Vec3d(0.58, 0, 0.827); break;
-      case DEEPPINK: c = cv::Vec3d(1, 0.0784, 0.576); break;
-      case DEEPSKYBLUE: c = cv::Vec3d(0, 0.749, 1); break;
-      case DIMGRAY: c = cv::Vec3d(0.412, 0.412, 0.412); break;
-      case DIMGREY: c = cv::Vec3d(0.412, 0.412, 0.412); break;
-      case DODGERBLUE: c = cv::Vec3d(0.118, 0.565, 1); break;
-      case FIREBRICK: c = cv::Vec3d(0.698, 0.133, 0.133); break;
-      case FLORALWHITE: c = cv::Vec3d(1, 0.98, 0.941); break;
-      case FORESTGREEN: c = cv::Vec3d(0.133, 0.545, 0.133); break;
-      case FUCHSIA: c = cv::Vec3d(1, 0, 1); break;
-      case GAINSBORO: c = cv::Vec3d(0.863, 0.863, 0.863); break;
-      case GHOSTWHITE: c = cv::Vec3d(0.973, 0.973, 1); break;
-      case GOLD: c = cv::Vec3d(1, 0.843, 0); break;
-      case GOLDENROD: c = cv::Vec3d(0.855, 0.647, 0.125); break;
-      case GRAY: c = cv::Vec3d(0.502, 0.502, 0.502); break;
-      case GREEN: c = cv::Vec3d(0, 0.502, 0); break;
-      case GREENYELLOW: c = cv::Vec3d(0.678, 1, 0.184); break;
-      case GREY: c = cv::Vec3d(0.502, 0.502, 0.502); break;
-      case HONEYDEW: c = cv::Vec3d(0.941, 1, 0.941); break;
-      case HOTPINK: c = cv::Vec3d(1, 0.412, 0.706); break;
-      case INDIANRED: c = cv::Vec3d(0.804, 0.361, 0.361); break;
-      case INDIGO: c = cv::Vec3d(0.294, 0, 0.51); break;
-      case IVORY: c = cv::Vec3d(1, 1, 0.941); break;
-      case KHAKI: c = cv::Vec3d(0.941, 0.902, 0.549); break;
-      case LAVENDER: c = cv::Vec3d(0.902, 0.902, 0.98); break;
-      case LAVENDERBLUSH: c = cv::Vec3d(1, 0.941, 0.961); break;
-      case LAWNGREEN: c = cv::Vec3d(0.486, 0.988, 0); break;
-      case LEMONCHIFFON: c = cv::Vec3d(1, 0.98, 0.804); break;
-      case LIGHTBLUE: c = cv::Vec3d(0.678, 0.847, 0.902); break;
-      case LIGHTCORAL: c = cv::Vec3d(0.941, 0.502, 0.502); break;
-      case LIGHTCYAN: c = cv::Vec3d(0.878, 1, 1); break;
-      case LIGHTGOLDENRODYELLOW: c = cv::Vec3d(0.98, 0.98, 0.824); break;
-      case LIGHTGRAY: c = cv::Vec3d(0.827, 0.827, 0.827); break;
-      case LIGHTGREEN: c = cv::Vec3d(0.565, 0.933, 0.565); break;
-      case LIGHTGREY: c = cv::Vec3d(0.827, 0.827, 0.827); break;
-      case LIGHTPINK: c = cv::Vec3d(1, 0.714, 0.757); break;
-      case LIGHTSALMON: c = cv::Vec3d(1, 0.627, 0.478); break;
-      case LIGHTSEAGREEN: c = cv::Vec3d(0.125, 0.698, 0.667); break;
-      case LIGHTSKYBLUE: c = cv::Vec3d(0.529, 0.808, 0.98); break;
-      case LIGHTSLATEGRAY: c = cv::Vec3d(0.467, 0.533, 0.6); break;
-      case LIGHTSLATEGREY: c = cv::Vec3d(0.467, 0.533, 0.6); break;
-      case LIGHTSTEELBLUE: c = cv::Vec3d(0.69, 0.769, 0.871); break;
-      case LIGHTYELLOW: c = cv::Vec3d(1, 1, 0.878); break;
-      case LIME: c = cv::Vec3d(0, 1, 0); break;
-      case LIMEGREEN: c = cv::Vec3d(0.196, 0.804, 0.196); break;
-      case LINEN: c = cv::Vec3d(0.98, 0.941, 0.902); break;
-      case MAGENTA: c = cv::Vec3d(1, 0, 1); break;
-      case MAROON: c = cv::Vec3d(0.502, 0, 0); break;
-      case MEDIUMAQUAMARINE: c = cv::Vec3d(0.4, 0.804, 0.667); break;
-      case MEDIUMBLUE: c = cv::Vec3d(0, 0, 0.804); break;
-      case MEDIUMORCHID: c = cv::Vec3d(0.729, 0.333, 0.827); break;
-      case MEDIUMPURPLE: c = cv::Vec3d(0.576, 0.439, 0.859); break;
-      case MEDIUMSEAGREEN: c = cv::Vec3d(0.235, 0.702, 0.443); break;
-      case MEDIUMSLATEBLUE: c = cv::Vec3d(0.482, 0.408, 0.933); break;
-      case MEDIUMSPRINGGREEN: c = cv::Vec3d(0, 0.98, 0.604); break;
-      case MEDIUMTURQUOISE: c = cv::Vec3d(0.282, 0.82, 0.8); break;
-      case MEDIUMVIOLETRED: c = cv::Vec3d(0.78, 0.0824, 0.522); break;
-      case MIDNIGHTBLUE: c = cv::Vec3d(0.098, 0.098, 0.439); break;
-      case MINTCREAM: c = cv::Vec3d(0.961, 1, 0.98); break;
-      case MISTYROSE: c = cv::Vec3d(1, 0.894, 0.882); break;
-      case MOCCASIN: c = cv::Vec3d(1, 0.894, 0.71); break;
-      case NAVAJOWHITE: c = cv::Vec3d(1, 0.871, 0.678); break;
-      case NAVY: c = cv::Vec3d(0, 0, 0.502); break;
-      case OLDLACE: c = cv::Vec3d(0.992, 0.961, 0.902); break;
-      case OLIVE: c = cv::Vec3d(0.502, 0.502, 0); break;
-      case OLIVEDRAB: c = cv::Vec3d(0.42, 0.557, 0.137); break;
-      case ORANGE: c = cv::Vec3d(1, 0.647, 0); break;
-      case ORANGERED: c = cv::Vec3d(1, 0.271, 0); break;
-      case ORCHID: c = cv::Vec3d(0.855, 0.439, 0.839); break;
-      case PALEGOLDENROD: c = cv::Vec3d(0.933, 0.91, 0.667); break;
-      case PALEGREEN: c = cv::Vec3d(0.596, 0.984, 0.596); break;
-      case PALETURQUOISE: c = cv::Vec3d(0.686, 0.933, 0.933); break;
-      case PALEVIOLETRED: c = cv::Vec3d(0.859, 0.439, 0.576); break;
-      case PAPAYAWHIP: c = cv::Vec3d(1, 0.937, 0.835); break;
-      case PEACHPUFF: c = cv::Vec3d(1, 0.855, 0.725); break;
-      case PERU: c = cv::Vec3d(0.804, 0.522, 0.247); break;
-      case PINK: c = cv::Vec3d(1, 0.753, 0.796); break;
-      case PLUM: c = cv::Vec3d(0.867, 0.627, 0.867); break;
-      case POWDERBLUE: c = cv::Vec3d(0.69, 0.878, 0.902); break;
-      case PURPLE: c = cv::Vec3d(0.502, 0, 0.502); break;
-      case RED: c = cv::Vec3d(1, 0, 0); break;
-      case ROSYBROWN: c = cv::Vec3d(0.737, 0.561, 0.561); break;
-      case ROYALBLUE: c = cv::Vec3d(0.255, 0.412, 0.882); break;
-      case SADDLEBROWN: c = cv::Vec3d(0.545, 0.271, 0.0745); break;
-      case SALMON: c = cv::Vec3d(0.98, 0.502, 0.447); break;
-      case SANDYBROWN: c = cv::Vec3d(0.98, 0.643, 0.376); break;
-      case SEAGREEN: c = cv::Vec3d(0.18, 0.545, 0.341); break;
-      case SEASHELL: c = cv::Vec3d(1, 0.961, 0.933); break;
-      case SIENNA: c = cv::Vec3d(0.627, 0.322, 0.176); break;
-      case SILVER: c = cv::Vec3d(0.753, 0.753, 0.753); break;
-      case SKYBLUE: c = cv::Vec3d(0.529, 0.808, 0.922); break;
-      case SLATEBLUE: c = cv::Vec3d(0.416, 0.353, 0.804); break;
-      case SLATEGRAY: c = cv::Vec3d(0.439, 0.502, 0.565); break;
-      case SLATEGREY: c = cv::Vec3d(0.439, 0.502, 0.565); break;
-      case SNOW: c = cv::Vec3d(1, 0.98, 0.98); break;
-      case SPRINGGREEN: c = cv::Vec3d(0, 1, 0.498); break;
-      case STEELBLUE: c = cv::Vec3d(0.275, 0.51, 0.706); break;
-      case TAN: c = cv::Vec3d(0.824, 0.706, 0.549); break;
-      case TEAL: c = cv::Vec3d(0, 0.502, 0.502); break;
-      case THISTLE: c = cv::Vec3d(0.847, 0.749, 0.847); break;
-      case TOMATO: c = cv::Vec3d(1, 0.388, 0.278); break;
-      case TURQUOISE: c = cv::Vec3d(0.251, 0.878, 0.816); break;
-      case VIOLET: c = cv::Vec3d(0.933, 0.51, 0.933); break;
-      case WHEAT: c = cv::Vec3d(0.961, 0.871, 0.702); break;
-      case WHITE: c = cv::Vec3d(1, 1, 1); break;
-      case WHITESMOKE: c = cv::Vec3d(0.961, 0.961, 0.961); break;
-      case YELLOW: c = cv::Vec3d(1, 1, 0); break;
-      case YELLOWGREEN: c = cv::Vec3d(0.604, 0.804, 0.196); break;
-    } // switch
-    return c;
-  }
-
-} // namespace rgb_colors
-
-} // namespace cv_bridge
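The table above backs cv_bridge's label-image visualization: cvtColorForDisplay assigns each label value a color (modulo the 146 entries). A short sketch of that path, assuming the compiled Python bindings are importable:

import numpy as np
import cv_bridge

label = np.array([[0, 1], [2, 3]], dtype=np.int32)  # four distinct labels
viz = cv_bridge.cvtColorForDisplay(label, '32SC1', 'bgr8')
assert viz.dtype == np.uint8 and viz.shape == (2, 2, 3)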
diff --git a/src/vision_opencv/cv_bridge/test/CMakeLists.txt b/src/vision_opencv/cv_bridge/test/CMakeLists.txt
deleted file mode 100644
index 7497d45..0000000
--- a/src/vision_opencv/cv_bridge/test/CMakeLists.txt
+++ /dev/null
@@ -1,15 +0,0 @@
-# add the tests
-
-# add boost directories for now
-include_directories("../src")
-
-catkin_add_gtest(${PROJECT_NAME}-utest test_endian.cpp test_compression.cpp utest.cpp utest2.cpp test_rgb_colors.cpp)
-target_link_libraries(${PROJECT_NAME}-utest
-                      ${PROJECT_NAME}
-                      ${OpenCV_LIBRARIES}
-                      ${catkin_LIBRARIES}
-)
-
-catkin_add_nosetests(enumerants.py)
-catkin_add_nosetests(conversions.py)
-catkin_add_nosetests(python_bindings.py)
diff --git a/src/vision_opencv/cv_bridge/test/conversions.py b/src/vision_opencv/cv_bridge/test/conversions.py
deleted file mode 100644
index bde1380..0000000
--- a/src/vision_opencv/cv_bridge/test/conversions.py
+++ /dev/null
@@ -1,87 +0,0 @@
-#!/usr/bin/env python
-import rosunit
-import unittest
-
-import numpy as np
-
-import sensor_msgs.msg
-
-from cv_bridge import CvBridge, CvBridgeError
-
-class TestConversions(unittest.TestCase):
-
-    def test_mono16_cv2(self):
-        import numpy as np
-        br = CvBridge()
-        im = np.uint8(np.random.randint(0, 255, size=(480, 640, 3)))
-        self.assertRaises(CvBridgeError, lambda: br.imgmsg_to_cv2(br.cv2_to_imgmsg(im), "mono16"))
-        br.imgmsg_to_cv2(br.cv2_to_imgmsg(im, "rgb8"), "mono16")
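As the test above pins down, an 8UC3 message only converts to mono16 once it carries an explicit color encoding. A standalone sketch of the same rule (assumes cv_bridge is importable):

import numpy as np
from cv_bridge import CvBridge, CvBridgeError

br = CvBridge()
im = np.zeros((480, 640, 3), dtype=np.uint8)

try:
    # passthrough encoding "8UC3" has no color semantics -> rejected
    br.imgmsg_to_cv2(br.cv2_to_imgmsg(im), 'mono16')
except CvBridgeError:
    pass

# tagging the message as rgb8 makes the conversion well defined
mono16 = br.imgmsg_to_cv2(br.cv2_to_imgmsg(im, 'rgb8'), 'mono16')
assert mono16.dtype == np.uint16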
-
-    def test_encode_decode_cv2(self):
-        import cv2
-        import numpy as np
-        fmts = [cv2.CV_8U, cv2.CV_8S, cv2.CV_16U, cv2.CV_16S, cv2.CV_32S, cv2.CV_32F, cv2.CV_64F]
-
-        cvb_en = CvBridge()
-        cvb_de = CvBridge()
-
-        for w in range(100, 800, 100):
-            for h in range(100, 800, 100):
-                for f in fmts:
-                    for channels in ([], 1, 2, 3, 4, 5):
-                        if channels == []:
-                            original = np.uint8(np.random.randint(0, 255, size=(h, w)))
-                        else:
-                            original = np.uint8(np.random.randint(0, 255, size=(h, w, channels)))
-                        rosmsg = cvb_en.cv2_to_imgmsg(original)
-                        newimg = cvb_de.imgmsg_to_cv2(rosmsg)
-
-                        self.assert_(original.dtype == newimg.dtype)
-                        if channels == 1:
-                            # in that case, a gray image has a shape of size 2
-                            self.assert_(original.shape[:2] == newimg.shape[:2])
-                        else:
-                            self.assert_(original.shape == newimg.shape)
-                        self.assert_(len(original.tostring()) == len(newimg.tostring()))
-
-    def test_encode_decode_cv2_compressed(self):
-        import numpy as np
-        # from: http://docs.opencv.org/2.4/modules/highgui/doc/reading_and_writing_images_and_video.html#Mat imread(const string& filename, int flags)
-        # NOTE: jp2 (a.k.a. JPEG2000) is removed because its JASPER codec is disabled in the
-        # Ubuntu opencv packages due to security issues; it works once you rebuild your opencv
-        # library with JASPER enabled
-        formats = ["jpg", "jpeg", "jpe", "png", "bmp", "dib", "ppm", "pgm", "pbm",
-                   "sr", "ras", "tif", "tiff"]  # formats rviz does not support
-
-        cvb_en = CvBridge()
-        cvb_de = CvBridge()
-
-        for w in range(100, 800, 100):
-            for h in range(100, 800, 100):
-                for f in formats:
-                    for channels in ([], 1, 3):
-                        if channels == []:
-                            original = np.uint8(np.random.randint(0, 255, size=(h, w)))
-                        else:
-                            original = np.uint8(np.random.randint(0, 255, size=(h, w, channels)))
-                        compress_rosmsg = cvb_en.cv2_to_compressed_imgmsg(original, f)
-                        newimg = cvb_de.compressed_imgmsg_to_cv2(compress_rosmsg)
-                        self.assert_(original.dtype == newimg.dtype)
-                        if channels == 1:
-                            # in that case, a gray image has a shape of size 2
-                            self.assert_(original.shape[:2] == newimg.shape[:2])
-                        else:
-                            self.assert_(original.shape == newimg.shape)
-                        self.assert_(len(original.tostring()) == len(newimg.tostring()))
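A condensed version of the compressed round trip above, restricted to one lossless format so the pixels must come back exactly (assumes cv_bridge is importable):

import numpy as np
from cv_bridge import CvBridge

bridge = CvBridge()
img = np.random.randint(0, 256, size=(10, 10, 3), dtype=np.uint8)

msg = bridge.cv2_to_compressed_imgmsg(img, dst_format='png')  # PNG is lossless
out = bridge.compressed_imgmsg_to_cv2(msg)
assert out.dtype == img.dtype and (out == img).all()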
-
-    def test_endianness(self):
-        br = CvBridge()
-        dtype = np.dtype('int32')
-        # Set to big endian.
-        dtype = dtype.newbyteorder('>')
-        img = np.random.randint(0, 255, size=(30, 40))
-        msg = br.cv2_to_imgmsg(img.astype(dtype))
-        self.assert_(msg.is_bigendian == True)
-        self.assert_((br.imgmsg_to_cv2(msg) == img).all())
-
-if __name__ == '__main__':
-    rosunit.unitrun('opencv_tests', 'conversions', TestConversions)
diff --git a/src/vision_opencv/cv_bridge/test/enumerants.py b/src/vision_opencv/cv_bridge/test/enumerants.py
deleted file mode 100644
index bdcc7a8..0000000
--- a/src/vision_opencv/cv_bridge/test/enumerants.py
+++ /dev/null
@@ -1,47 +0,0 @@
-#!/usr/bin/env python
-import rosunit
-import unittest
-
-import numpy as np
-import cv2
-
-import sensor_msgs.msg
-
-from cv_bridge import CvBridge, CvBridgeError, getCvType
-
-class TestEnumerants(unittest.TestCase):
-
-    def test_enumerants_cv2(self):
-        img_msg = sensor_msgs.msg.Image()
-        img_msg.width = 640
-        img_msg.height = 480
-        img_msg.encoding = "rgba8"
-        img_msg.step = 640*4
-        img_msg.data = (640 * 480) * "1234"
-
-        bridge_ = CvBridge()
-        cvim = bridge_.imgmsg_to_cv2(img_msg, "rgb8")
-        import sys
-        self.assertEqual(sys.getrefcount(cvim), 2)
-
-        # A 3 channel image cannot be sent as an rgba8
-        self.assertRaises(CvBridgeError, lambda: bridge_.cv2_to_imgmsg(cvim, "rgba8"))
-
-        # but it can be sent as rgb8 and bgr8
-        bridge_.cv2_to_imgmsg(cvim, "rgb8")
-        bridge_.cv2_to_imgmsg(cvim, "bgr8")
-
-        self.assertTrue(getCvType("32FC4") == cv2.CV_32FC4)
-        self.assertTrue(getCvType("8UC1") == cv2.CV_8UC1)
-        self.assertTrue(getCvType("8U") == cv2.CV_8UC1)
-
-    def test_numpy_types(self):
-        import cv2
-        import numpy as np
-        bridge_ = CvBridge()
-        self.assertRaises(TypeError, lambda: bridge_.cv2_to_imgmsg(1, "rgba8"))
-        if hasattr(cv2, 'cv'):
-            self.assertRaises(TypeError, lambda: bridge_.cv2_to_imgmsg(cv2.cv(), "rgba8"))
-
-if __name__ == '__main__':
-    rosunit.unitrun('opencv_tests', 'enumerants', TestEnumerants)
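The getCvType checks made live above can be tried directly; the cv2.CV_* names are the OpenCV type constants the encoding strings map onto (assumes cv_bridge and cv2 are importable):

import cv2
from cv_bridge import getCvType

assert getCvType('8UC1') == cv2.CV_8UC1    # explicit type string
assert getCvType('8U') == cv2.CV_8UC1      # bare depth defaults to one channel
assert getCvType('32FC4') == cv2.CV_32FC4  # float depth, four channels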
diff --git a/src/vision_opencv/cv_bridge/test/python_bindings.py b/src/vision_opencv/cv_bridge/test/python_bindings.py
deleted file mode 100644
index 6dd3209..0000000
--- a/src/vision_opencv/cv_bridge/test/python_bindings.py
+++ /dev/null
@@ -1,35 +0,0 @@
-from nose.tools import assert_equal
-import numpy as np
-
-import cv_bridge
-
-
-def test_cvtColorForDisplay():
-    # convert label image to display
-    label = np.zeros((480, 640), dtype=np.int32)
-    height, width = label.shape[:2]
-    label_value = 0
-    grid_num_y, grid_num_x = 3, 4
-    for grid_row in xrange(grid_num_y):
-        grid_size_y = height / grid_num_y
-        min_y = grid_size_y * grid_row
-        max_y = min_y + grid_size_y
-        for grid_col in xrange(grid_num_x):
-            grid_size_x = width / grid_num_x
-            min_x = grid_size_x * grid_col
-            max_x = min_x + grid_size_x
-            label[min_y:max_y, min_x:max_x] = label_value
-            label_value += 1
-    label_viz = cv_bridge.cvtColorForDisplay(label, '32SC1', 'bgr8')
-    assert_equal(label_viz.dtype, np.uint8)
-    assert_equal(label_viz.min(), 0)
-    assert_equal(label_viz.max(), 255)
-
-    # Check that mono8 conversion returns the right shape.
-    bridge = cv_bridge.CvBridge()
-    mono = np.random.random((100, 100)) * 255
-    mono = mono.astype(np.uint8)
-
-    input_msg = bridge.cv2_to_imgmsg(mono, encoding='mono8')
-    output = bridge.imgmsg_to_cv2(input_msg, desired_encoding='mono8')
-    assert_equal(output.shape, (100,100))
diff --git a/src/vision_opencv/cv_bridge/test/test_compression.cpp b/src/vision_opencv/cv_bridge/test/test_compression.cpp
deleted file mode 100644
index 5be403e..0000000
--- a/src/vision_opencv/cv_bridge/test/test_compression.cpp
+++ /dev/null
@@ -1,36 +0,0 @@
-#include <cv_bridge/cv_bridge.h>
-#include <gtest/gtest.h>
-#include <sensor_msgs/CompressedImage.h>
-
-TEST(CvBridgeTest, compression)
-{
-  cv::RNG rng(0);
-  std_msgs::Header header;
-
-  // Test 3 channel images.
-  for (int i = 0; i < 2; ++i)
-  {
-    const std::string format = (i == 0) ? "bgr8" : "rgb8";
-    cv::Mat_<cv::Vec3b> in(10, 10);
-    rng.fill(in, cv::RNG::UNIFORM, 0, 256);
-
-    sensor_msgs::CompressedImagePtr msg = cv_bridge::CvImage(header, format, in).toCompressedImageMsg(cv_bridge::PNG);
-    const cv_bridge::CvImageConstPtr out = cv_bridge::toCvCopy(msg, format);
-
-    EXPECT_EQ(out->image.channels(), 3);
-    EXPECT_EQ(cv::norm(out->image, in), 0);
-  }
-
-  // Test 4 channel images.
-  for (int i = 0; i < 2; ++i)
-  {
-    const std::string format = (i == 0) ? "bgra8" : "rgba8";
-    cv::Mat_<cv::Vec4b> in(10, 10);
-    rng.fill(in, cv::RNG::UNIFORM, 0, 256);
-
-    sensor_msgs::CompressedImagePtr msg = cv_bridge::CvImage(header, format, in).toCompressedImageMsg(cv_bridge::PNG);
-    const cv_bridge::CvImageConstPtr out = cv_bridge::toCvCopy(msg, format);
-    EXPECT_EQ(out->image.channels(), 4);
-    EXPECT_EQ(cv::norm(out->image, in), 0);
-  }
-}
diff --git a/src/vision_opencv/cv_bridge/test/test_endian.cpp b/src/vision_opencv/cv_bridge/test/test_endian.cpp
deleted file mode 100644
index 58a1f50..0000000
--- a/src/vision_opencv/cv_bridge/test/test_endian.cpp
+++ /dev/null
@@ -1,38 +0,0 @@
-#include "boost/endian/conversion.hpp"
-#include <boost/make_shared.hpp>
-#include <cv_bridge/cv_bridge.h>
-#include <gtest/gtest.h>
-
-TEST(CvBridgeTest, endianness)
-{
-  using namespace boost::endian;
-
-  // Create an image of the type opposite to the platform
-  sensor_msgs::Image msg;
-  msg.height = 1;
-  msg.width = 1;
-  msg.encoding = "32SC2";
-  msg.step = 8;
-
-  msg.data.resize(msg.step);
-  int32_t* data = reinterpret_cast<int32_t*>(&msg.data[0]);
-
-  // Write 1 and 2 in order, but with an endianness opposite to the platform
-  if (order::native == order::little)
-  {
-    msg.is_bigendian = true;
-    *(data++) = native_to_big(static_cast<int32_t>(1));
-    *data = native_to_big(static_cast<int32_t>(2));
-  } else {
-    msg.is_bigendian = false;
-    *(data++) = native_to_little(static_cast<int32_t>(1));
-    *data = native_to_little(static_cast<int32_t>(2));
-  }
-
-  // Make sure the values are still the same
-  cv_bridge::CvImageConstPtr img = cv_bridge::toCvShare(boost::make_shared<sensor_msgs::Image>(msg));
-  EXPECT_EQ(img->image.at<cv::Vec2i>(0, 0)[0], 1);
-  EXPECT_EQ(img->image.at<cv::Vec2i>(0, 0)[1], 2);
-  // Make sure we cannot share data
-  EXPECT_NE(img->image.data, &msg.data[0]);
-}
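The same contract seen from Python, where a byte-swapped NumPy dtype plays the role of the opposite-endian buffer (assumes cv_bridge; on a little-endian host the '>' byte order is the foreign one):

import numpy as np
from cv_bridge import CvBridge

br = CvBridge()
big = np.dtype('int32').newbyteorder('>')
img = np.arange(12, dtype=np.int32).reshape(3, 4).astype(big)

msg = br.cv2_to_imgmsg(img)          # encoding inferred as 32SC1
assert msg.is_bigendian              # flag follows the dtype's byte order
assert (br.imgmsg_to_cv2(msg) == img).all()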
diff --git a/src/vision_opencv/cv_bridge/test/test_rgb_colors.cpp b/src/vision_opencv/cv_bridge/test/test_rgb_colors.cpp
deleted file mode 100644
index 090e667..0000000
--- a/src/vision_opencv/cv_bridge/test/test_rgb_colors.cpp
+++ /dev/null
@@ -1,19 +0,0 @@
-#include "cv_bridge/rgb_colors.h"
-#include <opencv2/core/core.hpp>
-#include <gtest/gtest.h>
-
-
-TEST(RGBColors, testGetRGBColor)
-{
-  cv::Vec3d color;
-  // red
-  color = cv_bridge::rgb_colors::getRGBColor(cv_bridge::rgb_colors::RED);
-  EXPECT_EQ(1, color[0]);
-  EXPECT_EQ(0, color[1]);
-  EXPECT_EQ(0, color[2]);
-  // gray
-  color = cv_bridge::rgb_colors::getRGBColor(cv_bridge::rgb_colors::GRAY);
-  EXPECT_EQ(0.502, color[0]);
-  EXPECT_EQ(0.502, color[1]);
-  EXPECT_EQ(0.502, color[2]);
-}
diff --git a/src/vision_opencv/cv_bridge/test/utest.cpp b/src/vision_opencv/cv_bridge/test/utest.cpp
deleted file mode 100644
index d74be56..0000000
--- a/src/vision_opencv/cv_bridge/test/utest.cpp
+++ /dev/null
@@ -1,141 +0,0 @@
-#include "cv_bridge/cv_bridge.h"
-#include <sensor_msgs/image_encodings.h>
-#include <gtest/gtest.h>
-
-
-// Tests conversion of non-continuous cv::Mat. #5206
-TEST(CvBridgeTest, NonContinuous)
-{
-  cv::Mat full = cv::Mat::eye(8, 8, CV_16U);
-  cv::Mat partial = full.colRange(2, 5);
-
-  cv_bridge::CvImage cvi;
-  cvi.encoding = sensor_msgs::image_encodings::MONO16;
-  cvi.image = partial;
-
-  sensor_msgs::ImagePtr msg = cvi.toImageMsg();
-  EXPECT_EQ(msg->height, 8);
-  EXPECT_EQ(msg->width, 3);
-  EXPECT_EQ(msg->encoding, cvi.encoding);
-  EXPECT_EQ(msg->step, 6);
-}
-
-TEST(CvBridgeTest, ChannelOrder)
-{
-  cv::Mat_<uint16_t> mat(200, 200);
-  mat.setTo(cv::Scalar(1000,0,0,0));
-  sensor_msgs::ImagePtr image(new sensor_msgs::Image());
-
-  image = cv_bridge::CvImage(image->header, sensor_msgs::image_encodings::MONO16, mat).toImageMsg();
-
-  cv_bridge::CvImageConstPtr cv_ptr = cv_bridge::toCvShare(image);
-
-  cv_bridge::CvImagePtr res = cv_bridge::cvtColor(cv_ptr, sensor_msgs::image_encodings::BGR8);
-  EXPECT_EQ(res->encoding, sensor_msgs::image_encodings::BGR8);
-  EXPECT_EQ(res->image.type(), cv_bridge::getCvType(res->encoding));
-  EXPECT_EQ(res->image.channels(), sensor_msgs::image_encodings::numChannels(res->encoding));
-  EXPECT_EQ(res->image.depth(), CV_8U);
-
-  // The matrix should be the following
-  cv::Mat_<cv::Vec3b> gt(200, 200);
-  gt.setTo(cv::Scalar(1, 1, 1)*1000.*255./65535.);
-
-  ASSERT_EQ(res->image.type(), gt.type());
-  EXPECT_EQ(cv::norm(res->image, gt, cv::NORM_INF), 0);
-}
-
-TEST(CvBridgeTest, initialization)
-{
-  sensor_msgs::Image image;
-  cv_bridge::CvImagePtr cv_ptr;
-
-  image.encoding = "bgr8";
-  image.height = 200;
-  image.width = 200;
-
-  try {
-    cv_ptr = cv_bridge::toCvCopy(image, "mono8");
-    // Before the fix, it would never get here, as it would segfault
-    EXPECT_EQ(1, 0);
-  } catch (cv_bridge::Exception& e) {
-    EXPECT_EQ(1, 1);
-  }
-
-  // Check some normal images with different ratios
-  for(int height = 100; height <= 300; ++height) {
-    image.encoding = sensor_msgs::image_encodings::RGB8;
-    image.step = image.width*3;
-    image.data.resize(image.height*image.step);
-    cv_ptr = cv_bridge::toCvCopy(image, "mono8");
-  }
-}
-
-TEST(CvBridgeTest, imageMessageStep)
-{
-  // Test 1: image step is padded
-  sensor_msgs::Image image;
-  cv_bridge::CvImagePtr cv_ptr;
-
-  image.encoding = "mono8";
-  image.height = 220;
-  image.width = 200;
-  image.is_bigendian = false;
-  image.step = 208;
-
-  image.data.resize(image.height*image.step);
-
-  ASSERT_NO_THROW(cv_ptr = cv_bridge::toCvCopy(image, "mono8"));
-  ASSERT_EQ(220, cv_ptr->image.rows);
-  ASSERT_EQ(200, cv_ptr->image.cols);
-  //OpenCV copyTo argument removes the stride
-  ASSERT_EQ(200, cv_ptr->image.step[0]);
-
-  //Test 2: image step is invalid
-  image.step = 199;
-
-  ASSERT_THROW(cv_ptr = cv_bridge::toCvCopy(image, "mono8"), cv_bridge::Exception);
-
-  //Test 3: image step == image.width * element size * number of channels
-  image.step = 200;
-  image.data.resize(image.height*image.step);
-
-  ASSERT_NO_THROW(cv_ptr = cv_bridge::toCvCopy(image, "mono8"));
-  ASSERT_EQ(220, cv_ptr->image.rows);
-  ASSERT_EQ(200, cv_ptr->image.cols);
-  ASSERT_EQ(200, cv_ptr->image.step[0]);
-}
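A rough Python counterpart to the padded-step case above: a strided view goes into a message and comes back as a compact array (assumes cv_bridge; the C++ test exercises toCvCopy directly):

import numpy as np
from cv_bridge import CvBridge

br = CvBridge()
padded = np.zeros((220, 208), dtype=np.uint8)  # 208-byte row stride
view = padded[:, :200]                         # 200-pixel-wide view keeps the stride

msg = br.cv2_to_imgmsg(view, 'mono8')          # data is re-packed row by row
out = br.imgmsg_to_cv2(msg, 'mono8')
assert out.shape == (220, 200)                 # stride padding is gone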
-
-TEST(CvBridgeTest, imageMessageConversion)
-{
-  sensor_msgs::Image imgmsg;
-  cv_bridge::CvImagePtr cv_ptr;
-  imgmsg.height = 220;
-  imgmsg.width = 200;
-  imgmsg.is_bigendian = false;
-
-  // image with data type float32 and 1 channel
-  imgmsg.encoding = "32FC1";
-  imgmsg.step = imgmsg.width * 32 / 8 * 1;
-  imgmsg.data.resize(imgmsg.height * imgmsg.step);
-  ASSERT_NO_THROW(cv_ptr = cv_bridge::toCvCopy(imgmsg, imgmsg.encoding));
-  ASSERT_EQ(imgmsg.height, cv_ptr->image.rows);
-  ASSERT_EQ(imgmsg.width, cv_ptr->image.cols);
-  ASSERT_EQ(1, cv_ptr->image.channels());
-  ASSERT_EQ(imgmsg.step, cv_ptr->image.step[0]);
-
-  // image with data type float32 and 10 channels
-  imgmsg.encoding = "32FC10";
-  imgmsg.step = imgmsg.width * 32 / 8 * 10;
-  imgmsg.data.resize(imgmsg.height * imgmsg.step);
-  ASSERT_NO_THROW(cv_ptr = cv_bridge::toCvCopy(imgmsg, imgmsg.encoding));
-  ASSERT_EQ(imgmsg.height, cv_ptr->image.rows);
-  ASSERT_EQ(imgmsg.width, cv_ptr->image.cols);
-  ASSERT_EQ(10, cv_ptr->image.channels());
-  ASSERT_EQ(imgmsg.step, cv_ptr->image.step[0]);
-}
-
-int main(int argc, char** argv)
-{
-  testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}
diff --git a/src/vision_opencv/cv_bridge/test/utest2.cpp b/src/vision_opencv/cv_bridge/test/utest2.cpp
deleted file mode 100644
index d41c976..0000000
--- a/src/vision_opencv/cv_bridge/test/utest2.cpp
+++ /dev/null
@@ -1,150 +0,0 @@
-/*********************************************************************
-* Software License Agreement (BSD License)
-*
-* Copyright (c) 2009, Willow Garage, Inc.
-* All rights reserved.
-*
-* Redistribution and use in source and binary forms, with or without
-* modification, are permitted provided that the following conditions
-* are met:
-*
-*  * Redistributions of source code must retain the above copyright
-*    notice, this list of conditions and the following disclaimer.
-*  * Redistributions in binary form must reproduce the above
-*    copyright notice, this list of conditions and the following
-*    disclaimer in the documentation and/or other materials provided
-*    with the distribution.
-*  * Neither the name of the Willow Garage nor the names of its
-*    contributors may be used to endorse or promote products derived
-*    from this software without specific prior written permission.
-*
-* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
-* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
-* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
-* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
-* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
-* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-* POSSIBILITY OF SUCH DAMAGE.
-*********************************************************************/
-
-#include <iostream>
-#include <string>
-#include <vector>
-
-#include "opencv2/core/core.hpp"
-
-#include "cv_bridge/cv_bridge.h"
-#include <sensor_msgs/image_encodings.h>
-#include <gtest/gtest.h>
-
-using namespace sensor_msgs::image_encodings;
-
-bool isUnsigned(const std::string & encoding) {
-  return encoding == RGB8 || encoding == RGBA8 || encoding == RGB16 || encoding == RGBA16 || encoding == BGR8 || encoding == BGRA8 || encoding == BGR16 || encoding == BGRA16 || encoding == MONO8 || encoding == MONO16 ||
-         encoding == MONO8 || encoding == MONO16 || encoding == TYPE_8UC1 || encoding == TYPE_8UC2 || encoding == TYPE_8UC3 || encoding == TYPE_8UC4 ||
-         encoding == TYPE_16UC1 || encoding == TYPE_16UC2 || encoding == TYPE_16UC3 || encoding == TYPE_16UC4;
-  //BAYER_RGGB8, BAYER_BGGR8, BAYER_GBRG8, BAYER_GRBG8, BAYER_RGGB16, BAYER_BGGR16, BAYER_GBRG16, BAYER_GRBG16,
-  //YUV422
-}
-std::vector<std::string>
-getEncodings() {
-// TODO for Groovy, the following types should be uncommented
-std::string encodings[] = { RGB8, RGBA8, RGB16, RGBA16, BGR8, BGRA8, BGR16, BGRA16, MONO8, MONO16,
-                            TYPE_8UC1, /*TYPE_8UC2,*/ TYPE_8UC3, TYPE_8UC4,
-                            TYPE_8SC1, /*TYPE_8SC2,*/ TYPE_8SC3, TYPE_8SC4,
-                            TYPE_16UC1, /*TYPE_16UC2,*/ TYPE_16UC3, TYPE_16UC4,
-                            TYPE_16SC1, /*TYPE_16SC2,*/ TYPE_16SC3, TYPE_16SC4,
-                            TYPE_32SC1, /*TYPE_32SC2,*/ TYPE_32SC3, TYPE_32SC4,
-                            TYPE_32FC1, /*TYPE_32FC2,*/ TYPE_32FC3, TYPE_32FC4,
-                            TYPE_64FC1, /*TYPE_64FC2,*/ TYPE_64FC3, TYPE_64FC4,
-                            //BAYER_RGGB8, BAYER_BGGR8, BAYER_GBRG8, BAYER_GRBG8, BAYER_RGGB16, BAYER_BGGR16, BAYER_GBRG16, BAYER_GRBG16,
-                            YUV422
-                          };
-return std::vector<std::string>(encodings, encodings+47-8-7);
-}
-
-TEST(OpencvTests, testCase_encode_decode)
-{
-   std::vector<std::string> encodings = getEncodings();
-   for(size_t i=0; i<encodings.size(); ++i) {
-      std::string src_encoding = encodings[i];
-      bool is_src_color_format = isColor(src_encoding) || isMono(src_encoding) || (src_encoding == YUV422);
-      cv::Mat image_original(cv::Size(400, 400), cv_bridge::getCvType(src_encoding));
-      cv::RNG r(77);
-      r.fill(image_original, cv::RNG::UNIFORM, 0, 127);
-
-      sensor_msgs::Image image_message;
-      cv_bridge::CvImage image_bridge(std_msgs::Header(), src_encoding, image_original);
-
-      // Convert to a sensor_msgs::Image
-      sensor_msgs::ImagePtr image_msg = image_bridge.toImageMsg();
-
-      for(size_t j=0; j<encodings.size(); ++j) {
-         std::string dst_encoding = encodings[j];
-         bool is_dst_color_format = isColor(dst_encoding) || isMono(dst_encoding) || (dst_encoding == YUV422);
-         bool is_num_channels_the_same = (numChannels(src_encoding) == numChannels(dst_encoding));
-
-         cv_bridge::CvImageConstPtr cv_image;
-         cv::Mat image_back;
-         // If the first type does not contain any color information
-         if (!is_src_color_format) {
-            // Converting from a non-color type to a color type does not make sense
-            if (is_dst_color_format) {
-               EXPECT_THROW(cv_bridge::toCvShare(image_msg, dst_encoding), cv_bridge::Exception);
-               continue;
-            }
-            // We can only convert non-color types with the same number of channels
-            if (!is_num_channels_the_same) {
-               EXPECT_THROW(cv_bridge::toCvShare(image_msg, dst_encoding), cv_bridge::Exception);
-               continue;
-            }
-            cv_image = cv_bridge::toCvShare(image_msg, dst_encoding);
-         } else {
-            // If we convert to a non-color format, the number of channels needs to be the same
-            if (!is_dst_color_format && !is_num_channels_the_same) {
-               EXPECT_THROW(cvtColor(cv_bridge::toCvShare(image_msg), dst_encoding)->image, cv_bridge::Exception);
-               continue;
-            }
-            // We do not support conversion to YUV422 for now, except from YUV422
-            if ((dst_encoding == YUV422) && (src_encoding != YUV422)) {
-               EXPECT_THROW(cv_bridge::toCvShare(image_msg, dst_encoding), cv_bridge::Exception);
-               continue;
-            }
-
-            cv_image = cv_bridge::toCvShare(image_msg, dst_encoding);
-
-            // We do not support conversion to YUV422 for now, except from YUV422
-            if ((src_encoding == YUV422) && (dst_encoding != YUV422)) {
-               EXPECT_THROW(cvtColor(cv_image, src_encoding)->image, cv_bridge::Exception);
-               continue;
-            }
-         }
-         // And convert back to a cv::Mat
-         image_back = cvtColor(cv_image, src_encoding)->image;
-
-         // If the number of channels is different, some information got lost at some point, so no possible test
-         if (!is_num_channels_the_same)
-            continue;
-         if (bitDepth(src_encoding) >= 32) {
-            // In the case where the input has floats, we will lose precision but no more than 1
-            EXPECT_LT(cv::norm(image_original, image_back, cv::NORM_INF), 1) << "problem converting from " << src_encoding << " to " << dst_encoding << " and back.";
-         } else if ((bitDepth(src_encoding) == 16) && (bitDepth(dst_encoding) == 8)) {
-            // In the case where the input has floats, we will lose precision but no more than 1 * max(127)
-            EXPECT_LT(cv::norm(image_original, image_back, cv::NORM_INF), 128) << "problem converting from " << src_encoding << " to " << dst_encoding << " and back.";
-         } else {
-            EXPECT_EQ(cv::norm(image_original, image_back, cv::NORM_INF), 0) << "problem converting from " << src_encoding << " to " << dst_encoding << " and back.";
-         }
-      }
-   }
-}
diff --git a/src/vision_opencv/image_geometry/CHANGELOG.rst b/src/vision_opencv/image_geometry/CHANGELOG.rst
deleted file mode 100644
index 0955c54..0000000
---
a/src/vision_opencv/image_geometry/CHANGELOG.rst +++ /dev/null @@ -1,383 +0,0 @@ -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -Changelog for package image_geometry -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -1.13.1 (2022-10-03) -------------------- -* Update CMakeLists.txt for Windows build environment (`#265 `_) -* Windows bringup -* Correct binary locations for shared libraries. -* Fix build break. -* Fix cv_bridge_boost.pyd. -* remove hard-coded STATIC (`#3 `_) -* remove WINDOWS_EXPORT_ALL_SYMBOLS property (`#4 `_) -* add DLL import/export macros (`#266 `_) -* update macro names (`#2 `_) -* add exports.h and dll import/export macros -* Contributors: James Xu - -1.13.0 (2018-04-30) -------------------- -* Use rosdep OpenCV and not ROS one. - We defintely don't need the whole OpenCV. - We need to clean the rosdep keys. -* Contributors: Vincent Rabaud - -1.12.8 (2018-04-17) -------------------- -* Merge pull request `#189 `_ from ros2/python3_support_in_test - python 3 compatibility in test -* python 3 compatibility in test -* fix doc job -* Contributors: Mikael Arguedas, Vincent Rabaud - -1.12.7 (2017-11-12) -------------------- -* get shared_ptr from boost or C++11 -* Contributors: Vincent Rabaud - -1.12.6 (2017-11-11) -------------------- -* missing STL includes -* Contributors: Mikael Arguedas, Vincent Rabaud - -1.12.5 (2017-11-05) -------------------- -* Fix compilation issues. - Fix suggested by `#173 `_ comment -* Make sure to initialize the distorted_image Mat. - Otherwise, valgrind throws errors about accessing uninitialized - memory. - Signed-off-by: Chris Lalancette -* Remove the last remnants of boost from image_geometry. - All of its functionality can be had from std:: in C++11, so - use that instead. This also requires us to add the -std=c++11 - flag. - Signed-off-by: Chris Lalancette -* Contributors: Chris Lalancette, Vincent Rabaud - -1.12.4 (2017-01-29) -------------------- -* Import using __future_\_ for python 3 compatibility. -* Contributors: Hans Gaiser - -1.12.3 (2016-12-04) -------------------- - -1.12.2 (2016-09-24) -------------------- -* Fix "stdlib.h: No such file or directory" errors in GCC-6 - Including '-isystem /usr/include' breaks building with GCC-6. 
- See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=70129 -* Merge pull request `#142 `_ from YuOhara/remap_with_nan_border_value - remap with nan border if mat value is float or double -* remap with nan border if mat value is float or double -* Contributors: Hodorgasm, Vincent Rabaud, YuOhara - -1.12.1 (2016-07-11) -------------------- -* Add fullResolution getter to PinholeCameraModel -* add a missing dependency when building the doc -* fix sphinx doc path -* Contributors: Jacob Panikulam, Vincent Rabaud - -1.12.0 (2016-03-18) -------------------- -* depend on OpenCV3 only -* Contributors: Vincent Rabaud - -1.11.12 (2016-03-10) --------------------- -* issue `#117 `_ pull request `#118 `_ check all distortion coefficients to see if rectification ought to be done -* Contributors: Lucas Walter - -1.11.11 (2016-01-31) --------------------- -* clean up the doc files -* fix a few warnings in doc jobs -* Contributors: Vincent Rabaud - -1.11.10 (2016-01-16) --------------------- - -1.11.9 (2015-11-29) -------------------- -* add a condition if D=None -* fix compilation warnings -* Contributors: Vincent Rabaud, YuOhara - -1.11.8 (2015-07-15) -------------------- -* fixes `#62 `_, bug in Python rectifyPoint old opencv1 API -* Simplify some OpenCV3 distinction -* Contributors: Basheer Subei, Vincent Rabaud - -1.11.7 (2014-12-14) -------------------- -* Merge pull request `#53 `_ from carnegieroboticsllc/patch-1 - Update stereo_camera_model.cpp -* Updated inline math for reprojecting a single disparity -* Update stereo_camera_model.cpp - Correct slight error in the Q matrix derivation -* Updated Q matrix to account for cameras with different Fx and Fy values -* Contributors: Carnegie Robotics LLC, Matt Alvarado, Vincent Rabaud - -1.11.6 (2014-11-16) -------------------- -* Fixes in image_geometry for Python cv2 API -* Fixed typo: np -> numpy -* Added missing tfFrame method to Python PinholeCameraModel. -* Removed trailing whitespace. -* Contributors: Daniel Maturana - -1.11.5 (2014-09-21) -------------------- -* get code to work with OpenCV3 - actually fixes `#46 `_ properly -* Contributors: Vincent Rabaud - -1.11.4 (2014-07-27) -------------------- - -1.11.3 (2014-06-08) -------------------- -* pinhole_camera_model: fix implicit shared_ptr cast to bool for C++11 - In C++11 boost::shared_ptr does not provide the implicit bool conversion - operator anymore, so make the cast in pinhole_camera_model.h explicit. - That does not hurt in older C++ standards and makes compilation with C++11 - possible. -* Contributors: Max Schwarz - -1.11.2 (2014-04-28) -------------------- - -1.11.1 (2014-04-16) -------------------- - -1.11.0 (2014-02-15) -------------------- -* remove OpenCV 1 API -* fixes `#6 `_ -* fix OpenCV dependencies -* Contributors: Vincent Rabaud - -1.10.15 (2014-02-07) --------------------- -* add assignment operator for StereoCameraModel -* fixed Python rectifyImage implementation in PinholeCameraModel -* Added operator= for the PinholeCameraModel. - Added the operator= for the PinholeCameraModel. I am not sure if the - PinholeCameraModel needs to have a destructor, too. To follow the - 'rule of three' it should actually have one. 
-* Contributors: Tobias Bar, Valsamis Ntouskos, Vincent Rabaud - -1.10.14 (2013-11-23 16:17) --------------------------- -* Contributors: Vincent Rabaud - -1.10.13 (2013-11-23 09:19) --------------------------- -* Contributors: Vincent Rabaud - -1.10.12 (2013-11-22) --------------------- -* "1.10.12" -* Contributors: Vincent Rabaud - -1.10.11 (2013-10-23) --------------------- -* Contributors: Vincent Rabaud - -1.10.10 (2013-10-19) --------------------- -* Contributors: Vincent Rabaud - -1.10.9 (2013-10-07) -------------------- -* fixes `#23 `_ -* Contributors: Vincent Rabaud - -1.10.8 (2013-09-09) -------------------- -* check for CATKIN_ENABLE_TESTING -* update email address -* Contributors: Lukas Bulwahn, Vincent Rabaud - -1.10.7 (2013-07-17) -------------------- - -1.10.6 (2013-03-01) -------------------- - -1.10.5 (2013-02-11) -------------------- -* Add dependency on generated messages - Catkin requires explicit enumeration of dependencies on generated messages. - Add this declaration to properly flatten the dependency graph and force Catkin - to generate geometry_msgs before compiling image_geometry. -* Contributors: Adam Hachey - -1.10.4 (2013-02-02) -------------------- - -1.10.3 (2013-01-17) -------------------- - -1.10.2 (2013-01-13) -------------------- -* fix ticket 4253 -* Contributors: Vincent Rabaud - -1.10.1 (2013-01-10) -------------------- - -1.10.0 (2013-01-03) -------------------- - -1.9.15 (2013-01-02) -------------------- - -1.9.14 (2012-12-30) -------------------- -* add feature for https://code.ros.org/trac/ros-pkg/ticket/5592 -* CMake cleanups -* fix a failing test -* Contributors: Vincent Rabaud - -1.9.13 (2012-12-15) -------------------- -* use the catkin macros for the setup.py -* Contributors: Vincent Rabaud - -1.9.12 (2012-12-14) -------------------- -* buildtool_depend catkin fix -* Contributors: William Woodall - -1.9.11 (2012-12-10) -------------------- -* Fixing image_geometry package.xml -* fix https://code.ros.org/trac/ros-pkg/ticket/5570 -* Contributors: Vincent Rabaud, William Woodall - -1.9.10 (2012-10-04) -------------------- - -1.9.9 (2012-10-01) ------------------- -* fix dependencies -* Contributors: Vincent Rabaud - -1.9.8 (2012-09-30) ------------------- -* fix some dependencies -* fix missing Python at install and fix some dependencies -* Contributors: Vincent Rabaud - -1.9.7 (2012-09-28 21:07) ------------------------- -* add missing stuff -* make sure we find catkin -* Contributors: Vincent Rabaud - -1.9.6 (2012-09-28 15:17) ------------------------- -* make all the tests pass -* comply to the new Catkin API -* Contributors: Vincent Rabaud - -1.9.5 (2012-09-15) ------------------- -* remove dependencies to the opencv2 ROS package -* Contributors: Vincent Rabaud - -1.9.4 (2012-09-13) ------------------- -* make sure the include folders are copied to the right place -* Contributors: Vincent Rabaud - -1.9.3 (2012-09-12) ------------------- - -1.9.2 (2012-09-07) ------------------- -* be more compliant to the latest catkin -* added catkin_project() to cv_bridge, image_geometry, and opencv_tests -* Contributors: Jonathan Binney, Vincent Rabaud - -1.9.1 (2012-08-28 22:06) ------------------------- -* remove things that were marked as ROS_DEPRECATED -* Contributors: Vincent Rabaud - -1.9.0 (2012-08-28 14:29) ------------------------- -* catkinized opencv_tests by Jon Binney -* fix ticket 5449 -* use OpenCV's projectPoints -* remove the version check, let's trust OpenCV :) -* revert the removal of opencv2 -* vision_opencv: Export OpenCV flags in 
manifests for image_geometry, cv_bridge. -* finally get rid of opencv2 as it is a system dependency now -* bump REQUIRED version of OpenCV to 2.3.2, which is what's in ros-fuerte-opencv -* switch rosdep name to opencv2, to refer to ros-fuerte-opencv2 -* Adding a few missing headers so that client code may compile against pinhole camera model. -* Adding opencv2 to all manifests, so that client packages may - not break when using them. -* baking in opencv debs and attempting a pre-release -* image_geometry: (Python) Adjust K and P for ROI/binning. Also expose full resolution K and P. Add raw_roi property. -* image_geometry: Add Tx, Ty getters (Python). -* image_geometry: Added tf_frame and stamp properties. Only generate undistort maps when rectifyImage is called. -* image_geometry: Fix for when D is empty (Python). -* image_geometry: Take all D coefficients, not just the first 4 (Python). -* image_geometry: Fix rectification in the presence of binning (`#4848 `_). -* image_geometry: Fixed wg-ros-pkg `#5019 `_, error updating StereoCameraModel. Removed manifest dependency on cv_bridge. -* image_geometry: fromCameraInfo() returns bool, true if parameters have changed since last call. -* image_geometry: Accessors for full-res K, P. -* image_geometry: Implemented toFullResolution(), toReducedResolution(). -* image_geometry: Implemented reducedResolution(). -* image_geometry: Implemented rectifiedRoi() with caching. Fixed bug that would cause rectification maps to be regenerated every time. -* image_geometry: Implemented rectifyRoi(). -* image_geometry: Overloads of projection functions that return the output directly instead of through a reference parameter. Implemented unrectifyRoi(). Added fullResolution(), rawRoi(). -* image_geometry: Library-specific exception class. -* image_geometry: PIMPL pattern for cached data, so I can change in patch releases if necessary. Changed projectPixelTo3dRay() to normalize to z=1. -* image_geometry (rep0104): Added binning. Partially fixed ROI (not finding rectified ROI yet). Now interpreting distortion_model. Lots of code cleanup. -* image_geometry (rep0104): Got tests passing again, were issues with D becoming variable-length. -* image_geometry: Fixed swapped width/height in computing ROI undistort maps. Partially fixes `#4206 `_. -* image_geometry: getDelta functions, getZ and getDisparity in C++ and Python. Docs and tests for them. Changed Python fx() and friends to pull values out of P instead of K. -* image_geometry: Added C++ getDeltaU and getDeltaV. -* `#4201 `_, implement/doc/test for getDeltaU getDeltaX getDeltaV getDeltaY -* Added Ubuntu platform tags to manifest -* `#4083 `_, projectPixelTo3dRay implemented -* image_geometry: Added PinholeCameraModel::stamp() returning the time stamp. -* image_geometry: Fixed bugs related to ignoring Tx & Ty in projectPixelTo3dRay and unrectifyPoint. Added Tx() and Ty() accessors. -* image_geometry: Fixed `#4063 `_, PinholeCameraModel ignores Tx term in P matrix. -* image_geometry: Implemented projectDisparityTo3d, `#4019 `_. -* image_geometry: Implemented unrectifyPoint, with unit tests. -* image_geometry: Fixed bug in rectifyPoint due to cv::undistortPoints not accepting double pt data, `#4053 `_. -* image_geometry: Tweaked manifest. -* image_geometry: Better manifest description. -* Removed tfFrame sample -* image_geometry: Doxygen main page, manifest updates. -* image_geometry: Doxygen for StereoCameraModel. -* image_geometry: Made Q calculation match old stereoproc one. 
-* image_geometry: Tweaked projectDisparityImageTo3D API for handling missing values. -* image_geometry: Added method to project disparity image to 3d. Added ConstPtr version of fromCameraInfo in StereoCameraModel. -* image_geometry: Export linker flags. Fixed bug that could cause rectification maps to not be initialized before use. -* Fixed path-handling on gtest for CMake 2.6 -* image_geometry: Added missing source file. -* image_geometry: Added some C++ docs. -* image_geometry: Minor cleanup of StereoCameraModel, added it to build. Put in copy constructors. -* image_geometry: Switched pinhole_camera_model to use new C++ OpenCV types and functions. -* Remove use of deprecated rosbuild macros -* image_geometry (C++): Unit test for projecting points uv <-> xyz. -* image_geometry (C++): Implemented more projection functions, added beginnings of the unit tests. -* trigger rebuild -* Enable rosdoc.yaml -* docs -* image_geometry: Started C++ API. PinholeCameraModel is in theory (untested) able to track state efficiently and rectify images. -* First stereo test -* Checkpoint -* Skeleton of test -* First cut -* Contributors: Vincent Rabaud, ethanrublee, gerkey, jamesb, mihelich, vrabaud, wheeler diff --git a/src/vision_opencv/image_geometry/CMakeLists.txt b/src/vision_opencv/image_geometry/CMakeLists.txt deleted file mode 100644 index 6c22741..0000000 --- a/src/vision_opencv/image_geometry/CMakeLists.txt +++ /dev/null @@ -1,37 +0,0 @@ -cmake_minimum_required(VERSION 2.8) -project(image_geometry) - -find_package(catkin REQUIRED sensor_msgs) -find_package(OpenCV REQUIRED) - -catkin_package(CATKIN_DEPENDS sensor_msgs - DEPENDS OpenCV - INCLUDE_DIRS include - LIBRARIES ${PROJECT_NAME} -) - -catkin_python_setup() - -include_directories(include) -include_directories(${catkin_INCLUDE_DIRS} ${OpenCV_INCLUDE_DIRS}) - -# add a library -add_library(${PROJECT_NAME} src/pinhole_camera_model.cpp src/stereo_camera_model.cpp) -target_link_libraries(${PROJECT_NAME} ${OpenCV_LIBRARIES}) -add_dependencies(${PROJECT_NAME} ${catkin_EXPORTED_TARGETS}) - -install(DIRECTORY include/${PROJECT_NAME}/ - DESTINATION ${CATKIN_PACKAGE_INCLUDE_DESTINATION}/ -) - -# install library -install(TARGETS ${PROJECT_NAME} - ARCHIVE DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION} - LIBRARY DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION} - RUNTIME DESTINATION ${CATKIN_GLOBAL_BIN_DESTINATION} -) - -# add tests -if(CATKIN_ENABLE_TESTING) - add_subdirectory(test) -endif() diff --git a/src/vision_opencv/image_geometry/doc/conf.py b/src/vision_opencv/image_geometry/doc/conf.py deleted file mode 100644 index a30006f..0000000 --- a/src/vision_opencv/image_geometry/doc/conf.py +++ /dev/null @@ -1,201 +0,0 @@ -# -*- coding: utf-8 -*- -# -# image_geometry documentation build configuration file, created by -# sphinx-quickstart on Mon Jun 1 14:21:53 2009. -# -# This file is execfile()d with the current directory set to its containing dir. -# -# Note that not all possible configuration values are present in this -# autogenerated file. -# -# All configuration values have a default; values that are commented out -# serve to show the default. - -import sys, os - -# If extensions (or modules to document with autodoc) are in another directory, -# add these directories to sys.path here. If the directory is relative to the -# documentation root, use os.path.abspath to make it absolute, like shown here. 
-#sys.path.append(os.path.abspath('.')) - -# -- General configuration ----------------------------------------------------- - -# Add any Sphinx extension module names here, as strings. They can be extensions -# coming with Sphinx (named 'sphinx.ext.*') or your custom ones. -extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.intersphinx', 'sphinx.ext.pngmath'] - -# Add any paths that contain templates here, relative to this directory. -templates_path = ['_templates'] - -# The suffix of source filenames. -source_suffix = '.rst' - -# The encoding of source files. -#source_encoding = 'utf-8' - -# The master toctree document. -master_doc = 'index' - -# General information about the project. -project = u'image_geometry' -copyright = u'2009, Willow Garage, Inc.' - -# The version info for the project you're documenting, acts as replacement for -# |version| and |release|, also used in various other places throughout the -# built documents. -# -# The short X.Y version. -version = '0.1' -# The full version, including alpha/beta/rc tags. -release = '0.1.0' - -# The language for content autogenerated by Sphinx. Refer to documentation -# for a list of supported languages. -#language = None - -# There are two options for replacing |today|: either, you set today to some -# non-false value, then it is used: -#today = '' -# Else, today_fmt is used as the format for a strftime call. -#today_fmt = '%B %d, %Y' - -# List of documents that shouldn't be included in the build. -#unused_docs = [] - -# List of directories, relative to source directory, that shouldn't be searched -# for source files. -exclude_trees = ['_build'] - -# The reST default role (used for this markup: `text`) to use for all documents. -#default_role = None - -# If true, '()' will be appended to :func: etc. cross-reference text. -#add_function_parentheses = True - -# If true, the current module name will be prepended to all description -# unit titles (such as .. function::). -#add_module_names = True - -# If true, sectionauthor and moduleauthor directives will be shown in the -# output. They are ignored by default. -#show_authors = False - -# The name of the Pygments (syntax highlighting) style to use. -pygments_style = 'sphinx' - -# A list of ignored prefixes for module index sorting. -#modindex_common_prefix = [] - - -# -- Options for HTML output --------------------------------------------------- - -# The theme to use for HTML and HTML Help pages. Major themes that come with -# Sphinx are currently 'default' and 'sphinxdoc'. -html_theme = 'default' - -# Theme options are theme-specific and customize the look and feel of a theme -# further. For a list of options available for each theme, see the -# documentation. -#html_theme_options = {} - -# Add any paths that contain custom themes here, relative to this directory. -#html_theme_path = [] - -# The name for this set of Sphinx documents. If None, it defaults to -# " v documentation". -#html_title = None - -# A shorter title for the navigation bar. Default is the same as html_title. -#html_short_title = None - -# The name of an image file (relative to this directory) to place at the top -# of the sidebar. -#html_logo = None - -# The name of an image file (within the static path) to use as favicon of the -# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 -# pixels large. -#html_favicon = None - -# Add any paths that contain custom static files (such as style sheets) here, -# relative to this directory. 
They are copied after the builtin static files, -# so a file named "default.css" will overwrite the builtin "default.css". -#html_static_path = ['_static'] - -# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, -# using the given strftime format. -#html_last_updated_fmt = '%b %d, %Y' - -# If true, SmartyPants will be used to convert quotes and dashes to -# typographically correct entities. -#html_use_smartypants = True - -# Custom sidebar templates, maps document names to template names. -#html_sidebars = {} - -# Additional templates that should be rendered to pages, maps page names to -# template names. -#html_additional_pages = {} - -# If false, no module index is generated. -#html_use_modindex = True - -# If false, no index is generated. -#html_use_index = True - -# If true, the index is split into individual pages for each letter. -#html_split_index = False - -# If true, links to the reST sources are added to the pages. -#html_show_sourcelink = True - -# If true, an OpenSearch description file will be output, and all pages will -# contain a tag referring to it. The value of this option must be the -# base URL from which the finished HTML is served. -#html_use_opensearch = '' - -# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml"). -#html_file_suffix = '' - -# Output file base name for HTML help builder. -htmlhelp_basename = 'image_geometrydoc' - - -# -- Options for LaTeX output -------------------------------------------------- - -# The paper size ('letter' or 'a4'). -#latex_paper_size = 'letter' - -# The font size ('10pt', '11pt' or '12pt'). -#latex_font_size = '10pt' - -# Grouping the document tree into LaTeX files. List of tuples -# (source start file, target name, title, author, documentclass [howto/manual]). -latex_documents = [ - ('index', 'image_geometry.tex', u'stereo\\_utils Documentation', - u'James Bowman', 'manual'), -] - -# The name of an image file (relative to this directory) to place at the top of -# the title page. -#latex_logo = None - -# For "manual" documents, if this is true, then toplevel headings are parts, -# not chapters. -#latex_use_parts = False - -# Additional stuff for the LaTeX preamble. -#latex_preamble = '' - -# Documents to append as an appendix to all manuals. -#latex_appendices = [] - -# If false, no module index is generated. -#latex_use_modindex = True - -# Example configuration for intersphinx: refer to the Python standard library. -intersphinx_mapping = { - 'http://docs.python.org/': None, - 'http://docs.scipy.org/doc/numpy' : None, - 'http://docs.ros.org/api/tf/html/python/' : None, - } diff --git a/src/vision_opencv/image_geometry/doc/index.rst b/src/vision_opencv/image_geometry/doc/index.rst deleted file mode 100644 index 08d0c10..0000000 --- a/src/vision_opencv/image_geometry/doc/index.rst +++ /dev/null @@ -1,21 +0,0 @@ -image_geometry -============== - -image_geometry simplifies interpreting images geometrically using the -parameters from sensor_msgs/CameraInfo. - -.. module:: image_geometry - -.. autoclass:: image_geometry.PinholeCameraModel - :members: fromCameraInfo, rectifyImage, rectifyPoint, tfFrame, project3dToPixel, projectPixelTo3dRay, distortionCoeffs, intrinsicMatrix, projectionMatrix, rotationMatrix, cx, cy, fx, fy - -.. 
autoclass:: image_geometry.StereoCameraModel - :members: - - -Indices and tables -================== - -* :ref:`genindex` -* :ref:`search` - diff --git a/src/vision_opencv/image_geometry/doc/mainpage.dox b/src/vision_opencv/image_geometry/doc/mainpage.dox deleted file mode 100644 index eadc128..0000000 --- a/src/vision_opencv/image_geometry/doc/mainpage.dox +++ /dev/null @@ -1,29 +0,0 @@ -/** -\mainpage -\htmlinclude manifest.html - -\b image_geometry contains camera model classes that simplify interpreting -images geometrically using the calibration parameters from -sensor_msgs/CameraInfo messages. They may be efficiently updated in your -image callback: - -\code -image_geometry::PinholeCameraModel model_; - -void imageCb(const sensor_msgs::ImageConstPtr& raw_image, - const sensor_msgs::CameraInfoConstPtr& cam_info) -{ - // Update the camera model (usually a no-op) - model_.fromCameraInfo(cam_info); - - // Do processing... -} -\endcode - -\section codeapi Code API - -\b image_geometry contains two classes: - - image_geometry::PinholeCameraModel - models a pinhole camera with distortion. - - image_geometry::StereoCameraModel - models a stereo pair of pinhole cameras. - -*/ diff --git a/src/vision_opencv/image_geometry/include/image_geometry/exports.h b/src/vision_opencv/image_geometry/include/image_geometry/exports.h deleted file mode 100644 index 34bafad..0000000 --- a/src/vision_opencv/image_geometry/include/image_geometry/exports.h +++ /dev/null @@ -1,18 +0,0 @@ -#ifndef IMAGE_GEOMETRY_EXPORTS_H -#define IMAGE_GEOMETRY_EXPORTS_H - -#include - -// Import/export for windows dll's and visibility for gcc shared libraries. - -#ifdef ROS_BUILD_SHARED_LIBS // ros is being built around shared libraries - #ifdef image_geometry_EXPORTS // we are building a shared lib/dll - #define IMAGE_GEOMETRY_DECL ROS_HELPER_EXPORT - #else // we are using shared lib/dll - #define IMAGE_GEOMETRY_DECL ROS_HELPER_IMPORT - #endif -#else // ros is being built around static libraries - #define IMAGE_GEOMETRY_DECL -#endif - -#endif // IMAGE_GEOMETRY_EXPORTS_H diff --git a/src/vision_opencv/image_geometry/include/image_geometry/pinhole_camera_model.h b/src/vision_opencv/image_geometry/include/image_geometry/pinhole_camera_model.h deleted file mode 100644 index 5a0677c..0000000 --- a/src/vision_opencv/image_geometry/include/image_geometry/pinhole_camera_model.h +++ /dev/null @@ -1,344 +0,0 @@ -#ifndef IMAGE_GEOMETRY_PINHOLE_CAMERA_MODEL_H -#define IMAGE_GEOMETRY_PINHOLE_CAMERA_MODEL_H - -#include -#include -#include -#include -#include -#include -#include "exports.h" - -namespace image_geometry { - -class Exception : public std::runtime_error -{ -public: - Exception(const std::string& description) : std::runtime_error(description) {} -}; - -/** - * \brief Simplifies interpreting images geometrically using the parameters from - * sensor_msgs/CameraInfo. - */ -class IMAGE_GEOMETRY_DECL PinholeCameraModel -{ -public: - - PinholeCameraModel(); - - PinholeCameraModel(const PinholeCameraModel& other); - - PinholeCameraModel& operator=(const PinholeCameraModel& other); - - /** - * \brief Set the camera parameters from the sensor_msgs/CameraInfo message. - */ - bool fromCameraInfo(const sensor_msgs::CameraInfo& msg); - - /** - * \brief Set the camera parameters from the sensor_msgs/CameraInfo message. - */ - bool fromCameraInfo(const sensor_msgs::CameraInfoConstPtr& msg); - - /** - * \brief Get the name of the camera coordinate frame in tf. 
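The Python side of the package (cameramodels.py, deleted further below) mirrors the update-in-the-callback pattern shown in mainpage.dox; a minimal rospy sketch, where the node and topic names are illustrative only:

import rospy
import sensor_msgs.msg
from image_geometry import PinholeCameraModel

model = PinholeCameraModel()

def info_cb(msg):
    # Usually a no-op after the first message: parameters rarely change.
    model.fromCameraInfo(msg)
    rospy.loginfo("camera frame: %s", model.tfFrame())

rospy.init_node("camera_model_demo")
rospy.Subscriber("camera_info", sensor_msgs.msg.CameraInfo, info_cb)
rospy.spin()
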
- */ - std::string tfFrame() const; - - /** - * \brief Get the time stamp associated with this camera model. - */ - ros::Time stamp() const; - - /** - * \brief The resolution at which the camera was calibrated. - * - * The maximum resolution at which the camera can be used with the current - * calibration; normally this is the same as the imager resolution. - */ - cv::Size fullResolution() const; - - /** - * \brief The resolution of the current rectified image. - * - * The size of the rectified image associated with the latest CameraInfo, as - * reduced by binning/ROI and affected by distortion. If binning and ROI are - * not in use, this is the same as fullResolution(). - */ - cv::Size reducedResolution() const; - - cv::Point2d toFullResolution(const cv::Point2d& uv_reduced) const; - - cv::Rect toFullResolution(const cv::Rect& roi_reduced) const; - - cv::Point2d toReducedResolution(const cv::Point2d& uv_full) const; - - cv::Rect toReducedResolution(const cv::Rect& roi_full) const; - - /** - * \brief The current raw ROI, as used for capture by the camera driver. - */ - cv::Rect rawRoi() const; - - /** - * \brief The current rectified ROI, which best fits the raw ROI. - */ - cv::Rect rectifiedRoi() const; - - /** - * \brief Project a 3d point to rectified pixel coordinates. - * - * This is the inverse of projectPixelTo3dRay(). - * - * \param xyz 3d point in the camera coordinate frame - * \return (u,v) in rectified pixel coordinates - */ - cv::Point2d project3dToPixel(const cv::Point3d& xyz) const; - - /** - * \brief Project a rectified pixel to a 3d ray. - * - * Returns the unit vector in the camera coordinate frame in the direction of rectified - * pixel (u,v) in the image plane. This is the inverse of project3dToPixel(). - * - * In 1.4.x, the vector has z = 1.0. Previously, this function returned a unit vector. - * - * \param uv_rect Rectified pixel coordinates - * \return 3d ray passing through (u,v) - */ - cv::Point3d projectPixelTo3dRay(const cv::Point2d& uv_rect) const; - - /** - * \brief Rectify a raw camera image. - */ - void rectifyImage(const cv::Mat& raw, cv::Mat& rectified, - int interpolation = cv::INTER_LINEAR) const; - - /** - * \brief Apply camera distortion to a rectified image. - */ - void unrectifyImage(const cv::Mat& rectified, cv::Mat& raw, - int interpolation = cv::INTER_LINEAR) const; - - /** - * \brief Compute the rectified image coordinates of a pixel in the raw image. - */ - cv::Point2d rectifyPoint(const cv::Point2d& uv_raw) const; - - /** - * \brief Compute the raw image coordinates of a pixel in the rectified image. - */ - cv::Point2d unrectifyPoint(const cv::Point2d& uv_rect) const; - - /** - * \brief Compute the rectified ROI best fitting a raw ROI. - */ - cv::Rect rectifyRoi(const cv::Rect& roi_raw) const; - - /** - * \brief Compute the raw ROI best fitting a rectified ROI. - */ - cv::Rect unrectifyRoi(const cv::Rect& roi_rect) const; - - /** - * \brief Returns the camera info message held internally - */ - const sensor_msgs::CameraInfo& cameraInfo() const; - - /** - * \brief Returns the original camera matrix. - */ - const cv::Matx33d& intrinsicMatrix() const; - - /** - * \brief Returns the distortion coefficients. - */ - const cv::Mat_& distortionCoeffs() const; - - /** - * \brief Returns the rotation matrix. - */ - const cv::Matx33d& rotationMatrix() const; - - /** - * \brief Returns the projection matrix. - */ - const cv::Matx34d& projectionMatrix() const; - - /** - * \brief Returns the original camera matrix for full resolution. 
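Scaling the ray to a fixed depth makes the two projections invert each other, which the directed.py test below also exercises; a sketch assuming `cam` is an already-initialized monocular PinholeCameraModel:

u, v, Z = 100.0, 200.0, 2.0
ray = cam.projectPixelTo3dRay((u, v))      # unit-length in Python; z == 1.0 in C++
pt = tuple(c * (Z / ray[2]) for c in ray)  # the point at depth Z along the ray
uv_back = cam.project3dToPixel(pt)
assert abs(uv_back[0] - u) < 1e-6 and abs(uv_back[1] - v) < 1e-6
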
- */ - const cv::Matx33d& fullIntrinsicMatrix() const; - - /** - * \brief Returns the projection matrix for full resolution. - */ - const cv::Matx34d& fullProjectionMatrix() const; - - /** - * \brief Returns the focal length (pixels) in x direction of the rectified image. - */ - double fx() const; - - /** - * \brief Returns the focal length (pixels) in y direction of the rectified image. - */ - double fy() const; - - /** - * \brief Returns the x coordinate of the optical center. - */ - double cx() const; - - /** - * \brief Returns the y coordinate of the optical center. - */ - double cy() const; - - /** - * \brief Returns the x-translation term of the projection matrix. - */ - double Tx() const; - - /** - * \brief Returns the y-translation term of the projection matrix. - */ - double Ty() const; - - /** - * \brief Returns the number of columns in each bin. - */ - uint32_t binningX() const; - - /** - * \brief Returns the number of rows in each bin. - */ - uint32_t binningY() const; - - /** - * \brief Compute delta u, given Z and delta X in Cartesian space. - * - * For given Z, this is the inverse of getDeltaX(). - * - * \param deltaX Delta X, in Cartesian space - * \param Z Z (depth), in Cartesian space - */ - double getDeltaU(double deltaX, double Z) const; - - /** - * \brief Compute delta v, given Z and delta Y in Cartesian space. - * - * For given Z, this is the inverse of getDeltaY(). - * - * \param deltaY Delta Y, in Cartesian space - * \param Z Z (depth), in Cartesian space - */ - double getDeltaV(double deltaY, double Z) const; - - /** - * \brief Compute delta X, given Z in Cartesian space and delta u in pixels. - * - * For given Z, this is the inverse of getDeltaU(). - * - * \param deltaU Delta u, in pixels - * \param Z Z (depth), in Cartesian space - */ - double getDeltaX(double deltaU, double Z) const; - - /** - * \brief Compute delta Y, given Z in Cartesian space and delta v in pixels. - * - * For given Z, this is the inverse of getDeltaV(). 
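All four getDelta helpers are similar-triangle relations (du = fx * dX / Z and dX = Z * du / fx, likewise for v/Y with fy), so each pair round-trips exactly at a fixed Z; a sketch assuming an initialized model `cam`:

Z = 2.0
dX = cam.getDeltaX(17.0, Z)                      # metres spanned by 17 px at Z = 2 m
assert abs(cam.getDeltaU(dX, Z) - 17.0) < 1e-9   # inverse relation for fixed Z
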
- * - * \param deltaV Delta v, in pixels - * \param Z Z (depth), in Cartesian space - */ - double getDeltaY(double deltaV, double Z) const; - - /** - * \brief Returns true if the camera has been initialized - */ - bool initialized() const { return (bool)cache_; } - -protected: - sensor_msgs::CameraInfo cam_info_; - cv::Mat_ D_; // Unaffected by binning, ROI - cv::Matx33d R_; // Unaffected by binning, ROI - cv::Matx33d K_; // Describe current image (includes binning, ROI) - cv::Matx34d P_; // Describe current image (includes binning, ROI) - cv::Matx33d K_full_; // Describe full-res image, needed for full maps - cv::Matx34d P_full_; // Describe full-res image, needed for full maps - - // Use PIMPL here so we can change internals in patch updates if needed - struct Cache; -#ifdef BOOST_SHARED_PTR_HPP_INCLUDED - boost::shared_ptr cache_; // Holds cached data for internal use -#else - std::shared_ptr cache_; // Holds cached data for internal use -#endif - - void initRectificationMaps() const; - - friend class StereoCameraModel; -}; - - -/* Trivial inline functions */ -inline std::string PinholeCameraModel::tfFrame() const -{ - assert( initialized() ); - return cam_info_.header.frame_id; -} - -inline ros::Time PinholeCameraModel::stamp() const -{ - assert( initialized() ); - return cam_info_.header.stamp; -} - -inline const sensor_msgs::CameraInfo& PinholeCameraModel::cameraInfo() const { return cam_info_; } -inline const cv::Matx33d& PinholeCameraModel::intrinsicMatrix() const { return K_; } -inline const cv::Mat_& PinholeCameraModel::distortionCoeffs() const { return D_; } -inline const cv::Matx33d& PinholeCameraModel::rotationMatrix() const { return R_; } -inline const cv::Matx34d& PinholeCameraModel::projectionMatrix() const { return P_; } -inline const cv::Matx33d& PinholeCameraModel::fullIntrinsicMatrix() const { return K_full_; } -inline const cv::Matx34d& PinholeCameraModel::fullProjectionMatrix() const { return P_full_; } - -inline double PinholeCameraModel::fx() const { return P_(0,0); } -inline double PinholeCameraModel::fy() const { return P_(1,1); } -inline double PinholeCameraModel::cx() const { return P_(0,2); } -inline double PinholeCameraModel::cy() const { return P_(1,2); } -inline double PinholeCameraModel::Tx() const { return P_(0,3); } -inline double PinholeCameraModel::Ty() const { return P_(1,3); } - -inline uint32_t PinholeCameraModel::binningX() const { return cam_info_.binning_x; } -inline uint32_t PinholeCameraModel::binningY() const { return cam_info_.binning_y; } - -inline double PinholeCameraModel::getDeltaU(double deltaX, double Z) const -{ - assert( initialized() ); - return fx() * deltaX / Z; -} - -inline double PinholeCameraModel::getDeltaV(double deltaY, double Z) const -{ - assert( initialized() ); - return fy() * deltaY / Z; -} - -inline double PinholeCameraModel::getDeltaX(double deltaU, double Z) const -{ - assert( initialized() ); - return Z * deltaU / fx(); -} - -inline double PinholeCameraModel::getDeltaY(double deltaV, double Z) const -{ - assert( initialized() ); - return Z * deltaV / fy(); -} - -} //namespace image_geometry - -#endif diff --git a/src/vision_opencv/image_geometry/include/image_geometry/stereo_camera_model.h b/src/vision_opencv/image_geometry/include/image_geometry/stereo_camera_model.h deleted file mode 100644 index f952714..0000000 --- a/src/vision_opencv/image_geometry/include/image_geometry/stereo_camera_model.h +++ /dev/null @@ -1,131 +0,0 @@ -#ifndef IMAGE_GEOMETRY_STEREO_CAMERA_MODEL_H -#define 
IMAGE_GEOMETRY_STEREO_CAMERA_MODEL_H - -#include "image_geometry/pinhole_camera_model.h" -#include "exports.h" - -namespace image_geometry { - -/** - * \brief Simplifies interpreting stereo image pairs geometrically using the - * parameters from the left and right sensor_msgs/CameraInfo. - */ -class IMAGE_GEOMETRY_DECL StereoCameraModel -{ -public: - StereoCameraModel(); - - StereoCameraModel(const StereoCameraModel& other); - - StereoCameraModel& operator=(const StereoCameraModel& other); - - /** - * \brief Set the camera parameters from the sensor_msgs/CameraInfo messages. - */ - bool fromCameraInfo(const sensor_msgs::CameraInfo& left, - const sensor_msgs::CameraInfo& right); - - /** - * \brief Set the camera parameters from the sensor_msgs/CameraInfo messages. - */ - bool fromCameraInfo(const sensor_msgs::CameraInfoConstPtr& left, - const sensor_msgs::CameraInfoConstPtr& right); - - /** - * \brief Get the left monocular camera model. - */ - const PinholeCameraModel& left() const; - - /** - * \brief Get the right monocular camera model. - */ - const PinholeCameraModel& right() const; - - /** - * \brief Get the name of the camera coordinate frame in tf. - * - * For stereo cameras, both the left and right CameraInfo should be in the left - * optical frame. - */ - std::string tfFrame() const; - - /** - * \brief Project a rectified pixel with disparity to a 3d point. - */ - void projectDisparityTo3d(const cv::Point2d& left_uv_rect, float disparity, cv::Point3d& xyz) const; - - /** - * \brief Project a disparity image to a 3d point cloud. - * - * If handleMissingValues = true, all points with minimal disparity (outliers) have - * Z set to MISSING_Z (currently 10000.0). - */ - void projectDisparityImageTo3d(const cv::Mat& disparity, cv::Mat& point_cloud, - bool handleMissingValues = false) const; - static const double MISSING_Z; - - /** - * \brief Returns the disparity reprojection matrix. - */ - const cv::Matx44d& reprojectionMatrix() const; - - /** - * \brief Returns the horizontal baseline in world coordinates. - */ - double baseline() const; - - /** - * \brief Returns the depth at which a point is observed with a given disparity. - * - * This is the inverse of getDisparity(). - */ - double getZ(double disparity) const; - - /** - * \brief Returns the disparity observed for a point at depth Z. - * - * This is the inverse of getZ(). 
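Depth and disparity are reciprocal through the baseline, so the two accessors round-trip; a sketch using the Python StereoCameraModel deleted below (which, unlike this C++ version, assumes equal principal points), with `stereo` assumed initialized:

d = 16.0
Z = stereo.getZ(d)                  # Z = fx * baseline / d in the Python model
assert abs(stereo.getDisparity(Z) - d) < 1e-9
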
- */ - double getDisparity(double Z) const; - - /** - * \brief Returns true if the camera has been initialized - */ - bool initialized() const { return left_.initialized() && right_.initialized(); } -protected: - PinholeCameraModel left_, right_; - cv::Matx44d Q_; - - void updateQ(); -}; - - -/* Trivial inline functions */ -inline const PinholeCameraModel& StereoCameraModel::left() const { return left_; } -inline const PinholeCameraModel& StereoCameraModel::right() const { return right_; } - -inline std::string StereoCameraModel::tfFrame() const { return left_.tfFrame(); } - -inline const cv::Matx44d& StereoCameraModel::reprojectionMatrix() const { return Q_; } - -inline double StereoCameraModel::baseline() const -{ - /// @todo Currently assuming horizontal baseline - return -right_.Tx() / right_.fx(); -} - -inline double StereoCameraModel::getZ(double disparity) const -{ - assert( initialized() ); - return -right_.Tx() / (disparity - (left().cx() - right().cx())); -} - -inline double StereoCameraModel::getDisparity(double Z) const -{ - assert( initialized() ); - return -right_.Tx() / Z + (left().cx() - right().cx()); ; -} - -} //namespace image_geometry - -#endif diff --git a/src/vision_opencv/image_geometry/package.xml b/src/vision_opencv/image_geometry/package.xml deleted file mode 100644 index d557b68..0000000 --- a/src/vision_opencv/image_geometry/package.xml +++ /dev/null @@ -1,30 +0,0 @@ - - image_geometry - 1.13.1 - - `image_geometry` contains C++ and Python libraries for interpreting images - geometrically. It interfaces the calibration parameters in sensor_msgs/CameraInfo - messages with OpenCV functions such as image rectification, much as cv_bridge - interfaces ROS sensor_msgs/Image with OpenCV data types. - - Patrick Mihelich - Vincent Rabaud - BSD - http://www.ros.org/wiki/image_geometry - - - - - - catkin - - libopencv-dev - sensor_msgs - - libopencv-dev - libopencv-dev - sensor_msgs - - dvipng - texlive-latex-extra - diff --git a/src/vision_opencv/image_geometry/rosdoc.yaml b/src/vision_opencv/image_geometry/rosdoc.yaml deleted file mode 100644 index 615dc7e..0000000 --- a/src/vision_opencv/image_geometry/rosdoc.yaml +++ /dev/null @@ -1,8 +0,0 @@ - - builder: sphinx - name: Python API - output_dir: python - sphinx_root_dir: doc - - builder: doxygen - name: C++ API - output_dir: c++ - file_patterns: '*.c *.cpp *.h *.cc *.hh *.dox' diff --git a/src/vision_opencv/image_geometry/setup.py b/src/vision_opencv/image_geometry/setup.py deleted file mode 100644 index 407ce1e..0000000 --- a/src/vision_opencv/image_geometry/setup.py +++ /dev/null @@ -1,10 +0,0 @@ -#!/usr/bin/env python -from distutils.core import setup -from catkin_pkg.python_setup import generate_distutils_setup - -d = generate_distutils_setup() - -d['packages'] = ['image_geometry'] -d['package_dir'] = {'' : 'src'} - -setup(**d) diff --git a/src/vision_opencv/image_geometry/src/image_geometry/__init__.py b/src/vision_opencv/image_geometry/src/image_geometry/__init__.py deleted file mode 100644 index f1cbda0..0000000 --- a/src/vision_opencv/image_geometry/src/image_geometry/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -from __future__ import absolute_import -from .cameramodels import PinholeCameraModel, StereoCameraModel diff --git a/src/vision_opencv/image_geometry/src/image_geometry/cameramodels.py b/src/vision_opencv/image_geometry/src/image_geometry/cameramodels.py deleted file mode 100644 index 139c95c..0000000 --- a/src/vision_opencv/image_geometry/src/image_geometry/cameramodels.py +++ /dev/null @@ -1,373 +0,0 @@ 
-import array - -import cv2 -import sensor_msgs.msg -import math -import copy -import numpy - -def mkmat(rows, cols, L): - mat = numpy.matrix(L, dtype='float64') - mat.resize((rows,cols)) - return mat - -class PinholeCameraModel: - - """ - A pinhole camera is an idealized monocular camera. - """ - - def __init__(self): - self.K = None - self.D = None - self.R = None - self.P = None - self.full_K = None - self.full_P = None - self.width = None - self.height = None - self.binning_x = None - self.binning_y = None - self.raw_roi = None - self.tf_frame = None - self.stamp = None - - def fromCameraInfo(self, msg): - """ - :param msg: camera parameters - :type msg: sensor_msgs.msg.CameraInfo - - Set the camera parameters from the :class:`sensor_msgs.msg.CameraInfo` message. - """ - self.K = mkmat(3, 3, msg.K) - if msg.D: - self.D = mkmat(len(msg.D), 1, msg.D) - else: - self.D = None - self.R = mkmat(3, 3, msg.R) - self.P = mkmat(3, 4, msg.P) - self.full_K = mkmat(3, 3, msg.K) - self.full_P = mkmat(3, 4, msg.P) - self.width = msg.width - self.height = msg.height - self.binning_x = max(1, msg.binning_x) - self.binning_y = max(1, msg.binning_y) - self.resolution = (msg.width, msg.height) - - self.raw_roi = copy.copy(msg.roi) - # ROI all zeros is considered the same as full resolution - if (self.raw_roi.x_offset == 0 and self.raw_roi.y_offset == 0 and - self.raw_roi.width == 0 and self.raw_roi.height == 0): - self.raw_roi.width = self.width - self.raw_roi.height = self.height - self.tf_frame = msg.header.frame_id - self.stamp = msg.header.stamp - - # Adjust K and P for binning and ROI - self.K[0,0] /= self.binning_x - self.K[1,1] /= self.binning_y - self.K[0,2] = (self.K[0,2] - self.raw_roi.x_offset) / self.binning_x - self.K[1,2] = (self.K[1,2] - self.raw_roi.y_offset) / self.binning_y - self.P[0,0] /= self.binning_x - self.P[1,1] /= self.binning_y - self.P[0,2] = (self.P[0,2] - self.raw_roi.x_offset) / self.binning_x - self.P[1,2] = (self.P[1,2] - self.raw_roi.y_offset) / self.binning_y - - def rectifyImage(self, raw, rectified): - """ - :param raw: input image - :type raw: :class:`CvMat` or :class:`IplImage` - :param rectified: rectified output image - :type rectified: :class:`CvMat` or :class:`IplImage` - - Applies the rectification specified by camera parameters :math:`K` and :math:`D` to image `raw` and writes the resulting image `rectified`. - """ - - self.mapx = numpy.ndarray(shape=(self.height, self.width, 1), - dtype='float32') - self.mapy = numpy.ndarray(shape=(self.height, self.width, 1), - dtype='float32') - cv2.initUndistortRectifyMap(self.K, self.D, self.R, self.P, - (self.width, self.height), cv2.CV_32FC1, self.mapx, self.mapy) - cv2.remap(raw, self.mapx, self.mapy, cv2.INTER_CUBIC, rectified) - - def rectifyPoint(self, uv_raw): - """ - :param uv_raw: pixel coordinates - :type uv_raw: (u, v) - - Applies the rectification specified by camera parameters - :math:`K` and :math:`D` to point (u, v) and returns the - pixel coordinates of the rectified point. - """ - - src = mkmat(1, 2, list(uv_raw)) - src.resize((1,1,2)) - dst = cv2.undistortPoints(src, self.K, self.D, R=self.R, P=self.P) - return dst[0,0] - - def project3dToPixel(self, point): - """ - :param point: 3D point - :type point: (x, y, z) - - Returns the rectified pixel coordinates (u, v) of the 3D point, - using the camera :math:`P` matrix. - This is the inverse of :meth:`projectPixelTo3dRay`.
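In matrix terms this is homogeneous projection by P followed by perspective division; a standalone numpy sketch with illustrative numbers:

import numpy
P = numpy.array([[430.0, 0.0, 320.0, 0.0],
                 [0.0, 430.0, 240.0, 0.0],
                 [0.0, 0.0, 1.0, 0.0]])      # illustrative projection matrix
X = numpy.array([0.5, -0.2, 2.0, 1.0])       # homogeneous point (x, y, z, 1)
uvw = P.dot(X)
u, v = uvw[0] / uvw[2], uvw[1] / uvw[2]      # (427.5, 197.0)
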
- """ - src = mkmat(4, 1, [point[0], point[1], point[2], 1.0]) - dst = self.P * src - x = dst[0,0] - y = dst[1,0] - w = dst[2,0] - if w != 0: - return (x / w, y / w) - else: - return (float('nan'), float('nan')) - - def projectPixelTo3dRay(self, uv): - """ - :param uv: rectified pixel coordinates - :type uv: (u, v) - - Returns the unit vector which passes from the camera center to through rectified pixel (u, v), - using the camera :math:`P` matrix. - This is the inverse of :meth:`project3dToPixel`. - """ - x = (uv[0] - self.cx()) / self.fx() - y = (uv[1] - self.cy()) / self.fy() - norm = math.sqrt(x*x + y*y + 1) - x /= norm - y /= norm - z = 1.0 / norm - return (x, y, z) - - def getDeltaU(self, deltaX, Z): - """ - :param deltaX: delta X, in cartesian space - :type deltaX: float - :param Z: Z, in cartesian space - :type Z: float - :rtype: float - - Compute delta u, given Z and delta X in Cartesian space. - For given Z, this is the inverse of :meth:`getDeltaX`. - """ - fx = self.P[0, 0] - if Z == 0: - return float('inf') - else: - return fx * deltaX / Z - - def getDeltaV(self, deltaY, Z): - """ - :param deltaY: delta Y, in cartesian space - :type deltaY: float - :param Z: Z, in cartesian space - :type Z: float - :rtype: float - - Compute delta v, given Z and delta Y in Cartesian space. - For given Z, this is the inverse of :meth:`getDeltaY`. - """ - fy = self.P[1, 1] - if Z == 0: - return float('inf') - else: - return fy * deltaY / Z - - def getDeltaX(self, deltaU, Z): - """ - :param deltaU: delta u in pixels - :type deltaU: float - :param Z: Z, in cartesian space - :type Z: float - :rtype: float - - Compute delta X, given Z in cartesian space and delta u in pixels. - For given Z, this is the inverse of :meth:`getDeltaU`. - """ - fx = self.P[0, 0] - return Z * deltaU / fx - - def getDeltaY(self, deltaV, Z): - """ - :param deltaV: delta v in pixels - :type deltaV: float - :param Z: Z, in cartesian space - :type Z: float - :rtype: float - - Compute delta Y, given Z in cartesian space and delta v in pixels. - For given Z, this is the inverse of :meth:`getDeltaV`. - """ - fy = self.P[1, 1] - return Z * deltaV / fy - - def fullResolution(self): - """Returns the full resolution of the camera""" - return self.resolution - - def intrinsicMatrix(self): - """ Returns :math:`K`, also called camera_matrix in cv docs """ - return self.K - def distortionCoeffs(self): - """ Returns :math:`D` """ - return self.D - def rotationMatrix(self): - """ Returns :math:`R` """ - return self.R - def projectionMatrix(self): - """ Returns :math:`P` """ - return self.P - def fullIntrinsicMatrix(self): - """ Return the original camera matrix for full resolution """ - return self.full_K - def fullProjectionMatrix(self): - """ Return the projection matrix for full resolution """ - return self.full_P - - def cx(self): - """ Returns x center """ - return self.P[0,2] - def cy(self): - """ Returns y center """ - return self.P[1,2] - def fx(self): - """ Returns x focal length """ - return self.P[0,0] - def fy(self): - """ Returns y focal length """ - return self.P[1,1] - - def Tx(self): - """ Return the x-translation term of the projection matrix """ - return self.P[0,3] - - def Ty(self): - """ Return the y-translation term of the projection matrix """ - return self.P[1,3] - - def tfFrame(self): - """ Returns the tf frame name - a string - of the camera. - This is the frame of the :class:`sensor_msgs.msg.CameraInfo` message. - """ - return self.tf_frame - -class StereoCameraModel: - """ - An idealized stereo camera. 
- """ - def __init__(self): - self.left = PinholeCameraModel() - self.right = PinholeCameraModel() - - def fromCameraInfo(self, left_msg, right_msg): - """ - :param left_msg: left camera parameters - :type left_msg: sensor_msgs.msg.CameraInfo - :param right_msg: right camera parameters - :type right_msg: sensor_msgs.msg.CameraInfo - - Set the camera parameters from the :class:`sensor_msgs.msg.CameraInfo` messages. - """ - self.left.fromCameraInfo(left_msg) - self.right.fromCameraInfo(right_msg) - - # [ Fx, 0, Cx, Fx*-Tx ] - # [ 0, Fy, Cy, 0 ] - # [ 0, 0, 1, 0 ] - - fx = self.right.P[0, 0] - fy = self.right.P[1, 1] - cx = self.right.P[0, 2] - cy = self.right.P[1, 2] - tx = -self.right.P[0, 3] / fx - - # Q is: - # [ 1, 0, 0, -Clx ] - # [ 0, 1, 0, -Cy ] - # [ 0, 0, 0, Fx ] - # [ 0, 0, 1 / Tx, (Crx-Clx)/Tx ] - - self.Q = numpy.zeros((4, 4), dtype='float64') - self.Q[0, 0] = 1.0 - self.Q[0, 3] = -cx - self.Q[1, 1] = 1.0 - self.Q[1, 3] = -cy - self.Q[2, 3] = fx - self.Q[3, 2] = 1 / tx - - def tfFrame(self): - """ - Returns the tf frame name - a string - of the 3d points. This is - the frame of the :class:`sensor_msgs.msg.CameraInfo` message. It - may be used as a source frame in :class:`tf.TransformListener`. - """ - - return self.left.tfFrame() - - def project3dToPixel(self, point): - """ - :param point: 3D point - :type point: (x, y, z) - - Returns the rectified pixel coordinates (u, v) of the 3D point, for each camera, as ((u_left, v_left), (u_right, v_right)) - using the cameras' :math:`P` matrices. - This is the inverse of :meth:`projectPixelTo3d`. - """ - l = self.left.project3dToPixel(point) - r = self.right.project3dToPixel(point) - return (l, r) - - def projectPixelTo3d(self, left_uv, disparity): - """ - :param left_uv: rectified pixel coordinates - :type left_uv: (u, v) - :param disparity: disparity, in pixels - :type disparity: float - - Returns the 3D point (x, y, z) for the given pixel position, - using the cameras' :math:`P` matrices. - This is the inverse of :meth:`project3dToPixel`. - - Note that a disparity of zero implies that the 3D point is at infinity. - """ - src = mkmat(4, 1, [left_uv[0], left_uv[1], disparity, 1.0]) - dst = self.Q * src - x = dst[0, 0] - y = dst[1, 0] - z = dst[2, 0] - w = dst[3, 0] - if w != 0: - return (x / w, y / w, z / w) - else: - return (0.0, 0.0, 0.0) - - def getZ(self, disparity): - """ - :param disparity: disparity, in pixels - :type disparity: float - - Returns the depth at which a point is observed with a given disparity. - This is the inverse of :meth:`getDisparity`. - - Note that a disparity of zero implies Z is infinite. - """ - if disparity == 0: - return float('inf') - Tx = -self.right.P[0, 3] - return Tx / disparity - - def getDisparity(self, Z): - """ - :param Z: Z (depth), in cartesian space - :type Z: float - - Returns the disparity observed for a point at depth Z. - This is the inverse of :meth:`getZ`. 
- """ - if Z == 0: - return float('inf') - Tx = -self.right.P[0, 3] - return Tx / Z diff --git a/src/vision_opencv/image_geometry/src/pinhole_camera_model.cpp b/src/vision_opencv/image_geometry/src/pinhole_camera_model.cpp deleted file mode 100644 index 69f6f2c..0000000 --- a/src/vision_opencv/image_geometry/src/pinhole_camera_model.cpp +++ /dev/null @@ -1,484 +0,0 @@ -#include "image_geometry/pinhole_camera_model.h" -#include -#ifdef BOOST_SHARED_PTR_HPP_INCLUDED -#include -#endif - -namespace image_geometry { - -enum DistortionState { NONE, CALIBRATED, UNKNOWN }; - -struct PinholeCameraModel::Cache -{ - DistortionState distortion_state; - - cv::Mat_ K_binned, P_binned; // Binning applied, but not cropping - - mutable bool full_maps_dirty; - mutable cv::Mat full_map1, full_map2; - - mutable bool reduced_maps_dirty; - mutable cv::Mat reduced_map1, reduced_map2; - - mutable bool rectified_roi_dirty; - mutable cv::Rect rectified_roi; - - Cache() - : full_maps_dirty(true), - reduced_maps_dirty(true), - rectified_roi_dirty(true) - { - } -}; - -PinholeCameraModel::PinholeCameraModel() -{ -} - -PinholeCameraModel& PinholeCameraModel::operator=(const PinholeCameraModel& other) -{ - if (other.initialized()) - this->fromCameraInfo(other.cameraInfo()); - return *this; -} - -PinholeCameraModel::PinholeCameraModel(const PinholeCameraModel& other) -{ - if (other.initialized()) - fromCameraInfo(other.cam_info_); -} - -// For uint32_t, string, bool... -template -bool update(const T& new_val, T& my_val) -{ - if (my_val == new_val) - return false; - my_val = new_val; - return true; -} - -// For std::vector -template -bool updateMat(const MatT& new_mat, MatT& my_mat, cv::Mat_& cv_mat, int rows, int cols) -{ - if ((my_mat == new_mat) && (my_mat.size() == cv_mat.rows*cv_mat.cols)) - return false; - my_mat = new_mat; - // D may be empty if camera is uncalibrated or distortion model is non-standard - cv_mat = (my_mat.size() == 0) ? cv::Mat_() : cv::Mat_(rows, cols, &my_mat[0]); - return true; -} - -template -bool updateMat(const MatT& new_mat, MatT& my_mat, MatU& cv_mat) -{ - if ((my_mat == new_mat) && (my_mat.size() == cv_mat.rows*cv_mat.cols)) - return false; - my_mat = new_mat; - // D may be empty if camera is uncalibrated or distortion model is non-standard - cv_mat = MatU(&my_mat[0]); - return true; -} - -bool PinholeCameraModel::fromCameraInfo(const sensor_msgs::CameraInfo& msg) -{ - // Create our repository of cached data (rectification maps, etc.) - if (!cache_) -#ifdef BOOST_SHARED_PTR_HPP_INCLUDED - cache_ = boost::make_shared(); -#else - cache_ = std::make_shared(); -#endif - - // Binning = 0 is considered the same as binning = 1 (no binning). - uint32_t binning_x = msg.binning_x ? msg.binning_x : 1; - uint32_t binning_y = msg.binning_y ? msg.binning_y : 1; - - // ROI all zeros is considered the same as full resolution. - sensor_msgs::RegionOfInterest roi = msg.roi; - if (roi.x_offset == 0 && roi.y_offset == 0 && roi.width == 0 && roi.height == 0) { - roi.width = msg.width; - roi.height = msg.height; - } - - // Update time stamp (and frame_id if that changes for some reason) - cam_info_.header = msg.header; - - // Update any parameters that have changed. The full rectification maps are - // invalidated by any change in the calibration parameters OR binning. 
- bool &full_dirty = cache_->full_maps_dirty; - full_dirty |= update(msg.height, cam_info_.height); - full_dirty |= update(msg.width, cam_info_.width); - full_dirty |= update(msg.distortion_model, cam_info_.distortion_model); - full_dirty |= updateMat(msg.D, cam_info_.D, D_, 1, msg.D.size()); - full_dirty |= updateMat(msg.K, cam_info_.K, K_full_); - full_dirty |= updateMat(msg.R, cam_info_.R, R_); - full_dirty |= updateMat(msg.P, cam_info_.P, P_full_); - full_dirty |= update(binning_x, cam_info_.binning_x); - full_dirty |= update(binning_y, cam_info_.binning_y); - - // The reduced rectification maps are invalidated by any of the above or a - // change in ROI. - bool &reduced_dirty = cache_->reduced_maps_dirty; - reduced_dirty = full_dirty; - reduced_dirty |= update(roi.x_offset, cam_info_.roi.x_offset); - reduced_dirty |= update(roi.y_offset, cam_info_.roi.y_offset); - reduced_dirty |= update(roi.height, cam_info_.roi.height); - reduced_dirty |= update(roi.width, cam_info_.roi.width); - reduced_dirty |= update(roi.do_rectify, cam_info_.roi.do_rectify); - // As is the rectified ROI - cache_->rectified_roi_dirty = reduced_dirty; - - // Figure out how to handle the distortion - if (cam_info_.distortion_model == sensor_msgs::distortion_models::PLUMB_BOB || - cam_info_.distortion_model == sensor_msgs::distortion_models::RATIONAL_POLYNOMIAL) { - // If any distortion coefficient is non-zero, then need to apply the distortion - cache_->distortion_state = NONE; - for (size_t i = 0; i < cam_info_.D.size(); ++i) - { - if (cam_info_.D[i] != 0) - { - cache_->distortion_state = CALIBRATED; - break; - } - } - } - else - cache_->distortion_state = UNKNOWN; - - // If necessary, create new K_ and P_ adjusted for binning and ROI - /// @todo Calculate and use rectified ROI - bool adjust_binning = (binning_x > 1) || (binning_y > 1); - bool adjust_roi = (roi.x_offset != 0) || (roi.y_offset != 0); - - if (!adjust_binning && !adjust_roi) { - K_ = K_full_; - P_ = P_full_; - } - else { - K_ = K_full_; - P_ = P_full_; - - // ROI is in full image coordinates, so change it first - if (adjust_roi) { - // Move principal point by the offset - /// @todo Adjust P by rectified ROI instead - K_(0,2) -= roi.x_offset; - K_(1,2) -= roi.y_offset; - P_(0,2) -= roi.x_offset; - P_(1,2) -= roi.y_offset; - } - - if (binning_x > 1) { - double scale_x = 1.0 / binning_x; - K_(0,0) *= scale_x; - K_(0,2) *= scale_x; - P_(0,0) *= scale_x; - P_(0,2) *= scale_x; - P_(0,3) *= scale_x; - } - if (binning_y > 1) { - double scale_y = 1.0 / binning_y; - K_(1,1) *= scale_y; - K_(1,2) *= scale_y; - P_(1,1) *= scale_y; - P_(1,2) *= scale_y; - P_(1,3) *= scale_y; - } - } - - return reduced_dirty; -} - -bool PinholeCameraModel::fromCameraInfo(const sensor_msgs::CameraInfoConstPtr& msg) -{ - return fromCameraInfo(*msg); -} - -cv::Size PinholeCameraModel::fullResolution() const -{ - assert( initialized() ); - return cv::Size(cam_info_.width, cam_info_.height); -} - -cv::Size PinholeCameraModel::reducedResolution() const -{ - assert( initialized() ); - - cv::Rect roi = rectifiedRoi(); - return cv::Size(roi.width / binningX(), roi.height / binningY()); -} - -cv::Point2d PinholeCameraModel::toFullResolution(const cv::Point2d& uv_reduced) const -{ - cv::Rect roi = rectifiedRoi(); - return cv::Point2d(uv_reduced.x * binningX() + roi.x, - uv_reduced.y * binningY() + roi.y); -} - -cv::Rect PinholeCameraModel::toFullResolution(const cv::Rect& roi_reduced) const -{ - cv::Rect roi = rectifiedRoi(); - return cv::Rect(roi_reduced.x * binningX() + roi.x, - 
roi_reduced.y * binningY() + roi.y, - roi_reduced.width * binningX(), - roi_reduced.height * binningY()); -} - -cv::Point2d PinholeCameraModel::toReducedResolution(const cv::Point2d& uv_full) const -{ - cv::Rect roi = rectifiedRoi(); - return cv::Point2d((uv_full.x - roi.x) / binningX(), - (uv_full.y - roi.y) / binningY()); -} - -cv::Rect PinholeCameraModel::toReducedResolution(const cv::Rect& roi_full) const -{ - cv::Rect roi = rectifiedRoi(); - return cv::Rect((roi_full.x - roi.x) / binningX(), - (roi_full.y - roi.y) / binningY(), - roi_full.width / binningX(), - roi_full.height / binningY()); -} - -cv::Rect PinholeCameraModel::rawRoi() const -{ - assert( initialized() ); - - return cv::Rect(cam_info_.roi.x_offset, cam_info_.roi.y_offset, - cam_info_.roi.width, cam_info_.roi.height); -} - -cv::Rect PinholeCameraModel::rectifiedRoi() const -{ - assert( initialized() ); - - if (cache_->rectified_roi_dirty) - { - if (!cam_info_.roi.do_rectify) - cache_->rectified_roi = rawRoi(); - else - cache_->rectified_roi = rectifyRoi(rawRoi()); - cache_->rectified_roi_dirty = false; - } - return cache_->rectified_roi; -} - -cv::Point2d PinholeCameraModel::project3dToPixel(const cv::Point3d& xyz) const -{ - assert( initialized() ); - assert(P_(2, 3) == 0.0); // Calibrated stereo cameras should be in the same plane - - // [U V W]^T = P * [X Y Z 1]^T - // u = U/W - // v = V/W - cv::Point2d uv_rect; - uv_rect.x = (fx()*xyz.x + Tx()) / xyz.z + cx(); - uv_rect.y = (fy()*xyz.y + Ty()) / xyz.z + cy(); - return uv_rect; -} - -cv::Point3d PinholeCameraModel::projectPixelTo3dRay(const cv::Point2d& uv_rect) const -{ - assert( initialized() ); - - cv::Point3d ray; - ray.x = (uv_rect.x - cx() - Tx()) / fx(); - ray.y = (uv_rect.y - cy() - Ty()) / fy(); - ray.z = 1.0; - return ray; -} - -void PinholeCameraModel::rectifyImage(const cv::Mat& raw, cv::Mat& rectified, int interpolation) const -{ - assert( initialized() ); - - switch (cache_->distortion_state) { - case NONE: - raw.copyTo(rectified); - break; - case CALIBRATED: - initRectificationMaps(); - if (raw.depth() == CV_32F || raw.depth() == CV_64F) - { - cv::remap(raw, rectified, cache_->reduced_map1, cache_->reduced_map2, interpolation, cv::BORDER_CONSTANT, std::numeric_limits::quiet_NaN()); - } - else { - cv::remap(raw, rectified, cache_->reduced_map1, cache_->reduced_map2, interpolation); - } - break; - default: - assert(cache_->distortion_state == UNKNOWN); - throw Exception("Cannot call rectifyImage when distortion is unknown."); - } -} - -void PinholeCameraModel::unrectifyImage(const cv::Mat& rectified, cv::Mat& raw, int interpolation) const -{ - assert( initialized() ); - - throw Exception("PinholeCameraModel::unrectifyImage is unimplemented."); - /// @todo Implement unrectifyImage() - // Similar to rectifyImage, but need to build separate set of inverse maps (raw->rectified)... - // - Build src_pt Mat with all the raw pixel coordinates (or do it one row at a time) - // - Do cv::undistortPoints(src_pt, dst_pt, K_, D_, R_, P_) - // - Use convertMaps() to convert dst_pt to fast fixed-point maps - // - cv::remap(rectified, raw, ...) - // Need interpolation argument. Same caching behavior? 
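The recipe in that TODO can be written down directly in Python with cv2; a sketch, assuming K, D, R, P are the model's matrices as numpy arrays:

import cv2
import numpy

def unrectify_image(rectified, K, D, R, P, interpolation=cv2.INTER_LINEAR):
    h, w = rectified.shape[:2]
    # All raw pixel coordinates, shape (h*w, 1, 2), float32 as cv2 requires
    us, vs = numpy.meshgrid(numpy.arange(w), numpy.arange(h))
    raw_pts = numpy.stack([us, vs], axis=-1).reshape(-1, 1, 2).astype(numpy.float32)
    # Where each raw pixel lands in the rectified image
    rect_pts = cv2.undistortPoints(raw_pts, K, D, R=R, P=P)
    maps = rect_pts.reshape(h, w, 2)
    # Convert to fast fixed-point maps, then sample the rectified image
    map1, map2 = cv2.convertMaps(maps, None, cv2.CV_16SC2)
    return cv2.remap(rectified, map1, map2, interpolation)
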
-} - -cv::Point2d PinholeCameraModel::rectifyPoint(const cv::Point2d& uv_raw) const -{ - assert( initialized() ); - - if (cache_->distortion_state == NONE) - return uv_raw; - if (cache_->distortion_state == UNKNOWN) - throw Exception("Cannot call rectifyPoint when distortion is unknown."); - assert(cache_->distortion_state == CALIBRATED); - - /// @todo cv::undistortPoints requires the point data to be float, should allow double - cv::Point2f raw32 = uv_raw, rect32; - const cv::Mat src_pt(1, 1, CV_32FC2, &raw32.x); - cv::Mat dst_pt(1, 1, CV_32FC2, &rect32.x); - cv::undistortPoints(src_pt, dst_pt, K_, D_, R_, P_); - return rect32; -} - -cv::Point2d PinholeCameraModel::unrectifyPoint(const cv::Point2d& uv_rect) const -{ - assert( initialized() ); - - if (cache_->distortion_state == NONE) - return uv_rect; - if (cache_->distortion_state == UNKNOWN) - throw Exception("Cannot call unrectifyPoint when distortion is unknown."); - assert(cache_->distortion_state == CALIBRATED); - - // Convert to a ray - cv::Point3d ray = projectPixelTo3dRay(uv_rect); - - // Project the ray on the image - cv::Mat r_vec, t_vec = cv::Mat_::zeros(3, 1); - cv::Rodrigues(R_.t(), r_vec); - std::vector image_point; - cv::projectPoints(std::vector(1, ray), r_vec, t_vec, K_, D_, image_point); - - return image_point[0]; -} - -cv::Rect PinholeCameraModel::rectifyRoi(const cv::Rect& roi_raw) const -{ - assert( initialized() ); - - /// @todo Actually implement "best fit" as described by REP 104. - - // For now, just unrectify the four corners and take the bounding box. - cv::Point2d rect_tl = rectifyPoint(cv::Point2d(roi_raw.x, roi_raw.y)); - cv::Point2d rect_tr = rectifyPoint(cv::Point2d(roi_raw.x + roi_raw.width, roi_raw.y)); - cv::Point2d rect_br = rectifyPoint(cv::Point2d(roi_raw.x + roi_raw.width, - roi_raw.y + roi_raw.height)); - cv::Point2d rect_bl = rectifyPoint(cv::Point2d(roi_raw.x, roi_raw.y + roi_raw.height)); - - cv::Point roi_tl(std::ceil (std::min(rect_tl.x, rect_bl.x)), - std::ceil (std::min(rect_tl.y, rect_tr.y))); - cv::Point roi_br(std::floor(std::max(rect_tr.x, rect_br.x)), - std::floor(std::max(rect_bl.y, rect_br.y))); - - return cv::Rect(roi_tl.x, roi_tl.y, roi_br.x - roi_tl.x, roi_br.y - roi_tl.y); -} - -cv::Rect PinholeCameraModel::unrectifyRoi(const cv::Rect& roi_rect) const -{ - assert( initialized() ); - - /// @todo Actually implement "best fit" as described by REP 104. - - // For now, just unrectify the four corners and take the bounding box. - cv::Point2d raw_tl = unrectifyPoint(cv::Point2d(roi_rect.x, roi_rect.y)); - cv::Point2d raw_tr = unrectifyPoint(cv::Point2d(roi_rect.x + roi_rect.width, roi_rect.y)); - cv::Point2d raw_br = unrectifyPoint(cv::Point2d(roi_rect.x + roi_rect.width, - roi_rect.y + roi_rect.height)); - cv::Point2d raw_bl = unrectifyPoint(cv::Point2d(roi_rect.x, roi_rect.y + roi_rect.height)); - - cv::Point roi_tl(std::floor(std::min(raw_tl.x, raw_bl.x)), - std::floor(std::min(raw_tl.y, raw_tr.y))); - cv::Point roi_br(std::ceil (std::max(raw_tr.x, raw_br.x)), - std::ceil (std::max(raw_bl.y, raw_br.y))); - - return cv::Rect(roi_tl.x, roi_tl.y, roi_br.x - roi_tl.x, roi_br.y - roi_tl.y); -} - -void PinholeCameraModel::initRectificationMaps() const -{ - /// @todo For large binning settings, can drop extra rows/cols at bottom/right boundary. - /// Make sure we're handling that 100% correctly. - - if (cache_->full_maps_dirty) { - // Create the full-size map at the binned resolution - /// @todo Should binned resolution, K, P be part of public API? 
- cv::Size binned_resolution = fullResolution(); - binned_resolution.width /= binningX(); - binned_resolution.height /= binningY(); - - cv::Matx33d K_binned; - cv::Matx34d P_binned; - if (binningX() == 1 && binningY() == 1) { - K_binned = K_full_; - P_binned = P_full_; - } - else { - K_binned = K_full_; - P_binned = P_full_; - if (binningX() > 1) { - double scale_x = 1.0 / binningX(); - K_binned(0,0) *= scale_x; - K_binned(0,2) *= scale_x; - P_binned(0,0) *= scale_x; - P_binned(0,2) *= scale_x; - P_binned(0,3) *= scale_x; - } - if (binningY() > 1) { - double scale_y = 1.0 / binningY(); - K_binned(1,1) *= scale_y; - K_binned(1,2) *= scale_y; - P_binned(1,1) *= scale_y; - P_binned(1,2) *= scale_y; - P_binned(1,3) *= scale_y; - } - } - - // Note: m1type=CV_16SC2 to use fast fixed-point maps (see cv::remap) - cv::initUndistortRectifyMap(K_binned, D_, R_, P_binned, binned_resolution, - CV_16SC2, cache_->full_map1, cache_->full_map2); - cache_->full_maps_dirty = false; - } - - if (cache_->reduced_maps_dirty) { - /// @todo Use rectified ROI - cv::Rect roi(cam_info_.roi.x_offset, cam_info_.roi.y_offset, - cam_info_.roi.width, cam_info_.roi.height); - if (roi.x != 0 || roi.y != 0 || - roi.height != (int)cam_info_.height || - roi.width != (int)cam_info_.width) { - - // map1 contains integer (x,y) offsets, which we adjust by the ROI offset - // map2 contains LUT index for subpixel interpolation, which we can leave as-is - roi.x /= binningX(); - roi.y /= binningY(); - roi.width /= binningX(); - roi.height /= binningY(); - cache_->reduced_map1 = cache_->full_map1(roi) - cv::Scalar(roi.x, roi.y); - cache_->reduced_map2 = cache_->full_map2(roi); - } - else { - // Otherwise we're rectifying the full image - cache_->reduced_map1 = cache_->full_map1; - cache_->reduced_map2 = cache_->full_map2; - } - cache_->reduced_maps_dirty = false; - } -} - -} //namespace image_geometry diff --git a/src/vision_opencv/image_geometry/src/stereo_camera_model.cpp b/src/vision_opencv/image_geometry/src/stereo_camera_model.cpp deleted file mode 100644 index 062edb7..0000000 --- a/src/vision_opencv/image_geometry/src/stereo_camera_model.cpp +++ /dev/null @@ -1,140 +0,0 @@ -#include "image_geometry/stereo_camera_model.h" - -namespace image_geometry { - -StereoCameraModel::StereoCameraModel() - : Q_(0.0) -{ - Q_(0,0) = Q_(1,1) = 1.0; -} - -StereoCameraModel::StereoCameraModel(const StereoCameraModel& other) - : left_(other.left_), right_(other.right_), - Q_(0.0) -{ - Q_(0,0) = Q_(1,1) = 1.0; - if (other.initialized()) - updateQ(); -} - -StereoCameraModel& StereoCameraModel::operator=(const StereoCameraModel& other) -{ - if (other.initialized()) - this->fromCameraInfo(other.left_.cameraInfo(), other.right_.cameraInfo()); - return *this; -} - -bool StereoCameraModel::fromCameraInfo(const sensor_msgs::CameraInfo& left, - const sensor_msgs::CameraInfo& right) -{ - bool changed_left = left_.fromCameraInfo(left); - bool changed_right = right_.fromCameraInfo(right); - bool changed = changed_left || changed_right; - - // Note: don't require identical time stamps to allow imperfectly synced stereo. 
- assert( left_.tfFrame() == right_.tfFrame() ); - assert( left_.fx() == right_.fx() ); - assert( left_.fy() == right_.fy() ); - assert( left_.cy() == right_.cy() ); - // cx may differ for verged cameras - - if (changed) - updateQ(); - - return changed; -} - -bool StereoCameraModel::fromCameraInfo(const sensor_msgs::CameraInfoConstPtr& left, - const sensor_msgs::CameraInfoConstPtr& right) -{ - return fromCameraInfo(*left, *right); -} - -void StereoCameraModel::updateQ() -{ - // Update variable fields of reprojection matrix - /* - From Springer Handbook of Robotics, p. 524: - - [ Fx 0 Cx 0 ] - P = [ 0 Fy Cy 0 ] - [ 0 0 1 0 ] - - [ Fx 0 Cx' FxTx ] - P' = [ 0 Fy Cy 0 ] - [ 0 0 1 0 ] - where primed parameters are from the left projection matrix, unprimed from the right. - - [u v 1]^T = P * [x y z 1]^T - [u-d v 1]^T = P' * [x y z 1]^T - - Combining the two equations above results in the following equation. - - [u v u-d 1]^T = [ Fx 0 Cx 0 ] * [ x y z 1]^T - [ 0 Fy Cy 0 ] - [ Fx 0 Cx' FxTx ] - [ 0 0 1 0 ] - - Subtracting the 3rd row from the first and inverting the expression - results in the following equation. - - [x y z 1]^T = Q * [u v d 1]^T - - Where Q is defined as - - Q = [ FyTx 0 0 -FyCxTx ] - [ 0 FxTx 0 -FxCyTx ] - [ 0 0 0 FxFyTx ] - [ 0 0 -Fy Fy(Cx-Cx') ] - - Using the assumption Fx = Fy, Q can be simplified to the following. But for - compatibility with stereo cameras with different focal lengths we will use - the full Q matrix. - - [ 1 0 0 -Cx ] - Q = [ 0 1 0 -Cy ] - [ 0 0 0 Fx ] - [ 0 0 -1/Tx (Cx-Cx')/Tx ] - - Disparity = x_left - x_right - - */ - double Tx = -baseline(); // The baseline member negates our Tx. Undo this negation - Q_(0,0) = left_.fy() * Tx; - Q_(0,3) = -left_.fy() * left_.cx() * Tx; - Q_(1,1) = left_.fx() * Tx; - Q_(1,3) = -left_.fx() * left_.cy() * Tx; - Q_(2,3) = left_.fx() * left_.fy() * Tx; - Q_(3,2) = -left_.fy(); - Q_(3,3) = left_.fy() * (left_.cx() - right_.cx()); // zero when disparities are pre-adjusted -} - -void StereoCameraModel::projectDisparityTo3d(const cv::Point2d& left_uv_rect, float disparity, - cv::Point3d& xyz) const -{ - assert( initialized() ); - - // Do the math inline: - // [X Y Z W]^T = Q * [u v d 1]^T - // Point = (X/W, Y/W, Z/W) - // cv::perspectiveTransform could be used but with more overhead.
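The Q matrix above can be spot-checked numerically; a standalone numpy sketch with made-up calibration values, following updateQ()'s sign convention Tx = -baseline:

import numpy
fx = fy = 300.0
cx = cy = 200.0
Tx = -0.09                                     # updateQ() uses Tx = -baseline()
Q = numpy.array([[fy * Tx, 0.0, 0.0, -fy * cx * Tx],
                 [0.0, fx * Tx, 0.0, -fx * cy * Tx],
                 [0.0, 0.0, 0.0, fx * fy * Tx],
                 [0.0, 0.0, -fy, 0.0]])        # Q(3,3) = 0 when cx == cx'
d = 9.0
X, Y, Z, W = Q.dot(numpy.array([cx, cy, d, 1.0]))
assert abs(Z / W - 3.0) < 1e-9                 # Z = fx * baseline / d = 300 * 0.09 / 9
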
- double u = left_uv_rect.x, v = left_uv_rect.y; - cv::Point3d XYZ( (Q_(0,0) * u) + Q_(0,3), (Q_(1,1) * v) + Q_(1,3), Q_(2,3)); - double W = Q_(3,2)*disparity + Q_(3,3); - xyz = XYZ * (1.0/W); -} - -const double StereoCameraModel::MISSING_Z = 10000.; - -void StereoCameraModel::projectDisparityImageTo3d(const cv::Mat& disparity, cv::Mat& point_cloud, - bool handleMissingValues) const -{ - assert( initialized() ); - - cv::reprojectImageTo3D(disparity, point_cloud, Q_, handleMissingValues); -} - -} //namespace image_geometry diff --git a/src/vision_opencv/image_geometry/test/CMakeLists.txt b/src/vision_opencv/image_geometry/test/CMakeLists.txt deleted file mode 100644 index 6dba1e5..0000000 --- a/src/vision_opencv/image_geometry/test/CMakeLists.txt +++ /dev/null @@ -1,4 +0,0 @@ -catkin_add_nosetests(directed.py) - -catkin_add_gtest(${PROJECT_NAME}-utest utest.cpp) -target_link_libraries(${PROJECT_NAME}-utest ${PROJECT_NAME} ${OpenCV_LIBS}) diff --git a/src/vision_opencv/image_geometry/test/directed.py b/src/vision_opencv/image_geometry/test/directed.py deleted file mode 100644 index 3c0c769..0000000 --- a/src/vision_opencv/image_geometry/test/directed.py +++ /dev/null @@ -1,77 +0,0 @@ -from __future__ import print_function - -import rostest -import rospy -import unittest -import sensor_msgs.msg - -from image_geometry import PinholeCameraModel, StereoCameraModel - -class TestDirected(unittest.TestCase): - - def setUp(self): - pass - - def test_monocular(self): - ci = sensor_msgs.msg.CameraInfo() - ci.width = 640 - ci.height = 480 - print(ci) - cam = PinholeCameraModel() - cam.fromCameraInfo(ci) - print(cam.rectifyPoint((0, 0))) - - print(cam.project3dToPixel((0,0,0))) - - def test_stereo(self): - lmsg = sensor_msgs.msg.CameraInfo() - rmsg = sensor_msgs.msg.CameraInfo() - for m in (lmsg, rmsg): - m.width = 640 - m.height = 480 - - # These parameters taken from a real camera calibration - lmsg.D = [-0.363528858080088, 0.16117037733986861, -8.1109585007538829e-05, -0.00044776712298447841, 0.0] - lmsg.K = [430.15433020105519, 0.0, 311.71339830549732, 0.0, 430.60920415473657, 221.06824942698509, 0.0, 0.0, 1.0] - lmsg.R = [0.99806560714807102, 0.0068562422224214027, 0.061790256276695904, -0.0067522959054715113, 0.99997541519165112, -0.0018909025066874664, -0.061801701660692349, 0.0014700186639396652, 0.99808736527268516] - lmsg.P = [295.53402059708782, 0.0, 285.55760765075684, 0.0, 0.0, 295.53402059708782, 223.29617881774902, 0.0, 0.0, 0.0, 1.0, 0.0] - - rmsg.D = [-0.3560641041112021, 0.15647260261553159, -0.00016442960757099968, -0.00093175810713916221] - rmsg.K = [428.38163131344191, 0.0, 327.95553847249192, 0.0, 428.85728580588329, 217.54828640915309, 0.0, 0.0, 1.0] - rmsg.R = [0.9982082576219119, 0.0067433328293516528, 0.059454199832973849, -0.0068433268864187356, 0.99997549128605434, 0.0014784127772287513, -0.059442773257581252, -0.0018826283666309878, 0.99822993965212292] - rmsg.P = [295.53402059708782, 0.0, 285.55760765075684, -26.507895206214123, 0.0, 295.53402059708782, 223.29617881774902, 0.0, 0.0, 0.0, 1.0, 0.0] - - cam = StereoCameraModel() - cam.fromCameraInfo(lmsg, rmsg) - - for x in (16, 320, m.width - 16): - for y in (16, 240, m.height - 16): - for d in range(1, 10): - pt3d = cam.projectPixelTo3d((x, y), d) - ((lx, ly), (rx, ry)) = cam.project3dToPixel(pt3d) - self.assertAlmostEqual(y, ly, 3) - self.assertAlmostEqual(y, ry, 3) - self.assertAlmostEqual(x, lx, 3) - self.assertAlmostEqual(x, rx + d, 3) - - u = 100.0 - v = 200.0 - du = 17.0 - dv = 23.0 - Z = 2.0 - xyz0 = 
cam.left.projectPixelTo3dRay((u, v))
-        xyz0 = (xyz0[0] * (Z / xyz0[2]), xyz0[1] * (Z / xyz0[2]), Z)
-        xyz1 = cam.left.projectPixelTo3dRay((u + du, v + dv))
-        xyz1 = (xyz1[0] * (Z / xyz1[2]), xyz1[1] * (Z / xyz1[2]), Z)
-        self.assertAlmostEqual(cam.left.getDeltaU(xyz1[0] - xyz0[0], Z), du, 3)
-        self.assertAlmostEqual(cam.left.getDeltaV(xyz1[1] - xyz0[1], Z), dv, 3)
-        self.assertAlmostEqual(cam.left.getDeltaX(du, Z), xyz1[0] - xyz0[0], 3)
-        self.assertAlmostEqual(cam.left.getDeltaY(dv, Z), xyz1[1] - xyz0[1], 3)
-
-if __name__ == '__main__':
-    if 1:
-        rostest.unitrun('image_geometry', 'directed', TestDirected)
-    else:
-        suite = unittest.TestSuite()
-        suite.addTest(TestDirected('test_stereo'))
-        unittest.TextTestRunner(verbosity=2).run(suite)
diff --git a/src/vision_opencv/image_geometry/test/utest.cpp b/src/vision_opencv/image_geometry/test/utest.cpp
deleted file mode 100644
index 2589019..0000000
--- a/src/vision_opencv/image_geometry/test/utest.cpp
+++ /dev/null
@@ -1,259 +0,0 @@
-#include "image_geometry/pinhole_camera_model.h"
-#include <sensor_msgs/distortion_models.h>
-#include <gtest/gtest.h>
-
-/// @todo Tests with simple values (R = identity, D = 0, P = K or simple scaling)
-/// @todo Test projection functions for right stereo values, P(:,3) != 0
-/// @todo Tests for [un]rectifyImage
-/// @todo Tests using ROI, needs support from PinholeCameraModel
-/// @todo Tests for StereoCameraModel
-
-class PinholeTest : public testing::Test
-{
-protected:
-  virtual void SetUp()
-  {
-    /// @todo Just load these from file
-    // These parameters taken from a real camera calibration
-    double D[] = {-0.363528858080088, 0.16117037733986861, -8.1109585007538829e-05, -0.00044776712298447841, 0.0};
-    double K[] = {430.15433020105519, 0.0, 311.71339830549732,
-                  0.0, 430.60920415473657, 221.06824942698509,
-                  0.0, 0.0, 1.0};
-    double R[] = {0.99806560714807102, 0.0068562422224214027, 0.061790256276695904,
-                  -0.0067522959054715113, 0.99997541519165112, -0.0018909025066874664,
-                  -0.061801701660692349, 0.0014700186639396652, 0.99808736527268516};
-    double P[] = {295.53402059708782, 0.0, 285.55760765075684, 0.0,
-                  0.0, 295.53402059708782, 223.29617881774902, 0.0,
-                  0.0, 0.0, 1.0, 0.0};
-
-    cam_info_.header.frame_id = "tf_frame";
-    cam_info_.height = 480;
-    cam_info_.width = 640;
-    // No ROI
-    cam_info_.D.resize(5);
-    std::copy(D, D+5, cam_info_.D.begin());
-    std::copy(K, K+9, cam_info_.K.begin());
-    std::copy(R, R+9, cam_info_.R.begin());
-    std::copy(P, P+12, cam_info_.P.begin());
-    cam_info_.distortion_model = sensor_msgs::distortion_models::PLUMB_BOB;
-
-    model_.fromCameraInfo(cam_info_);
-  }
-
-  sensor_msgs::CameraInfo cam_info_;
-  image_geometry::PinholeCameraModel model_;
-};
-
-TEST_F(PinholeTest, accessorsCorrect)
-{
-  EXPECT_STREQ("tf_frame", model_.tfFrame().c_str());
-  EXPECT_EQ(cam_info_.P[0], model_.fx());
-  EXPECT_EQ(cam_info_.P[5], model_.fy());
-  EXPECT_EQ(cam_info_.P[2], model_.cx());
-  EXPECT_EQ(cam_info_.P[6], model_.cy());
-}
-
-TEST_F(PinholeTest, projectPoint)
-{
-  // Spot test an arbitrary point.
-  {
-    cv::Point2d uv(100, 100);
-    cv::Point3d xyz = model_.projectPixelTo3dRay(uv);
-    EXPECT_NEAR(-0.62787224048135637, xyz.x, 1e-8);
-    EXPECT_NEAR(-0.41719792045817677, xyz.y, 1e-8);
-    EXPECT_DOUBLE_EQ(1.0, xyz.z);
-  }
-
-  // Principal point should project straight out.
-  {
-    cv::Point2d uv(model_.cx(), model_.cy());
-    cv::Point3d xyz = model_.projectPixelTo3dRay(uv);
-    EXPECT_DOUBLE_EQ(0.0, xyz.x);
-    EXPECT_DOUBLE_EQ(0.0, xyz.y);
-    EXPECT_DOUBLE_EQ(1.0, xyz.z);
-  }
-
-  // Check projecting to 3d and back over entire image is accurate.
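The loop that follows exercises the same project/unproject round trip the Python test above relies on. A minimal standalone sketch of that invariant (assuming a sourced ROS melodic environment with image_geometry on the Python path; D is zeroed and R is the identity here so the round trip is exact through P alone):

    import sensor_msgs.msg
    from image_geometry import PinholeCameraModel

    ci = sensor_msgs.msg.CameraInfo()
    ci.width, ci.height = 640, 480
    ci.D = [0.0] * 5
    ci.K = [430.15433020105519, 0.0, 311.71339830549732,
            0.0, 430.60920415473657, 221.06824942698509,
            0.0, 0.0, 1.0]
    ci.R = [1.0, 0.0, 0.0,  0.0, 1.0, 0.0,  0.0, 0.0, 1.0]
    ci.P = [295.53402059708782, 0.0, 285.55760765075684, 0.0,
            0.0, 295.53402059708782, 223.29617881774902, 0.0,
            0.0, 0.0, 1.0, 0.0]

    cam = PinholeCameraModel()
    cam.fromCameraInfo(ci)
    ray = cam.projectPixelTo3dRay((100, 100))  # unit-norm ray through pixel (100, 100)
    uv = cam.project3dToPixel(ray)             # should come back as ~(100.0, 100.0)
    print(ray, uv)

Note the Python model returns a unit-norm ray while the C++ model normalizes to z = 1; project3dToPixel divides by z either way, so the round trip holds in both APIs.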
- const size_t step = 10; - for (size_t row = 0; row <= cam_info_.height; row += step) { - for (size_t col = 0; col <= cam_info_.width; col += step) { - cv::Point2d uv(row, col), uv_back; - cv::Point3d xyz = model_.projectPixelTo3dRay(uv); - uv_back = model_.project3dToPixel(xyz); - // Measured max error at 1.13687e-13 - EXPECT_NEAR(uv.x, uv_back.x, 1.14e-13) << "at (" << row << ", " << col << ")"; - EXPECT_NEAR(uv.y, uv_back.y, 1.14e-13) << "at (" << row << ", " << col << ")"; - } - } -} - -TEST_F(PinholeTest, rectifyPoint) -{ - // Spot test an arbitrary point. - { - cv::Point2d uv_raw(100, 100), uv_rect; - uv_rect = model_.rectifyPoint(uv_raw); - EXPECT_DOUBLE_EQ(142.30311584472656, uv_rect.x); - EXPECT_DOUBLE_EQ(132.061065673828, uv_rect.y); - } - - /// @todo Need R = identity for the principal point tests. -#if 0 - // Test rectifyPoint takes (c'x, c'y) [from K] -> (cx, cy) [from P]. - double cxp = model_.intrinsicMatrix()(0,2), cyp = model_.intrinsicMatrix()(1,2); - { - cv::Point2d uv_raw(cxp, cyp), uv_rect; - model_.rectifyPoint(uv_raw, uv_rect); - EXPECT_NEAR(uv_rect.x, model_.cx(), 1e-4); - EXPECT_NEAR(uv_rect.y, model_.cy(), 1e-4); - } - - // Test unrectifyPoint takes (cx, cy) [from P] -> (c'x, c'y) [from K]. - { - cv::Point2d uv_rect(model_.cx(), model_.cy()), uv_raw; - model_.unrectifyPoint(uv_rect, uv_raw); - EXPECT_NEAR(uv_raw.x, cxp, 1e-4); - EXPECT_NEAR(uv_raw.y, cyp, 1e-4); - } -#endif - - // Check rectifying then unrectifying over most of the image is accurate. - const size_t step = 5; - const size_t border = 65; // Expect bad accuracy far from the center of the image. - for (size_t row = border; row <= cam_info_.height - border; row += step) { - for (size_t col = border; col <= cam_info_.width - border; col += step) { - cv::Point2d uv_raw(row, col), uv_rect, uv_unrect; - uv_rect = model_.rectifyPoint(uv_raw); - uv_unrect = model_.unrectifyPoint(uv_rect); - // Check that we're at least within a pixel... - EXPECT_NEAR(uv_raw.x, uv_unrect.x, 1.0); - EXPECT_NEAR(uv_raw.y, uv_unrect.y, 1.0); - } - } -} - -TEST_F(PinholeTest, getDeltas) -{ - double u = 100.0, v = 200.0, du = 17.0, dv = 23.0, Z = 2.0; - cv::Point2d uv0(u, v), uv1(u + du, v + dv); - cv::Point3d xyz0, xyz1; - xyz0 = model_.projectPixelTo3dRay(uv0); - xyz0 *= (Z / xyz0.z); - xyz1 = model_.projectPixelTo3dRay(uv1); - xyz1 *= (Z / xyz1.z); - - EXPECT_NEAR(model_.getDeltaU(xyz1.x - xyz0.x, Z), du, 1e-4); - EXPECT_NEAR(model_.getDeltaV(xyz1.y - xyz0.y, Z), dv, 1e-4); - EXPECT_NEAR(model_.getDeltaX(du, Z), xyz1.x - xyz0.x, 1e-4); - EXPECT_NEAR(model_.getDeltaY(dv, Z), xyz1.y - xyz0.y, 1e-4); -} - -TEST_F(PinholeTest, initialization) -{ - - sensor_msgs::CameraInfo info; - image_geometry::PinholeCameraModel camera; - - camera.fromCameraInfo(info); - - EXPECT_EQ(camera.initialized(), 1); - EXPECT_EQ(camera.projectionMatrix().rows, 3); - EXPECT_EQ(camera.projectionMatrix().cols, 4); -} - -TEST_F(PinholeTest, rectifyIfCalibrated) -{ - /// @todo use forward distortion for a better test - // Ideally this test would have two images stored on disk - // one which is distorted and the other which is rectified, - // and then rectification would take place here and the output - // image compared to the one on disk (which would mean if - // the distortion coefficients above can't change once paired with - // an image). 
-
-  // Later could incorporate distort code
-  // (https://github.com/lucasw/vimjay/blob/master/src/standalone/distort_image.cpp)
-  // to take any image, distort it, then undistort with rectifyImage;
-  // given the distortion coefficients are consistent, the input image
-  // and final output image should be mostly the same (though some
-  // interpolation error creeps in), except outside a masked region where
-  // information was lost. The masked region can be generated with a pure
-  // white image that goes through the same process (if it comes out
-  // completely black then the distortion parameters are problematic).
-
-  // For now generate an image and pass the test simply if
-  // the rectified image does not match the distorted image.
-  // Then zero out the first distortion coefficient and run
-  // the test again.
-  // Then zero out all the distortion coefficients and test
-  // that the output image is the same as the input.
-  cv::Mat distorted_image(cv::Size(cam_info_.width, cam_info_.height), CV_8UC3, cv::Scalar(0, 0, 0));
-
-  // draw a grid
-  const cv::Scalar color = cv::Scalar(255, 255, 255);
-  // draw the lines thick so the proportion of error due to
-  // interpolation is reduced
-  const int thickness = 7;
-  const int type = 8;
-  for (size_t y = 0; y <= cam_info_.height; y += cam_info_.height/10)
-  {
-    cv::line(distorted_image,
-             cv::Point(0, y), cv::Point(cam_info_.width, y),
-             color, thickness, type);
-  }
-  for (size_t x = 0; x <= cam_info_.width; x += cam_info_.width/10)
-  {
-    cv::line(distorted_image,
-             cv::Point(x, 0), cv::Point(x, cam_info_.height),
-             color, thickness, type);
-  }
-
-  cv::Mat rectified_image;
-  // This threshold is a guess, and maybe ought to be larger:
-  // a completely different image would be on the order of
-  // width * height * 255 = 78e6
-  const double diff_threshold = 10000.0;
-  double error;
-
-  // Test that rectified image is sufficiently different
-  // using default distortion
-  model_.rectifyImage(distorted_image, rectified_image);
-  error = cv::norm(distorted_image, rectified_image, cv::NORM_L1);
-  EXPECT_GT(error, diff_threshold);
-
-  // Test that rectified image is sufficiently different
-  // using default distortion but with first element zeroed out.
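Before the zeroing steps below, it helps to see why an all-zero D makes rectification a no-op. A small sketch in plain OpenCV (independent of this test; K approximates the calibration above, and names are illustrative): with zero distortion, identity R, and the new camera matrix equal to K, initUndistortRectifyMap produces an identity remap, so the "rectified" image equals the input exactly.

    import cv2
    import numpy as np

    K = np.array([[430.154, 0.0, 311.713],
                  [0.0, 430.609, 221.068],
                  [0.0, 0.0, 1.0]])
    D = np.zeros(5)                     # all distortion coefficients zeroed
    img = np.zeros((480, 640, 3), np.uint8)
    cv2.line(img, (0, 240), (639, 240), (255, 255, 255), 7)  # one thick grid line

    map1, map2 = cv2.initUndistortRectifyMap(K, D, np.eye(3), K, (640, 480), cv2.CV_32FC1)
    rect = cv2.remap(img, map1, map2, cv2.INTER_LINEAR)
    print(cv2.norm(img, rect, cv2.NORM_L1))  # 0.0: rectification is the identity

This is exactly the condition the last two EXPECT_EQ(error, 0) checks below assert through model_.rectifyImage().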
- sensor_msgs::CameraInfo cam_info_2 = cam_info_; - cam_info_2.D[0] = 0.0; - model_.fromCameraInfo(cam_info_2); - model_.rectifyImage(distorted_image, rectified_image); - error = cv::norm(distorted_image, rectified_image, cv::NORM_L1); - EXPECT_GT(error, diff_threshold); - - // Test that rectified image is the same using zero distortion - cam_info_2.D.assign(cam_info_2.D.size(), 0); - model_.fromCameraInfo(cam_info_2); - model_.rectifyImage(distorted_image, rectified_image); - error = cv::norm(distorted_image, rectified_image, cv::NORM_L1); - EXPECT_EQ(error, 0); - - // Test that rectified image is the same using empty distortion - cam_info_2.D.clear(); - model_.fromCameraInfo(cam_info_2); - model_.rectifyImage(distorted_image, rectified_image); - error = cv::norm(distorted_image, rectified_image, cv::NORM_L1); - EXPECT_EQ(error, 0); - - // restore original distortion - model_.fromCameraInfo(cam_info_); -} - -int main(int argc, char** argv) -{ - testing::InitGoogleTest(&argc, argv); - return RUN_ALL_TESTS(); -} diff --git a/src/vision_opencv/opencv_tests/CHANGELOG.rst b/src/vision_opencv/opencv_tests/CHANGELOG.rst deleted file mode 100644 index 31e7f23..0000000 --- a/src/vision_opencv/opencv_tests/CHANGELOG.rst +++ /dev/null @@ -1,281 +0,0 @@ -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -Changelog for package opencv_tests -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -1.13.1 (2022-10-03) -------------------- - -1.13.0 (2018-04-30) -------------------- - -1.12.8 (2018-04-17) -------------------- - -1.12.7 (2017-11-12) -------------------- - -1.12.6 (2017-11-11) -------------------- - -1.12.5 (2017-11-05) -------------------- - -1.12.4 (2017-01-29) -------------------- - -1.12.3 (2016-12-04) -------------------- - -1.12.2 (2016-09-24) -------------------- - -1.12.1 (2016-07-11) -------------------- -* Support compressed Images messages in python for indigo - - Add cv2_to_comprssed_imgmsg: Convert from cv2 image to compressed image ros msg. - - Add comprssed_imgmsg_to_cv2: Convert the compress message to a new image. - - Add compressed image tests. - - Add time to msgs (compressed and regular). - add enumerants test for compressed image. - merge the compressed tests with the regular ones. - better comment explanation. I will squash this commit. - Fix indentation - fix typo mistage: from .imgmsg_to_compressed_cv2 to .compressed_imgmsg_to_cv2. - remove cv2.CV_8UC1 - remove rospy and time depndency. - change from IMREAD_COLOR to IMREAD_ANYCOLOR. - - make indentaion of 4. - - remove space trailer. - - remove space from empty lines. - - another set of for loops, it will make things easier to track. In that new set, just have the number of channels in ([],1,3,4) (ignore two for jpg). from: https://github.com/ros-perception/vision_opencv/pull/132#discussion_r66721943 - - keep the OpenCV error message. from: https://github.com/ros-perception/vision_opencv/pull/132#discussion_r66721013 - add debug print for test. - add case for 4 channels in test. - remove 4 channels case from compressed test. - add debug print for test. - change typo of format. - fix typo in format. change from dip to dib. - change to IMREAD_ANYCOLOR as python code. (as it should). - rename TIFF to tiff - Sperate the tests one for regular images and one for compressed. 
- update comment -* Contributors: talregev - -1.12.0 (2016-03-18) -------------------- - -1.11.12 (2016-03-10) --------------------- - -1.11.11 (2016-01-31) --------------------- -* fix a few warnings in doc jobs -* Contributors: Vincent Rabaud - -1.11.10 (2016-01-16) --------------------- - -1.11.9 (2015-11-29) -------------------- - -1.11.8 (2015-07-15) -------------------- -* simplify dependencies -* Contributors: Vincent Rabaud - -1.11.7 (2014-12-14) -------------------- - -1.11.6 (2014-11-16) -------------------- - -1.11.5 (2014-09-21) -------------------- - -1.11.4 (2014-07-27) -------------------- - -1.11.3 (2014-06-08) -------------------- -* remove file whose functinality is now in cv_bridge -* remove references to cv (use cv2) -* Correct dependency from non-existent package to cv_bridge -* Contributors: Isaac Isao Saito, Vincent Rabaud - -1.11.2 (2014-04-28) -------------------- - -1.11.1 (2014-04-16) -------------------- - -1.11.0 (2014-02-15) -------------------- - -1.10.15 (2014-02-07) --------------------- - -1.10.14 (2013-11-23 16:17) --------------------------- -* Contributors: Vincent Rabaud - -1.10.13 (2013-11-23 09:19) --------------------------- -* Contributors: Vincent Rabaud - -1.10.12 (2013-11-22) --------------------- -* Contributors: Vincent Rabaud - -1.10.11 (2013-10-23) --------------------- -* Contributors: Vincent Rabaud - -1.10.10 (2013-10-19) --------------------- -* Contributors: Vincent Rabaud - -1.10.9 (2013-10-07) -------------------- -* Contributors: Vincent Rabaud - -1.10.8 (2013-09-09) -------------------- -* update email address -* Contributors: Vincent Rabaud - -1.10.7 (2013-07-17) -------------------- - -1.10.6 (2013-03-01) -------------------- - -1.10.5 (2013-02-11) -------------------- - -1.10.4 (2013-02-02) -------------------- - -1.10.3 (2013-01-17) -------------------- - -1.10.2 (2013-01-13) -------------------- - -1.10.1 (2013-01-10) -------------------- -* fixes `#5 `_ by removing the logic from Python and using wrapped C++ and adding a test for it -* Contributors: Vincent Rabaud - -1.10.0 (2013-01-03) -------------------- - -1.9.15 (2013-01-02) -------------------- - -1.9.14 (2012-12-30) -------------------- - -1.9.13 (2012-12-15) -------------------- - -1.9.12 (2012-12-14) -------------------- -* Removed brief tag - Conflicts: - opencv_tests/package.xml -* buildtool_depend catkin fix -* Contributors: William Woodall - -1.9.11 (2012-12-10) -------------------- - -1.9.10 (2012-10-04) -------------------- - -1.9.9 (2012-10-01) ------------------- - -1.9.8 (2012-09-30) ------------------- - -1.9.7 (2012-09-28 21:07) ------------------------- -* add missing stuff -* make sure we find catkin -* Contributors: Vincent Rabaud - -1.9.6 (2012-09-28 15:17) ------------------------- -* move the test to where it belongs -* fix the tests and the API to not handle conversion from CV_TYPE to Color type (does not make sense) -* make all the tests pass -* comply to the new Catkin API -* backport the C++ test from Fuerte -* Contributors: Vincent Rabaud - -1.9.5 (2012-09-15) ------------------- -* remove dependencies to the opencv2 ROS package -* Contributors: Vincent Rabaud - -1.9.4 (2012-09-13) ------------------- - -1.9.3 (2012-09-12) ------------------- -* update to nosetests -* Contributors: Vincent Rabaud - -1.9.2 (2012-09-07) ------------------- -* be more compliant to the latest catkin -* added catkin_project() to cv_bridge, image_geometry, and opencv_tests -* Contributors: Jonathan Binney, Vincent Rabaud - -1.9.1 (2012-08-28 22:06) 
------------------------- -* remove a deprecated header -* Contributors: Vincent Rabaud - -1.9.0 (2012-08-28 14:29) ------------------------- -* cleanup by Jon Binney -* catkinized opencv_tests by Jon Binney -* remove the version check, let's trust OpenCV :) -* revert the removal of opencv2 -* finally get rid of opencv2 as it is a system dependency now -* bump REQUIRED version of OpenCV to 2.3.2, which is what's in ros-fuerte-opencv -* switch rosdep name to opencv2, to refer to ros-fuerte-opencv2 -* Fixing link lines for gtest against opencv. -* Adding opencv2 to all manifests, so that client packages may - not break when using them. -* baking in opencv debs and attempting a pre-release -* Another hack for prerelease to quiet test failures. -* Dissable a dubious opencv test. Temporary HACK. -* Changing to expect for more verbose failure. -* Minor change to test. -* Making this depend on libopencv-2.3-dev debian available in ros-shadow. -* mono16 -> bgr conversion tested and fixed in C -* Added Ubuntu platform tags to manifest -* Tuned for parc loop -* Demo of ROS node face detecton -* mono16 support, ticket `#2890 `_ -* Remove use of deprecated rosbuild macros -* cv_bridge split from opencv2 -* Name changes for opencv -> vision_opencv -* Validation for image message encoding -* utest changed to reflect rosimgtocv change to imgmsgtocv -* Add opencvpython as empty package -* New methods for cv image conversion -* Disabling tests on OSX, `#2769 `_ -* New Python CvBridge, rewrote C CvBridge, regression test for C and Python CvBridge -* Fix underscore problem, test 8UC3->BGR8, fix 8UC3->BGR8 -* New image format -* Image message and CvBridge change -* Rename rows,cols to height,width in Image message -* New node bbc for image testing -* Make executable -* Pong demo -* Missing utest.cpp -* New sensor_msgs::Image message -* Contributors: Vincent Rabaud, ethanrublee, gerkey, jamesb, jamesbowman, pantofaru, vrabaud, wheeler diff --git a/src/vision_opencv/opencv_tests/CMakeLists.txt b/src/vision_opencv/opencv_tests/CMakeLists.txt deleted file mode 100644 index b45ba8c..0000000 --- a/src/vision_opencv/opencv_tests/CMakeLists.txt +++ /dev/null @@ -1,5 +0,0 @@ -cmake_minimum_required(VERSION 2.8) -project(opencv_tests) - -find_package(catkin REQUIRED) -catkin_package() diff --git a/src/vision_opencv/opencv_tests/launch/pong.launch b/src/vision_opencv/opencv_tests/launch/pong.launch deleted file mode 100644 index c571243..0000000 --- a/src/vision_opencv/opencv_tests/launch/pong.launch +++ /dev/null @@ -1,5 +0,0 @@ - - - - - diff --git a/src/vision_opencv/opencv_tests/mainpage.dox b/src/vision_opencv/opencv_tests/mainpage.dox deleted file mode 100644 index ea72319..0000000 --- a/src/vision_opencv/opencv_tests/mainpage.dox +++ /dev/null @@ -1,119 +0,0 @@ -/** -\mainpage -\htmlinclude manifest.html - -\b opencv_tests is ... - - - - -\section codeapi Code API - - - -\section rosapi ROS API - - - - - - - - -*/ \ No newline at end of file diff --git a/src/vision_opencv/opencv_tests/nodes/broadcast.py b/src/vision_opencv/opencv_tests/nodes/broadcast.py deleted file mode 100755 index 0df824d..0000000 --- a/src/vision_opencv/opencv_tests/nodes/broadcast.py +++ /dev/null @@ -1,78 +0,0 @@ -#!/usr/bin/env python -# Software License Agreement (BSD License) -# -# Copyright (c) 2008, Willow Garage, Inc. -# Copyright (c) 2016, Tal Regev. -# All rights reserved. 
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-#  * Redistributions of source code must retain the above copyright
-#    notice, this list of conditions and the following disclaimer.
-#  * Redistributions in binary form must reproduce the above
-#    copyright notice, this list of conditions and the following
-#    disclaimer in the documentation and/or other materials provided
-#    with the distribution.
-#  * Neither the name of the Willow Garage nor the names of its
-#    contributors may be used to endorse or promote products derived
-#    from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
-# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
-# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
-# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
-# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
-# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-# POSSIBILITY OF SUCH DAMAGE.
-
-import sys
-import time
-import math
-import rospy
-import cv2
-
-import sensor_msgs.msg
-from cv_bridge import CvBridge
-
-
-# Publish each image from the given list of filenames to the given topic,
-# as regular and compressed ROS Image messages, cycling through the list.
-class Source:
-
-    def __init__(self, topic, filenames):
-        self.pub = rospy.Publisher(topic, sensor_msgs.msg.Image)
-        self.pub_compressed = rospy.Publisher(topic + "/compressed", sensor_msgs.msg.CompressedImage)
-        self.filenames = filenames
-
-    def spin(self):
-        time.sleep(1.0)
-        cvb = CvBridge()
-        while not rospy.core.is_shutdown():
-            cvim = cv2.imread(self.filenames[0])  # was cv2.imload, which does not exist
-            self.pub.publish(cvb.cv2_to_imgmsg(cvim))
-            self.pub_compressed.publish(cvb.cv2_to_compressed_imgmsg(cvim))
-            self.filenames = self.filenames[1:] + [self.filenames[0]]
-            time.sleep(1)
-
-
-def main(args):
-    s = Source(args[1], args[2:])
-    rospy.init_node('Source')
-    try:
-        s.spin()
-        rospy.spin()
-        outcome = 'test completed'
-    except KeyboardInterrupt:
-        print("shutting down")
-        outcome = 'keyboard interrupt'
-    rospy.core.signal_shutdown(outcome)
-
-if __name__ == '__main__':
-    main(sys.argv)
diff --git a/src/vision_opencv/opencv_tests/nodes/rosfacedetect.py b/src/vision_opencv/opencv_tests/nodes/rosfacedetect.py
deleted file mode 100755
index 9b072d7..0000000
--- a/src/vision_opencv/opencv_tests/nodes/rosfacedetect.py
+++ /dev/null
@@ -1,105 +0,0 @@
-#!/usr/bin/python
-"""
-This program is a demonstration of face and object detection using haar-like features.
-The program finds faces in a camera image or video stream and displays a red box around them.
-
-Original C implementation by: ?
-Python implementation by: Roman Stanchak, James Bowman
-Updated: Copyright (c) 2016, Tal Regev.
-""" - -import sys -import os -from optparse import OptionParser - -import rospy -import sensor_msgs.msg -from cv_bridge import CvBridge -import cv2 -import numpy - -# Parameters for haar detection -# From the API: -# The default parameters (scale_factor=2, min_neighbors=3, flags=0) are tuned -# for accurate yet slow object detection. For a faster operation on real video -# images the settings are: -# scale_factor=1.2, min_neighbors=2, flags=CV_HAAR_DO_CANNY_PRUNING, -# min_size= - opencv_tests - 1.13.1 - - Tests the enumerants of the ROS Image message, and functionally tests the Python and C++ implementations of CvBridge. - - James Bowman - Vincent Rabaud - BSD - http://wiki.ros.org/opencv_tests - - catkin - - cv_bridge - diff --git a/src/vision_opencv/vision_opencv/CHANGELOG.rst b/src/vision_opencv/vision_opencv/CHANGELOG.rst deleted file mode 100644 index c1c2975..0000000 --- a/src/vision_opencv/vision_opencv/CHANGELOG.rst +++ /dev/null @@ -1,189 +0,0 @@ -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -Changelog for package vision_opencv -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -1.13.1 (2022-10-03) -------------------- - -1.13.0 (2018-04-30) -------------------- - -1.12.8 (2018-04-17) -------------------- - -1.12.7 (2017-11-12) -------------------- - -1.12.6 (2017-11-11) -------------------- - -1.12.5 (2017-11-05) -------------------- - -1.12.4 (2017-01-29) -------------------- - -1.12.3 (2016-12-04) -------------------- - -1.12.2 (2016-09-24) -------------------- - -1.12.1 (2016-07-11) -------------------- - -1.12.0 (2016-03-18) -------------------- -* remove opencv_apps from vision_opencv -* Contributors: Vincent Rabaud - -1.11.12 (2016-03-10) --------------------- - -1.11.11 (2016-01-31) --------------------- - -1.11.10 (2016-01-16) --------------------- - -1.11.9 (2015-11-29) -------------------- -* Add opencv_apps to vision_opencv dependency -* Contributors: Ryohei Ueda - -1.11.8 (2015-07-15) -------------------- - -1.11.7 (2014-12-14) -------------------- - -1.11.6 (2014-11-16) -------------------- - -1.11.5 (2014-09-21) -------------------- - -1.11.4 (2014-07-27) -------------------- - -1.11.3 (2014-06-08) -------------------- - -1.11.2 (2014-04-28) -------------------- - -1.11.1 (2014-04-16) -------------------- - -1.11.0 (2014-02-15) -------------------- - -1.10.15 (2014-02-07) --------------------- - -1.10.14 (2013-11-23 16:17) --------------------------- -* Contributors: Vincent Rabaud - -1.10.13 (2013-11-23 09:19) --------------------------- -* Contributors: Vincent Rabaud - -1.10.12 (2013-11-22) --------------------- -* Contributors: Vincent Rabaud - -1.10.11 (2013-10-23) --------------------- -* Contributors: Vincent Rabaud - -1.10.10 (2013-10-19) --------------------- -* Contributors: Vincent Rabaud - -1.10.9 (2013-10-07) -------------------- -* Contributors: Vincent Rabaud - -1.10.8 (2013-09-09) -------------------- -* update email address -* Contributors: Vincent Rabaud - -1.10.7 (2013-07-17) -------------------- -* update to REP 0127 -* Contributors: Vincent Rabaud - -1.10.6 (2013-03-01) -------------------- - -1.10.5 (2013-02-11) -------------------- - -1.10.4 (2013-02-02) -------------------- - -1.10.3 (2013-01-17) -------------------- - -1.10.2 (2013-01-13) -------------------- - -1.10.1 (2013-01-10) -------------------- - -1.10.0 (2013-01-03) -------------------- - -1.9.15 (2013-01-02) -------------------- - -1.9.14 (2012-12-30) -------------------- - -1.9.13 (2012-12-15) -------------------- - -1.9.12 (2012-12-14) -------------------- - -1.9.11 (2012-12-10) 
--------------------
-
-1.9.10 (2012-10-04)
--------------------
-* the CMake file is useless
-* add the missing CMake file
-* re-add the meta-package
-* Contributors: Vincent Rabaud
-
-1.9.9 (2012-10-01)
-------------------
-
-1.9.8 (2012-09-30)
-------------------
-
-1.9.7 (2012-09-28 21:07)
-------------------------
-
-1.9.6 (2012-09-28 15:17)
-------------------------
-
-1.9.5 (2012-09-15)
-------------------
-
-1.9.4 (2012-09-13)
-------------------
-
-1.9.3 (2012-09-12)
-------------------
-
-1.9.2 (2012-09-07)
-------------------
-
-1.9.1 (2012-08-28 22:06)
-------------------------
-
-1.9.0 (2012-08-28 14:29)
-------------------------
diff --git a/src/vision_opencv/vision_opencv/CMakeLists.txt b/src/vision_opencv/vision_opencv/CMakeLists.txt
deleted file mode 100644
index 8f1965d..0000000
--- a/src/vision_opencv/vision_opencv/CMakeLists.txt
+++ /dev/null
@@ -1,4 +0,0 @@
-cmake_minimum_required(VERSION 2.8.3)
-project(vision_opencv)
-find_package(catkin REQUIRED)
-catkin_metapackage()
diff --git a/src/vision_opencv/vision_opencv/package.xml b/src/vision_opencv/vision_opencv/package.xml
deleted file mode 100644
index e2f2a69..0000000
--- a/src/vision_opencv/vision_opencv/package.xml
+++ /dev/null
@@ -1,22 +0,0 @@
-<package>
-  <name>vision_opencv</name>
-  <version>1.13.1</version>
-  <description>Packages for interfacing ROS with OpenCV, a library of programming functions for real time computer vision.</description>
-  <author>Patrick Mihelich</author>
-  <author>James Bowman</author>
-  <maintainer>Vincent Rabaud</maintainer>
-  <license>BSD</license>
-
-  <url type="website">http://www.ros.org/wiki/vision_opencv</url>
-  <url type="bugtracker">https://github.com/ros-perception/vision_opencv/issues</url>
-  <url type="repository">https://github.com/ros-perception/vision_opencv</url>
-
-  <buildtool_depend>catkin</buildtool_depend>
-
-  <run_depend>cv_bridge</run_depend>
-  <run_depend>image_geometry</run_depend>
-
-  <export>
-    <metapackage/>
-  </export>
-</package>
diff --git a/src/yolov5_ros/CMakeLists.txt b/src/yolov5_ros/CMakeLists.txt
deleted file mode 100644
index 3802220..0000000
--- a/src/yolov5_ros/CMakeLists.txt
+++ /dev/null
@@ -1,207 +0,0 @@
-cmake_minimum_required(VERSION 3.0.2...3.26.3)
-project(yolov5_ros)
-
-## Compile as C++11, supported in ROS Kinetic and newer
-# add_compile_options(-std=c++11)
-
-## Find catkin macros and libraries
-## if COMPONENTS list like find_package(catkin REQUIRED COMPONENTS xyz)
-## is used, also find other catkin packages
-find_package(catkin REQUIRED COMPONENTS
-  rospy
-  sensor_msgs
-  std_msgs
-  detection_msgs
-)
-
-## System dependencies are found with CMake's conventions
-# find_package(Boost REQUIRED COMPONENTS system)
-
-
-## Uncomment this if the package has a setup.py. This macro ensures
-## modules and global scripts declared therein get installed
-## See http://ros.org/doc/api/catkin/html/user_guide/setup_dot_py.html
-# catkin_python_setup()
-
-################################################
-## Declare ROS messages, services and actions ##
-################################################
-
-## To declare and build messages, services or actions from within this
-## package, follow these steps:
-## * Let MSG_DEP_SET be the set of packages whose message types you use in
-##   your messages/services/actions (e.g. std_msgs, actionlib_msgs, ...).
-## * In the file package.xml:
-##   * add a build_depend tag for "message_generation"
-##   * add a build_depend and a exec_depend tag for each package in MSG_DEP_SET
-##   * If MSG_DEP_SET isn't empty the following dependency has been pulled in
-##     but can be declared for certainty nonetheless:
-##     * add a exec_depend tag for "message_runtime"
-## * In this file (CMakeLists.txt):
-##   * add "message_generation" and every package in MSG_DEP_SET to
-##     find_package(catkin REQUIRED COMPONENTS ...)
-## * add "message_runtime" and every package in MSG_DEP_SET to -## catkin_package(CATKIN_DEPENDS ...) -## * uncomment the add_*_files sections below as needed -## and list every .msg/.srv/.action file to be processed -## * uncomment the generate_messages entry below -## * add every package in MSG_DEP_SET to generate_messages(DEPENDENCIES ...) - -## Generate messages in the 'msg' folder -# add_message_files( -# FILES -# Message1.msg -# Message2.msg -# ) - -## Generate services in the 'srv' folder -# add_service_files( -# FILES -# Service1.srv -# Service2.srv -# ) - -## Generate actions in the 'action' folder -# add_action_files( -# FILES -# Action1.action -# Action2.action -# ) - -## Generate added messages and services with any dependencies listed here -# generate_messages( -# DEPENDENCIES -# sensor_msgs# std_msgs -# ) - -################################################ -## Declare ROS dynamic reconfigure parameters ## -################################################ - -## To declare and build dynamic reconfigure parameters within this -## package, follow these steps: -## * In the file package.xml: -## * add a build_depend and a exec_depend tag for "dynamic_reconfigure" -## * In this file (CMakeLists.txt): -## * add "dynamic_reconfigure" to -## find_package(catkin REQUIRED COMPONENTS ...) -## * uncomment the "generate_dynamic_reconfigure_options" section below -## and list every .cfg file to be processed - -## Generate dynamic reconfigure parameters in the 'cfg' folder -# generate_dynamic_reconfigure_options( -# cfg/DynReconf1.cfg -# cfg/DynReconf2.cfg -# ) - -################################### -## catkin specific configuration ## -################################### -## The catkin_package macro generates cmake config files for your package -## Declare things to be passed to dependent projects -## INCLUDE_DIRS: uncomment this if your package contains header files -## LIBRARIES: libraries you create in this project that dependent projects also need -## CATKIN_DEPENDS: catkin_packages dependent projects also need -## DEPENDS: system dependencies of this project that dependent projects also need -catkin_package( -# INCLUDE_DIRS include -# LIBRARIES yolov5_ros -# CATKIN_DEPENDS rospy sensor_msgs std_msgs -# DEPENDS system_lib -) - -########### -## Build ## -########### - -## Specify additional locations of header files -## Your package locations should be listed before other locations -include_directories( -# include - ${catkin_INCLUDE_DIRS} -) - -## Declare a C++ library -# add_library(${PROJECT_NAME} -# src/${PROJECT_NAME}/yolov5_ros.cpp -# ) - -## Add cmake target dependencies of the library -## as an example, code may need to be generated before libraries -## either from message generation or dynamic reconfigure -# add_dependencies(${PROJECT_NAME} ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS}) - -## Declare a C++ executable -## With catkin_make all packages are built within a single CMake context -## The recommended prefix ensures that target names across packages don't collide -# add_executable(${PROJECT_NAME}_node src/yolov5_ros_node.cpp) - -## Rename C++ executable without prefix -## The above recommended prefix causes long target names, the following renames the -## target back to the shorter version for ease of user use -## e.g. 
"rosrun someones_pkg node" instead of "rosrun someones_pkg someones_pkg_node" -# set_target_properties(${PROJECT_NAME}_node PROPERTIES OUTPUT_NAME node PREFIX "") - -## Add cmake target dependencies of the executable -## same as for the library above -# add_dependencies(${PROJECT_NAME}_node ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS}) - -## Specify libraries to link a library or executable target against -# target_link_libraries(${PROJECT_NAME}_node -# ${catkin_LIBRARIES} -# ) - -############# -## Install ## -############# - -# all install targets should use catkin DESTINATION variables -# See http://ros.org/doc/api/catkin/html/adv_user_guide/variables.html - -## Mark executable scripts (Python etc.) for installation -## in contrast to setup.py, you can choose the destination -catkin_install_python(PROGRAMS - src/detect.py - DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION} -) - -## Mark executables for installation -## See http://docs.ros.org/melodic/api/catkin/html/howto/format1/building_executables.html -# install(TARGETS ${PROJECT_NAME}_node -# RUNTIME DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION} -# ) - -## Mark libraries for installation -## See http://docs.ros.org/melodic/api/catkin/html/howto/format1/building_libraries.html -# install(TARGETS ${PROJECT_NAME} -# ARCHIVE DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION} -# LIBRARY DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION} -# RUNTIME DESTINATION ${CATKIN_GLOBAL_BIN_DESTINATION} -# ) - -## Mark cpp header files for installation -# install(DIRECTORY include/${PROJECT_NAME}/ -# DESTINATION ${CATKIN_PACKAGE_INCLUDE_DESTINATION} -# FILES_MATCHING PATTERN "*.h" -# PATTERN ".svn" EXCLUDE -# ) - -## Mark other files for installation (e.g. launch and bag files, etc.) -# install(FILES -# # myfile1 -# # myfile2 -# DESTINATION ${CATKIN_PACKAGE_SHARE_DESTINATION} -# ) - -############# -## Testing ## -############# - -## Add gtest based cpp test target and link libraries -# catkin_add_gtest(${PROJECT_NAME}-test test/test_yolov5_ros.cpp) -# if(TARGET ${PROJECT_NAME}-test) -# target_link_libraries(${PROJECT_NAME}-test ${PROJECT_NAME}) -# endif() - -## Add folders to be run by python nosetests -# catkin_add_nosetests(test) diff --git a/src/yolov5_ros/LICENSE b/src/yolov5_ros/LICENSE deleted file mode 100644 index f288702..0000000 --- a/src/yolov5_ros/LICENSE +++ /dev/null @@ -1,674 +0,0 @@ - GNU GENERAL PUBLIC LICENSE - Version 3, 29 June 2007 - - Copyright (C) 2007 Free Software Foundation, Inc. - Everyone is permitted to copy and distribute verbatim copies - of this license document, but changing it is not allowed. - - Preamble - - The GNU General Public License is a free, copyleft license for -software and other kinds of works. - - The licenses for most software and other practical works are designed -to take away your freedom to share and change the works. By contrast, -the GNU General Public License is intended to guarantee your freedom to -share and change all versions of a program--to make sure it remains free -software for all its users. We, the Free Software Foundation, use the -GNU General Public License for most of our software; it applies also to -any other work released this way by its authors. You can apply it to -your programs, too. - - When we speak of free software, we are referring to freedom, not -price. 
Our General Public Licenses are designed to make sure that you -have the freedom to distribute copies of free software (and charge for -them if you wish), that you receive source code or can get it if you -want it, that you can change the software or use pieces of it in new -free programs, and that you know you can do these things. - - To protect your rights, we need to prevent others from denying you -these rights or asking you to surrender the rights. Therefore, you have -certain responsibilities if you distribute copies of the software, or if -you modify it: responsibilities to respect the freedom of others. - - For example, if you distribute copies of such a program, whether -gratis or for a fee, you must pass on to the recipients the same -freedoms that you received. You must make sure that they, too, receive -or can get the source code. And you must show them these terms so they -know their rights. - - Developers that use the GNU GPL protect your rights with two steps: -(1) assert copyright on the software, and (2) offer you this License -giving you legal permission to copy, distribute and/or modify it. - - For the developers' and authors' protection, the GPL clearly explains -that there is no warranty for this free software. For both users' and -authors' sake, the GPL requires that modified versions be marked as -changed, so that their problems will not be attributed erroneously to -authors of previous versions. - - Some devices are designed to deny users access to install or run -modified versions of the software inside them, although the manufacturer -can do so. This is fundamentally incompatible with the aim of -protecting users' freedom to change the software. The systematic -pattern of such abuse occurs in the area of products for individuals to -use, which is precisely where it is most unacceptable. Therefore, we -have designed this version of the GPL to prohibit the practice for those -products. If such problems arise substantially in other domains, we -stand ready to extend this provision to those domains in future versions -of the GPL, as needed to protect the freedom of users. - - Finally, every program is threatened constantly by software patents. -States should not allow patents to restrict development and use of -software on general-purpose computers, but in those that do, we wish to -avoid the special danger that patents applied to a free program could -make it effectively proprietary. To prevent this, the GPL assures that -patents cannot be used to render the program non-free. - - The precise terms and conditions for copying, distribution and -modification follow. - - TERMS AND CONDITIONS - - 0. Definitions. - - "This License" refers to version 3 of the GNU General Public License. - - "Copyright" also means copyright-like laws that apply to other kinds of -works, such as semiconductor masks. - - "The Program" refers to any copyrightable work licensed under this -License. Each licensee is addressed as "you". "Licensees" and -"recipients" may be individuals or organizations. - - To "modify" a work means to copy from or adapt all or part of the work -in a fashion requiring copyright permission, other than the making of an -exact copy. The resulting work is called a "modified version" of the -earlier work or a work "based on" the earlier work. - - A "covered work" means either the unmodified Program or a work based -on the Program. 
- - To "propagate" a work means to do anything with it that, without -permission, would make you directly or secondarily liable for -infringement under applicable copyright law, except executing it on a -computer or modifying a private copy. Propagation includes copying, -distribution (with or without modification), making available to the -public, and in some countries other activities as well. - - To "convey" a work means any kind of propagation that enables other -parties to make or receive copies. Mere interaction with a user through -a computer network, with no transfer of a copy, is not conveying. - - An interactive user interface displays "Appropriate Legal Notices" -to the extent that it includes a convenient and prominently visible -feature that (1) displays an appropriate copyright notice, and (2) -tells the user that there is no warranty for the work (except to the -extent that warranties are provided), that licensees may convey the -work under this License, and how to view a copy of this License. If -the interface presents a list of user commands or options, such as a -menu, a prominent item in the list meets this criterion. - - 1. Source Code. - - The "source code" for a work means the preferred form of the work -for making modifications to it. "Object code" means any non-source -form of a work. - - A "Standard Interface" means an interface that either is an official -standard defined by a recognized standards body, or, in the case of -interfaces specified for a particular programming language, one that -is widely used among developers working in that language. - - The "System Libraries" of an executable work include anything, other -than the work as a whole, that (a) is included in the normal form of -packaging a Major Component, but which is not part of that Major -Component, and (b) serves only to enable use of the work with that -Major Component, or to implement a Standard Interface for which an -implementation is available to the public in source code form. A -"Major Component", in this context, means a major essential component -(kernel, window system, and so on) of the specific operating system -(if any) on which the executable work runs, or a compiler used to -produce the work, or an object code interpreter used to run it. - - The "Corresponding Source" for a work in object code form means all -the source code needed to generate, install, and (for an executable -work) run the object code and to modify the work, including scripts to -control those activities. However, it does not include the work's -System Libraries, or general-purpose tools or generally available free -programs which are used unmodified in performing those activities but -which are not part of the work. For example, Corresponding Source -includes interface definition files associated with source files for -the work, and the source code for shared libraries and dynamically -linked subprograms that the work is specifically designed to require, -such as by intimate data communication or control flow between those -subprograms and other parts of the work. - - The Corresponding Source need not include anything that users -can regenerate automatically from other parts of the Corresponding -Source. - - The Corresponding Source for a work in source code form is that -same work. - - 2. Basic Permissions. - - All rights granted under this License are granted for the term of -copyright on the Program, and are irrevocable provided the stated -conditions are met. 
This License explicitly affirms your unlimited -permission to run the unmodified Program. The output from running a -covered work is covered by this License only if the output, given its -content, constitutes a covered work. This License acknowledges your -rights of fair use or other equivalent, as provided by copyright law. - - You may make, run and propagate covered works that you do not -convey, without conditions so long as your license otherwise remains -in force. You may convey covered works to others for the sole purpose -of having them make modifications exclusively for you, or provide you -with facilities for running those works, provided that you comply with -the terms of this License in conveying all material for which you do -not control copyright. Those thus making or running the covered works -for you must do so exclusively on your behalf, under your direction -and control, on terms that prohibit them from making any copies of -your copyrighted material outside their relationship with you. - - Conveying under any other circumstances is permitted solely under -the conditions stated below. Sublicensing is not allowed; section 10 -makes it unnecessary. - - 3. Protecting Users' Legal Rights From Anti-Circumvention Law. - - No covered work shall be deemed part of an effective technological -measure under any applicable law fulfilling obligations under article -11 of the WIPO copyright treaty adopted on 20 December 1996, or -similar laws prohibiting or restricting circumvention of such -measures. - - When you convey a covered work, you waive any legal power to forbid -circumvention of technological measures to the extent such circumvention -is effected by exercising rights under this License with respect to -the covered work, and you disclaim any intention to limit operation or -modification of the work as a means of enforcing, against the work's -users, your or third parties' legal rights to forbid circumvention of -technological measures. - - 4. Conveying Verbatim Copies. - - You may convey verbatim copies of the Program's source code as you -receive it, in any medium, provided that you conspicuously and -appropriately publish on each copy an appropriate copyright notice; -keep intact all notices stating that this License and any -non-permissive terms added in accord with section 7 apply to the code; -keep intact all notices of the absence of any warranty; and give all -recipients a copy of this License along with the Program. - - You may charge any price or no price for each copy that you convey, -and you may offer support or warranty protection for a fee. - - 5. Conveying Modified Source Versions. - - You may convey a work based on the Program, or the modifications to -produce it from the Program, in the form of source code under the -terms of section 4, provided that you also meet all of these conditions: - - a) The work must carry prominent notices stating that you modified - it, and giving a relevant date. - - b) The work must carry prominent notices stating that it is - released under this License and any conditions added under section - 7. This requirement modifies the requirement in section 4 to - "keep intact all notices". - - c) You must license the entire work, as a whole, under this - License to anyone who comes into possession of a copy. This - License will therefore apply, along with any applicable section 7 - additional terms, to the whole of the work, and all its parts, - regardless of how they are packaged. 
This License gives no - permission to license the work in any other way, but it does not - invalidate such permission if you have separately received it. - - d) If the work has interactive user interfaces, each must display - Appropriate Legal Notices; however, if the Program has interactive - interfaces that do not display Appropriate Legal Notices, your - work need not make them do so. - - A compilation of a covered work with other separate and independent -works, which are not by their nature extensions of the covered work, -and which are not combined with it such as to form a larger program, -in or on a volume of a storage or distribution medium, is called an -"aggregate" if the compilation and its resulting copyright are not -used to limit the access or legal rights of the compilation's users -beyond what the individual works permit. Inclusion of a covered work -in an aggregate does not cause this License to apply to the other -parts of the aggregate. - - 6. Conveying Non-Source Forms. - - You may convey a covered work in object code form under the terms -of sections 4 and 5, provided that you also convey the -machine-readable Corresponding Source under the terms of this License, -in one of these ways: - - a) Convey the object code in, or embodied in, a physical product - (including a physical distribution medium), accompanied by the - Corresponding Source fixed on a durable physical medium - customarily used for software interchange. - - b) Convey the object code in, or embodied in, a physical product - (including a physical distribution medium), accompanied by a - written offer, valid for at least three years and valid for as - long as you offer spare parts or customer support for that product - model, to give anyone who possesses the object code either (1) a - copy of the Corresponding Source for all the software in the - product that is covered by this License, on a durable physical - medium customarily used for software interchange, for a price no - more than your reasonable cost of physically performing this - conveying of source, or (2) access to copy the - Corresponding Source from a network server at no charge. - - c) Convey individual copies of the object code with a copy of the - written offer to provide the Corresponding Source. This - alternative is allowed only occasionally and noncommercially, and - only if you received the object code with such an offer, in accord - with subsection 6b. - - d) Convey the object code by offering access from a designated - place (gratis or for a charge), and offer equivalent access to the - Corresponding Source in the same way through the same place at no - further charge. You need not require recipients to copy the - Corresponding Source along with the object code. If the place to - copy the object code is a network server, the Corresponding Source - may be on a different server (operated by you or a third party) - that supports equivalent copying facilities, provided you maintain - clear directions next to the object code saying where to find the - Corresponding Source. Regardless of what server hosts the - Corresponding Source, you remain obligated to ensure that it is - available for as long as needed to satisfy these requirements. - - e) Convey the object code using peer-to-peer transmission, provided - you inform other peers where the object code and Corresponding - Source of the work are being offered to the general public at no - charge under subsection 6d. 
- - A separable portion of the object code, whose source code is excluded -from the Corresponding Source as a System Library, need not be -included in conveying the object code work. - - A "User Product" is either (1) a "consumer product", which means any -tangible personal property which is normally used for personal, family, -or household purposes, or (2) anything designed or sold for incorporation -into a dwelling. In determining whether a product is a consumer product, -doubtful cases shall be resolved in favor of coverage. For a particular -product received by a particular user, "normally used" refers to a -typical or common use of that class of product, regardless of the status -of the particular user or of the way in which the particular user -actually uses, or expects or is expected to use, the product. A product -is a consumer product regardless of whether the product has substantial -commercial, industrial or non-consumer uses, unless such uses represent -the only significant mode of use of the product. - - "Installation Information" for a User Product means any methods, -procedures, authorization keys, or other information required to install -and execute modified versions of a covered work in that User Product from -a modified version of its Corresponding Source. The information must -suffice to ensure that the continued functioning of the modified object -code is in no case prevented or interfered with solely because -modification has been made. - - If you convey an object code work under this section in, or with, or -specifically for use in, a User Product, and the conveying occurs as -part of a transaction in which the right of possession and use of the -User Product is transferred to the recipient in perpetuity or for a -fixed term (regardless of how the transaction is characterized), the -Corresponding Source conveyed under this section must be accompanied -by the Installation Information. But this requirement does not apply -if neither you nor any third party retains the ability to install -modified object code on the User Product (for example, the work has -been installed in ROM). - - The requirement to provide Installation Information does not include a -requirement to continue to provide support service, warranty, or updates -for a work that has been modified or installed by the recipient, or for -the User Product in which it has been modified or installed. Access to a -network may be denied when the modification itself materially and -adversely affects the operation of the network or violates the rules and -protocols for communication across the network. - - Corresponding Source conveyed, and Installation Information provided, -in accord with this section must be in a format that is publicly -documented (and with an implementation available to the public in -source code form), and must require no special password or key for -unpacking, reading or copying. - - 7. Additional Terms. - - "Additional permissions" are terms that supplement the terms of this -License by making exceptions from one or more of its conditions. -Additional permissions that are applicable to the entire Program shall -be treated as though they were included in this License, to the extent -that they are valid under applicable law. If additional permissions -apply only to part of the Program, that part may be used separately -under those permissions, but the entire Program remains governed by -this License without regard to the additional permissions. 
- - When you convey a copy of a covered work, you may at your option -remove any additional permissions from that copy, or from any part of -it. (Additional permissions may be written to require their own -removal in certain cases when you modify the work.) You may place -additional permissions on material, added by you to a covered work, -for which you have or can give appropriate copyright permission. - - Notwithstanding any other provision of this License, for material you -add to a covered work, you may (if authorized by the copyright holders of -that material) supplement the terms of this License with terms: - - a) Disclaiming warranty or limiting liability differently from the - terms of sections 15 and 16 of this License; or - - b) Requiring preservation of specified reasonable legal notices or - author attributions in that material or in the Appropriate Legal - Notices displayed by works containing it; or - - c) Prohibiting misrepresentation of the origin of that material, or - requiring that modified versions of such material be marked in - reasonable ways as different from the original version; or - - d) Limiting the use for publicity purposes of names of licensors or - authors of the material; or - - e) Declining to grant rights under trademark law for use of some - trade names, trademarks, or service marks; or - - f) Requiring indemnification of licensors and authors of that - material by anyone who conveys the material (or modified versions of - it) with contractual assumptions of liability to the recipient, for - any liability that these contractual assumptions directly impose on - those licensors and authors. - - All other non-permissive additional terms are considered "further -restrictions" within the meaning of section 10. If the Program as you -received it, or any part of it, contains a notice stating that it is -governed by this License along with a term that is a further -restriction, you may remove that term. If a license document contains -a further restriction but permits relicensing or conveying under this -License, you may add to a covered work material governed by the terms -of that license document, provided that the further restriction does -not survive such relicensing or conveying. - - If you add terms to a covered work in accord with this section, you -must place, in the relevant source files, a statement of the -additional terms that apply to those files, or a notice indicating -where to find the applicable terms. - - Additional terms, permissive or non-permissive, may be stated in the -form of a separately written license, or stated as exceptions; -the above requirements apply either way. - - 8. Termination. - - You may not propagate or modify a covered work except as expressly -provided under this License. Any attempt otherwise to propagate or -modify it is void, and will automatically terminate your rights under -this License (including any patent licenses granted under the third -paragraph of section 11). - - However, if you cease all violation of this License, then your -license from a particular copyright holder is reinstated (a) -provisionally, unless and until the copyright holder explicitly and -finally terminates your license, and (b) permanently, if the copyright -holder fails to notify you of the violation by some reasonable means -prior to 60 days after the cessation. 
- - Moreover, your license from a particular copyright holder is -reinstated permanently if the copyright holder notifies you of the -violation by some reasonable means, this is the first time you have -received notice of violation of this License (for any work) from that -copyright holder, and you cure the violation prior to 30 days after -your receipt of the notice. - - Termination of your rights under this section does not terminate the -licenses of parties who have received copies or rights from you under -this License. If your rights have been terminated and not permanently -reinstated, you do not qualify to receive new licenses for the same -material under section 10. - - 9. Acceptance Not Required for Having Copies. - - You are not required to accept this License in order to receive or -run a copy of the Program. Ancillary propagation of a covered work -occurring solely as a consequence of using peer-to-peer transmission -to receive a copy likewise does not require acceptance. However, -nothing other than this License grants you permission to propagate or -modify any covered work. These actions infringe copyright if you do -not accept this License. Therefore, by modifying or propagating a -covered work, you indicate your acceptance of this License to do so. - - 10. Automatic Licensing of Downstream Recipients. - - Each time you convey a covered work, the recipient automatically -receives a license from the original licensors, to run, modify and -propagate that work, subject to this License. You are not responsible -for enforcing compliance by third parties with this License. - - An "entity transaction" is a transaction transferring control of an -organization, or substantially all assets of one, or subdividing an -organization, or merging organizations. If propagation of a covered -work results from an entity transaction, each party to that -transaction who receives a copy of the work also receives whatever -licenses to the work the party's predecessor in interest had or could -give under the previous paragraph, plus a right to possession of the -Corresponding Source of the work from the predecessor in interest, if -the predecessor has it or can get it with reasonable efforts. - - You may not impose any further restrictions on the exercise of the -rights granted or affirmed under this License. For example, you may -not impose a license fee, royalty, or other charge for exercise of -rights granted under this License, and you may not initiate litigation -(including a cross-claim or counterclaim in a lawsuit) alleging that -any patent claim is infringed by making, using, selling, offering for -sale, or importing the Program or any portion of it. - - 11. Patents. - - A "contributor" is a copyright holder who authorizes use under this -License of the Program or a work on which the Program is based. The -work thus licensed is called the contributor's "contributor version". - - A contributor's "essential patent claims" are all patent claims -owned or controlled by the contributor, whether already acquired or -hereafter acquired, that would be infringed by some manner, permitted -by this License, of making, using, or selling its contributor version, -but do not include claims that would be infringed only as a -consequence of further modification of the contributor version. For -purposes of this definition, "control" includes the right to grant -patent sublicenses in a manner consistent with the requirements of -this License. 
- - Each contributor grants you a non-exclusive, worldwide, royalty-free -patent license under the contributor's essential patent claims, to -make, use, sell, offer for sale, import and otherwise run, modify and -propagate the contents of its contributor version. - - In the following three paragraphs, a "patent license" is any express -agreement or commitment, however denominated, not to enforce a patent -(such as an express permission to practice a patent or covenant not to -sue for patent infringement). To "grant" such a patent license to a -party means to make such an agreement or commitment not to enforce a -patent against the party. - - If you convey a covered work, knowingly relying on a patent license, -and the Corresponding Source of the work is not available for anyone -to copy, free of charge and under the terms of this License, through a -publicly available network server or other readily accessible means, -then you must either (1) cause the Corresponding Source to be so -available, or (2) arrange to deprive yourself of the benefit of the -patent license for this particular work, or (3) arrange, in a manner -consistent with the requirements of this License, to extend the patent -license to downstream recipients. "Knowingly relying" means you have -actual knowledge that, but for the patent license, your conveying the -covered work in a country, or your recipient's use of the covered work -in a country, would infringe one or more identifiable patents in that -country that you have reason to believe are valid. - - If, pursuant to or in connection with a single transaction or -arrangement, you convey, or propagate by procuring conveyance of, a -covered work, and grant a patent license to some of the parties -receiving the covered work authorizing them to use, propagate, modify -or convey a specific copy of the covered work, then the patent license -you grant is automatically extended to all recipients of the covered -work and works based on it. - - A patent license is "discriminatory" if it does not include within -the scope of its coverage, prohibits the exercise of, or is -conditioned on the non-exercise of one or more of the rights that are -specifically granted under this License. You may not convey a covered -work if you are a party to an arrangement with a third party that is -in the business of distributing software, under which you make payment -to the third party based on the extent of your activity of conveying -the work, and under which the third party grants, to any of the -parties who would receive the covered work from you, a discriminatory -patent license (a) in connection with copies of the covered work -conveyed by you (or copies made from those copies), or (b) primarily -for and in connection with specific products or compilations that -contain the covered work, unless you entered into that arrangement, -or that patent license was granted, prior to 28 March 2007. - - Nothing in this License shall be construed as excluding or limiting -any implied license or other defenses to infringement that may -otherwise be available to you under applicable patent law. - - 12. No Surrender of Others' Freedom. - - If conditions are imposed on you (whether by court order, agreement or -otherwise) that contradict the conditions of this License, they do not -excuse you from the conditions of this License. 
If you cannot convey a -covered work so as to satisfy simultaneously your obligations under this -License and any other pertinent obligations, then as a consequence you may -not convey it at all. For example, if you agree to terms that obligate you -to collect a royalty for further conveying from those to whom you convey -the Program, the only way you could satisfy both those terms and this -License would be to refrain entirely from conveying the Program. - - 13. Use with the GNU Affero General Public License. - - Notwithstanding any other provision of this License, you have -permission to link or combine any covered work with a work licensed -under version 3 of the GNU Affero General Public License into a single -combined work, and to convey the resulting work. The terms of this -License will continue to apply to the part which is the covered work, -but the special requirements of the GNU Affero General Public License, -section 13, concerning interaction through a network will apply to the -combination as such. - - 14. Revised Versions of this License. - - The Free Software Foundation may publish revised and/or new versions of -the GNU General Public License from time to time. Such new versions will -be similar in spirit to the present version, but may differ in detail to -address new problems or concerns. - - Each version is given a distinguishing version number. If the -Program specifies that a certain numbered version of the GNU General -Public License "or any later version" applies to it, you have the -option of following the terms and conditions either of that numbered -version or of any later version published by the Free Software -Foundation. If the Program does not specify a version number of the -GNU General Public License, you may choose any version ever published -by the Free Software Foundation. - - If the Program specifies that a proxy can decide which future -versions of the GNU General Public License can be used, that proxy's -public statement of acceptance of a version permanently authorizes you -to choose that version for the Program. - - Later license versions may give you additional or different -permissions. However, no additional obligations are imposed on any -author or copyright holder as a result of your choosing to follow a -later version. - - 15. Disclaimer of Warranty. - - THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY -APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT -HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY -OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, -THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM -IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF -ALL NECESSARY SERVICING, REPAIR OR CORRECTION. - - 16. Limitation of Liability. - - IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING -WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS -THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY -GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE -USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF -DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD -PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), -EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF -SUCH DAMAGES. - - 17. 
Interpretation of Sections 15 and 16.
-
-  If the disclaimer of warranty and limitation of liability provided
-above cannot be given local legal effect according to their terms,
-reviewing courts shall apply local law that most closely approximates
-an absolute waiver of all civil liability in connection with the
-Program, unless a warranty or assumption of liability accompanies a
-copy of the Program in return for a fee.
-
-                     END OF TERMS AND CONDITIONS
-
-            How to Apply These Terms to Your New Programs
-
-  If you develop a new program, and you want it to be of the greatest
-possible use to the public, the best way to achieve this is to make it
-free software which everyone can redistribute and change under these terms.
-
-  To do so, attach the following notices to the program.  It is safest
-to attach them to the start of each source file to most effectively
-state the exclusion of warranty; and each file should have at least
-the "copyright" line and a pointer to where the full notice is found.
-
-    <one line to give the program's name and a brief idea of what it does.>
-    Copyright (C) <year>  <name of author>
-
-    This program is free software: you can redistribute it and/or modify
-    it under the terms of the GNU General Public License as published by
-    the Free Software Foundation, either version 3 of the License, or
-    (at your option) any later version.
-
-    This program is distributed in the hope that it will be useful,
-    but WITHOUT ANY WARRANTY; without even the implied warranty of
-    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-    GNU General Public License for more details.
-
-    You should have received a copy of the GNU General Public License
-    along with this program.  If not, see <https://www.gnu.org/licenses/>.
-
-Also add information on how to contact you by electronic and paper mail.
-
-  If the program does terminal interaction, make it output a short
-notice like this when it starts in an interactive mode:
-
-    <program>  Copyright (C) <year>  <name of author>
-    This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
-    This is free software, and you are welcome to redistribute it
-    under certain conditions; type `show c' for details.
-
-The hypothetical commands `show w' and `show c' should show the appropriate
-parts of the General Public License.  Of course, your program's commands
-might be different; for a GUI interface, you would use an "about box".
-
-  You should also get your employer (if you work as a programmer) or school,
-if any, to sign a "copyright disclaimer" for the program, if necessary.
-For more information on this, and how to apply and follow the GNU GPL, see
-<https://www.gnu.org/licenses/>.
-
-  The GNU General Public License does not permit incorporating your program
-into proprietary programs.  If your program is a subroutine library, you
-may consider it more useful to permit linking proprietary applications with
-the library.  If this is what you want to do, use the GNU Lesser General
-Public License instead of this License.  But first, please read
-<https://www.gnu.org/licenses/why-not-lgpl.html>.
diff --git a/src/yolov5_ros/README.md b/src/yolov5_ros/README.md
deleted file mode 100644
index ada3000..0000000
--- a/src/yolov5_ros/README.md
+++ /dev/null
@@ -1,44 +0,0 @@
-# YOLOv5 ROS
-This is a ROS interface for using YOLOv5 for real time object detection on a ROS image topic. It supports inference on multiple deep learning frameworks used in the [official YOLOv5 repository](https://github.com/ultralytics/yolov5).
-
-## Installation
-
-### Dependencies
-This package is built and tested on Ubuntu 20.04 LTS and ROS Noetic with Python 3.8.
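A quick import smoke test, run once the installation steps below are complete, confirms that the ROS, PyTorch, and OpenCV Python bindings all resolve in the same interpreter. This is a minimal sketch, assuming a sourced ROS Noetic environment and the pip requirements installed below:

```python
#!/usr/bin/env python3
# Dependency smoke test: checks that the modules detect.py imports
# are visible from this interpreter (assumes ROS Noetic is sourced).
import rospy
import torch
import cv2
from cv_bridge import CvBridge

print("torch", torch.__version__, "| CUDA available:", torch.cuda.is_available())
print("OpenCV", cv2.__version__)
print("cv_bridge:", type(CvBridge()).__name__)
```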
-
-* Clone the packages to the ROS workspace and install the requirements for the YOLOv5 submodule:
-```bash
-cd <ros_workspace>/src
-git clone https://github.com/mats-robotics/detection_msgs.git
-git clone --recurse-submodules https://github.com/mats-robotics/yolov5_ros.git
-cd yolov5_ros/src/yolov5
-pip install -r requirements.txt # install the requirements for yolov5
-```
-* Build the ROS package:
-```bash
-cd <ros_workspace>
-catkin build yolov5_ros # build the ROS package
-```
-* Make the Python script executable:
-```bash
-cd <ros_workspace>/src/yolov5_ros/src
-chmod +x detect.py
-```
-
-## Basic usage
-Change the `input_image_topic` parameter in launch/yolov5.launch to any ROS topic with message type `sensor_msgs/Image` or `sensor_msgs/CompressedImage`. Other parameters can be modified or used as is.
-
-* Launch the node:
-```bash
-roslaunch yolov5_ros yolov5.launch
-```
-
-## Using custom weights and dataset (Working)
-* Put your weights into `yolov5_ros/src/yolov5`
-* Put the yaml file for your dataset classes into `yolov5_ros/src/yolov5/data`
-* Change the related ROS parameters in yolov5.launch: `weights`, `data`
-
-## Reference
-* YOLOv5 official repository: https://github.com/ultralytics/yolov5
-* YOLOv3 ROS PyTorch: https://github.com/eriklindernoren/PyTorch-YOLOv3
-* Darknet ROS: https://github.com/leggedrobotics/darknet_ros
diff --git a/src/yolov5_ros/launch/yolov5.launch b/src/yolov5_ros/launch/yolov5.launch
deleted file mode 100644
index 5cade72..0000000
--- a/src/yolov5_ros/launch/yolov5.launch
+++ /dev/null
@@ -1,56 +0,0 @@
diff --git a/src/yolov5_ros/launch/yolov5_d435.launch b/src/yolov5_ros/launch/yolov5_d435.launch
deleted file mode 100644
index 0bbb85e..0000000
--- a/src/yolov5_ros/launch/yolov5_d435.launch
+++ /dev/null
@@ -1,56 +0,0 @@
diff --git a/src/yolov5_ros/package.xml b/src/yolov5_ros/package.xml
deleted file mode 100644
index d3f15dc..0000000
--- a/src/yolov5_ros/package.xml
+++ /dev/null
@@ -1,72 +0,0 @@
-<?xml version="1.0"?>
-<package format="2">
-  <name>yolov5_ros</name>
-  <version>0.0.0</version>
-  <description>The yolov5_ros package</description>
-  <maintainer email="nle17@todo.todo">nle17</maintainer>
-  <license>TODO</license>
-  <buildtool_depend>catkin</buildtool_depend>
-  <build_depend>rospy</build_depend>
-  <build_depend>sensor_msgs</build_depend>
-  <build_depend>std_msgs</build_depend>
-  <build_depend>detection_msgs</build_depend>
-  <build_export_depend>rospy</build_export_depend>
-  <build_export_depend>sensor_msgs</build_export_depend>
-  <build_export_depend>std_msgs</build_export_depend>
-  <build_export_depend>detection_msgs</build_export_depend>
-  <exec_depend>rospy</exec_depend>
-  <exec_depend>sensor_msgs</exec_depend>
-  <exec_depend>std_msgs</exec_depend>
-  <exec_depend>detection_msgs</exec_depend>
-  <export>
-  </export>
-</package>
diff --git a/src/yolov5_ros/src/detect.py b/src/yolov5_ros/src/detect.py
deleted file mode 100755
index 0b79314..0000000
--- a/src/yolov5_ros/src/detect.py
+++ /dev/null
@@ -1,200 +0,0 @@
-#!/usr/bin/env python3
-
-import rospy
-import cv2
-import torch
-import torch.backends.cudnn as cudnn
-import numpy as np
-from cv_bridge import CvBridge
-from pathlib import Path
-import os
-import sys
-from rostopic import get_topic_type
-
-from sensor_msgs.msg import Image, CompressedImage
-from detection_msgs.msg import BoundingBox, BoundingBoxes
-
-
-# add yolov5 submodule to path
-FILE = Path(__file__).resolve()
-ROOT = FILE.parents[0] / "yolov5"
-if str(ROOT) not in sys.path:
-    sys.path.append(str(ROOT))  # add ROOT to PATH
-ROOT = Path(os.path.relpath(ROOT, Path.cwd()))  # relative path
-
-# import from yolov5 submodules
-from models.common import DetectMultiBackend
-from utils.general import (
-    check_img_size,
-    check_requirements,
-    non_max_suppression,
-    scale_coords
-)
-from utils.plots import Annotator, colors
-from utils.torch_utils import select_device
-from utils.augmentations import letterbox
-
-
-class Yolov5Detector:
-    def __init__(self):
-        self.conf_thres = rospy.get_param("~confidence_threshold")
-        self.iou_thres = rospy.get_param("~iou_threshold")
-        self.agnostic_nms = rospy.get_param("~agnostic_nms")
-        self.max_det = rospy.get_param("~maximum_detections")
-        self.classes = rospy.get_param("~classes", None)
-        self.line_thickness = rospy.get_param("~line_thickness")
-        self.view_image = rospy.get_param("~view_image")
-        # Initialize weights
-        weights = rospy.get_param("~weights")
-        # Initialize model
-        self.device = select_device(str(rospy.get_param("~device", "")))
-        self.model = DetectMultiBackend(weights, device=self.device, dnn=rospy.get_param("~dnn"), data=rospy.get_param("~data"))
-        self.stride, self.names, self.pt, self.jit, self.onnx, self.engine = (
-            self.model.stride,
-            self.model.names,
-            self.model.pt,
-            self.model.jit,
-            self.model.onnx,
-            self.model.engine,
-        )
-
-        # Setting inference size
-        self.img_size = [rospy.get_param("~inference_size_w", 640), rospy.get_param("~inference_size_h", 480)]
-        self.img_size = check_img_size(self.img_size, s=self.stride)
-
-        # Half
-        self.half = rospy.get_param("~half", False)
-        self.half &= (
-            self.pt or self.jit or self.onnx or self.engine
-        ) and self.device.type != "cpu"  # FP16 supported on limited backends with CUDA
-        if self.pt or self.jit:
-            self.model.model.half() if self.half else self.model.model.float()
-        bs = 1  # batch_size
-        cudnn.benchmark = True  # set True to speed up constant image size inference
-        self.model.warmup()  # warmup
-
-        # Initialize subscriber to Image/CompressedImage topic
-        input_image_type, input_image_topic, _ = get_topic_type(rospy.get_param("~input_image_topic"), blocking=True)
-        self.compressed_input = input_image_type == "sensor_msgs/CompressedImage"
-
-        if self.compressed_input:
-            self.image_sub = rospy.Subscriber(
-                input_image_topic, CompressedImage, self.callback, queue_size=1
-            )
-        else:
-            self.image_sub = rospy.Subscriber(
-                input_image_topic, Image, self.callback, queue_size=1
-            )
-
-        # Initialize prediction publisher
-        self.pred_pub = rospy.Publisher(
-            rospy.get_param("~output_topic"), BoundingBoxes, queue_size=10
-        )
-        # Initialize image publisher
-        self.publish_image = rospy.get_param("~publish_image")
-        if self.publish_image:
-            self.image_pub = rospy.Publisher(
-                rospy.get_param("~output_image_topic"), Image, queue_size=10
-            )
-
-        # Initialize CV_Bridge
-        self.bridge = CvBridge()
-
-    @torch.no_grad()  # inference only; do not track gradients in the callback
-    def callback(self, data):
-        """adapted from yolov5/detect.py"""
-        # Convert the incoming message to an OpenCV BGR image
-        if self.compressed_input:
-            im = self.bridge.compressed_imgmsg_to_cv2(data, desired_encoding="bgr8")
-        else:
-            im = self.bridge.imgmsg_to_cv2(data, desired_encoding="bgr8")
-
-        im, im0 = self.preprocess(im)
-
-        # Run inference
-        im = torch.from_numpy(im).to(self.device)
-        im = im.half() if self.half else im.float()
-        im /= 255
-        if len(im.shape) == 3:
-            im = im[None]
-
-        pred = self.model(im, augment=False, visualize=False)
-        pred = non_max_suppression(
-            pred, self.conf_thres, self.iou_thres, self.classes, self.agnostic_nms, max_det=self.max_det
-        )
-
-        # Process predictions: move them to the CPU and fill BoundingBox messages
-        det = pred[0].cpu().numpy()
-
-        bounding_boxes = BoundingBoxes()
-        bounding_boxes.header = data.header
-        bounding_boxes.image_header = data.header
-
-        annotator = Annotator(im0, line_width=self.line_thickness, example=str(self.names))
-        if len(det):
-            # Rescale boxes from img_size to im0 size
-            det[:, :4] = scale_coords(im.shape[2:], det[:, :4], im0.shape).round()
-
-            # Write results
-            for *xyxy, conf, cls in reversed(det):
-                bounding_box = BoundingBox()
-                c = int(cls)
-                # Fill in bounding box message
-                bounding_box.Class = self.names[c]
-                bounding_box.probability = conf
-                bounding_box.xmin = int(xyxy[0])
-                bounding_box.ymin = int(xyxy[1])
-                bounding_box.xmax = int(xyxy[2])
-                bounding_box.ymax = int(xyxy[3])
-
-                bounding_boxes.bounding_boxes.append(bounding_box)
-
-                # Annotate the image
-                if self.publish_image or self.view_image:  # Add bbox to image
-                    # integer class
-                    label = f"{self.names[c]} {conf:.2f}"
-                    annotator.box_label(xyxy, label, color=colors(c, True))
-
-        # Stream results
-        im0 = annotator.result()
-
-        # Publish prediction
-        self.pred_pub.publish(bounding_boxes)
-
-        # Publish & visualize images
-        if self.view_image:
-            cv2.imshow(str(0), im0)
-            cv2.waitKey(1)  # 1 millisecond
-        if self.publish_image:
-            self.image_pub.publish(self.bridge.cv2_to_imgmsg(im0, "bgr8"))
-
-    def preprocess(self, img):
-        """
-        Adapted from yolov5/utils/datasets.py LoadStreams class
-        """
-        img0 = img.copy()
-        img = np.array([letterbox(img, self.img_size, stride=self.stride, auto=self.pt)[0]])
-        # Convert
-        img = img[..., ::-1].transpose((0, 3, 1, 2))  # BGR to RGB, BHWC to BCHW
-        img = np.ascontiguousarray(img)
-
-        return img, img0
-
-
-if __name__ == "__main__":
-
-    check_requirements(exclude=("tensorboard", "thop"))
-
-    rospy.init_node("yolov5", anonymous=True)
-    detector = Yolov5Detector()
-
-    rospy.spin()
diff --git a/src/yolov5_ros/src/yolov5/.dockerignore b/src/yolov5_ros/src/yolov5/.dockerignore
deleted file mode 100644
index 3b66925..0000000
--- a/src/yolov5_ros/src/yolov5/.dockerignore
+++ /dev/null
@@ -1,222 +0,0 @@
-# Repo-specific DockerIgnore -------------------------------------------------------------------------------------------
-.git
-.cache
-.idea
-runs
-output
-coco
-storage.googleapis.com
-
-data/samples/*
-**/results*.csv
-*.jpg
-
-# Neural Network weights -----------------------------------------------------------------------------------------------
-**/*.pt
-**/*.pth
-**/*.onnx
-**/*.engine
-**/*.mlmodel
-**/*.torchscript
-**/*.torchscript.pt
-**/*.tflite
-**/*.h5
-**/*.pb
-*_saved_model/
-*_web_model/
-*_openvino_model/
-
-# Below Copied From .gitignore -----------------------------------------------------------------------------------------
-
-
-# GitHub Python GitIgnore ----------------------------------------------------------------------------------------------
-# Byte-compiled / optimized / DLL files
-__pycache__/
-*.py[cod]
-*$py.class
-
-# C extensions
-*.so
-
-# Distribution / packaging
-.Python
-env/
-build/
-develop-eggs/
-dist/
-downloads/
-eggs/
-.eggs/
-lib/
-lib64/
-parts/
-sdist/
-var/
-wheels/
-*.egg-info/
-wandb/
-.installed.cfg
-*.egg
-
-# PyInstaller
-# Usually these files are written by a python script from a template
-# before PyInstaller builds the exe, so as to inject date/other infos into it.
-*.manifest -*.spec - -# Installer logs -pip-log.txt -pip-delete-this-directory.txt - -# Unit test / coverage reports -htmlcov/ -.tox/ -.coverage -.coverage.* -.cache -nosetests.xml -coverage.xml -*.cover -.hypothesis/ - -# Translations -*.mo -*.pot - -# Django stuff: -*.log -local_settings.py - -# Flask stuff: -instance/ -.webassets-cache - -# Scrapy stuff: -.scrapy - -# Sphinx documentation -docs/_build/ - -# PyBuilder -target/ - -# Jupyter Notebook -.ipynb_checkpoints - -# pyenv -.python-version - -# celery beat schedule file -celerybeat-schedule - -# SageMath parsed files -*.sage.py - -# dotenv -.env - -# virtualenv -.venv* -venv*/ -ENV*/ - -# Spyder project settings -.spyderproject -.spyproject - -# Rope project settings -.ropeproject - -# mkdocs documentation -/site - -# mypy -.mypy_cache/ - - -# https://github.com/github/gitignore/blob/master/Global/macOS.gitignore ----------------------------------------------- - -# General -.DS_Store -.AppleDouble -.LSOverride - -# Icon must end with two \r -Icon -Icon? - -# Thumbnails -._* - -# Files that might appear in the root of a volume -.DocumentRevisions-V100 -.fseventsd -.Spotlight-V100 -.TemporaryItems -.Trashes -.VolumeIcon.icns -.com.apple.timemachine.donotpresent - -# Directories potentially created on remote AFP share -.AppleDB -.AppleDesktop -Network Trash Folder -Temporary Items -.apdisk - - -# https://github.com/github/gitignore/blob/master/Global/JetBrains.gitignore -# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and WebStorm -# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839 - -# User-specific stuff: -.idea/* -.idea/**/workspace.xml -.idea/**/tasks.xml -.idea/dictionaries -.html # Bokeh Plots -.pg # TensorFlow Frozen Graphs -.avi # videos - -# Sensitive or high-churn files: -.idea/**/dataSources/ -.idea/**/dataSources.ids -.idea/**/dataSources.local.xml -.idea/**/sqlDataSources.xml -.idea/**/dynamic.xml -.idea/**/uiDesigner.xml - -# Gradle: -.idea/**/gradle.xml -.idea/**/libraries - -# CMake -cmake-build-debug/ -cmake-build-release/ - -# Mongo Explorer plugin: -.idea/**/mongoSettings.xml - -## File-based project format: -*.iws - -## Plugin-specific files: - -# IntelliJ -out/ - -# mpeltonen/sbt-idea plugin -.idea_modules/ - -# JIRA plugin -atlassian-ide-plugin.xml - -# Cursive Clojure plugin -.idea/replstate.xml - -# Crashlytics plugin (for Android Studio and IntelliJ) -com_crashlytics_export_strings.xml -crashlytics.properties -crashlytics-build.properties -fabric.properties diff --git a/src/yolov5_ros/src/yolov5/.gitignore b/src/yolov5_ros/src/yolov5/.gitignore deleted file mode 100644 index 47afce3..0000000 --- a/src/yolov5_ros/src/yolov5/.gitignore +++ /dev/null @@ -1,255 +0,0 @@ -# Repo-specific GitIgnore ---------------------------------------------------------------------------------------------- -*.jpg -*.jpeg -*.png -*.bmp -*.tif -*.tiff -*.heic -*.JPG -*.JPEG -*.PNG -*.BMP -*.TIF -*.TIFF -*.HEIC -*.mp4 -*.mov -*.MOV -*.avi -*.data -*.json -*.cfg -!setup.cfg -!cfg/yolov3*.cfg - -storage.googleapis.com -runs/* -data/* -data/images/* -!data/*.yaml -!data/hyps -!data/scripts -!data/images -!data/images/zidane.jpg -!data/images/bus.jpg -!data/*.sh - -results*.csv - -# Datasets ------------------------------------------------------------------------------------------------------------- -coco/ -coco128/ -VOC/ - -# MATLAB GitIgnore ----------------------------------------------------------------------------------------------------- -*.m~ -*.mat 
-!targets*.mat - -# Neural Network weights ----------------------------------------------------------------------------------------------- -*.weights -*.pb -*.onnx -*.engine -*.mlmodel -*.torchscript -*.tflite -*.h5 -*_saved_model/ -*_web_model/ -*_openvino_model/ -darknet53.conv.74 -yolov3-tiny.conv.15 - -# GitHub Python GitIgnore ---------------------------------------------------------------------------------------------- -# Byte-compiled / optimized / DLL files -__pycache__/ -*.py[cod] -*$py.class - -# C extensions -*.so - -# Distribution / packaging -.Python -env/ -build/ -develop-eggs/ -dist/ -downloads/ -eggs/ -.eggs/ -lib/ -lib64/ -parts/ -sdist/ -var/ -wheels/ -*.egg-info/ -/wandb/ -.installed.cfg -*.egg - - -# PyInstaller -# Usually these files are written by a python script from a template -# before PyInstaller builds the exe, so as to inject date/other infos into it. -*.manifest -*.spec - -# Installer logs -pip-log.txt -pip-delete-this-directory.txt - -# Unit test / coverage reports -htmlcov/ -.tox/ -.coverage -.coverage.* -.cache -nosetests.xml -coverage.xml -*.cover -.hypothesis/ - -# Translations -*.mo -*.pot - -# Django stuff: -*.log -local_settings.py - -# Flask stuff: -instance/ -.webassets-cache - -# Scrapy stuff: -.scrapy - -# Sphinx documentation -docs/_build/ - -# PyBuilder -target/ - -# Jupyter Notebook -.ipynb_checkpoints - -# pyenv -.python-version - -# celery beat schedule file -celerybeat-schedule - -# SageMath parsed files -*.sage.py - -# dotenv -.env - -# virtualenv -.venv* -venv*/ -ENV*/ - -# Spyder project settings -.spyderproject -.spyproject - -# Rope project settings -.ropeproject - -# mkdocs documentation -/site - -# mypy -.mypy_cache/ - - -# https://github.com/github/gitignore/blob/master/Global/macOS.gitignore ----------------------------------------------- - -# General -.DS_Store -.AppleDouble -.LSOverride - -# Icon must end with two \r -Icon -Icon? 
- -# Thumbnails -._* - -# Files that might appear in the root of a volume -.DocumentRevisions-V100 -.fseventsd -.Spotlight-V100 -.TemporaryItems -.Trashes -.VolumeIcon.icns -.com.apple.timemachine.donotpresent - -# Directories potentially created on remote AFP share -.AppleDB -.AppleDesktop -Network Trash Folder -Temporary Items -.apdisk - - -# https://github.com/github/gitignore/blob/master/Global/JetBrains.gitignore -# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and WebStorm -# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839 - -# User-specific stuff: -.idea/* -.idea/**/workspace.xml -.idea/**/tasks.xml -.idea/dictionaries -.html # Bokeh Plots -.pg # TensorFlow Frozen Graphs -.avi # videos - -# Sensitive or high-churn files: -.idea/**/dataSources/ -.idea/**/dataSources.ids -.idea/**/dataSources.local.xml -.idea/**/sqlDataSources.xml -.idea/**/dynamic.xml -.idea/**/uiDesigner.xml - -# Gradle: -.idea/**/gradle.xml -.idea/**/libraries - -# CMake -cmake-build-debug/ -cmake-build-release/ - -# Mongo Explorer plugin: -.idea/**/mongoSettings.xml - -## File-based project format: -*.iws - -## Plugin-specific files: - -# IntelliJ -out/ - -# mpeltonen/sbt-idea plugin -.idea_modules/ - -# JIRA plugin -atlassian-ide-plugin.xml - -# Cursive Clojure plugin -.idea/replstate.xml - -# Crashlytics plugin (for Android Studio and IntelliJ) -com_crashlytics_export_strings.xml -crashlytics.properties -crashlytics-build.properties -fabric.properties diff --git a/src/yolov5_ros/src/yolov5/.pre-commit-config.yaml b/src/yolov5_ros/src/yolov5/.pre-commit-config.yaml deleted file mode 100644 index ba80055..0000000 --- a/src/yolov5_ros/src/yolov5/.pre-commit-config.yaml +++ /dev/null @@ -1,64 +0,0 @@ -# Define hooks for code formations -# Will be applied on any updated commit files if a user has installed and linked commit hook - -default_language_version: - python: python3.8 - -# Define bot property if installed via https://github.com/marketplace/pre-commit-ci -ci: - autofix_prs: true - autoupdate_commit_msg: '[pre-commit.ci] pre-commit suggestions' - autoupdate_schedule: monthly - # submodules: true - -repos: - - repo: https://github.com/pre-commit/pre-commit-hooks - rev: v4.3.0 - hooks: - # - id: end-of-file-fixer - - id: trailing-whitespace - - id: check-case-conflict - - id: check-yaml - - id: check-toml - - id: pretty-format-json - - id: check-docstring-first - - - repo: https://github.com/asottile/pyupgrade - rev: v2.37.3 - hooks: - - id: pyupgrade - name: Upgrade code - args: [ --py37-plus ] - - - repo: https://github.com/PyCQA/isort - rev: 5.10.1 - hooks: - - id: isort - name: Sort imports - - - repo: https://github.com/pre-commit/mirrors-yapf - rev: v0.32.0 - hooks: - - id: yapf - name: YAPF formatting - - - repo: https://github.com/executablebooks/mdformat - rev: 0.7.16 - hooks: - - id: mdformat - name: MD formatting - additional_dependencies: - - mdformat-gfm - - mdformat-black - exclude: "README.md|README_cn.md" - - - repo: https://github.com/asottile/yesqa - rev: v1.4.0 - hooks: - - id: yesqa - - - repo: https://github.com/PyCQA/flake8 - rev: 5.0.4 - hooks: - - id: flake8 - name: PEP8 diff --git a/src/yolov5_ros/src/yolov5/CONTRIBUTING.md b/src/yolov5_ros/src/yolov5/CONTRIBUTING.md deleted file mode 100644 index 7498f89..0000000 --- a/src/yolov5_ros/src/yolov5/CONTRIBUTING.md +++ /dev/null @@ -1,93 +0,0 @@ -## Contributing to YOLOv5 🚀 - -We love your input! 
We want to make contributing to YOLOv5 as easy and transparent as possible, whether it's: - -- Reporting a bug -- Discussing the current state of the code -- Submitting a fix -- Proposing a new feature -- Becoming a maintainer - -YOLOv5 works so well due to our combined community effort, and for every small improvement you contribute you will be -helping push the frontiers of what's possible in AI 😃! - -## Submitting a Pull Request (PR) 🛠️ - -Submitting a PR is easy! This example shows how to submit a PR for updating `requirements.txt` in 4 steps: - -### 1. Select File to Update - -Select `requirements.txt` to update by clicking on it in GitHub. - -

-<p align="center"><img alt="PR_step1"></p>

- -### 2. Click 'Edit this file' - -Button is in top-right corner. - -

-<p align="center"><img alt="PR_step2"></p>

- -### 3. Make Changes - -Change `matplotlib` version from `3.2.2` to `3.3`. - -

-<p align="center"><img alt="PR_step3"></p>

- -### 4. Preview Changes and Submit PR - -Click on the **Preview changes** tab to verify your updates. At the bottom of the screen select 'Create a **new branch** -for this commit', assign your branch a descriptive name such as `fix/matplotlib_version` and click the green **Propose -changes** button. All done, your PR is now submitted to YOLOv5 for review and approval 😃! - -

-<p align="center"><img alt="PR_step4"></p>

- -### PR recommendations - -To allow your work to be integrated as seamlessly as possible, we advise you to: - -- ✅ Verify your PR is **up-to-date** with `ultralytics/yolov5` `master` branch. If your PR is behind you can update - your code by clicking the 'Update branch' button or by running `git pull` and `git merge master` locally. - -

-<p align="center"><img alt="Screenshot 2022-08-29 at 22 47 15"></p>

- -- ✅ Verify all YOLOv5 Continuous Integration (CI) **checks are passing**. - -

-<p align="center"><img alt="Screenshot 2022-08-29 at 22 47 03"></p>

-
-- ✅ Reduce changes to the absolute **minimum** required for your bug fix or feature addition. _"It is not daily increase
-  but daily decrease, hack away the unessential. The closer to the source, the less wastage there is."_ — Bruce Lee
-
-## Submitting a Bug Report 🐛
-
-If you spot a problem with YOLOv5 please submit a Bug Report!
-
-For us to start investigating a possible problem we need to be able to reproduce it ourselves first. We've created a few
-short guidelines below to help users provide what we need in order to get started.
-
-When asking a question, people will be better able to provide help if you provide **code** that they can easily
-understand and use to **reproduce** the problem. This is referred to by community members as creating
-a [minimum reproducible example](https://stackoverflow.com/help/minimal-reproducible-example). Your code that reproduces
-the problem should be:
-
-- ✅ **Minimal** – Use as little code as possible that still produces the same problem
-- ✅ **Complete** – Provide **all** parts someone else needs to reproduce your problem in the question itself
-- ✅ **Reproducible** – Test the code you're about to provide to make sure it reproduces the problem
-
-In addition to the above requirements, for [Ultralytics](https://ultralytics.com/) to provide assistance your code
-should be:
-
-- ✅ **Current** – Verify that your code is up-to-date with current
-  GitHub [master](https://github.com/ultralytics/yolov5/tree/master), and if necessary `git pull` or `git clone` a new
-  copy to ensure your problem has not already been resolved by previous commits.
-- ✅ **Unmodified** – Your problem must be reproducible without any modifications to the codebase in this
-  repository. [Ultralytics](https://ultralytics.com/) does not provide support for custom code ⚠️.
-
-If you believe your problem meets all of the above criteria, please close this issue and raise a new one using the 🐛
-**Bug Report** [template](https://github.com/ultralytics/yolov5/issues/new/choose) and providing
-a [minimum reproducible example](https://stackoverflow.com/help/minimal-reproducible-example) to help us better
-understand and diagnose your problem.
-
-## License
-
-By contributing, you agree that your contributions will be licensed under
-the [GPL-3.0 license](https://choosealicense.com/licenses/gpl-3.0/)
diff --git a/src/yolov5_ros/src/yolov5/LICENSE b/src/yolov5_ros/src/yolov5/LICENSE
deleted file mode 100644
index 92b370f..0000000
--- a/src/yolov5_ros/src/yolov5/LICENSE
+++ /dev/null
@@ -1,674 +0,0 @@
-                    GNU GENERAL PUBLIC LICENSE
-                       Version 3, 29 June 2007
-
- Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
- Everyone is permitted to copy and distribute verbatim copies
- of this license document, but changing it is not allowed.
-
-                            Preamble
-
-  The GNU General Public License is a free, copyleft license for
-software and other kinds of works.
-
-  The licenses for most software and other practical works are designed
-to take away your freedom to share and change the works.  By contrast,
-the GNU General Public License is intended to guarantee your freedom to
-share and change all versions of a program--to make sure it remains free
-software for all its users.  We, the Free Software Foundation, use the
-GNU General Public License for most of our software; it applies also to
-any other work released this way by its authors.  You can apply it to
-your programs, too.
-
-  When we speak of free software, we are referring to freedom, not
-price.
Our General Public Licenses are designed to make sure that you -have the freedom to distribute copies of free software (and charge for -them if you wish), that you receive source code or can get it if you -want it, that you can change the software or use pieces of it in new -free programs, and that you know you can do these things. - - To protect your rights, we need to prevent others from denying you -these rights or asking you to surrender the rights. Therefore, you have -certain responsibilities if you distribute copies of the software, or if -you modify it: responsibilities to respect the freedom of others. - - For example, if you distribute copies of such a program, whether -gratis or for a fee, you must pass on to the recipients the same -freedoms that you received. You must make sure that they, too, receive -or can get the source code. And you must show them these terms so they -know their rights. - - Developers that use the GNU GPL protect your rights with two steps: -(1) assert copyright on the software, and (2) offer you this License -giving you legal permission to copy, distribute and/or modify it. - - For the developers' and authors' protection, the GPL clearly explains -that there is no warranty for this free software. For both users' and -authors' sake, the GPL requires that modified versions be marked as -changed, so that their problems will not be attributed erroneously to -authors of previous versions. - - Some devices are designed to deny users access to install or run -modified versions of the software inside them, although the manufacturer -can do so. This is fundamentally incompatible with the aim of -protecting users' freedom to change the software. The systematic -pattern of such abuse occurs in the area of products for individuals to -use, which is precisely where it is most unacceptable. Therefore, we -have designed this version of the GPL to prohibit the practice for those -products. If such problems arise substantially in other domains, we -stand ready to extend this provision to those domains in future versions -of the GPL, as needed to protect the freedom of users. - - Finally, every program is threatened constantly by software patents. -States should not allow patents to restrict development and use of -software on general-purpose computers, but in those that do, we wish to -avoid the special danger that patents applied to a free program could -make it effectively proprietary. To prevent this, the GPL assures that -patents cannot be used to render the program non-free. - - The precise terms and conditions for copying, distribution and -modification follow. - - TERMS AND CONDITIONS - - 0. Definitions. - - "This License" refers to version 3 of the GNU General Public License. - - "Copyright" also means copyright-like laws that apply to other kinds of -works, such as semiconductor masks. - - "The Program" refers to any copyrightable work licensed under this -License. Each licensee is addressed as "you". "Licensees" and -"recipients" may be individuals or organizations. - - To "modify" a work means to copy from or adapt all or part of the work -in a fashion requiring copyright permission, other than the making of an -exact copy. The resulting work is called a "modified version" of the -earlier work or a work "based on" the earlier work. - - A "covered work" means either the unmodified Program or a work based -on the Program. 
- - To "propagate" a work means to do anything with it that, without -permission, would make you directly or secondarily liable for -infringement under applicable copyright law, except executing it on a -computer or modifying a private copy. Propagation includes copying, -distribution (with or without modification), making available to the -public, and in some countries other activities as well. - - To "convey" a work means any kind of propagation that enables other -parties to make or receive copies. Mere interaction with a user through -a computer network, with no transfer of a copy, is not conveying. - - An interactive user interface displays "Appropriate Legal Notices" -to the extent that it includes a convenient and prominently visible -feature that (1) displays an appropriate copyright notice, and (2) -tells the user that there is no warranty for the work (except to the -extent that warranties are provided), that licensees may convey the -work under this License, and how to view a copy of this License. If -the interface presents a list of user commands or options, such as a -menu, a prominent item in the list meets this criterion. - - 1. Source Code. - - The "source code" for a work means the preferred form of the work -for making modifications to it. "Object code" means any non-source -form of a work. - - A "Standard Interface" means an interface that either is an official -standard defined by a recognized standards body, or, in the case of -interfaces specified for a particular programming language, one that -is widely used among developers working in that language. - - The "System Libraries" of an executable work include anything, other -than the work as a whole, that (a) is included in the normal form of -packaging a Major Component, but which is not part of that Major -Component, and (b) serves only to enable use of the work with that -Major Component, or to implement a Standard Interface for which an -implementation is available to the public in source code form. A -"Major Component", in this context, means a major essential component -(kernel, window system, and so on) of the specific operating system -(if any) on which the executable work runs, or a compiler used to -produce the work, or an object code interpreter used to run it. - - The "Corresponding Source" for a work in object code form means all -the source code needed to generate, install, and (for an executable -work) run the object code and to modify the work, including scripts to -control those activities. However, it does not include the work's -System Libraries, or general-purpose tools or generally available free -programs which are used unmodified in performing those activities but -which are not part of the work. For example, Corresponding Source -includes interface definition files associated with source files for -the work, and the source code for shared libraries and dynamically -linked subprograms that the work is specifically designed to require, -such as by intimate data communication or control flow between those -subprograms and other parts of the work. - - The Corresponding Source need not include anything that users -can regenerate automatically from other parts of the Corresponding -Source. - - The Corresponding Source for a work in source code form is that -same work. - - 2. Basic Permissions. - - All rights granted under this License are granted for the term of -copyright on the Program, and are irrevocable provided the stated -conditions are met. 
This License explicitly affirms your unlimited -permission to run the unmodified Program. The output from running a -covered work is covered by this License only if the output, given its -content, constitutes a covered work. This License acknowledges your -rights of fair use or other equivalent, as provided by copyright law. - - You may make, run and propagate covered works that you do not -convey, without conditions so long as your license otherwise remains -in force. You may convey covered works to others for the sole purpose -of having them make modifications exclusively for you, or provide you -with facilities for running those works, provided that you comply with -the terms of this License in conveying all material for which you do -not control copyright. Those thus making or running the covered works -for you must do so exclusively on your behalf, under your direction -and control, on terms that prohibit them from making any copies of -your copyrighted material outside their relationship with you. - - Conveying under any other circumstances is permitted solely under -the conditions stated below. Sublicensing is not allowed; section 10 -makes it unnecessary. - - 3. Protecting Users' Legal Rights From Anti-Circumvention Law. - - No covered work shall be deemed part of an effective technological -measure under any applicable law fulfilling obligations under article -11 of the WIPO copyright treaty adopted on 20 December 1996, or -similar laws prohibiting or restricting circumvention of such -measures. - - When you convey a covered work, you waive any legal power to forbid -circumvention of technological measures to the extent such circumvention -is effected by exercising rights under this License with respect to -the covered work, and you disclaim any intention to limit operation or -modification of the work as a means of enforcing, against the work's -users, your or third parties' legal rights to forbid circumvention of -technological measures. - - 4. Conveying Verbatim Copies. - - You may convey verbatim copies of the Program's source code as you -receive it, in any medium, provided that you conspicuously and -appropriately publish on each copy an appropriate copyright notice; -keep intact all notices stating that this License and any -non-permissive terms added in accord with section 7 apply to the code; -keep intact all notices of the absence of any warranty; and give all -recipients a copy of this License along with the Program. - - You may charge any price or no price for each copy that you convey, -and you may offer support or warranty protection for a fee. - - 5. Conveying Modified Source Versions. - - You may convey a work based on the Program, or the modifications to -produce it from the Program, in the form of source code under the -terms of section 4, provided that you also meet all of these conditions: - - a) The work must carry prominent notices stating that you modified - it, and giving a relevant date. - - b) The work must carry prominent notices stating that it is - released under this License and any conditions added under section - 7. This requirement modifies the requirement in section 4 to - "keep intact all notices". - - c) You must license the entire work, as a whole, under this - License to anyone who comes into possession of a copy. This - License will therefore apply, along with any applicable section 7 - additional terms, to the whole of the work, and all its parts, - regardless of how they are packaged. 
This License gives no - permission to license the work in any other way, but it does not - invalidate such permission if you have separately received it. - - d) If the work has interactive user interfaces, each must display - Appropriate Legal Notices; however, if the Program has interactive - interfaces that do not display Appropriate Legal Notices, your - work need not make them do so. - - A compilation of a covered work with other separate and independent -works, which are not by their nature extensions of the covered work, -and which are not combined with it such as to form a larger program, -in or on a volume of a storage or distribution medium, is called an -"aggregate" if the compilation and its resulting copyright are not -used to limit the access or legal rights of the compilation's users -beyond what the individual works permit. Inclusion of a covered work -in an aggregate does not cause this License to apply to the other -parts of the aggregate. - - 6. Conveying Non-Source Forms. - - You may convey a covered work in object code form under the terms -of sections 4 and 5, provided that you also convey the -machine-readable Corresponding Source under the terms of this License, -in one of these ways: - - a) Convey the object code in, or embodied in, a physical product - (including a physical distribution medium), accompanied by the - Corresponding Source fixed on a durable physical medium - customarily used for software interchange. - - b) Convey the object code in, or embodied in, a physical product - (including a physical distribution medium), accompanied by a - written offer, valid for at least three years and valid for as - long as you offer spare parts or customer support for that product - model, to give anyone who possesses the object code either (1) a - copy of the Corresponding Source for all the software in the - product that is covered by this License, on a durable physical - medium customarily used for software interchange, for a price no - more than your reasonable cost of physically performing this - conveying of source, or (2) access to copy the - Corresponding Source from a network server at no charge. - - c) Convey individual copies of the object code with a copy of the - written offer to provide the Corresponding Source. This - alternative is allowed only occasionally and noncommercially, and - only if you received the object code with such an offer, in accord - with subsection 6b. - - d) Convey the object code by offering access from a designated - place (gratis or for a charge), and offer equivalent access to the - Corresponding Source in the same way through the same place at no - further charge. You need not require recipients to copy the - Corresponding Source along with the object code. If the place to - copy the object code is a network server, the Corresponding Source - may be on a different server (operated by you or a third party) - that supports equivalent copying facilities, provided you maintain - clear directions next to the object code saying where to find the - Corresponding Source. Regardless of what server hosts the - Corresponding Source, you remain obligated to ensure that it is - available for as long as needed to satisfy these requirements. - - e) Convey the object code using peer-to-peer transmission, provided - you inform other peers where the object code and Corresponding - Source of the work are being offered to the general public at no - charge under subsection 6d. 
- - A separable portion of the object code, whose source code is excluded -from the Corresponding Source as a System Library, need not be -included in conveying the object code work. - - A "User Product" is either (1) a "consumer product", which means any -tangible personal property which is normally used for personal, family, -or household purposes, or (2) anything designed or sold for incorporation -into a dwelling. In determining whether a product is a consumer product, -doubtful cases shall be resolved in favor of coverage. For a particular -product received by a particular user, "normally used" refers to a -typical or common use of that class of product, regardless of the status -of the particular user or of the way in which the particular user -actually uses, or expects or is expected to use, the product. A product -is a consumer product regardless of whether the product has substantial -commercial, industrial or non-consumer uses, unless such uses represent -the only significant mode of use of the product. - - "Installation Information" for a User Product means any methods, -procedures, authorization keys, or other information required to install -and execute modified versions of a covered work in that User Product from -a modified version of its Corresponding Source. The information must -suffice to ensure that the continued functioning of the modified object -code is in no case prevented or interfered with solely because -modification has been made. - - If you convey an object code work under this section in, or with, or -specifically for use in, a User Product, and the conveying occurs as -part of a transaction in which the right of possession and use of the -User Product is transferred to the recipient in perpetuity or for a -fixed term (regardless of how the transaction is characterized), the -Corresponding Source conveyed under this section must be accompanied -by the Installation Information. But this requirement does not apply -if neither you nor any third party retains the ability to install -modified object code on the User Product (for example, the work has -been installed in ROM). - - The requirement to provide Installation Information does not include a -requirement to continue to provide support service, warranty, or updates -for a work that has been modified or installed by the recipient, or for -the User Product in which it has been modified or installed. Access to a -network may be denied when the modification itself materially and -adversely affects the operation of the network or violates the rules and -protocols for communication across the network. - - Corresponding Source conveyed, and Installation Information provided, -in accord with this section must be in a format that is publicly -documented (and with an implementation available to the public in -source code form), and must require no special password or key for -unpacking, reading or copying. - - 7. Additional Terms. - - "Additional permissions" are terms that supplement the terms of this -License by making exceptions from one or more of its conditions. -Additional permissions that are applicable to the entire Program shall -be treated as though they were included in this License, to the extent -that they are valid under applicable law. If additional permissions -apply only to part of the Program, that part may be used separately -under those permissions, but the entire Program remains governed by -this License without regard to the additional permissions. 
- - When you convey a copy of a covered work, you may at your option -remove any additional permissions from that copy, or from any part of -it. (Additional permissions may be written to require their own -removal in certain cases when you modify the work.) You may place -additional permissions on material, added by you to a covered work, -for which you have or can give appropriate copyright permission. - - Notwithstanding any other provision of this License, for material you -add to a covered work, you may (if authorized by the copyright holders of -that material) supplement the terms of this License with terms: - - a) Disclaiming warranty or limiting liability differently from the - terms of sections 15 and 16 of this License; or - - b) Requiring preservation of specified reasonable legal notices or - author attributions in that material or in the Appropriate Legal - Notices displayed by works containing it; or - - c) Prohibiting misrepresentation of the origin of that material, or - requiring that modified versions of such material be marked in - reasonable ways as different from the original version; or - - d) Limiting the use for publicity purposes of names of licensors or - authors of the material; or - - e) Declining to grant rights under trademark law for use of some - trade names, trademarks, or service marks; or - - f) Requiring indemnification of licensors and authors of that - material by anyone who conveys the material (or modified versions of - it) with contractual assumptions of liability to the recipient, for - any liability that these contractual assumptions directly impose on - those licensors and authors. - - All other non-permissive additional terms are considered "further -restrictions" within the meaning of section 10. If the Program as you -received it, or any part of it, contains a notice stating that it is -governed by this License along with a term that is a further -restriction, you may remove that term. If a license document contains -a further restriction but permits relicensing or conveying under this -License, you may add to a covered work material governed by the terms -of that license document, provided that the further restriction does -not survive such relicensing or conveying. - - If you add terms to a covered work in accord with this section, you -must place, in the relevant source files, a statement of the -additional terms that apply to those files, or a notice indicating -where to find the applicable terms. - - Additional terms, permissive or non-permissive, may be stated in the -form of a separately written license, or stated as exceptions; -the above requirements apply either way. - - 8. Termination. - - You may not propagate or modify a covered work except as expressly -provided under this License. Any attempt otherwise to propagate or -modify it is void, and will automatically terminate your rights under -this License (including any patent licenses granted under the third -paragraph of section 11). - - However, if you cease all violation of this License, then your -license from a particular copyright holder is reinstated (a) -provisionally, unless and until the copyright holder explicitly and -finally terminates your license, and (b) permanently, if the copyright -holder fails to notify you of the violation by some reasonable means -prior to 60 days after the cessation. 
- - Moreover, your license from a particular copyright holder is -reinstated permanently if the copyright holder notifies you of the -violation by some reasonable means, this is the first time you have -received notice of violation of this License (for any work) from that -copyright holder, and you cure the violation prior to 30 days after -your receipt of the notice. - - Termination of your rights under this section does not terminate the -licenses of parties who have received copies or rights from you under -this License. If your rights have been terminated and not permanently -reinstated, you do not qualify to receive new licenses for the same -material under section 10. - - 9. Acceptance Not Required for Having Copies. - - You are not required to accept this License in order to receive or -run a copy of the Program. Ancillary propagation of a covered work -occurring solely as a consequence of using peer-to-peer transmission -to receive a copy likewise does not require acceptance. However, -nothing other than this License grants you permission to propagate or -modify any covered work. These actions infringe copyright if you do -not accept this License. Therefore, by modifying or propagating a -covered work, you indicate your acceptance of this License to do so. - - 10. Automatic Licensing of Downstream Recipients. - - Each time you convey a covered work, the recipient automatically -receives a license from the original licensors, to run, modify and -propagate that work, subject to this License. You are not responsible -for enforcing compliance by third parties with this License. - - An "entity transaction" is a transaction transferring control of an -organization, or substantially all assets of one, or subdividing an -organization, or merging organizations. If propagation of a covered -work results from an entity transaction, each party to that -transaction who receives a copy of the work also receives whatever -licenses to the work the party's predecessor in interest had or could -give under the previous paragraph, plus a right to possession of the -Corresponding Source of the work from the predecessor in interest, if -the predecessor has it or can get it with reasonable efforts. - - You may not impose any further restrictions on the exercise of the -rights granted or affirmed under this License. For example, you may -not impose a license fee, royalty, or other charge for exercise of -rights granted under this License, and you may not initiate litigation -(including a cross-claim or counterclaim in a lawsuit) alleging that -any patent claim is infringed by making, using, selling, offering for -sale, or importing the Program or any portion of it. - - 11. Patents. - - A "contributor" is a copyright holder who authorizes use under this -License of the Program or a work on which the Program is based. The -work thus licensed is called the contributor's "contributor version". - - A contributor's "essential patent claims" are all patent claims -owned or controlled by the contributor, whether already acquired or -hereafter acquired, that would be infringed by some manner, permitted -by this License, of making, using, or selling its contributor version, -but do not include claims that would be infringed only as a -consequence of further modification of the contributor version. For -purposes of this definition, "control" includes the right to grant -patent sublicenses in a manner consistent with the requirements of -this License. 
- - Each contributor grants you a non-exclusive, worldwide, royalty-free -patent license under the contributor's essential patent claims, to -make, use, sell, offer for sale, import and otherwise run, modify and -propagate the contents of its contributor version. - - In the following three paragraphs, a "patent license" is any express -agreement or commitment, however denominated, not to enforce a patent -(such as an express permission to practice a patent or covenant not to -sue for patent infringement). To "grant" such a patent license to a -party means to make such an agreement or commitment not to enforce a -patent against the party. - - If you convey a covered work, knowingly relying on a patent license, -and the Corresponding Source of the work is not available for anyone -to copy, free of charge and under the terms of this License, through a -publicly available network server or other readily accessible means, -then you must either (1) cause the Corresponding Source to be so -available, or (2) arrange to deprive yourself of the benefit of the -patent license for this particular work, or (3) arrange, in a manner -consistent with the requirements of this License, to extend the patent -license to downstream recipients. "Knowingly relying" means you have -actual knowledge that, but for the patent license, your conveying the -covered work in a country, or your recipient's use of the covered work -in a country, would infringe one or more identifiable patents in that -country that you have reason to believe are valid. - - If, pursuant to or in connection with a single transaction or -arrangement, you convey, or propagate by procuring conveyance of, a -covered work, and grant a patent license to some of the parties -receiving the covered work authorizing them to use, propagate, modify -or convey a specific copy of the covered work, then the patent license -you grant is automatically extended to all recipients of the covered -work and works based on it. - - A patent license is "discriminatory" if it does not include within -the scope of its coverage, prohibits the exercise of, or is -conditioned on the non-exercise of one or more of the rights that are -specifically granted under this License. You may not convey a covered -work if you are a party to an arrangement with a third party that is -in the business of distributing software, under which you make payment -to the third party based on the extent of your activity of conveying -the work, and under which the third party grants, to any of the -parties who would receive the covered work from you, a discriminatory -patent license (a) in connection with copies of the covered work -conveyed by you (or copies made from those copies), or (b) primarily -for and in connection with specific products or compilations that -contain the covered work, unless you entered into that arrangement, -or that patent license was granted, prior to 28 March 2007. - - Nothing in this License shall be construed as excluding or limiting -any implied license or other defenses to infringement that may -otherwise be available to you under applicable patent law. - - 12. No Surrender of Others' Freedom. - - If conditions are imposed on you (whether by court order, agreement or -otherwise) that contradict the conditions of this License, they do not -excuse you from the conditions of this License. 
If you cannot convey a -covered work so as to satisfy simultaneously your obligations under this -License and any other pertinent obligations, then as a consequence you may -not convey it at all. For example, if you agree to terms that obligate you -to collect a royalty for further conveying from those to whom you convey -the Program, the only way you could satisfy both those terms and this -License would be to refrain entirely from conveying the Program. - - 13. Use with the GNU Affero General Public License. - - Notwithstanding any other provision of this License, you have -permission to link or combine any covered work with a work licensed -under version 3 of the GNU Affero General Public License into a single -combined work, and to convey the resulting work. The terms of this -License will continue to apply to the part which is the covered work, -but the special requirements of the GNU Affero General Public License, -section 13, concerning interaction through a network will apply to the -combination as such. - - 14. Revised Versions of this License. - - The Free Software Foundation may publish revised and/or new versions of -the GNU General Public License from time to time. Such new versions will -be similar in spirit to the present version, but may differ in detail to -address new problems or concerns. - - Each version is given a distinguishing version number. If the -Program specifies that a certain numbered version of the GNU General -Public License "or any later version" applies to it, you have the -option of following the terms and conditions either of that numbered -version or of any later version published by the Free Software -Foundation. If the Program does not specify a version number of the -GNU General Public License, you may choose any version ever published -by the Free Software Foundation. - - If the Program specifies that a proxy can decide which future -versions of the GNU General Public License can be used, that proxy's -public statement of acceptance of a version permanently authorizes you -to choose that version for the Program. - - Later license versions may give you additional or different -permissions. However, no additional obligations are imposed on any -author or copyright holder as a result of your choosing to follow a -later version. - - 15. Disclaimer of Warranty. - - THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY -APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT -HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY -OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, -THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM -IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF -ALL NECESSARY SERVICING, REPAIR OR CORRECTION. - - 16. Limitation of Liability. - - IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING -WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS -THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY -GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE -USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF -DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD -PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), -EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF -SUCH DAMAGES. - - 17. 
Interpretation of Sections 15 and 16.
-
-  If the disclaimer of warranty and limitation of liability provided
-above cannot be given local legal effect according to their terms,
-reviewing courts shall apply local law that most closely approximates
-an absolute waiver of all civil liability in connection with the
-Program, unless a warranty or assumption of liability accompanies a
-copy of the Program in return for a fee.
-
-                     END OF TERMS AND CONDITIONS
-
-            How to Apply These Terms to Your New Programs
-
-  If you develop a new program, and you want it to be of the greatest
-possible use to the public, the best way to achieve this is to make it
-free software which everyone can redistribute and change under these terms.
-
-  To do so, attach the following notices to the program.  It is safest
-to attach them to the start of each source file to most effectively
-state the exclusion of warranty; and each file should have at least
-the "copyright" line and a pointer to where the full notice is found.
-
-    <one line to give the program's name and a brief idea of what it does.>
-    Copyright (C) <year>  <name of author>
-
-    This program is free software: you can redistribute it and/or modify
-    it under the terms of the GNU General Public License as published by
-    the Free Software Foundation, either version 3 of the License, or
-    (at your option) any later version.
-
-    This program is distributed in the hope that it will be useful,
-    but WITHOUT ANY WARRANTY; without even the implied warranty of
-    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-    GNU General Public License for more details.
-
-    You should have received a copy of the GNU General Public License
-    along with this program.  If not, see <https://www.gnu.org/licenses/>.
-
-Also add information on how to contact you by electronic and paper mail.
-
-  If the program does terminal interaction, make it output a short
-notice like this when it starts in an interactive mode:
-
-    <program>  Copyright (C) <year>  <name of author>
-    This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
-    This is free software, and you are welcome to redistribute it
-    under certain conditions; type `show c' for details.
-
-The hypothetical commands `show w' and `show c' should show the appropriate
-parts of the General Public License.  Of course, your program's commands
-might be different; for a GUI interface, you would use an "about box".
-
-  You should also get your employer (if you work as a programmer) or school,
-if any, to sign a "copyright disclaimer" for the program, if necessary.
-For more information on this, and how to apply and follow the GNU GPL, see
-<https://www.gnu.org/licenses/>.
-
-  The GNU General Public License does not permit incorporating your program
-into proprietary programs.  If your program is a subroutine library, you
-may consider it more useful to permit linking proprietary applications with
-the library.  If this is what you want to do, use the GNU Lesser General
-Public License instead of this License.  But first, please read
-<https://www.gnu.org/philosophy/why-not-lgpl.html>.
diff --git a/src/yolov5_ros/src/yolov5/README.md b/src/yolov5_ros/src/yolov5/README.md
deleted file mode 100644
index da8bf1d..0000000
--- a/src/yolov5_ros/src/yolov5/README.md
+++ /dev/null
@@ -1,371 +0,0 @@
-
- - English | [简体中文](.github/README_cn.md) -
- YOLOv5 🚀 is a family of object detection architectures and models pretrained on the COCO dataset, and represents Ultralytics - open-source research into future vision AI methods, incorporating lessons learned and best practices evolved over thousands of hours of research and development. -

-
-## Documentation
-
-See the [YOLOv5 Docs](https://docs.ultralytics.com) for full documentation on training, testing and deployment.
-
-## Quick Start Examples
- -
-Install - -Clone repo and install [requirements.txt](https://github.com/ultralytics/yolov5/blob/master/requirements.txt) in a -[**Python>=3.7.0**](https://www.python.org/) environment, including -[**PyTorch>=1.7**](https://pytorch.org/get-started/locally/). - -```bash -git clone https://github.com/ultralytics/yolov5 # clone -cd yolov5 -pip install -r requirements.txt # install -``` - -
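-
-A quick sanity check that the environment is ready (a minimal sketch, assuming the install above) is to print the detected PyTorch version and CUDA availability:
-
-```bash
-python -c "import torch; print(torch.__version__, torch.cuda.is_available())"  # e.g. "1.12.1 True" on a working GPU setup
-```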
- -
-Inference - -YOLOv5 [PyTorch Hub](https://github.com/ultralytics/yolov5/issues/36) inference. [Models](https://github.com/ultralytics/yolov5/tree/master/models) download automatically from the latest -YOLOv5 [release](https://github.com/ultralytics/yolov5/releases). - -```python -import torch - -# Model -model = torch.hub.load('ultralytics/yolov5', 'yolov5s') # or yolov5n - yolov5x6, custom - -# Images -img = 'https://ultralytics.com/images/zidane.jpg' # or file, Path, PIL, OpenCV, numpy, list - -# Inference -results = model(img) - -# Results -results.print() # or .show(), .save(), .crop(), .pandas(), etc. -``` - -
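-
-As a follow-up, detections can be inspected programmatically through the results object's pandas accessor (a minimal sketch; the 0.5 threshold is illustrative):
-
-```python
-df = results.pandas().xyxy[0]  # one row per detection: xmin, ymin, xmax, ymax, confidence, class, name
-print(df[df['confidence'] > 0.5])  # e.g. keep only confident detections
-```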
- -
-Inference with detect.py - -`detect.py` runs inference on a variety of sources, downloading [models](https://github.com/ultralytics/yolov5/tree/master/models) automatically from -the latest YOLOv5 [release](https://github.com/ultralytics/yolov5/releases) and saving results to `runs/detect`. - -```bash -python detect.py --source 0 # webcam - img.jpg # image - vid.mp4 # video - path/ # directory - 'path/*.jpg' # glob - 'https://youtu.be/Zgi9g1ksQHc' # YouTube - 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream -``` - -
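-
-For example, one possible single-image invocation with a raised confidence threshold and per-image label files (flags as defined in `detect.py`):
-
-```bash
-python detect.py --weights yolov5s.pt --source data/images/bus.jpg --conf-thres 0.4 --save-txt
-```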
- -
-Training - -The commands below reproduce YOLOv5 [COCO](https://github.com/ultralytics/yolov5/blob/master/data/scripts/get_coco.sh) -results. [Models](https://github.com/ultralytics/yolov5/tree/master/models) -and [datasets](https://github.com/ultralytics/yolov5/tree/master/data) download automatically from the latest -YOLOv5 [release](https://github.com/ultralytics/yolov5/releases). Training times for YOLOv5n/s/m/l/x are -1/2/4/6/8 days on a V100 GPU ([Multi-GPU](https://github.com/ultralytics/yolov5/issues/475) times faster). Use the -largest `--batch-size` possible, or pass `--batch-size -1` for -YOLOv5 [AutoBatch](https://github.com/ultralytics/yolov5/pull/5092). Batch sizes shown for V100-16GB. - -```bash -python train.py --data coco.yaml --cfg yolov5n.yaml --weights '' --batch-size 128 - yolov5s 64 - yolov5m 40 - yolov5l 24 - yolov5x 16 -``` - - - -
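-
-Multi-GPU [DDP](https://github.com/ultralytics/yolov5/issues/475) training uses the same entry point; a sketch for two GPUs (scale `--nproc_per_node`, `--device` and the total `--batch-size` to your hardware):
-
-```bash
-python -m torch.distributed.run --nproc_per_node 2 train.py --data coco.yaml --cfg yolov5s.yaml --weights '' --batch-size 128 --device 0,1
-```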
- -
-Tutorials - -- [Train Custom Data](https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data)  🚀 RECOMMENDED -- [Tips for Best Training Results](https://github.com/ultralytics/yolov5/wiki/Tips-for-Best-Training-Results)  ☘️ - RECOMMENDED -- [Multi-GPU Training](https://github.com/ultralytics/yolov5/issues/475) -- [PyTorch Hub](https://github.com/ultralytics/yolov5/issues/36) 🌟 NEW -- [TFLite, ONNX, CoreML, TensorRT Export](https://github.com/ultralytics/yolov5/issues/251) 🚀 -- [Test-Time Augmentation (TTA)](https://github.com/ultralytics/yolov5/issues/303) -- [Model Ensembling](https://github.com/ultralytics/yolov5/issues/318) -- [Model Pruning/Sparsity](https://github.com/ultralytics/yolov5/issues/304) -- [Hyperparameter Evolution](https://github.com/ultralytics/yolov5/issues/607) -- [Transfer Learning with Frozen Layers](https://github.com/ultralytics/yolov5/issues/1314) -- [Architecture Summary](https://github.com/ultralytics/yolov5/issues/6998) 🌟 NEW -- [Weights & Biases Logging](https://github.com/ultralytics/yolov5/issues/1289) -- [Roboflow for Datasets, Labeling, and Active Learning](https://github.com/ultralytics/yolov5/issues/4975)  🌟 NEW -- [ClearML Logging](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/clearml) 🌟 NEW -- [Deci Platform](https://github.com/ultralytics/yolov5/wiki/Deci-Platform) 🌟 NEW -- [Comet Logging](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/comet) 🌟 NEW - -
-
-## Integrations
-
-|Comet ⭐ NEW|Deci ⭐ NEW|ClearML ⭐ NEW|Roboflow|Weights & Biases|
-|:-:|:-:|:-:|:-:|:-:|
-|Visualize model metrics and predictions and upload models and datasets in realtime with [Comet](https://www.comet.com/site/?ref=yolov5&utm_source=yolov5&utm_medium=affilliate&utm_campaign=yolov5_comet_integration)|Automatically compile and quantize YOLOv5 for better inference performance in one click at [Deci](https://bit.ly/yolov5-deci-platform)|Automatically track, visualize and even remotely train YOLOv5 using [ClearML](https://cutt.ly/yolov5-readme-clearml) (open-source!)|Label and export your custom datasets directly to YOLOv5 for training with [Roboflow](https://roboflow.com/?ref=ultralytics)|Automatically track and visualize all your YOLOv5 training runs in the cloud with [Weights & Biases](https://wandb.ai/site?utm_campaign=repo_yolo_readme)|
-
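-As one example, Comet experiment logging switches on once the `comet_ml` package is installed and an API key is exported before training (a sketch assuming a Comet account; see the Comet Logging tutorial above for the authoritative setup):
-
-```bash
-pip install comet_ml                 # install the Comet integration
-export COMET_API_KEY=<your_api_key>  # key from your comet.com account (placeholder)
-python train.py --img 640 --batch 64 --epochs 3 --data coco128.yaml --weights yolov5s.pt  # runs are logged automatically
-```
-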
-## Why YOLOv5
-
-YOLOv5-P5 640 Figure (click to expand)
-
-
-Figure Notes (click to expand)
-
-- **COCO AP val** denotes mAP@0.5:0.95 metric measured on the 5000-image [COCO val2017](http://cocodataset.org) dataset over various inference sizes from 256 to 1536.
-- **GPU Speed** measures average inference time per image on [COCO val2017](http://cocodataset.org) dataset using an [AWS p3.2xlarge](https://aws.amazon.com/ec2/instance-types/p3/) V100 instance at batch-size 32.
-- **EfficientDet** data from [google/automl](https://github.com/google/automl) at batch size 8.
-- **Reproduce** by `python val.py --task study --data coco.yaml --iou 0.7 --weights yolov5n6.pt yolov5s6.pt yolov5m6.pt yolov5l6.pt yolov5x6.pt`
-
-
-### Pretrained Checkpoints
-
-| Model | size (pixels) | mAP val 0.5:0.95 | mAP val 0.5 | Speed CPU b1 (ms) | Speed V100 b1 (ms) | Speed V100 b32 (ms) | params (M) | FLOPs @640 (B) |
-|-------|---------------|------------------|-------------|-------------------|--------------------|---------------------|------------|----------------|
-| [YOLOv5n](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5n.pt) | 640 | 28.0 | 45.7 | **45** | **6.3** | **0.6** | **1.9** | **4.5** |
-| [YOLOv5s](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5s.pt) | 640 | 37.4 | 56.8 | 98 | 6.4 | 0.9 | 7.2 | 16.5 |
-| [YOLOv5m](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5m.pt) | 640 | 45.4 | 64.1 | 224 | 8.2 | 1.7 | 21.2 | 49.0 |
-| [YOLOv5l](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5l.pt) | 640 | 49.0 | 67.3 | 430 | 10.1 | 2.7 | 46.5 | 109.1 |
-| [YOLOv5x](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5x.pt) | 640 | 50.7 | 68.9 | 766 | 12.1 | 4.8 | 86.7 | 205.7 |
-| | | | | | | | | |
-| [YOLOv5n6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5n6.pt) | 1280 | 36.0 | 54.4 | 153 | 8.1 | 2.1 | 3.2 | 4.6 |
-| [YOLOv5s6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5s6.pt) | 1280 | 44.8 | 63.7 | 385 | 8.2 | 3.6 | 12.6 | 16.8 |
-| [YOLOv5m6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5m6.pt) | 1280 | 51.3 | 69.3 | 887 | 11.1 | 6.8 | 35.7 | 50.0 |
-| [YOLOv5l6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5l6.pt) | 1280 | 53.7 | 71.3 | 1784 | 15.8 | 10.5 | 76.8 | 111.4 |
-| [YOLOv5x6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5x6.pt) + [TTA][TTA] | 1280 / 1536 | 55.0 / **55.8** | 72.7 / **72.7** | 3136 / - | 26.2 / - | 19.4 / - | 140.7 / - | 209.8 / - |
-
-
-Table Notes (click to expand)
-
-- All checkpoints are trained to 300 epochs with default settings. Nano and Small models use [hyp.scratch-low.yaml](https://github.com/ultralytics/yolov5/blob/master/data/hyps/hyp.scratch-low.yaml) hyps, all others use [hyp.scratch-high.yaml](https://github.com/ultralytics/yolov5/blob/master/data/hyps/hyp.scratch-high.yaml).
-- **mAP val** values are for single-model single-scale on [COCO val2017](http://cocodataset.org) dataset. Reproduce by `python val.py --data coco.yaml --img 640 --conf 0.001 --iou 0.65`
-- **Speed** averaged over COCO val images using an [AWS p3.2xlarge](https://aws.amazon.com/ec2/instance-types/p3/) instance. NMS times (~1 ms/img) not included. Reproduce by `python val.py --data coco.yaml --img 640 --task speed --batch 1`
-- **TTA** [Test Time Augmentation](https://github.com/ultralytics/yolov5/issues/303) includes reflection and scale augmentations. Reproduce by `python val.py --data coco.yaml --img 1536 --iou 0.7 --augment`
-
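-For example, a P6 checkpoint from the table can be loaded through PyTorch Hub and run at its native 1280 resolution (a short sketch; the `size` argument sets the inference resolution):
-
-```python
-import torch
-
-model = torch.hub.load('ultralytics/yolov5', 'yolov5m6')  # P6 model trained at 1280
-results = model('https://ultralytics.com/images/zidane.jpg', size=1280)  # inference at 1280
-results.print()
-```
-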
-
-## Classification ⭐ NEW
- -YOLOv5 [release v6.2](https://github.com/ultralytics/yolov5/releases) brings support for classification model training, validation, prediction and export! We've made training classifier models super simple. Click below to get started. - -
- Classification Checkpoints (click to expand) - -
-
-We trained YOLOv5-cls classification models on ImageNet for 90 epochs using a 4xA100 instance, and we trained ResNet and EfficientNet models alongside with the same default training settings to compare. We exported all models to ONNX FP32 for CPU speed tests and to TensorRT FP16 for GPU speed tests. We ran all speed tests on Google [Colab Pro](https://colab.research.google.com/signup) for easy reproducibility.
-
-| Model | size (pixels) | acc top1 | acc top5 | Training 90 epochs 4xA100 (hours) | Speed ONNX CPU (ms) | Speed TensorRT V100 (ms) | params (M) | FLOPs @224 (B) |
-|-------|---------------|----------|----------|-----------------------------------|---------------------|--------------------------|------------|----------------|
-| [YOLOv5n-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5n-cls.pt) | 224 | 64.6 | 85.4 | 7:59 | **3.3** | **0.5** | **2.5** | **0.5** |
-| [YOLOv5s-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5s-cls.pt) | 224 | 71.5 | 90.2 | 8:09 | 6.6 | 0.6 | 5.4 | 1.4 |
-| [YOLOv5m-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5m-cls.pt) | 224 | 75.9 | 92.9 | 10:06 | 15.5 | 0.9 | 12.9 | 3.9 |
-| [YOLOv5l-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5l-cls.pt) | 224 | 78.0 | 94.0 | 11:56 | 26.9 | 1.4 | 26.5 | 8.5 |
-| [YOLOv5x-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5x-cls.pt) | 224 | **79.0** | **94.4** | 15:04 | 54.3 | 1.8 | 48.1 | 15.9 |
-| | | | | | | | | |
-| [ResNet18](https://github.com/ultralytics/yolov5/releases/download/v6.2/resnet18.pt) | 224 | 70.3 | 89.5 | **6:47** | 11.2 | 0.5 | 11.7 | 3.7 |
-| [ResNet34](https://github.com/ultralytics/yolov5/releases/download/v6.2/resnet34.pt) | 224 | 73.9 | 91.8 | 8:33 | 20.6 | 0.9 | 21.8 | 7.4 |
-| [ResNet50](https://github.com/ultralytics/yolov5/releases/download/v6.2/resnet50.pt) | 224 | 76.8 | 93.4 | 11:10 | 23.4 | 1.0 | 25.6 | 8.5 |
-| [ResNet101](https://github.com/ultralytics/yolov5/releases/download/v6.2/resnet101.pt) | 224 | 78.5 | 94.3 | 17:10 | 42.1 | 1.9 | 44.5 | 15.9 |
-| | | | | | | | | |
-| [EfficientNet_b0](https://github.com/ultralytics/yolov5/releases/download/v6.2/efficientnet_b0.pt) | 224 | 75.1 | 92.4 | 13:03 | 12.5 | 1.3 | 5.3 | 1.0 |
-| [EfficientNet_b1](https://github.com/ultralytics/yolov5/releases/download/v6.2/efficientnet_b1.pt) | 224 | 76.4 | 93.2 | 17:04 | 14.9 | 1.6 | 7.8 | 1.5 |
-| [EfficientNet_b2](https://github.com/ultralytics/yolov5/releases/download/v6.2/efficientnet_b2.pt) | 224 | 76.6 | 93.4 | 17:10 | 15.9 | 1.6 | 9.1 | 1.7 |
-| [EfficientNet_b3](https://github.com/ultralytics/yolov5/releases/download/v6.2/efficientnet_b3.pt) | 224 | 77.7 | 94.0 | 19:19 | 18.9 | 1.9 | 12.2 | 2.4 |
-
-Table Notes (click to expand)
-
-- All checkpoints are trained to 90 epochs with SGD optimizer with `lr0=0.001` and `weight_decay=5e-5` at image size 224 and all default settings. Runs logged to https://wandb.ai/glenn-jocher/YOLOv5-Classifier-v6-2
-- **Accuracy** values are for single-model single-scale on [ImageNet-1k](https://www.image-net.org/index.php) dataset. Reproduce by `python classify/val.py --data ../datasets/imagenet --img 224`
-- **Speed** averaged over 100 inference images using a Google [Colab Pro](https://colab.research.google.com/signup) V100 High-RAM instance. Reproduce by `python classify/val.py --data ../datasets/imagenet --img 224 --batch 1`
-- **Export** to ONNX at FP32 and TensorRT at FP16 done with `export.py`. Reproduce by `python export.py --weights yolov5s-cls.pt --include engine onnx --imgsz 224`
-
- Classification Usage Examples (click to expand) - -### Train -YOLOv5 classification training supports auto-download of MNIST, Fashion-MNIST, CIFAR10, CIFAR100, Imagenette, Imagewoof, and ImageNet datasets with the `--data` argument. To start training on MNIST for example use `--data mnist`. - -```bash -# Single-GPU -python classify/train.py --model yolov5s-cls.pt --data cifar100 --epochs 5 --img 224 --batch 128 - -# Multi-GPU DDP -python -m torch.distributed.run --nproc_per_node 4 --master_port 1 classify/train.py --model yolov5s-cls.pt --data imagenet --epochs 5 --img 224 --device 0,1,2,3 -``` - -### Val -Validate YOLOv5m-cls accuracy on ImageNet-1k dataset: -```bash -bash data/scripts/get_imagenet.sh --val # download ImageNet val split (6.3G, 50000 images) -python classify/val.py --weights yolov5m-cls.pt --data ../datasets/imagenet --img 224 # validate -``` - -### Predict -Use pretrained YOLOv5s-cls.pt to predict bus.jpg: -```bash -python classify/predict.py --weights yolov5s-cls.pt --data data/images/bus.jpg -``` -```python -model = torch.hub.load('ultralytics/yolov5', 'custom', 'yolov5s-cls.pt') # load from PyTorch Hub -``` - -### Export -Export a group of trained YOLOv5s-cls, ResNet and EfficientNet models to ONNX and TensorRT: -```bash -python export.py --weights yolov5s-cls.pt resnet50.pt efficientnet_b0.pt --include onnx engine --img 224 -``` -
-
-## Environments
-
-Get started in seconds with our verified environments.
-
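-For example, the Docker environment can be pulled and run as below (a sketch based on the ultralytics/yolov5 Docker Hub image; `--gpus all` assumes the NVIDIA container toolkit is installed):
-
-```bash
-docker pull ultralytics/yolov5:latest                           # pull the image
-docker run --ipc=host -it --gpus all ultralytics/yolov5:latest  # start an interactive GPU container
-```
-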
-
-## Contribute
-
-We love your input! We want to make contributing to YOLOv5 as easy and transparent as possible. Please see our [Contributing Guide](CONTRIBUTING.md) to get started, and fill out the [YOLOv5 Survey](https://ultralytics.com/survey?utm_source=github&utm_medium=social&utm_campaign=Survey) to send us feedback on your experiences. Thank you to all our contributors!
-
-## Contact
- -For YOLOv5 bugs and feature requests please visit [GitHub Issues](https://github.com/ultralytics/yolov5/issues). For business inquiries or -professional support requests please visit [https://ultralytics.com/contact](https://ultralytics.com/contact). - -
-
- -[assets]: https://github.com/ultralytics/yolov5/releases -[tta]: https://github.com/ultralytics/yolov5/issues/303 diff --git a/src/yolov5_ros/src/yolov5/best.pt b/src/yolov5_ros/src/yolov5/best.pt deleted file mode 100644 index 52d5f43..0000000 Binary files a/src/yolov5_ros/src/yolov5/best.pt and /dev/null differ diff --git a/src/yolov5_ros/src/yolov5/classify/predict.py b/src/yolov5_ros/src/yolov5/classify/predict.py deleted file mode 100644 index 878cf48..0000000 --- a/src/yolov5_ros/src/yolov5/classify/predict.py +++ /dev/null @@ -1,215 +0,0 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license -""" -Run YOLOv5 classification inference on images, videos, directories, globs, YouTube, webcam, streams, etc. - -Usage - sources: - $ python classify/predict.py --weights yolov5s-cls.pt --source 0 # webcam - img.jpg # image - vid.mp4 # video - path/ # directory - 'path/*.jpg' # glob - 'https://youtu.be/Zgi9g1ksQHc' # YouTube - 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream - -Usage - formats: - $ python classify/predict.py --weights yolov5s-cls.pt # PyTorch - yolov5s-cls.torchscript # TorchScript - yolov5s-cls.onnx # ONNX Runtime or OpenCV DNN with --dnn - yolov5s-cls.xml # OpenVINO - yolov5s-cls.engine # TensorRT - yolov5s-cls.mlmodel # CoreML (macOS-only) - yolov5s-cls_saved_model # TensorFlow SavedModel - yolov5s-cls.pb # TensorFlow GraphDef - yolov5s-cls.tflite # TensorFlow Lite - yolov5s-cls_edgetpu.tflite # TensorFlow Edge TPU - yolov5s-cls_paddle_model # PaddlePaddle -""" - -import argparse -import os -import platform -import sys -from pathlib import Path - -import torch -import torch.nn.functional as F - -FILE = Path(__file__).resolve() -ROOT = FILE.parents[1] # YOLOv5 root directory -if str(ROOT) not in sys.path: - sys.path.append(str(ROOT)) # add ROOT to PATH -ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative - -from models.common import DetectMultiBackend -from utils.augmentations import classify_transforms -from utils.dataloaders import IMG_FORMATS, VID_FORMATS, LoadImages, LoadStreams -from utils.general import (LOGGER, Profile, check_file, check_img_size, check_imshow, check_requirements, colorstr, cv2, - increment_path, print_args, strip_optimizer) -from utils.plots import Annotator -from utils.torch_utils import select_device, smart_inference_mode - - -@smart_inference_mode() -def run( - weights=ROOT / 'yolov5s-cls.pt', # model.pt path(s) - source=ROOT / 'data/images', # file/dir/URL/glob, 0 for webcam - data=ROOT / 'data/coco128.yaml', # dataset.yaml path - imgsz=(224, 224), # inference size (height, width) - device='', # cuda device, i.e. 
0 or 0,1,2,3 or cpu - view_img=False, # show results - save_txt=False, # save results to *.txt - nosave=False, # do not save images/videos - augment=False, # augmented inference - visualize=False, # visualize features - update=False, # update all models - project=ROOT / 'runs/predict-cls', # save results to project/name - name='exp', # save results to project/name - exist_ok=False, # existing project/name ok, do not increment - half=False, # use FP16 half-precision inference - dnn=False, # use OpenCV DNN for ONNX inference - vid_stride=1, # video frame-rate stride -): - source = str(source) - save_img = not nosave and not source.endswith('.txt') # save inference images - is_file = Path(source).suffix[1:] in (IMG_FORMATS + VID_FORMATS) - is_url = source.lower().startswith(('rtsp://', 'rtmp://', 'http://', 'https://')) - webcam = source.isnumeric() or source.endswith('.txt') or (is_url and not is_file) - if is_url and is_file: - source = check_file(source) # download - - # Directories - save_dir = increment_path(Path(project) / name, exist_ok=exist_ok) # increment run - (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir - - # Load model - device = select_device(device) - model = DetectMultiBackend(weights, device=device, dnn=dnn, data=data, fp16=half) - stride, names, pt = model.stride, model.names, model.pt - imgsz = check_img_size(imgsz, s=stride) # check image size - - # Dataloader - if webcam: - view_img = check_imshow() - dataset = LoadStreams(source, img_size=imgsz, transforms=classify_transforms(imgsz[0]), vid_stride=vid_stride) - bs = len(dataset) # batch_size - else: - dataset = LoadImages(source, img_size=imgsz, transforms=classify_transforms(imgsz[0]), vid_stride=vid_stride) - bs = 1 # batch_size - vid_path, vid_writer = [None] * bs, [None] * bs - - # Run inference - model.warmup(imgsz=(1 if pt else bs, 3, *imgsz)) # warmup - seen, windows, dt = 0, [], (Profile(), Profile(), Profile()) - for path, im, im0s, vid_cap, s in dataset: - with dt[0]: - im = torch.Tensor(im).to(device) - im = im.half() if model.fp16 else im.float() # uint8 to fp16/32 - if len(im.shape) == 3: - im = im[None] # expand for batch dim - - # Inference - with dt[1]: - results = model(im) - - # Post-process - with dt[2]: - pred = F.softmax(results, dim=1) # probabilities - - # Process predictions - for i, prob in enumerate(pred): # per image - seen += 1 - if webcam: # batch_size >= 1 - p, im0 = path[i], im0s[i].copy() - s += f'{i}: ' - else: - p, im0 = path, im0s.copy() - - p = Path(p) # to Path - save_path = str(save_dir / p.name) # im.jpg - s += '%gx%g ' % im.shape[2:] # print string - annotator = Annotator(im0, example=str(names), pil=True) - - # Print results - top5i = prob.argsort(0, descending=True)[:5].tolist() # top 5 indices - s += f"{', '.join(f'{names[j]} {prob[j]:.2f}' for j in top5i)}, " - - # Write results - if save_img or view_img: # Add bbox to image - text = '\n'.join(f'{prob[j]:.2f} {names[j]}' for j in top5i) - annotator.text((32, 32), text, txt_color=(255, 255, 255)) - - # Stream results - im0 = annotator.result() - if view_img: - if platform.system() == 'Linux' and p not in windows: - windows.append(p) - cv2.namedWindow(str(p), cv2.WINDOW_NORMAL | cv2.WINDOW_KEEPRATIO) # allow window resize (Linux) - cv2.resizeWindow(str(p), im0.shape[1], im0.shape[0]) - cv2.imshow(str(p), im0) - cv2.waitKey(1) # 1 millisecond - - # Save results (image with detections) - if save_img: - if dataset.mode == 'image': - cv2.imwrite(save_path, im0) - else: # 'video' or 
'stream' - if vid_path[i] != save_path: # new video - vid_path[i] = save_path - if isinstance(vid_writer[i], cv2.VideoWriter): - vid_writer[i].release() # release previous video writer - if vid_cap: # video - fps = vid_cap.get(cv2.CAP_PROP_FPS) - w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH)) - h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) - else: # stream - fps, w, h = 30, im0.shape[1], im0.shape[0] - save_path = str(Path(save_path).with_suffix('.mp4')) # force *.mp4 suffix on results videos - vid_writer[i] = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h)) - vid_writer[i].write(im0) - - # Print time (inference-only) - LOGGER.info(f"{s}{dt[1].dt * 1E3:.1f}ms") - - # Print results - t = tuple(x.t / seen * 1E3 for x in dt) # speeds per image - LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {(1, 3, *imgsz)}' % t) - if save_txt or save_img: - s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else '' - LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}{s}") - if update: - strip_optimizer(weights[0]) # update model (to fix SourceChangeWarning) - - -def parse_opt(): - parser = argparse.ArgumentParser() - parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'yolov5s-cls.pt', help='model path(s)') - parser.add_argument('--source', type=str, default=ROOT / 'data/images', help='file/dir/URL/glob, 0 for webcam') - parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='(optional) dataset.yaml path') - parser.add_argument('--imgsz', '--img', '--img-size', nargs='+', type=int, default=[224], help='inference size h,w') - parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu') - parser.add_argument('--view-img', action='store_true', help='show results') - parser.add_argument('--save-txt', action='store_true', help='save results to *.txt') - parser.add_argument('--nosave', action='store_true', help='do not save images/videos') - parser.add_argument('--augment', action='store_true', help='augmented inference') - parser.add_argument('--visualize', action='store_true', help='visualize features') - parser.add_argument('--update', action='store_true', help='update all models') - parser.add_argument('--project', default=ROOT / 'runs/predict-cls', help='save results to project/name') - parser.add_argument('--name', default='exp', help='save results to project/name') - parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment') - parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference') - parser.add_argument('--dnn', action='store_true', help='use OpenCV DNN for ONNX inference') - parser.add_argument('--vid-stride', type=int, default=1, help='video frame-rate stride') - opt = parser.parse_args() - opt.imgsz *= 2 if len(opt.imgsz) == 1 else 1 # expand - print_args(vars(opt)) - return opt - - -def main(opt): - check_requirements(exclude=('tensorboard', 'thop')) - run(**vars(opt)) - - -if __name__ == "__main__": - opt = parse_opt() - main(opt) diff --git a/src/yolov5_ros/src/yolov5/classify/train.py b/src/yolov5_ros/src/yolov5/classify/train.py deleted file mode 100644 index 2233672..0000000 --- a/src/yolov5_ros/src/yolov5/classify/train.py +++ /dev/null @@ -1,331 +0,0 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license -""" -Train a YOLOv5 classifier model on a classification dataset - -Usage - Single-GPU training: - $ python 
classify/train.py --model yolov5s-cls.pt --data imagenette160 --epochs 5 --img 128 - -Usage - Multi-GPU DDP training: - $ python -m torch.distributed.run --nproc_per_node 4 --master_port 1 classify/train.py --model yolov5s-cls.pt --data imagenet --epochs 5 --img 224 --device 0,1,2,3 - -Datasets: --data mnist, fashion-mnist, cifar10, cifar100, imagenette, imagewoof, imagenet, or 'path/to/data' -YOLOv5-cls models: --model yolov5n-cls.pt, yolov5s-cls.pt, yolov5m-cls.pt, yolov5l-cls.pt, yolov5x-cls.pt -Torchvision models: --model resnet50, efficientnet_b0, etc. See https://pytorch.org/vision/stable/models.html -""" - -import argparse -import os -import subprocess -import sys -import time -from copy import deepcopy -from datetime import datetime -from pathlib import Path - -import torch -import torch.distributed as dist -import torch.hub as hub -import torch.optim.lr_scheduler as lr_scheduler -import torchvision -from torch.cuda import amp -from tqdm import tqdm - -FILE = Path(__file__).resolve() -ROOT = FILE.parents[1] # YOLOv5 root directory -if str(ROOT) not in sys.path: - sys.path.append(str(ROOT)) # add ROOT to PATH -ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative - -from classify import val as validate -from models.experimental import attempt_load -from models.yolo import ClassificationModel, DetectionModel -from utils.dataloaders import create_classification_dataloader -from utils.general import (DATASETS_DIR, LOGGER, WorkingDirectory, check_git_status, check_requirements, colorstr, - download, increment_path, init_seeds, print_args, yaml_save) -from utils.loggers import GenericLogger -from utils.plots import imshow_cls -from utils.torch_utils import (ModelEMA, model_info, reshape_classifier_output, select_device, smart_DDP, - smart_optimizer, smartCrossEntropyLoss, torch_distributed_zero_first) - -LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1)) # https://pytorch.org/docs/stable/elastic/run.html -RANK = int(os.getenv('RANK', -1)) -WORLD_SIZE = int(os.getenv('WORLD_SIZE', 1)) - - -def train(opt, device): - init_seeds(opt.seed + 1 + RANK, deterministic=True) - save_dir, data, bs, epochs, nw, imgsz, pretrained = \ - opt.save_dir, Path(opt.data), opt.batch_size, opt.epochs, min(os.cpu_count() - 1, opt.workers), \ - opt.imgsz, str(opt.pretrained).lower() == 'true' - cuda = device.type != 'cpu' - - # Directories - wdir = save_dir / 'weights' - wdir.mkdir(parents=True, exist_ok=True) # make dir - last, best = wdir / 'last.pt', wdir / 'best.pt' - - # Save run settings - yaml_save(save_dir / 'opt.yaml', vars(opt)) - - # Logger - logger = GenericLogger(opt=opt, console_logger=LOGGER) if RANK in {-1, 0} else None - - # Download Dataset - with torch_distributed_zero_first(LOCAL_RANK), WorkingDirectory(ROOT): - data_dir = data if data.is_dir() else (DATASETS_DIR / data) - if not data_dir.is_dir(): - LOGGER.info(f'\nDataset not found ⚠️, missing path {data_dir}, attempting download...') - t = time.time() - if str(data) == 'imagenet': - subprocess.run(f"bash {ROOT / 'data/scripts/get_imagenet.sh'}", shell=True, check=True) - else: - url = f'https://github.com/ultralytics/yolov5/releases/download/v1.0/{data}.zip' - download(url, dir=data_dir.parent) - s = f"Dataset download success ✅ ({time.time() - t:.1f}s), saved to {colorstr('bold', data_dir)}\n" - LOGGER.info(s) - - # Dataloaders - nc = len([x for x in (data_dir / 'train').glob('*') if x.is_dir()]) # number of classes - trainloader = create_classification_dataloader(path=data_dir / 'train', - imgsz=imgsz, - batch_size=bs // WORLD_SIZE, - 
augment=True, - cache=opt.cache, - rank=LOCAL_RANK, - workers=nw) - - test_dir = data_dir / 'test' if (data_dir / 'test').exists() else data_dir / 'val' # data/test or data/val - if RANK in {-1, 0}: - testloader = create_classification_dataloader(path=test_dir, - imgsz=imgsz, - batch_size=bs // WORLD_SIZE * 2, - augment=False, - cache=opt.cache, - rank=-1, - workers=nw) - - # Model - with torch_distributed_zero_first(LOCAL_RANK), WorkingDirectory(ROOT): - if Path(opt.model).is_file() or opt.model.endswith('.pt'): - model = attempt_load(opt.model, device='cpu', fuse=False) - elif opt.model in torchvision.models.__dict__: # TorchVision models i.e. resnet50, efficientnet_b0 - model = torchvision.models.__dict__[opt.model](weights='IMAGENET1K_V1' if pretrained else None) - else: - m = hub.list('ultralytics/yolov5') # + hub.list('pytorch/vision') # models - raise ModuleNotFoundError(f'--model {opt.model} not found. Available models are: \n' + '\n'.join(m)) - if isinstance(model, DetectionModel): - LOGGER.warning("WARNING: pass YOLOv5 classifier model with '-cls' suffix, i.e. '--model yolov5s-cls.pt'") - model = ClassificationModel(model=model, nc=nc, cutoff=opt.cutoff or 10) # convert to classification model - reshape_classifier_output(model, nc) # update class count - for m in model.modules(): - if not pretrained and hasattr(m, 'reset_parameters'): - m.reset_parameters() - if isinstance(m, torch.nn.Dropout) and opt.dropout is not None: - m.p = opt.dropout # set dropout - for p in model.parameters(): - p.requires_grad = True # for training - model = model.to(device) - - # Info - if RANK in {-1, 0}: - model.names = trainloader.dataset.classes # attach class names - model.transforms = testloader.dataset.torch_transforms # attach inference transforms - model_info(model) - if opt.verbose: - LOGGER.info(model) - images, labels = next(iter(trainloader)) - file = imshow_cls(images[:25], labels[:25], names=model.names, f=save_dir / 'train_images.jpg') - logger.log_images(file, name='Train Examples') - logger.log_graph(model, imgsz) # log model - - # Optimizer - optimizer = smart_optimizer(model, opt.optimizer, opt.lr0, momentum=0.9, decay=opt.decay) - - # Scheduler - lrf = 0.01 # final lr (fraction of lr0) - # lf = lambda x: ((1 + math.cos(x * math.pi / epochs)) / 2) * (1 - lrf) + lrf # cosine - lf = lambda x: (1 - x / epochs) * (1 - lrf) + lrf # linear - scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf) - # scheduler = lr_scheduler.OneCycleLR(optimizer, max_lr=lr0, total_steps=epochs, pct_start=0.1, - # final_div_factor=1 / 25 / lrf) - - # EMA - ema = ModelEMA(model) if RANK in {-1, 0} else None - - # DDP mode - if cuda and RANK != -1: - model = smart_DDP(model) - - # Train - t0 = time.time() - criterion = smartCrossEntropyLoss(label_smoothing=opt.label_smoothing) # loss function - best_fitness = 0.0 - scaler = amp.GradScaler(enabled=cuda) - val = test_dir.stem # 'val' or 'test' - LOGGER.info(f'Image sizes {imgsz} train, {imgsz} test\n' - f'Using {nw * WORLD_SIZE} dataloader workers\n' - f"Logging results to {colorstr('bold', save_dir)}\n" - f'Starting {opt.model} training on {data} dataset with {nc} classes for {epochs} epochs...\n\n' - f"{'Epoch':>10}{'GPU_mem':>10}{'train_loss':>12}{f'{val}_loss':>12}{'top1_acc':>12}{'top5_acc':>12}") - for epoch in range(epochs): # loop over the dataset multiple times - tloss, vloss, fitness = 0.0, 0.0, 0.0 # train loss, val loss, fitness - model.train() - if RANK != -1: - trainloader.sampler.set_epoch(epoch) - pbar = enumerate(trainloader) - if RANK in 
{-1, 0}: - pbar = tqdm(enumerate(trainloader), total=len(trainloader), bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}') - for i, (images, labels) in pbar: # progress bar - images, labels = images.to(device, non_blocking=True), labels.to(device) - - # Forward - with amp.autocast(enabled=cuda): # stability issues when enabled - loss = criterion(model(images), labels) - - # Backward - scaler.scale(loss).backward() - - # Optimize - scaler.unscale_(optimizer) # unscale gradients - torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=10.0) # clip gradients - scaler.step(optimizer) - scaler.update() - optimizer.zero_grad() - if ema: - ema.update(model) - - if RANK in {-1, 0}: - # Print - tloss = (tloss * i + loss.item()) / (i + 1) # update mean losses - mem = '%.3gG' % (torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0) # (GB) - pbar.desc = f"{f'{epoch + 1}/{epochs}':>10}{mem:>10}{tloss:>12.3g}" + ' ' * 36 - - # Test - if i == len(pbar) - 1: # last batch - top1, top5, vloss = validate.run(model=ema.ema, - dataloader=testloader, - criterion=criterion, - pbar=pbar) # test accuracy, loss - fitness = top1 # define fitness as top1 accuracy - - # Scheduler - scheduler.step() - - # Log metrics - if RANK in {-1, 0}: - # Best fitness - if fitness > best_fitness: - best_fitness = fitness - - # Log - metrics = { - "train/loss": tloss, - f"{val}/loss": vloss, - "metrics/accuracy_top1": top1, - "metrics/accuracy_top5": top5, - "lr/0": optimizer.param_groups[0]['lr']} # learning rate - logger.log_metrics(metrics, epoch) - - # Save model - final_epoch = epoch + 1 == epochs - if (not opt.nosave) or final_epoch: - ckpt = { - 'epoch': epoch, - 'best_fitness': best_fitness, - 'model': deepcopy(ema.ema).half(), # deepcopy(de_parallel(model)).half(), - 'ema': None, # deepcopy(ema.ema).half(), - 'updates': ema.updates, - 'optimizer': None, # optimizer.state_dict(), - 'opt': vars(opt), - 'date': datetime.now().isoformat()} - - # Save last, best and delete - torch.save(ckpt, last) - if best_fitness == fitness: - torch.save(ckpt, best) - del ckpt - - # Train complete - if RANK in {-1, 0} and final_epoch: - LOGGER.info(f'\nTraining complete ({(time.time() - t0) / 3600:.3f} hours)' - f"\nResults saved to {colorstr('bold', save_dir)}" - f"\nPredict: python classify/predict.py --weights {best} --source im.jpg" - f"\nValidate: python classify/val.py --weights {best} --data {data_dir}" - f"\nExport: python export.py --weights {best} --include onnx" - f"\nPyTorch Hub: model = torch.hub.load('ultralytics/yolov5', 'custom', '{best}')" - f"\nVisualize: https://netron.app\n") - - # Plot examples - images, labels = (x[:25] for x in next(iter(testloader))) # first 25 images and labels - pred = torch.max(ema.ema(images.to(device)), 1)[1] - file = imshow_cls(images, labels, pred, model.names, verbose=False, f=save_dir / 'test_images.jpg') - - # Log results - meta = {"epochs": epochs, "top1_acc": best_fitness, "date": datetime.now().isoformat()} - logger.log_images(file, name='Test Examples (true-predicted)', epoch=epoch) - logger.log_model(best, epochs, metadata=meta) - - -def parse_opt(known=False): - parser = argparse.ArgumentParser() - parser.add_argument('--model', type=str, default='yolov5s-cls.pt', help='initial weights path') - parser.add_argument('--data', type=str, default='imagenette160', help='cifar10, cifar100, mnist, imagenet, ...') - parser.add_argument('--epochs', type=int, default=10, help='total training epochs') - parser.add_argument('--batch-size', type=int, default=64, help='total batch 
size for all GPUs') - parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=128, help='train, val image size (pixels)') - parser.add_argument('--nosave', action='store_true', help='only save final checkpoint') - parser.add_argument('--cache', type=str, nargs='?', const='ram', help='--cache images in "ram" (default) or "disk"') - parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu') - parser.add_argument('--workers', type=int, default=8, help='max dataloader workers (per RANK in DDP mode)') - parser.add_argument('--project', default=ROOT / 'runs/train-cls', help='save to project/name') - parser.add_argument('--name', default='exp', help='save to project/name') - parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment') - parser.add_argument('--pretrained', nargs='?', const=True, default=True, help='start from i.e. --pretrained False') - parser.add_argument('--optimizer', choices=['SGD', 'Adam', 'AdamW', 'RMSProp'], default='Adam', help='optimizer') - parser.add_argument('--lr0', type=float, default=0.001, help='initial learning rate') - parser.add_argument('--decay', type=float, default=5e-5, help='weight decay') - parser.add_argument('--label-smoothing', type=float, default=0.1, help='Label smoothing epsilon') - parser.add_argument('--cutoff', type=int, default=None, help='Model layer cutoff index for Classify() head') - parser.add_argument('--dropout', type=float, default=None, help='Dropout (fraction)') - parser.add_argument('--verbose', action='store_true', help='Verbose mode') - parser.add_argument('--seed', type=int, default=0, help='Global training seed') - parser.add_argument('--local_rank', type=int, default=-1, help='Automatic DDP Multi-GPU argument, do not modify') - return parser.parse_known_args()[0] if known else parser.parse_args() - - -def main(opt): - # Checks - if RANK in {-1, 0}: - print_args(vars(opt)) - check_git_status() - check_requirements() - - # DDP mode - device = select_device(opt.device, batch_size=opt.batch_size) - if LOCAL_RANK != -1: - assert opt.batch_size != -1, 'AutoBatch is coming soon for classification, please pass a valid --batch-size' - assert opt.batch_size % WORLD_SIZE == 0, f'--batch-size {opt.batch_size} must be multiple of WORLD_SIZE' - assert torch.cuda.device_count() > LOCAL_RANK, 'insufficient CUDA devices for DDP command' - torch.cuda.set_device(LOCAL_RANK) - device = torch.device('cuda', LOCAL_RANK) - dist.init_process_group(backend="nccl" if dist.is_nccl_available() else "gloo") - - # Parameters - opt.save_dir = increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok) # increment run - - # Train - train(opt, device) - - -def run(**kwargs): - # Usage: from yolov5 import classify; classify.train.run(data=mnist, imgsz=320, model='yolov5m') - opt = parse_opt(True) - for k, v in kwargs.items(): - setattr(opt, k, v) - main(opt) - return opt - - -if __name__ == "__main__": - opt = parse_opt() - main(opt) diff --git a/src/yolov5_ros/src/yolov5/classify/val.py b/src/yolov5_ros/src/yolov5/classify/val.py deleted file mode 100644 index 3c16ec8..0000000 --- a/src/yolov5_ros/src/yolov5/classify/val.py +++ /dev/null @@ -1,169 +0,0 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license -""" -Validate a trained YOLOv5 classification model on a classification dataset - -Usage: - $ bash data/scripts/get_imagenet.sh --val # download ImageNet val split (6.3G, 50000 images) - $ python classify/val.py --weights yolov5m-cls.pt --data ../datasets/imagenet --img 
224 # validate ImageNet - -Usage - formats: - $ python classify/val.py --weights yolov5s-cls.pt # PyTorch - yolov5s-cls.torchscript # TorchScript - yolov5s-cls.onnx # ONNX Runtime or OpenCV DNN with --dnn - yolov5s-cls.xml # OpenVINO - yolov5s-cls.engine # TensorRT - yolov5s-cls.mlmodel # CoreML (macOS-only) - yolov5s-cls_saved_model # TensorFlow SavedModel - yolov5s-cls.pb # TensorFlow GraphDef - yolov5s-cls.tflite # TensorFlow Lite - yolov5s-cls_edgetpu.tflite # TensorFlow Edge TPU - yolov5s-cls_paddle_model # PaddlePaddle -""" - -import argparse -import os -import sys -from pathlib import Path - -import torch -from tqdm import tqdm - -FILE = Path(__file__).resolve() -ROOT = FILE.parents[1] # YOLOv5 root directory -if str(ROOT) not in sys.path: - sys.path.append(str(ROOT)) # add ROOT to PATH -ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative - -from models.common import DetectMultiBackend -from utils.dataloaders import create_classification_dataloader -from utils.general import LOGGER, Profile, check_img_size, check_requirements, colorstr, increment_path, print_args -from utils.torch_utils import select_device, smart_inference_mode - - -@smart_inference_mode() -def run( - data=ROOT / '../datasets/mnist', # dataset dir - weights=ROOT / 'yolov5s-cls.pt', # model.pt path(s) - batch_size=128, # batch size - imgsz=224, # inference size (pixels) - device='', # cuda device, i.e. 0 or 0,1,2,3 or cpu - workers=8, # max dataloader workers (per RANK in DDP mode) - verbose=False, # verbose output - project=ROOT / 'runs/val-cls', # save to project/name - name='exp', # save to project/name - exist_ok=False, # existing project/name ok, do not increment - half=False, # use FP16 half-precision inference - dnn=False, # use OpenCV DNN for ONNX inference - model=None, - dataloader=None, - criterion=None, - pbar=None, -): - # Initialize/load model and set device - training = model is not None - if training: # called by train.py - device, pt, jit, engine = next(model.parameters()).device, True, False, False # get model device, PyTorch model - half &= device.type != 'cpu' # half precision only supported on CUDA - model.half() if half else model.float() - else: # called directly - device = select_device(device, batch_size=batch_size) - - # Directories - save_dir = increment_path(Path(project) / name, exist_ok=exist_ok) # increment run - save_dir.mkdir(parents=True, exist_ok=True) # make dir - - # Load model - model = DetectMultiBackend(weights, device=device, dnn=dnn, fp16=half) - stride, pt, jit, engine = model.stride, model.pt, model.jit, model.engine - imgsz = check_img_size(imgsz, s=stride) # check image size - half = model.fp16 # FP16 supported on limited backends with CUDA - if engine: - batch_size = model.batch_size - else: - device = model.device - if not (pt or jit): - batch_size = 1 # export.py models default to batch-size 1 - LOGGER.info(f'Forcing --batch-size 1 square inference (1,3,{imgsz},{imgsz}) for non-PyTorch models') - - # Dataloader - data = Path(data) - test_dir = data / 'test' if (data / 'test').exists() else data / 'val' # data/test or data/val - dataloader = create_classification_dataloader(path=test_dir, - imgsz=imgsz, - batch_size=batch_size, - augment=False, - rank=-1, - workers=workers) - - model.eval() - pred, targets, loss, dt = [], [], 0, (Profile(), Profile(), Profile()) - n = len(dataloader) # number of batches - action = 'validating' if dataloader.dataset.root.stem == 'val' else 'testing' - desc = f"{pbar.desc[:-36]}{action:>36}" if pbar else f"{action}" - bar = 
tqdm(dataloader, desc, n, not training, bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}', position=0) - with torch.cuda.amp.autocast(enabled=device.type != 'cpu'): - for images, labels in bar: - with dt[0]: - images, labels = images.to(device, non_blocking=True), labels.to(device) - - with dt[1]: - y = model(images) - - with dt[2]: - pred.append(y.argsort(1, descending=True)[:, :5]) - targets.append(labels) - if criterion: - loss += criterion(y, labels) - - loss /= n - pred, targets = torch.cat(pred), torch.cat(targets) - correct = (targets[:, None] == pred).float() - acc = torch.stack((correct[:, 0], correct.max(1).values), dim=1) # (top1, top5) accuracy - top1, top5 = acc.mean(0).tolist() - - if pbar: - pbar.desc = f"{pbar.desc[:-36]}{loss:>12.3g}{top1:>12.3g}{top5:>12.3g}" - if verbose: # all classes - LOGGER.info(f"{'Class':>24}{'Images':>12}{'top1_acc':>12}{'top5_acc':>12}") - LOGGER.info(f"{'all':>24}{targets.shape[0]:>12}{top1:>12.3g}{top5:>12.3g}") - for i, c in model.names.items(): - aci = acc[targets == i] - top1i, top5i = aci.mean(0).tolist() - LOGGER.info(f"{c:>24}{aci.shape[0]:>12}{top1i:>12.3g}{top5i:>12.3g}") - - # Print results - t = tuple(x.t / len(dataloader.dataset.samples) * 1E3 for x in dt) # speeds per image - shape = (1, 3, imgsz, imgsz) - LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms post-process per image at shape {shape}' % t) - LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}") - - return top1, top5, loss - - -def parse_opt(): - parser = argparse.ArgumentParser() - parser.add_argument('--data', type=str, default=ROOT / '../datasets/mnist', help='dataset path') - parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'yolov5s-cls.pt', help='model.pt path(s)') - parser.add_argument('--batch-size', type=int, default=128, help='batch size') - parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=224, help='inference size (pixels)') - parser.add_argument('--device', default='', help='cuda device, i.e. 
0 or 0,1,2,3 or cpu') - parser.add_argument('--workers', type=int, default=8, help='max dataloader workers (per RANK in DDP mode)') - parser.add_argument('--verbose', nargs='?', const=True, default=True, help='verbose output') - parser.add_argument('--project', default=ROOT / 'runs/val-cls', help='save to project/name') - parser.add_argument('--name', default='exp', help='save to project/name') - parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment') - parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference') - parser.add_argument('--dnn', action='store_true', help='use OpenCV DNN for ONNX inference') - opt = parser.parse_args() - print_args(vars(opt)) - return opt - - -def main(opt): - check_requirements(exclude=('tensorboard', 'thop')) - run(**vars(opt)) - - -if __name__ == "__main__": - opt = parse_opt() - main(opt) diff --git a/src/yolov5_ros/src/yolov5/data/Argoverse.yaml b/src/yolov5_ros/src/yolov5/data/Argoverse.yaml deleted file mode 100644 index e3e9ba1..0000000 --- a/src/yolov5_ros/src/yolov5/data/Argoverse.yaml +++ /dev/null @@ -1,74 +0,0 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license -# Argoverse-HD dataset (ring-front-center camera) http://www.cs.cmu.edu/~mengtial/proj/streaming/ by Argo AI -# Example usage: python train.py --data Argoverse.yaml -# parent -# ├── yolov5 -# └── datasets -# └── Argoverse ← downloads here (31.3 GB) - - -# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..] -path: ../datasets/Argoverse # dataset root dir -train: Argoverse-1.1/images/train/ # train images (relative to 'path') 39384 images -val: Argoverse-1.1/images/val/ # val images (relative to 'path') 15062 images -test: Argoverse-1.1/images/test/ # test images (optional) https://eval.ai/web/challenges/challenge-page/800/overview - -# Classes -names: - 0: person - 1: bicycle - 2: car - 3: motorcycle - 4: bus - 5: truck - 6: traffic_light - 7: stop_sign - - -# Download script/URL (optional) --------------------------------------------------------------------------------------- -download: | - import json - - from tqdm import tqdm - from utils.general import download, Path - - - def argoverse2yolo(set): - labels = {} - a = json.load(open(set, "rb")) - for annot in tqdm(a['annotations'], desc=f"Converting {set} to YOLOv5 format..."): - img_id = annot['image_id'] - img_name = a['images'][img_id]['name'] - img_label_name = f'{img_name[:-3]}txt' - - cls = annot['category_id'] # instance class id - x_center, y_center, width, height = annot['bbox'] - x_center = (x_center + width / 2) / 1920.0 # offset and scale - y_center = (y_center + height / 2) / 1200.0 # offset and scale - width /= 1920.0 # scale - height /= 1200.0 # scale - - img_dir = set.parents[2] / 'Argoverse-1.1' / 'labels' / a['seq_dirs'][a['images'][annot['image_id']]['sid']] - if not img_dir.exists(): - img_dir.mkdir(parents=True, exist_ok=True) - - k = str(img_dir / img_label_name) - if k not in labels: - labels[k] = [] - labels[k].append(f"{cls} {x_center} {y_center} {width} {height}\n") - - for k in labels: - with open(k, "w") as f: - f.writelines(labels[k]) - - - # Download - dir = Path('../datasets/Argoverse') # dataset root dir - urls = ['https://argoverse-hd.s3.us-east-2.amazonaws.com/Argoverse-HD-Full.zip'] - download(urls, dir=dir, delete=False) - - # Convert - annotations_dir = 'Argoverse-HD/annotations/' - (dir / 'Argoverse-1.1' / 'tracking').rename(dir / 'Argoverse-1.1' / 'images') 
# rename 'tracking' to 'images' - for d in "train.json", "val.json": - argoverse2yolo(dir / annotations_dir / d) # convert Argoverse annotations to YOLO labels diff --git a/src/yolov5_ros/src/yolov5/data/GlobalWheat2020.yaml b/src/yolov5_ros/src/yolov5/data/GlobalWheat2020.yaml deleted file mode 100644 index 01812d0..0000000 --- a/src/yolov5_ros/src/yolov5/data/GlobalWheat2020.yaml +++ /dev/null @@ -1,54 +0,0 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license -# Global Wheat 2020 dataset http://www.global-wheat.com/ by University of Saskatchewan -# Example usage: python train.py --data GlobalWheat2020.yaml -# parent -# ├── yolov5 -# └── datasets -# └── GlobalWheat2020 ← downloads here (7.0 GB) - - -# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..] -path: ../datasets/GlobalWheat2020 # dataset root dir -train: # train images (relative to 'path') 3422 images - - images/arvalis_1 - - images/arvalis_2 - - images/arvalis_3 - - images/ethz_1 - - images/rres_1 - - images/inrae_1 - - images/usask_1 -val: # val images (relative to 'path') 748 images (WARNING: train set contains ethz_1) - - images/ethz_1 -test: # test images (optional) 1276 images - - images/utokyo_1 - - images/utokyo_2 - - images/nau_1 - - images/uq_1 - -# Classes -names: - 0: wheat_head - - -# Download script/URL (optional) --------------------------------------------------------------------------------------- -download: | - from utils.general import download, Path - - - # Download - dir = Path(yaml['path']) # dataset root dir - urls = ['https://zenodo.org/record/4298502/files/global-wheat-codalab-official.zip', - 'https://github.com/ultralytics/yolov5/releases/download/v1.0/GlobalWheat2020_labels.zip'] - download(urls, dir=dir) - - # Make Directories - for p in 'annotations', 'images', 'labels': - (dir / p).mkdir(parents=True, exist_ok=True) - - # Move - for p in 'arvalis_1', 'arvalis_2', 'arvalis_3', 'ethz_1', 'rres_1', 'inrae_1', 'usask_1', \ - 'utokyo_1', 'utokyo_2', 'nau_1', 'uq_1': - (dir / p).rename(dir / 'images' / p) # move to /images - f = (dir / p).with_suffix('.json') # json file - if f.exists(): - f.rename((dir / 'annotations' / p).with_suffix('.json')) # move to /annotations diff --git a/src/yolov5_ros/src/yolov5/data/ImageNet.yaml b/src/yolov5_ros/src/yolov5/data/ImageNet.yaml deleted file mode 100644 index 14f1295..0000000 --- a/src/yolov5_ros/src/yolov5/data/ImageNet.yaml +++ /dev/null @@ -1,1022 +0,0 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license -# ImageNet-1k dataset https://www.image-net.org/index.php by Stanford University -# Simplified class names from https://github.com/anishathalye/imagenet-simple-labels -# Example usage: python classify/train.py --data imagenet -# parent -# ├── yolov5 -# └── datasets -# └── imagenet ← downloads here (144 GB) - - -# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
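The Argoverse download script earlier in this diff turns COCO-style boxes, a top-left corner plus width and height in pixels, into YOLO's normalized center format by shifting to the box center and dividing by the 1920x1200 Argoverse-HD frame size. A minimal self-contained sketch of that arithmetic, with a worked example (the function name is illustrative, not part of the repo):

# Sketch of the box conversion hard-coded in the Argoverse download script.
# Input is COCO-style: (x_top_left, y_top_left, width, height) in pixels.
def coco_box_to_yolo(box, img_w=1920.0, img_h=1200.0):
    """Return (x_center, y_center, w, h), each normalized to [0, 1]."""
    x, y, w, h = box
    x_center = (x + w / 2) / img_w  # shift to the box center, then scale
    y_center = (y + h / 2) / img_h
    return x_center, y_center, w / img_w, h / img_h

# A 192x120 box with its top-left corner at (960, 540):
print(coco_box_to_yolo((960, 540, 192, 120)))  # -> (0.55, 0.5, 0.1, 0.1)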
-path: ../datasets/imagenet # dataset root dir -train: train # train images (relative to 'path') 1281167 images -val: val # val images (relative to 'path') 50000 images -test: # test images (optional) - -# Classes -names: - 0: tench - 1: goldfish - 2: great white shark - 3: tiger shark - 4: hammerhead shark - 5: electric ray - 6: stingray - 7: cock - 8: hen - 9: ostrich - 10: brambling - 11: goldfinch - 12: house finch - 13: junco - 14: indigo bunting - 15: American robin - 16: bulbul - 17: jay - 18: magpie - 19: chickadee - 20: American dipper - 21: kite - 22: bald eagle - 23: vulture - 24: great grey owl - 25: fire salamander - 26: smooth newt - 27: newt - 28: spotted salamander - 29: axolotl - 30: American bullfrog - 31: tree frog - 32: tailed frog - 33: loggerhead sea turtle - 34: leatherback sea turtle - 35: mud turtle - 36: terrapin - 37: box turtle - 38: banded gecko - 39: green iguana - 40: Carolina anole - 41: desert grassland whiptail lizard - 42: agama - 43: frilled-necked lizard - 44: alligator lizard - 45: Gila monster - 46: European green lizard - 47: chameleon - 48: Komodo dragon - 49: Nile crocodile - 50: American alligator - 51: triceratops - 52: worm snake - 53: ring-necked snake - 54: eastern hog-nosed snake - 55: smooth green snake - 56: kingsnake - 57: garter snake - 58: water snake - 59: vine snake - 60: night snake - 61: boa constrictor - 62: African rock python - 63: Indian cobra - 64: green mamba - 65: sea snake - 66: Saharan horned viper - 67: eastern diamondback rattlesnake - 68: sidewinder - 69: trilobite - 70: harvestman - 71: scorpion - 72: yellow garden spider - 73: barn spider - 74: European garden spider - 75: southern black widow - 76: tarantula - 77: wolf spider - 78: tick - 79: centipede - 80: black grouse - 81: ptarmigan - 82: ruffed grouse - 83: prairie grouse - 84: peacock - 85: quail - 86: partridge - 87: grey parrot - 88: macaw - 89: sulphur-crested cockatoo - 90: lorikeet - 91: coucal - 92: bee eater - 93: hornbill - 94: hummingbird - 95: jacamar - 96: toucan - 97: duck - 98: red-breasted merganser - 99: goose - 100: black swan - 101: tusker - 102: echidna - 103: platypus - 104: wallaby - 105: koala - 106: wombat - 107: jellyfish - 108: sea anemone - 109: brain coral - 110: flatworm - 111: nematode - 112: conch - 113: snail - 114: slug - 115: sea slug - 116: chiton - 117: chambered nautilus - 118: Dungeness crab - 119: rock crab - 120: fiddler crab - 121: red king crab - 122: American lobster - 123: spiny lobster - 124: crayfish - 125: hermit crab - 126: isopod - 127: white stork - 128: black stork - 129: spoonbill - 130: flamingo - 131: little blue heron - 132: great egret - 133: bittern - 134: crane (bird) - 135: limpkin - 136: common gallinule - 137: American coot - 138: bustard - 139: ruddy turnstone - 140: dunlin - 141: common redshank - 142: dowitcher - 143: oystercatcher - 144: pelican - 145: king penguin - 146: albatross - 147: grey whale - 148: killer whale - 149: dugong - 150: sea lion - 151: Chihuahua - 152: Japanese Chin - 153: Maltese - 154: Pekingese - 155: Shih Tzu - 156: King Charles Spaniel - 157: Papillon - 158: toy terrier - 159: Rhodesian Ridgeback - 160: Afghan Hound - 161: Basset Hound - 162: Beagle - 163: Bloodhound - 164: Bluetick Coonhound - 165: Black and Tan Coonhound - 166: Treeing Walker Coonhound - 167: English foxhound - 168: Redbone Coonhound - 169: borzoi - 170: Irish Wolfhound - 171: Italian Greyhound - 172: Whippet - 173: Ibizan Hound - 174: Norwegian Elkhound - 175: Otterhound - 176: Saluki - 177: Scottish 
Deerhound - 178: Weimaraner - 179: Staffordshire Bull Terrier - 180: American Staffordshire Terrier - 181: Bedlington Terrier - 182: Border Terrier - 183: Kerry Blue Terrier - 184: Irish Terrier - 185: Norfolk Terrier - 186: Norwich Terrier - 187: Yorkshire Terrier - 188: Wire Fox Terrier - 189: Lakeland Terrier - 190: Sealyham Terrier - 191: Airedale Terrier - 192: Cairn Terrier - 193: Australian Terrier - 194: Dandie Dinmont Terrier - 195: Boston Terrier - 196: Miniature Schnauzer - 197: Giant Schnauzer - 198: Standard Schnauzer - 199: Scottish Terrier - 200: Tibetan Terrier - 201: Australian Silky Terrier - 202: Soft-coated Wheaten Terrier - 203: West Highland White Terrier - 204: Lhasa Apso - 205: Flat-Coated Retriever - 206: Curly-coated Retriever - 207: Golden Retriever - 208: Labrador Retriever - 209: Chesapeake Bay Retriever - 210: German Shorthaired Pointer - 211: Vizsla - 212: English Setter - 213: Irish Setter - 214: Gordon Setter - 215: Brittany - 216: Clumber Spaniel - 217: English Springer Spaniel - 218: Welsh Springer Spaniel - 219: Cocker Spaniels - 220: Sussex Spaniel - 221: Irish Water Spaniel - 222: Kuvasz - 223: Schipperke - 224: Groenendael - 225: Malinois - 226: Briard - 227: Australian Kelpie - 228: Komondor - 229: Old English Sheepdog - 230: Shetland Sheepdog - 231: collie - 232: Border Collie - 233: Bouvier des Flandres - 234: Rottweiler - 235: German Shepherd Dog - 236: Dobermann - 237: Miniature Pinscher - 238: Greater Swiss Mountain Dog - 239: Bernese Mountain Dog - 240: Appenzeller Sennenhund - 241: Entlebucher Sennenhund - 242: Boxer - 243: Bullmastiff - 244: Tibetan Mastiff - 245: French Bulldog - 246: Great Dane - 247: St. Bernard - 248: husky - 249: Alaskan Malamute - 250: Siberian Husky - 251: Dalmatian - 252: Affenpinscher - 253: Basenji - 254: pug - 255: Leonberger - 256: Newfoundland - 257: Pyrenean Mountain Dog - 258: Samoyed - 259: Pomeranian - 260: Chow Chow - 261: Keeshond - 262: Griffon Bruxellois - 263: Pembroke Welsh Corgi - 264: Cardigan Welsh Corgi - 265: Toy Poodle - 266: Miniature Poodle - 267: Standard Poodle - 268: Mexican hairless dog - 269: grey wolf - 270: Alaskan tundra wolf - 271: red wolf - 272: coyote - 273: dingo - 274: dhole - 275: African wild dog - 276: hyena - 277: red fox - 278: kit fox - 279: Arctic fox - 280: grey fox - 281: tabby cat - 282: tiger cat - 283: Persian cat - 284: Siamese cat - 285: Egyptian Mau - 286: cougar - 287: lynx - 288: leopard - 289: snow leopard - 290: jaguar - 291: lion - 292: tiger - 293: cheetah - 294: brown bear - 295: American black bear - 296: polar bear - 297: sloth bear - 298: mongoose - 299: meerkat - 300: tiger beetle - 301: ladybug - 302: ground beetle - 303: longhorn beetle - 304: leaf beetle - 305: dung beetle - 306: rhinoceros beetle - 307: weevil - 308: fly - 309: bee - 310: ant - 311: grasshopper - 312: cricket - 313: stick insect - 314: cockroach - 315: mantis - 316: cicada - 317: leafhopper - 318: lacewing - 319: dragonfly - 320: damselfly - 321: red admiral - 322: ringlet - 323: monarch butterfly - 324: small white - 325: sulphur butterfly - 326: gossamer-winged butterfly - 327: starfish - 328: sea urchin - 329: sea cucumber - 330: cottontail rabbit - 331: hare - 332: Angora rabbit - 333: hamster - 334: porcupine - 335: fox squirrel - 336: marmot - 337: beaver - 338: guinea pig - 339: common sorrel - 340: zebra - 341: pig - 342: wild boar - 343: warthog - 344: hippopotamus - 345: ox - 346: water buffalo - 347: bison - 348: ram - 349: bighorn sheep - 350: Alpine ibex - 351: hartebeest 
- 352: impala - 353: gazelle - 354: dromedary - 355: llama - 356: weasel - 357: mink - 358: European polecat - 359: black-footed ferret - 360: otter - 361: skunk - 362: badger - 363: armadillo - 364: three-toed sloth - 365: orangutan - 366: gorilla - 367: chimpanzee - 368: gibbon - 369: siamang - 370: guenon - 371: patas monkey - 372: baboon - 373: macaque - 374: langur - 375: black-and-white colobus - 376: proboscis monkey - 377: marmoset - 378: white-headed capuchin - 379: howler monkey - 380: titi - 381: Geoffroy's spider monkey - 382: common squirrel monkey - 383: ring-tailed lemur - 384: indri - 385: Asian elephant - 386: African bush elephant - 387: red panda - 388: giant panda - 389: snoek - 390: eel - 391: coho salmon - 392: rock beauty - 393: clownfish - 394: sturgeon - 395: garfish - 396: lionfish - 397: pufferfish - 398: abacus - 399: abaya - 400: academic gown - 401: accordion - 402: acoustic guitar - 403: aircraft carrier - 404: airliner - 405: airship - 406: altar - 407: ambulance - 408: amphibious vehicle - 409: analog clock - 410: apiary - 411: apron - 412: waste container - 413: assault rifle - 414: backpack - 415: bakery - 416: balance beam - 417: balloon - 418: ballpoint pen - 419: Band-Aid - 420: banjo - 421: baluster - 422: barbell - 423: barber chair - 424: barbershop - 425: barn - 426: barometer - 427: barrel - 428: wheelbarrow - 429: baseball - 430: basketball - 431: bassinet - 432: bassoon - 433: swimming cap - 434: bath towel - 435: bathtub - 436: station wagon - 437: lighthouse - 438: beaker - 439: military cap - 440: beer bottle - 441: beer glass - 442: bell-cot - 443: bib - 444: tandem bicycle - 445: bikini - 446: ring binder - 447: binoculars - 448: birdhouse - 449: boathouse - 450: bobsleigh - 451: bolo tie - 452: poke bonnet - 453: bookcase - 454: bookstore - 455: bottle cap - 456: bow - 457: bow tie - 458: brass - 459: bra - 460: breakwater - 461: breastplate - 462: broom - 463: bucket - 464: buckle - 465: bulletproof vest - 466: high-speed train - 467: butcher shop - 468: taxicab - 469: cauldron - 470: candle - 471: cannon - 472: canoe - 473: can opener - 474: cardigan - 475: car mirror - 476: carousel - 477: tool kit - 478: carton - 479: car wheel - 480: automated teller machine - 481: cassette - 482: cassette player - 483: castle - 484: catamaran - 485: CD player - 486: cello - 487: mobile phone - 488: chain - 489: chain-link fence - 490: chain mail - 491: chainsaw - 492: chest - 493: chiffonier - 494: chime - 495: china cabinet - 496: Christmas stocking - 497: church - 498: movie theater - 499: cleaver - 500: cliff dwelling - 501: cloak - 502: clogs - 503: cocktail shaker - 504: coffee mug - 505: coffeemaker - 506: coil - 507: combination lock - 508: computer keyboard - 509: confectionery store - 510: container ship - 511: convertible - 512: corkscrew - 513: cornet - 514: cowboy boot - 515: cowboy hat - 516: cradle - 517: crane (machine) - 518: crash helmet - 519: crate - 520: infant bed - 521: Crock Pot - 522: croquet ball - 523: crutch - 524: cuirass - 525: dam - 526: desk - 527: desktop computer - 528: rotary dial telephone - 529: diaper - 530: digital clock - 531: digital watch - 532: dining table - 533: dishcloth - 534: dishwasher - 535: disc brake - 536: dock - 537: dog sled - 538: dome - 539: doormat - 540: drilling rig - 541: drum - 542: drumstick - 543: dumbbell - 544: Dutch oven - 545: electric fan - 546: electric guitar - 547: electric locomotive - 548: entertainment center - 549: envelope - 550: espresso machine - 551: face powder - 552: 
feather boa - 553: filing cabinet - 554: fireboat - 555: fire engine - 556: fire screen sheet - 557: flagpole - 558: flute - 559: folding chair - 560: football helmet - 561: forklift - 562: fountain - 563: fountain pen - 564: four-poster bed - 565: freight car - 566: French horn - 567: frying pan - 568: fur coat - 569: garbage truck - 570: gas mask - 571: gas pump - 572: goblet - 573: go-kart - 574: golf ball - 575: golf cart - 576: gondola - 577: gong - 578: gown - 579: grand piano - 580: greenhouse - 581: grille - 582: grocery store - 583: guillotine - 584: barrette - 585: hair spray - 586: half-track - 587: hammer - 588: hamper - 589: hair dryer - 590: hand-held computer - 591: handkerchief - 592: hard disk drive - 593: harmonica - 594: harp - 595: harvester - 596: hatchet - 597: holster - 598: home theater - 599: honeycomb - 600: hook - 601: hoop skirt - 602: horizontal bar - 603: horse-drawn vehicle - 604: hourglass - 605: iPod - 606: clothes iron - 607: jack-o'-lantern - 608: jeans - 609: jeep - 610: T-shirt - 611: jigsaw puzzle - 612: pulled rickshaw - 613: joystick - 614: kimono - 615: knee pad - 616: knot - 617: lab coat - 618: ladle - 619: lampshade - 620: laptop computer - 621: lawn mower - 622: lens cap - 623: paper knife - 624: library - 625: lifeboat - 626: lighter - 627: limousine - 628: ocean liner - 629: lipstick - 630: slip-on shoe - 631: lotion - 632: speaker - 633: loupe - 634: sawmill - 635: magnetic compass - 636: mail bag - 637: mailbox - 638: tights - 639: tank suit - 640: manhole cover - 641: maraca - 642: marimba - 643: mask - 644: match - 645: maypole - 646: maze - 647: measuring cup - 648: medicine chest - 649: megalith - 650: microphone - 651: microwave oven - 652: military uniform - 653: milk can - 654: minibus - 655: miniskirt - 656: minivan - 657: missile - 658: mitten - 659: mixing bowl - 660: mobile home - 661: Model T - 662: modem - 663: monastery - 664: monitor - 665: moped - 666: mortar - 667: square academic cap - 668: mosque - 669: mosquito net - 670: scooter - 671: mountain bike - 672: tent - 673: computer mouse - 674: mousetrap - 675: moving van - 676: muzzle - 677: nail - 678: neck brace - 679: necklace - 680: nipple - 681: notebook computer - 682: obelisk - 683: oboe - 684: ocarina - 685: odometer - 686: oil filter - 687: organ - 688: oscilloscope - 689: overskirt - 690: bullock cart - 691: oxygen mask - 692: packet - 693: paddle - 694: paddle wheel - 695: padlock - 696: paintbrush - 697: pajamas - 698: palace - 699: pan flute - 700: paper towel - 701: parachute - 702: parallel bars - 703: park bench - 704: parking meter - 705: passenger car - 706: patio - 707: payphone - 708: pedestal - 709: pencil case - 710: pencil sharpener - 711: perfume - 712: Petri dish - 713: photocopier - 714: plectrum - 715: Pickelhaube - 716: picket fence - 717: pickup truck - 718: pier - 719: piggy bank - 720: pill bottle - 721: pillow - 722: ping-pong ball - 723: pinwheel - 724: pirate ship - 725: pitcher - 726: hand plane - 727: planetarium - 728: plastic bag - 729: plate rack - 730: plow - 731: plunger - 732: Polaroid camera - 733: pole - 734: police van - 735: poncho - 736: billiard table - 737: soda bottle - 738: pot - 739: potter's wheel - 740: power drill - 741: prayer rug - 742: printer - 743: prison - 744: projectile - 745: projector - 746: hockey puck - 747: punching bag - 748: purse - 749: quill - 750: quilt - 751: race car - 752: racket - 753: radiator - 754: radio - 755: radio telescope - 756: rain barrel - 757: recreational vehicle - 758: reel - 759: 
reflex camera - 760: refrigerator - 761: remote control - 762: restaurant - 763: revolver - 764: rifle - 765: rocking chair - 766: rotisserie - 767: eraser - 768: rugby ball - 769: ruler - 770: running shoe - 771: safe - 772: safety pin - 773: salt shaker - 774: sandal - 775: sarong - 776: saxophone - 777: scabbard - 778: weighing scale - 779: school bus - 780: schooner - 781: scoreboard - 782: CRT screen - 783: screw - 784: screwdriver - 785: seat belt - 786: sewing machine - 787: shield - 788: shoe store - 789: shoji - 790: shopping basket - 791: shopping cart - 792: shovel - 793: shower cap - 794: shower curtain - 795: ski - 796: ski mask - 797: sleeping bag - 798: slide rule - 799: sliding door - 800: slot machine - 801: snorkel - 802: snowmobile - 803: snowplow - 804: soap dispenser - 805: soccer ball - 806: sock - 807: solar thermal collector - 808: sombrero - 809: soup bowl - 810: space bar - 811: space heater - 812: space shuttle - 813: spatula - 814: motorboat - 815: spider web - 816: spindle - 817: sports car - 818: spotlight - 819: stage - 820: steam locomotive - 821: through arch bridge - 822: steel drum - 823: stethoscope - 824: scarf - 825: stone wall - 826: stopwatch - 827: stove - 828: strainer - 829: tram - 830: stretcher - 831: couch - 832: stupa - 833: submarine - 834: suit - 835: sundial - 836: sunglass - 837: sunglasses - 838: sunscreen - 839: suspension bridge - 840: mop - 841: sweatshirt - 842: swimsuit - 843: swing - 844: switch - 845: syringe - 846: table lamp - 847: tank - 848: tape player - 849: teapot - 850: teddy bear - 851: television - 852: tennis ball - 853: thatched roof - 854: front curtain - 855: thimble - 856: threshing machine - 857: throne - 858: tile roof - 859: toaster - 860: tobacco shop - 861: toilet seat - 862: torch - 863: totem pole - 864: tow truck - 865: toy store - 866: tractor - 867: semi-trailer truck - 868: tray - 869: trench coat - 870: tricycle - 871: trimaran - 872: tripod - 873: triumphal arch - 874: trolleybus - 875: trombone - 876: tub - 877: turnstile - 878: typewriter keyboard - 879: umbrella - 880: unicycle - 881: upright piano - 882: vacuum cleaner - 883: vase - 884: vault - 885: velvet - 886: vending machine - 887: vestment - 888: viaduct - 889: violin - 890: volleyball - 891: waffle iron - 892: wall clock - 893: wallet - 894: wardrobe - 895: military aircraft - 896: sink - 897: washing machine - 898: water bottle - 899: water jug - 900: water tower - 901: whiskey jug - 902: whistle - 903: wig - 904: window screen - 905: window shade - 906: Windsor tie - 907: wine bottle - 908: wing - 909: wok - 910: wooden spoon - 911: wool - 912: split-rail fence - 913: shipwreck - 914: yawl - 915: yurt - 916: website - 917: comic book - 918: crossword - 919: traffic sign - 920: traffic light - 921: dust jacket - 922: menu - 923: plate - 924: guacamole - 925: consomme - 926: hot pot - 927: trifle - 928: ice cream - 929: ice pop - 930: baguette - 931: bagel - 932: pretzel - 933: cheeseburger - 934: hot dog - 935: mashed potato - 936: cabbage - 937: broccoli - 938: cauliflower - 939: zucchini - 940: spaghetti squash - 941: acorn squash - 942: butternut squash - 943: cucumber - 944: artichoke - 945: bell pepper - 946: cardoon - 947: mushroom - 948: Granny Smith - 949: strawberry - 950: orange - 951: lemon - 952: fig - 953: pineapple - 954: banana - 955: jackfruit - 956: custard apple - 957: pomegranate - 958: hay - 959: carbonara - 960: chocolate syrup - 961: dough - 962: meatloaf - 963: pizza - 964: pot pie - 965: burrito - 966: red wine - 967: 
espresso - 968: cup - 969: eggnog - 970: alp - 971: bubble - 972: cliff - 973: coral reef - 974: geyser - 975: lakeshore - 976: promontory - 977: shoal - 978: seashore - 979: valley - 980: volcano - 981: baseball player - 982: bridegroom - 983: scuba diver - 984: rapeseed - 985: daisy - 986: yellow lady's slipper - 987: corn - 988: acorn - 989: rose hip - 990: horse chestnut seed - 991: coral fungus - 992: agaric - 993: gyromitra - 994: stinkhorn mushroom - 995: earth star - 996: hen-of-the-woods - 997: bolete - 998: ear - 999: toilet paper - - -# Download script/URL (optional) -download: data/scripts/get_imagenet.sh diff --git a/src/yolov5_ros/src/yolov5/data/Objects365.yaml b/src/yolov5_ros/src/yolov5/data/Objects365.yaml deleted file mode 100644 index 05b26a1..0000000 --- a/src/yolov5_ros/src/yolov5/data/Objects365.yaml +++ /dev/null @@ -1,438 +0,0 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license -# Objects365 dataset https://www.objects365.org/ by Megvii -# Example usage: python train.py --data Objects365.yaml -# parent -# ├── yolov5 -# └── datasets -# └── Objects365 ← downloads here (712 GB = 367G data + 345G zips) - - -# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..] -path: ../datasets/Objects365 # dataset root dir -train: images/train # train images (relative to 'path') 1742289 images -val: images/val # val images (relative to 'path') 80000 images -test: # test images (optional) - -# Classes -names: - 0: Person - 1: Sneakers - 2: Chair - 3: Other Shoes - 4: Hat - 5: Car - 6: Lamp - 7: Glasses - 8: Bottle - 9: Desk - 10: Cup - 11: Street Lights - 12: Cabinet/shelf - 13: Handbag/Satchel - 14: Bracelet - 15: Plate - 16: Picture/Frame - 17: Helmet - 18: Book - 19: Gloves - 20: Storage box - 21: Boat - 22: Leather Shoes - 23: Flower - 24: Bench - 25: Potted Plant - 26: Bowl/Basin - 27: Flag - 28: Pillow - 29: Boots - 30: Vase - 31: Microphone - 32: Necklace - 33: Ring - 34: SUV - 35: Wine Glass - 36: Belt - 37: Monitor/TV - 38: Backpack - 39: Umbrella - 40: Traffic Light - 41: Speaker - 42: Watch - 43: Tie - 44: Trash bin Can - 45: Slippers - 46: Bicycle - 47: Stool - 48: Barrel/bucket - 49: Van - 50: Couch - 51: Sandals - 52: Basket - 53: Drum - 54: Pen/Pencil - 55: Bus - 56: Wild Bird - 57: High Heels - 58: Motorcycle - 59: Guitar - 60: Carpet - 61: Cell Phone - 62: Bread - 63: Camera - 64: Canned - 65: Truck - 66: Traffic cone - 67: Cymbal - 68: Lifesaver - 69: Towel - 70: Stuffed Toy - 71: Candle - 72: Sailboat - 73: Laptop - 74: Awning - 75: Bed - 76: Faucet - 77: Tent - 78: Horse - 79: Mirror - 80: Power outlet - 81: Sink - 82: Apple - 83: Air Conditioner - 84: Knife - 85: Hockey Stick - 86: Paddle - 87: Pickup Truck - 88: Fork - 89: Traffic Sign - 90: Balloon - 91: Tripod - 92: Dog - 93: Spoon - 94: Clock - 95: Pot - 96: Cow - 97: Cake - 98: Dinning Table - 99: Sheep - 100: Hanger - 101: Blackboard/Whiteboard - 102: Napkin - 103: Other Fish - 104: Orange/Tangerine - 105: Toiletry - 106: Keyboard - 107: Tomato - 108: Lantern - 109: Machinery Vehicle - 110: Fan - 111: Green Vegetables - 112: Banana - 113: Baseball Glove - 114: Airplane - 115: Mouse - 116: Train - 117: Pumpkin - 118: Soccer - 119: Skiboard - 120: Luggage - 121: Nightstand - 122: Tea pot - 123: Telephone - 124: Trolley - 125: Head Phone - 126: Sports Car - 127: Stop Sign - 128: Dessert - 129: Scooter - 130: Stroller - 131: Crane - 132: Remote - 133: Refrigerator - 134: Oven - 135: Lemon - 136: Duck - 137: Baseball Bat - 138: Surveillance Camera - 
139: Cat - 140: Jug - 141: Broccoli - 142: Piano - 143: Pizza - 144: Elephant - 145: Skateboard - 146: Surfboard - 147: Gun - 148: Skating and Skiing shoes - 149: Gas stove - 150: Donut - 151: Bow Tie - 152: Carrot - 153: Toilet - 154: Kite - 155: Strawberry - 156: Other Balls - 157: Shovel - 158: Pepper - 159: Computer Box - 160: Toilet Paper - 161: Cleaning Products - 162: Chopsticks - 163: Microwave - 164: Pigeon - 165: Baseball - 166: Cutting/chopping Board - 167: Coffee Table - 168: Side Table - 169: Scissors - 170: Marker - 171: Pie - 172: Ladder - 173: Snowboard - 174: Cookies - 175: Radiator - 176: Fire Hydrant - 177: Basketball - 178: Zebra - 179: Grape - 180: Giraffe - 181: Potato - 182: Sausage - 183: Tricycle - 184: Violin - 185: Egg - 186: Fire Extinguisher - 187: Candy - 188: Fire Truck - 189: Billiards - 190: Converter - 191: Bathtub - 192: Wheelchair - 193: Golf Club - 194: Briefcase - 195: Cucumber - 196: Cigar/Cigarette - 197: Paint Brush - 198: Pear - 199: Heavy Truck - 200: Hamburger - 201: Extractor - 202: Extension Cord - 203: Tong - 204: Tennis Racket - 205: Folder - 206: American Football - 207: earphone - 208: Mask - 209: Kettle - 210: Tennis - 211: Ship - 212: Swing - 213: Coffee Machine - 214: Slide - 215: Carriage - 216: Onion - 217: Green beans - 218: Projector - 219: Frisbee - 220: Washing Machine/Drying Machine - 221: Chicken - 222: Printer - 223: Watermelon - 224: Saxophone - 225: Tissue - 226: Toothbrush - 227: Ice cream - 228: Hot-air balloon - 229: Cello - 230: French Fries - 231: Scale - 232: Trophy - 233: Cabbage - 234: Hot dog - 235: Blender - 236: Peach - 237: Rice - 238: Wallet/Purse - 239: Volleyball - 240: Deer - 241: Goose - 242: Tape - 243: Tablet - 244: Cosmetics - 245: Trumpet - 246: Pineapple - 247: Golf Ball - 248: Ambulance - 249: Parking meter - 250: Mango - 251: Key - 252: Hurdle - 253: Fishing Rod - 254: Medal - 255: Flute - 256: Brush - 257: Penguin - 258: Megaphone - 259: Corn - 260: Lettuce - 261: Garlic - 262: Swan - 263: Helicopter - 264: Green Onion - 265: Sandwich - 266: Nuts - 267: Speed Limit Sign - 268: Induction Cooker - 269: Broom - 270: Trombone - 271: Plum - 272: Rickshaw - 273: Goldfish - 274: Kiwi fruit - 275: Router/modem - 276: Poker Card - 277: Toaster - 278: Shrimp - 279: Sushi - 280: Cheese - 281: Notepaper - 282: Cherry - 283: Pliers - 284: CD - 285: Pasta - 286: Hammer - 287: Cue - 288: Avocado - 289: Hamimelon - 290: Flask - 291: Mushroom - 292: Screwdriver - 293: Soap - 294: Recorder - 295: Bear - 296: Eggplant - 297: Board Eraser - 298: Coconut - 299: Tape Measure/Ruler - 300: Pig - 301: Showerhead - 302: Globe - 303: Chips - 304: Steak - 305: Crosswalk Sign - 306: Stapler - 307: Camel - 308: Formula 1 - 309: Pomegranate - 310: Dishwasher - 311: Crab - 312: Hoverboard - 313: Meat ball - 314: Rice Cooker - 315: Tuba - 316: Calculator - 317: Papaya - 318: Antelope - 319: Parrot - 320: Seal - 321: Butterfly - 322: Dumbbell - 323: Donkey - 324: Lion - 325: Urinal - 326: Dolphin - 327: Electric Drill - 328: Hair Dryer - 329: Egg tart - 330: Jellyfish - 331: Treadmill - 332: Lighter - 333: Grapefruit - 334: Game board - 335: Mop - 336: Radish - 337: Baozi - 338: Target - 339: French - 340: Spring Rolls - 341: Monkey - 342: Rabbit - 343: Pencil Case - 344: Yak - 345: Red Cabbage - 346: Binoculars - 347: Asparagus - 348: Barbell - 349: Scallop - 350: Noddles - 351: Comb - 352: Dumpling - 353: Oyster - 354: Table Tennis paddle - 355: Cosmetics Brush/Eyeliner Pencil - 356: Chainsaw - 357: Eraser - 358: Lobster - 359: 
Durian - 360: Okra - 361: Lipstick - 362: Cosmetics Mirror - 363: Curling - 364: Table Tennis - - -# Download script/URL (optional) --------------------------------------------------------------------------------------- -download: | - from tqdm import tqdm - - from utils.general import Path, check_requirements, download, np, xyxy2xywhn - - check_requirements(('pycocotools>=2.0',)) - from pycocotools.coco import COCO - - # Make Directories - dir = Path(yaml['path']) # dataset root dir - for p in 'images', 'labels': - (dir / p).mkdir(parents=True, exist_ok=True) - for q in 'train', 'val': - (dir / p / q).mkdir(parents=True, exist_ok=True) - - # Train, Val Splits - for split, patches in [('train', 50 + 1), ('val', 43 + 1)]: - print(f"Processing {split} in {patches} patches ...") - images, labels = dir / 'images' / split, dir / 'labels' / split - - # Download - url = f"https://dorc.ks3-cn-beijing.ksyun.com/data-set/2020Objects365%E6%95%B0%E6%8D%AE%E9%9B%86/{split}/" - if split == 'train': - download([f'{url}zhiyuan_objv2_{split}.tar.gz'], dir=dir, delete=False) # annotations json - download([f'{url}patch{i}.tar.gz' for i in range(patches)], dir=images, curl=True, delete=False, threads=8) - elif split == 'val': - download([f'{url}zhiyuan_objv2_{split}.json'], dir=dir, delete=False) # annotations json - download([f'{url}images/v1/patch{i}.tar.gz' for i in range(15 + 1)], dir=images, curl=True, delete=False, threads=8) - download([f'{url}images/v2/patch{i}.tar.gz' for i in range(16, patches)], dir=images, curl=True, delete=False, threads=8) - - # Move - for f in tqdm(images.rglob('*.jpg'), desc=f'Moving {split} images'): - f.rename(images / f.name) # move to /images/{split} - - # Labels - coco = COCO(dir / f'zhiyuan_objv2_{split}.json') - names = [x["name"] for x in coco.loadCats(coco.getCatIds())] - for cid, cat in enumerate(names): - catIds = coco.getCatIds(catNms=[cat]) - imgIds = coco.getImgIds(catIds=catIds) - for im in tqdm(coco.loadImgs(imgIds), desc=f'Class {cid + 1}/{len(names)} {cat}'): - width, height = im["width"], im["height"] - path = Path(im["file_name"]) # image filename - try: - with open(labels / path.with_suffix('.txt').name, 'a') as file: - annIds = coco.getAnnIds(imgIds=im["id"], catIds=catIds, iscrowd=None) - for a in coco.loadAnns(annIds): - x, y, w, h = a['bbox'] # bounding box in xywh (xy top-left corner) - xyxy = np.array([x, y, x + w, y + h])[None] # pixels(1,4) - x, y, w, h = xyxy2xywhn(xyxy, w=width, h=height, clip=True)[0] # normalized and clipped - file.write(f"{cid} {x:.5f} {y:.5f} {w:.5f} {h:.5f}\n") - except Exception as e: - print(e) diff --git a/src/yolov5_ros/src/yolov5/data/SKU-110K.yaml b/src/yolov5_ros/src/yolov5/data/SKU-110K.yaml deleted file mode 100644 index edae717..0000000 --- a/src/yolov5_ros/src/yolov5/data/SKU-110K.yaml +++ /dev/null @@ -1,53 +0,0 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license -# SKU-110K retail items dataset https://github.com/eg4000/SKU110K_CVPR19 by Trax Retail -# Example usage: python train.py --data SKU-110K.yaml -# parent -# ├── yolov5 -# └── datasets -# └── SKU-110K ← downloads here (13.6 GB) - - -# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..] 
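The Objects365 script above and the SKU-110K script just below both funnel corner-format boxes through the repo's xyxy2xywh(n) helpers before writing label rows. A rough numpy-only stand-in for the normalized variant, to make the clipping and centering explicit (a reimplementation for illustration, not the repo's code):

import numpy as np

def xyxy_to_xywhn(xyxy, w, h, clip=True, eps=1e-7):
    """Convert Nx4 (x1, y1, x2, y2) pixel boxes to normalized (xc, yc, w, h)."""
    xyxy = np.asarray(xyxy, dtype=np.float64).copy()
    if clip:  # keep corners inside the image before normalizing
        xyxy[:, [0, 2]] = xyxy[:, [0, 2]].clip(0, w - eps)
        xyxy[:, [1, 3]] = xyxy[:, [1, 3]].clip(0, h - eps)
    out = np.empty_like(xyxy)
    out[:, 0] = (xyxy[:, 0] + xyxy[:, 2]) / 2 / w  # x center
    out[:, 1] = (xyxy[:, 1] + xyxy[:, 3]) / 2 / h  # y center
    out[:, 2] = (xyxy[:, 2] - xyxy[:, 0]) / w      # width
    out[:, 3] = (xyxy[:, 3] - xyxy[:, 1]) / h      # height
    return out

print(xyxy_to_xywhn([[0, 0, 50, 100]], w=100, h=200))  # [[0.25 0.25 0.5 0.5]]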
-path: ../datasets/SKU-110K # dataset root dir -train: train.txt # train images (relative to 'path') 8219 images -val: val.txt # val images (relative to 'path') 588 images -test: test.txt # test images (optional) 2936 images - -# Classes -names: - 0: object - - -# Download script/URL (optional) --------------------------------------------------------------------------------------- -download: | - import shutil - from tqdm import tqdm - from utils.general import np, pd, Path, download, xyxy2xywh - - - # Download - dir = Path(yaml['path']) # dataset root dir - parent = Path(dir.parent) # download dir - urls = ['http://trax-geometry.s3.amazonaws.com/cvpr_challenge/SKU110K_fixed.tar.gz'] - download(urls, dir=parent, delete=False) - - # Rename directories - if dir.exists(): - shutil.rmtree(dir) - (parent / 'SKU110K_fixed').rename(dir) # rename dir - (dir / 'labels').mkdir(parents=True, exist_ok=True) # create labels dir - - # Convert labels - names = 'image', 'x1', 'y1', 'x2', 'y2', 'class', 'image_width', 'image_height' # column names - for d in 'annotations_train.csv', 'annotations_val.csv', 'annotations_test.csv': - x = pd.read_csv(dir / 'annotations' / d, names=names).values # annotations - images, unique_images = x[:, 0], np.unique(x[:, 0]) - with open((dir / d).with_suffix('.txt').__str__().replace('annotations_', ''), 'w') as f: - f.writelines(f'./images/{s}\n' for s in unique_images) - for im in tqdm(unique_images, desc=f'Converting {dir / d}'): - cls = 0 # single-class dataset - with open((dir / 'labels' / im).with_suffix('.txt'), 'a') as f: - for r in x[images == im]: - w, h = r[6], r[7] # image width, height - xywh = xyxy2xywh(np.array([[r[1] / w, r[2] / h, r[3] / w, r[4] / h]]))[0] # instance - f.write(f"{cls} {xywh[0]:.5f} {xywh[1]:.5f} {xywh[2]:.5f} {xywh[3]:.5f}\n") # write label diff --git a/src/yolov5_ros/src/yolov5/data/VOC.yaml b/src/yolov5_ros/src/yolov5/data/VOC.yaml deleted file mode 100644 index 27d3810..0000000 --- a/src/yolov5_ros/src/yolov5/data/VOC.yaml +++ /dev/null @@ -1,100 +0,0 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license -# PASCAL VOC dataset http://host.robots.ox.ac.uk/pascal/VOC by University of Oxford -# Example usage: python train.py --data VOC.yaml -# parent -# ├── yolov5 -# └── datasets -# └── VOC ← downloads here (2.8 GB) - - -# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..] -path: ../datasets/VOC -train: # train images (relative to 'path') 16551 images - - images/train2012 - - images/train2007 - - images/val2012 - - images/val2007 -val: # val images (relative to 'path') 4952 images - - images/test2007 -test: # test images (optional) - - images/test2007 - -# Classes -names: - 0: aeroplane - 1: bicycle - 2: bird - 3: boat - 4: bottle - 5: bus - 6: car - 7: cat - 8: chair - 9: cow - 10: diningtable - 11: dog - 12: horse - 13: motorbike - 14: person - 15: pottedplant - 16: sheep - 17: sofa - 18: train - 19: tvmonitor - - -# Download script/URL (optional) --------------------------------------------------------------------------------------- -download: | - import xml.etree.ElementTree as ET - - from tqdm import tqdm - from utils.general import download, Path - - - def convert_label(path, lb_path, year, image_id): - def convert_box(size, box): - dw, dh = 1. / size[0], 1. 
/ size[1] - x, y, w, h = (box[0] + box[1]) / 2.0 - 1, (box[2] + box[3]) / 2.0 - 1, box[1] - box[0], box[3] - box[2] - return x * dw, y * dh, w * dw, h * dh - - in_file = open(path / f'VOC{year}/Annotations/{image_id}.xml') - out_file = open(lb_path, 'w') - tree = ET.parse(in_file) - root = tree.getroot() - size = root.find('size') - w = int(size.find('width').text) - h = int(size.find('height').text) - - names = list(yaml['names'].values()) # names list - for obj in root.iter('object'): - cls = obj.find('name').text - if cls in names and int(obj.find('difficult').text) != 1: - xmlbox = obj.find('bndbox') - bb = convert_box((w, h), [float(xmlbox.find(x).text) for x in ('xmin', 'xmax', 'ymin', 'ymax')]) - cls_id = names.index(cls) # class id - out_file.write(" ".join([str(a) for a in (cls_id, *bb)]) + '\n') - - - # Download - dir = Path(yaml['path']) # dataset root dir - url = 'https://github.com/ultralytics/yolov5/releases/download/v1.0/' - urls = [f'{url}VOCtrainval_06-Nov-2007.zip', # 446MB, 5012 images - f'{url}VOCtest_06-Nov-2007.zip', # 438MB, 4953 images - f'{url}VOCtrainval_11-May-2012.zip'] # 1.95GB, 17126 images - download(urls, dir=dir / 'images', delete=False, curl=True, threads=3) - - # Convert - path = dir / 'images/VOCdevkit' - for year, image_set in ('2012', 'train'), ('2012', 'val'), ('2007', 'train'), ('2007', 'val'), ('2007', 'test'): - imgs_path = dir / 'images' / f'{image_set}{year}' - lbs_path = dir / 'labels' / f'{image_set}{year}' - imgs_path.mkdir(exist_ok=True, parents=True) - lbs_path.mkdir(exist_ok=True, parents=True) - - with open(path / f'VOC{year}/ImageSets/Main/{image_set}.txt') as f: - image_ids = f.read().strip().split() - for id in tqdm(image_ids, desc=f'{image_set}{year}'): - f = path / f'VOC{year}/JPEGImages/{id}.jpg' # old img path - lb_path = (lbs_path / f.name).with_suffix('.txt') # new label path - f.rename(imgs_path / f.name) # move image - convert_label(path, lb_path, year, id) # convert labels to YOLO format diff --git a/src/yolov5_ros/src/yolov5/data/VisDrone.yaml b/src/yolov5_ros/src/yolov5/data/VisDrone.yaml deleted file mode 100644 index a8bcf8e..0000000 --- a/src/yolov5_ros/src/yolov5/data/VisDrone.yaml +++ /dev/null @@ -1,70 +0,0 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license -# VisDrone2019-DET dataset https://github.com/VisDrone/VisDrone-Dataset by Tianjin University -# Example usage: python train.py --data VisDrone.yaml -# parent -# ├── yolov5 -# └── datasets -# └── VisDrone ← downloads here (2.3 GB) - - -# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..] -path: ../datasets/VisDrone # dataset root dir -train: VisDrone2019-DET-train/images # train images (relative to 'path') 6471 images -val: VisDrone2019-DET-val/images # val images (relative to 'path') 548 images -test: VisDrone2019-DET-test-dev/images # test images (optional) 1610 images - -# Classes -names: - 0: pedestrian - 1: people - 2: bicycle - 3: car - 4: van - 5: truck - 6: tricycle - 7: awning-tricycle - 8: bus - 9: motor - - -# Download script/URL (optional) --------------------------------------------------------------------------------------- -download: | - from utils.general import download, os, Path - - def visdrone2yolo(dir): - from PIL import Image - from tqdm import tqdm - - def convert_box(size, box): - # Convert VisDrone box to YOLO xywh box - dw = 1. / size[0] - dh = 1. 
/ size[1] - return (box[0] + box[2] / 2) * dw, (box[1] + box[3] / 2) * dh, box[2] * dw, box[3] * dh - - (dir / 'labels').mkdir(parents=True, exist_ok=True) # make labels directory - pbar = tqdm((dir / 'annotations').glob('*.txt'), desc=f'Converting {dir}') - for f in pbar: - img_size = Image.open((dir / 'images' / f.name).with_suffix('.jpg')).size - lines = [] - with open(f, 'r') as file: # read annotation.txt - for row in [x.split(',') for x in file.read().strip().splitlines()]: - if row[4] == '0': # VisDrone 'ignored regions' class 0 - continue - cls = int(row[5]) - 1 - box = convert_box(img_size, tuple(map(int, row[:4]))) - lines.append(f"{cls} {' '.join(f'{x:.6f}' for x in box)}\n") - with open(str(f).replace(os.sep + 'annotations' + os.sep, os.sep + 'labels' + os.sep), 'w') as fl: - fl.writelines(lines) # write label.txt - - - # Download - dir = Path(yaml['path']) # dataset root dir - urls = ['https://github.com/ultralytics/yolov5/releases/download/v1.0/VisDrone2019-DET-train.zip', - 'https://github.com/ultralytics/yolov5/releases/download/v1.0/VisDrone2019-DET-val.zip', - 'https://github.com/ultralytics/yolov5/releases/download/v1.0/VisDrone2019-DET-test-dev.zip', - 'https://github.com/ultralytics/yolov5/releases/download/v1.0/VisDrone2019-DET-test-challenge.zip'] - download(urls, dir=dir, curl=True, threads=4) - - # Convert - for d in 'VisDrone2019-DET-train', 'VisDrone2019-DET-val', 'VisDrone2019-DET-test-dev': - visdrone2yolo(dir / d) # convert VisDrone annotations to YOLO labels diff --git a/src/yolov5_ros/src/yolov5/data/coco.yaml b/src/yolov5_ros/src/yolov5/data/coco.yaml deleted file mode 100644 index d64dfc7..0000000 --- a/src/yolov5_ros/src/yolov5/data/coco.yaml +++ /dev/null @@ -1,116 +0,0 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license -# COCO 2017 dataset http://cocodataset.org by Microsoft -# Example usage: python train.py --data coco.yaml -# parent -# ├── yolov5 -# └── datasets -# └── coco ← downloads here (20.1 GB) - - -# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..] 
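A detail that applies to every multi-line download: block in these YAMLs (VisDrone above, COCO below): the scripts are executed as Python by YOLOv5 with the parsed YAML dict bound to the name yaml, which is why they can write Path(yaml['path']). A hedged sketch of that contract, simplified from what the repo's dataset checker does and ignoring the URL and shell-script variants:

import yaml as pyyaml  # PyYAML
from pathlib import Path

def run_download_field(yaml_file):
    """Execute a dataset YAML's multi-line 'download:' script, YOLOv5-style."""
    data = pyyaml.safe_load(Path(yaml_file).read_text())
    script = data.get('download')
    if script and '\n' in script and not script.startswith('http'):
        exec(script, {'yaml': data})  # the script sees the parsed dict as 'yaml'

# run_download_field('data/VisDrone.yaml')  # would fetch and convert as above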
-path: ../datasets/coco # dataset root dir -train: train2017.txt # train images (relative to 'path') 118287 images -val: val2017.txt # val images (relative to 'path') 5000 images -test: test-dev2017.txt # 20288 of 40670 images, submit to https://competitions.codalab.org/competitions/20794 - -# Classes -names: - 0: person - 1: bicycle - 2: car - 3: motorcycle - 4: airplane - 5: bus - 6: train - 7: truck - 8: boat - 9: traffic light - 10: fire hydrant - 11: stop sign - 12: parking meter - 13: bench - 14: bird - 15: cat - 16: dog - 17: horse - 18: sheep - 19: cow - 20: elephant - 21: bear - 22: zebra - 23: giraffe - 24: backpack - 25: umbrella - 26: handbag - 27: tie - 28: suitcase - 29: frisbee - 30: skis - 31: snowboard - 32: sports ball - 33: kite - 34: baseball bat - 35: baseball glove - 36: skateboard - 37: surfboard - 38: tennis racket - 39: bottle - 40: wine glass - 41: cup - 42: fork - 43: knife - 44: spoon - 45: bowl - 46: banana - 47: apple - 48: sandwich - 49: orange - 50: broccoli - 51: carrot - 52: hot dog - 53: pizza - 54: donut - 55: cake - 56: chair - 57: couch - 58: potted plant - 59: bed - 60: dining table - 61: toilet - 62: tv - 63: laptop - 64: mouse - 65: remote - 66: keyboard - 67: cell phone - 68: microwave - 69: oven - 70: toaster - 71: sink - 72: refrigerator - 73: book - 74: clock - 75: vase - 76: scissors - 77: teddy bear - 78: hair drier - 79: toothbrush - - -# Download script/URL (optional) -download: | - from utils.general import download, Path - - - # Download labels - segments = False # segment or box labels - dir = Path(yaml['path']) # dataset root dir - url = 'https://github.com/ultralytics/yolov5/releases/download/v1.0/' - urls = [url + ('coco2017labels-segments.zip' if segments else 'coco2017labels.zip')] # labels - download(urls, dir=dir.parent) - - # Download data - urls = ['http://images.cocodataset.org/zips/train2017.zip', # 19G, 118k images - 'http://images.cocodataset.org/zips/val2017.zip', # 1G, 5k images - 'http://images.cocodataset.org/zips/test2017.zip'] # 7G, 41k images (optional) - download(urls, dir=dir / 'images', threads=3) diff --git a/src/yolov5_ros/src/yolov5/data/coco128.yaml b/src/yolov5_ros/src/yolov5/data/coco128.yaml deleted file mode 100644 index 1255673..0000000 --- a/src/yolov5_ros/src/yolov5/data/coco128.yaml +++ /dev/null @@ -1,101 +0,0 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license -# COCO128 dataset https://www.kaggle.com/ultralytics/coco128 (first 128 images from COCO train2017) by Ultralytics -# Example usage: python train.py --data coco128.yaml -# parent -# ├── yolov5 -# └── datasets -# └── coco128 ← downloads here (7 MB) - - -# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..] 
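The coco.yaml script above fetches images with download(urls, dir=..., threads=3); the Ultralytics helper also unzips archives and can use curl or retry. A bare-bones standard-library stand-in that captures just the threaded-fetch shape (names and behavior are ours; no unzip or retry logic):

from concurrent.futures import ThreadPoolExecutor
from pathlib import Path
from urllib.request import urlretrieve

def download_all(urls, dir='.', threads=3):
    """Fetch each URL into dir, a few at a time (simplified download())."""
    dir = Path(dir)
    dir.mkdir(parents=True, exist_ok=True)

    def fetch(url):
        dest = dir / url.split('/')[-1]
        if not dest.exists():  # skip files already on disk
            urlretrieve(url, dest)
        return dest

    with ThreadPoolExecutor(max_workers=threads) as pool:
        return list(pool.map(fetch, urls))

# download_all(['http://images.cocodataset.org/zips/val2017.zip'], dir='../datasets/coco/images')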
-path: ../datasets/coco128 # dataset root dir -train: images/train2017 # train images (relative to 'path') 128 images -val: images/train2017 # val images (relative to 'path') 128 images -test: # test images (optional) - -# Classes -names: - 0: person - 1: bicycle - 2: car - 3: motorcycle - 4: airplane - 5: bus - 6: train - 7: truck - 8: boat - 9: traffic light - 10: fire hydrant - 11: stop sign - 12: parking meter - 13: bench - 14: bird - 15: cat - 16: dog - 17: horse - 18: sheep - 19: cow - 20: elephant - 21: bear - 22: zebra - 23: giraffe - 24: backpack - 25: umbrella - 26: handbag - 27: tie - 28: suitcase - 29: frisbee - 30: skis - 31: snowboard - 32: sports ball - 33: kite - 34: baseball bat - 35: baseball glove - 36: skateboard - 37: surfboard - 38: tennis racket - 39: bottle - 40: wine glass - 41: cup - 42: fork - 43: knife - 44: spoon - 45: bowl - 46: banana - 47: apple - 48: sandwich - 49: orange - 50: broccoli - 51: carrot - 52: hot dog - 53: pizza - 54: donut - 55: cake - 56: chair - 57: couch - 58: potted plant - 59: bed - 60: dining table - 61: toilet - 62: tv - 63: laptop - 64: mouse - 65: remote - 66: keyboard - 67: cell phone - 68: microwave - 69: oven - 70: toaster - 71: sink - 72: refrigerator - 73: book - 74: clock - 75: vase - 76: scissors - 77: teddy bear - 78: hair drier - 79: toothbrush - - -# Download script/URL (optional) -download: https://ultralytics.com/assets/coco128.zip diff --git a/src/yolov5_ros/src/yolov5/data/hyps/hyp.Objects365.yaml b/src/yolov5_ros/src/yolov5/data/hyps/hyp.Objects365.yaml deleted file mode 100644 index 7497174..0000000 --- a/src/yolov5_ros/src/yolov5/data/hyps/hyp.Objects365.yaml +++ /dev/null @@ -1,34 +0,0 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license -# Hyperparameters for Objects365 training -# python train.py --weights yolov5m.pt --data Objects365.yaml --evolve -# See Hyperparameter Evolution tutorial for details https://github.com/ultralytics/yolov5#tutorials - -lr0: 0.00258 -lrf: 0.17 -momentum: 0.779 -weight_decay: 0.00058 -warmup_epochs: 1.33 -warmup_momentum: 0.86 -warmup_bias_lr: 0.0711 -box: 0.0539 -cls: 0.299 -cls_pw: 0.825 -obj: 0.632 -obj_pw: 1.0 -iou_t: 0.2 -anchor_t: 3.44 -anchors: 3.2 -fl_gamma: 0.0 -hsv_h: 0.0188 -hsv_s: 0.704 -hsv_v: 0.36 -degrees: 0.0 -translate: 0.0902 -scale: 0.491 -shear: 0.0 -perspective: 0.0 -flipud: 0.0 -fliplr: 0.5 -mosaic: 1.0 -mixup: 0.0 -copy_paste: 0.0 diff --git a/src/yolov5_ros/src/yolov5/data/hyps/hyp.VOC.yaml b/src/yolov5_ros/src/yolov5/data/hyps/hyp.VOC.yaml deleted file mode 100644 index 0aa4e7d..0000000 --- a/src/yolov5_ros/src/yolov5/data/hyps/hyp.VOC.yaml +++ /dev/null @@ -1,40 +0,0 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license -# Hyperparameters for VOC training -# python train.py --batch 128 --weights yolov5m6.pt --data VOC.yaml --epochs 50 --img 512 --hyp hyp.scratch-med.yaml --evolve -# See Hyperparameter Evolution tutorial for details https://github.com/ultralytics/yolov5#tutorials - -# YOLOv5 Hyperparameter Evolution Results -# Best generation: 467 -# Last generation: 996 -# metrics/precision, metrics/recall, metrics/mAP_0.5, metrics/mAP_0.5:0.95, val/box_loss, val/obj_loss, val/cls_loss -# 0.87729, 0.85125, 0.91286, 0.72664, 0.0076739, 0.0042529, 0.0013865 - -lr0: 0.00334 -lrf: 0.15135 -momentum: 0.74832 -weight_decay: 0.00025 -warmup_epochs: 3.3835 -warmup_momentum: 0.59462 -warmup_bias_lr: 0.18657 -box: 0.02 -cls: 0.21638 -cls_pw: 0.5 -obj: 0.51728 -obj_pw: 0.67198 -iou_t: 0.2 -anchor_t: 3.3744 -fl_gamma: 0.0 -hsv_h: 0.01041 -hsv_s: 0.54703 -hsv_v: 0.27739 -degrees: 0.0 
-translate: 0.04591 -scale: 0.75544 -shear: 0.0 -perspective: 0.0 -flipud: 0.0 -fliplr: 0.5 -mosaic: 0.85834 -mixup: 0.04266 -copy_paste: 0.0 -anchors: 3.412 diff --git a/src/yolov5_ros/src/yolov5/data/hyps/hyp.scratch-high.yaml b/src/yolov5_ros/src/yolov5/data/hyps/hyp.scratch-high.yaml deleted file mode 100644 index 123cc84..0000000 --- a/src/yolov5_ros/src/yolov5/data/hyps/hyp.scratch-high.yaml +++ /dev/null @@ -1,34 +0,0 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license -# Hyperparameters for high-augmentation COCO training from scratch -# python train.py --batch 32 --cfg yolov5m6.yaml --weights '' --data coco.yaml --img 1280 --epochs 300 -# See tutorials for hyperparameter evolution https://github.com/ultralytics/yolov5#tutorials - -lr0: 0.01 # initial learning rate (SGD=1E-2, Adam=1E-3) -lrf: 0.1 # final OneCycleLR learning rate (lr0 * lrf) -momentum: 0.937 # SGD momentum/Adam beta1 -weight_decay: 0.0005 # optimizer weight decay 5e-4 -warmup_epochs: 3.0 # warmup epochs (fractions ok) -warmup_momentum: 0.8 # warmup initial momentum -warmup_bias_lr: 0.1 # warmup initial bias lr -box: 0.05 # box loss gain -cls: 0.3 # cls loss gain -cls_pw: 1.0 # cls BCELoss positive_weight -obj: 0.7 # obj loss gain (scale with pixels) -obj_pw: 1.0 # obj BCELoss positive_weight -iou_t: 0.20 # IoU training threshold -anchor_t: 4.0 # anchor-multiple threshold -# anchors: 3 # anchors per output layer (0 to ignore) -fl_gamma: 0.0 # focal loss gamma (efficientDet default gamma=1.5) -hsv_h: 0.015 # image HSV-Hue augmentation (fraction) -hsv_s: 0.7 # image HSV-Saturation augmentation (fraction) -hsv_v: 0.4 # image HSV-Value augmentation (fraction) -degrees: 0.0 # image rotation (+/- deg) -translate: 0.1 # image translation (+/- fraction) -scale: 0.9 # image scale (+/- gain) -shear: 0.0 # image shear (+/- deg) -perspective: 0.0 # image perspective (+/- fraction), range 0-0.001 -flipud: 0.0 # image flip up-down (probability) -fliplr: 0.5 # image flip left-right (probability) -mosaic: 1.0 # image mosaic (probability) -mixup: 0.1 # image mixup (probability) -copy_paste: 0.1 # segment copy-paste (probability) diff --git a/src/yolov5_ros/src/yolov5/data/hyps/hyp.scratch-low.yaml b/src/yolov5_ros/src/yolov5/data/hyps/hyp.scratch-low.yaml deleted file mode 100644 index b9ef1d5..0000000 --- a/src/yolov5_ros/src/yolov5/data/hyps/hyp.scratch-low.yaml +++ /dev/null @@ -1,34 +0,0 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license -# Hyperparameters for low-augmentation COCO training from scratch -# python train.py --batch 64 --cfg yolov5n6.yaml --weights '' --data coco.yaml --img 640 --epochs 300 --linear -# See tutorials for hyperparameter evolution https://github.com/ultralytics/yolov5#tutorials - -lr0: 0.01 # initial learning rate (SGD=1E-2, Adam=1E-3) -lrf: 0.01 # final OneCycleLR learning rate (lr0 * lrf) -momentum: 0.937 # SGD momentum/Adam beta1 -weight_decay: 0.0005 # optimizer weight decay 5e-4 -warmup_epochs: 3.0 # warmup epochs (fractions ok) -warmup_momentum: 0.8 # warmup initial momentum -warmup_bias_lr: 0.1 # warmup initial bias lr -box: 0.05 # box loss gain -cls: 0.5 # cls loss gain -cls_pw: 1.0 # cls BCELoss positive_weight -obj: 1.0 # obj loss gain (scale with pixels) -obj_pw: 1.0 # obj BCELoss positive_weight -iou_t: 0.20 # IoU training threshold -anchor_t: 4.0 # anchor-multiple threshold -# anchors: 3 # anchors per output layer (0 to ignore) -fl_gamma: 0.0 # focal loss gamma (efficientDet default gamma=1.5) -hsv_h: 0.015 # image HSV-Hue augmentation (fraction) -hsv_s: 0.7 # image HSV-Saturation augmentation 
(fraction) -hsv_v: 0.4 # image HSV-Value augmentation (fraction) -degrees: 0.0 # image rotation (+/- deg) -translate: 0.1 # image translation (+/- fraction) -scale: 0.5 # image scale (+/- gain) -shear: 0.0 # image shear (+/- deg) -perspective: 0.0 # image perspective (+/- fraction), range 0-0.001 -flipud: 0.0 # image flip up-down (probability) -fliplr: 0.5 # image flip left-right (probability) -mosaic: 1.0 # image mosaic (probability) -mixup: 0.0 # image mixup (probability) -copy_paste: 0.0 # segment copy-paste (probability) diff --git a/src/yolov5_ros/src/yolov5/data/hyps/hyp.scratch-med.yaml b/src/yolov5_ros/src/yolov5/data/hyps/hyp.scratch-med.yaml deleted file mode 100644 index d6867d7..0000000 --- a/src/yolov5_ros/src/yolov5/data/hyps/hyp.scratch-med.yaml +++ /dev/null @@ -1,34 +0,0 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license -# Hyperparameters for medium-augmentation COCO training from scratch -# python train.py --batch 32 --cfg yolov5m6.yaml --weights '' --data coco.yaml --img 1280 --epochs 300 -# See tutorials for hyperparameter evolution https://github.com/ultralytics/yolov5#tutorials - -lr0: 0.01 # initial learning rate (SGD=1E-2, Adam=1E-3) -lrf: 0.1 # final OneCycleLR learning rate (lr0 * lrf) -momentum: 0.937 # SGD momentum/Adam beta1 -weight_decay: 0.0005 # optimizer weight decay 5e-4 -warmup_epochs: 3.0 # warmup epochs (fractions ok) -warmup_momentum: 0.8 # warmup initial momentum -warmup_bias_lr: 0.1 # warmup initial bias lr -box: 0.05 # box loss gain -cls: 0.3 # cls loss gain -cls_pw: 1.0 # cls BCELoss positive_weight -obj: 0.7 # obj loss gain (scale with pixels) -obj_pw: 1.0 # obj BCELoss positive_weight -iou_t: 0.20 # IoU training threshold -anchor_t: 4.0 # anchor-multiple threshold -# anchors: 3 # anchors per output layer (0 to ignore) -fl_gamma: 0.0 # focal loss gamma (efficientDet default gamma=1.5) -hsv_h: 0.015 # image HSV-Hue augmentation (fraction) -hsv_s: 0.7 # image HSV-Saturation augmentation (fraction) -hsv_v: 0.4 # image HSV-Value augmentation (fraction) -degrees: 0.0 # image rotation (+/- deg) -translate: 0.1 # image translation (+/- fraction) -scale: 0.9 # image scale (+/- gain) -shear: 0.0 # image shear (+/- deg) -perspective: 0.0 # image perspective (+/- fraction), range 0-0.001 -flipud: 0.0 # image flip up-down (probability) -fliplr: 0.5 # image flip left-right (probability) -mosaic: 1.0 # image mosaic (probability) -mixup: 0.1 # image mixup (probability) -copy_paste: 0.0 # segment copy-paste (probability) diff --git a/src/yolov5_ros/src/yolov5/data/images/bus.jpg b/src/yolov5_ros/src/yolov5/data/images/bus.jpg deleted file mode 100644 index b43e311..0000000 Binary files a/src/yolov5_ros/src/yolov5/data/images/bus.jpg and /dev/null differ diff --git a/src/yolov5_ros/src/yolov5/data/images/zidane.jpg b/src/yolov5_ros/src/yolov5/data/images/zidane.jpg deleted file mode 100644 index 92d72ea..0000000 Binary files a/src/yolov5_ros/src/yolov5/data/images/zidane.jpg and /dev/null differ diff --git a/src/yolov5_ros/src/yolov5/data/mydata.yaml b/src/yolov5_ros/src/yolov5/data/mydata.yaml deleted file mode 100644 index 44adb0e..0000000 --- a/src/yolov5_ros/src/yolov5/data/mydata.yaml +++ /dev/null @@ -1,13 +0,0 @@ -train: ../../coco/train/images -val: ../../coco/valid/images -test: ../../coco/images - -nc: 1 -names: ['1'] - -roboflow: - workspace: project - project: 1-3wivj - version: 1 - license: CC BY 4.0 - url: https://universe.roboflow.com/project/1-3wivj/dataset/1 diff --git a/src/yolov5_ros/src/yolov5/data/scripts/download_weights.sh 
b/src/yolov5_ros/src/yolov5/data/scripts/download_weights.sh deleted file mode 100755 index a4f3bec..0000000 --- a/src/yolov5_ros/src/yolov5/data/scripts/download_weights.sh +++ /dev/null @@ -1,21 +0,0 @@ -#!/bin/bash -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license -# Download latest models from https://github.com/ultralytics/yolov5/releases -# Example usage: bash data/scripts/download_weights.sh -# parent -# └── yolov5 -# ├── yolov5s.pt ← downloads here -# ├── yolov5m.pt -# └── ... - -python - <= cls >= 0, f'incorrect class index {cls}' - - # Write YOLO label - if id not in shapes: - shapes[id] = Image.open(file).size - box = xyxy2xywhn(box[None].astype(np.float), w=shapes[id][0], h=shapes[id][1], clip=True) - with open((labels / id).with_suffix('.txt'), 'a') as f: - f.write(f"{cls} {' '.join(f'{x:.6f}' for x in box[0])}\n") # write label.txt - except Exception as e: - print(f'WARNING: skipping one label for {file}: {e}') - - - # Download manually from https://challenge.xviewdataset.org - dir = Path(yaml['path']) # dataset root dir - # urls = ['https://d307kc0mrhucc3.cloudfront.net/train_labels.zip', # train labels - # 'https://d307kc0mrhucc3.cloudfront.net/train_images.zip', # 15G, 847 train images - # 'https://d307kc0mrhucc3.cloudfront.net/val_images.zip'] # 5G, 282 val images (no labels) - # download(urls, dir=dir, delete=False) - - # Convert labels - convert_labels(dir / 'xView_train.geojson') - - # Move images - images = Path(dir / 'images') - images.mkdir(parents=True, exist_ok=True) - Path(dir / 'train_images').rename(dir / 'images' / 'train') - Path(dir / 'val_images').rename(dir / 'images' / 'val') - - # Split - autosplit(dir / 'images' / 'train') diff --git a/src/yolov5_ros/src/yolov5/detect.py b/src/yolov5_ros/src/yolov5/detect.py deleted file mode 100644 index a69606a..0000000 --- a/src/yolov5_ros/src/yolov5/detect.py +++ /dev/null @@ -1,255 +0,0 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license -""" -Run YOLOv5 detection inference on images, videos, directories, globs, YouTube, webcam, streams, etc. 
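All of the converters above write the same five-field label row, 'class x_center y_center width height', with coordinates normalized to [0, 1]. For completeness, a small sketch of the inverse mapping back to pixel corners, which is what a detector such as detect.py ultimately draws (an illustrative helper, not repo code):

def yolo_row_to_xyxy(row, img_w, img_h):
    """Parse one label row into (cls, x1, y1, x2, y2) in pixels."""
    cls, xc, yc, w, h = row.split()
    xc, yc = float(xc) * img_w, float(yc) * img_h
    w, h = float(w) * img_w, float(h) * img_h
    return int(cls), xc - w / 2, yc - h / 2, xc + w / 2, yc + h / 2

print(yolo_row_to_xyxy('0 0.55 0.5 0.1 0.1', 1920, 1200))
# -> (0, 960.0, 540.0, 1152.0, 660.0)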
- -Usage - sources: - $ python detect.py --weights yolov5s.pt --source 0 # webcam - img.jpg # image - vid.mp4 # video - path/ # directory - 'path/*.jpg' # glob - 'https://youtu.be/Zgi9g1ksQHc' # YouTube - 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream - -Usage - formats: - $ python detect.py --weights yolov5s.pt # PyTorch - yolov5s.torchscript # TorchScript - yolov5s.onnx # ONNX Runtime or OpenCV DNN with --dnn - yolov5s.xml # OpenVINO - yolov5s.engine # TensorRT - yolov5s.mlmodel # CoreML (macOS-only) - yolov5s_saved_model # TensorFlow SavedModel - yolov5s.pb # TensorFlow GraphDef - yolov5s.tflite # TensorFlow Lite - yolov5s_edgetpu.tflite # TensorFlow Edge TPU - yolov5s_paddle_model # PaddlePaddle -""" - -import argparse -import os -import platform -import sys -from pathlib import Path - -import torch - -FILE = Path(__file__).resolve() -ROOT = FILE.parents[0] # YOLOv5 root directory -if str(ROOT) not in sys.path: - sys.path.append(str(ROOT)) # add ROOT to PATH -ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative - -from models.common import DetectMultiBackend -from utils.dataloaders import IMG_FORMATS, VID_FORMATS, LoadImages, LoadStreams -from utils.general import (LOGGER, Profile, check_file, check_img_size, check_imshow, check_requirements, colorstr, cv2, - increment_path, non_max_suppression, print_args, scale_coords, strip_optimizer, xyxy2xywh) -from utils.plots import Annotator, colors, save_one_box -from utils.torch_utils import select_device, smart_inference_mode - - -@smart_inference_mode() -def run( - weights=ROOT / 'yolov5s.pt', # model.pt path(s) - source=ROOT / 'data/images', # file/dir/URL/glob, 0 for webcam - data=ROOT / 'data/coco128.yaml', # dataset.yaml path - imgsz=(640, 640), # inference size (height, width) - conf_thres=0.25, # confidence threshold - iou_thres=0.45, # NMS IOU threshold - max_det=1000, # maximum detections per image - device='', # cuda device, i.e. 
0 or 0,1,2,3 or cpu - view_img=False, # show results - save_txt=False, # save results to *.txt - save_conf=False, # save confidences in --save-txt labels - save_crop=False, # save cropped prediction boxes - nosave=False, # do not save images/videos - classes=None, # filter by class: --class 0, or --class 0 2 3 - agnostic_nms=False, # class-agnostic NMS - augment=False, # augmented inference - visualize=False, # visualize features - update=False, # update all models - project=ROOT / 'runs/detect', # save results to project/name - name='exp', # save results to project/name - exist_ok=False, # existing project/name ok, do not increment - line_thickness=3, # bounding box thickness (pixels) - hide_labels=False, # hide labels - hide_conf=False, # hide confidences - half=False, # use FP16 half-precision inference - dnn=False, # use OpenCV DNN for ONNX inference - vid_stride=1, # video frame-rate stride -): - source = str(source) - save_img = not nosave and not source.endswith('.txt') # save inference images - is_file = Path(source).suffix[1:] in (IMG_FORMATS + VID_FORMATS) - is_url = source.lower().startswith(('rtsp://', 'rtmp://', 'http://', 'https://')) - webcam = source.isnumeric() or source.endswith('.txt') or (is_url and not is_file) - if is_url and is_file: - source = check_file(source) # download - - # Directories - save_dir = increment_path(Path(project) / name, exist_ok=exist_ok) # increment run - (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir - - # Load model - device = select_device(device) - model = DetectMultiBackend(weights, device=device, dnn=dnn, data=data, fp16=half) - stride, names, pt = model.stride, model.names, model.pt - imgsz = check_img_size(imgsz, s=stride) # check image size - - # Dataloader - if webcam: - view_img = check_imshow() - dataset = LoadStreams(source, img_size=imgsz, stride=stride, auto=pt, vid_stride=vid_stride) - bs = len(dataset) # batch_size - else: - dataset = LoadImages(source, img_size=imgsz, stride=stride, auto=pt, vid_stride=vid_stride) - bs = 1 # batch_size - vid_path, vid_writer = [None] * bs, [None] * bs - - # Run inference - model.warmup(imgsz=(1 if pt else bs, 3, *imgsz)) # warmup - seen, windows, dt = 0, [], (Profile(), Profile(), Profile()) - for path, im, im0s, vid_cap, s in dataset: - with dt[0]: - im = torch.from_numpy(im).to(device) - im = im.half() if model.fp16 else im.float() # uint8 to fp16/32 - im /= 255 # 0 - 255 to 0.0 - 1.0 - if len(im.shape) == 3: - im = im[None] # expand for batch dim - - # Inference - with dt[1]: - visualize = increment_path(save_dir / Path(path).stem, mkdir=True) if visualize else False - pred = model(im, augment=augment, visualize=visualize) - - # NMS - with dt[2]: - pred = non_max_suppression(pred, conf_thres, iou_thres, classes, agnostic_nms, max_det=max_det) - - # Second-stage classifier (optional) - # pred = utils.general.apply_classifier(pred, classifier_model, im, im0s) - - # Process predictions - for i, det in enumerate(pred): # per image - seen += 1 - if webcam: # batch_size >= 1 - p, im0, frame = path[i], im0s[i].copy(), dataset.count - s += f'{i}: ' - else: - p, im0, frame = path, im0s.copy(), getattr(dataset, 'frame', 0) - - p = Path(p) # to Path - save_path = str(save_dir / p.name) # im.jpg - txt_path = str(save_dir / 'labels' / p.stem) + ('' if dataset.mode == 'image' else f'_{frame}') # im.txt - s += '%gx%g ' % im.shape[2:] # print string - gn = torch.tensor(im0.shape)[[1, 0, 1, 0]] # normalization gain whwh - imc = im0.copy() if save_crop else im0 # 
for save_crop - annotator = Annotator(im0, line_width=line_thickness, example=str(names)) - if len(det): - # Rescale boxes from img_size to im0 size - det[:, :4] = scale_coords(im.shape[2:], det[:, :4], im0.shape).round() - - # Print results - for c in det[:, -1].unique(): - n = (det[:, -1] == c).sum() # detections per class - s += f"{n} {names[int(c)]}{'s' * (n > 1)}, " # add to string - - # Write results - for *xyxy, conf, cls in reversed(det): - if save_txt: # Write to file - xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh - line = (cls, *xywh, conf) if save_conf else (cls, *xywh) # label format - with open(f'{txt_path}.txt', 'a') as f: - f.write(('%g ' * len(line)).rstrip() % line + '\n') - - if save_img or save_crop or view_img: # Add bbox to image - c = int(cls) # integer class - label = None if hide_labels else (names[c] if hide_conf else f'{names[c]} {conf:.2f}') - annotator.box_label(xyxy, label, color=colors(c, True)) - if save_crop: - save_one_box(xyxy, imc, file=save_dir / 'crops' / names[c] / f'{p.stem}.jpg', BGR=True) - - # Stream results - im0 = annotator.result() - if view_img: - if platform.system() == 'Linux' and p not in windows: - windows.append(p) - cv2.namedWindow(str(p), cv2.WINDOW_NORMAL | cv2.WINDOW_KEEPRATIO) # allow window resize (Linux) - cv2.resizeWindow(str(p), im0.shape[1], im0.shape[0]) - cv2.imshow(str(p), im0) - cv2.waitKey(1) # 1 millisecond - - # Save results (image with detections) - if save_img: - if dataset.mode == 'image': - cv2.imwrite(save_path, im0) - else: # 'video' or 'stream' - if vid_path[i] != save_path: # new video - vid_path[i] = save_path - if isinstance(vid_writer[i], cv2.VideoWriter): - vid_writer[i].release() # release previous video writer - if vid_cap: # video - fps = vid_cap.get(cv2.CAP_PROP_FPS) - w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH)) - h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) - else: # stream - fps, w, h = 30, im0.shape[1], im0.shape[0] - save_path = str(Path(save_path).with_suffix('.mp4')) # force *.mp4 suffix on results videos - vid_writer[i] = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h)) - vid_writer[i].write(im0) - - # Print time (inference-only) - LOGGER.info(f"{s}{'' if len(det) else '(no detections), '}{dt[1].dt * 1E3:.1f}ms") - - # Print results - t = tuple(x.t / seen * 1E3 for x in dt) # speeds per image - LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {(1, 3, *imgsz)}' % t) - if save_txt or save_img: - s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else '' - LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}{s}") - if update: - strip_optimizer(weights[0]) # update model (to fix SourceChangeWarning) - - -def parse_opt(): - parser = argparse.ArgumentParser() - parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'yolov5s.pt', help='model path(s)') - parser.add_argument('--source', type=str, default=ROOT / 'data/images', help='file/dir/URL/glob, 0 for webcam') - parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='(optional) dataset.yaml path') - parser.add_argument('--imgsz', '--img', '--img-size', nargs='+', type=int, default=[640], help='inference size h,w') - parser.add_argument('--conf-thres', type=float, default=0.25, help='confidence threshold') - parser.add_argument('--iou-thres', type=float, default=0.45, help='NMS IoU threshold') - parser.add_argument('--max-det', type=int, 
default=1000, help='maximum detections per image') - parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu') - parser.add_argument('--view-img', action='store_true', help='show results') - parser.add_argument('--save-txt', action='store_true', help='save results to *.txt') - parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels') - parser.add_argument('--save-crop', action='store_true', help='save cropped prediction boxes') - parser.add_argument('--nosave', action='store_true', help='do not save images/videos') - parser.add_argument('--classes', nargs='+', type=int, help='filter by class: --classes 0, or --classes 0 2 3') - parser.add_argument('--agnostic-nms', action='store_true', help='class-agnostic NMS') - parser.add_argument('--augment', action='store_true', help='augmented inference') - parser.add_argument('--visualize', action='store_true', help='visualize features') - parser.add_argument('--update', action='store_true', help='update all models') - parser.add_argument('--project', default=ROOT / 'runs/detect', help='save results to project/name') - parser.add_argument('--name', default='exp', help='save results to project/name') - parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment') - parser.add_argument('--line-thickness', default=3, type=int, help='bounding box thickness (pixels)') - parser.add_argument('--hide-labels', default=False, action='store_true', help='hide labels') - parser.add_argument('--hide-conf', default=False, action='store_true', help='hide confidences') - parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference') - parser.add_argument('--dnn', action='store_true', help='use OpenCV DNN for ONNX inference') - parser.add_argument('--vid-stride', type=int, default=1, help='video frame-rate stride') - opt = parser.parse_args() - opt.imgsz *= 2 if len(opt.imgsz) == 1 else 1 # expand - print_args(vars(opt)) - return opt - - -def main(opt): - check_requirements(exclude=('tensorboard', 'thop')) - run(**vars(opt)) - - -if __name__ == "__main__": - opt = parse_opt() - main(opt) diff --git a/src/yolov5_ros/src/yolov5/export.py b/src/yolov5_ros/src/yolov5/export.py deleted file mode 100644 index 9d33024..0000000 --- a/src/yolov5_ros/src/yolov5/export.py +++ /dev/null @@ -1,612 +0,0 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license -""" -Export a YOLOv5 PyTorch model to other formats. TensorFlow exports authored by https://github.com/zldrobit - -Format | `export.py --include` | Model ---- | --- | --- -PyTorch | - | yolov5s.pt -TorchScript | `torchscript` | yolov5s.torchscript -ONNX | `onnx` | yolov5s.onnx -OpenVINO | `openvino` | yolov5s_openvino_model/ -TensorRT | `engine` | yolov5s.engine -CoreML | `coreml` | yolov5s.mlmodel -TensorFlow SavedModel | `saved_model` | yolov5s_saved_model/ -TensorFlow GraphDef | `pb` | yolov5s.pb -TensorFlow Lite | `tflite` | yolov5s.tflite -TensorFlow Edge TPU | `edgetpu` | yolov5s_edgetpu.tflite -TensorFlow.js | `tfjs` | yolov5s_web_model/ -PaddlePaddle | `paddle` | yolov5s_paddle_model/ - -Requirements: - $ pip install -r requirements.txt coremltools onnx onnx-simplifier onnxruntime openvino-dev tensorflow-cpu # CPU - $ pip install -r requirements.txt coremltools onnx onnx-simplifier onnxruntime-gpu openvino-dev tensorflow # GPU - -Usage: - $ python export.py --weights yolov5s.pt --include torchscript onnx openvino engine coreml tflite ... 
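As a sketch, the same pipeline can also be invoked from Python (assuming the yolov5 directory is importable); run() returns the list of exported files:

    from export import run

    # Equivalent to: python export.py --weights yolov5s.pt --include torchscript onnx
    files = run(weights='yolov5s.pt', imgsz=(640, 640), include=('torchscript', 'onnx'))
    print(files)  # e.g. ['yolov5s.torchscript', 'yolov5s.onnx'] on success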
- -Inference: - $ python detect.py --weights yolov5s.pt # PyTorch - yolov5s.torchscript # TorchScript - yolov5s.onnx # ONNX Runtime or OpenCV DNN with --dnn - yolov5s.xml # OpenVINO - yolov5s.engine # TensorRT - yolov5s.mlmodel # CoreML (macOS-only) - yolov5s_saved_model # TensorFlow SavedModel - yolov5s.pb # TensorFlow GraphDef - yolov5s.tflite # TensorFlow Lite - yolov5s_edgetpu.tflite # TensorFlow Edge TPU - yolov5s_paddle_model # PaddlePaddle - -TensorFlow.js: - $ cd .. && git clone https://github.com/zldrobit/tfjs-yolov5-example.git && cd tfjs-yolov5-example - $ npm install - $ ln -s ../../yolov5/yolov5s_web_model public/yolov5s_web_model - $ npm start -""" - -import argparse -import json -import os -import platform -import subprocess -import sys -import time -import warnings -from pathlib import Path - -import pandas as pd -import torch -from torch.utils.mobile_optimizer import optimize_for_mobile - -FILE = Path(__file__).resolve() -ROOT = FILE.parents[0] # YOLOv5 root directory -if str(ROOT) not in sys.path: - sys.path.append(str(ROOT)) # add ROOT to PATH -if platform.system() != 'Windows': - ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative - -from models.experimental import attempt_load -from models.yolo import ClassificationModel, Detect -from utils.dataloaders import LoadImages -from utils.general import (LOGGER, Profile, check_dataset, check_img_size, check_requirements, check_version, - check_yaml, colorstr, file_size, get_default_args, print_args, url2file, yaml_save) -from utils.torch_utils import select_device, smart_inference_mode - - -def export_formats(): - # YOLOv5 export formats - x = [ - ['PyTorch', '-', '.pt', True, True], - ['TorchScript', 'torchscript', '.torchscript', True, True], - ['ONNX', 'onnx', '.onnx', True, True], - ['OpenVINO', 'openvino', '_openvino_model', True, False], - ['TensorRT', 'engine', '.engine', False, True], - ['CoreML', 'coreml', '.mlmodel', True, False], - ['TensorFlow SavedModel', 'saved_model', '_saved_model', True, True], - ['TensorFlow GraphDef', 'pb', '.pb', True, True], - ['TensorFlow Lite', 'tflite', '.tflite', True, False], - ['TensorFlow Edge TPU', 'edgetpu', '_edgetpu.tflite', False, False], - ['TensorFlow.js', 'tfjs', '_web_model', False, False], - ['PaddlePaddle', 'paddle', '_paddle_model', True, True],] - return pd.DataFrame(x, columns=['Format', 'Argument', 'Suffix', 'CPU', 'GPU']) - - -def try_export(inner_func): - # YOLOv5 export decorator, i..e @try_export - inner_args = get_default_args(inner_func) - - def outer_func(*args, **kwargs): - prefix = inner_args['prefix'] - try: - with Profile() as dt: - f, model = inner_func(*args, **kwargs) - LOGGER.info(f'{prefix} export success ✅ {dt.t:.1f}s, saved as {f} ({file_size(f):.1f} MB)') - return f, model - except Exception as e: - LOGGER.info(f'{prefix} export failure ❌ {dt.t:.1f}s: {e}') - return None, None - - return outer_func - - -@try_export -def export_torchscript(model, im, file, optimize, prefix=colorstr('TorchScript:')): - # YOLOv5 TorchScript model export - LOGGER.info(f'\n{prefix} starting export with torch {torch.__version__}...') - f = file.with_suffix('.torchscript') - - ts = torch.jit.trace(model, im, strict=False) - d = {"shape": im.shape, "stride": int(max(model.stride)), "names": model.names} - extra_files = {'config.txt': json.dumps(d)} # torch._C.ExtraFilesMap() - if optimize: # https://pytorch.org/tutorials/recipes/mobile_interpreter.html - optimize_for_mobile(ts)._save_for_lite_interpreter(str(f), _extra_files=extra_files) - else: - ts.save(str(f), 
_extra_files=extra_files) - return f, None - - -@try_export -def export_onnx(model, im, file, opset, train, dynamic, simplify, prefix=colorstr('ONNX:')): - # YOLOv5 ONNX export - check_requirements('onnx') - import onnx - - LOGGER.info(f'\n{prefix} starting export with onnx {onnx.__version__}...') - f = file.with_suffix('.onnx') - - torch.onnx.export( - model.cpu() if dynamic else model, # --dynamic only compatible with cpu - im.cpu() if dynamic else im, - f, - verbose=False, - opset_version=opset, - training=torch.onnx.TrainingMode.TRAINING if train else torch.onnx.TrainingMode.EVAL, - do_constant_folding=not train, - input_names=['images'], - output_names=['output'], - dynamic_axes={ - 'images': { - 0: 'batch', - 2: 'height', - 3: 'width'}, # shape(1,3,640,640) - 'output': { - 0: 'batch', - 1: 'anchors'} # shape(1,25200,85) - } if dynamic else None) - - # Checks - model_onnx = onnx.load(f) # load onnx model - onnx.checker.check_model(model_onnx) # check onnx model - - # Metadata - d = {'stride': int(max(model.stride)), 'names': model.names} - for k, v in d.items(): - meta = model_onnx.metadata_props.add() - meta.key, meta.value = k, str(v) - onnx.save(model_onnx, f) - - # Simplify - if simplify: - try: - cuda = torch.cuda.is_available() - check_requirements(('onnxruntime-gpu' if cuda else 'onnxruntime', 'onnx-simplifier>=0.4.1')) - import onnxsim - - LOGGER.info(f'{prefix} simplifying with onnx-simplifier {onnxsim.__version__}...') - model_onnx, check = onnxsim.simplify(model_onnx) - assert check, 'assert check failed' - onnx.save(model_onnx, f) - except Exception as e: - LOGGER.info(f'{prefix} simplifier failure: {e}') - return f, model_onnx - - -@try_export -def export_openvino(file, metadata, half, prefix=colorstr('OpenVINO:')): - # YOLOv5 OpenVINO export - check_requirements('openvino-dev') # requires openvino-dev: https://pypi.org/project/openvino-dev/ - import openvino.inference_engine as ie - - LOGGER.info(f'\n{prefix} starting export with openvino {ie.__version__}...') - f = str(file).replace('.pt', f'_openvino_model{os.sep}') - - cmd = f"mo --input_model {file.with_suffix('.onnx')} --output_dir {f} --data_type {'FP16' if half else 'FP32'}" - subprocess.run(cmd.split(), check=True, env=os.environ) # export - yaml_save(Path(f) / file.with_suffix('.yaml').name, metadata) # add metadata.yaml - return f, None - - -@try_export -def export_paddle(model, im, file, metadata, prefix=colorstr('PaddlePaddle:')): - # YOLOv5 Paddle export - check_requirements(('paddlepaddle', 'x2paddle')) - import x2paddle - from x2paddle.convert import pytorch2paddle - - LOGGER.info(f'\n{prefix} starting export with X2Paddle {x2paddle.__version__}...') - f = str(file).replace('.pt', f'_paddle_model{os.sep}') - - pytorch2paddle(module=model, save_dir=f, jit_type='trace', input_examples=[im]) # export - yaml_save(Path(f) / file.with_suffix('.yaml').name, metadata) # add metadata.yaml - return f, None - - -@try_export -def export_coreml(model, im, file, int8, half, prefix=colorstr('CoreML:')): - # YOLOv5 CoreML export - check_requirements('coremltools') - import coremltools as ct - - LOGGER.info(f'\n{prefix} starting export with coremltools {ct.__version__}...') - f = file.with_suffix('.mlmodel') - - ts = torch.jit.trace(model, im, strict=False) # TorchScript model - ct_model = ct.convert(ts, inputs=[ct.ImageType('image', shape=im.shape, scale=1 / 255, bias=[0, 0, 0])]) - bits, mode = (8, 'kmeans_lut') if int8 else (16, 'linear') if half else (32, None) - if bits < 32: - if platform.system() == 'Darwin': # 
quantization only supported on macOS - with warnings.catch_warnings(): - warnings.filterwarnings("ignore", category=DeprecationWarning) # suppress numpy==1.20 float warning - ct_model = ct.models.neural_network.quantization_utils.quantize_weights(ct_model, bits, mode) - else: - print(f'{prefix} quantization only supported on macOS, skipping...') - ct_model.save(f) - return f, ct_model - - -@try_export -def export_engine(model, im, file, half, dynamic, simplify, workspace=4, verbose=False, prefix=colorstr('TensorRT:')): - # YOLOv5 TensorRT export https://developer.nvidia.com/tensorrt - assert im.device.type != 'cpu', 'export running on CPU but must be on GPU, i.e. `python export.py --device 0`' - try: - import tensorrt as trt - except Exception: - if platform.system() == 'Linux': - check_requirements('nvidia-tensorrt', cmds='-U --index-url https://pypi.ngc.nvidia.com') - import tensorrt as trt - - if trt.__version__[0] == '7': # TensorRT 7 handling https://github.com/ultralytics/yolov5/issues/6012 - grid = model.model[-1].anchor_grid - model.model[-1].anchor_grid = [a[..., :1, :1, :] for a in grid] - export_onnx(model, im, file, 12, False, dynamic, simplify) # opset 12 - model.model[-1].anchor_grid = grid - else: # TensorRT >= 8 - check_version(trt.__version__, '8.0.0', hard=True) # require tensorrt>=8.0.0 - export_onnx(model, im, file, 13, False, dynamic, simplify) # opset 13 - onnx = file.with_suffix('.onnx') - - LOGGER.info(f'\n{prefix} starting export with TensorRT {trt.__version__}...') - assert onnx.exists(), f'failed to export ONNX file: {onnx}' - f = file.with_suffix('.engine') # TensorRT engine file - logger = trt.Logger(trt.Logger.INFO) - if verbose: - logger.min_severity = trt.Logger.Severity.VERBOSE - - builder = trt.Builder(logger) - config = builder.create_builder_config() - config.max_workspace_size = workspace * 1 << 30 - # config.set_memory_pool_limit(trt.MemoryPoolType.WORKSPACE, workspace << 30) # fix TRT 8.4 deprecation notice - - flag = (1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)) - network = builder.create_network(flag) - parser = trt.OnnxParser(network, logger) - if not parser.parse_from_file(str(onnx)): - raise RuntimeError(f'failed to load ONNX file: {onnx}') - - inputs = [network.get_input(i) for i in range(network.num_inputs)] - outputs = [network.get_output(i) for i in range(network.num_outputs)] - LOGGER.info(f'{prefix} Network Description:') - for inp in inputs: - LOGGER.info(f'{prefix}\tinput "{inp.name}" with shape {inp.shape} and dtype {inp.dtype}') - for out in outputs: - LOGGER.info(f'{prefix}\toutput "{out.name}" with shape {out.shape} and dtype {out.dtype}') - - if dynamic: - if im.shape[0] <= 1: - LOGGER.warning(f"{prefix}WARNING: --dynamic model requires maximum --batch-size argument") - profile = builder.create_optimization_profile() - for inp in inputs: - profile.set_shape(inp.name, (1, *im.shape[1:]), (max(1, im.shape[0] // 2), *im.shape[1:]), im.shape) - config.add_optimization_profile(profile) - - LOGGER.info(f'{prefix} building FP{16 if builder.platform_has_fast_fp16 and half else 32} engine in {f}') - if builder.platform_has_fast_fp16 and half: - config.set_flag(trt.BuilderFlag.FP16) - with builder.build_engine(network, config) as engine, open(f, 'wb') as t: - t.write(engine.serialize()) - return f, None - - -@try_export -def export_saved_model(model, - im, - file, - dynamic, - tf_nms=False, - agnostic_nms=False, - topk_per_class=100, - topk_all=100, - iou_thres=0.45, - conf_thres=0.25, - keras=False, - 
prefix=colorstr('TensorFlow SavedModel:')): - # YOLOv5 TensorFlow SavedModel export - import tensorflow as tf - from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2 - - from models.tf import TFModel - - LOGGER.info(f'\n{prefix} starting export with tensorflow {tf.__version__}...') - f = str(file).replace('.pt', '_saved_model') - batch_size, ch, *imgsz = list(im.shape) # BCHW - - tf_model = TFModel(cfg=model.yaml, model=model, nc=model.nc, imgsz=imgsz) - im = tf.zeros((batch_size, *imgsz, ch)) # BHWC order for TensorFlow - _ = tf_model.predict(im, tf_nms, agnostic_nms, topk_per_class, topk_all, iou_thres, conf_thres) - inputs = tf.keras.Input(shape=(*imgsz, ch), batch_size=None if dynamic else batch_size) - outputs = tf_model.predict(inputs, tf_nms, agnostic_nms, topk_per_class, topk_all, iou_thres, conf_thres) - keras_model = tf.keras.Model(inputs=inputs, outputs=outputs) - keras_model.trainable = False - keras_model.summary() - if keras: - keras_model.save(f, save_format='tf') - else: - spec = tf.TensorSpec(keras_model.inputs[0].shape, keras_model.inputs[0].dtype) - m = tf.function(lambda x: keras_model(x)) # full model - m = m.get_concrete_function(spec) - frozen_func = convert_variables_to_constants_v2(m) - tfm = tf.Module() - tfm.__call__ = tf.function(lambda x: frozen_func(x)[:4] if tf_nms else frozen_func(x)[0], [spec]) - tfm.__call__(im) - tf.saved_model.save(tfm, - f, - options=tf.saved_model.SaveOptions(experimental_custom_gradients=False) if check_version( - tf.__version__, '2.6') else tf.saved_model.SaveOptions()) - return f, keras_model - - -@try_export -def export_pb(keras_model, file, prefix=colorstr('TensorFlow GraphDef:')): - # YOLOv5 TensorFlow GraphDef *.pb export https://github.com/leimao/Frozen_Graph_TensorFlow - import tensorflow as tf - from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2 - - LOGGER.info(f'\n{prefix} starting export with tensorflow {tf.__version__}...') - f = file.with_suffix('.pb') - - m = tf.function(lambda x: keras_model(x)) # full model - m = m.get_concrete_function(tf.TensorSpec(keras_model.inputs[0].shape, keras_model.inputs[0].dtype)) - frozen_func = convert_variables_to_constants_v2(m) - frozen_func.graph.as_graph_def() - tf.io.write_graph(graph_or_graph_def=frozen_func.graph, logdir=str(f.parent), name=f.name, as_text=False) - return f, None - - -@try_export -def export_tflite(keras_model, im, file, int8, data, nms, agnostic_nms, prefix=colorstr('TensorFlow Lite:')): - # YOLOv5 TensorFlow Lite export - import tensorflow as tf - - LOGGER.info(f'\n{prefix} starting export with tensorflow {tf.__version__}...') - batch_size, ch, *imgsz = list(im.shape) # BCHW - f = str(file).replace('.pt', '-fp16.tflite') - - converter = tf.lite.TFLiteConverter.from_keras_model(keras_model) - converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS] - converter.target_spec.supported_types = [tf.float16] - converter.optimizations = [tf.lite.Optimize.DEFAULT] - if int8: - from models.tf import representative_dataset_gen - dataset = LoadImages(check_dataset(check_yaml(data))['train'], img_size=imgsz, auto=False) - converter.representative_dataset = lambda: representative_dataset_gen(dataset, ncalib=100) - converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8] - converter.target_spec.supported_types = [] - converter.inference_input_type = tf.uint8 # or tf.int8 - converter.inference_output_type = tf.uint8 # or tf.int8 - 
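# Note: with int8 enabled this block performs full post-training quantization: the
# representative_dataset generator above streams calibration images (ncalib=100) so the
# converter can fix activation ranges, and model inputs/outputs switch to uint8.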
converter.experimental_new_quantizer = True - f = str(file).replace('.pt', '-int8.tflite') - if nms or agnostic_nms: - converter.target_spec.supported_ops.append(tf.lite.OpsSet.SELECT_TF_OPS) - - tflite_model = converter.convert() - open(f, "wb").write(tflite_model) - return f, None - - -@try_export -def export_edgetpu(file, prefix=colorstr('Edge TPU:')): - # YOLOv5 Edge TPU export https://coral.ai/docs/edgetpu/models-intro/ - cmd = 'edgetpu_compiler --version' - help_url = 'https://coral.ai/docs/edgetpu/compiler/' - assert platform.system() == 'Linux', f'export only supported on Linux. See {help_url}' - if subprocess.run(f'{cmd} >/dev/null', shell=True).returncode != 0: - LOGGER.info(f'\n{prefix} export requires Edge TPU compiler. Attempting install from {help_url}') - sudo = subprocess.run('sudo --version >/dev/null', shell=True).returncode == 0 # sudo installed on system - for c in ( - 'curl https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -', - 'echo "deb https://packages.cloud.google.com/apt coral-edgetpu-stable main" | sudo tee /etc/apt/sources.list.d/coral-edgetpu.list', - 'sudo apt-get update', 'sudo apt-get install edgetpu-compiler'): - subprocess.run(c if sudo else c.replace('sudo ', ''), shell=True, check=True) - ver = subprocess.run(cmd, shell=True, capture_output=True, check=True).stdout.decode().split()[-1] - - LOGGER.info(f'\n{prefix} starting export with Edge TPU compiler {ver}...') - f = str(file).replace('.pt', '-int8_edgetpu.tflite') # Edge TPU model - f_tfl = str(file).replace('.pt', '-int8.tflite') # TFLite model - - cmd = f"edgetpu_compiler -s -d -k 10 --out_dir {file.parent} {f_tfl}" - subprocess.run(cmd.split(), check=True) - return f, None - - -@try_export -def export_tfjs(file, prefix=colorstr('TensorFlow.js:')): - # YOLOv5 TensorFlow.js export - check_requirements('tensorflowjs') - import re - - import tensorflowjs as tfjs - - LOGGER.info(f'\n{prefix} starting export with tensorflowjs {tfjs.__version__}...') - f = str(file).replace('.pt', '_web_model') # js dir - f_pb = file.with_suffix('.pb') # *.pb path - f_json = f'{f}/model.json' # *.json path - - cmd = f'tensorflowjs_converter --input_format=tf_frozen_model ' \ - f'--output_node_names=Identity,Identity_1,Identity_2,Identity_3 {f_pb} {f}' - subprocess.run(cmd.split()) - - json = Path(f_json).read_text() - with open(f_json, 'w') as j: # sort JSON Identity_* in ascending order - subst = re.sub( - r'{"outputs": {"Identity.?.?": {"name": "Identity.?.?"}, ' - r'"Identity.?.?": {"name": "Identity.?.?"}, ' - r'"Identity.?.?": {"name": "Identity.?.?"}, ' - r'"Identity.?.?": {"name": "Identity.?.?"}}}', r'{"outputs": {"Identity": {"name": "Identity"}, ' - r'"Identity_1": {"name": "Identity_1"}, ' - r'"Identity_2": {"name": "Identity_2"}, ' - r'"Identity_3": {"name": "Identity_3"}}}', json) - j.write(subst) - return f, None - - -@smart_inference_mode() -def run( - data=ROOT / 'data/coco128.yaml', # 'dataset.yaml path' - weights=ROOT / 'yolov5s.pt', # weights path - imgsz=(640, 640), # image (height, width) - batch_size=1, # batch size - device='cpu', # cuda device, i.e. 
0 or 0,1,2,3 or cpu - include=('torchscript', 'onnx'), # include formats - half=False, # FP16 half-precision export - inplace=False, # set YOLOv5 Detect() inplace=True - train=False, # model.train() mode - keras=False, # use Keras - optimize=False, # TorchScript: optimize for mobile - int8=False, # CoreML/TF INT8 quantization - dynamic=False, # ONNX/TF/TensorRT: dynamic axes - simplify=False, # ONNX: simplify model - opset=12, # ONNX: opset version - verbose=False, # TensorRT: verbose log - workspace=4, # TensorRT: workspace size (GB) - nms=False, # TF: add NMS to model - agnostic_nms=False, # TF: add agnostic NMS to model - topk_per_class=100, # TF.js NMS: topk per class to keep - topk_all=100, # TF.js NMS: topk for all classes to keep - iou_thres=0.45, # TF.js NMS: IoU threshold - conf_thres=0.25, # TF.js NMS: confidence threshold -): - t = time.time() - include = [x.lower() for x in include] # to lowercase - fmts = tuple(export_formats()['Argument'][1:]) # --include arguments - flags = [x in include for x in fmts] - assert sum(flags) == len(include), f'ERROR: Invalid --include {include}, valid --include arguments are {fmts}' - jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, paddle = flags # export booleans - file = Path(url2file(weights) if str(weights).startswith(('http:/', 'https:/')) else weights) # PyTorch weights - - # Load PyTorch model - device = select_device(device) - if half: - assert device.type != 'cpu' or coreml, '--half only compatible with GPU export, i.e. use --device 0' - assert not dynamic, '--half not compatible with --dynamic, i.e. use either --half or --dynamic but not both' - model = attempt_load(weights, device=device, inplace=True, fuse=True) # load FP32 model - - # Checks - imgsz *= 2 if len(imgsz) == 1 else 1 # expand - if optimize: - assert device.type == 'cpu', '--optimize not compatible with cuda devices, i.e. 
use --device cpu' - - # Input - gs = int(max(model.stride)) # grid size (max stride) - imgsz = [check_img_size(x, gs) for x in imgsz] # verify img_size are gs-multiples - im = torch.zeros(batch_size, 3, *imgsz).to(device) # image size(1,3,320,192) BCHW iDetection - - # Update model - model.train() if train else model.eval() # training mode = no Detect() layer grid construction - for k, m in model.named_modules(): - if isinstance(m, Detect): - m.inplace = inplace - m.dynamic = dynamic - m.export = True - - for _ in range(2): - y = model(im) # dry runs - if half and not coreml: - im, model = im.half(), model.half() # to FP16 - shape = tuple((y[0] if isinstance(y, tuple) else y).shape) # model output shape - metadata = {'stride': int(max(model.stride)), 'names': model.names} # model metadata - LOGGER.info(f"\n{colorstr('PyTorch:')} starting from {file} with output shape {shape} ({file_size(file):.1f} MB)") - - # Exports - f = [''] * len(fmts) # exported filenames - warnings.filterwarnings(action='ignore', category=torch.jit.TracerWarning) # suppress TracerWarning - if jit: # TorchScript - f[0], _ = export_torchscript(model, im, file, optimize) - if engine: # TensorRT required before ONNX - f[1], _ = export_engine(model, im, file, half, dynamic, simplify, workspace, verbose) - if onnx or xml: # OpenVINO requires ONNX - f[2], _ = export_onnx(model, im, file, opset, train, dynamic, simplify) - if xml: # OpenVINO - f[3], _ = export_openvino(file, metadata, half) - if coreml: # CoreML - f[4], _ = export_coreml(model, im, file, int8, half) - if any((saved_model, pb, tflite, edgetpu, tfjs)): # TensorFlow formats - if int8 or edgetpu: # TFLite --int8 bug https://github.com/ultralytics/yolov5/issues/5707 - check_requirements('flatbuffers==1.12') # required before `import tensorflow` - assert not tflite or not tfjs, 'TFLite and TF.js models must be exported separately, please pass only one type.' - assert not isinstance(model, ClassificationModel), 'ClassificationModel export to TF formats not yet supported.' 
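# TensorFlow export chain: the Keras/SavedModel built next is the common ancestor; the
# GraphDef (.pb), TFLite, Edge TPU and TF.js artifacts below are all derived from it.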
- f[5], s_model = export_saved_model(model.cpu(), - im, - file, - dynamic, - tf_nms=nms or agnostic_nms or tfjs, - agnostic_nms=agnostic_nms or tfjs, - topk_per_class=topk_per_class, - topk_all=topk_all, - iou_thres=iou_thres, - conf_thres=conf_thres, - keras=keras) - if pb or tfjs: # pb prerequisite to tfjs - f[6], _ = export_pb(s_model, file) - if tflite or edgetpu: - f[7], _ = export_tflite(s_model, im, file, int8 or edgetpu, data=data, nms=nms, agnostic_nms=agnostic_nms) - if edgetpu: - f[8], _ = export_edgetpu(file) - if tfjs: - f[9], _ = export_tfjs(file) - if paddle: # PaddlePaddle - f[10], _ = export_paddle(model, im, file, metadata) - - # Finish - f = [str(x) for x in f if x] # filter out '' and None - if any(f): - h = '--half' if half else '' # --half FP16 inference arg - LOGGER.info(f'\nExport complete ({time.time() - t:.1f}s)' - f"\nResults saved to {colorstr('bold', file.parent.resolve())}" - f"\nDetect: python detect.py --weights {f[-1]} {h}" - f"\nValidate: python val.py --weights {f[-1]} {h}" - f"\nPyTorch Hub: model = torch.hub.load('ultralytics/yolov5', 'custom', '{f[-1]}')" - f"\nVisualize: https://netron.app") - return f # return list of exported files/dirs - - -def parse_opt(): - parser = argparse.ArgumentParser() - parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='dataset.yaml path') - parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'yolov5s.pt', help='model.pt path(s)') - parser.add_argument('--imgsz', '--img', '--img-size', nargs='+', type=int, default=[640, 640], help='image (h, w)') - parser.add_argument('--batch-size', type=int, default=1, help='batch size') - parser.add_argument('--device', default='cpu', help='cuda device, i.e. 0 or 0,1,2,3 or cpu') - parser.add_argument('--half', action='store_true', help='FP16 half-precision export') - parser.add_argument('--inplace', action='store_true', help='set YOLOv5 Detect() inplace=True') - parser.add_argument('--train', action='store_true', help='model.train() mode') - parser.add_argument('--keras', action='store_true', help='TF: use Keras') - parser.add_argument('--optimize', action='store_true', help='TorchScript: optimize for mobile') - parser.add_argument('--int8', action='store_true', help='CoreML/TF INT8 quantization') - parser.add_argument('--dynamic', action='store_true', help='ONNX/TF/TensorRT: dynamic axes') - parser.add_argument('--simplify', action='store_true', help='ONNX: simplify model') - parser.add_argument('--opset', type=int, default=12, help='ONNX: opset version') - parser.add_argument('--verbose', action='store_true', help='TensorRT: verbose log') - parser.add_argument('--workspace', type=int, default=4, help='TensorRT: workspace size (GB)') - parser.add_argument('--nms', action='store_true', help='TF: add NMS to model') - parser.add_argument('--agnostic-nms', action='store_true', help='TF: add agnostic NMS to model') - parser.add_argument('--topk-per-class', type=int, default=100, help='TF.js NMS: topk per class to keep') - parser.add_argument('--topk-all', type=int, default=100, help='TF.js NMS: topk for all classes to keep') - parser.add_argument('--iou-thres', type=float, default=0.45, help='TF.js NMS: IoU threshold') - parser.add_argument('--conf-thres', type=float, default=0.25, help='TF.js NMS: confidence threshold') - parser.add_argument('--include', - nargs='+', - default=['torchscript'], - help='torchscript, onnx, openvino, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs') - opt = parser.parse_args() - print_args(vars(opt)) - 
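# print_args logs every parsed option before run() consumes them, so an export can be reproduced from its console output.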
return opt - - -def main(opt): - for opt.weights in (opt.weights if isinstance(opt.weights, list) else [opt.weights]): - run(**vars(opt)) - - -if __name__ == "__main__": - opt = parse_opt() - main(opt) diff --git a/src/yolov5_ros/src/yolov5/hubconf.py b/src/yolov5_ros/src/yolov5/hubconf.py deleted file mode 100644 index 2f05565..0000000 --- a/src/yolov5_ros/src/yolov5/hubconf.py +++ /dev/null @@ -1,164 +0,0 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license -""" -PyTorch Hub models https://pytorch.org/hub/ultralytics_yolov5 - -Usage: - import torch - model = torch.hub.load('ultralytics/yolov5', 'yolov5s') - model = torch.hub.load('ultralytics/yolov5:master', 'custom', 'path/to/yolov5s.onnx') # custom model from branch -""" - -import torch - - -def _create(name, pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None): - """Creates or loads a YOLOv5 model - - Arguments: - name (str): model name 'yolov5s' or path 'path/to/best.pt' - pretrained (bool): load pretrained weights into the model - channels (int): number of input channels - classes (int): number of model classes - autoshape (bool): apply YOLOv5 .autoshape() wrapper to model - verbose (bool): print all information to screen - device (str, torch.device, None): device to use for model parameters - - Returns: - YOLOv5 model - """ - from pathlib import Path - - from models.common import AutoShape, DetectMultiBackend - from models.experimental import attempt_load - from models.yolo import ClassificationModel, DetectionModel - from utils.downloads import attempt_download - from utils.general import LOGGER, check_requirements, intersect_dicts, logging - from utils.torch_utils import select_device - - if not verbose: - LOGGER.setLevel(logging.WARNING) - check_requirements(exclude=('ipython', 'opencv-python', 'tensorboard', 'thop')) - name = Path(name) - path = name.with_suffix('.pt') if name.suffix == '' and not name.is_dir() else name # checkpoint path - try: - device = select_device(device) - if pretrained and channels == 3 and classes == 80: - try: - model = DetectMultiBackend(path, device=device, fuse=autoshape) # detection model - if autoshape: - if model.pt and isinstance(model.model, ClassificationModel): - LOGGER.warning('WARNING: ⚠️ YOLOv5 v6.2 ClassificationModel is not yet AutoShape compatible. ' - 'You must pass torch tensors in BCHW to this model, i.e. shape(1,3,224,224).') - else: - model = AutoShape(model) # for file/URI/PIL/cv2/np inputs and NMS - except Exception: - model = attempt_load(path, device=device, fuse=False) # arbitrary model - else: - cfg = list((Path(__file__).parent / 'models').rglob(f'{path.stem}.yaml'))[0] # model.yaml path - model = DetectionModel(cfg, channels, classes) # create model - if pretrained: - ckpt = torch.load(attempt_download(path), map_location=device) # load - csd = ckpt['model'].float().state_dict() # checkpoint state_dict as FP32 - csd = intersect_dicts(csd, model.state_dict(), exclude=['anchors']) # intersect - model.load_state_dict(csd, strict=False) # load - if len(ckpt['model'].names) == classes: - model.names = ckpt['model'].names # set class names attribute - if not verbose: - LOGGER.setLevel(logging.INFO) # reset to default - return model.to(device) - - except Exception as e: - help_url = 'https://github.com/ultralytics/yolov5/issues/36' - s = f'{e}. Cache may be out of date, try `force_reload=True` or see {help_url} for help.' 
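# Chain the original exception (raise ... from e) so the root cause stays visible alongside the cache hint.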
- raise Exception(s) from e - - -def custom(path='path/to/model.pt', autoshape=True, _verbose=True, device=None): - # YOLOv5 custom or local model - return _create(path, autoshape=autoshape, verbose=_verbose, device=device) - - -def yolov5n(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None): - # YOLOv5-nano model https://github.com/ultralytics/yolov5 - return _create('yolov5n', pretrained, channels, classes, autoshape, _verbose, device) - - -def yolov5s(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None): - # YOLOv5-small model https://github.com/ultralytics/yolov5 - return _create('yolov5s', pretrained, channels, classes, autoshape, _verbose, device) - - -def yolov5m(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None): - # YOLOv5-medium model https://github.com/ultralytics/yolov5 - return _create('yolov5m', pretrained, channels, classes, autoshape, _verbose, device) - - -def yolov5l(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None): - # YOLOv5-large model https://github.com/ultralytics/yolov5 - return _create('yolov5l', pretrained, channels, classes, autoshape, _verbose, device) - - -def yolov5x(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None): - # YOLOv5-xlarge model https://github.com/ultralytics/yolov5 - return _create('yolov5x', pretrained, channels, classes, autoshape, _verbose, device) - - -def yolov5n6(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None): - # YOLOv5-nano-P6 model https://github.com/ultralytics/yolov5 - return _create('yolov5n6', pretrained, channels, classes, autoshape, _verbose, device) - - -def yolov5s6(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None): - # YOLOv5-small-P6 model https://github.com/ultralytics/yolov5 - return _create('yolov5s6', pretrained, channels, classes, autoshape, _verbose, device) - - -def yolov5m6(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None): - # YOLOv5-medium-P6 model https://github.com/ultralytics/yolov5 - return _create('yolov5m6', pretrained, channels, classes, autoshape, _verbose, device) - - -def yolov5l6(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None): - # YOLOv5-large-P6 model https://github.com/ultralytics/yolov5 - return _create('yolov5l6', pretrained, channels, classes, autoshape, _verbose, device) - - -def yolov5x6(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None): - # YOLOv5-xlarge-P6 model https://github.com/ultralytics/yolov5 - return _create('yolov5x6', pretrained, channels, classes, autoshape, _verbose, device) - - -if __name__ == '__main__': - import argparse - from pathlib import Path - - import numpy as np - from PIL import Image - - from utils.general import cv2, print_args - - # Argparser - parser = argparse.ArgumentParser() - parser.add_argument('--model', type=str, default='yolov5s', help='model name') - opt = parser.parse_args() - print_args(vars(opt)) - - # Model - model = _create(name=opt.model, pretrained=True, channels=3, classes=80, autoshape=True, verbose=True) - # model = custom(path='path/to/model.pt') # custom - - # Images - imgs = [ - 'data/images/zidane.jpg', # filename - Path('data/images/zidane.jpg'), # Path - 'https://ultralytics.com/images/zidane.jpg', # URI - cv2.imread('data/images/bus.jpg')[:, :, ::-1], # OpenCV - Image.open('data/images/bus.jpg'), # PIL 
- np.zeros((320, 640, 3))] # numpy - - # Inference - results = model(imgs, size=320) # batched inference - - # Results - results.print() - results.save() diff --git a/src/yolov5_ros/src/yolov5/models/__init__.py b/src/yolov5_ros/src/yolov5/models/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/yolov5_ros/src/yolov5/models/common.py b/src/yolov5_ros/src/yolov5/models/common.py deleted file mode 100644 index 8b7dbbf..0000000 --- a/src/yolov5_ros/src/yolov5/models/common.py +++ /dev/null @@ -1,802 +0,0 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license -""" -Common modules -""" - -import json -import math -import platform -import warnings -from collections import OrderedDict, namedtuple -from copy import copy -from pathlib import Path - -import cv2 -import numpy as np -import pandas as pd -import requests -import torch -import torch.nn as nn -from PIL import Image -from torch.cuda import amp - -from utils.dataloaders import exif_transpose, letterbox -from utils.general import (LOGGER, ROOT, Profile, check_requirements, check_suffix, check_version, colorstr, - increment_path, make_divisible, non_max_suppression, scale_coords, xywh2xyxy, xyxy2xywh, - yaml_load) -from utils.plots import Annotator, colors, save_one_box -from utils.torch_utils import copy_attr, smart_inference_mode - - -def autopad(k, p=None, d=1): # kernel, padding, dilation - # Pad to 'same' shape outputs - if d > 1: - k = d * (k - 1) + 1 if isinstance(k, int) else [d * (x - 1) + 1 for x in k] # actual kernel-size - if p is None: - p = k // 2 if isinstance(k, int) else [x // 2 for x in k] # auto-pad - return p - - -class Conv(nn.Module): - # Standard convolution with args(ch_in, ch_out, kernel, stride, padding, groups, dilation, activation) - def __init__(self, c1, c2, k=1, s=1, p=None, g=1, d=1, act=True): - super().__init__() - self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p, d), groups=g, dilation=d, bias=False) - self.bn = nn.BatchNorm2d(c2) - self.act = nn.SiLU() if act is True else (act if isinstance(act, nn.Module) else nn.Identity()) - - def forward(self, x): - return self.act(self.bn(self.conv(x))) - - def forward_fuse(self, x): - return self.act(self.conv(x)) - - -class DWConv(Conv): - # Depth-wise convolution - def __init__(self, c1, c2, k=1, s=1, act=True): # ch_in, ch_out, kernel, stride, padding, groups - super().__init__(c1, c2, k, s, g=math.gcd(c1, c2), act=act) - - -class DWConvTranspose2d(nn.ConvTranspose2d): - # Depth-wise transpose convolution - def __init__(self, c1, c2, k=1, s=1, p1=0, p2=0): # ch_in, ch_out, kernel, stride, padding, padding_out - super().__init__(c1, c2, k, s, p1, p2, groups=math.gcd(c1, c2)) - - -class TransformerLayer(nn.Module): - # Transformer layer https://arxiv.org/abs/2010.11929 (LayerNorm layers removed for better performance) - def __init__(self, c, num_heads): - super().__init__() - self.q = nn.Linear(c, c, bias=False) - self.k = nn.Linear(c, c, bias=False) - self.v = nn.Linear(c, c, bias=False) - self.ma = nn.MultiheadAttention(embed_dim=c, num_heads=num_heads) - self.fc1 = nn.Linear(c, c, bias=False) - self.fc2 = nn.Linear(c, c, bias=False) - - def forward(self, x): - x = self.ma(self.q(x), self.k(x), self.v(x))[0] + x - x = self.fc2(self.fc1(x)) + x - return x - - -class TransformerBlock(nn.Module): - # Vision Transformer https://arxiv.org/abs/2010.11929 - def __init__(self, c1, c2, num_heads, num_layers): - super().__init__() - self.conv = None - if c1 != c2: - self.conv = Conv(c1, c2) - self.linear = nn.Linear(c2, c2) # learnable position embedding 
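# Applied in forward() as p + self.linear(p), i.e. a learnable position embedding over the flattened token sequence.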
- self.tr = nn.Sequential(*(TransformerLayer(c2, num_heads) for _ in range(num_layers))) - self.c2 = c2 - - def forward(self, x): - if self.conv is not None: - x = self.conv(x) - b, _, w, h = x.shape - p = x.flatten(2).permute(2, 0, 1) - return self.tr(p + self.linear(p)).permute(1, 2, 0).reshape(b, self.c2, w, h) - - -class Bottleneck(nn.Module): - # Standard bottleneck - def __init__(self, c1, c2, shortcut=True, g=1, e=0.5): # ch_in, ch_out, shortcut, groups, expansion - super().__init__() - c_ = int(c2 * e) # hidden channels - self.cv1 = Conv(c1, c_, 1, 1) - self.cv2 = Conv(c_, c2, 3, 1, g=g) - self.add = shortcut and c1 == c2 - - def forward(self, x): - return x + self.cv2(self.cv1(x)) if self.add else self.cv2(self.cv1(x)) - - -class BottleneckCSP(nn.Module): - # CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks - def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion - super().__init__() - c_ = int(c2 * e) # hidden channels - self.cv1 = Conv(c1, c_, 1, 1) - self.cv2 = nn.Conv2d(c1, c_, 1, 1, bias=False) - self.cv3 = nn.Conv2d(c_, c_, 1, 1, bias=False) - self.cv4 = Conv(2 * c_, c2, 1, 1) - self.bn = nn.BatchNorm2d(2 * c_) # applied to cat(cv2, cv3) - self.act = nn.SiLU() - self.m = nn.Sequential(*(Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n))) - - def forward(self, x): - y1 = self.cv3(self.m(self.cv1(x))) - y2 = self.cv2(x) - return self.cv4(self.act(self.bn(torch.cat((y1, y2), 1)))) - - -class CrossConv(nn.Module): - # Cross Convolution Downsample - def __init__(self, c1, c2, k=3, s=1, g=1, e=1.0, shortcut=False): - # ch_in, ch_out, kernel, stride, groups, expansion, shortcut - super().__init__() - c_ = int(c2 * e) # hidden channels - self.cv1 = Conv(c1, c_, (1, k), (1, s)) - self.cv2 = Conv(c_, c2, (k, 1), (s, 1), g=g) - self.add = shortcut and c1 == c2 - - def forward(self, x): - return x + self.cv2(self.cv1(x)) if self.add else self.cv2(self.cv1(x)) - - -class C3(nn.Module): - # CSP Bottleneck with 3 convolutions - def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion - super().__init__() - c_ = int(c2 * e) # hidden channels - self.cv1 = Conv(c1, c_, 1, 1) - self.cv2 = Conv(c1, c_, 1, 1) - self.cv3 = Conv(2 * c_, c2, 1) # optional act=FReLU(c2) - self.m = nn.Sequential(*(Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n))) - - def forward(self, x): - return self.cv3(torch.cat((self.m(self.cv1(x)), self.cv2(x)), 1)) - - -class C3x(C3): - # C3 module with cross-convolutions - def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): - super().__init__(c1, c2, n, shortcut, g, e) - c_ = int(c2 * e) - self.m = nn.Sequential(*(CrossConv(c_, c_, 3, 1, g, 1.0, shortcut) for _ in range(n))) - - -class C3TR(C3): - # C3 module with TransformerBlock() - def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): - super().__init__(c1, c2, n, shortcut, g, e) - c_ = int(c2 * e) - self.m = TransformerBlock(c_, c_, 4, n) - - -class C3SPP(C3): - # C3 module with SPP() - def __init__(self, c1, c2, k=(5, 9, 13), n=1, shortcut=True, g=1, e=0.5): - super().__init__(c1, c2, n, shortcut, g, e) - c_ = int(c2 * e) - self.m = SPP(c_, c_, k) - - -class C3Ghost(C3): - # C3 module with GhostBottleneck() - def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): - super().__init__(c1, c2, n, shortcut, g, e) - c_ = int(c2 * e) # hidden channels - self.m = nn.Sequential(*(GhostBottleneck(c_, c_) for _ in range(n))) - - -class SPP(nn.Module): - # Spatial 
Pyramid Pooling (SPP) layer https://arxiv.org/abs/1406.4729 - def __init__(self, c1, c2, k=(5, 9, 13)): - super().__init__() - c_ = c1 // 2 # hidden channels - self.cv1 = Conv(c1, c_, 1, 1) - self.cv2 = Conv(c_ * (len(k) + 1), c2, 1, 1) - self.m = nn.ModuleList([nn.MaxPool2d(kernel_size=x, stride=1, padding=x // 2) for x in k]) - - def forward(self, x): - x = self.cv1(x) - with warnings.catch_warnings(): - warnings.simplefilter('ignore') # suppress torch 1.9.0 max_pool2d() warning - return self.cv2(torch.cat([x] + [m(x) for m in self.m], 1)) - - -class SPPF(nn.Module): - # Spatial Pyramid Pooling - Fast (SPPF) layer for YOLOv5 by Glenn Jocher - def __init__(self, c1, c2, k=5): # equivalent to SPP(k=(5, 9, 13)) - super().__init__() - c_ = c1 // 2 # hidden channels - self.cv1 = Conv(c1, c_, 1, 1) - self.cv2 = Conv(c_ * 4, c2, 1, 1) - self.m = nn.MaxPool2d(kernel_size=k, stride=1, padding=k // 2) - - def forward(self, x): - x = self.cv1(x) - with warnings.catch_warnings(): - warnings.simplefilter('ignore') # suppress torch 1.9.0 max_pool2d() warning - y1 = self.m(x) - y2 = self.m(y1) - return self.cv2(torch.cat((x, y1, y2, self.m(y2)), 1)) - - -class Focus(nn.Module): - # Focus wh information into c-space - def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True): # ch_in, ch_out, kernel, stride, padding, groups - super().__init__() - self.conv = Conv(c1 * 4, c2, k, s, p, g, act) - # self.contract = Contract(gain=2) - - def forward(self, x): # x(b,c,w,h) -> y(b,4c,w/2,h/2) - return self.conv(torch.cat((x[..., ::2, ::2], x[..., 1::2, ::2], x[..., ::2, 1::2], x[..., 1::2, 1::2]), 1)) - # return self.conv(self.contract(x)) - - -class GhostConv(nn.Module): - # Ghost Convolution https://github.com/huawei-noah/ghostnet - def __init__(self, c1, c2, k=1, s=1, g=1, act=True): # ch_in, ch_out, kernel, stride, groups - super().__init__() - c_ = c2 // 2 # hidden channels - self.cv1 = Conv(c1, c_, k, s, None, g, act) - self.cv2 = Conv(c_, c_, 5, 1, None, c_, act) - - def forward(self, x): - y = self.cv1(x) - return torch.cat((y, self.cv2(y)), 1) - - -class GhostBottleneck(nn.Module): - # Ghost Bottleneck https://github.com/huawei-noah/ghostnet - def __init__(self, c1, c2, k=3, s=1): # ch_in, ch_out, kernel, stride - super().__init__() - c_ = c2 // 2 - self.conv = nn.Sequential( - GhostConv(c1, c_, 1, 1), # pw - DWConv(c_, c_, k, s, act=False) if s == 2 else nn.Identity(), # dw - GhostConv(c_, c2, 1, 1, act=False)) # pw-linear - self.shortcut = nn.Sequential(DWConv(c1, c1, k, s, act=False), Conv(c1, c2, 1, 1, - act=False)) if s == 2 else nn.Identity() - - def forward(self, x): - return self.conv(x) + self.shortcut(x) - - -class Contract(nn.Module): - # Contract width-height into channels, i.e. x(1,64,80,80) to x(1,256,40,40) - def __init__(self, gain=2): - super().__init__() - self.gain = gain - - def forward(self, x): - b, c, h, w = x.size() # assert (h / s == 0) and (W / s == 0), 'Indivisible gain' - s = self.gain - x = x.view(b, c, h // s, s, w // s, s) # x(1,64,40,2,40,2) - x = x.permute(0, 3, 5, 1, 2, 4).contiguous() # x(1,2,2,64,40,40) - return x.view(b, c * s * s, h // s, w // s) # x(1,256,40,40) - - -class Expand(nn.Module): - # Expand channels into width-height, i.e. 
x(1,64,80,80) to x(1,16,160,160) - def __init__(self, gain=2): - super().__init__() - self.gain = gain - - def forward(self, x): - b, c, h, w = x.size() # assert C / s ** 2 == 0, 'Indivisible gain' - s = self.gain - x = x.view(b, s, s, c // s ** 2, h, w) # x(1,2,2,16,80,80) - x = x.permute(0, 3, 4, 1, 5, 2).contiguous() # x(1,16,80,2,80,2) - return x.view(b, c // s ** 2, h * s, w * s) # x(1,16,160,160) - - -class Concat(nn.Module): - # Concatenate a list of tensors along dimension - def __init__(self, dimension=1): - super().__init__() - self.d = dimension - - def forward(self, x): - return torch.cat(x, self.d) - - -class DetectMultiBackend(nn.Module): - # YOLOv5 MultiBackend class for python inference on various backends - def __init__(self, weights='yolov5s.pt', device=torch.device('cpu'), dnn=False, data=None, fp16=False, fuse=True): - # Usage: - # PyTorch: weights = *.pt - # TorchScript: *.torchscript - # ONNX Runtime: *.onnx - # ONNX OpenCV DNN: *.onnx --dnn - # OpenVINO: *.xml - # CoreML: *.mlmodel - # TensorRT: *.engine - # TensorFlow SavedModel: *_saved_model - # TensorFlow GraphDef: *.pb - # TensorFlow Lite: *.tflite - # TensorFlow Edge TPU: *_edgetpu.tflite - # PaddlePaddle: *_paddle_model - from models.experimental import attempt_download, attempt_load # scoped to avoid circular import - - super().__init__() - w = str(weights[0] if isinstance(weights, list) else weights) - pt, jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, paddle = self._model_type(w) # type - w = attempt_download(w) # download if not local - fp16 &= pt or jit or onnx or engine # FP16 - stride = 32 # default stride - cuda = torch.cuda.is_available() and device.type != 'cpu' # use CUDA - - if pt: # PyTorch - model = attempt_load(weights if isinstance(weights, list) else w, device=device, inplace=True, fuse=fuse) - stride = max(int(model.stride.max()), 32) # model stride - names = model.module.names if hasattr(model, 'module') else model.names # get class names - model.half() if fp16 else model.float() - self.model = model # explicitly assign for to(), cpu(), cuda(), half() - elif jit: # TorchScript - LOGGER.info(f'Loading {w} for TorchScript inference...') - extra_files = {'config.txt': ''} # model metadata - model = torch.jit.load(w, _extra_files=extra_files) - model.half() if fp16 else model.float() - if extra_files['config.txt']: # load metadata dict - d = json.loads(extra_files['config.txt'], - object_hook=lambda d: {int(k) if k.isdigit() else k: v - for k, v in d.items()}) - stride, names = int(d['stride']), d['names'] - elif dnn: # ONNX OpenCV DNN - LOGGER.info(f'Loading {w} for ONNX OpenCV DNN inference...') - check_requirements('opencv-python>=4.5.4') - net = cv2.dnn.readNetFromONNX(w) - elif onnx: # ONNX Runtime - LOGGER.info(f'Loading {w} for ONNX Runtime inference...') - check_requirements(('onnx', 'onnxruntime-gpu' if cuda else 'onnxruntime')) - import onnxruntime - providers = ['CUDAExecutionProvider', 'CPUExecutionProvider'] if cuda else ['CPUExecutionProvider'] - session = onnxruntime.InferenceSession(w, providers=providers) - output_names = [x.name for x in session.get_outputs()] - meta = session.get_modelmeta().custom_metadata_map # metadata - if 'stride' in meta: - stride, names = int(meta['stride']), eval(meta['names']) - elif xml: # OpenVINO - LOGGER.info(f'Loading {w} for OpenVINO inference...') - check_requirements('openvino') # requires openvino-dev: https://pypi.org/project/openvino-dev/ - from openvino.runtime import Core, Layout, get_batch - ie = Core() - if not 
Path(w).is_file(): # if not *.xml - w = next(Path(w).glob('*.xml')) # get *.xml file from *_openvino_model dir - network = ie.read_model(model=w, weights=Path(w).with_suffix('.bin')) - if network.get_parameters()[0].get_layout().empty: - network.get_parameters()[0].set_layout(Layout("NCHW")) - batch_dim = get_batch(network) - if batch_dim.is_static: - batch_size = batch_dim.get_length() - executable_network = ie.compile_model(network, device_name="CPU") # device_name="MYRIAD" for Intel NCS2 - output_layer = next(iter(executable_network.outputs)) - stride, names = self._load_metadata(Path(w).with_suffix('.yaml')) # load metadata - elif engine: # TensorRT - LOGGER.info(f'Loading {w} for TensorRT inference...') - import tensorrt as trt # https://developer.nvidia.com/nvidia-tensorrt-download - check_version(trt.__version__, '7.0.0', hard=True) # require tensorrt>=7.0.0 - if device.type == 'cpu': - device = torch.device('cuda:0') - Binding = namedtuple('Binding', ('name', 'dtype', 'shape', 'data', 'ptr')) - logger = trt.Logger(trt.Logger.INFO) - with open(w, 'rb') as f, trt.Runtime(logger) as runtime: - model = runtime.deserialize_cuda_engine(f.read()) - context = model.create_execution_context() - bindings = OrderedDict() - fp16 = False # default updated below - dynamic = False - for index in range(model.num_bindings): - name = model.get_binding_name(index) - dtype = trt.nptype(model.get_binding_dtype(index)) - if model.binding_is_input(index): - if -1 in tuple(model.get_binding_shape(index)): # dynamic - dynamic = True - context.set_binding_shape(index, tuple(model.get_profile_shape(0, index)[2])) - if dtype == np.float16: - fp16 = True - shape = tuple(context.get_binding_shape(index)) - im = torch.from_numpy(np.empty(shape, dtype=dtype)).to(device) - bindings[name] = Binding(name, dtype, shape, im, int(im.data_ptr())) - binding_addrs = OrderedDict((n, d.ptr) for n, d in bindings.items()) - batch_size = bindings['images'].shape[0] # if dynamic, this is instead max batch size - elif coreml: # CoreML - LOGGER.info(f'Loading {w} for CoreML inference...') - import coremltools as ct - model = ct.models.MLModel(w) - elif saved_model: # TF SavedModel - LOGGER.info(f'Loading {w} for TensorFlow SavedModel inference...') - import tensorflow as tf - keras = False # assume TF1 saved_model - model = tf.keras.models.load_model(w) if keras else tf.saved_model.load(w) - elif pb: # GraphDef https://www.tensorflow.org/guide/migrate#a_graphpb_or_graphpbtxt - LOGGER.info(f'Loading {w} for TensorFlow GraphDef inference...') - import tensorflow as tf - - def wrap_frozen_graph(gd, inputs, outputs): - x = tf.compat.v1.wrap_function(lambda: tf.compat.v1.import_graph_def(gd, name=""), []) # wrapped - ge = x.graph.as_graph_element - return x.prune(tf.nest.map_structure(ge, inputs), tf.nest.map_structure(ge, outputs)) - - gd = tf.Graph().as_graph_def() # TF GraphDef - with open(w, 'rb') as f: - gd.ParseFromString(f.read()) - frozen_func = wrap_frozen_graph(gd, inputs="x:0", outputs="Identity:0") - elif tflite or edgetpu: # https://www.tensorflow.org/lite/guide/python#install_tensorflow_lite_for_python - try: # https://coral.ai/docs/edgetpu/tflite-python/#update-existing-tf-lite-code-for-the-edge-tpu - from tflite_runtime.interpreter import Interpreter, load_delegate - except ImportError: - import tensorflow as tf - Interpreter, load_delegate = tf.lite.Interpreter, tf.lite.experimental.load_delegate, - if edgetpu: # TF Edge TPU https://coral.ai/software/#edgetpu-runtime - LOGGER.info(f'Loading {w} for TensorFlow Lite 
Edge TPU inference...')
-                delegate = {
-                    'Linux': 'libedgetpu.so.1',
-                    'Darwin': 'libedgetpu.1.dylib',
-                    'Windows': 'edgetpu.dll'}[platform.system()]
-                interpreter = Interpreter(model_path=w, experimental_delegates=[load_delegate(delegate)])
-            else:  # TFLite
-                LOGGER.info(f'Loading {w} for TensorFlow Lite inference...')
-                interpreter = Interpreter(model_path=w)  # load TFLite model
-            interpreter.allocate_tensors()  # allocate
-            input_details = interpreter.get_input_details()  # inputs
-            output_details = interpreter.get_output_details()  # outputs
-        elif tfjs:  # TF.js
-            raise NotImplementedError('ERROR: YOLOv5 TF.js inference is not supported')
-        elif paddle:  # PaddlePaddle
-            LOGGER.info(f'Loading {w} for PaddlePaddle inference...')
-            check_requirements('paddlepaddle-gpu' if cuda else 'paddlepaddle')
-            import paddle.inference as pdi
-            if not Path(w).is_file():  # if not *.pdmodel
-                w = next(Path(w).rglob('*.pdmodel'))  # get *.pdmodel file from *_paddle_model dir
-            weights = Path(w).with_suffix('.pdiparams')
-            config = pdi.Config(str(w), str(weights))
-            if cuda:
-                config.enable_use_gpu(memory_pool_init_size_mb=2048, device_id=0)
-            predictor = pdi.create_predictor(config)
-            input_names = predictor.get_input_names()
-            input_handle = predictor.get_input_handle(input_names[0])
-        else:
-            raise NotImplementedError(f'ERROR: {w} is not a supported format')
-
-        # class names
-        if 'names' not in locals():
-            names = yaml_load(data)['names'] if data else {i: f'class{i}' for i in range(999)}
-        if names[0] == 'n01440764' and len(names) == 1000:  # ImageNet
-            names = yaml_load(ROOT / 'data/ImageNet.yaml')['names']  # human-readable names
-
-        self.__dict__.update(locals())  # assign all variables to self
-
-    def forward(self, im, augment=False, visualize=False):
-        # YOLOv5 MultiBackend inference
-        b, ch, h, w = im.shape  # batch, channel, height, width
-        if self.fp16 and im.dtype != torch.float16:
-            im = im.half()  # to FP16
-
-        if self.pt:  # PyTorch
-            y = self.model(im, augment=augment, visualize=visualize) if augment or visualize else self.model(im)
-        elif self.jit:  # TorchScript
-            y = self.model(im)
-        elif self.dnn:  # ONNX OpenCV DNN
-            im = im.cpu().numpy()  # torch to numpy
-            self.net.setInput(im)
-            y = self.net.forward()
-        elif self.onnx:  # ONNX Runtime
-            im = im.cpu().numpy()  # torch to numpy
-            y = self.session.run(self.output_names, {self.session.get_inputs()[0].name: im})
-        elif self.xml:  # OpenVINO
-            im = im.cpu().numpy()  # FP32
-            y = self.executable_network([im])[self.output_layer]
-        elif self.engine:  # TensorRT
-            if self.dynamic and im.shape != self.bindings['images'].shape:
-                i_in, i_out = (self.model.get_binding_index(x) for x in ('images', 'output'))
-                self.context.set_binding_shape(i_in, im.shape)  # reshape if dynamic
-                self.bindings['images'] = self.bindings['images']._replace(shape=im.shape)
-                self.bindings['output'].data.resize_(tuple(self.context.get_binding_shape(i_out)))
-            s = self.bindings['images'].shape
-            assert im.shape == s, f"input size {im.shape} {'>' if self.dynamic else 'not equal to'} max model size {s}"
-            self.binding_addrs['images'] = int(im.data_ptr())
-            self.context.execute_v2(list(self.binding_addrs.values()))
-            y = self.bindings['output'].data
-        elif self.coreml:  # CoreML
-            im = im.permute(0, 2, 3, 1).cpu().numpy()  # torch BCHW to numpy BHWC shape(1,320,192,3)
-            im = Image.fromarray((im[0] * 255).astype('uint8'))
-            # im = im.resize((192, 320), Image.ANTIALIAS)
-            y = self.model.predict({'image': im})  # coordinates are xywh normalized
-            if 'confidence' in y:
-                box =
xywh2xyxy(y['coordinates'] * [[w, h, w, h]])  # xyxy pixels
-                conf, cls = y['confidence'].max(1), y['confidence'].argmax(1).astype(np.float32)
-                y = np.concatenate((box, conf.reshape(-1, 1), cls.reshape(-1, 1)), 1)
-            else:
-                k = 'var_' + str(sorted(int(k.replace('var_', '')) for k in y)[-1])  # output key
-                y = y[k]  # output
-        elif self.paddle:  # PaddlePaddle
-            im = im.cpu().numpy().astype("float32")
-            self.input_handle.copy_from_cpu(im)
-            self.predictor.run()
-            output_names = self.predictor.get_output_names()
-            output_handle = self.predictor.get_output_handle(output_names[0])
-            y = output_handle.copy_to_cpu()
-        else:  # TensorFlow (SavedModel, GraphDef, Lite, Edge TPU)
-            im = im.permute(0, 2, 3, 1).cpu().numpy()  # torch BCHW to numpy BHWC shape(1,320,192,3)
-            if self.saved_model:  # SavedModel
-                y = (self.model(im, training=False) if self.keras else self.model(im)).numpy()
-            elif self.pb:  # GraphDef
-                y = self.frozen_func(x=self.tf.constant(im)).numpy()
-            else:  # Lite or Edge TPU
-                input, output = self.input_details[0], self.output_details[0]
-                int8 = input['dtype'] == np.uint8  # is TFLite quantized uint8 model
-                if int8:
-                    scale, zero_point = input['quantization']
-                    im = (im / scale + zero_point).astype(np.uint8)  # de-scale
-                self.interpreter.set_tensor(input['index'], im)
-                self.interpreter.invoke()
-                y = self.interpreter.get_tensor(output['index'])
-                if int8:
-                    scale, zero_point = output['quantization']
-                    y = (y.astype(np.float32) - zero_point) * scale  # re-scale
-            y[..., :4] *= [w, h, w, h]  # xywh normalized to pixels
-
-        if isinstance(y, (list, tuple)):
-            return self.from_numpy(y[0]) if len(y) == 1 else [self.from_numpy(x) for x in y]
-        else:
-            return self.from_numpy(y)
-
-    def from_numpy(self, x):
-        return torch.from_numpy(x).to(self.device) if isinstance(x, np.ndarray) else x
-
-    def warmup(self, imgsz=(1, 3, 640, 640)):
-        # Warmup model by running inference once
-        warmup_types = self.pt, self.jit, self.onnx, self.engine, self.saved_model, self.pb
-        if any(warmup_types) and self.device.type != 'cpu':
-            im = torch.empty(*imgsz, dtype=torch.half if self.fp16 else torch.float, device=self.device)  # input
-            for _ in range(2 if self.jit else 1):
-                self.forward(im)  # warmup
-
-    @staticmethod
-    def _model_type(p='path/to/model.pt'):
-        # Return model type from model path, i.e. path='path/to/model.onnx' -> type=onnx
-        from export import export_formats
-        sf = list(export_formats().Suffix) + ['.xml']  # export suffixes
-        check_suffix(p, sf)  # checks
-        p = Path(p).name  # eliminate trailing separators
-        pt, jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, paddle, xml2 = (s in p for s in sf)
-        xml |= xml2  # *_openvino_model or *.xml
-        tflite &= not edgetpu  # *.tflite
-        return pt, jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, paddle
-
-    @staticmethod
-    def _load_metadata(f=Path('path/to/meta.yaml')):
-        # Load metadata from meta.yaml if it exists
-        if f.exists():
-            d = yaml_load(f)
-            return d['stride'], d['names']  # assign stride, names
-        return None, None
-
-
-class AutoShape(nn.Module):
-    # YOLOv5 input-robust model wrapper for passing cv2/np/PIL/torch inputs. Includes preprocessing, inference and NMS
-    conf = 0.25  # NMS confidence threshold
-    iou = 0.45  # NMS IoU threshold
-    agnostic = False  # NMS class-agnostic
-    multi_label = False  # NMS multiple labels per box
-    classes = None  # (optional list) filter by class, i.e. = [0, 15, 16] for COCO persons, cats and dogs
-    max_det = 1000  # maximum number of detections per image
-    amp = False  # Automatic Mixed Precision (AMP) inference
-
-    def __init__(self, model, verbose=True):
-        super().__init__()
-        if verbose:
-            LOGGER.info('Adding AutoShape... ')
-        copy_attr(self, model, include=('yaml', 'nc', 'hyp', 'names', 'stride', 'abc'), exclude=())  # copy attributes
-        self.dmb = isinstance(model, DetectMultiBackend)  # DetectMultiBackend() instance
-        self.pt = not self.dmb or model.pt  # PyTorch model
-        self.model = model.eval()
-        if self.pt:
-            m = self.model.model.model[-1] if self.dmb else self.model.model[-1]  # Detect()
-            m.inplace = False  # Detect.inplace=False for safe multithread inference
-            m.export = True  # do not output loss values
-
-    def _apply(self, fn):
-        # Apply to(), cpu(), cuda(), half() to model tensors that are not parameters or registered buffers
-        self = super()._apply(fn)
-        if self.pt:
-            m = self.model.model.model[-1] if self.dmb else self.model.model[-1]  # Detect()
-            m.stride = fn(m.stride)
-            m.grid = list(map(fn, m.grid))
-            if isinstance(m.anchor_grid, list):
-                m.anchor_grid = list(map(fn, m.anchor_grid))
-        return self
-
-    @smart_inference_mode()
-    def forward(self, ims, size=640, augment=False, profile=False):
-        # Inference from various sources. For size(height=640, width=1280), RGB images example inputs are:
-        #   file:        ims = 'data/images/zidane.jpg'  # str or PosixPath
-        #   URI:             = 'https://ultralytics.com/images/zidane.jpg'
-        #   OpenCV:          = cv2.imread('image.jpg')[:,:,::-1]  # HWC BGR to RGB x(640,1280,3)
-        #   PIL:             = Image.open('image.jpg') or ImageGrab.grab()  # HWC x(640,1280,3)
-        #   numpy:           = np.zeros((640,1280,3))  # HWC
-        #   torch:           = torch.zeros(16,3,320,640)  # BCHW (scaled to size=640, 0-1 values)
-        #   multiple:        = [Image.open('image1.jpg'), Image.open('image2.jpg'), ...]  # list of images
-
-        dt = (Profile(), Profile(), Profile())
-        with dt[0]:
-            if isinstance(size, int):  # expand
-                size = (size, size)
-            p = next(self.model.parameters()) if self.pt else torch.empty(1, device=self.model.device)  # param
-            autocast = self.amp and (p.device.type != 'cpu')  # Automatic Mixed Precision (AMP) inference
-            if isinstance(ims, torch.Tensor):  # torch
-                with amp.autocast(autocast):
-                    return self.model(ims.to(p.device).type_as(p), augment, profile)  # inference
-
-            # Pre-process
-            n, ims = (len(ims), list(ims)) if isinstance(ims, (list, tuple)) else (1, [ims])  # number, list of images
-            shape0, shape1, files = [], [], []  # image and inference shapes, filenames
-            for i, im in enumerate(ims):
-                f = f'image{i}'  # filename
-                if isinstance(im, (str, Path)):  # filename or uri
-                    im, f = Image.open(requests.get(im, stream=True).raw if str(im).startswith('http') else im), im
-                    im = np.asarray(exif_transpose(im))
-                elif isinstance(im, Image.Image):  # PIL Image
-                    im, f = np.asarray(exif_transpose(im)), getattr(im, 'filename', f) or f
-                files.append(Path(f).with_suffix('.jpg').name)
-                if im.shape[0] < 5:  # image in CHW
-                    im = im.transpose((1, 2, 0))  # reverse dataloader .transpose(2, 0, 1)
-                im = im[..., :3] if im.ndim == 3 else cv2.cvtColor(im, cv2.COLOR_GRAY2BGR)  # enforce 3ch input
-                s = im.shape[:2]  # HWC
-                shape0.append(s)  # image shape
-                g = max(size) / max(s)  # gain
-                shape1.append([y * g for y in s])
-                ims[i] = im if im.data.contiguous else np.ascontiguousarray(im)  # update
-            shape1 = [make_divisible(x, self.stride) for x in np.array(shape1).max(0)] if self.pt else size  # inf shape
-            x = [letterbox(im, shape1, auto=False)[0] for im in ims]  # pad
-            x = np.ascontiguousarray(np.array(x).transpose((0, 3, 1, 2)))  # stack and BHWC to BCHW
-            x = torch.from_numpy(x).to(p.device).type_as(p) / 255  # uint8 to fp16/32
-
-        with amp.autocast(autocast):
-            # Inference
-            with dt[1]:
-                y = self.model(x, augment, profile)  # forward
-
-            # Post-process
-            with dt[2]:
-                y = non_max_suppression(y if self.dmb else y[0],
-                                        self.conf,
-                                        self.iou,
-                                        self.classes,
-                                        self.agnostic,
-                                        self.multi_label,
-                                        max_det=self.max_det)  # NMS
-                for i in range(n):
-                    scale_coords(shape1, y[i][:, :4], shape0[i])
-
-        return Detections(ims, y, files, dt, self.names, x.shape)
-
-
-class Detections:
-    # YOLOv5 detections class for inference results
-    def __init__(self, ims, pred, files, times=(0, 0, 0), names=None, shape=None):
-        super().__init__()
-        d = pred[0].device  # device
-        gn = [torch.tensor([*(im.shape[i] for i in [1, 0, 1, 0]), 1, 1], device=d) for im in ims]  # normalizations
-        self.ims = ims  # list of images as numpy arrays
-        self.pred = pred  # list of tensors pred[0] = (xyxy, conf, cls)
-        self.names = names  # class names
-        self.files = files  # image filenames
-        self.times = times  # profiling times
-        self.xyxy = pred  # xyxy pixels
-        self.xywh = [xyxy2xywh(x) for x in pred]  # xywh pixels
-        self.xyxyn = [x / g for x, g in zip(self.xyxy, gn)]  # xyxy normalized
-        self.xywhn = [x / g for x, g in zip(self.xywh, gn)]  # xywh normalized
-        self.n = len(self.pred)  # number of images (batch size)
-        self.t = tuple(x.t / self.n * 1E3 for x in times)  # average time per image (ms)
-        self.s = shape  # inference BCHW shape
-
-    def display(self, pprint=False, show=False, save=False, crop=False, render=False, labels=True, save_dir=Path('')):
-        crops = []
-        for i, (im, pred) in enumerate(zip(self.ims, self.pred)):
-            s = f'image {i + 1}/{len(self.pred)}: {im.shape[0]}x{im.shape[1]} '  # string
-            if pred.shape[0]:
-                for c in pred[:, -1].unique():
-                    n
= (pred[:, -1] == c).sum() # detections per class - s += f"{n} {self.names[int(c)]}{'s' * (n > 1)}, " # add to string - if show or save or render or crop: - annotator = Annotator(im, example=str(self.names)) - for *box, conf, cls in reversed(pred): # xyxy, confidence, class - label = f'{self.names[int(cls)]} {conf:.2f}' - if crop: - file = save_dir / 'crops' / self.names[int(cls)] / self.files[i] if save else None - crops.append({ - 'box': box, - 'conf': conf, - 'cls': cls, - 'label': label, - 'im': save_one_box(box, im, file=file, save=save)}) - else: # all others - annotator.box_label(box, label if labels else '', color=colors(cls)) - im = annotator.im - else: - s += '(no detections)' - - im = Image.fromarray(im.astype(np.uint8)) if isinstance(im, np.ndarray) else im # from np - if pprint: - print(s.rstrip(', ')) - if show: - im.show(self.files[i]) # show - if save: - f = self.files[i] - im.save(save_dir / f) # save - if i == self.n - 1: - LOGGER.info(f"Saved {self.n} image{'s' * (self.n > 1)} to {colorstr('bold', save_dir)}") - if render: - self.ims[i] = np.asarray(im) - if crop: - if save: - LOGGER.info(f'Saved results to {save_dir}\n') - return crops - - def print(self): - self.display(pprint=True) # print results - print(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {tuple(self.s)}' % self.t) - - def show(self, labels=True): - self.display(show=True, labels=labels) # show results - - def save(self, labels=True, save_dir='runs/detect/exp'): - save_dir = increment_path(save_dir, exist_ok=save_dir != 'runs/detect/exp', mkdir=True) # increment save_dir - self.display(save=True, labels=labels, save_dir=save_dir) # save results - - def crop(self, save=True, save_dir='runs/detect/exp'): - save_dir = increment_path(save_dir, exist_ok=save_dir != 'runs/detect/exp', mkdir=True) if save else None - return self.display(crop=True, save=save, save_dir=save_dir) # crop results - - def render(self, labels=True): - self.display(render=True, labels=labels) # render results - return self.ims - - def pandas(self): - # return detections as pandas DataFrames, i.e. print(results.pandas().xyxy[0]) - new = copy(self) # return copy - ca = 'xmin', 'ymin', 'xmax', 'ymax', 'confidence', 'class', 'name' # xyxy columns - cb = 'xcenter', 'ycenter', 'width', 'height', 'confidence', 'class', 'name' # xywh columns - for k, c in zip(['xyxy', 'xyxyn', 'xywh', 'xywhn'], [ca, ca, cb, cb]): - a = [[x[:5] + [int(x[5]), self.names[int(x[5])]] for x in x.tolist()] for x in getattr(self, k)] # update - setattr(new, k, [pd.DataFrame(x, columns=c) for x in a]) - return new - - def tolist(self): - # return a list of Detections objects, i.e. 'for result in results.tolist():' - r = range(self.n) # iterable - x = [Detections([self.ims[i]], [self.pred[i]], [self.files[i]], self.times, self.names, self.s) for i in r] - # for d in x: - # for k in ['ims', 'pred', 'xyxy', 'xyxyn', 'xywh', 'xywhn']: - # setattr(d, k, getattr(d, k)[0]) # pop out of list - return x - - def __len__(self): - return self.n # override len(results) - - def __str__(self): - self.print() # override print(results) - return '' - - -class Classify(nn.Module): - # Classification head, i.e. 
x(b,c1,20,20) to x(b,c2) - def __init__(self, c1, c2, k=1, s=1, p=None, g=1): # ch_in, ch_out, kernel, stride, padding, groups - super().__init__() - c_ = 1280 # efficientnet_b0 size - self.conv = Conv(c1, c_, k, s, autopad(k, p), g) - self.pool = nn.AdaptiveAvgPool2d(1) # to x(b,c_,1,1) - self.drop = nn.Dropout(p=0.0, inplace=True) - self.linear = nn.Linear(c_, c2) # to x(b,c2) - - def forward(self, x): - if isinstance(x, list): - x = torch.cat(x, 1) - return self.linear(self.drop(self.pool(self.conv(x)).flatten(1))) diff --git a/src/yolov5_ros/src/yolov5/models/experimental.py b/src/yolov5_ros/src/yolov5/models/experimental.py deleted file mode 100644 index 02d35b9..0000000 --- a/src/yolov5_ros/src/yolov5/models/experimental.py +++ /dev/null @@ -1,111 +0,0 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license -""" -Experimental modules -""" -import math - -import numpy as np -import torch -import torch.nn as nn - -from utils.downloads import attempt_download - - -class Sum(nn.Module): - # Weighted sum of 2 or more layers https://arxiv.org/abs/1911.09070 - def __init__(self, n, weight=False): # n: number of inputs - super().__init__() - self.weight = weight # apply weights boolean - self.iter = range(n - 1) # iter object - if weight: - self.w = nn.Parameter(-torch.arange(1.0, n) / 2, requires_grad=True) # layer weights - - def forward(self, x): - y = x[0] # no weight - if self.weight: - w = torch.sigmoid(self.w) * 2 - for i in self.iter: - y = y + x[i + 1] * w[i] - else: - for i in self.iter: - y = y + x[i + 1] - return y - - -class MixConv2d(nn.Module): - # Mixed Depth-wise Conv https://arxiv.org/abs/1907.09595 - def __init__(self, c1, c2, k=(1, 3), s=1, equal_ch=True): # ch_in, ch_out, kernel, stride, ch_strategy - super().__init__() - n = len(k) # number of convolutions - if equal_ch: # equal c_ per group - i = torch.linspace(0, n - 1E-6, c2).floor() # c2 indices - c_ = [(i == g).sum() for g in range(n)] # intermediate channels - else: # equal weight.numel() per group - b = [c2] + [0] * n - a = np.eye(n + 1, n, k=-1) - a -= np.roll(a, 1, axis=1) - a *= np.array(k) ** 2 - a[0] = 1 - c_ = np.linalg.lstsq(a, b, rcond=None)[0].round() # solve for equal weight indices, ax = b - - self.m = nn.ModuleList([ - nn.Conv2d(c1, int(c_), k, s, k // 2, groups=math.gcd(c1, int(c_)), bias=False) for k, c_ in zip(k, c_)]) - self.bn = nn.BatchNorm2d(c2) - self.act = nn.SiLU() - - def forward(self, x): - return self.act(self.bn(torch.cat([m(x) for m in self.m], 1))) - - -class Ensemble(nn.ModuleList): - # Ensemble of models - def __init__(self): - super().__init__() - - def forward(self, x, augment=False, profile=False, visualize=False): - y = [module(x, augment, profile, visualize)[0] for module in self] - # y = torch.stack(y).max(0)[0] # max ensemble - # y = torch.stack(y).mean(0) # mean ensemble - y = torch.cat(y, 1) # nms ensemble - return y, None # inference, train output - - -def attempt_load(weights, device=None, inplace=True, fuse=True): - # Loads an ensemble of models weights=[a,b,c] or a single model weights=[a] or weights=a - from models.yolo import Detect, Model - - model = Ensemble() - for w in weights if isinstance(weights, list) else [weights]: - ckpt = torch.load(attempt_download(w), map_location='cpu') # load - ckpt = (ckpt.get('ema') or ckpt['model']).to(device).float() # FP32 model - - # Model compatibility updates - if not hasattr(ckpt, 'stride'): - ckpt.stride = torch.tensor([32.]) - if hasattr(ckpt, 'names') and isinstance(ckpt.names, (list, tuple)): - ckpt.names = 
dict(enumerate(ckpt.names)) # convert to dict - - model.append(ckpt.fuse().eval() if fuse and hasattr(ckpt, 'fuse') else ckpt.eval()) # model in eval mode - - # Module compatibility updates - for m in model.modules(): - t = type(m) - if t in (nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6, nn.SiLU, Detect, Model): - m.inplace = inplace # torch 1.7.0 compatibility - if t is Detect and not isinstance(m.anchor_grid, list): - delattr(m, 'anchor_grid') - setattr(m, 'anchor_grid', [torch.zeros(1)] * m.nl) - elif t is nn.Upsample and not hasattr(m, 'recompute_scale_factor'): - m.recompute_scale_factor = None # torch 1.11.0 compatibility - - # Return model - if len(model) == 1: - return model[-1] - - # Return detection ensemble - print(f'Ensemble created with {weights}\n') - for k in 'names', 'nc', 'yaml': - setattr(model, k, getattr(model[0], k)) - model.stride = model[torch.argmax(torch.tensor([m.stride.max() for m in model])).int()].stride # max stride - assert all(model[0].nc == m.nc for m in model), f'Models have different class counts: {[m.nc for m in model]}' - return model diff --git a/src/yolov5_ros/src/yolov5/models/hub/anchors.yaml b/src/yolov5_ros/src/yolov5/models/hub/anchors.yaml deleted file mode 100644 index e4d7beb..0000000 --- a/src/yolov5_ros/src/yolov5/models/hub/anchors.yaml +++ /dev/null @@ -1,59 +0,0 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license -# Default anchors for COCO data - - -# P5 ------------------------------------------------------------------------------------------------------------------- -# P5-640: -anchors_p5_640: - - [10,13, 16,30, 33,23] # P3/8 - - [30,61, 62,45, 59,119] # P4/16 - - [116,90, 156,198, 373,326] # P5/32 - - -# P6 ------------------------------------------------------------------------------------------------------------------- -# P6-640: thr=0.25: 0.9964 BPR, 5.54 anchors past thr, n=12, img_size=640, metric_all=0.281/0.716-mean/best, past_thr=0.469-mean: 9,11, 21,19, 17,41, 43,32, 39,70, 86,64, 65,131, 134,130, 120,265, 282,180, 247,354, 512,387 -anchors_p6_640: - - [9,11, 21,19, 17,41] # P3/8 - - [43,32, 39,70, 86,64] # P4/16 - - [65,131, 134,130, 120,265] # P5/32 - - [282,180, 247,354, 512,387] # P6/64 - -# P6-1280: thr=0.25: 0.9950 BPR, 5.55 anchors past thr, n=12, img_size=1280, metric_all=0.281/0.714-mean/best, past_thr=0.468-mean: 19,27, 44,40, 38,94, 96,68, 86,152, 180,137, 140,301, 303,264, 238,542, 436,615, 739,380, 925,792 -anchors_p6_1280: - - [19,27, 44,40, 38,94] # P3/8 - - [96,68, 86,152, 180,137] # P4/16 - - [140,301, 303,264, 238,542] # P5/32 - - [436,615, 739,380, 925,792] # P6/64 - -# P6-1920: thr=0.25: 0.9950 BPR, 5.55 anchors past thr, n=12, img_size=1920, metric_all=0.281/0.714-mean/best, past_thr=0.468-mean: 28,41, 67,59, 57,141, 144,103, 129,227, 270,205, 209,452, 455,396, 358,812, 653,922, 1109,570, 1387,1187 -anchors_p6_1920: - - [28,41, 67,59, 57,141] # P3/8 - - [144,103, 129,227, 270,205] # P4/16 - - [209,452, 455,396, 358,812] # P5/32 - - [653,922, 1109,570, 1387,1187] # P6/64 - - -# P7 ------------------------------------------------------------------------------------------------------------------- -# P7-640: thr=0.25: 0.9962 BPR, 6.76 anchors past thr, n=15, img_size=640, metric_all=0.275/0.733-mean/best, past_thr=0.466-mean: 11,11, 13,30, 29,20, 30,46, 61,38, 39,92, 78,80, 146,66, 79,163, 149,150, 321,143, 157,303, 257,402, 359,290, 524,372 -anchors_p7_640: - - [11,11, 13,30, 29,20] # P3/8 - - [30,46, 61,38, 39,92] # P4/16 - - [78,80, 146,66, 79,163] # P5/32 - - [149,150, 321,143, 157,303] # P6/64 - - [257,402, 
359,290, 524,372] # P7/128 - -# P7-1280: thr=0.25: 0.9968 BPR, 6.71 anchors past thr, n=15, img_size=1280, metric_all=0.273/0.732-mean/best, past_thr=0.463-mean: 19,22, 54,36, 32,77, 70,83, 138,71, 75,173, 165,159, 148,334, 375,151, 334,317, 251,626, 499,474, 750,326, 534,814, 1079,818 -anchors_p7_1280: - - [19,22, 54,36, 32,77] # P3/8 - - [70,83, 138,71, 75,173] # P4/16 - - [165,159, 148,334, 375,151] # P5/32 - - [334,317, 251,626, 499,474] # P6/64 - - [750,326, 534,814, 1079,818] # P7/128 - -# P7-1920: thr=0.25: 0.9968 BPR, 6.71 anchors past thr, n=15, img_size=1920, metric_all=0.273/0.732-mean/best, past_thr=0.463-mean: 29,34, 81,55, 47,115, 105,124, 207,107, 113,259, 247,238, 222,500, 563,227, 501,476, 376,939, 749,711, 1126,489, 801,1222, 1618,1227 -anchors_p7_1920: - - [29,34, 81,55, 47,115] # P3/8 - - [105,124, 207,107, 113,259] # P4/16 - - [247,238, 222,500, 563,227] # P5/32 - - [501,476, 376,939, 749,711] # P6/64 - - [1126,489, 801,1222, 1618,1227] # P7/128 diff --git a/src/yolov5_ros/src/yolov5/models/hub/yolov3-spp.yaml b/src/yolov5_ros/src/yolov5/models/hub/yolov3-spp.yaml deleted file mode 100644 index c669821..0000000 --- a/src/yolov5_ros/src/yolov5/models/hub/yolov3-spp.yaml +++ /dev/null @@ -1,51 +0,0 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license - -# Parameters -nc: 80 # number of classes -depth_multiple: 1.0 # model depth multiple -width_multiple: 1.0 # layer channel multiple -anchors: - - [10,13, 16,30, 33,23] # P3/8 - - [30,61, 62,45, 59,119] # P4/16 - - [116,90, 156,198, 373,326] # P5/32 - -# darknet53 backbone -backbone: - # [from, number, module, args] - [[-1, 1, Conv, [32, 3, 1]], # 0 - [-1, 1, Conv, [64, 3, 2]], # 1-P1/2 - [-1, 1, Bottleneck, [64]], - [-1, 1, Conv, [128, 3, 2]], # 3-P2/4 - [-1, 2, Bottleneck, [128]], - [-1, 1, Conv, [256, 3, 2]], # 5-P3/8 - [-1, 8, Bottleneck, [256]], - [-1, 1, Conv, [512, 3, 2]], # 7-P4/16 - [-1, 8, Bottleneck, [512]], - [-1, 1, Conv, [1024, 3, 2]], # 9-P5/32 - [-1, 4, Bottleneck, [1024]], # 10 - ] - -# YOLOv3-SPP head -head: - [[-1, 1, Bottleneck, [1024, False]], - [-1, 1, SPP, [512, [5, 9, 13]]], - [-1, 1, Conv, [1024, 3, 1]], - [-1, 1, Conv, [512, 1, 1]], - [-1, 1, Conv, [1024, 3, 1]], # 15 (P5/32-large) - - [-2, 1, Conv, [256, 1, 1]], - [-1, 1, nn.Upsample, [None, 2, 'nearest']], - [[-1, 8], 1, Concat, [1]], # cat backbone P4 - [-1, 1, Bottleneck, [512, False]], - [-1, 1, Bottleneck, [512, False]], - [-1, 1, Conv, [256, 1, 1]], - [-1, 1, Conv, [512, 3, 1]], # 22 (P4/16-medium) - - [-2, 1, Conv, [128, 1, 1]], - [-1, 1, nn.Upsample, [None, 2, 'nearest']], - [[-1, 6], 1, Concat, [1]], # cat backbone P3 - [-1, 1, Bottleneck, [256, False]], - [-1, 2, Bottleneck, [256, False]], # 27 (P3/8-small) - - [[27, 22, 15], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) - ] diff --git a/src/yolov5_ros/src/yolov5/models/hub/yolov3-tiny.yaml b/src/yolov5_ros/src/yolov5/models/hub/yolov3-tiny.yaml deleted file mode 100644 index b28b443..0000000 --- a/src/yolov5_ros/src/yolov5/models/hub/yolov3-tiny.yaml +++ /dev/null @@ -1,41 +0,0 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license - -# Parameters -nc: 80 # number of classes -depth_multiple: 1.0 # model depth multiple -width_multiple: 1.0 # layer channel multiple -anchors: - - [10,14, 23,27, 37,58] # P4/16 - - [81,82, 135,169, 344,319] # P5/32 - -# YOLOv3-tiny backbone -backbone: - # [from, number, module, args] - [[-1, 1, Conv, [16, 3, 1]], # 0 - [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 1-P1/2 - [-1, 1, Conv, [32, 3, 1]], - [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 3-P2/4 - [-1, 1, Conv, [64, 3, 1]], - [-1, 
1, nn.MaxPool2d, [2, 2, 0]], # 5-P3/8 - [-1, 1, Conv, [128, 3, 1]], - [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 7-P4/16 - [-1, 1, Conv, [256, 3, 1]], - [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 9-P5/32 - [-1, 1, Conv, [512, 3, 1]], - [-1, 1, nn.ZeroPad2d, [[0, 1, 0, 1]]], # 11 - [-1, 1, nn.MaxPool2d, [2, 1, 0]], # 12 - ] - -# YOLOv3-tiny head -head: - [[-1, 1, Conv, [1024, 3, 1]], - [-1, 1, Conv, [256, 1, 1]], - [-1, 1, Conv, [512, 3, 1]], # 15 (P5/32-large) - - [-2, 1, Conv, [128, 1, 1]], - [-1, 1, nn.Upsample, [None, 2, 'nearest']], - [[-1, 8], 1, Concat, [1]], # cat backbone P4 - [-1, 1, Conv, [256, 3, 1]], # 19 (P4/16-medium) - - [[19, 15], 1, Detect, [nc, anchors]], # Detect(P4, P5) - ] diff --git a/src/yolov5_ros/src/yolov5/models/hub/yolov3.yaml b/src/yolov5_ros/src/yolov5/models/hub/yolov3.yaml deleted file mode 100644 index d1ef912..0000000 --- a/src/yolov5_ros/src/yolov5/models/hub/yolov3.yaml +++ /dev/null @@ -1,51 +0,0 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license - -# Parameters -nc: 80 # number of classes -depth_multiple: 1.0 # model depth multiple -width_multiple: 1.0 # layer channel multiple -anchors: - - [10,13, 16,30, 33,23] # P3/8 - - [30,61, 62,45, 59,119] # P4/16 - - [116,90, 156,198, 373,326] # P5/32 - -# darknet53 backbone -backbone: - # [from, number, module, args] - [[-1, 1, Conv, [32, 3, 1]], # 0 - [-1, 1, Conv, [64, 3, 2]], # 1-P1/2 - [-1, 1, Bottleneck, [64]], - [-1, 1, Conv, [128, 3, 2]], # 3-P2/4 - [-1, 2, Bottleneck, [128]], - [-1, 1, Conv, [256, 3, 2]], # 5-P3/8 - [-1, 8, Bottleneck, [256]], - [-1, 1, Conv, [512, 3, 2]], # 7-P4/16 - [-1, 8, Bottleneck, [512]], - [-1, 1, Conv, [1024, 3, 2]], # 9-P5/32 - [-1, 4, Bottleneck, [1024]], # 10 - ] - -# YOLOv3 head -head: - [[-1, 1, Bottleneck, [1024, False]], - [-1, 1, Conv, [512, 1, 1]], - [-1, 1, Conv, [1024, 3, 1]], - [-1, 1, Conv, [512, 1, 1]], - [-1, 1, Conv, [1024, 3, 1]], # 15 (P5/32-large) - - [-2, 1, Conv, [256, 1, 1]], - [-1, 1, nn.Upsample, [None, 2, 'nearest']], - [[-1, 8], 1, Concat, [1]], # cat backbone P4 - [-1, 1, Bottleneck, [512, False]], - [-1, 1, Bottleneck, [512, False]], - [-1, 1, Conv, [256, 1, 1]], - [-1, 1, Conv, [512, 3, 1]], # 22 (P4/16-medium) - - [-2, 1, Conv, [128, 1, 1]], - [-1, 1, nn.Upsample, [None, 2, 'nearest']], - [[-1, 6], 1, Concat, [1]], # cat backbone P3 - [-1, 1, Bottleneck, [256, False]], - [-1, 2, Bottleneck, [256, False]], # 27 (P3/8-small) - - [[27, 22, 15], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) - ] diff --git a/src/yolov5_ros/src/yolov5/models/hub/yolov5-bifpn.yaml b/src/yolov5_ros/src/yolov5/models/hub/yolov5-bifpn.yaml deleted file mode 100644 index 504815f..0000000 --- a/src/yolov5_ros/src/yolov5/models/hub/yolov5-bifpn.yaml +++ /dev/null @@ -1,48 +0,0 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license - -# Parameters -nc: 80 # number of classes -depth_multiple: 1.0 # model depth multiple -width_multiple: 1.0 # layer channel multiple -anchors: - - [10,13, 16,30, 33,23] # P3/8 - - [30,61, 62,45, 59,119] # P4/16 - - [116,90, 156,198, 373,326] # P5/32 - -# YOLOv5 v6.0 backbone -backbone: - # [from, number, module, args] - [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 - [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 - [-1, 3, C3, [128]], - [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 - [-1, 6, C3, [256]], - [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 - [-1, 9, C3, [512]], - [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 - [-1, 3, C3, [1024]], - [-1, 1, SPPF, [1024, 5]], # 9 - ] - -# YOLOv5 v6.0 BiFPN head -head: - [[-1, 1, Conv, [512, 1, 1]], - [-1, 1, nn.Upsample, [None, 2, 'nearest']], - [[-1, 6], 1, Concat, 
[1]], # cat backbone P4 - [-1, 3, C3, [512, False]], # 13 - - [-1, 1, Conv, [256, 1, 1]], - [-1, 1, nn.Upsample, [None, 2, 'nearest']], - [[-1, 4], 1, Concat, [1]], # cat backbone P3 - [-1, 3, C3, [256, False]], # 17 (P3/8-small) - - [-1, 1, Conv, [256, 3, 2]], - [[-1, 14, 6], 1, Concat, [1]], # cat P4 <--- BiFPN change - [-1, 3, C3, [512, False]], # 20 (P4/16-medium) - - [-1, 1, Conv, [512, 3, 2]], - [[-1, 10], 1, Concat, [1]], # cat head P5 - [-1, 3, C3, [1024, False]], # 23 (P5/32-large) - - [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) - ] diff --git a/src/yolov5_ros/src/yolov5/models/hub/yolov5-fpn.yaml b/src/yolov5_ros/src/yolov5/models/hub/yolov5-fpn.yaml deleted file mode 100644 index a23e9c6..0000000 --- a/src/yolov5_ros/src/yolov5/models/hub/yolov5-fpn.yaml +++ /dev/null @@ -1,42 +0,0 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license - -# Parameters -nc: 80 # number of classes -depth_multiple: 1.0 # model depth multiple -width_multiple: 1.0 # layer channel multiple -anchors: - - [10,13, 16,30, 33,23] # P3/8 - - [30,61, 62,45, 59,119] # P4/16 - - [116,90, 156,198, 373,326] # P5/32 - -# YOLOv5 v6.0 backbone -backbone: - # [from, number, module, args] - [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 - [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 - [-1, 3, C3, [128]], - [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 - [-1, 6, C3, [256]], - [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 - [-1, 9, C3, [512]], - [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 - [-1, 3, C3, [1024]], - [-1, 1, SPPF, [1024, 5]], # 9 - ] - -# YOLOv5 v6.0 FPN head -head: - [[-1, 3, C3, [1024, False]], # 10 (P5/32-large) - - [-1, 1, nn.Upsample, [None, 2, 'nearest']], - [[-1, 6], 1, Concat, [1]], # cat backbone P4 - [-1, 1, Conv, [512, 1, 1]], - [-1, 3, C3, [512, False]], # 14 (P4/16-medium) - - [-1, 1, nn.Upsample, [None, 2, 'nearest']], - [[-1, 4], 1, Concat, [1]], # cat backbone P3 - [-1, 1, Conv, [256, 1, 1]], - [-1, 3, C3, [256, False]], # 18 (P3/8-small) - - [[18, 14, 10], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) - ] diff --git a/src/yolov5_ros/src/yolov5/models/hub/yolov5-p2.yaml b/src/yolov5_ros/src/yolov5/models/hub/yolov5-p2.yaml deleted file mode 100644 index 554117d..0000000 --- a/src/yolov5_ros/src/yolov5/models/hub/yolov5-p2.yaml +++ /dev/null @@ -1,54 +0,0 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license - -# Parameters -nc: 80 # number of classes -depth_multiple: 1.0 # model depth multiple -width_multiple: 1.0 # layer channel multiple -anchors: 3 # AutoAnchor evolves 3 anchors per P output layer - -# YOLOv5 v6.0 backbone -backbone: - # [from, number, module, args] - [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 - [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 - [-1, 3, C3, [128]], - [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 - [-1, 6, C3, [256]], - [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 - [-1, 9, C3, [512]], - [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 - [-1, 3, C3, [1024]], - [-1, 1, SPPF, [1024, 5]], # 9 - ] - -# YOLOv5 v6.0 head with (P2, P3, P4, P5) outputs -head: - [[-1, 1, Conv, [512, 1, 1]], - [-1, 1, nn.Upsample, [None, 2, 'nearest']], - [[-1, 6], 1, Concat, [1]], # cat backbone P4 - [-1, 3, C3, [512, False]], # 13 - - [-1, 1, Conv, [256, 1, 1]], - [-1, 1, nn.Upsample, [None, 2, 'nearest']], - [[-1, 4], 1, Concat, [1]], # cat backbone P3 - [-1, 3, C3, [256, False]], # 17 (P3/8-small) - - [-1, 1, Conv, [128, 1, 1]], - [-1, 1, nn.Upsample, [None, 2, 'nearest']], - [[-1, 2], 1, Concat, [1]], # cat backbone P2 - [-1, 1, C3, [128, False]], # 21 (P2/4-xsmall) - - [-1, 1, Conv, [128, 3, 2]], - [[-1, 18], 1, Concat, [1]], # cat head P3 - [-1, 
3, C3, [256, False]], # 24 (P3/8-small) - - [-1, 1, Conv, [256, 3, 2]], - [[-1, 14], 1, Concat, [1]], # cat head P4 - [-1, 3, C3, [512, False]], # 27 (P4/16-medium) - - [-1, 1, Conv, [512, 3, 2]], - [[-1, 10], 1, Concat, [1]], # cat head P5 - [-1, 3, C3, [1024, False]], # 30 (P5/32-large) - - [[21, 24, 27, 30], 1, Detect, [nc, anchors]], # Detect(P2, P3, P4, P5) - ] diff --git a/src/yolov5_ros/src/yolov5/models/hub/yolov5-p34.yaml b/src/yolov5_ros/src/yolov5/models/hub/yolov5-p34.yaml deleted file mode 100644 index dbf0f85..0000000 --- a/src/yolov5_ros/src/yolov5/models/hub/yolov5-p34.yaml +++ /dev/null @@ -1,41 +0,0 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license - -# Parameters -nc: 80 # number of classes -depth_multiple: 0.33 # model depth multiple -width_multiple: 0.50 # layer channel multiple -anchors: 3 # AutoAnchor evolves 3 anchors per P output layer - -# YOLOv5 v6.0 backbone -backbone: - # [from, number, module, args] - [ [ -1, 1, Conv, [ 64, 6, 2, 2 ] ], # 0-P1/2 - [ -1, 1, Conv, [ 128, 3, 2 ] ], # 1-P2/4 - [ -1, 3, C3, [ 128 ] ], - [ -1, 1, Conv, [ 256, 3, 2 ] ], # 3-P3/8 - [ -1, 6, C3, [ 256 ] ], - [ -1, 1, Conv, [ 512, 3, 2 ] ], # 5-P4/16 - [ -1, 9, C3, [ 512 ] ], - [ -1, 1, Conv, [ 1024, 3, 2 ] ], # 7-P5/32 - [ -1, 3, C3, [ 1024 ] ], - [ -1, 1, SPPF, [ 1024, 5 ] ], # 9 - ] - -# YOLOv5 v6.0 head with (P3, P4) outputs -head: - [ [ -1, 1, Conv, [ 512, 1, 1 ] ], - [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], - [ [ -1, 6 ], 1, Concat, [ 1 ] ], # cat backbone P4 - [ -1, 3, C3, [ 512, False ] ], # 13 - - [ -1, 1, Conv, [ 256, 1, 1 ] ], - [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], - [ [ -1, 4 ], 1, Concat, [ 1 ] ], # cat backbone P3 - [ -1, 3, C3, [ 256, False ] ], # 17 (P3/8-small) - - [ -1, 1, Conv, [ 256, 3, 2 ] ], - [ [ -1, 14 ], 1, Concat, [ 1 ] ], # cat head P4 - [ -1, 3, C3, [ 512, False ] ], # 20 (P4/16-medium) - - [ [ 17, 20 ], 1, Detect, [ nc, anchors ] ], # Detect(P3, P4) - ] diff --git a/src/yolov5_ros/src/yolov5/models/hub/yolov5-p6.yaml b/src/yolov5_ros/src/yolov5/models/hub/yolov5-p6.yaml deleted file mode 100644 index a17202f..0000000 --- a/src/yolov5_ros/src/yolov5/models/hub/yolov5-p6.yaml +++ /dev/null @@ -1,56 +0,0 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license - -# Parameters -nc: 80 # number of classes -depth_multiple: 1.0 # model depth multiple -width_multiple: 1.0 # layer channel multiple -anchors: 3 # AutoAnchor evolves 3 anchors per P output layer - -# YOLOv5 v6.0 backbone -backbone: - # [from, number, module, args] - [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 - [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 - [-1, 3, C3, [128]], - [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 - [-1, 6, C3, [256]], - [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 - [-1, 9, C3, [512]], - [-1, 1, Conv, [768, 3, 2]], # 7-P5/32 - [-1, 3, C3, [768]], - [-1, 1, Conv, [1024, 3, 2]], # 9-P6/64 - [-1, 3, C3, [1024]], - [-1, 1, SPPF, [1024, 5]], # 11 - ] - -# YOLOv5 v6.0 head with (P3, P4, P5, P6) outputs -head: - [[-1, 1, Conv, [768, 1, 1]], - [-1, 1, nn.Upsample, [None, 2, 'nearest']], - [[-1, 8], 1, Concat, [1]], # cat backbone P5 - [-1, 3, C3, [768, False]], # 15 - - [-1, 1, Conv, [512, 1, 1]], - [-1, 1, nn.Upsample, [None, 2, 'nearest']], - [[-1, 6], 1, Concat, [1]], # cat backbone P4 - [-1, 3, C3, [512, False]], # 19 - - [-1, 1, Conv, [256, 1, 1]], - [-1, 1, nn.Upsample, [None, 2, 'nearest']], - [[-1, 4], 1, Concat, [1]], # cat backbone P3 - [-1, 3, C3, [256, False]], # 23 (P3/8-small) - - [-1, 1, Conv, [256, 3, 2]], - [[-1, 20], 1, Concat, [1]], # cat head P4 - [-1, 3, C3, [512, False]], # 26 
(P4/16-medium) - - [-1, 1, Conv, [512, 3, 2]], - [[-1, 16], 1, Concat, [1]], # cat head P5 - [-1, 3, C3, [768, False]], # 29 (P5/32-large) - - [-1, 1, Conv, [768, 3, 2]], - [[-1, 12], 1, Concat, [1]], # cat head P6 - [-1, 3, C3, [1024, False]], # 32 (P6/64-xlarge) - - [[23, 26, 29, 32], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5, P6) - ] diff --git a/src/yolov5_ros/src/yolov5/models/hub/yolov5-p7.yaml b/src/yolov5_ros/src/yolov5/models/hub/yolov5-p7.yaml deleted file mode 100644 index edd7d13..0000000 --- a/src/yolov5_ros/src/yolov5/models/hub/yolov5-p7.yaml +++ /dev/null @@ -1,67 +0,0 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license - -# Parameters -nc: 80 # number of classes -depth_multiple: 1.0 # model depth multiple -width_multiple: 1.0 # layer channel multiple -anchors: 3 # AutoAnchor evolves 3 anchors per P output layer - -# YOLOv5 v6.0 backbone -backbone: - # [from, number, module, args] - [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 - [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 - [-1, 3, C3, [128]], - [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 - [-1, 6, C3, [256]], - [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 - [-1, 9, C3, [512]], - [-1, 1, Conv, [768, 3, 2]], # 7-P5/32 - [-1, 3, C3, [768]], - [-1, 1, Conv, [1024, 3, 2]], # 9-P6/64 - [-1, 3, C3, [1024]], - [-1, 1, Conv, [1280, 3, 2]], # 11-P7/128 - [-1, 3, C3, [1280]], - [-1, 1, SPPF, [1280, 5]], # 13 - ] - -# YOLOv5 v6.0 head with (P3, P4, P5, P6, P7) outputs -head: - [[-1, 1, Conv, [1024, 1, 1]], - [-1, 1, nn.Upsample, [None, 2, 'nearest']], - [[-1, 10], 1, Concat, [1]], # cat backbone P6 - [-1, 3, C3, [1024, False]], # 17 - - [-1, 1, Conv, [768, 1, 1]], - [-1, 1, nn.Upsample, [None, 2, 'nearest']], - [[-1, 8], 1, Concat, [1]], # cat backbone P5 - [-1, 3, C3, [768, False]], # 21 - - [-1, 1, Conv, [512, 1, 1]], - [-1, 1, nn.Upsample, [None, 2, 'nearest']], - [[-1, 6], 1, Concat, [1]], # cat backbone P4 - [-1, 3, C3, [512, False]], # 25 - - [-1, 1, Conv, [256, 1, 1]], - [-1, 1, nn.Upsample, [None, 2, 'nearest']], - [[-1, 4], 1, Concat, [1]], # cat backbone P3 - [-1, 3, C3, [256, False]], # 29 (P3/8-small) - - [-1, 1, Conv, [256, 3, 2]], - [[-1, 26], 1, Concat, [1]], # cat head P4 - [-1, 3, C3, [512, False]], # 32 (P4/16-medium) - - [-1, 1, Conv, [512, 3, 2]], - [[-1, 22], 1, Concat, [1]], # cat head P5 - [-1, 3, C3, [768, False]], # 35 (P5/32-large) - - [-1, 1, Conv, [768, 3, 2]], - [[-1, 18], 1, Concat, [1]], # cat head P6 - [-1, 3, C3, [1024, False]], # 38 (P6/64-xlarge) - - [-1, 1, Conv, [1024, 3, 2]], - [[-1, 14], 1, Concat, [1]], # cat head P7 - [-1, 3, C3, [1280, False]], # 41 (P7/128-xxlarge) - - [[29, 32, 35, 38, 41], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5, P6, P7) - ] diff --git a/src/yolov5_ros/src/yolov5/models/hub/yolov5-panet.yaml b/src/yolov5_ros/src/yolov5/models/hub/yolov5-panet.yaml deleted file mode 100644 index ccfbf90..0000000 --- a/src/yolov5_ros/src/yolov5/models/hub/yolov5-panet.yaml +++ /dev/null @@ -1,48 +0,0 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license - -# Parameters -nc: 80 # number of classes -depth_multiple: 1.0 # model depth multiple -width_multiple: 1.0 # layer channel multiple -anchors: - - [10,13, 16,30, 33,23] # P3/8 - - [30,61, 62,45, 59,119] # P4/16 - - [116,90, 156,198, 373,326] # P5/32 - -# YOLOv5 v6.0 backbone -backbone: - # [from, number, module, args] - [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 - [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 - [-1, 3, C3, [128]], - [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 - [-1, 6, C3, [256]], - [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 - [-1, 9, C3, [512]], - [-1, 1, Conv, [1024, 3, 
2]], # 7-P5/32 - [-1, 3, C3, [1024]], - [-1, 1, SPPF, [1024, 5]], # 9 - ] - -# YOLOv5 v6.0 PANet head -head: - [[-1, 1, Conv, [512, 1, 1]], - [-1, 1, nn.Upsample, [None, 2, 'nearest']], - [[-1, 6], 1, Concat, [1]], # cat backbone P4 - [-1, 3, C3, [512, False]], # 13 - - [-1, 1, Conv, [256, 1, 1]], - [-1, 1, nn.Upsample, [None, 2, 'nearest']], - [[-1, 4], 1, Concat, [1]], # cat backbone P3 - [-1, 3, C3, [256, False]], # 17 (P3/8-small) - - [-1, 1, Conv, [256, 3, 2]], - [[-1, 14], 1, Concat, [1]], # cat head P4 - [-1, 3, C3, [512, False]], # 20 (P4/16-medium) - - [-1, 1, Conv, [512, 3, 2]], - [[-1, 10], 1, Concat, [1]], # cat head P5 - [-1, 3, C3, [1024, False]], # 23 (P5/32-large) - - [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) - ] diff --git a/src/yolov5_ros/src/yolov5/models/hub/yolov5l6.yaml b/src/yolov5_ros/src/yolov5/models/hub/yolov5l6.yaml deleted file mode 100644 index 632c2cb..0000000 --- a/src/yolov5_ros/src/yolov5/models/hub/yolov5l6.yaml +++ /dev/null @@ -1,60 +0,0 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license - -# Parameters -nc: 80 # number of classes -depth_multiple: 1.0 # model depth multiple -width_multiple: 1.0 # layer channel multiple -anchors: - - [19,27, 44,40, 38,94] # P3/8 - - [96,68, 86,152, 180,137] # P4/16 - - [140,301, 303,264, 238,542] # P5/32 - - [436,615, 739,380, 925,792] # P6/64 - -# YOLOv5 v6.0 backbone -backbone: - # [from, number, module, args] - [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 - [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 - [-1, 3, C3, [128]], - [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 - [-1, 6, C3, [256]], - [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 - [-1, 9, C3, [512]], - [-1, 1, Conv, [768, 3, 2]], # 7-P5/32 - [-1, 3, C3, [768]], - [-1, 1, Conv, [1024, 3, 2]], # 9-P6/64 - [-1, 3, C3, [1024]], - [-1, 1, SPPF, [1024, 5]], # 11 - ] - -# YOLOv5 v6.0 head -head: - [[-1, 1, Conv, [768, 1, 1]], - [-1, 1, nn.Upsample, [None, 2, 'nearest']], - [[-1, 8], 1, Concat, [1]], # cat backbone P5 - [-1, 3, C3, [768, False]], # 15 - - [-1, 1, Conv, [512, 1, 1]], - [-1, 1, nn.Upsample, [None, 2, 'nearest']], - [[-1, 6], 1, Concat, [1]], # cat backbone P4 - [-1, 3, C3, [512, False]], # 19 - - [-1, 1, Conv, [256, 1, 1]], - [-1, 1, nn.Upsample, [None, 2, 'nearest']], - [[-1, 4], 1, Concat, [1]], # cat backbone P3 - [-1, 3, C3, [256, False]], # 23 (P3/8-small) - - [-1, 1, Conv, [256, 3, 2]], - [[-1, 20], 1, Concat, [1]], # cat head P4 - [-1, 3, C3, [512, False]], # 26 (P4/16-medium) - - [-1, 1, Conv, [512, 3, 2]], - [[-1, 16], 1, Concat, [1]], # cat head P5 - [-1, 3, C3, [768, False]], # 29 (P5/32-large) - - [-1, 1, Conv, [768, 3, 2]], - [[-1, 12], 1, Concat, [1]], # cat head P6 - [-1, 3, C3, [1024, False]], # 32 (P6/64-xlarge) - - [[23, 26, 29, 32], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5, P6) - ] diff --git a/src/yolov5_ros/src/yolov5/models/hub/yolov5m6.yaml b/src/yolov5_ros/src/yolov5/models/hub/yolov5m6.yaml deleted file mode 100644 index ecc53fd..0000000 --- a/src/yolov5_ros/src/yolov5/models/hub/yolov5m6.yaml +++ /dev/null @@ -1,60 +0,0 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license - -# Parameters -nc: 80 # number of classes -depth_multiple: 0.67 # model depth multiple -width_multiple: 0.75 # layer channel multiple -anchors: - - [19,27, 44,40, 38,94] # P3/8 - - [96,68, 86,152, 180,137] # P4/16 - - [140,301, 303,264, 238,542] # P5/32 - - [436,615, 739,380, 925,792] # P6/64 - -# YOLOv5 v6.0 backbone -backbone: - # [from, number, module, args] - [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 - [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 - [-1, 3, C3, [128]], - [-1, 
1, Conv, [256, 3, 2]], # 3-P3/8 - [-1, 6, C3, [256]], - [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 - [-1, 9, C3, [512]], - [-1, 1, Conv, [768, 3, 2]], # 7-P5/32 - [-1, 3, C3, [768]], - [-1, 1, Conv, [1024, 3, 2]], # 9-P6/64 - [-1, 3, C3, [1024]], - [-1, 1, SPPF, [1024, 5]], # 11 - ] - -# YOLOv5 v6.0 head -head: - [[-1, 1, Conv, [768, 1, 1]], - [-1, 1, nn.Upsample, [None, 2, 'nearest']], - [[-1, 8], 1, Concat, [1]], # cat backbone P5 - [-1, 3, C3, [768, False]], # 15 - - [-1, 1, Conv, [512, 1, 1]], - [-1, 1, nn.Upsample, [None, 2, 'nearest']], - [[-1, 6], 1, Concat, [1]], # cat backbone P4 - [-1, 3, C3, [512, False]], # 19 - - [-1, 1, Conv, [256, 1, 1]], - [-1, 1, nn.Upsample, [None, 2, 'nearest']], - [[-1, 4], 1, Concat, [1]], # cat backbone P3 - [-1, 3, C3, [256, False]], # 23 (P3/8-small) - - [-1, 1, Conv, [256, 3, 2]], - [[-1, 20], 1, Concat, [1]], # cat head P4 - [-1, 3, C3, [512, False]], # 26 (P4/16-medium) - - [-1, 1, Conv, [512, 3, 2]], - [[-1, 16], 1, Concat, [1]], # cat head P5 - [-1, 3, C3, [768, False]], # 29 (P5/32-large) - - [-1, 1, Conv, [768, 3, 2]], - [[-1, 12], 1, Concat, [1]], # cat head P6 - [-1, 3, C3, [1024, False]], # 32 (P6/64-xlarge) - - [[23, 26, 29, 32], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5, P6) - ] diff --git a/src/yolov5_ros/src/yolov5/models/hub/yolov5n6.yaml b/src/yolov5_ros/src/yolov5/models/hub/yolov5n6.yaml deleted file mode 100644 index 0c0c71d..0000000 --- a/src/yolov5_ros/src/yolov5/models/hub/yolov5n6.yaml +++ /dev/null @@ -1,60 +0,0 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license - -# Parameters -nc: 80 # number of classes -depth_multiple: 0.33 # model depth multiple -width_multiple: 0.25 # layer channel multiple -anchors: - - [19,27, 44,40, 38,94] # P3/8 - - [96,68, 86,152, 180,137] # P4/16 - - [140,301, 303,264, 238,542] # P5/32 - - [436,615, 739,380, 925,792] # P6/64 - -# YOLOv5 v6.0 backbone -backbone: - # [from, number, module, args] - [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 - [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 - [-1, 3, C3, [128]], - [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 - [-1, 6, C3, [256]], - [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 - [-1, 9, C3, [512]], - [-1, 1, Conv, [768, 3, 2]], # 7-P5/32 - [-1, 3, C3, [768]], - [-1, 1, Conv, [1024, 3, 2]], # 9-P6/64 - [-1, 3, C3, [1024]], - [-1, 1, SPPF, [1024, 5]], # 11 - ] - -# YOLOv5 v6.0 head -head: - [[-1, 1, Conv, [768, 1, 1]], - [-1, 1, nn.Upsample, [None, 2, 'nearest']], - [[-1, 8], 1, Concat, [1]], # cat backbone P5 - [-1, 3, C3, [768, False]], # 15 - - [-1, 1, Conv, [512, 1, 1]], - [-1, 1, nn.Upsample, [None, 2, 'nearest']], - [[-1, 6], 1, Concat, [1]], # cat backbone P4 - [-1, 3, C3, [512, False]], # 19 - - [-1, 1, Conv, [256, 1, 1]], - [-1, 1, nn.Upsample, [None, 2, 'nearest']], - [[-1, 4], 1, Concat, [1]], # cat backbone P3 - [-1, 3, C3, [256, False]], # 23 (P3/8-small) - - [-1, 1, Conv, [256, 3, 2]], - [[-1, 20], 1, Concat, [1]], # cat head P4 - [-1, 3, C3, [512, False]], # 26 (P4/16-medium) - - [-1, 1, Conv, [512, 3, 2]], - [[-1, 16], 1, Concat, [1]], # cat head P5 - [-1, 3, C3, [768, False]], # 29 (P5/32-large) - - [-1, 1, Conv, [768, 3, 2]], - [[-1, 12], 1, Concat, [1]], # cat head P6 - [-1, 3, C3, [1024, False]], # 32 (P6/64-xlarge) - - [[23, 26, 29, 32], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5, P6) - ] diff --git a/src/yolov5_ros/src/yolov5/models/hub/yolov5s-ghost.yaml b/src/yolov5_ros/src/yolov5/models/hub/yolov5s-ghost.yaml deleted file mode 100644 index ff9519c..0000000 --- a/src/yolov5_ros/src/yolov5/models/hub/yolov5s-ghost.yaml +++ /dev/null @@ -1,48 +0,0 @@ -# YOLOv5 🚀 
by Ultralytics, GPL-3.0 license
-
-# Parameters
-nc: 80  # number of classes
-depth_multiple: 0.33  # model depth multiple
-width_multiple: 0.50  # layer channel multiple
-anchors:
-  - [10,13, 16,30, 33,23]  # P3/8
-  - [30,61, 62,45, 59,119]  # P4/16
-  - [116,90, 156,198, 373,326]  # P5/32
-
-# YOLOv5 v6.0 backbone
-backbone:
-  # [from, number, module, args]
-  [[-1, 1, Conv, [64, 6, 2, 2]],  # 0-P1/2
-   [-1, 1, GhostConv, [128, 3, 2]],  # 1-P2/4
-   [-1, 3, C3Ghost, [128]],
-   [-1, 1, GhostConv, [256, 3, 2]],  # 3-P3/8
-   [-1, 6, C3Ghost, [256]],
-   [-1, 1, GhostConv, [512, 3, 2]],  # 5-P4/16
-   [-1, 9, C3Ghost, [512]],
-   [-1, 1, GhostConv, [1024, 3, 2]],  # 7-P5/32
-   [-1, 3, C3Ghost, [1024]],
-   [-1, 1, SPPF, [1024, 5]],  # 9
-  ]
-
-# YOLOv5 v6.0 head
-head:
-  [[-1, 1, GhostConv, [512, 1, 1]],
-   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
-   [[-1, 6], 1, Concat, [1]],  # cat backbone P4
-   [-1, 3, C3Ghost, [512, False]],  # 13
-
-   [-1, 1, GhostConv, [256, 1, 1]],
-   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
-   [[-1, 4], 1, Concat, [1]],  # cat backbone P3
-   [-1, 3, C3Ghost, [256, False]],  # 17 (P3/8-small)
-
-   [-1, 1, GhostConv, [256, 3, 2]],
-   [[-1, 14], 1, Concat, [1]],  # cat head P4
-   [-1, 3, C3Ghost, [512, False]],  # 20 (P4/16-medium)
-
-   [-1, 1, GhostConv, [512, 3, 2]],
-   [[-1, 10], 1, Concat, [1]],  # cat head P5
-   [-1, 3, C3Ghost, [1024, False]],  # 23 (P5/32-large)
-
-   [[17, 20, 23], 1, Detect, [nc, anchors]],  # Detect(P3, P4, P5)
-  ]
diff --git a/src/yolov5_ros/src/yolov5/models/hub/yolov5s-transformer.yaml b/src/yolov5_ros/src/yolov5/models/hub/yolov5s-transformer.yaml
deleted file mode 100644
index 100d7c4..0000000
--- a/src/yolov5_ros/src/yolov5/models/hub/yolov5s-transformer.yaml
+++ /dev/null
@@ -1,48 +0,0 @@
-# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
-
-# Parameters
-nc: 80  # number of classes
-depth_multiple: 0.33  # model depth multiple
-width_multiple: 0.50  # layer channel multiple
-anchors:
-  - [10,13, 16,30, 33,23]  # P3/8
-  - [30,61, 62,45, 59,119]  # P4/16
-  - [116,90, 156,198, 373,326]  # P5/32
-
-# YOLOv5 v6.0 backbone
-backbone:
-  # [from, number, module, args]
-  [[-1, 1, Conv, [64, 6, 2, 2]],  # 0-P1/2
-   [-1, 1, Conv, [128, 3, 2]],  # 1-P2/4
-   [-1, 3, C3, [128]],
-   [-1, 1, Conv, [256, 3, 2]],  # 3-P3/8
-   [-1, 6, C3, [256]],
-   [-1, 1, Conv, [512, 3, 2]],  # 5-P4/16
-   [-1, 9, C3, [512]],
-   [-1, 1, Conv, [1024, 3, 2]],  # 7-P5/32
-   [-1, 3, C3TR, [1024]],  # 8 <--- C3TR() Transformer module
-   [-1, 1, SPPF, [1024, 5]],  # 9
-  ]
-
-# YOLOv5 v6.0 head
-head:
-  [[-1, 1, Conv, [512, 1, 1]],
-   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
-   [[-1, 6], 1, Concat, [1]],  # cat backbone P4
-   [-1, 3, C3, [512, False]],  # 13
-
-   [-1, 1, Conv, [256, 1, 1]],
-   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
-   [[-1, 4], 1, Concat, [1]],  # cat backbone P3
-   [-1, 3, C3, [256, False]],  # 17 (P3/8-small)
-
-   [-1, 1, Conv, [256, 3, 2]],
-   [[-1, 14], 1, Concat, [1]],  # cat head P4
-   [-1, 3, C3, [512, False]],  # 20 (P4/16-medium)
-
-   [-1, 1, Conv, [512, 3, 2]],
-   [[-1, 10], 1, Concat, [1]],  # cat head P5
-   [-1, 3, C3, [1024, False]],  # 23 (P5/32-large)
-
-   [[17, 20, 23], 1, Detect, [nc, anchors]],  # Detect(P3, P4, P5)
-  ]
diff --git a/src/yolov5_ros/src/yolov5/models/hub/yolov5s6.yaml b/src/yolov5_ros/src/yolov5/models/hub/yolov5s6.yaml
deleted file mode 100644
index a28fb55..0000000
--- a/src/yolov5_ros/src/yolov5/models/hub/yolov5s6.yaml
+++ /dev/null
@@ -1,60 +0,0 @@
-# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
-
-# Parameters
-nc: 80  # number of classes
-depth_multiple: 0.33  # model depth multiple
-width_multiple: 0.50  # layer channel multiple
-anchors:
-  - [19,27, 44,40, 38,94]  # P3/8
-  - [96,68, 86,152, 180,137]  # P4/16
-  - [140,301, 303,264, 238,542]  # P5/32
-  - [436,615, 739,380, 925,792]  # P6/64
-
-# YOLOv5 v6.0 backbone
-backbone:
-  # [from, number, module, args]
-  [[-1, 1, Conv, [64, 6, 2, 2]],  # 0-P1/2
-   [-1, 1, Conv, [128, 3, 2]],  # 1-P2/4
-   [-1, 3, C3, [128]],
-   [-1, 1, Conv, [256, 3, 2]],  # 3-P3/8
-   [-1, 6, C3, [256]],
-   [-1, 1, Conv, [512, 3, 2]],  # 5-P4/16
-   [-1, 9, C3, [512]],
-   [-1, 1, Conv, [768, 3, 2]],  # 7-P5/32
-   [-1, 3, C3, [768]],
-   [-1, 1, Conv, [1024, 3, 2]],  # 9-P6/64
-   [-1, 3, C3, [1024]],
-   [-1, 1, SPPF, [1024, 5]],  # 11
-  ]
-
-# YOLOv5 v6.0 head
-head:
-  [[-1, 1, Conv, [768, 1, 1]],
-   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
-   [[-1, 8], 1, Concat, [1]],  # cat backbone P5
-   [-1, 3, C3, [768, False]],  # 15
-
-   [-1, 1, Conv, [512, 1, 1]],
-   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
-   [[-1, 6], 1, Concat, [1]],  # cat backbone P4
-   [-1, 3, C3, [512, False]],  # 19
-
-   [-1, 1, Conv, [256, 1, 1]],
-   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
-   [[-1, 4], 1, Concat, [1]],  # cat backbone P3
-   [-1, 3, C3, [256, False]],  # 23 (P3/8-small)
-
-   [-1, 1, Conv, [256, 3, 2]],
-   [[-1, 20], 1, Concat, [1]],  # cat head P4
-   [-1, 3, C3, [512, False]],  # 26 (P4/16-medium)
-
-   [-1, 1, Conv, [512, 3, 2]],
-   [[-1, 16], 1, Concat, [1]],  # cat head P5
-   [-1, 3, C3, [768, False]],  # 29 (P5/32-large)
-
-   [-1, 1, Conv, [768, 3, 2]],
-   [[-1, 12], 1, Concat, [1]],  # cat head P6
-   [-1, 3, C3, [1024, False]],  # 32 (P6/64-xlarge)
-
-   [[23, 26, 29, 32], 1, Detect, [nc, anchors]],  # Detect(P3, P4, P5, P6)
-  ]
diff --git a/src/yolov5_ros/src/yolov5/models/hub/yolov5x6.yaml b/src/yolov5_ros/src/yolov5/models/hub/yolov5x6.yaml
deleted file mode 100644
index ba795c4..0000000
--- a/src/yolov5_ros/src/yolov5/models/hub/yolov5x6.yaml
+++ /dev/null
@@ -1,60 +0,0 @@
-# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
-
-# Parameters
-nc: 80  # number of classes
-depth_multiple: 1.33  # model depth multiple
-width_multiple: 1.25  # layer channel multiple
-anchors:
-  - [19,27, 44,40, 38,94]  # P3/8
-  - [96,68, 86,152, 180,137]  # P4/16
-  - [140,301, 303,264, 238,542]  # P5/32
-  - [436,615, 739,380, 925,792]  # P6/64
-
-# YOLOv5 v6.0 backbone
-backbone:
-  # [from, number, module, args]
-  [[-1, 1, Conv, [64, 6, 2, 2]],  # 0-P1/2
-   [-1, 1, Conv, [128, 3, 2]],  # 1-P2/4
-   [-1, 3, C3, [128]],
-   [-1, 1, Conv, [256, 3, 2]],  # 3-P3/8
-   [-1, 6, C3, [256]],
-   [-1, 1, Conv, [512, 3, 2]],  # 5-P4/16
-   [-1, 9, C3, [512]],
-   [-1, 1, Conv, [768, 3, 2]],  # 7-P5/32
-   [-1, 3, C3, [768]],
-   [-1, 1, Conv, [1024, 3, 2]],  # 9-P6/64
-   [-1, 3, C3, [1024]],
-   [-1, 1, SPPF, [1024, 5]],  # 11
-  ]
-
-# YOLOv5 v6.0 head
-head:
-  [[-1, 1, Conv, [768, 1, 1]],
-   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
-   [[-1, 8], 1, Concat, [1]],  # cat backbone P5
-   [-1, 3, C3, [768, False]],  # 15
-
-   [-1, 1, Conv, [512, 1, 1]],
-   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
-   [[-1, 6], 1, Concat, [1]],  # cat backbone P4
-   [-1, 3, C3, [512, False]],  # 19
-
-   [-1, 1, Conv, [256, 1, 1]],
-   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
-   [[-1, 4], 1, Concat, [1]],  # cat backbone P3
-   [-1, 3, C3, [256, False]],  # 23 (P3/8-small)
-
-   [-1, 1, Conv, [256, 3, 2]],
-   [[-1, 20], 1, Concat, [1]],  # cat head P4
-   [-1, 3, C3, [512, False]],  # 26 (P4/16-medium)
-
-   [-1, 1, Conv, [512, 3, 2]],
-   [[-1, 16], 1, Concat, [1]],  # cat head P5
-   [-1, 3, C3, [768, False]],  # 29 (P5/32-large)
-
-   [-1, 1, Conv, [768, 3, 2]],
-   [[-1, 12], 1, Concat, [1]],  # cat head P6
-   [-1, 3, C3, [1024, False]],  # 32 (P6/64-xlarge)
-
- [[23, 26, 29, 32], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5, P6) - ] diff --git a/src/yolov5_ros/src/yolov5/models/tf.py b/src/yolov5_ros/src/yolov5/models/tf.py deleted file mode 100644 index ecb0d4d..0000000 --- a/src/yolov5_ros/src/yolov5/models/tf.py +++ /dev/null @@ -1,574 +0,0 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license -""" -TensorFlow, Keras and TFLite versions of YOLOv5 -Authored by https://github.com/zldrobit in PR https://github.com/ultralytics/yolov5/pull/1127 - -Usage: - $ python models/tf.py --weights yolov5s.pt - -Export: - $ python export.py --weights yolov5s.pt --include saved_model pb tflite tfjs -""" - -import argparse -import sys -from copy import deepcopy -from pathlib import Path - -FILE = Path(__file__).resolve() -ROOT = FILE.parents[1] # YOLOv5 root directory -if str(ROOT) not in sys.path: - sys.path.append(str(ROOT)) # add ROOT to PATH -# ROOT = ROOT.relative_to(Path.cwd()) # relative - -import numpy as np -import tensorflow as tf -import torch -import torch.nn as nn -from tensorflow import keras - -from models.common import (C3, SPP, SPPF, Bottleneck, BottleneckCSP, C3x, Concat, Conv, CrossConv, DWConv, - DWConvTranspose2d, Focus, autopad) -from models.experimental import MixConv2d, attempt_load -from models.yolo import Detect -from utils.activations import SiLU -from utils.general import LOGGER, make_divisible, print_args - - -class TFBN(keras.layers.Layer): - # TensorFlow BatchNormalization wrapper - def __init__(self, w=None): - super().__init__() - self.bn = keras.layers.BatchNormalization( - beta_initializer=keras.initializers.Constant(w.bias.numpy()), - gamma_initializer=keras.initializers.Constant(w.weight.numpy()), - moving_mean_initializer=keras.initializers.Constant(w.running_mean.numpy()), - moving_variance_initializer=keras.initializers.Constant(w.running_var.numpy()), - epsilon=w.eps) - - def call(self, inputs): - return self.bn(inputs) - - -class TFPad(keras.layers.Layer): - # Pad inputs in spatial dimensions 1 and 2 - def __init__(self, pad): - super().__init__() - if isinstance(pad, int): - self.pad = tf.constant([[0, 0], [pad, pad], [pad, pad], [0, 0]]) - else: # tuple/list - self.pad = tf.constant([[0, 0], [pad[0], pad[0]], [pad[1], pad[1]], [0, 0]]) - - def call(self, inputs): - return tf.pad(inputs, self.pad, mode='constant', constant_values=0) - - -class TFConv(keras.layers.Layer): - # Standard convolution - def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True, w=None): - # ch_in, ch_out, weights, kernel, stride, padding, groups - super().__init__() - assert g == 1, "TF v2.2 Conv2D does not support 'groups' argument" - # TensorFlow convolution padding is inconsistent with PyTorch (e.g. 
k=3 s=2 'SAME' padding) - # see https://stackoverflow.com/questions/52975843/comparing-conv2d-with-padding-between-tensorflow-and-pytorch - conv = keras.layers.Conv2D( - filters=c2, - kernel_size=k, - strides=s, - padding='SAME' if s == 1 else 'VALID', - use_bias=not hasattr(w, 'bn'), - kernel_initializer=keras.initializers.Constant(w.conv.weight.permute(2, 3, 1, 0).numpy()), - bias_initializer='zeros' if hasattr(w, 'bn') else keras.initializers.Constant(w.conv.bias.numpy())) - self.conv = conv if s == 1 else keras.Sequential([TFPad(autopad(k, p)), conv]) - self.bn = TFBN(w.bn) if hasattr(w, 'bn') else tf.identity - self.act = activations(w.act) if act else tf.identity - - def call(self, inputs): - return self.act(self.bn(self.conv(inputs))) - - -class TFDWConv(keras.layers.Layer): - # Depthwise convolution - def __init__(self, c1, c2, k=1, s=1, p=None, act=True, w=None): - # ch_in, ch_out, weights, kernel, stride, padding, groups - super().__init__() - assert c2 % c1 == 0, f'TFDWConv() output={c2} must be a multiple of input={c1} channels' - conv = keras.layers.DepthwiseConv2D( - kernel_size=k, - depth_multiplier=c2 // c1, - strides=s, - padding='SAME' if s == 1 else 'VALID', - use_bias=not hasattr(w, 'bn'), - depthwise_initializer=keras.initializers.Constant(w.conv.weight.permute(2, 3, 1, 0).numpy()), - bias_initializer='zeros' if hasattr(w, 'bn') else keras.initializers.Constant(w.conv.bias.numpy())) - self.conv = conv if s == 1 else keras.Sequential([TFPad(autopad(k, p)), conv]) - self.bn = TFBN(w.bn) if hasattr(w, 'bn') else tf.identity - self.act = activations(w.act) if act else tf.identity - - def call(self, inputs): - return self.act(self.bn(self.conv(inputs))) - - -class TFDWConvTranspose2d(keras.layers.Layer): - # Depthwise ConvTranspose2d - def __init__(self, c1, c2, k=1, s=1, p1=0, p2=0, w=None): - # ch_in, ch_out, weights, kernel, stride, padding, groups - super().__init__() - assert c1 == c2, f'TFDWConv() output={c2} must be equal to input={c1} channels' - assert k == 4 and p1 == 1, 'TFDWConv() only valid for k=4 and p1=1' - weight, bias = w.weight.permute(2, 3, 1, 0).numpy(), w.bias.numpy() - self.c1 = c1 - self.conv = [ - keras.layers.Conv2DTranspose(filters=1, - kernel_size=k, - strides=s, - padding='VALID', - output_padding=p2, - use_bias=True, - kernel_initializer=keras.initializers.Constant(weight[..., i:i + 1]), - bias_initializer=keras.initializers.Constant(bias[i])) for i in range(c1)] - - def call(self, inputs): - return tf.concat([m(x) for m, x in zip(self.conv, tf.split(inputs, self.c1, 3))], 3)[:, 1:-1, 1:-1] - - -class TFFocus(keras.layers.Layer): - # Focus wh information into c-space - def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True, w=None): - # ch_in, ch_out, kernel, stride, padding, groups - super().__init__() - self.conv = TFConv(c1 * 4, c2, k, s, p, g, act, w.conv) - - def call(self, inputs): # x(b,w,h,c) -> y(b,w/2,h/2,4c) - # inputs = inputs / 255 # normalize 0-255 to 0-1 - inputs = [inputs[:, ::2, ::2, :], inputs[:, 1::2, ::2, :], inputs[:, ::2, 1::2, :], inputs[:, 1::2, 1::2, :]] - return self.conv(tf.concat(inputs, 3)) - - -class TFBottleneck(keras.layers.Layer): - # Standard bottleneck - def __init__(self, c1, c2, shortcut=True, g=1, e=0.5, w=None): # ch_in, ch_out, shortcut, groups, expansion - super().__init__() - c_ = int(c2 * e) # hidden channels - self.cv1 = TFConv(c1, c_, 1, 1, w=w.cv1) - self.cv2 = TFConv(c_, c2, 3, 1, g=g, w=w.cv2) - self.add = shortcut and c1 == c2 - - def call(self, inputs): - return inputs + 
self.cv2(self.cv1(inputs)) if self.add else self.cv2(self.cv1(inputs)) - - -class TFCrossConv(keras.layers.Layer): - # Cross Convolution - def __init__(self, c1, c2, k=3, s=1, g=1, e=1.0, shortcut=False, w=None): - super().__init__() - c_ = int(c2 * e) # hidden channels - self.cv1 = TFConv(c1, c_, (1, k), (1, s), w=w.cv1) - self.cv2 = TFConv(c_, c2, (k, 1), (s, 1), g=g, w=w.cv2) - self.add = shortcut and c1 == c2 - - def call(self, inputs): - return inputs + self.cv2(self.cv1(inputs)) if self.add else self.cv2(self.cv1(inputs)) - - -class TFConv2d(keras.layers.Layer): - # Substitution for PyTorch nn.Conv2D - def __init__(self, c1, c2, k, s=1, g=1, bias=True, w=None): - super().__init__() - assert g == 1, "TF v2.2 Conv2D does not support 'groups' argument" - self.conv = keras.layers.Conv2D(filters=c2, - kernel_size=k, - strides=s, - padding='VALID', - use_bias=bias, - kernel_initializer=keras.initializers.Constant( - w.weight.permute(2, 3, 1, 0).numpy()), - bias_initializer=keras.initializers.Constant(w.bias.numpy()) if bias else None) - - def call(self, inputs): - return self.conv(inputs) - - -class TFBottleneckCSP(keras.layers.Layer): - # CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks - def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5, w=None): - # ch_in, ch_out, number, shortcut, groups, expansion - super().__init__() - c_ = int(c2 * e) # hidden channels - self.cv1 = TFConv(c1, c_, 1, 1, w=w.cv1) - self.cv2 = TFConv2d(c1, c_, 1, 1, bias=False, w=w.cv2) - self.cv3 = TFConv2d(c_, c_, 1, 1, bias=False, w=w.cv3) - self.cv4 = TFConv(2 * c_, c2, 1, 1, w=w.cv4) - self.bn = TFBN(w.bn) - self.act = lambda x: keras.activations.swish(x) - self.m = keras.Sequential([TFBottleneck(c_, c_, shortcut, g, e=1.0, w=w.m[j]) for j in range(n)]) - - def call(self, inputs): - y1 = self.cv3(self.m(self.cv1(inputs))) - y2 = self.cv2(inputs) - return self.cv4(self.act(self.bn(tf.concat((y1, y2), axis=3)))) - - -class TFC3(keras.layers.Layer): - # CSP Bottleneck with 3 convolutions - def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5, w=None): - # ch_in, ch_out, number, shortcut, groups, expansion - super().__init__() - c_ = int(c2 * e) # hidden channels - self.cv1 = TFConv(c1, c_, 1, 1, w=w.cv1) - self.cv2 = TFConv(c1, c_, 1, 1, w=w.cv2) - self.cv3 = TFConv(2 * c_, c2, 1, 1, w=w.cv3) - self.m = keras.Sequential([TFBottleneck(c_, c_, shortcut, g, e=1.0, w=w.m[j]) for j in range(n)]) - - def call(self, inputs): - return self.cv3(tf.concat((self.m(self.cv1(inputs)), self.cv2(inputs)), axis=3)) - - -class TFC3x(keras.layers.Layer): - # 3 module with cross-convolutions - def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5, w=None): - # ch_in, ch_out, number, shortcut, groups, expansion - super().__init__() - c_ = int(c2 * e) # hidden channels - self.cv1 = TFConv(c1, c_, 1, 1, w=w.cv1) - self.cv2 = TFConv(c1, c_, 1, 1, w=w.cv2) - self.cv3 = TFConv(2 * c_, c2, 1, 1, w=w.cv3) - self.m = keras.Sequential([ - TFCrossConv(c_, c_, k=3, s=1, g=g, e=1.0, shortcut=shortcut, w=w.m[j]) for j in range(n)]) - - def call(self, inputs): - return self.cv3(tf.concat((self.m(self.cv1(inputs)), self.cv2(inputs)), axis=3)) - - -class TFSPP(keras.layers.Layer): - # Spatial pyramid pooling layer used in YOLOv3-SPP - def __init__(self, c1, c2, k=(5, 9, 13), w=None): - super().__init__() - c_ = c1 // 2 # hidden channels - self.cv1 = TFConv(c1, c_, 1, 1, w=w.cv1) - self.cv2 = TFConv(c_ * (len(k) + 1), c2, 1, 1, w=w.cv2) - self.m = [keras.layers.MaxPool2D(pool_size=x, strides=1, padding='SAME') for 
x in k] - - def call(self, inputs): - x = self.cv1(inputs) - return self.cv2(tf.concat([x] + [m(x) for m in self.m], 3)) - - -class TFSPPF(keras.layers.Layer): - # Spatial pyramid pooling-Fast layer - def __init__(self, c1, c2, k=5, w=None): - super().__init__() - c_ = c1 // 2 # hidden channels - self.cv1 = TFConv(c1, c_, 1, 1, w=w.cv1) - self.cv2 = TFConv(c_ * 4, c2, 1, 1, w=w.cv2) - self.m = keras.layers.MaxPool2D(pool_size=k, strides=1, padding='SAME') - - def call(self, inputs): - x = self.cv1(inputs) - y1 = self.m(x) - y2 = self.m(y1) - return self.cv2(tf.concat([x, y1, y2, self.m(y2)], 3)) - - -class TFDetect(keras.layers.Layer): - # TF YOLOv5 Detect layer - def __init__(self, nc=80, anchors=(), ch=(), imgsz=(640, 640), w=None): # detection layer - super().__init__() - self.stride = tf.convert_to_tensor(w.stride.numpy(), dtype=tf.float32) - self.nc = nc # number of classes - self.no = nc + 5 # number of outputs per anchor - self.nl = len(anchors) # number of detection layers - self.na = len(anchors[0]) // 2 # number of anchors - self.grid = [tf.zeros(1)] * self.nl # init grid - self.anchors = tf.convert_to_tensor(w.anchors.numpy(), dtype=tf.float32) - self.anchor_grid = tf.reshape(self.anchors * tf.reshape(self.stride, [self.nl, 1, 1]), [self.nl, 1, -1, 1, 2]) - self.m = [TFConv2d(x, self.no * self.na, 1, w=w.m[i]) for i, x in enumerate(ch)] - self.training = False # set to False after building model - self.imgsz = imgsz - for i in range(self.nl): - ny, nx = self.imgsz[0] // self.stride[i], self.imgsz[1] // self.stride[i] - self.grid[i] = self._make_grid(nx, ny) - - def call(self, inputs): - z = [] # inference output - x = [] - for i in range(self.nl): - x.append(self.m[i](inputs[i])) - # x(bs,20,20,255) to x(bs,3,20,20,85) - ny, nx = self.imgsz[0] // self.stride[i], self.imgsz[1] // self.stride[i] - x[i] = tf.reshape(x[i], [-1, ny * nx, self.na, self.no]) - - if not self.training: # inference - y = tf.sigmoid(x[i]) - grid = tf.transpose(self.grid[i], [0, 2, 1, 3]) - 0.5 - anchor_grid = tf.transpose(self.anchor_grid[i], [0, 2, 1, 3]) * 4 - xy = (y[..., 0:2] * 2 + grid) * self.stride[i] # xy - wh = y[..., 2:4] ** 2 * anchor_grid - # Normalize xywh to 0-1 to reduce calibration error - xy /= tf.constant([[self.imgsz[1], self.imgsz[0]]], dtype=tf.float32) - wh /= tf.constant([[self.imgsz[1], self.imgsz[0]]], dtype=tf.float32) - y = tf.concat([xy, wh, y[..., 4:]], -1) - z.append(tf.reshape(y, [-1, self.na * ny * nx, self.no])) - - return tf.transpose(x, [0, 2, 1, 3]) if self.training else (tf.concat(z, 1), x) - - @staticmethod - def _make_grid(nx=20, ny=20): - # yv, xv = torch.meshgrid([torch.arange(ny), torch.arange(nx)]) - # return torch.stack((xv, yv), 2).view((1, 1, ny, nx, 2)).float() - xv, yv = tf.meshgrid(tf.range(nx), tf.range(ny)) - return tf.cast(tf.reshape(tf.stack([xv, yv], 2), [1, 1, ny * nx, 2]), dtype=tf.float32) - - -class TFUpsample(keras.layers.Layer): - # TF version of torch.nn.Upsample() - def __init__(self, size, scale_factor, mode, w=None): # warning: all arguments needed including 'w' - super().__init__() - assert scale_factor == 2, "scale_factor must be 2" - self.upsample = lambda x: tf.image.resize(x, (x.shape[1] * 2, x.shape[2] * 2), method=mode) - # self.upsample = keras.layers.UpSampling2D(size=scale_factor, interpolation=mode) - # with default arguments: align_corners=False, half_pixel_centers=False - # self.upsample = lambda x: tf.raw_ops.ResizeNearestNeighbor(images=x, - # size=(x.shape[1] * 2, x.shape[2] * 2)) - - def call(self, inputs): - return 
self.upsample(inputs) - - -class TFConcat(keras.layers.Layer): - # TF version of torch.concat() - def __init__(self, dimension=1, w=None): - super().__init__() - assert dimension == 1, "convert only NCHW to NHWC concat" - self.d = 3 - - def call(self, inputs): - return tf.concat(inputs, self.d) - - -def parse_model(d, ch, model, imgsz): # model_dict, input_channels(3) - LOGGER.info(f"\n{'':>3}{'from':>18}{'n':>3}{'params':>10} {'module':<40}{'arguments':<30}") - anchors, nc, gd, gw = d['anchors'], d['nc'], d['depth_multiple'], d['width_multiple'] - na = (len(anchors[0]) // 2) if isinstance(anchors, list) else anchors # number of anchors - no = na * (nc + 5) # number of outputs = anchors * (classes + 5) - - layers, save, c2 = [], [], ch[-1] # layers, savelist, ch out - for i, (f, n, m, args) in enumerate(d['backbone'] + d['head']): # from, number, module, args - m_str = m - m = eval(m) if isinstance(m, str) else m # eval strings - for j, a in enumerate(args): - try: - args[j] = eval(a) if isinstance(a, str) else a # eval strings - except NameError: - pass - - n = max(round(n * gd), 1) if n > 1 else n # depth gain - if m in [ - nn.Conv2d, Conv, DWConv, DWConvTranspose2d, Bottleneck, SPP, SPPF, MixConv2d, Focus, CrossConv, - BottleneckCSP, C3, C3x]: - c1, c2 = ch[f], args[0] - c2 = make_divisible(c2 * gw, 8) if c2 != no else c2 - - args = [c1, c2, *args[1:]] - if m in [BottleneckCSP, C3, C3x]: - args.insert(2, n) - n = 1 - elif m is nn.BatchNorm2d: - args = [ch[f]] - elif m is Concat: - c2 = sum(ch[-1 if x == -1 else x + 1] for x in f) - elif m is Detect: - args.append([ch[x + 1] for x in f]) - if isinstance(args[1], int): # number of anchors - args[1] = [list(range(args[1] * 2))] * len(f) - args.append(imgsz) - else: - c2 = ch[f] - - tf_m = eval('TF' + m_str.replace('nn.', '')) - m_ = keras.Sequential([tf_m(*args, w=model.model[i][j]) for j in range(n)]) if n > 1 \ - else tf_m(*args, w=model.model[i]) # module - - torch_m_ = nn.Sequential(*(m(*args) for _ in range(n))) if n > 1 else m(*args) # module - t = str(m)[8:-2].replace('__main__.', '') # module type - np = sum(x.numel() for x in torch_m_.parameters()) # number params - m_.i, m_.f, m_.type, m_.np = i, f, t, np # attach index, 'from' index, type, number params - LOGGER.info(f'{i:>3}{str(f):>18}{str(n):>3}{np:>10} {t:<40}{str(args):<30}') # print - save.extend(x % i for x in ([f] if isinstance(f, int) else f) if x != -1) # append to savelist - layers.append(m_) - ch.append(c2) - return keras.Sequential(layers), sorted(save) - - -class TFModel: - # TF YOLOv5 model - def __init__(self, cfg='yolov5s.yaml', ch=3, nc=None, model=None, imgsz=(640, 640)): # model, channels, classes - super().__init__() - if isinstance(cfg, dict): - self.yaml = cfg # model dict - else: # is *.yaml - import yaml # for torch hub - self.yaml_file = Path(cfg).name - with open(cfg) as f: - self.yaml = yaml.load(f, Loader=yaml.FullLoader) # model dict - - # Define model - if nc and nc != self.yaml['nc']: - LOGGER.info(f"Overriding {cfg} nc={self.yaml['nc']} with nc={nc}") - self.yaml['nc'] = nc # override yaml value - self.model, self.savelist = parse_model(deepcopy(self.yaml), ch=[ch], model=model, imgsz=imgsz) - - def predict(self, - inputs, - tf_nms=False, - agnostic_nms=False, - topk_per_class=100, - topk_all=100, - iou_thres=0.45, - conf_thres=0.25): - y = [] # outputs - x = inputs - for m in self.model.layers: - if m.f != -1: # if not from previous layer - x = y[m.f] if isinstance(m.f, int) else [x if j == -1 else y[j] for j in m.f] # from earlier layers - - x 
= m(x) # run - y.append(x if m.i in self.savelist else None) # save output - - # Add TensorFlow NMS - if tf_nms: - boxes = self._xywh2xyxy(x[0][..., :4]) - probs = x[0][:, :, 4:5] - classes = x[0][:, :, 5:] - scores = probs * classes - if agnostic_nms: - nms = AgnosticNMS()((boxes, classes, scores), topk_all, iou_thres, conf_thres) - else: - boxes = tf.expand_dims(boxes, 2) - nms = tf.image.combined_non_max_suppression(boxes, - scores, - topk_per_class, - topk_all, - iou_thres, - conf_thres, - clip_boxes=False) - return nms, x[1] - return x[0] # output only first tensor [1,6300,85] = [xywh, conf, class0, class1, ...] - # x = x[0][0] # [x(1,6300,85), ...] to x(6300,85) - # xywh = x[..., :4] # x(6300,4) boxes - # conf = x[..., 4:5] # x(6300,1) confidences - # cls = tf.reshape(tf.cast(tf.argmax(x[..., 5:], axis=1), tf.float32), (-1, 1)) # x(6300,1) classes - # return tf.concat([conf, cls, xywh], 1) - - @staticmethod - def _xywh2xyxy(xywh): - # Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right - x, y, w, h = tf.split(xywh, num_or_size_splits=4, axis=-1) - return tf.concat([x - w / 2, y - h / 2, x + w / 2, y + h / 2], axis=-1) - - -class AgnosticNMS(keras.layers.Layer): - # TF Agnostic NMS - def call(self, input, topk_all, iou_thres, conf_thres): - # wrap map_fn to avoid TypeSpec related error https://stackoverflow.com/a/65809989/3036450 - return tf.map_fn(lambda x: self._nms(x, topk_all, iou_thres, conf_thres), - input, - fn_output_signature=(tf.float32, tf.float32, tf.float32, tf.int32), - name='agnostic_nms') - - @staticmethod - def _nms(x, topk_all=100, iou_thres=0.45, conf_thres=0.25): # agnostic NMS - boxes, classes, scores = x - class_inds = tf.cast(tf.argmax(classes, axis=-1), tf.float32) - scores_inp = tf.reduce_max(scores, -1) - selected_inds = tf.image.non_max_suppression(boxes, - scores_inp, - max_output_size=topk_all, - iou_threshold=iou_thres, - score_threshold=conf_thres) - selected_boxes = tf.gather(boxes, selected_inds) - padded_boxes = tf.pad(selected_boxes, - paddings=[[0, topk_all - tf.shape(selected_boxes)[0]], [0, 0]], - mode="CONSTANT", - constant_values=0.0) - selected_scores = tf.gather(scores_inp, selected_inds) - padded_scores = tf.pad(selected_scores, - paddings=[[0, topk_all - tf.shape(selected_boxes)[0]]], - mode="CONSTANT", - constant_values=-1.0) - selected_classes = tf.gather(class_inds, selected_inds) - padded_classes = tf.pad(selected_classes, - paddings=[[0, topk_all - tf.shape(selected_boxes)[0]]], - mode="CONSTANT", - constant_values=-1.0) - valid_detections = tf.shape(selected_inds)[0] - return padded_boxes, padded_scores, padded_classes, valid_detections - - -def activations(act=nn.SiLU): - # Returns TF activation from input PyTorch activation - if isinstance(act, nn.LeakyReLU): - return lambda x: keras.activations.relu(x, alpha=0.1) - elif isinstance(act, nn.Hardswish): - return lambda x: x * tf.nn.relu6(x + 3) * 0.166666667 - elif isinstance(act, (nn.SiLU, SiLU)): - return lambda x: keras.activations.swish(x) - else: - raise Exception(f'no matching TensorFlow activation found for PyTorch activation {act}') - - -def representative_dataset_gen(dataset, ncalib=100): - # Representative dataset generator for use with converter.representative_dataset, returns a generator of np arrays - for n, (path, img, im0s, vid_cap, string) in enumerate(dataset): - im = np.transpose(img, [1, 2, 0]) - im = np.expand_dims(im, axis=0).astype(np.float32) - im /= 255 - yield [im] - if n >= ncalib: - break - - -def run( - weights=ROOT / 
'yolov5s.pt', # weights path - imgsz=(640, 640), # inference size h,w - batch_size=1, # batch size - dynamic=False, # dynamic batch size -): - # PyTorch model - im = torch.zeros((batch_size, 3, *imgsz)) # BCHW image - model = attempt_load(weights, device=torch.device('cpu'), inplace=True, fuse=False) - _ = model(im) # inference - model.info() - - # TensorFlow model - im = tf.zeros((batch_size, *imgsz, 3)) # BHWC image - tf_model = TFModel(cfg=model.yaml, model=model, nc=model.nc, imgsz=imgsz) - _ = tf_model.predict(im) # inference - - # Keras model - im = keras.Input(shape=(*imgsz, 3), batch_size=None if dynamic else batch_size) - keras_model = keras.Model(inputs=im, outputs=tf_model.predict(im)) - keras_model.summary() - - LOGGER.info('PyTorch, TensorFlow and Keras models successfully verified.\nUse export.py for TF model export.') - - -def parse_opt(): - parser = argparse.ArgumentParser() - parser.add_argument('--weights', type=str, default=ROOT / 'yolov5s.pt', help='weights path') - parser.add_argument('--imgsz', '--img', '--img-size', nargs='+', type=int, default=[640], help='inference size h,w') - parser.add_argument('--batch-size', type=int, default=1, help='batch size') - parser.add_argument('--dynamic', action='store_true', help='dynamic batch size') - opt = parser.parse_args() - opt.imgsz *= 2 if len(opt.imgsz) == 1 else 1 # expand - print_args(vars(opt)) - return opt - - -def main(opt): - run(**vars(opt)) - - -if __name__ == "__main__": - opt = parse_opt() - main(opt) diff --git a/src/yolov5_ros/src/yolov5/models/yolo.py b/src/yolov5_ros/src/yolov5/models/yolo.py deleted file mode 100644 index fa05fcf..0000000 --- a/src/yolov5_ros/src/yolov5/models/yolo.py +++ /dev/null @@ -1,357 +0,0 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license -""" -YOLO-specific modules - -Usage: - $ python models/yolo.py --cfg yolov5s.yaml -""" - -import argparse -import contextlib -import os -import platform -import sys -from copy import deepcopy -from pathlib import Path - -FILE = Path(__file__).resolve() -ROOT = FILE.parents[1] # YOLOv5 root directory -if str(ROOT) not in sys.path: - sys.path.append(str(ROOT)) # add ROOT to PATH -if platform.system() != 'Windows': - ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative - -from models.common import * -from models.experimental import * -from utils.autoanchor import check_anchor_order -from utils.general import LOGGER, check_version, check_yaml, make_divisible, print_args -from utils.plots import feature_visualization -from utils.torch_utils import (fuse_conv_and_bn, initialize_weights, model_info, profile, scale_img, select_device, - time_sync) - -try: - import thop # for FLOPs computation -except ImportError: - thop = None - - -class Detect(nn.Module): - stride = None # strides computed during build - dynamic = False # force grid reconstruction - export = False # export mode - - def __init__(self, nc=80, anchors=(), ch=(), inplace=True): # detection layer - super().__init__() - self.nc = nc # number of classes - self.no = nc + 5 # number of outputs per anchor - self.nl = len(anchors) # number of detection layers - self.na = len(anchors[0]) // 2 # number of anchors - self.grid = [torch.empty(1)] * self.nl # init grid - self.anchor_grid = [torch.empty(1)] * self.nl # init anchor grid - self.register_buffer('anchors', torch.tensor(anchors).float().view(self.nl, -1, 2)) # shape(nl,na,2) - self.m = nn.ModuleList(nn.Conv2d(x, self.no * self.na, 1) for x in ch) # output conv - self.inplace = inplace # use inplace ops (e.g. 
slice assignment) - - def forward(self, x): - z = [] # inference output - for i in range(self.nl): - x[i] = self.m[i](x[i]) # conv - bs, _, ny, nx = x[i].shape # x(bs,255,20,20) to x(bs,3,20,20,85) - x[i] = x[i].view(bs, self.na, self.no, ny, nx).permute(0, 1, 3, 4, 2).contiguous() - - if not self.training: # inference - if self.dynamic or self.grid[i].shape[2:4] != x[i].shape[2:4]: - self.grid[i], self.anchor_grid[i] = self._make_grid(nx, ny, i) - - y = x[i].sigmoid() - if self.inplace: - y[..., 0:2] = (y[..., 0:2] * 2 + self.grid[i]) * self.stride[i] # xy - y[..., 2:4] = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i] # wh - else: # for YOLOv5 on AWS Inferentia https://github.com/ultralytics/yolov5/pull/2953 - xy, wh, conf = y.split((2, 2, self.nc + 1), 4) # y.tensor_split((2, 4, 5), 4) # torch 1.8.0 - xy = (xy * 2 + self.grid[i]) * self.stride[i] # xy - wh = (wh * 2) ** 2 * self.anchor_grid[i] # wh - y = torch.cat((xy, wh, conf), 4) - z.append(y.view(bs, -1, self.no)) - - return x if self.training else (torch.cat(z, 1),) if self.export else (torch.cat(z, 1), x) - - def _make_grid(self, nx=20, ny=20, i=0, torch_1_10=check_version(torch.__version__, '1.10.0')): - d = self.anchors[i].device - t = self.anchors[i].dtype - shape = 1, self.na, ny, nx, 2 # grid shape - y, x = torch.arange(ny, device=d, dtype=t), torch.arange(nx, device=d, dtype=t) - yv, xv = torch.meshgrid(y, x, indexing='ij') if torch_1_10 else torch.meshgrid(y, x) # torch>=0.7 compatibility - grid = torch.stack((xv, yv), 2).expand(shape) - 0.5 # add grid offset, i.e. y = 2.0 * x - 0.5 - anchor_grid = (self.anchors[i] * self.stride[i]).view((1, self.na, 1, 1, 2)).expand(shape) - return grid, anchor_grid - - -class BaseModel(nn.Module): - # YOLOv5 base model - def forward(self, x, profile=False, visualize=False): - return self._forward_once(x, profile, visualize) # single-scale inference, train - - def _forward_once(self, x, profile=False, visualize=False): - y, dt = [], [] # outputs - for m in self.model: - if m.f != -1: # if not from previous layer - x = y[m.f] if isinstance(m.f, int) else [x if j == -1 else y[j] for j in m.f] # from earlier layers - if profile: - self._profile_one_layer(m, x, dt) - x = m(x) # run - y.append(x if m.i in self.save else None) # save output - if visualize: - feature_visualization(x, m.type, m.i, save_dir=visualize) - return x - - def _profile_one_layer(self, m, x, dt): - c = m == self.model[-1] # is final layer, copy input as inplace fix - o = thop.profile(m, inputs=(x.copy() if c else x,), verbose=False)[0] / 1E9 * 2 if thop else 0 # FLOPs - t = time_sync() - for _ in range(10): - m(x.copy() if c else x) - dt.append((time_sync() - t) * 100) - if m == self.model[0]: - LOGGER.info(f"{'time (ms)':>10s} {'GFLOPs':>10s} {'params':>10s} module") - LOGGER.info(f'{dt[-1]:10.2f} {o:10.2f} {m.np:10.0f} {m.type}') - if c: - LOGGER.info(f"{sum(dt):10.2f} {'-':>10s} {'-':>10s} Total") - - def fuse(self): # fuse model Conv2d() + BatchNorm2d() layers - LOGGER.info('Fusing layers... 
') - for m in self.model.modules(): - if isinstance(m, (Conv, DWConv)) and hasattr(m, 'bn'): - m.conv = fuse_conv_and_bn(m.conv, m.bn) # update conv - delattr(m, 'bn') # remove batchnorm - m.forward = m.forward_fuse # update forward - self.info() - return self - - def info(self, verbose=False, img_size=640): # print model information - model_info(self, verbose, img_size) - - def _apply(self, fn): - # Apply to(), cpu(), cuda(), half() to model tensors that are not parameters or registered buffers - self = super()._apply(fn) - m = self.model[-1] # Detect() - if isinstance(m, Detect): - m.stride = fn(m.stride) - m.grid = list(map(fn, m.grid)) - if isinstance(m.anchor_grid, list): - m.anchor_grid = list(map(fn, m.anchor_grid)) - return self - - -class DetectionModel(BaseModel): - # YOLOv5 detection model - def __init__(self, cfg='yolov5s.yaml', ch=3, nc=None, anchors=None): # model, input channels, number of classes - super().__init__() - if isinstance(cfg, dict): - self.yaml = cfg # model dict - else: # is *.yaml - import yaml # for torch hub - self.yaml_file = Path(cfg).name - with open(cfg, encoding='ascii', errors='ignore') as f: - self.yaml = yaml.safe_load(f) # model dict - - # Define model - ch = self.yaml['ch'] = self.yaml.get('ch', ch) # input channels - if nc and nc != self.yaml['nc']: - LOGGER.info(f"Overriding model.yaml nc={self.yaml['nc']} with nc={nc}") - self.yaml['nc'] = nc # override yaml value - if anchors: - LOGGER.info(f'Overriding model.yaml anchors with anchors={anchors}') - self.yaml['anchors'] = round(anchors) # override yaml value - self.model, self.save = parse_model(deepcopy(self.yaml), ch=[ch]) # model, savelist - self.names = [str(i) for i in range(self.yaml['nc'])] # default names - self.inplace = self.yaml.get('inplace', True) - - # Build strides, anchors - m = self.model[-1] # Detect() - if isinstance(m, Detect): - s = 256 # 2x min stride - m.inplace = self.inplace - m.stride = torch.tensor([s / x.shape[-2] for x in self.forward(torch.empty(1, ch, s, s))]) # forward - check_anchor_order(m) # must be in pixel-space (not grid-space) - m.anchors /= m.stride.view(-1, 1, 1) - self.stride = m.stride - self._initialize_biases() # only run once - - # Init weights, biases - initialize_weights(self) - self.info() - LOGGER.info('') - - def forward(self, x, augment=False, profile=False, visualize=False): - if augment: - return self._forward_augment(x) # augmented inference, None - return self._forward_once(x, profile, visualize) # single-scale inference, train - - def _forward_augment(self, x): - img_size = x.shape[-2:] # height, width - s = [1, 0.83, 0.67] # scales - f = [None, 3, None] # flips (2-ud, 3-lr) - y = [] # outputs - for si, fi in zip(s, f): - xi = scale_img(x.flip(fi) if fi else x, si, gs=int(self.stride.max())) - yi = self._forward_once(xi)[0] # forward - # cv2.imwrite(f'img_{si}.jpg', 255 * xi[0].cpu().numpy().transpose((1, 2, 0))[:, :, ::-1]) # save - yi = self._descale_pred(yi, fi, si, img_size) - y.append(yi) - y = self._clip_augmented(y) # clip augmented tails - return torch.cat(y, 1), None # augmented inference, train - - def _descale_pred(self, p, flips, scale, img_size): - # de-scale predictions following augmented inference (inverse operation) - if self.inplace: - p[..., :4] /= scale # de-scale - if flips == 2: - p[..., 1] = img_size[0] - p[..., 1] # de-flip ud - elif flips == 3: - p[..., 0] = img_size[1] - p[..., 0] # de-flip lr - else: - x, y, wh = p[..., 0:1] / scale, p[..., 1:2] / scale, p[..., 2:4] / scale # de-scale - if flips == 2: - y = 
img_size[0] - y # de-flip ud - elif flips == 3: - x = img_size[1] - x # de-flip lr - p = torch.cat((x, y, wh, p[..., 4:]), -1) - return p - - def _clip_augmented(self, y): - # Clip YOLOv5 augmented inference tails - nl = self.model[-1].nl # number of detection layers (P3-P5) - g = sum(4 ** x for x in range(nl)) # grid points - e = 1 # exclude layer count - i = (y[0].shape[1] // g) * sum(4 ** x for x in range(e)) # indices - y[0] = y[0][:, :-i] # large - i = (y[-1].shape[1] // g) * sum(4 ** (nl - 1 - x) for x in range(e)) # indices - y[-1] = y[-1][:, i:] # small - return y - - def _initialize_biases(self, cf=None): # initialize biases into Detect(), cf is class frequency - # https://arxiv.org/abs/1708.02002 section 3.3 - # cf = torch.bincount(torch.tensor(np.concatenate(dataset.labels, 0)[:, 0]).long(), minlength=nc) + 1. - m = self.model[-1] # Detect() module - for mi, s in zip(m.m, m.stride): # from - b = mi.bias.view(m.na, -1).detach() # conv.bias(255) to (3,85) - b[:, 4] += math.log(8 / (640 / s) ** 2) # obj (8 objects per 640 image) - b[:, 5:] += math.log(0.6 / (m.nc - 0.999999)) if cf is None else torch.log(cf / cf.sum()) # cls - mi.bias = torch.nn.Parameter(b.view(-1), requires_grad=True) - - -Model = DetectionModel # retain YOLOv5 'Model' class for backwards compatibility - - -class ClassificationModel(BaseModel): - # YOLOv5 classification model - def __init__(self, cfg=None, model=None, nc=1000, cutoff=10): # yaml, model, number of classes, cutoff index - super().__init__() - self._from_detection_model(model, nc, cutoff) if model is not None else self._from_yaml(cfg) - - def _from_detection_model(self, model, nc=1000, cutoff=10): - # Create a YOLOv5 classification model from a YOLOv5 detection model - if isinstance(model, DetectMultiBackend): - model = model.model # unwrap DetectMultiBackend - model.model = model.model[:cutoff] # backbone - m = model.model[-1] # last layer - ch = m.conv.in_channels if hasattr(m, 'conv') else m.cv1.conv.in_channels # ch into module - c = Classify(ch, nc) # Classify() - c.i, c.f, c.type = m.i, m.f, 'models.common.Classify' # index, from, type - model.model[-1] = c # replace - self.model = model.model - self.stride = model.stride - self.save = [] - self.nc = nc - - def _from_yaml(self, cfg): - # Create a YOLOv5 classification model from a *.yaml file - self.model = None - - -def parse_model(d, ch): # model_dict, input_channels(3) - LOGGER.info(f"\n{'':>3}{'from':>18}{'n':>3}{'params':>10} {'module':<40}{'arguments':<30}") - anchors, nc, gd, gw = d['anchors'], d['nc'], d['depth_multiple'], d['width_multiple'] - na = (len(anchors[0]) // 2) if isinstance(anchors, list) else anchors # number of anchors - no = na * (nc + 5) # number of outputs = anchors * (classes + 5) - - layers, save, c2 = [], [], ch[-1] # layers, savelist, ch out - for i, (f, n, m, args) in enumerate(d['backbone'] + d['head']): # from, number, module, args - m = eval(m) if isinstance(m, str) else m # eval strings - for j, a in enumerate(args): - with contextlib.suppress(NameError): - args[j] = eval(a) if isinstance(a, str) else a # eval strings - - n = n_ = max(round(n * gd), 1) if n > 1 else n # depth gain - if m in (Conv, GhostConv, Bottleneck, GhostBottleneck, SPP, SPPF, DWConv, MixConv2d, Focus, CrossConv, - BottleneckCSP, C3, C3TR, C3SPP, C3Ghost, nn.ConvTranspose2d, DWConvTranspose2d, C3x): - c1, c2 = ch[f], args[0] - if c2 != no: # if not output - c2 = make_divisible(c2 * gw, 8) - - args = [c1, c2, *args[1:]] - if m in [BottleneckCSP, C3, C3TR, C3Ghost, C3x]: - args.insert(2, n) 
# number of repeats - n = 1 - elif m is nn.BatchNorm2d: - args = [ch[f]] - elif m is Concat: - c2 = sum(ch[x] for x in f) - elif m is Detect: - args.append([ch[x] for x in f]) - if isinstance(args[1], int): # number of anchors - args[1] = [list(range(args[1] * 2))] * len(f) - elif m is Contract: - c2 = ch[f] * args[0] ** 2 - elif m is Expand: - c2 = ch[f] // args[0] ** 2 - else: - c2 = ch[f] - - m_ = nn.Sequential(*(m(*args) for _ in range(n))) if n > 1 else m(*args) # module - t = str(m)[8:-2].replace('__main__.', '') # module type - np = sum(x.numel() for x in m_.parameters()) # number params - m_.i, m_.f, m_.type, m_.np = i, f, t, np # attach index, 'from' index, type, number params - LOGGER.info(f'{i:>3}{str(f):>18}{n_:>3}{np:10.0f} {t:<40}{str(args):<30}') # print - save.extend(x % i for x in ([f] if isinstance(f, int) else f) if x != -1) # append to savelist - layers.append(m_) - if i == 0: - ch = [] - ch.append(c2) - return nn.Sequential(*layers), sorted(save) - - -if __name__ == '__main__': - parser = argparse.ArgumentParser() - parser.add_argument('--cfg', type=str, default='yolov5s.yaml', help='model.yaml') - parser.add_argument('--batch-size', type=int, default=1, help='total batch size for all GPUs') - parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu') - parser.add_argument('--profile', action='store_true', help='profile model speed') - parser.add_argument('--line-profile', action='store_true', help='profile model speed layer by layer') - parser.add_argument('--test', action='store_true', help='test all yolo*.yaml') - opt = parser.parse_args() - opt.cfg = check_yaml(opt.cfg) # check YAML - print_args(vars(opt)) - device = select_device(opt.device) - - # Create model - im = torch.rand(opt.batch_size, 3, 640, 640).to(device) - model = Model(opt.cfg).to(device) - - # Options - if opt.line_profile: # profile layer by layer - model(im, profile=True) - - elif opt.profile: # profile forward-backward - results = profile(input=im, ops=[model], n=3) - - elif opt.test: # test all models - for cfg in Path(ROOT / 'models').rglob('yolo*.yaml'): - try: - _ = Model(cfg) - except Exception as e: - print(f'Error in {cfg}: {e}') - - else: # report fused model summary - model.fuse() diff --git a/src/yolov5_ros/src/yolov5/models/yolov5l.yaml b/src/yolov5_ros/src/yolov5/models/yolov5l.yaml deleted file mode 100644 index ce8a5de..0000000 --- a/src/yolov5_ros/src/yolov5/models/yolov5l.yaml +++ /dev/null @@ -1,48 +0,0 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license - -# Parameters -nc: 80 # number of classes -depth_multiple: 1.0 # model depth multiple -width_multiple: 1.0 # layer channel multiple -anchors: - - [10,13, 16,30, 33,23] # P3/8 - - [30,61, 62,45, 59,119] # P4/16 - - [116,90, 156,198, 373,326] # P5/32 - -# YOLOv5 v6.0 backbone -backbone: - # [from, number, module, args] - [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 - [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 - [-1, 3, C3, [128]], - [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 - [-1, 6, C3, [256]], - [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 - [-1, 9, C3, [512]], - [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 - [-1, 3, C3, [1024]], - [-1, 1, SPPF, [1024, 5]], # 9 - ] - -# YOLOv5 v6.0 head -head: - [[-1, 1, Conv, [512, 1, 1]], - [-1, 1, nn.Upsample, [None, 2, 'nearest']], - [[-1, 6], 1, Concat, [1]], # cat backbone P4 - [-1, 3, C3, [512, False]], # 13 - - [-1, 1, Conv, [256, 1, 1]], - [-1, 1, nn.Upsample, [None, 2, 'nearest']], - [[-1, 4], 1, Concat, [1]], # cat backbone P3 - [-1, 3, C3, [256, False]], # 17 (P3/8-small) - - [-1, 1, 
Conv, [256, 3, 2]], - [[-1, 14], 1, Concat, [1]], # cat head P4 - [-1, 3, C3, [512, False]], # 20 (P4/16-medium) - - [-1, 1, Conv, [512, 3, 2]], - [[-1, 10], 1, Concat, [1]], # cat head P5 - [-1, 3, C3, [1024, False]], # 23 (P5/32-large) - - [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) - ] diff --git a/src/yolov5_ros/src/yolov5/models/yolov5m.yaml b/src/yolov5_ros/src/yolov5/models/yolov5m.yaml deleted file mode 100644 index ad13ab3..0000000 --- a/src/yolov5_ros/src/yolov5/models/yolov5m.yaml +++ /dev/null @@ -1,48 +0,0 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license - -# Parameters -nc: 80 # number of classes -depth_multiple: 0.67 # model depth multiple -width_multiple: 0.75 # layer channel multiple -anchors: - - [10,13, 16,30, 33,23] # P3/8 - - [30,61, 62,45, 59,119] # P4/16 - - [116,90, 156,198, 373,326] # P5/32 - -# YOLOv5 v6.0 backbone -backbone: - # [from, number, module, args] - [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 - [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 - [-1, 3, C3, [128]], - [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 - [-1, 6, C3, [256]], - [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 - [-1, 9, C3, [512]], - [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 - [-1, 3, C3, [1024]], - [-1, 1, SPPF, [1024, 5]], # 9 - ] - -# YOLOv5 v6.0 head -head: - [[-1, 1, Conv, [512, 1, 1]], - [-1, 1, nn.Upsample, [None, 2, 'nearest']], - [[-1, 6], 1, Concat, [1]], # cat backbone P4 - [-1, 3, C3, [512, False]], # 13 - - [-1, 1, Conv, [256, 1, 1]], - [-1, 1, nn.Upsample, [None, 2, 'nearest']], - [[-1, 4], 1, Concat, [1]], # cat backbone P3 - [-1, 3, C3, [256, False]], # 17 (P3/8-small) - - [-1, 1, Conv, [256, 3, 2]], - [[-1, 14], 1, Concat, [1]], # cat head P4 - [-1, 3, C3, [512, False]], # 20 (P4/16-medium) - - [-1, 1, Conv, [512, 3, 2]], - [[-1, 10], 1, Concat, [1]], # cat head P5 - [-1, 3, C3, [1024, False]], # 23 (P5/32-large) - - [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) - ] diff --git a/src/yolov5_ros/src/yolov5/models/yolov5n.yaml b/src/yolov5_ros/src/yolov5/models/yolov5n.yaml deleted file mode 100644 index 8a28a40..0000000 --- a/src/yolov5_ros/src/yolov5/models/yolov5n.yaml +++ /dev/null @@ -1,48 +0,0 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license - -# Parameters -nc: 80 # number of classes -depth_multiple: 0.33 # model depth multiple -width_multiple: 0.25 # layer channel multiple -anchors: - - [10,13, 16,30, 33,23] # P3/8 - - [30,61, 62,45, 59,119] # P4/16 - - [116,90, 156,198, 373,326] # P5/32 - -# YOLOv5 v6.0 backbone -backbone: - # [from, number, module, args] - [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 - [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 - [-1, 3, C3, [128]], - [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 - [-1, 6, C3, [256]], - [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 - [-1, 9, C3, [512]], - [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 - [-1, 3, C3, [1024]], - [-1, 1, SPPF, [1024, 5]], # 9 - ] - -# YOLOv5 v6.0 head -head: - [[-1, 1, Conv, [512, 1, 1]], - [-1, 1, nn.Upsample, [None, 2, 'nearest']], - [[-1, 6], 1, Concat, [1]], # cat backbone P4 - [-1, 3, C3, [512, False]], # 13 - - [-1, 1, Conv, [256, 1, 1]], - [-1, 1, nn.Upsample, [None, 2, 'nearest']], - [[-1, 4], 1, Concat, [1]], # cat backbone P3 - [-1, 3, C3, [256, False]], # 17 (P3/8-small) - - [-1, 1, Conv, [256, 3, 2]], - [[-1, 14], 1, Concat, [1]], # cat head P4 - [-1, 3, C3, [512, False]], # 20 (P4/16-medium) - - [-1, 1, Conv, [512, 3, 2]], - [[-1, 10], 1, Concat, [1]], # cat head P5 - [-1, 3, C3, [1024, False]], # 23 (P5/32-large) - - [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) - ] diff --git 
a/src/yolov5_ros/src/yolov5/models/yolov5s.yaml b/src/yolov5_ros/src/yolov5/models/yolov5s.yaml deleted file mode 100644 index f35beab..0000000 --- a/src/yolov5_ros/src/yolov5/models/yolov5s.yaml +++ /dev/null @@ -1,48 +0,0 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license - -# Parameters -nc: 80 # number of classes -depth_multiple: 0.33 # model depth multiple -width_multiple: 0.50 # layer channel multiple -anchors: - - [10,13, 16,30, 33,23] # P3/8 - - [30,61, 62,45, 59,119] # P4/16 - - [116,90, 156,198, 373,326] # P5/32 - -# YOLOv5 v6.0 backbone -backbone: - # [from, number, module, args] - [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 - [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 - [-1, 3, C3, [128]], - [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 - [-1, 6, C3, [256]], - [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 - [-1, 9, C3, [512]], - [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 - [-1, 3, C3, [1024]], - [-1, 1, SPPF, [1024, 5]], # 9 - ] - -# YOLOv5 v6.0 head -head: - [[-1, 1, Conv, [512, 1, 1]], - [-1, 1, nn.Upsample, [None, 2, 'nearest']], - [[-1, 6], 1, Concat, [1]], # cat backbone P4 - [-1, 3, C3, [512, False]], # 13 - - [-1, 1, Conv, [256, 1, 1]], - [-1, 1, nn.Upsample, [None, 2, 'nearest']], - [[-1, 4], 1, Concat, [1]], # cat backbone P3 - [-1, 3, C3, [256, False]], # 17 (P3/8-small) - - [-1, 1, Conv, [256, 3, 2]], - [[-1, 14], 1, Concat, [1]], # cat head P4 - [-1, 3, C3, [512, False]], # 20 (P4/16-medium) - - [-1, 1, Conv, [512, 3, 2]], - [[-1, 10], 1, Concat, [1]], # cat head P5 - [-1, 3, C3, [1024, False]], # 23 (P5/32-large) - - [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) - ] diff --git a/src/yolov5_ros/src/yolov5/models/yolov5x.yaml b/src/yolov5_ros/src/yolov5/models/yolov5x.yaml deleted file mode 100644 index f617a02..0000000 --- a/src/yolov5_ros/src/yolov5/models/yolov5x.yaml +++ /dev/null @@ -1,48 +0,0 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license - -# Parameters -nc: 80 # number of classes -depth_multiple: 1.33 # model depth multiple -width_multiple: 1.25 # layer channel multiple -anchors: - - [10,13, 16,30, 33,23] # P3/8 - - [30,61, 62,45, 59,119] # P4/16 - - [116,90, 156,198, 373,326] # P5/32 - -# YOLOv5 v6.0 backbone -backbone: - # [from, number, module, args] - [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 - [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 - [-1, 3, C3, [128]], - [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 - [-1, 6, C3, [256]], - [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 - [-1, 9, C3, [512]], - [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 - [-1, 3, C3, [1024]], - [-1, 1, SPPF, [1024, 5]], # 9 - ] - -# YOLOv5 v6.0 head -head: - [[-1, 1, Conv, [512, 1, 1]], - [-1, 1, nn.Upsample, [None, 2, 'nearest']], - [[-1, 6], 1, Concat, [1]], # cat backbone P4 - [-1, 3, C3, [512, False]], # 13 - - [-1, 1, Conv, [256, 1, 1]], - [-1, 1, nn.Upsample, [None, 2, 'nearest']], - [[-1, 4], 1, Concat, [1]], # cat backbone P3 - [-1, 3, C3, [256, False]], # 17 (P3/8-small) - - [-1, 1, Conv, [256, 3, 2]], - [[-1, 14], 1, Concat, [1]], # cat head P4 - [-1, 3, C3, [512, False]], # 20 (P4/16-medium) - - [-1, 1, Conv, [512, 3, 2]], - [[-1, 10], 1, Concat, [1]], # cat head P5 - [-1, 3, C3, [1024, False]], # 23 (P5/32-large) - - [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) - ] diff --git a/src/yolov5_ros/src/yolov5/requirements.txt b/src/yolov5_ros/src/yolov5/requirements.txt deleted file mode 100644 index 44fe1ce..0000000 --- a/src/yolov5_ros/src/yolov5/requirements.txt +++ /dev/null @@ -1,43 +0,0 @@ -# YOLOv5 requirements -# Usage: pip install -r requirements.txt - -# Base 
----------------------------------------
-matplotlib>=3.2.2
-numpy>=1.18.5
-opencv-python>=4.1.1
-Pillow>=7.1.2
-PyYAML>=5.3.1
-requests>=2.23.0
-scipy>=1.4.1
-torch>=1.7.0
-torchvision>=0.8.1
-tqdm>=4.64.0
-# protobuf<=3.20.1  # https://github.com/ultralytics/yolov5/issues/8012
-
-# Logging -------------------------------------
-tensorboard>=2.4.1
-# wandb
-# clearml
-
-# Plotting ------------------------------------
-pandas>=1.1.4
-seaborn>=0.11.0
-
-# Export --------------------------------------
-# coremltools>=5.2  # CoreML export
-# onnx>=1.9.0  # ONNX export
-# onnx-simplifier>=0.4.1  # ONNX simplifier
-# nvidia-pyindex  # TensorRT export
-# nvidia-tensorrt  # TensorRT export
-# scikit-learn==0.19.2  # CoreML quantization
-# tensorflow>=2.4.1  # TFLite export (or tensorflow-cpu, tensorflow-aarch64)
-# tensorflowjs>=3.9.0  # TF.js export
-# openvino-dev  # OpenVINO export
-
-# Extras --------------------------------------
-ipython  # interactive notebook
-psutil  # system utilization
-thop>=0.1.1  # FLOPs computation
-# albumentations>=1.0.3
-# pycocotools>=2.0  # COCO mAP
-# roboflow
diff --git a/src/yolov5_ros/src/yolov5/setup.cfg b/src/yolov5_ros/src/yolov5/setup.cfg
deleted file mode 100644
index 020a757..0000000
--- a/src/yolov5_ros/src/yolov5/setup.cfg
+++ /dev/null
@@ -1,59 +0,0 @@
-# Project-wide configuration file, can be used for package metadata and other tool configurations
-# Example usage: global configuration for PEP8 (via flake8) settings or default pytest arguments
-# Local usage: pip install pre-commit, pre-commit run --all-files
-
-[metadata]
-license_file = LICENSE
-description_file = README.md
-
-
-[tool:pytest]
-norecursedirs =
-    .git
-    dist
-    build
-addopts =
-    --doctest-modules
-    --durations=25
-    --color=yes
-
-
-[flake8]
-max-line-length = 120
-exclude = .tox,*.egg,build,temp
-select = E,W,F
-doctests = True
-verbose = 2
-# https://pep8.readthedocs.io/en/latest/intro.html#error-codes
-format = pylint
-# see: https://www.flake8rules.com/
-ignore =
    E731  # Do not assign a lambda expression, use a def
    F405  # name may be undefined, or defined from star imports: module
    E402  # module level import not at top of file
    F401  # module imported but unused
    W504  # line break after binary operator
    E127  # continuation line over-indented for visual indent
    E231  # missing whitespace after ‘,’, ‘;’, or ‘:’
    E501  # line too long
    F403  # ‘from module import *’ used; unable to detect undefined names
-
-
-[isort]
-# https://pycqa.github.io/isort/docs/configuration/options.html
-line_length = 120
-# see: https://pycqa.github.io/isort/docs/configuration/multi_line_output_modes.html
-multi_line_output = 0
-
-
-[yapf]
-based_on_style = pep8
-spaces_before_comment = 2
-COLUMN_LIMIT = 120
-COALESCE_BRACKETS = True
-SPACES_AROUND_POWER_OPERATOR = True
-SPACE_BETWEEN_ENDING_COMMA_AND_CLOSING_BRACKET = False
-SPLIT_BEFORE_CLOSING_BRACKET = False
-SPLIT_BEFORE_FIRST_ARGUMENT = False
-# EACH_DICT_ENTRY_ON_SEPARATE_LINE = False
diff --git a/src/yolov5_ros/src/yolov5/train.py b/src/yolov5_ros/src/yolov5/train.py
deleted file mode 100644
index 4eff6e5..0000000
--- a/src/yolov5_ros/src/yolov5/train.py
+++ /dev/null
@@ -1,630 +0,0 @@
-# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
-"""
-Train a YOLOv5 model on a custom dataset.
-Models and datasets download automatically from the latest YOLOv5 release.
- -Usage - Single-GPU training: - $ python train.py --data coco128.yaml --weights yolov5s.pt --img 640 # from pretrained (recommended) - $ python train.py --data coco128.yaml --weights '' --cfg yolov5s.yaml --img 640 # from scratch - -Usage - Multi-GPU DDP training: - $ python -m torch.distributed.run --nproc_per_node 4 --master_port 1 train.py --data coco128.yaml --weights yolov5s.pt --img 640 --device 0,1,2,3 - -Models: https://github.com/ultralytics/yolov5/tree/master/models -Datasets: https://github.com/ultralytics/yolov5/tree/master/data -Tutorial: https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data -""" - -import argparse -import math -import os -import random -import sys -import time -from copy import deepcopy -from datetime import datetime -from pathlib import Path - -import numpy as np -import torch -import torch.distributed as dist -import torch.nn as nn -import yaml -from torch.optim import lr_scheduler -from tqdm import tqdm - -FILE = Path(__file__).resolve() -ROOT = FILE.parents[0] # YOLOv5 root directory -if str(ROOT) not in sys.path: - sys.path.append(str(ROOT)) # add ROOT to PATH -ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative - -import val as validate # for end-of-epoch mAP -from models.experimental import attempt_load -from models.yolo import Model -from utils.autoanchor import check_anchors -from utils.autobatch import check_train_batch_size -from utils.callbacks import Callbacks -from utils.dataloaders import create_dataloader -from utils.downloads import attempt_download, is_url -from utils.general import (LOGGER, check_amp, check_dataset, check_file, check_git_status, check_img_size, - check_requirements, check_suffix, check_yaml, colorstr, get_latest_run, increment_path, - init_seeds, intersect_dicts, labels_to_class_weights, labels_to_image_weights, methods, - one_cycle, print_args, print_mutation, strip_optimizer, yaml_save) -from utils.loggers import Loggers -from utils.loggers.comet.comet_utils import check_comet_resume -from utils.loggers.wandb.wandb_utils import check_wandb_resume -from utils.loss import ComputeLoss -from utils.metrics import fitness -from utils.plots import plot_evolve -from utils.torch_utils import (EarlyStopping, ModelEMA, de_parallel, select_device, smart_DDP, smart_optimizer, - smart_resume, torch_distributed_zero_first) - -LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1)) # https://pytorch.org/docs/stable/elastic/run.html -RANK = int(os.getenv('RANK', -1)) -WORLD_SIZE = int(os.getenv('WORLD_SIZE', 1)) - - -def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictionary - save_dir, epochs, batch_size, weights, single_cls, evolve, data, cfg, resume, noval, nosave, workers, freeze = \ - Path(opt.save_dir), opt.epochs, opt.batch_size, opt.weights, opt.single_cls, opt.evolve, opt.data, opt.cfg, \ - opt.resume, opt.noval, opt.nosave, opt.workers, opt.freeze - callbacks.run('on_pretrain_routine_start') - - # Directories - w = save_dir / 'weights' # weights dir - (w.parent if evolve else w).mkdir(parents=True, exist_ok=True) # make dir - last, best = w / 'last.pt', w / 'best.pt' - - # Hyperparameters - if isinstance(hyp, str): - with open(hyp, errors='ignore') as f: - hyp = yaml.safe_load(f) # load hyps dict - LOGGER.info(colorstr('hyperparameters: ') + ', '.join(f'{k}={v}' for k, v in hyp.items())) - opt.hyp = hyp.copy() # for saving hyps to checkpoints - - # Save run settings - if not evolve: - yaml_save(save_dir / 'hyp.yaml', hyp) - yaml_save(save_dir / 'opt.yaml', vars(opt)) - - # Loggers - data_dict = None 
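- # data_dict is filled on the primary rank from loggers.remote_dataset just below; any rank still holding None falls back to check_dataset(data) in the Config block.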
- if RANK in {-1, 0}: - loggers = Loggers(save_dir, weights, opt, hyp, LOGGER) # loggers instance - - # Register actions - for k in methods(loggers): - callbacks.register_action(k, callback=getattr(loggers, k)) - - # Process custom dataset artifact link - data_dict = loggers.remote_dataset - if resume: # If resuming runs from remote artifact - weights, epochs, hyp, batch_size = opt.weights, opt.epochs, opt.hyp, opt.batch_size - - # Config - plots = not evolve and not opt.noplots # create plots - cuda = device.type != 'cpu' - init_seeds(opt.seed + 1 + RANK, deterministic=True) - with torch_distributed_zero_first(LOCAL_RANK): - data_dict = data_dict or check_dataset(data) # check if None - train_path, val_path = data_dict['train'], data_dict['val'] - nc = 1 if single_cls else int(data_dict['nc']) # number of classes - names = {0: 'item'} if single_cls and len(data_dict['names']) != 1 else data_dict['names'] # class names - is_coco = isinstance(val_path, str) and val_path.endswith('coco/val2017.txt') # COCO dataset - - # Model - check_suffix(weights, '.pt') # check weights - pretrained = weights.endswith('.pt') - if pretrained: - with torch_distributed_zero_first(LOCAL_RANK): - weights = attempt_download(weights) # download if not found locally - ckpt = torch.load(weights, map_location='cpu') # load checkpoint to CPU to avoid CUDA memory leak - model = Model(cfg or ckpt['model'].yaml, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device) # create - exclude = ['anchor'] if (cfg or hyp.get('anchors')) and not resume else [] # exclude keys - csd = ckpt['model'].float().state_dict() # checkpoint state_dict as FP32 - csd = intersect_dicts(csd, model.state_dict(), exclude=exclude) # intersect - model.load_state_dict(csd, strict=False) # load - LOGGER.info(f'Transferred {len(csd)}/{len(model.state_dict())} items from {weights}') # report - else: - model = Model(cfg, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device) # create - amp = check_amp(model) # check AMP - - # Freeze - freeze = [f'model.{x}.' 
for x in (freeze if len(freeze) > 1 else range(freeze[0]))] # layers to freeze - for k, v in model.named_parameters(): - v.requires_grad = True # train all layers - # v.register_hook(lambda x: torch.nan_to_num(x)) # NaN to 0 (commented for erratic training results) - if any(x in k for x in freeze): - LOGGER.info(f'freezing {k}') - v.requires_grad = False - - # Image size - gs = max(int(model.stride.max()), 32) # grid size (max stride) - imgsz = check_img_size(opt.imgsz, gs, floor=gs * 2) # verify imgsz is gs-multiple - - # Batch size - if RANK == -1 and batch_size == -1: # single-GPU only, estimate best batch size - batch_size = check_train_batch_size(model, imgsz, amp) - loggers.on_params_update({"batch_size": batch_size}) - - # Optimizer - nbs = 64 # nominal batch size - accumulate = max(round(nbs / batch_size), 1) # accumulate loss before optimizing - hyp['weight_decay'] *= batch_size * accumulate / nbs # scale weight_decay - optimizer = smart_optimizer(model, opt.optimizer, hyp['lr0'], hyp['momentum'], hyp['weight_decay']) - - # Scheduler - if opt.cos_lr: - lf = one_cycle(1, hyp['lrf'], epochs) # cosine 1->hyp['lrf'] - else: - lf = lambda x: (1 - x / epochs) * (1.0 - hyp['lrf']) + hyp['lrf'] # linear - scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf) # plot_lr_scheduler(optimizer, scheduler, epochs) - - # EMA - ema = ModelEMA(model) if RANK in {-1, 0} else None - - # Resume - best_fitness, start_epoch = 0.0, 0 - if pretrained: - if resume: - best_fitness, start_epoch, epochs = smart_resume(ckpt, optimizer, ema, weights, epochs, resume) - del ckpt, csd - - # DP mode - if cuda and RANK == -1 and torch.cuda.device_count() > 1: - LOGGER.warning('WARNING: DP not recommended, use torch.distributed.run for best DDP Multi-GPU results.\n' - 'See Multi-GPU Tutorial at https://github.com/ultralytics/yolov5/issues/475 to get started.') - model = torch.nn.DataParallel(model) - - # SyncBatchNorm - if opt.sync_bn and cuda and RANK != -1: - model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model).to(device) - LOGGER.info('Using SyncBatchNorm()') - - # Trainloader - train_loader, dataset = create_dataloader(train_path, - imgsz, - batch_size // WORLD_SIZE, - gs, - single_cls, - hyp=hyp, - augment=True, - cache=None if opt.cache == 'val' else opt.cache, - rect=opt.rect, - rank=LOCAL_RANK, - workers=workers, - image_weights=opt.image_weights, - quad=opt.quad, - prefix=colorstr('train: '), - shuffle=True) - labels = np.concatenate(dataset.labels, 0) - mlc = int(labels[:, 0].max()) # max label class - assert mlc < nc, f'Label class {mlc} exceeds nc={nc} in {data}. 
Possible class labels are 0-{nc - 1}' - - # Process 0 - if RANK in {-1, 0}: - val_loader = create_dataloader(val_path, - imgsz, - batch_size // WORLD_SIZE * 2, - gs, - single_cls, - hyp=hyp, - cache=None if noval else opt.cache, - rect=True, - rank=-1, - workers=workers * 2, - pad=0.5, - prefix=colorstr('val: '))[0] - - if not resume: - if not opt.noautoanchor: - check_anchors(dataset, model=model, thr=hyp['anchor_t'], imgsz=imgsz) # run AutoAnchor - model.half().float() # pre-reduce anchor precision - - callbacks.run('on_pretrain_routine_end', labels, names) - - # DDP mode - if cuda and RANK != -1: - model = smart_DDP(model) - - # Model attributes - nl = de_parallel(model).model[-1].nl # number of detection layers (to scale hyps) - hyp['box'] *= 3 / nl # scale to layers - hyp['cls'] *= nc / 80 * 3 / nl # scale to classes and layers - hyp['obj'] *= (imgsz / 640) ** 2 * 3 / nl # scale to image size and layers - hyp['label_smoothing'] = opt.label_smoothing - model.nc = nc # attach number of classes to model - model.hyp = hyp # attach hyperparameters to model - model.class_weights = labels_to_class_weights(dataset.labels, nc).to(device) * nc # attach class weights - model.names = names - - # Start training - t0 = time.time() - nb = len(train_loader) # number of batches - nw = max(round(hyp['warmup_epochs'] * nb), 100) # number of warmup iterations, max(3 epochs, 100 iterations) - # nw = min(nw, (epochs - start_epoch) / 2 * nb) # limit warmup to < 1/2 of training - last_opt_step = -1 - maps = np.zeros(nc) # mAP per class - results = (0, 0, 0, 0, 0, 0, 0) # P, R, mAP@.5, mAP@.5-.95, val_loss(box, obj, cls) - scheduler.last_epoch = start_epoch - 1 # do not move - scaler = torch.cuda.amp.GradScaler(enabled=amp) - stopper, stop = EarlyStopping(patience=opt.patience), False - compute_loss = ComputeLoss(model) # init loss class - callbacks.run('on_train_start') - LOGGER.info(f'Image sizes {imgsz} train, {imgsz} val\n' - f'Using {train_loader.num_workers * WORLD_SIZE} dataloader workers\n' - f"Logging results to {colorstr('bold', save_dir)}\n" - f'Starting training for {epochs} epochs...') - for epoch in range(start_epoch, epochs): # epoch ------------------------------------------------------------------ - callbacks.run('on_train_epoch_start') - model.train() - - # Update image weights (optional, single-GPU only) - if opt.image_weights: - cw = model.class_weights.cpu().numpy() * (1 - maps) ** 2 / nc # class weights - iw = labels_to_image_weights(dataset.labels, nc=nc, class_weights=cw) # image weights - dataset.indices = random.choices(range(dataset.n), weights=iw, k=dataset.n) # rand weighted idx - - # Update mosaic border (optional) - # b = int(random.uniform(0.25 * imgsz, 0.75 * imgsz + gs) // gs * gs) - # dataset.mosaic_border = [b - imgsz, -b] # height, width borders - - mloss = torch.zeros(3, device=device) # mean losses - if RANK != -1: - train_loader.sampler.set_epoch(epoch) - pbar = enumerate(train_loader) - LOGGER.info(('\n' + '%11s' * 7) % ('Epoch', 'GPU_mem', 'box_loss', 'obj_loss', 'cls_loss', 'Instances', 'Size')) - if RANK in {-1, 0}: - pbar = tqdm(pbar, total=nb, bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}') # progress bar - optimizer.zero_grad() - for i, (imgs, targets, paths, _) in pbar: # batch ------------------------------------------------------------- - callbacks.run('on_train_batch_start') - ni = i + nb * epoch # number integrated batches (since train start) - imgs = imgs.to(device, non_blocking=True).float() / 255 # uint8 to float32, 0-255 to 0.0-1.0 - - # Warmup - if ni <= 
nw: - xi = [0, nw] # x interp - # compute_loss.gr = np.interp(ni, xi, [0.0, 1.0]) # iou loss ratio (obj_loss = 1.0 or iou) - accumulate = max(1, np.interp(ni, xi, [1, nbs / batch_size]).round()) - for j, x in enumerate(optimizer.param_groups): - # bias lr falls from 0.1 to lr0, all other lrs rise from 0.0 to lr0 - x['lr'] = np.interp(ni, xi, [hyp['warmup_bias_lr'] if j == 0 else 0.0, x['initial_lr'] * lf(epoch)]) - if 'momentum' in x: - x['momentum'] = np.interp(ni, xi, [hyp['warmup_momentum'], hyp['momentum']]) - - # Multi-scale - if opt.multi_scale: - sz = random.randrange(imgsz * 0.5, imgsz * 1.5 + gs) // gs * gs # size - sf = sz / max(imgs.shape[2:]) # scale factor - if sf != 1: - ns = [math.ceil(x * sf / gs) * gs for x in imgs.shape[2:]] # new shape (stretched to gs-multiple) - imgs = nn.functional.interpolate(imgs, size=ns, mode='bilinear', align_corners=False) - - # Forward - with torch.cuda.amp.autocast(amp): - pred = model(imgs) # forward - loss, loss_items = compute_loss(pred, targets.to(device)) # loss scaled by batch_size - if RANK != -1: - loss *= WORLD_SIZE # gradient averaged between devices in DDP mode - if opt.quad: - loss *= 4. - - # Backward - scaler.scale(loss).backward() - - # Optimize - https://pytorch.org/docs/master/notes/amp_examples.html - if ni - last_opt_step >= accumulate: - scaler.unscale_(optimizer) # unscale gradients - torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=10.0) # clip gradients - scaler.step(optimizer) # optimizer.step - scaler.update() - optimizer.zero_grad() - if ema: - ema.update(model) - last_opt_step = ni - - # Log - if RANK in {-1, 0}: - mloss = (mloss * i + loss_items) / (i + 1) # update mean losses - mem = f'{torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0:.3g}G' # (GB) - pbar.set_description(('%11s' * 2 + '%11.4g' * 5) % - (f'{epoch}/{epochs - 1}', mem, *mloss, targets.shape[0], imgs.shape[-1])) - callbacks.run('on_train_batch_end', model, ni, imgs, targets, paths, list(mloss)) - if callbacks.stop_training: - return - # end batch ------------------------------------------------------------------------------------------------ - - # Scheduler - lr = [x['lr'] for x in optimizer.param_groups] # for loggers - scheduler.step() - - if RANK in {-1, 0}: - # mAP - callbacks.run('on_train_epoch_end', epoch=epoch) - ema.update_attr(model, include=['yaml', 'nc', 'hyp', 'names', 'stride', 'class_weights']) - final_epoch = (epoch + 1 == epochs) or stopper.possible_stop - if not noval or final_epoch: # Calculate mAP - results, maps, _ = validate.run(data_dict, - batch_size=batch_size // WORLD_SIZE * 2, - imgsz=imgsz, - half=amp, - model=ema.ema, - single_cls=single_cls, - dataloader=val_loader, - save_dir=save_dir, - plots=False, - callbacks=callbacks, - compute_loss=compute_loss) - - # Update best mAP - fi = fitness(np.array(results).reshape(1, -1)) # weighted combination of [P, R, mAP@.5, mAP@.5-.95] - stop = stopper(epoch=epoch, fitness=fi) # early stop check - if fi > best_fitness: - best_fitness = fi - log_vals = list(mloss) + list(results) + lr - callbacks.run('on_fit_epoch_end', log_vals, epoch, best_fitness, fi) - - # Save model - if (not nosave) or (final_epoch and not evolve): # if save - ckpt = { - 'epoch': epoch, - 'best_fitness': best_fitness, - 'model': deepcopy(de_parallel(model)).half(), - 'ema': deepcopy(ema.ema).half(), - 'updates': ema.updates, - 'optimizer': optimizer.state_dict(), - 'wandb_id': loggers.wandb.wandb_run.id if loggers.wandb else None, - 'opt': vars(opt), - 'date': 
datetime.now().isoformat()} - - # Save last, best and delete - torch.save(ckpt, last) - if best_fitness == fi: - torch.save(ckpt, best) - if opt.save_period > 0 and epoch % opt.save_period == 0: - torch.save(ckpt, w / f'epoch{epoch}.pt') - del ckpt - callbacks.run('on_model_save', last, epoch, final_epoch, best_fitness, fi) - - # EarlyStopping - if RANK != -1: # if DDP training - broadcast_list = [stop if RANK == 0 else None] - dist.broadcast_object_list(broadcast_list, 0) # broadcast 'stop' to all ranks - if RANK != 0: - stop = broadcast_list[0] - if stop: - break # must break all DDP ranks - - # end epoch ---------------------------------------------------------------------------------------------------- - # end training ----------------------------------------------------------------------------------------------------- - if RANK in {-1, 0}: - LOGGER.info(f'\n{epoch - start_epoch + 1} epochs completed in {(time.time() - t0) / 3600:.3f} hours.') - for f in last, best: - if f.exists(): - strip_optimizer(f) # strip optimizers - if f is best: - LOGGER.info(f'\nValidating {f}...') - results, _, _ = validate.run( - data_dict, - batch_size=batch_size // WORLD_SIZE * 2, - imgsz=imgsz, - model=attempt_load(f, device).half(), - iou_thres=0.65 if is_coco else 0.60, # best pycocotools at iou 0.65 - single_cls=single_cls, - dataloader=val_loader, - save_dir=save_dir, - save_json=is_coco, - verbose=True, - plots=plots, - callbacks=callbacks, - compute_loss=compute_loss) # val best model with plots - if is_coco: - callbacks.run('on_fit_epoch_end', list(mloss) + list(results) + lr, epoch, best_fitness, fi) - - callbacks.run('on_train_end', last, best, epoch, results) - - torch.cuda.empty_cache() - return results - - -def parse_opt(known=False): - parser = argparse.ArgumentParser() - parser.add_argument('--weights', type=str, default=ROOT / 'yolov5s.pt', help='initial weights path') - parser.add_argument('--cfg', type=str, default='', help='model.yaml path') - parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='dataset.yaml path') - parser.add_argument('--hyp', type=str, default=ROOT / 'data/hyps/hyp.scratch-low.yaml', help='hyperparameters path') - parser.add_argument('--epochs', type=int, default=300, help='total training epochs') - parser.add_argument('--batch-size', type=int, default=16, help='total batch size for all GPUs, -1 for autobatch') - parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=640, help='train, val image size (pixels)') - parser.add_argument('--rect', action='store_true', help='rectangular training') - parser.add_argument('--resume', nargs='?', const=True, default=False, help='resume most recent training') - parser.add_argument('--nosave', action='store_true', help='only save final checkpoint') - parser.add_argument('--noval', action='store_true', help='only validate final epoch') - parser.add_argument('--noautoanchor', action='store_true', help='disable AutoAnchor') - parser.add_argument('--noplots', action='store_true', help='save no plot files') - parser.add_argument('--evolve', type=int, nargs='?', const=300, help='evolve hyperparameters for x generations') - parser.add_argument('--bucket', type=str, default='', help='gsutil bucket') - parser.add_argument('--cache', type=str, nargs='?', const='ram', help='--cache images in "ram" (default) or "disk"') - parser.add_argument('--image-weights', action='store_true', help='use weighted image selection for training') - parser.add_argument('--device', default='', help='cuda device, 
i.e. 0 or 0,1,2,3 or cpu') - parser.add_argument('--multi-scale', action='store_true', help='vary img-size +/- 50%%') - parser.add_argument('--single-cls', action='store_true', help='train multi-class data as single-class') - parser.add_argument('--optimizer', type=str, choices=['SGD', 'Adam', 'AdamW'], default='SGD', help='optimizer') - parser.add_argument('--sync-bn', action='store_true', help='use SyncBatchNorm, only available in DDP mode') - parser.add_argument('--workers', type=int, default=8, help='max dataloader workers (per RANK in DDP mode)') - parser.add_argument('--project', default=ROOT / 'runs/train', help='save to project/name') - parser.add_argument('--name', default='exp', help='save to project/name') - parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment') - parser.add_argument('--quad', action='store_true', help='quad dataloader') - parser.add_argument('--cos-lr', action='store_true', help='cosine LR scheduler') - parser.add_argument('--label-smoothing', type=float, default=0.0, help='Label smoothing epsilon') - parser.add_argument('--patience', type=int, default=100, help='EarlyStopping patience (epochs without improvement)') - parser.add_argument('--freeze', nargs='+', type=int, default=[0], help='Freeze layers: backbone=10, first3=0 1 2') - parser.add_argument('--save-period', type=int, default=-1, help='Save checkpoint every x epochs (disabled if < 1)') - parser.add_argument('--seed', type=int, default=0, help='Global training seed') - parser.add_argument('--local_rank', type=int, default=-1, help='Automatic DDP Multi-GPU argument, do not modify') - - # Logger arguments - parser.add_argument('--entity', default=None, help='Entity') - parser.add_argument('--upload_dataset', nargs='?', const=True, default=False, help='Upload data, "val" option') - parser.add_argument('--bbox_interval', type=int, default=-1, help='Set bounding-box image logging interval') - parser.add_argument('--artifact_alias', type=str, default='latest', help='Version of dataset artifact to use') - - return parser.parse_known_args()[0] if known else parser.parse_args() - - -def main(opt, callbacks=Callbacks()): - # Checks - if RANK in {-1, 0}: - print_args(vars(opt)) - check_git_status() - check_requirements() - - # Resume (from specified or most recent last.pt) - if opt.resume and not check_wandb_resume(opt) and not check_comet_resume(opt) and not opt.evolve: - last = Path(check_file(opt.resume) if isinstance(opt.resume, str) else get_latest_run()) - opt_yaml = last.parent.parent / 'opt.yaml' # train options yaml - opt_data = opt.data # original dataset - if opt_yaml.is_file(): - with open(opt_yaml, errors='ignore') as f: - d = yaml.safe_load(f) - else: - d = torch.load(last, map_location='cpu')['opt'] - opt = argparse.Namespace(**d) # replace - opt.cfg, opt.weights, opt.resume = '', str(last), True # reinstate - if is_url(opt_data): - opt.data = check_file(opt_data) # avoid HUB resume auth timeout - else: - opt.data, opt.cfg, opt.hyp, opt.weights, opt.project = \ - check_file(opt.data), check_yaml(opt.cfg), check_yaml(opt.hyp), str(opt.weights), str(opt.project) # checks - assert len(opt.cfg) or len(opt.weights), 'either --cfg or --weights must be specified' - if opt.evolve: - if opt.project == str(ROOT / 'runs/train'): # if default project name, rename to runs/evolve - opt.project = str(ROOT / 'runs/evolve') - opt.exist_ok, opt.resume = opt.resume, False # pass resume to exist_ok and disable resume - if opt.name == 'cfg': - opt.name = 
Path(opt.cfg).stem # use model.yaml as name - opt.save_dir = str(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok)) - - # DDP mode - device = select_device(opt.device, batch_size=opt.batch_size) - if LOCAL_RANK != -1: - msg = 'is not compatible with YOLOv5 Multi-GPU DDP training' - assert not opt.image_weights, f'--image-weights {msg}' - assert not opt.evolve, f'--evolve {msg}' - assert opt.batch_size != -1, f'AutoBatch with --batch-size -1 {msg}, please pass a valid --batch-size' - assert opt.batch_size % WORLD_SIZE == 0, f'--batch-size {opt.batch_size} must be multiple of WORLD_SIZE' - assert torch.cuda.device_count() > LOCAL_RANK, 'insufficient CUDA devices for DDP command' - torch.cuda.set_device(LOCAL_RANK) - device = torch.device('cuda', LOCAL_RANK) - dist.init_process_group(backend="nccl" if dist.is_nccl_available() else "gloo") - - # Train - if not opt.evolve: - train(opt.hyp, opt, device, callbacks) - - # Evolve hyperparameters (optional) - else: - # Hyperparameter evolution metadata (mutation scale 0-1, lower_limit, upper_limit) - meta = { - 'lr0': (1, 1e-5, 1e-1), # initial learning rate (SGD=1E-2, Adam=1E-3) - 'lrf': (1, 0.01, 1.0), # final OneCycleLR learning rate (lr0 * lrf) - 'momentum': (0.3, 0.6, 0.98), # SGD momentum/Adam beta1 - 'weight_decay': (1, 0.0, 0.001), # optimizer weight decay - 'warmup_epochs': (1, 0.0, 5.0), # warmup epochs (fractions ok) - 'warmup_momentum': (1, 0.0, 0.95), # warmup initial momentum - 'warmup_bias_lr': (1, 0.0, 0.2), # warmup initial bias lr - 'box': (1, 0.02, 0.2), # box loss gain - 'cls': (1, 0.2, 4.0), # cls loss gain - 'cls_pw': (1, 0.5, 2.0), # cls BCELoss positive_weight - 'obj': (1, 0.2, 4.0), # obj loss gain (scale with pixels) - 'obj_pw': (1, 0.5, 2.0), # obj BCELoss positive_weight - 'iou_t': (0, 0.1, 0.7), # IoU training threshold - 'anchor_t': (1, 2.0, 8.0), # anchor-multiple threshold - 'anchors': (2, 2.0, 10.0), # anchors per output grid (0 to ignore) - 'fl_gamma': (0, 0.0, 2.0), # focal loss gamma (efficientDet default gamma=1.5) - 'hsv_h': (1, 0.0, 0.1), # image HSV-Hue augmentation (fraction) - 'hsv_s': (1, 0.0, 0.9), # image HSV-Saturation augmentation (fraction) - 'hsv_v': (1, 0.0, 0.9), # image HSV-Value augmentation (fraction) - 'degrees': (1, 0.0, 45.0), # image rotation (+/- deg) - 'translate': (1, 0.0, 0.9), # image translation (+/- fraction) - 'scale': (1, 0.0, 0.9), # image scale (+/- gain) - 'shear': (1, 0.0, 10.0), # image shear (+/- deg) - 'perspective': (0, 0.0, 0.001), # image perspective (+/- fraction), range 0-0.001 - 'flipud': (1, 0.0, 1.0), # image flip up-down (probability) - 'fliplr': (0, 0.0, 1.0), # image flip left-right (probability) - 'mosaic': (1, 0.0, 1.0), # image mixup (probability) - 'mixup': (1, 0.0, 1.0), # image mixup (probability) - 'copy_paste': (1, 0.0, 1.0)} # segment copy-paste (probability) - - with open(opt.hyp, errors='ignore') as f: - hyp = yaml.safe_load(f) # load hyps dict - if 'anchors' not in hyp: # anchors commented in hyp.yaml - hyp['anchors'] = 3 - if opt.noautoanchor: - del hyp['anchors'], meta['anchors'] - opt.noval, opt.nosave, save_dir = True, True, Path(opt.save_dir) # only val/save final epoch - # ei = [isinstance(x, (int, float)) for x in hyp.values()] # evolvable indices - evolve_yaml, evolve_csv = save_dir / 'hyp_evolve.yaml', save_dir / 'evolve.csv' - if opt.bucket: - os.system(f'gsutil cp gs://{opt.bucket}/evolve.csv {evolve_csv}') # download evolve.csv if exists - - for _ in range(opt.evolve): # generations to evolve - if evolve_csv.exists(): # if 
evolve.csv exists: select best hyps and mutate - # Select parent(s) - parent = 'single' # parent selection method: 'single' or 'weighted' - x = np.loadtxt(evolve_csv, ndmin=2, delimiter=',', skiprows=1) - n = min(5, len(x)) # number of previous results to consider - x = x[np.argsort(-fitness(x))][:n] # top n mutations - w = fitness(x) - fitness(x).min() + 1E-6 # weights (sum > 0) - if parent == 'single' or len(x) == 1: - # x = x[random.randint(0, n - 1)] # random selection - x = x[random.choices(range(n), weights=w)[0]] # weighted selection - elif parent == 'weighted': - x = (x * w.reshape(n, 1)).sum(0) / w.sum() # weighted combination - - # Mutate - mp, s = 0.8, 0.2 # mutation probability, sigma - npr = np.random - npr.seed(int(time.time())) - g = np.array([meta[k][0] for k in hyp.keys()]) # gains 0-1 - ng = len(meta) - v = np.ones(ng) - while all(v == 1): # mutate until a change occurs (prevent duplicates) - v = (g * (npr.random(ng) < mp) * npr.randn(ng) * npr.random() * s + 1).clip(0.3, 3.0) - for i, k in enumerate(hyp.keys()): # plt.hist(v.ravel(), 300) - hyp[k] = float(x[i + 7] * v[i]) # mutate - - # Constrain to limits - for k, v in meta.items(): - hyp[k] = max(hyp[k], v[1]) # lower limit - hyp[k] = min(hyp[k], v[2]) # upper limit - hyp[k] = round(hyp[k], 5) # significant digits - - # Train mutation - results = train(hyp.copy(), opt, device, callbacks) - callbacks = Callbacks() - # Write mutation results - print_mutation(results, hyp.copy(), save_dir, opt.bucket) - - # Plot results - plot_evolve(evolve_csv) - LOGGER.info(f'Hyperparameter evolution finished {opt.evolve} generations\n' - f"Results saved to {colorstr('bold', save_dir)}\n" - f'Usage example: $ python train.py --hyp {evolve_yaml}') - - -def run(**kwargs): - # Usage: import train; train.run(data='coco128.yaml', imgsz=320, weights='yolov5m.pt') - opt = parse_opt(True) - for k, v in kwargs.items(): - setattr(opt, k, v) - main(opt) - return opt - - -if __name__ == "__main__": - opt = parse_opt() - main(opt) diff --git a/src/yolov5_ros/src/yolov5/tutorial.ipynb b/src/yolov5_ros/src/yolov5/tutorial.ipynb deleted file mode 100644 index 957437b..0000000 --- a/src/yolov5_ros/src/yolov5/tutorial.ipynb +++ /dev/null @@ -1,1124 +0,0 @@ -{ - "nbformat": 4, - "nbformat_minor": 0, - "metadata": { - "colab": { - "name": "YOLOv5 Tutorial", - "provenance": [], - "collapsed_sections": [], - "machine_shape": "hm", - "toc_visible": true - }, - "kernelspec": { - "name": "python3", - "display_name": "Python 3" - }, - "accelerator": "GPU", - "widgets": { - "application/vnd.jupyter.widget-state+json": { - "9b8caa3522fc4cbab31e13b5dfc7808d": { - "model_module": "@jupyter-widgets/controls", - "model_name": "HBoxModel", - "model_module_version": "1.5.0", - "state": { - "_dom_classes": [], - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "HBoxModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/controls", - "_view_module_version": "1.5.0", - "_view_name": "HBoxView", - "box_style": "", - "children": [ - "IPY_MODEL_574140e4c4bc48c9a171541a02cd0211", - "IPY_MODEL_35e03ce5090346c9ae602891470fc555", - "IPY_MODEL_c942c208e72d46568b476bb0f2d75496" - ], - "layout": "IPY_MODEL_65881db1db8a4e9c930fab9172d45143" - } - }, - "574140e4c4bc48c9a171541a02cd0211": { - "model_module": "@jupyter-widgets/controls", - "model_name": "HTMLModel", - "model_module_version": "1.5.0", - "state": { - "_dom_classes": [], - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - 
"_model_name": "HTMLModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/controls", - "_view_module_version": "1.5.0", - "_view_name": "HTMLView", - "description": "", - "description_tooltip": null, - "layout": "IPY_MODEL_60b913d755b34d638478e30705a2dde1", - "placeholder": "​", - "style": "IPY_MODEL_0856bea36ec148b68522ff9c9eb258d8", - "value": "100%" - } - }, - "35e03ce5090346c9ae602891470fc555": { - "model_module": "@jupyter-widgets/controls", - "model_name": "FloatProgressModel", - "model_module_version": "1.5.0", - "state": { - "_dom_classes": [], - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "FloatProgressModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/controls", - "_view_module_version": "1.5.0", - "_view_name": "ProgressView", - "bar_style": "success", - "description": "", - "description_tooltip": null, - "layout": "IPY_MODEL_76879f6f2aa54637a7a07faeea2bd684", - "max": 818322941, - "min": 0, - "orientation": "horizontal", - "style": "IPY_MODEL_0ace3934ec6f4d36a1b3a9e086390926", - "value": 818322941 - } - }, - "c942c208e72d46568b476bb0f2d75496": { - "model_module": "@jupyter-widgets/controls", - "model_name": "HTMLModel", - "model_module_version": "1.5.0", - "state": { - "_dom_classes": [], - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "HTMLModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/controls", - "_view_module_version": "1.5.0", - "_view_name": "HTMLView", - "description": "", - "description_tooltip": null, - "layout": "IPY_MODEL_d6b7a2243e0c4beca714d99dceec23d6", - "placeholder": "​", - "style": "IPY_MODEL_5966ba6e6f114d8c9d8d1d6b1bd4f4c7", - "value": " 780M/780M [02:19<00:00, 6.24MB/s]" - } - }, - "65881db1db8a4e9c930fab9172d45143": { - "model_module": "@jupyter-widgets/base", - "model_name": "LayoutModel", - "model_module_version": "1.2.0", - "state": { - "_model_module": "@jupyter-widgets/base", - "_model_module_version": "1.2.0", - "_model_name": "LayoutModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "LayoutView", - "align_content": null, - "align_items": null, - "align_self": null, - "border": null, - "bottom": null, - "display": null, - "flex": null, - "flex_flow": null, - "grid_area": null, - "grid_auto_columns": null, - "grid_auto_flow": null, - "grid_auto_rows": null, - "grid_column": null, - "grid_gap": null, - "grid_row": null, - "grid_template_areas": null, - "grid_template_columns": null, - "grid_template_rows": null, - "height": null, - "justify_content": null, - "justify_items": null, - "left": null, - "margin": null, - "max_height": null, - "max_width": null, - "min_height": null, - "min_width": null, - "object_fit": null, - "object_position": null, - "order": null, - "overflow": null, - "overflow_x": null, - "overflow_y": null, - "padding": null, - "right": null, - "top": null, - "visibility": null, - "width": null - } - }, - "60b913d755b34d638478e30705a2dde1": { - "model_module": "@jupyter-widgets/base", - "model_name": "LayoutModel", - "model_module_version": "1.2.0", - "state": { - "_model_module": "@jupyter-widgets/base", - "_model_module_version": "1.2.0", - "_model_name": "LayoutModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "LayoutView", - "align_content": null, - "align_items": null, - "align_self": null, - "border": null, - "bottom": null, - 
"display": null, - "flex": null, - "flex_flow": null, - "grid_area": null, - "grid_auto_columns": null, - "grid_auto_flow": null, - "grid_auto_rows": null, - "grid_column": null, - "grid_gap": null, - "grid_row": null, - "grid_template_areas": null, - "grid_template_columns": null, - "grid_template_rows": null, - "height": null, - "justify_content": null, - "justify_items": null, - "left": null, - "margin": null, - "max_height": null, - "max_width": null, - "min_height": null, - "min_width": null, - "object_fit": null, - "object_position": null, - "order": null, - "overflow": null, - "overflow_x": null, - "overflow_y": null, - "padding": null, - "right": null, - "top": null, - "visibility": null, - "width": null - } - }, - "0856bea36ec148b68522ff9c9eb258d8": { - "model_module": "@jupyter-widgets/controls", - "model_name": "DescriptionStyleModel", - "model_module_version": "1.5.0", - "state": { - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "DescriptionStyleModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "StyleView", - "description_width": "" - } - }, - "76879f6f2aa54637a7a07faeea2bd684": { - "model_module": "@jupyter-widgets/base", - "model_name": "LayoutModel", - "model_module_version": "1.2.0", - "state": { - "_model_module": "@jupyter-widgets/base", - "_model_module_version": "1.2.0", - "_model_name": "LayoutModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "LayoutView", - "align_content": null, - "align_items": null, - "align_self": null, - "border": null, - "bottom": null, - "display": null, - "flex": null, - "flex_flow": null, - "grid_area": null, - "grid_auto_columns": null, - "grid_auto_flow": null, - "grid_auto_rows": null, - "grid_column": null, - "grid_gap": null, - "grid_row": null, - "grid_template_areas": null, - "grid_template_columns": null, - "grid_template_rows": null, - "height": null, - "justify_content": null, - "justify_items": null, - "left": null, - "margin": null, - "max_height": null, - "max_width": null, - "min_height": null, - "min_width": null, - "object_fit": null, - "object_position": null, - "order": null, - "overflow": null, - "overflow_x": null, - "overflow_y": null, - "padding": null, - "right": null, - "top": null, - "visibility": null, - "width": null - } - }, - "0ace3934ec6f4d36a1b3a9e086390926": { - "model_module": "@jupyter-widgets/controls", - "model_name": "ProgressStyleModel", - "model_module_version": "1.5.0", - "state": { - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "ProgressStyleModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "StyleView", - "bar_color": null, - "description_width": "" - } - }, - "d6b7a2243e0c4beca714d99dceec23d6": { - "model_module": "@jupyter-widgets/base", - "model_name": "LayoutModel", - "model_module_version": "1.2.0", - "state": { - "_model_module": "@jupyter-widgets/base", - "_model_module_version": "1.2.0", - "_model_name": "LayoutModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "LayoutView", - "align_content": null, - "align_items": null, - "align_self": null, - "border": null, - "bottom": null, - "display": null, - "flex": null, - "flex_flow": null, - "grid_area": null, - "grid_auto_columns": null, - 
"grid_auto_flow": null, - "grid_auto_rows": null, - "grid_column": null, - "grid_gap": null, - "grid_row": null, - "grid_template_areas": null, - "grid_template_columns": null, - "grid_template_rows": null, - "height": null, - "justify_content": null, - "justify_items": null, - "left": null, - "margin": null, - "max_height": null, - "max_width": null, - "min_height": null, - "min_width": null, - "object_fit": null, - "object_position": null, - "order": null, - "overflow": null, - "overflow_x": null, - "overflow_y": null, - "padding": null, - "right": null, - "top": null, - "visibility": null, - "width": null - } - }, - "5966ba6e6f114d8c9d8d1d6b1bd4f4c7": { - "model_module": "@jupyter-widgets/controls", - "model_name": "DescriptionStyleModel", - "model_module_version": "1.5.0", - "state": { - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "DescriptionStyleModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "StyleView", - "description_width": "" - } - } - } - } - }, - "cells": [ - { - "cell_type": "markdown", - "metadata": { - "id": "t6MPjfT5NrKQ" - }, - "source": [ - "
\n", - "\n", - " \n", - " \n", - "\n", - "\n", - "
\n", - " \"Open\n", - " \"Open\n", - "
\n", - "\n", - "This YOLOv5 🚀 notebook by Ultralytics presents simple train, validate and predict examples to help start your AI adventure.
See GitHub for community support or contact us for professional support.\n", - "\n", - "
" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "7mGmQbAO5pQb" - }, - "source": [ - "# Setup\n", - "\n", - "Clone GitHub [repository](https://github.com/ultralytics/yolov5), install [dependencies](https://github.com/ultralytics/yolov5/blob/master/requirements.txt) and check PyTorch and GPU." - ] - }, - { - "cell_type": "code", - "metadata": { - "id": "wbvMlHd_QwMG", - "colab": { - "base_uri": "https://localhost:8080/" - }, - "outputId": "0f9ee467-cea4-48e8-9050-7a76ae1b6141" - }, - "source": [ - "!git clone https://github.com/ultralytics/yolov5 # clone\n", - "%cd yolov5\n", - "%pip install -qr requirements.txt # install\n", - "\n", - "import torch\n", - "import utils\n", - "display = utils.notebook_init() # checks" - ], - "execution_count": null, - "outputs": [ - { - "output_type": "stream", - "name": "stderr", - "text": [ - "YOLOv5 🚀 v6.2-56-g30e674b Python-3.7.13 torch-1.12.1+cu113 CUDA:0 (Tesla V100-SXM2-16GB, 16160MiB)\n" - ] - }, - { - "output_type": "stream", - "name": "stdout", - "text": [ - "Setup complete ✅ (8 CPUs, 51.0 GB RAM, 37.4/166.8 GB disk)\n" - ] - } - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "4JnkELT0cIJg" - }, - "source": [ - "# 1. Detect\n", - "\n", - "`detect.py` runs YOLOv5 inference on a variety of sources, downloading models automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases), and saving results to `runs/detect`. Example inference sources are:\n", - "\n", - "```shell\n", - "python detect.py --source 0 # webcam\n", - " img.jpg # image \n", - " vid.mp4 # video\n", - " path/ # directory\n", - " 'path/*.jpg' # glob\n", - " 'https://youtu.be/Zgi9g1ksQHc' # YouTube\n", - " 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream\n", - "```" - ] - }, - { - "cell_type": "code", - "metadata": { - "id": "zR9ZbuQCH7FX", - "colab": { - "base_uri": "https://localhost:8080/" - }, - "outputId": "60647b99-e8d4-402c-f444-331bf6746da4" - }, - "source": [ - "!python detect.py --weights yolov5s.pt --img 640 --conf 0.25 --source data/images\n", - "# display.Image(filename='runs/detect/exp/zidane.jpg', width=600)" - ], - "execution_count": null, - "outputs": [ - { - "output_type": "stream", - "name": "stdout", - "text": [ - "\u001b[34m\u001b[1mdetect: \u001b[0mweights=['yolov5s.pt'], source=data/images, data=data/coco128.yaml, imgsz=[640, 640], conf_thres=0.25, iou_thres=0.45, max_det=1000, device=, view_img=False, save_txt=False, save_conf=False, save_crop=False, nosave=False, classes=None, agnostic_nms=False, augment=False, visualize=False, update=False, project=runs/detect, name=exp, exist_ok=False, line_thickness=3, hide_labels=False, hide_conf=False, half=False, dnn=False\n", - "YOLOv5 🚀 v6.2-56-g30e674b Python-3.7.13 torch-1.12.1+cu113 CUDA:0 (Tesla V100-SXM2-16GB, 16160MiB)\n", - "\n", - "Downloading https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5s.pt to yolov5s.pt...\n", - "100% 14.1M/14.1M [00:00<00:00, 27.8MB/s]\n", - "\n", - "Fusing layers... 
\n", - "YOLOv5s summary: 213 layers, 7225885 parameters, 0 gradients\n", - "image 1/2 /content/yolov5/data/images/bus.jpg: 640x480 4 persons, 1 bus, 14.8ms\n", - "image 2/2 /content/yolov5/data/images/zidane.jpg: 384x640 2 persons, 2 ties, 20.1ms\n", - "Speed: 0.6ms pre-process, 17.4ms inference, 21.6ms NMS per image at shape (1, 3, 640, 640)\n", - "Results saved to \u001b[1mruns/detect/exp\u001b[0m\n" - ] - } - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "hkAzDWJ7cWTr" - }, - "source": [ - "        \n", - "" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "0eq1SMWl6Sfn" - }, - "source": [ - "# 2. Validate\n", - "Validate a model's accuracy on the [COCO](https://cocodataset.org/#home) dataset's `val` or `test` splits. Models are downloaded automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases). To show results by class use the `--verbose` flag." - ] - }, - { - "cell_type": "code", - "metadata": { - "id": "WQPtK1QYVaD_", - "colab": { - "base_uri": "https://localhost:8080/", - "height": 49, - "referenced_widgets": [ - "9b8caa3522fc4cbab31e13b5dfc7808d", - "574140e4c4bc48c9a171541a02cd0211", - "35e03ce5090346c9ae602891470fc555", - "c942c208e72d46568b476bb0f2d75496", - "65881db1db8a4e9c930fab9172d45143", - "60b913d755b34d638478e30705a2dde1", - "0856bea36ec148b68522ff9c9eb258d8", - "76879f6f2aa54637a7a07faeea2bd684", - "0ace3934ec6f4d36a1b3a9e086390926", - "d6b7a2243e0c4beca714d99dceec23d6", - "5966ba6e6f114d8c9d8d1d6b1bd4f4c7" - ] - }, - "outputId": "102dabed-bc31-42fe-9133-d9ce28a2c01e" - }, - "source": [ - "# Download COCO val\n", - "torch.hub.download_url_to_file('https://ultralytics.com/assets/coco2017val.zip', 'tmp.zip') # download (780M - 5000 images)\n", - "!unzip -q tmp.zip -d ../datasets && rm tmp.zip # unzip" - ], - "execution_count": null, - "outputs": [ - { - "output_type": "display_data", - "data": { - "text/plain": [ - " 0%| | 0.00/780M [00:00

\n", - "Close the active learning loop by sampling images from your inference conditions with the `roboflow` pip package\n", - "

\n", - "\n", - "Train a YOLOv5s model on the [COCO128](https://www.kaggle.com/ultralytics/coco128) dataset with `--data coco128.yaml`, starting from pretrained `--weights yolov5s.pt`, or from randomly initialized `--weights '' --cfg yolov5s.yaml`.\n", - "\n", - "- **Pretrained [Models](https://github.com/ultralytics/yolov5/tree/master/models)** are downloaded\n", - "automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases)\n", - "- **[Datasets](https://github.com/ultralytics/yolov5/tree/master/data)** available for autodownload include: [COCO](https://github.com/ultralytics/yolov5/blob/master/data/coco.yaml), [COCO128](https://github.com/ultralytics/yolov5/blob/master/data/coco128.yaml), [VOC](https://github.com/ultralytics/yolov5/blob/master/data/VOC.yaml), [Argoverse](https://github.com/ultralytics/yolov5/blob/master/data/Argoverse.yaml), [VisDrone](https://github.com/ultralytics/yolov5/blob/master/data/VisDrone.yaml), [GlobalWheat](https://github.com/ultralytics/yolov5/blob/master/data/GlobalWheat2020.yaml), [xView](https://github.com/ultralytics/yolov5/blob/master/data/xView.yaml), [Objects365](https://github.com/ultralytics/yolov5/blob/master/data/Objects365.yaml), [SKU-110K](https://github.com/ultralytics/yolov5/blob/master/data/SKU-110K.yaml).\n", - "- **Training Results** are saved to `runs/train/` with incrementing run directories, i.e. `runs/train/exp2`, `runs/train/exp3` etc.\n", - "

\n", - "\n", - "A **Mosaic Dataloader** is used for training which combines 4 images into 1 mosaic.\n", - "\n", - "## Train on Custom Data with Roboflow 🌟 NEW\n", - "\n", - "[Roboflow](https://roboflow.com/?ref=ultralytics) enables you to easily **organize, label, and prepare** a high quality dataset with your own custom data. Roboflow also makes it easy to establish an active learning pipeline, collaborate with your team on dataset improvement, and integrate directly into your model building workflow with the `roboflow` pip package.\n", - "\n", - "- Custom Training Example: [https://blog.roboflow.com/how-to-train-yolov5-on-a-custom-dataset/](https://blog.roboflow.com/how-to-train-yolov5-on-a-custom-dataset/?ref=ultralytics)\n", - "- Custom Training Notebook: [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/roboflow-ai/yolov5-custom-training-tutorial/blob/main/yolov5-custom-training.ipynb)\n", - "
\n", - "\n", - "

Label images lightning fast (including with model-assisted labeling)" - ] - }, - { - "cell_type": "code", - "source": [ - "#@title Select YOLOv5 🚀 logger {run: 'auto'}\n", - "logger = 'TensorBoard' #@param ['TensorBoard', 'Comet', 'ClearML', 'W&B']\n", - "\n", - "if logger == 'TensorBoard':\n", - " %load_ext tensorboard\n", - " %tensorboard --logdir runs/train\n", - "elif logger == 'Comet':\n", - " %pip install -q comet_ml\n", - " import comet_ml; comet_ml.init()\n", - "elif logger == 'ClearML':\n", - " %pip install -q clearml && clearml-init\n", - "elif logger == 'W&B':\n", - " %pip install -q wandb\n", - " import wandb; wandb.login()" - ], - "metadata": { - "id": "i3oKtE4g-aNn" - }, - "execution_count": null, - "outputs": [] - }, - { - "cell_type": "code", - "metadata": { - "id": "1NcFxRcFdJ_O", - "colab": { - "base_uri": "https://localhost:8080/" - }, - "outputId": "baa6d4be-3379-4aab-844a-d5a5396c0e49" - }, - "source": [ - "# Train YOLOv5s on COCO128 for 3 epochs\n", - "!python train.py --img 640 --batch 16 --epochs 3 --data coco128.yaml --weights yolov5s.pt --cache" - ], - "execution_count": null, - "outputs": [ - { - "output_type": "stream", - "name": "stdout", - "text": [ - "\u001b[34m\u001b[1mtrain: \u001b[0mweights=yolov5s.pt, cfg=, data=coco128.yaml, hyp=data/hyps/hyp.scratch-low.yaml, epochs=3, batch_size=16, imgsz=640, rect=False, resume=False, nosave=False, noval=False, noautoanchor=False, noplots=False, evolve=None, bucket=, cache=ram, image_weights=False, device=, multi_scale=False, single_cls=False, optimizer=SGD, sync_bn=False, workers=8, project=runs/train, name=exp, exist_ok=False, quad=False, cos_lr=False, label_smoothing=0.0, patience=100, freeze=[0], save_period=-1, seed=0, local_rank=-1, entity=None, upload_dataset=False, bbox_interval=-1, artifact_alias=latest\n", - "\u001b[34m\u001b[1mgithub: \u001b[0mup to date with https://github.com/ultralytics/yolov5 ✅\n", - "YOLOv5 🚀 v6.2-56-g30e674b Python-3.7.13 torch-1.12.1+cu113 CUDA:0 (Tesla V100-SXM2-16GB, 16160MiB)\n", - "\n", - "\u001b[34m\u001b[1mhyperparameters: \u001b[0mlr0=0.01, lrf=0.01, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=0.05, cls=0.5, cls_pw=1.0, obj=1.0, obj_pw=1.0, iou_t=0.2, anchor_t=4.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0, copy_paste=0.0\n", - "\u001b[34m\u001b[1mWeights & Biases: \u001b[0mrun 'pip install wandb' to automatically track and visualize YOLOv5 🚀 runs in Weights & Biases\n", - "\u001b[34m\u001b[1mClearML: \u001b[0mrun 'pip install clearml' to automatically track, visualize and remotely train YOLOv5 🚀 in ClearML\n", - "\u001b[34m\u001b[1mTensorBoard: \u001b[0mStart with 'tensorboard --logdir runs/train', view at http://localhost:6006/\n", - "\n", - "Dataset not found ⚠️, missing paths ['/content/datasets/coco128/images/train2017']\n", - "Downloading https://ultralytics.com/assets/coco128.zip to coco128.zip...\n", - "100% 6.66M/6.66M [00:00<00:00, 41.1MB/s]\n", - "Dataset download success ✅ (0.8s), saved to \u001b[1m/content/datasets\u001b[0m\n", - "\n", - " from n params module arguments \n", - " 0 -1 1 3520 models.common.Conv [3, 32, 6, 2, 2] \n", - " 1 -1 1 18560 models.common.Conv [32, 64, 3, 2] \n", - " 2 -1 1 18816 models.common.C3 [64, 64, 1] \n", - " 3 -1 1 73984 models.common.Conv [64, 128, 3, 2] \n", - " 4 -1 2 115712 models.common.C3 [128, 128, 2] \n", - " 5 -1 1 295424 models.common.Conv [128, 
256, 3, 2] \n", - " 6 -1 3 625152 models.common.C3 [256, 256, 3] \n", - " 7 -1 1 1180672 models.common.Conv [256, 512, 3, 2] \n", - " 8 -1 1 1182720 models.common.C3 [512, 512, 1] \n", - " 9 -1 1 656896 models.common.SPPF [512, 512, 5] \n", - " 10 -1 1 131584 models.common.Conv [512, 256, 1, 1] \n", - " 11 -1 1 0 torch.nn.modules.upsampling.Upsample [None, 2, 'nearest'] \n", - " 12 [-1, 6] 1 0 models.common.Concat [1] \n", - " 13 -1 1 361984 models.common.C3 [512, 256, 1, False] \n", - " 14 -1 1 33024 models.common.Conv [256, 128, 1, 1] \n", - " 15 -1 1 0 torch.nn.modules.upsampling.Upsample [None, 2, 'nearest'] \n", - " 16 [-1, 4] 1 0 models.common.Concat [1] \n", - " 17 -1 1 90880 models.common.C3 [256, 128, 1, False] \n", - " 18 -1 1 147712 models.common.Conv [128, 128, 3, 2] \n", - " 19 [-1, 14] 1 0 models.common.Concat [1] \n", - " 20 -1 1 296448 models.common.C3 [256, 256, 1, False] \n", - " 21 -1 1 590336 models.common.Conv [256, 256, 3, 2] \n", - " 22 [-1, 10] 1 0 models.common.Concat [1] \n", - " 23 -1 1 1182720 models.common.C3 [512, 512, 1, False] \n", - " 24 [17, 20, 23] 1 229245 models.yolo.Detect [80, [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]], [128, 256, 512]]\n", - "Model summary: 270 layers, 7235389 parameters, 7235389 gradients, 16.6 GFLOPs\n", - "\n", - "Transferred 349/349 items from yolov5s.pt\n", - "\u001b[34m\u001b[1mAMP: \u001b[0mchecks passed ✅\n", - "\u001b[34m\u001b[1moptimizer:\u001b[0m SGD(lr=0.01) with parameter groups 57 weight(decay=0.0), 60 weight(decay=0.0005), 60 bias\n", - "\u001b[34m\u001b[1malbumentations: \u001b[0mBlur(p=0.01, blur_limit=(3, 7)), MedianBlur(p=0.01, blur_limit=(3, 7)), ToGray(p=0.01), CLAHE(p=0.01, clip_limit=(1, 4.0), tile_grid_size=(8, 8))\n", - "\u001b[34m\u001b[1mtrain: \u001b[0mScanning '/content/datasets/coco128/labels/train2017' images and labels...128 found, 0 missing, 2 empty, 0 corrupt: 100% 128/128 [00:00<00:00, 9659.25it/s]\n", - "\u001b[34m\u001b[1mtrain: \u001b[0mNew cache created: /content/datasets/coco128/labels/train2017.cache\n", - "\u001b[34m\u001b[1mtrain: \u001b[0mCaching images (0.1GB ram): 100% 128/128 [00:00<00:00, 951.31it/s]\n", - "\u001b[34m\u001b[1mval: \u001b[0mScanning '/content/datasets/coco128/labels/train2017.cache' images and labels... 128 found, 0 missing, 2 empty, 0 corrupt: 100% 128/128 [00:00 # 2. paste API key\n", - "python train.py --img 640 --epochs 3 --data coco128.yaml --weights yolov5s.pt # 3. train\n", - "```\n", - "\n", - "To learn more about all of the supported Comet features for this integration, check out the [Comet Tutorial](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/comet). If you'd like to learn more about Comet, head over to our [documentation](https://www.comet.com/docs/v2/?ref=yolov5&utm_source=yolov5&utm_medium=affilliate&utm_campaign=yolov5_comet_integration). Get started by trying out the Comet Colab Notebook:\n", - "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1RG0WOQyxlDlo5Km8GogJpIEJlg_5lyYO?usp=sharing)\n", - "\n", - "\"yolo-ui\"" - ], - "metadata": { - "id": "nWOsI5wJR1o3" - } - }, - { - "cell_type": "markdown", - "source": [ - "## ClearML Logging and Automation 🌟 NEW\n", - "\n", - "[ClearML](https://cutt.ly/yolov5-notebook-clearml) is completely integrated into YOLOv5 to track your experimentation, manage dataset versions and even remotely execute training runs. 
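\n",
- "\n",
- "Under the hood this rests on ClearML's `Task` API. A minimal sketch of tracking a run by hand, assuming `clearml` is installed and `clearml-init` has been completed (the project and task names are placeholders):\n",
- "\n",
- "```python\n",
- "from clearml import Task\n",
- "\n",
- "task = Task.init(project_name='YOLOv5', task_name='coco128-demo')  # placeholder names\n",
- "task.connect({'epochs': 3, 'imgsz': 640})  # log hyperparameters\n",
- "# ...train as usual; ClearML captures console output, git state and installed packages\n",
- "```\n",
- "\n",
- "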
To enable ClearML (check cells above):\n",
- "\n",
- "- `pip install clearml`\n",
- "- run `clearml-init` to connect to a ClearML server (**deploy your own [open-source server](https://github.com/allegroai/clearml-server)**, or use our [free hosted server](https://cutt.ly/yolov5-notebook-clearml))\n",
- "\n",
- "You'll get all the expected features from an experiment manager: live updates, model upload, experiment comparison, etc., but ClearML also tracks, for example, uncommitted changes and installed packages. Thanks to that, ClearML Tasks (which is what we call experiments) are also reproducible on different machines! With only one extra line, we can schedule a YOLOv5 training task on a queue to be executed by any number of ClearML Agents (workers).\n",
- "\n",
- "You can use ClearML Data to version your dataset and then pass it to YOLOv5 simply by using its unique ID. This will help you keep track of your data without adding extra hassle. Explore the [ClearML Tutorial](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/clearml) for details!\n",
- "\n",
- "\n",
- "ClearML experiment UI"
- ],
- "metadata": {
- "id": "Lay2WsTjNJzP"
- }
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "DLI1JmHU7B0l"
- },
- "source": [
- "## Weights & Biases Logging\n",
- "\n",
- "[Weights & Biases](https://wandb.ai/site?utm_campaign=repo_yolo_notebook) (W&B) is integrated with YOLOv5 for real-time visualization and cloud logging of training runs. This allows for better run comparison and introspection, as well as improved visibility and collaboration for teams. To enable W&B, `pip install wandb`, then train normally (you will be guided through setup on first use).\n",
- "\n",
- "During training you will see live updates at [https://wandb.ai/home](https://wandb.ai/home?utm_campaign=repo_yolo_notebook), and you can create and share detailed [Reports](https://wandb.ai/glenn-jocher/yolov5_tutorial/reports/YOLOv5-COCO128-Tutorial-Results--VmlldzozMDI5OTY) of your results. For more information see the [YOLOv5 Weights & Biases Tutorial](https://github.com/ultralytics/yolov5/issues/1289).\n",
- "\n",
- "\n",
- "Weights & Biases dashboard"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "-WPvRbS5Swl6"
- },
- "source": [
- "## Local Logging\n",
- "\n",
- "Training results are automatically logged with [TensorBoard](https://www.tensorflow.org/tensorboard) and [CSV](https://github.com/ultralytics/yolov5/pull/4148) loggers to `runs/train`, with a new experiment directory created for each new training as `runs/train/exp2`, `runs/train/exp3`, etc.\n",
- "\n",
- "This directory contains train and val statistics, mosaics, labels, predictions and augmented mosaics, as well as metrics and charts including precision-recall (PR) curves and confusion matrices.\n",
- "\n",
- "Local logging results\n"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "Zelyeqbyt3GD"
- },
- "source": [
- "# Environments\n",
- "\n",
- "YOLOv5 may be run in any of the following up-to-date verified environments (with all dependencies including [CUDA](https://developer.nvidia.com/cuda)/[CUDNN](https://developer.nvidia.com/cudnn), [Python](https://www.python.org/) and [PyTorch](https://pytorch.org/) preinstalled):\n",
- "\n",
- "- **Google Colab and Kaggle** notebooks with free GPU: Open In Colab · Open In Kaggle\n",
- "- **Google Cloud** Deep Learning VM. See [GCP Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/GCP-Quickstart)\n",
- "- **Amazon** Deep Learning AMI. 
See [AWS Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/AWS-Quickstart)\n", - "- **Docker Image**. See [Docker Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/Docker-Quickstart) \"Docker\n" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "6Qu7Iesl0p54" - }, - "source": [ - "# Status\n", - "\n", - "![CI CPU testing](https://github.com/ultralytics/yolov5/workflows/CI%20CPU%20testing/badge.svg)\n", - "\n", - "If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous Integration (CI) tests are currently passing. CI tests verify correct operation of YOLOv5 training ([train.py](https://github.com/ultralytics/yolov5/blob/master/train.py)), testing ([val.py](https://github.com/ultralytics/yolov5/blob/master/val.py)), inference ([detect.py](https://github.com/ultralytics/yolov5/blob/master/detect.py)) and export ([export.py](https://github.com/ultralytics/yolov5/blob/master/export.py)) on macOS, Windows, and Ubuntu every 24 hours and on every commit.\n" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "IEijrePND_2I" - }, - "source": [ - "# Appendix\n", - "\n", - "Additional content below for PyTorch Hub, CI, reproducing results, profiling speeds, VOC training, classification training and TensorRT example." - ] - }, - { - "cell_type": "code", - "metadata": { - "id": "GMusP4OAxFu6" - }, - "source": [ - "import torch\n", - "\n", - "# PyTorch Hub Model\n", - "model = torch.hub.load('ultralytics/yolov5', 'yolov5s') # or yolov5n - yolov5x6, custom\n", - "\n", - "# Images\n", - "img = 'https://ultralytics.com/images/zidane.jpg' # or file, Path, PIL, OpenCV, numpy, list\n", - "\n", - "# Inference\n", - "results = model(img)\n", - "\n", - "# Results\n", - "results.print() # or .show(), .save(), .crop(), .pandas(), etc." 
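- "\n",
- "# A further sketch: the same detections as a pandas DataFrame\n",
- "# (columns: xmin, ymin, xmax, ymax, confidence, class, name)\n",
- "df = results.pandas().xyxy[0]\n",
- "people = df[(df['name'] == 'person') & (df['confidence'] > 0.5)]  # 0.5 is an arbitrary cutoff\n",
- "print(people[['xmin', 'ymin', 'xmax', 'ymax', 'confidence']])"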
- ], - "execution_count": null, - "outputs": [] - }, - { - "cell_type": "code", - "metadata": { - "id": "FGH0ZjkGjejy" - }, - "source": [ - "# YOLOv5 CI\n", - "%%shell\n", - "rm -rf runs # remove runs/\n", - "m=yolov5n # official weights\n", - "b=runs/train/exp/weights/best # best.pt checkpoint\n", - "python train.py --imgsz 64 --batch 32 --weights $m.pt --cfg $m.yaml --epochs 1 --device 0 # train\n", - "for d in 0 cpu; do # devices\n", - " for w in $m $b; do # weights\n", - " python val.py --imgsz 64 --batch 32 --weights $w.pt --device $d # val\n", - " python detect.py --imgsz 64 --weights $w.pt --device $d # detect\n", - " done\n", - "done\n", - "python hubconf.py --model $m # hub\n", - "python models/tf.py --weights $m.pt # build TF model\n", - "python models/yolo.py --cfg $m.yaml # build PyTorch model\n", - "python export.py --weights $m.pt --img 64 --include torchscript # export" - ], - "execution_count": null, - "outputs": [] - }, - { - "cell_type": "code", - "metadata": { - "id": "mcKoSIK2WSzj" - }, - "source": [ - "# Reproduce\n", - "for x in (f'yolov5{x}' for x in 'nsmlx'):\n", - " !python val.py --weights {x}.pt --data coco.yaml --img 640 --task speed # speed\n", - " !python val.py --weights {x}.pt --data coco.yaml --img 640 --conf 0.001 --iou 0.65 # mAP" - ], - "execution_count": null, - "outputs": [] - }, - { - "cell_type": "code", - "metadata": { - "id": "gogI-kwi3Tye" - }, - "source": [ - "# Profile\n", - "from utils.torch_utils import profile\n", - "\n", - "m1 = lambda x: x * torch.sigmoid(x)\n", - "m2 = torch.nn.SiLU()\n", - "results = profile(input=torch.randn(16, 3, 640, 640), ops=[m1, m2], n=100)" - ], - "execution_count": null, - "outputs": [] - }, - { - "cell_type": "code", - "metadata": { - "id": "BSgFCAcMbk1R" - }, - "source": [ - "# VOC\n", - "for b, m in zip([64, 64, 64, 32, 16], [f'yolov5{x}' for x in 'nsmlx']): # batch, model\n", - " !python train.py --batch {b} --weights {m}.pt --data VOC.yaml --epochs 50 --img 512 --hyp hyp.VOC.yaml --project VOC --name {m} --cache" - ], - "execution_count": null, - "outputs": [] - }, - { - "cell_type": "code", - "source": [ - "# Classification train\n", - "for m in [*(f'yolov5{x}-cls.pt' for x in 'nsmlx'), 'resnet50.pt', 'resnet101.pt', 'efficientnet_b0.pt', 'efficientnet_b1.pt']:\n", - " for d in 'mnist', 'fashion-mnist', 'cifar10', 'cifar100', 'imagenette160', 'imagenette320', 'imagenette', 'imagewoof160', 'imagewoof320', 'imagewoof':\n", - " !python classify/train.py --model {m} --data {d} --epochs 10 --project YOLOv5-cls --name {m}-{d}" - ], - "metadata": { - "id": "UWGH7H6yakVl" - }, - "execution_count": null, - "outputs": [] - }, - { - "cell_type": "code", - "source": [ - "# Classification val\n", - "!bash data/scripts/get_imagenet.sh --val # download ImageNet val split (6.3G - 50000 images)\n", - "!python classify/val.py --weights yolov5m-cls.pt --data ../datasets/imagenet --img 224 # validate" - ], - "metadata": { - "id": "yYgOiFNHZx-1" - }, - "execution_count": null, - "outputs": [] - }, - { - "cell_type": "code", - "source": [ - "# Validate on COCO test. 
Zip results.json and submit to eval server at https://competitions.codalab.org/competitions/20794\n", - "!bash data/scripts/get_coco.sh --test # download COCO test-dev2017 (7G - 40000 images, test 20000)\n", - "!python val.py --weights yolov5x.pt --data coco.yaml --img 640 --iou 0.65 --half --task test" - ], - "metadata": { - "id": "aq4DPWGu0Bl1" - }, - "execution_count": null, - "outputs": [] - }, - { - "cell_type": "code", - "metadata": { - "id": "VTRwsvA9u7ln" - }, - "source": [ - "# TensorRT \n", - "!pip install -U nvidia-tensorrt --index-url https://pypi.ngc.nvidia.com # install\n", - "!python export.py --weights yolov5s.pt --include engine --imgsz 640 --device 0 # export\n", - "!python detect.py --weights yolov5s.engine --imgsz 640 --device 0 # inference" - ], - "execution_count": null, - "outputs": [] - } - ] -} diff --git a/src/yolov5_ros/src/yolov5/utils/__init__.py b/src/yolov5_ros/src/yolov5/utils/__init__.py deleted file mode 100644 index 46225c2..0000000 --- a/src/yolov5_ros/src/yolov5/utils/__init__.py +++ /dev/null @@ -1,65 +0,0 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license -""" -utils/initialization -""" - -import contextlib -import threading - - -class TryExcept(contextlib.ContextDecorator): - # YOLOv5 TryExcept class. Usage: @TryExcept() decorator or 'with TryExcept():' context manager - def __init__(self, msg=''): - self.msg = msg - - def __enter__(self): - pass - - def __exit__(self, exc_type, value, traceback): - if value: - print(f'{self.msg}{value}') - return True - - -def threaded(func): - # Multi-threads a target function and returns thread. Usage: @threaded decorator - def wrapper(*args, **kwargs): - thread = threading.Thread(target=func, args=args, kwargs=kwargs, daemon=True) - thread.start() - return thread - - return wrapper - - -def notebook_init(verbose=True): - # Check system software and hardware - print('Checking setup...') - - import os - import shutil - - from utils.general import check_font, check_requirements, emojis, is_colab - from utils.torch_utils import select_device # imports - - check_requirements(('psutil', 'IPython')) - check_font() - - import psutil - from IPython import display # to display images and clear console output - - if is_colab(): - shutil.rmtree('/content/sample_data', ignore_errors=True) # remove colab /sample_data directory - - # System info - if verbose: - gb = 1 << 30 # bytes to GiB (1024 ** 3) - ram = psutil.virtual_memory().total - total, used, free = shutil.disk_usage("/") - display.clear_output() - s = f'({os.cpu_count()} CPUs, {ram / gb:.1f} GB RAM, {(total - free) / gb:.1f}/{total / gb:.1f} GB disk)' - else: - s = '' - - select_device(newline=False) - print(emojis(f'Setup complete ✅ {s}')) - return display diff --git a/src/yolov5_ros/src/yolov5/utils/activations.py b/src/yolov5_ros/src/yolov5/utils/activations.py deleted file mode 100644 index 084ce8c..0000000 --- a/src/yolov5_ros/src/yolov5/utils/activations.py +++ /dev/null @@ -1,103 +0,0 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license -""" -Activation functions -""" - -import torch -import torch.nn as nn -import torch.nn.functional as F - - -class SiLU(nn.Module): - # SiLU activation https://arxiv.org/pdf/1606.08415.pdf - @staticmethod - def forward(x): - return x * torch.sigmoid(x) - - -class Hardswish(nn.Module): - # Hard-SiLU activation - @staticmethod - def forward(x): - # return x * F.hardsigmoid(x) # for TorchScript and CoreML - return x * F.hardtanh(x + 3, 0.0, 6.0) / 6.0 # for TorchScript, CoreML and ONNX - - -class Mish(nn.Module): - # Mish activation 
https://github.com/digantamisra98/Mish - @staticmethod - def forward(x): - return x * F.softplus(x).tanh() - - -class MemoryEfficientMish(nn.Module): - # Mish activation memory-efficient - class F(torch.autograd.Function): - - @staticmethod - def forward(ctx, x): - ctx.save_for_backward(x) - return x.mul(torch.tanh(F.softplus(x))) # x * tanh(ln(1 + exp(x))) - - @staticmethod - def backward(ctx, grad_output): - x = ctx.saved_tensors[0] - sx = torch.sigmoid(x) - fx = F.softplus(x).tanh() - return grad_output * (fx + x * sx * (1 - fx * fx)) - - def forward(self, x): - return self.F.apply(x) - - -class FReLU(nn.Module): - # FReLU activation https://arxiv.org/abs/2007.11824 - def __init__(self, c1, k=3): # ch_in, kernel - super().__init__() - self.conv = nn.Conv2d(c1, c1, k, 1, 1, groups=c1, bias=False) - self.bn = nn.BatchNorm2d(c1) - - def forward(self, x): - return torch.max(x, self.bn(self.conv(x))) - - -class AconC(nn.Module): - r""" ACON activation (activate or not) - AconC: (p1*x-p2*x) * sigmoid(beta*(p1*x-p2*x)) + p2*x, beta is a learnable parameter - according to "Activate or Not: Learning Customized Activation" . - """ - - def __init__(self, c1): - super().__init__() - self.p1 = nn.Parameter(torch.randn(1, c1, 1, 1)) - self.p2 = nn.Parameter(torch.randn(1, c1, 1, 1)) - self.beta = nn.Parameter(torch.ones(1, c1, 1, 1)) - - def forward(self, x): - dpx = (self.p1 - self.p2) * x - return dpx * torch.sigmoid(self.beta * dpx) + self.p2 * x - - -class MetaAconC(nn.Module): - r""" ACON activation (activate or not) - MetaAconC: (p1*x-p2*x) * sigmoid(beta*(p1*x-p2*x)) + p2*x, beta is generated by a small network - according to "Activate or Not: Learning Customized Activation" . - """ - - def __init__(self, c1, k=1, s=1, r=16): # ch_in, kernel, stride, r - super().__init__() - c2 = max(r, c1 // r) - self.p1 = nn.Parameter(torch.randn(1, c1, 1, 1)) - self.p2 = nn.Parameter(torch.randn(1, c1, 1, 1)) - self.fc1 = nn.Conv2d(c1, c2, k, s, bias=True) - self.fc2 = nn.Conv2d(c2, c1, k, s, bias=True) - # self.bn1 = nn.BatchNorm2d(c2) - # self.bn2 = nn.BatchNorm2d(c1) - - def forward(self, x): - y = x.mean(dim=2, keepdims=True).mean(dim=3, keepdims=True) - # batch-size 1 bug/instabilities https://github.com/ultralytics/yolov5/issues/2891 - # beta = torch.sigmoid(self.bn2(self.fc2(self.bn1(self.fc1(y))))) # bug/unstable - beta = torch.sigmoid(self.fc2(self.fc1(y))) # bug patch BN layers removed - dpx = (self.p1 - self.p2) * x - return dpx * torch.sigmoid(beta * dpx) + self.p2 * x diff --git a/src/yolov5_ros/src/yolov5/utils/augmentations.py b/src/yolov5_ros/src/yolov5/utils/augmentations.py deleted file mode 100644 index a558735..0000000 --- a/src/yolov5_ros/src/yolov5/utils/augmentations.py +++ /dev/null @@ -1,396 +0,0 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license -""" -Image augmentation functions -""" - -import math -import random - -import cv2 -import numpy as np -import torch -import torchvision.transforms as T -import torchvision.transforms.functional as TF - -from utils.general import LOGGER, check_version, colorstr, resample_segments, segment2box -from utils.metrics import bbox_ioa - -IMAGENET_MEAN = 0.485, 0.456, 0.406 # RGB mean -IMAGENET_STD = 0.229, 0.224, 0.225 # RGB standard deviation - - -class Albumentations: - # YOLOv5 Albumentations class (optional, only used if package is installed) - def __init__(self): - self.transform = None - prefix = colorstr('albumentations: ') - try: - import albumentations as A - check_version(A.__version__, '1.0.3', hard=True) # version requirement - - T = [ - 
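# Each transform below is applied independently with its own probability p;
- # entries with p=0.0 are defined but disabled by default.
- 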
A.Blur(p=0.01), - A.MedianBlur(p=0.01), - A.ToGray(p=0.01), - A.CLAHE(p=0.01), - A.RandomBrightnessContrast(p=0.0), - A.RandomGamma(p=0.0), - A.ImageCompression(quality_lower=75, p=0.0)] # transforms - self.transform = A.Compose(T, bbox_params=A.BboxParams(format='yolo', label_fields=['class_labels'])) - - LOGGER.info(prefix + ', '.join(f'{x}'.replace('always_apply=False, ', '') for x in T if x.p)) - except ImportError: # package not installed, skip - pass - except Exception as e: - LOGGER.info(f'{prefix}{e}') - - def __call__(self, im, labels, p=1.0): - if self.transform and random.random() < p: - new = self.transform(image=im, bboxes=labels[:, 1:], class_labels=labels[:, 0]) # transformed - im, labels = new['image'], np.array([[c, *b] for c, b in zip(new['class_labels'], new['bboxes'])]) - return im, labels - - -def normalize(x, mean=IMAGENET_MEAN, std=IMAGENET_STD, inplace=False): - # Denormalize RGB images x per ImageNet stats in BCHW format, i.e. = (x - mean) / std - return TF.normalize(x, mean, std, inplace=inplace) - - -def denormalize(x, mean=IMAGENET_MEAN, std=IMAGENET_STD): - # Denormalize RGB images x per ImageNet stats in BCHW format, i.e. = x * std + mean - for i in range(3): - x[:, i] = x[:, i] * std[i] + mean[i] - return x - - -def augment_hsv(im, hgain=0.5, sgain=0.5, vgain=0.5): - # HSV color-space augmentation - if hgain or sgain or vgain: - r = np.random.uniform(-1, 1, 3) * [hgain, sgain, vgain] + 1 # random gains - hue, sat, val = cv2.split(cv2.cvtColor(im, cv2.COLOR_BGR2HSV)) - dtype = im.dtype # uint8 - - x = np.arange(0, 256, dtype=r.dtype) - lut_hue = ((x * r[0]) % 180).astype(dtype) - lut_sat = np.clip(x * r[1], 0, 255).astype(dtype) - lut_val = np.clip(x * r[2], 0, 255).astype(dtype) - - im_hsv = cv2.merge((cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val))) - cv2.cvtColor(im_hsv, cv2.COLOR_HSV2BGR, dst=im) # no return needed - - -def hist_equalize(im, clahe=True, bgr=False): - # Equalize histogram on BGR image 'im' with im.shape(n,m,3) and range 0-255 - yuv = cv2.cvtColor(im, cv2.COLOR_BGR2YUV if bgr else cv2.COLOR_RGB2YUV) - if clahe: - c = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8)) - yuv[:, :, 0] = c.apply(yuv[:, :, 0]) - else: - yuv[:, :, 0] = cv2.equalizeHist(yuv[:, :, 0]) # equalize Y channel histogram - return cv2.cvtColor(yuv, cv2.COLOR_YUV2BGR if bgr else cv2.COLOR_YUV2RGB) # convert YUV image to RGB - - -def replicate(im, labels): - # Replicate labels - h, w = im.shape[:2] - boxes = labels[:, 1:].astype(int) - x1, y1, x2, y2 = boxes.T - s = ((x2 - x1) + (y2 - y1)) / 2 # side length (pixels) - for i in s.argsort()[:round(s.size * 0.5)]: # smallest indices - x1b, y1b, x2b, y2b = boxes[i] - bh, bw = y2b - y1b, x2b - x1b - yc, xc = int(random.uniform(0, h - bh)), int(random.uniform(0, w - bw)) # offset x, y - x1a, y1a, x2a, y2a = [xc, yc, xc + bw, yc + bh] - im[y1a:y2a, x1a:x2a] = im[y1b:y2b, x1b:x2b] # im4[ymin:ymax, xmin:xmax] - labels = np.append(labels, [[labels[i, 0], x1a, y1a, x2a, y2a]], axis=0) - - return im, labels - - -def letterbox(im, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True, stride=32): - # Resize and pad image while meeting stride-multiple constraints - shape = im.shape[:2] # current shape [height, width] - if isinstance(new_shape, int): - new_shape = (new_shape, new_shape) - - # Scale ratio (new / old) - r = min(new_shape[0] / shape[0], new_shape[1] / shape[1]) - if not scaleup: # only scale down, do not scale up (for better val mAP) - r = min(r, 1.0) - - # Compute padding 
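- # Worked example, assuming the defaults auto=True and stride=32: a 1280x720 (w x h)
- # source letterboxed to 640x640 gives r = min(640/720, 640/1280) = 0.5 and
- # new_unpad = (640, 360), so dh = 640 - 360 = 280, reduced by np.mod(280, 32) to 24,
- # i.e. 12 px of gray padding on each of top and bottom after the halving below.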
- ratio = r, r # width, height ratios - new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r)) - dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1] # wh padding - if auto: # minimum rectangle - dw, dh = np.mod(dw, stride), np.mod(dh, stride) # wh padding - elif scaleFill: # stretch - dw, dh = 0.0, 0.0 - new_unpad = (new_shape[1], new_shape[0]) - ratio = new_shape[1] / shape[1], new_shape[0] / shape[0] # width, height ratios - - dw /= 2 # divide padding into 2 sides - dh /= 2 - - if shape[::-1] != new_unpad: # resize - im = cv2.resize(im, new_unpad, interpolation=cv2.INTER_LINEAR) - top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1)) - left, right = int(round(dw - 0.1)), int(round(dw + 0.1)) - im = cv2.copyMakeBorder(im, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color) # add border - return im, ratio, (dw, dh) - - -def random_perspective(im, - targets=(), - segments=(), - degrees=10, - translate=.1, - scale=.1, - shear=10, - perspective=0.0, - border=(0, 0)): - # torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(0.1, 0.1), scale=(0.9, 1.1), shear=(-10, 10)) - # targets = [cls, xyxy] - - height = im.shape[0] + border[0] * 2 # shape(h,w,c) - width = im.shape[1] + border[1] * 2 - - # Center - C = np.eye(3) - C[0, 2] = -im.shape[1] / 2 # x translation (pixels) - C[1, 2] = -im.shape[0] / 2 # y translation (pixels) - - # Perspective - P = np.eye(3) - P[2, 0] = random.uniform(-perspective, perspective) # x perspective (about y) - P[2, 1] = random.uniform(-perspective, perspective) # y perspective (about x) - - # Rotation and Scale - R = np.eye(3) - a = random.uniform(-degrees, degrees) - # a += random.choice([-180, -90, 0, 90]) # add 90deg rotations to small rotations - s = random.uniform(1 - scale, 1 + scale) - # s = 2 ** random.uniform(-scale, scale) - R[:2] = cv2.getRotationMatrix2D(angle=a, center=(0, 0), scale=s) - - # Shear - S = np.eye(3) - S[0, 1] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # x shear (deg) - S[1, 0] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # y shear (deg) - - # Translation - T = np.eye(3) - T[0, 2] = random.uniform(0.5 - translate, 0.5 + translate) * width # x translation (pixels) - T[1, 2] = random.uniform(0.5 - translate, 0.5 + translate) * height # y translation (pixels) - - # Combined rotation matrix - M = T @ S @ R @ P @ C # order of operations (right to left) is IMPORTANT - if (border[0] != 0) or (border[1] != 0) or (M != np.eye(3)).any(): # image changed - if perspective: - im = cv2.warpPerspective(im, M, dsize=(width, height), borderValue=(114, 114, 114)) - else: # affine - im = cv2.warpAffine(im, M[:2], dsize=(width, height), borderValue=(114, 114, 114)) - - # Visualize - # import matplotlib.pyplot as plt - # ax = plt.subplots(1, 2, figsize=(12, 6))[1].ravel() - # ax[0].imshow(im[:, :, ::-1]) # base - # ax[1].imshow(im2[:, :, ::-1]) # warped - - # Transform label coordinates - n = len(targets) - if n: - use_segments = any(x.any() for x in segments) - new = np.zeros((n, 4)) - if use_segments: # warp segments - segments = resample_segments(segments) # upsample - for i, segment in enumerate(segments): - xy = np.ones((len(segment), 3)) - xy[:, :2] = segment - xy = xy @ M.T # transform - xy = xy[:, :2] / xy[:, 2:3] if perspective else xy[:, :2] # perspective rescale or affine - - # clip - new[i] = segment2box(xy, width, height) - - else: # warp boxes - xy = np.ones((n * 4, 3)) - xy[:, :2] = targets[:, [1, 2, 3, 4, 1, 4, 3, 2]].reshape(n * 4, 2) # x1y1, x2y2, x1y2, x2y1 - xy = xy @ M.T 
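-            # box corners were lifted to homogeneous [x, y, 1] rows, so one matmul with M.T applies
-            # the full 3x3 warp; the perspective divide below maps back to image coordinates
-            # (for a pure affine M the homogeneous scale stays 1 and the divide is skipped)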
# transform - xy = (xy[:, :2] / xy[:, 2:3] if perspective else xy[:, :2]).reshape(n, 8) # perspective rescale or affine - - # create new boxes - x = xy[:, [0, 2, 4, 6]] - y = xy[:, [1, 3, 5, 7]] - new = np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T - - # clip - new[:, [0, 2]] = new[:, [0, 2]].clip(0, width) - new[:, [1, 3]] = new[:, [1, 3]].clip(0, height) - - # filter candidates - i = box_candidates(box1=targets[:, 1:5].T * s, box2=new.T, area_thr=0.01 if use_segments else 0.10) - targets = targets[i] - targets[:, 1:5] = new[i] - - return im, targets - - -def copy_paste(im, labels, segments, p=0.5): - # Implement Copy-Paste augmentation https://arxiv.org/abs/2012.07177, labels as nx5 np.array(cls, xyxy) - n = len(segments) - if p and n: - h, w, c = im.shape # height, width, channels - im_new = np.zeros(im.shape, np.uint8) - for j in random.sample(range(n), k=round(p * n)): - l, s = labels[j], segments[j] - box = w - l[3], l[2], w - l[1], l[4] - ioa = bbox_ioa(box, labels[:, 1:5]) # intersection over area - if (ioa < 0.30).all(): # allow 30% obscuration of existing labels - labels = np.concatenate((labels, [[l[0], *box]]), 0) - segments.append(np.concatenate((w - s[:, 0:1], s[:, 1:2]), 1)) - cv2.drawContours(im_new, [segments[j].astype(np.int32)], -1, (255, 255, 255), cv2.FILLED) - - result = cv2.bitwise_and(src1=im, src2=im_new) - result = cv2.flip(result, 1) # augment segments (flip left-right) - i = result > 0 # pixels to replace - # i[:, :] = result.max(2).reshape(h, w, 1) # act over ch - im[i] = result[i] # cv2.imwrite('debug.jpg', im) # debug - - return im, labels, segments - - -def cutout(im, labels, p=0.5): - # Applies image cutout augmentation https://arxiv.org/abs/1708.04552 - if random.random() < p: - h, w = im.shape[:2] - scales = [0.5] * 1 + [0.25] * 2 + [0.125] * 4 + [0.0625] * 8 + [0.03125] * 16 # image size fraction - for s in scales: - mask_h = random.randint(1, int(h * s)) # create random masks - mask_w = random.randint(1, int(w * s)) - - # box - xmin = max(0, random.randint(0, w) - mask_w // 2) - ymin = max(0, random.randint(0, h) - mask_h // 2) - xmax = min(w, xmin + mask_w) - ymax = min(h, ymin + mask_h) - - # apply random color mask - im[ymin:ymax, xmin:xmax] = [random.randint(64, 191) for _ in range(3)] - - # return unobscured labels - if len(labels) and s > 0.03: - box = np.array([xmin, ymin, xmax, ymax], dtype=np.float32) - ioa = bbox_ioa(box, labels[:, 1:5]) # intersection over area - labels = labels[ioa < 0.60] # remove >60% obscured labels - - return labels - - -def mixup(im, labels, im2, labels2): - # Applies MixUp augmentation https://arxiv.org/pdf/1710.09412.pdf - r = np.random.beta(32.0, 32.0) # mixup ratio, alpha=beta=32.0 - im = (im * r + im2 * (1 - r)).astype(np.uint8) - labels = np.concatenate((labels, labels2), 0) - return im, labels - - -def box_candidates(box1, box2, wh_thr=2, ar_thr=100, area_thr=0.1, eps=1e-16): # box1(4,n), box2(4,n) - # Compute candidate boxes: box1 before augment, box2 after augment, wh_thr (pixels), aspect_ratio_thr, area_ratio - w1, h1 = box1[2] - box1[0], box1[3] - box1[1] - w2, h2 = box2[2] - box2[0], box2[3] - box2[1] - ar = np.maximum(w2 / (h2 + eps), h2 / (w2 + eps)) # aspect ratio - return (w2 > wh_thr) & (h2 > wh_thr) & (w2 * h2 / (w1 * h1 + eps) > area_thr) & (ar < ar_thr) # candidates - - -def classify_albumentations(augment=True, - size=224, - scale=(0.08, 1.0), - hflip=0.5, - vflip=0.0, - jitter=0.4, - mean=IMAGENET_MEAN, - std=IMAGENET_STD, - auto_aug=False): - # YOLOv5 classification 
Albumentations (optional, only used if package is installed)
-    prefix = colorstr('albumentations: ')
-    try:
-        import albumentations as A
-        from albumentations.pytorch import ToTensorV2
-        check_version(A.__version__, '1.0.3', hard=True)  # version requirement
-        if augment:  # Resize and crop
-            T = [A.RandomResizedCrop(height=size, width=size, scale=scale)]
-            if auto_aug:
-                # TODO: implement AugMix, AutoAug & RandAug in albumentations
-                LOGGER.info(f'{prefix}auto augmentations are currently not supported')
-            else:
-                if hflip > 0:
-                    T += [A.HorizontalFlip(p=hflip)]
-                if vflip > 0:
-                    T += [A.VerticalFlip(p=vflip)]
-                if jitter > 0:
-                    color_jitter = (float(jitter),) * 3  # repeat value for brightness, contrast, saturation, 0 hue
-                    T += [A.ColorJitter(*color_jitter, 0)]
-        else:  # Use fixed crop for eval set (reproducibility)
-            T = [A.SmallestMaxSize(max_size=size), A.CenterCrop(height=size, width=size)]
-        T += [A.Normalize(mean=mean, std=std), ToTensorV2()]  # Normalize and convert to Tensor
-        LOGGER.info(prefix + ', '.join(f'{x}'.replace('always_apply=False, ', '') for x in T if x.p))
-        return A.Compose(T)
-
-    except ImportError:  # package not installed, skip
-        pass
-    except Exception as e:
-        LOGGER.info(f'{prefix}{e}')
-
-
-def classify_transforms(size=224):
-    # Transforms to apply if albumentations not installed
-    assert isinstance(size, int), f'ERROR: classify_transforms size {size} must be integer, not (list, tuple)'
-    # T.Compose([T.ToTensor(), T.Resize(size), T.CenterCrop(size), T.Normalize(IMAGENET_MEAN, IMAGENET_STD)])
-    return T.Compose([CenterCrop(size), ToTensor(), T.Normalize(IMAGENET_MEAN, IMAGENET_STD)])
-
-
-class LetterBox:
-    # YOLOv5 LetterBox class for image preprocessing, i.e. T.Compose([LetterBox(size), ToTensor()])
-    def __init__(self, size=(640, 640), auto=False, stride=32):
-        super().__init__()
-        self.h, self.w = (size, size) if isinstance(size, int) else size
-        self.auto = auto  # pass max size integer, automatically solve for short side using stride
-        self.stride = stride  # used with auto
-
-    def __call__(self, im):  # im = np.array HWC
-        imh, imw = im.shape[:2]
-        r = min(self.h / imh, self.w / imw)  # ratio of new/old
-        h, w = round(imh * r), round(imw * r)  # resized image
-        hs, ws = (math.ceil(x / self.stride) * self.stride for x in (h, w)) if self.auto else (self.h, self.w)
-        top, left = round((hs - h) / 2 - 0.1), round((ws - w) / 2 - 0.1)
-        im_out = np.full((self.h, self.w, 3), 114, dtype=im.dtype)
-        im_out[top:top + h, left:left + w] = cv2.resize(im, (w, h), interpolation=cv2.INTER_LINEAR)
-        return im_out
-
-
-class CenterCrop:
-    # YOLOv5 CenterCrop class for image preprocessing, i.e. T.Compose([CenterCrop(size), ToTensor()])
-    def __init__(self, size=640):
-        super().__init__()
-        self.h, self.w = (size, size) if isinstance(size, int) else size
-
-    def __call__(self, im):  # im = np.array HWC
-        imh, imw = im.shape[:2]
-        m = min(imh, imw)  # min dimension
-        top, left = (imh - m) // 2, (imw - m) // 2
-        return cv2.resize(im[top:top + m, left:left + m], (self.w, self.h), interpolation=cv2.INTER_LINEAR)
-
-
-class ToTensor:
-    # YOLOv5 ToTensor class for image preprocessing, i.e. T.Compose([LetterBox(size), ToTensor()])
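-    # Minimal usage sketch (illustrative; 'bus.jpg' is an assumed file name, not from the source):
-    #   t = T.Compose([CenterCrop(640), ToTensor()])
-    #   x = t(cv2.imread('bus.jpg'))  # float32 CHW RGB tensor in [0, 1]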
-    def __init__(self, half=False):
-        super().__init__()
-        self.half = half
-
-    def __call__(self, im):  # im = np.array HWC in BGR order
-        im = np.ascontiguousarray(im.transpose((2, 0, 1))[::-1])  # HWC to CHW -> BGR to RGB -> contiguous
-        im = torch.from_numpy(im)  # to torch
-        im = im.half() if self.half else im.float()  # uint8 to fp16/32
-        im /= 255.0  # 0-255 to 0.0-1.0
-        return im
diff --git a/src/yolov5_ros/src/yolov5/utils/autoanchor.py b/src/yolov5_ros/src/yolov5/utils/autoanchor.py
deleted file mode 100644
index 0b49ab3..0000000
--- a/src/yolov5_ros/src/yolov5/utils/autoanchor.py
+++ /dev/null
@@ -1,169 +0,0 @@
-# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
-"""
-AutoAnchor utils
-"""
-
-import random
-
-import numpy as np
-import torch
-import yaml
-from tqdm import tqdm
-
-from utils import TryExcept
-from utils.general import LOGGER, colorstr
-
-PREFIX = colorstr('AutoAnchor: ')
-
-
-def check_anchor_order(m):
-    # Check anchor order against stride order for YOLOv5 Detect() module m, and correct if necessary
-    a = m.anchors.prod(-1).mean(-1).view(-1)  # mean anchor area per output layer
-    da = a[-1] - a[0]  # delta a
-    ds = m.stride[-1] - m.stride[0]  # delta s
-    if da and (da.sign() != ds.sign()):  # orders differ, reverse anchors
-        LOGGER.info(f'{PREFIX}Reversing anchor order')
-        m.anchors[:] = m.anchors.flip(0)
-
-
-@TryExcept(f'{PREFIX}ERROR: ')
-def check_anchors(dataset, model, thr=4.0, imgsz=640):
-    # Check anchor fit to data, recompute if necessary
-    m = model.module.model[-1] if hasattr(model, 'module') else model.model[-1]  # Detect()
-    shapes = imgsz * dataset.shapes / dataset.shapes.max(1, keepdims=True)
-    scale = np.random.uniform(0.9, 1.1, size=(shapes.shape[0], 1))  # augment scale
-    wh = torch.tensor(np.concatenate([l[:, 3:5] * s for s, l in zip(shapes * scale, dataset.labels)])).float()  # wh
-
-    def metric(k):  # compute metric
-        r = wh[:, None] / k[None]
-        x = torch.min(r, 1 / r).min(2)[0]  # ratio metric
-        best = x.max(1)[0]  # best_x
-        aat = (x > 1 / thr).float().sum(1).mean()  # anchors above threshold
-        bpr = (best > 1 / thr).float().mean()  # best possible recall
-        return bpr, aat
-
-    stride = m.stride.to(m.anchors.device).view(-1, 1, 1)  # model strides
-    anchors = m.anchors.clone() * stride  # current anchors
-    bpr, aat = metric(anchors.cpu().view(-1, 2))
-    s = f'\n{PREFIX}{aat:.2f} anchors/target, {bpr:.3f} Best Possible Recall (BPR). 
' - if bpr > 0.98: # threshold to recompute - LOGGER.info(f'{s}Current anchors are a good fit to dataset ✅') - else: - LOGGER.info(f'{s}Anchors are a poor fit to dataset ⚠️, attempting to improve...') - na = m.anchors.numel() // 2 # number of anchors - anchors = kmean_anchors(dataset, n=na, img_size=imgsz, thr=thr, gen=1000, verbose=False) - new_bpr = metric(anchors)[0] - if new_bpr > bpr: # replace anchors - anchors = torch.tensor(anchors, device=m.anchors.device).type_as(m.anchors) - m.anchors[:] = anchors.clone().view_as(m.anchors) - check_anchor_order(m) # must be in pixel-space (not grid-space) - m.anchors /= stride - s = f'{PREFIX}Done ✅ (optional: update model *.yaml to use these anchors in the future)' - else: - s = f'{PREFIX}Done ⚠️ (original anchors better than new anchors, proceeding with original anchors)' - LOGGER.info(s) - - -def kmean_anchors(dataset='./data/coco128.yaml', n=9, img_size=640, thr=4.0, gen=1000, verbose=True): - """ Creates kmeans-evolved anchors from training dataset - - Arguments: - dataset: path to data.yaml, or a loaded dataset - n: number of anchors - img_size: image size used for training - thr: anchor-label wh ratio threshold hyperparameter hyp['anchor_t'] used for training, default=4.0 - gen: generations to evolve anchors using genetic algorithm - verbose: print all results - - Return: - k: kmeans evolved anchors - - Usage: - from utils.autoanchor import *; _ = kmean_anchors() - """ - from scipy.cluster.vq import kmeans - - npr = np.random - thr = 1 / thr - - def metric(k, wh): # compute metrics - r = wh[:, None] / k[None] - x = torch.min(r, 1 / r).min(2)[0] # ratio metric - # x = wh_iou(wh, torch.tensor(k)) # iou metric - return x, x.max(1)[0] # x, best_x - - def anchor_fitness(k): # mutation fitness - _, best = metric(torch.tensor(k, dtype=torch.float32), wh) - return (best * (best > thr).float()).mean() # fitness - - def print_results(k, verbose=True): - k = k[np.argsort(k.prod(1))] # sort small to large - x, best = metric(k, wh0) - bpr, aat = (best > thr).float().mean(), (x > thr).float().mean() * n # best possible recall, anch > thr - s = f'{PREFIX}thr={thr:.2f}: {bpr:.4f} best possible recall, {aat:.2f} anchors past thr\n' \ - f'{PREFIX}n={n}, img_size={img_size}, metric_all={x.mean():.3f}/{best.mean():.3f}-mean/best, ' \ - f'past_thr={x[x > thr].mean():.3f}-mean: ' - for x in k: - s += '%i,%i, ' % (round(x[0]), round(x[1])) - if verbose: - LOGGER.info(s[:-2]) - return k - - if isinstance(dataset, str): # *.yaml file - with open(dataset, errors='ignore') as f: - data_dict = yaml.safe_load(f) # model dict - from utils.dataloaders import LoadImagesAndLabels - dataset = LoadImagesAndLabels(data_dict['train'], augment=True, rect=True) - - # Get label wh - shapes = img_size * dataset.shapes / dataset.shapes.max(1, keepdims=True) - wh0 = np.concatenate([l[:, 3:5] * s for s, l in zip(shapes, dataset.labels)]) # wh - - # Filter - i = (wh0 < 3.0).any(1).sum() - if i: - LOGGER.info(f'{PREFIX}WARNING: Extremely small objects found: {i} of {len(wh0)} labels are < 3 pixels in size') - wh = wh0[(wh0 >= 2.0).any(1)].astype(np.float32) # filter > 2 pixels - # wh = wh * (npr.rand(wh.shape[0], 1) * 0.9 + 0.1) # multiply by random scale 0-1 - - # Kmeans init - try: - LOGGER.info(f'{PREFIX}Running kmeans for {n} anchors on {len(wh)} points...') - assert n <= len(wh) # apply overdetermined constraint - s = wh.std(0) # sigmas for whitening - k = kmeans(wh / s, n, iter=30)[0] * s # points - assert n == len(k) # kmeans may return fewer points than requested if wh is 
insufficient or too similar
-    except Exception:
-        LOGGER.warning(f'{PREFIX}WARNING: switching strategies from kmeans to random init')
-        k = np.sort(npr.rand(n * 2)).reshape(n, 2) * img_size  # random init
-    wh, wh0 = (torch.tensor(x, dtype=torch.float32) for x in (wh, wh0))
-    k = print_results(k, verbose=False)
-
-    # Plot
-    # k, d = [None] * 20, [None] * 20
-    # for i in tqdm(range(1, 21)):
-    #     k[i-1], d[i-1] = kmeans(wh / s, i)  # points, mean distance
-    # fig, ax = plt.subplots(1, 2, figsize=(14, 7), tight_layout=True)
-    # ax = ax.ravel()
-    # ax[0].plot(np.arange(1, 21), np.array(d) ** 2, marker='.')
-    # fig, ax = plt.subplots(1, 2, figsize=(14, 7))  # plot wh
-    # ax[0].hist(wh[wh[:, 0]<100, 0], 400)
-    # ax[1].hist(wh[wh[:, 1]<100, 1], 400)
-    # fig.savefig('wh.png', dpi=200)
-
-    # Evolve
-    f, sh, mp, s = anchor_fitness(k), k.shape, 0.9, 0.1  # fitness, anchor shape, mutation prob, sigma
-    pbar = tqdm(range(gen), bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}')  # progress bar
-    for _ in pbar:
-        v = np.ones(sh)
-        while (v == 1).all():  # mutate until a change occurs (prevent duplicates)
-            v = ((npr.random(sh) < mp) * random.random() * npr.randn(*sh) * s + 1).clip(0.3, 3.0)
-        kg = (k.copy() * v).clip(min=2.0)
-        fg = anchor_fitness(kg)
-        if fg > f:
-            f, k = fg, kg.copy()
-            pbar.desc = f'{PREFIX}Evolving anchors with Genetic Algorithm: fitness = {f:.4f}'
-            if verbose:
-                print_results(k, verbose)
-
-    return print_results(k).astype(np.float32)
diff --git a/src/yolov5_ros/src/yolov5/utils/autobatch.py b/src/yolov5_ros/src/yolov5/utils/autobatch.py
deleted file mode 100644
index 641b055..0000000
--- a/src/yolov5_ros/src/yolov5/utils/autobatch.py
+++ /dev/null
@@ -1,69 +0,0 @@
-# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
-"""
-Auto-batch utils
-"""
-
-from copy import deepcopy
-
-import numpy as np
-import torch
-
-from utils.general import LOGGER, colorstr
-from utils.torch_utils import profile
-
-
-def check_train_batch_size(model, imgsz=640, amp=True):
-    # Check YOLOv5 training batch size
-    with torch.cuda.amp.autocast(amp):
-        return autobatch(deepcopy(model).train(), imgsz)  # compute optimal batch size
-
-
-def autobatch(model, imgsz=640, fraction=0.8, batch_size=16):
-    # Automatically estimate best batch size to use `fraction` of available CUDA memory
-    # Usage:
-    #     import torch
-    #     from utils.autobatch import autobatch
-    #     model = torch.hub.load('ultralytics/yolov5', 'yolov5s', autoshape=False)
-    #     print(autobatch(model))
-
-    # Check device
-    prefix = colorstr('AutoBatch: ')
-    LOGGER.info(f'{prefix}Computing optimal batch size for --imgsz {imgsz}')
-    device = next(model.parameters()).device  # get model device
-    if device.type == 'cpu':
-        LOGGER.info(f'{prefix}CUDA not detected, using default CPU batch-size {batch_size}')
-        return batch_size
-
-    # Inspect CUDA memory
-    gb = 1 << 30  # bytes to GiB (1024 ** 3)
-    d = str(device).upper()  # 'CUDA:0'
-    properties = torch.cuda.get_device_properties(device)  # device properties
-    t = properties.total_memory / gb  # GiB total
-    r = torch.cuda.memory_reserved(device) / gb  # GiB reserved
-    a = torch.cuda.memory_allocated(device) / gb  # GiB allocated
-    f = t - (r + a)  # GiB free
-    LOGGER.info(f'{prefix}{d} ({properties.name}) {t:.2f}G total, {r:.2f}G reserved, {a:.2f}G allocated, {f:.2f}G free')
-
-    # Profile batch sizes
-    batch_sizes = [1, 2, 4, 8, 16]
-    try:
-        img = [torch.empty(b, 3, imgsz, imgsz) for b in batch_sizes]
-        results = profile(img, model, n=3, device=device)
-    except Exception as e:
-        LOGGER.warning(f'{prefix}{e}')
-
-    # Fit a solution
-    y = [x[2] for x in results if x]  # memory [2]
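-    # Memory use grows roughly linearly with batch size, so a degree-1 fit of the measured
-    # (batch_size, GiB) pairs can be inverted for the batch size expected to occupy
-    # `fraction` of the free memory: b = (f * fraction - intercept) / slope, solved below.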
-    p = np.polyfit(batch_sizes[:len(y)], y, deg=1)  # first degree polynomial fit
-    b = int((f * fraction - p[1]) / p[0])  # solve fit for the batch size that uses `fraction` of free memory
-    if None in results:  # some sizes failed
-        i = results.index(None)  # first fail index
-        if b >= batch_sizes[i]:  # estimated batch size at or above failure point
-            b = batch_sizes[max(i - 1, 0)]  # select prior safe point
-    if b < 1 or b > 1024:  # b outside of safe range
-        b = batch_size
-        LOGGER.warning(f'{prefix}WARNING: ⚠️ CUDA anomaly detected, recommend restart environment and retry command.')
-
-    fraction = np.polyval(p, b) / t  # actual fraction predicted
-    LOGGER.info(f'{prefix}Using batch-size {b} for {d} {t * fraction:.2f}G/{t:.2f}G ({fraction * 100:.0f}%) ✅')
-    return b
diff --git a/src/yolov5_ros/src/yolov5/utils/aws/__init__.py b/src/yolov5_ros/src/yolov5/utils/aws/__init__.py
deleted file mode 100644
index e69de29..0000000
diff --git a/src/yolov5_ros/src/yolov5/utils/aws/mime.sh b/src/yolov5_ros/src/yolov5/utils/aws/mime.sh
deleted file mode 100644
index c319a83..0000000
--- a/src/yolov5_ros/src/yolov5/utils/aws/mime.sh
+++ /dev/null
@@ -1,26 +0,0 @@
-# AWS EC2 instance startup 'MIME' script https://aws.amazon.com/premiumsupport/knowledge-center/execute-user-data-ec2/
-# This script will run on every instance restart, not only on first start
-# --- DO NOT COPY ABOVE COMMENTS WHEN PASTING INTO USERDATA ---
-
-Content-Type: multipart/mixed; boundary="//"
-MIME-Version: 1.0
-
---//
-Content-Type: text/cloud-config; charset="us-ascii"
-MIME-Version: 1.0
-Content-Transfer-Encoding: 7bit
-Content-Disposition: attachment; filename="cloud-config.txt"
-
-#cloud-config
-cloud_final_modules:
-- [scripts-user, always]
-
---//
-Content-Type: text/x-shellscript; charset="us-ascii"
-MIME-Version: 1.0
-Content-Transfer-Encoding: 7bit
-Content-Disposition: attachment; filename="userdata.txt"
-
-#!/bin/bash
-# --- paste contents of userdata.sh here ---
---//
diff --git a/src/yolov5_ros/src/yolov5/utils/aws/resume.py b/src/yolov5_ros/src/yolov5/utils/aws/resume.py
deleted file mode 100644
index b21731c..0000000
--- a/src/yolov5_ros/src/yolov5/utils/aws/resume.py
+++ /dev/null
@@ -1,40 +0,0 @@
-# Resume all interrupted trainings in yolov5/ dir including DDP trainings
-# Usage: $ python utils/aws/resume.py
-
-import os
-import sys
-from pathlib import Path
-
-import torch
-import yaml
-
-FILE = Path(__file__).resolve()
-ROOT = FILE.parents[2]  # YOLOv5 root directory
-if str(ROOT) not in sys.path:
-    sys.path.append(str(ROOT))  # add ROOT to PATH
-
-port = 0  # --master_port
-path = Path('').resolve()
-for last in path.rglob('*/**/last.pt'):
-    ckpt = torch.load(last)
-    if ckpt['optimizer'] is None:
-        continue
-
-    # Load opt.yaml
-    with open(last.parent.parent / 'opt.yaml', errors='ignore') as f:
-        opt = yaml.safe_load(f)
-
-    # Get device count
-    d = opt['device'].split(',')  # devices
-    nd = len(d)  # number of devices
-    ddp = nd > 1 or (nd == 0 and torch.cuda.device_count() > 1)  # distributed data parallel
-
-    if ddp:  # multi-GPU
-        port += 1
-        cmd = f'python -m torch.distributed.run --nproc_per_node {nd} --master_port {port} train.py --resume {last}'
-    else:  # single-GPU
-        cmd = f'python train.py --resume {last}'
-
-    cmd += ' > /dev/null 2>&1 &'  # redirect output to /dev/null and run as a background process
-    print(cmd)
-    os.system(cmd)
diff --git a/src/yolov5_ros/src/yolov5/utils/aws/userdata.sh b/src/yolov5_ros/src/yolov5/utils/aws/userdata.sh
deleted file mode 100644
index 5fc1332..0000000
--- a/src/yolov5_ros/src/yolov5/utils/aws/userdata.sh
+++ /dev/null
@@ -1,27 +0,0 @@
-#!/bin/bash
-# AWS EC2 instance startup script https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/user-data.html
-# This script will run only once on first instance start (for a re-start script see mime.sh)
-# /home/ubuntu (ubuntu) or /home/ec2-user (amazon-linux) is working dir
-# Use >300 GB SSD
-
-cd /home/ubuntu
-if [ ! -d yolov5 ]; then
-  echo "Running first-time script."  # install dependencies, download COCO, pull Docker
-  git clone https://github.com/ultralytics/yolov5 -b master && sudo chmod -R 777 yolov5
-  cd yolov5
-  bash data/scripts/get_coco.sh && echo "COCO done." &
-  sudo docker pull ultralytics/yolov5:latest && echo "Docker done." &
-  python -m pip install --upgrade pip && pip install -r requirements.txt && python detect.py && echo "Requirements done." &
-  wait && echo "All tasks done."  # finish background tasks
-else
-  echo "Running re-start script."  # resume interrupted runs
-  i=0
-  list=$(sudo docker ps -qa)  # container list i.e. $'one\ntwo\nthree\nfour'
-  while IFS= read -r id; do
-    ((i++))
-    echo "restarting container $i: $id"
-    sudo docker start $id
-    # sudo docker exec -it $id python train.py --resume  # single-GPU
-    sudo docker exec -d $id python utils/aws/resume.py  # multi-scenario
-  done <<<"$list"
-fi
diff --git a/src/yolov5_ros/src/yolov5/utils/benchmarks.py b/src/yolov5_ros/src/yolov5/utils/benchmarks.py
deleted file mode 100644
index 9d5c7f2..0000000
--- a/src/yolov5_ros/src/yolov5/utils/benchmarks.py
+++ /dev/null
@@ -1,161 +0,0 @@
-# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
-"""
-Run YOLOv5 benchmarks on all supported export formats
-
-Format                      | `export.py --include`         | Model
----                         | ---                           | ---
-PyTorch                     | -                             | yolov5s.pt
-TorchScript                 | `torchscript`                 | yolov5s.torchscript
-ONNX                        | `onnx`                        | yolov5s.onnx
-OpenVINO                    | `openvino`                    | yolov5s_openvino_model/
-TensorRT                    | `engine`                      | yolov5s.engine
-CoreML                      | `coreml`                      | yolov5s.mlmodel
-TensorFlow SavedModel       | `saved_model`                 | yolov5s_saved_model/
-TensorFlow GraphDef         | `pb`                          | yolov5s.pb
-TensorFlow Lite             | `tflite`                      | yolov5s.tflite
-TensorFlow Edge TPU         | `edgetpu`                     | yolov5s_edgetpu.tflite
-TensorFlow.js               | `tfjs`                        | yolov5s_web_model/
-
-Requirements:
-    $ pip install -r requirements.txt coremltools onnx onnx-simplifier onnxruntime openvino-dev tensorflow-cpu  # CPU
-    $ pip install -r requirements.txt coremltools onnx onnx-simplifier onnxruntime-gpu openvino-dev tensorflow  # GPU
-    $ pip install -U nvidia-tensorrt --index-url https://pypi.ngc.nvidia.com  # TensorRT
-
-Usage:
-    $ python utils/benchmarks.py --weights yolov5s.pt --img 640
-"""
-
-import argparse
-import platform
-import sys
-import time
-from pathlib import Path
-
-import pandas as pd
-
-FILE = Path(__file__).resolve()
-ROOT = FILE.parents[1]  # YOLOv5 root directory
-if str(ROOT) not in sys.path:
-    sys.path.append(str(ROOT))  # add ROOT to PATH
-# ROOT = ROOT.relative_to(Path.cwd())  # relative
-
-import export
-import val
-from utils import notebook_init
-from utils.general import LOGGER, check_yaml, file_size, print_args
-from utils.torch_utils import select_device
-
-
-def run(
-        weights=ROOT / 'yolov5s.pt',  # weights path
-        imgsz=640,  # inference size (pixels)
-        batch_size=1,  # batch size
-        data=ROOT / 'data/coco128.yaml',  # dataset.yaml path
-        device='',  # cuda device, i.e.
0 or 0,1,2,3 or cpu - half=False, # use FP16 half-precision inference - test=False, # test exports only - pt_only=False, # test PyTorch only - hard_fail=False, # throw error on benchmark failure -): - y, t = [], time.time() - device = select_device(device) - for i, (name, f, suffix, cpu, gpu) in export.export_formats().iterrows(): # index, (name, file, suffix, CPU, GPU) - try: - assert i not in (9, 10, 11), 'inference not supported' # Edge TPU, TF.js and Paddle are unsupported - assert i != 5 or platform.system() == 'Darwin', 'inference only supported on macOS>=10.13' # CoreML - if 'cpu' in device.type: - assert cpu, 'inference not supported on CPU' - if 'cuda' in device.type: - assert gpu, 'inference not supported on GPU' - - # Export - if f == '-': - w = weights # PyTorch format - else: - w = export.run(weights=weights, imgsz=[imgsz], include=[f], device=device, half=half)[-1] # all others - assert suffix in str(w), 'export failed' - - # Validate - result = val.run(data, w, batch_size, imgsz, plots=False, device=device, task='benchmark', half=half) - metrics = result[0] # metrics (mp, mr, map50, map, *losses(box, obj, cls)) - speeds = result[2] # times (preprocess, inference, postprocess) - y.append([name, round(file_size(w), 1), round(metrics[3], 4), round(speeds[1], 2)]) # MB, mAP, t_inference - except Exception as e: - if hard_fail: - assert type(e) is AssertionError, f'Benchmark --hard-fail for {name}: {e}' - LOGGER.warning(f'WARNING: Benchmark failure for {name}: {e}') - y.append([name, None, None, None]) # mAP, t_inference - if pt_only and i == 0: - break # break after PyTorch - - # Print results - LOGGER.info('\n') - parse_opt() - notebook_init() # print system info - c = ['Format', 'Size (MB)', 'mAP50-95', 'Inference time (ms)'] if map else ['Format', 'Export', '', ''] - py = pd.DataFrame(y, columns=c) - LOGGER.info(f'\nBenchmarks complete ({time.time() - t:.2f}s)') - LOGGER.info(str(py if map else py.iloc[:, :2])) - if hard_fail and isinstance(hard_fail, str): - metrics = py['mAP50-95'].array # values to compare to floor - floor = eval(hard_fail) # minimum metric floor to pass, i.e. = 0.29 mAP for YOLOv5n - assert all(x > floor for x in metrics if pd.notna(x)), f'HARD FAIL: mAP50-95 < floor {floor}' - return py - - -def test( - weights=ROOT / 'yolov5s.pt', # weights path - imgsz=640, # inference size (pixels) - batch_size=1, # batch size - data=ROOT / 'data/coco128.yaml', # dataset.yaml path - device='', # cuda device, i.e. 
0 or 0,1,2,3 or cpu
-        half=False,  # use FP16 half-precision inference
-        test=False,  # test exports only
-        pt_only=False,  # test PyTorch only
-        hard_fail=False,  # throw error on benchmark failure
-):
-    y, t = [], time.time()
-    device = select_device(device)
-    for i, (name, f, suffix, cpu, gpu) in export.export_formats().iterrows():  # index, (name, file, suffix, CPU, GPU)
-        try:
-            w = weights if f == '-' else \
-                export.run(weights=weights, imgsz=[imgsz], include=[f], device=device, half=half)[-1]  # weights
-            assert suffix in str(w), 'export failed'
-            y.append([name, True])
-        except Exception:
-            y.append([name, False])  # mAP, t_inference
-
-    # Print results
-    LOGGER.info('\n')
-    parse_opt()
-    notebook_init()  # print system info
-    py = pd.DataFrame(y, columns=['Format', 'Export'])
-    LOGGER.info(f'\nExports complete ({time.time() - t:.2f}s)')
-    LOGGER.info(str(py))
-    return py
-
-
-def parse_opt():
-    parser = argparse.ArgumentParser()
-    parser.add_argument('--weights', type=str, default=ROOT / 'yolov5s.pt', help='weights path')
-    parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=640, help='inference size (pixels)')
-    parser.add_argument('--batch-size', type=int, default=1, help='batch size')
-    parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='dataset.yaml path')
-    parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
-    parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference')
-    parser.add_argument('--test', action='store_true', help='test exports only')
-    parser.add_argument('--pt-only', action='store_true', help='test PyTorch only')
-    parser.add_argument('--hard-fail', nargs='?', const=True, default=False, help='Exception on error or < min metric')
-    opt = parser.parse_args()
-    opt.data = check_yaml(opt.data)  # check YAML
-    print_args(vars(opt))
-    return opt
-
-
-def main(opt):
-    test(**vars(opt)) if opt.test else run(**vars(opt))
-
-
-if __name__ == "__main__":
-    opt = parse_opt()
-    main(opt)
diff --git a/src/yolov5_ros/src/yolov5/utils/callbacks.py b/src/yolov5_ros/src/yolov5/utils/callbacks.py
deleted file mode 100644
index 166d893..0000000
--- a/src/yolov5_ros/src/yolov5/utils/callbacks.py
+++ /dev/null
@@ -1,76 +0,0 @@
-# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
-"""
-Callback utils
-"""
-
-import threading
-
-
-class Callbacks:
-    """
-    Handles all registered callbacks for YOLOv5 Hooks
-    """
-
-    def __init__(self):
-        # Define the available callbacks
-        self._callbacks = {
-            'on_pretrain_routine_start': [],
-            'on_pretrain_routine_end': [],
-            'on_train_start': [],
-            'on_train_epoch_start': [],
-            'on_train_batch_start': [],
-            'optimizer_step': [],
-            'on_before_zero_grad': [],
-            'on_train_batch_end': [],
-            'on_train_epoch_end': [],
-            'on_val_start': [],
-            'on_val_batch_start': [],
-            'on_val_image_end': [],
-            'on_val_batch_end': [],
-            'on_val_end': [],
-            'on_fit_epoch_end': [],  # fit = train + val
-            'on_model_save': [],
-            'on_train_end': [],
-            'on_params_update': [],
-            'teardown': [],}
-        self.stop_training = False  # set True to interrupt training
-
-    def register_action(self, hook, name='', callback=None):
-        """
-        Register a new action to a callback hook
-
-        Args:
-            hook: The callback hook name to register the action to
-            name: The name of the action for later reference
-            callback: The callback to fire
-        """
-        assert hook in self._callbacks, f"hook '{hook}' not found in callbacks {self._callbacks}"
-        assert callable(callback), f"callback '{callback}' is not callable"
-        self._callbacks[hook].append({'name': name, 'callback': callback})
-
-    def get_registered_actions(self, hook=None):
-        """
-        Returns all the registered actions by callback hook
-
-        Args:
-            hook: The name of the hook to check, defaults to all
-        """
-        return self._callbacks[hook] if hook else self._callbacks
-
-    def run(self, hook, *args, thread=False, **kwargs):
-        """
-        Loop through the registered actions for a hook and fire all callbacks, either on the
-        main thread or each in its own daemon thread
-
-        Args:
-            hook: The name of the hook to fire
-            args: Arguments to receive from YOLOv5
-            thread: (boolean) Run callbacks in daemon thread
-            kwargs: Keyword Arguments to receive from YOLOv5
-        """
-
-        assert hook in self._callbacks, f"hook '{hook}' not found in callbacks {self._callbacks}"
-        for logger in self._callbacks[hook]:
-            if thread:
-                threading.Thread(target=logger['callback'], args=args, kwargs=kwargs, daemon=True).start()
-            else:
-                logger['callback'](*args, **kwargs)
diff --git a/src/yolov5_ros/src/yolov5/utils/dataloaders.py b/src/yolov5_ros/src/yolov5/utils/dataloaders.py
deleted file mode 100755
index d8ef11f..0000000
--- a/src/yolov5_ros/src/yolov5/utils/dataloaders.py
+++ /dev/null
@@ -1,1129 +0,0 @@
-# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
-"""
-Dataloaders and dataset utils
-"""
-
-import contextlib
-import glob
-import hashlib
-import json
-import math
-import os
-import random
-import shutil
-import time
-from itertools import repeat
-from multiprocessing.pool import Pool, ThreadPool
-from pathlib import Path
-from threading import Thread
-from urllib.parse import urlparse
-from zipfile import ZipFile
-
-import numpy as np
-import torch
-import torch.nn.functional as F
-import torchvision
-import yaml
-from PIL import ExifTags, Image, ImageOps
-from torch.utils.data import DataLoader, Dataset, dataloader, distributed
-from tqdm import tqdm
-
-from utils.augmentations import (Albumentations, augment_hsv, classify_albumentations, classify_transforms, copy_paste,
-                                 letterbox, mixup, random_perspective)
-from utils.general import (DATASETS_DIR, LOGGER, NUM_THREADS, check_dataset, check_requirements, check_yaml, clean_str,
-                           cv2, is_colab, is_kaggle, segments2boxes, xyn2xy, xywh2xyxy, xywhn2xyxy, xyxy2xywhn)
-from utils.torch_utils import torch_distributed_zero_first
-
-# Parameters
-HELP_URL = 'See https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data'
-IMG_FORMATS = 'bmp', 'dng', 'jpeg', 'jpg', 'mpo', 'png', 'tif', 'tiff', 'webp', 'pfm'  # include image suffixes
-VID_FORMATS = 'asf', 'avi', 'gif', 'm4v', 'mkv', 'mov', 'mp4', 'mpeg', 'mpg', 'ts', 'wmv'  # include video suffixes
-BAR_FORMAT = '{l_bar}{bar:10}{r_bar}{bar:-10b}'  # tqdm bar format
-LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1))  # https://pytorch.org/docs/stable/elastic/run.html
-PIN_MEMORY = str(os.getenv('PIN_MEMORY', True)).lower() == 'true'  # global pin_memory for dataloaders
-
-# Get orientation exif tag
-for orientation in ExifTags.TAGS.keys():
-    if ExifTags.TAGS[orientation] == 'Orientation':
-        break
-
-
-def get_hash(paths):
-    # Returns a single hash value of a list of paths (files or dirs)
-    size = sum(os.path.getsize(p) for p in paths if os.path.exists(p))  # sizes
-    h = hashlib.md5(str(size).encode())  # hash sizes
-    h.update(''.join(paths).encode())  # hash paths
-    return h.hexdigest()  # return hash
-
-
-def exif_size(img):
-    # Returns exif-corrected PIL size
-    s = img.size  # (width, height)
-    with contextlib.suppress(Exception):
-        rotation = dict(img._getexif().items())[orientation]
-        if rotation in [6, 8]:  # rotation 270 or 90
-            s = (s[1],
s[0]) - return s - - -def exif_transpose(image): - """ - Transpose a PIL image accordingly if it has an EXIF Orientation tag. - Inplace version of https://github.com/python-pillow/Pillow/blob/master/src/PIL/ImageOps.py exif_transpose() - - :param image: The image to transpose. - :return: An image. - """ - exif = image.getexif() - orientation = exif.get(0x0112, 1) # default 1 - if orientation > 1: - method = { - 2: Image.FLIP_LEFT_RIGHT, - 3: Image.ROTATE_180, - 4: Image.FLIP_TOP_BOTTOM, - 5: Image.TRANSPOSE, - 6: Image.ROTATE_270, - 7: Image.TRANSVERSE, - 8: Image.ROTATE_90}.get(orientation) - if method is not None: - image = image.transpose(method) - del exif[0x0112] - image.info["exif"] = exif.tobytes() - return image - - -def seed_worker(worker_id): - # Set dataloader worker seed https://pytorch.org/docs/stable/notes/randomness.html#dataloader - worker_seed = torch.initial_seed() % 2 ** 32 - np.random.seed(worker_seed) - random.seed(worker_seed) - - -def create_dataloader(path, - imgsz, - batch_size, - stride, - single_cls=False, - hyp=None, - augment=False, - cache=False, - pad=0.0, - rect=False, - rank=-1, - workers=8, - image_weights=False, - quad=False, - prefix='', - shuffle=False): - if rect and shuffle: - LOGGER.warning('WARNING: --rect is incompatible with DataLoader shuffle, setting shuffle=False') - shuffle = False - with torch_distributed_zero_first(rank): # init dataset *.cache only once if DDP - dataset = LoadImagesAndLabels( - path, - imgsz, - batch_size, - augment=augment, # augmentation - hyp=hyp, # hyperparameters - rect=rect, # rectangular batches - cache_images=cache, - single_cls=single_cls, - stride=int(stride), - pad=pad, - image_weights=image_weights, - prefix=prefix) - - batch_size = min(batch_size, len(dataset)) - nd = torch.cuda.device_count() # number of CUDA devices - nw = min([os.cpu_count() // max(nd, 1), batch_size if batch_size > 1 else 0, workers]) # number of workers - sampler = None if rank == -1 else distributed.DistributedSampler(dataset, shuffle=shuffle) - loader = DataLoader if image_weights else InfiniteDataLoader # only DataLoader allows for attribute updates - generator = torch.Generator() - generator.manual_seed(0) - return loader(dataset, - batch_size=batch_size, - shuffle=shuffle and sampler is None, - num_workers=nw, - sampler=sampler, - pin_memory=PIN_MEMORY, - collate_fn=LoadImagesAndLabels.collate_fn4 if quad else LoadImagesAndLabels.collate_fn, - worker_init_fn=seed_worker, - generator=generator), dataset - - -class InfiniteDataLoader(dataloader.DataLoader): - """ Dataloader that reuses workers - - Uses same syntax as vanilla DataLoader - """ - - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - object.__setattr__(self, 'batch_sampler', _RepeatSampler(self.batch_sampler)) - self.iterator = super().__iter__() - - def __len__(self): - return len(self.batch_sampler.sampler) - - def __iter__(self): - for _ in range(len(self)): - yield next(self.iterator) - - -class _RepeatSampler: - """ Sampler that repeats forever - - Args: - sampler (Sampler) - """ - - def __init__(self, sampler): - self.sampler = sampler - - def __iter__(self): - while True: - yield from iter(self.sampler) - - -class LoadImages: - # YOLOv5 image/video dataloader, i.e. 
`python detect.py --source image.jpg/vid.mp4` - def __init__(self, path, img_size=640, stride=32, auto=True, transforms=None, vid_stride=1): - files = [] - for p in sorted(path) if isinstance(path, (list, tuple)) else [path]: - p = str(Path(p).resolve()) - if '*' in p: - files.extend(sorted(glob.glob(p, recursive=True))) # glob - elif os.path.isdir(p): - files.extend(sorted(glob.glob(os.path.join(p, '*.*')))) # dir - elif os.path.isfile(p): - files.append(p) # files - else: - raise FileNotFoundError(f'{p} does not exist') - - images = [x for x in files if x.split('.')[-1].lower() in IMG_FORMATS] - videos = [x for x in files if x.split('.')[-1].lower() in VID_FORMATS] - ni, nv = len(images), len(videos) - - self.img_size = img_size - self.stride = stride - self.files = images + videos - self.nf = ni + nv # number of files - self.video_flag = [False] * ni + [True] * nv - self.mode = 'image' - self.auto = auto - self.transforms = transforms # optional - self.vid_stride = vid_stride # video frame-rate stride - if any(videos): - self._new_video(videos[0]) # new video - else: - self.cap = None - assert self.nf > 0, f'No images or videos found in {p}. ' \ - f'Supported formats are:\nimages: {IMG_FORMATS}\nvideos: {VID_FORMATS}' - - def __iter__(self): - self.count = 0 - return self - - def __next__(self): - if self.count == self.nf: - raise StopIteration - path = self.files[self.count] - - if self.video_flag[self.count]: - # Read video - self.mode = 'video' - ret_val, im0 = self.cap.read() - self.cap.set(cv2.CAP_PROP_POS_FRAMES, self.vid_stride * (self.frame + 1)) # read at vid_stride - while not ret_val: - self.count += 1 - self.cap.release() - if self.count == self.nf: # last video - raise StopIteration - path = self.files[self.count] - self._new_video(path) - ret_val, im0 = self.cap.read() - - self.frame += 1 - # im0 = self._cv2_rotate(im0) # for use if cv2 autorotation is False - s = f'video {self.count + 1}/{self.nf} ({self.frame}/{self.frames}) {path}: ' - - else: - # Read image - self.count += 1 - im0 = cv2.imread(path) # BGR - assert im0 is not None, f'Image Not Found {path}' - s = f'image {self.count}/{self.nf} {path}: ' - - if self.transforms: - im = self.transforms(im0) # transforms - else: - im = letterbox(im0, self.img_size, stride=self.stride, auto=self.auto)[0] # padded resize - im = im.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB - im = np.ascontiguousarray(im) # contiguous - - return path, im, im0, self.cap, s - - def _new_video(self, path): - # Create a new video capture object - self.frame = 0 - self.cap = cv2.VideoCapture(path) - self.frames = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT) / self.vid_stride) - self.orientation = int(self.cap.get(cv2.CAP_PROP_ORIENTATION_META)) # rotation degrees - # self.cap.set(cv2.CAP_PROP_ORIENTATION_AUTO, 0) # disable https://github.com/ultralytics/yolov5/issues/8493 - - def _cv2_rotate(self, im): - # Rotate a cv2 video manually - if self.orientation == 0: - return cv2.rotate(im, cv2.ROTATE_90_CLOCKWISE) - elif self.orientation == 180: - return cv2.rotate(im, cv2.ROTATE_90_COUNTERCLOCKWISE) - elif self.orientation == 90: - return cv2.rotate(im, cv2.ROTATE_180) - return im - - def __len__(self): - return self.nf # number of files - - -class LoadStreams: - # YOLOv5 streamloader, i.e. 
`python detect.py --source 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP streams` - def __init__(self, sources='streams.txt', img_size=640, stride=32, auto=True, transforms=None, vid_stride=1): - torch.backends.cudnn.benchmark = True # faster for fixed-size inference - self.mode = 'stream' - self.img_size = img_size - self.stride = stride - self.vid_stride = vid_stride # video frame-rate stride - sources = Path(sources).read_text().rsplit() if Path(sources).is_file() else [sources] - n = len(sources) - self.sources = [clean_str(x) for x in sources] # clean source names for later - self.imgs, self.fps, self.frames, self.threads = [None] * n, [0] * n, [0] * n, [None] * n - for i, s in enumerate(sources): # index, source - # Start thread to read frames from video stream - st = f'{i + 1}/{n}: {s}... ' - if urlparse(s).hostname in ('www.youtube.com', 'youtube.com', 'youtu.be'): # if source is YouTube video - check_requirements(('pafy', 'youtube_dl==2020.12.2')) - import pafy - s = pafy.new(s).getbest(preftype="mp4").url # YouTube URL - s = eval(s) if s.isnumeric() else s # i.e. s = '0' local webcam - if s == 0: - assert not is_colab(), '--source 0 webcam unsupported on Colab. Rerun command in a local environment.' - assert not is_kaggle(), '--source 0 webcam unsupported on Kaggle. Rerun command in a local environment.' - cap = cv2.VideoCapture(s) - assert cap.isOpened(), f'{st}Failed to open {s}' - w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)) - h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) - fps = cap.get(cv2.CAP_PROP_FPS) # warning: may return 0 or nan - self.frames[i] = max(int(cap.get(cv2.CAP_PROP_FRAME_COUNT)), 0) or float('inf') # infinite stream fallback - self.fps[i] = max((fps if math.isfinite(fps) else 0) % 100, 0) or 30 # 30 FPS fallback - - _, self.imgs[i] = cap.read() # guarantee first frame - self.threads[i] = Thread(target=self.update, args=([i, cap, s]), daemon=True) - LOGGER.info(f"{st} Success ({self.frames[i]} frames {w}x{h} at {self.fps[i]:.2f} FPS)") - self.threads[i].start() - LOGGER.info('') # newline - - # check for common shapes - s = np.stack([letterbox(x, img_size, stride=stride, auto=auto)[0].shape for x in self.imgs]) - self.rect = np.unique(s, axis=0).shape[0] == 1 # rect inference if all shapes equal - self.auto = auto and self.rect - self.transforms = transforms # optional - if not self.rect: - LOGGER.warning('WARNING: Stream shapes differ. 
For optimal performance supply similarly-shaped streams.') - - def update(self, i, cap, stream): - # Read stream `i` frames in daemon thread - n, f = 0, self.frames[i] # frame number, frame array - while cap.isOpened() and n < f: - n += 1 - cap.grab() # .read() = .grab() followed by .retrieve() - if n % self.vid_stride == 0: - success, im = cap.retrieve() - if success: - self.imgs[i] = im - else: - LOGGER.warning('WARNING: Video stream unresponsive, please check your IP camera connection.') - self.imgs[i] = np.zeros_like(self.imgs[i]) - cap.open(stream) # re-open stream if signal was lost - time.sleep(0.0) # wait time - - def __iter__(self): - self.count = -1 - return self - - def __next__(self): - self.count += 1 - if not all(x.is_alive() for x in self.threads) or cv2.waitKey(1) == ord('q'): # q to quit - cv2.destroyAllWindows() - raise StopIteration - - im0 = self.imgs.copy() - if self.transforms: - im = np.stack([self.transforms(x) for x in im0]) # transforms - else: - im = np.stack([letterbox(x, self.img_size, stride=self.stride, auto=self.auto)[0] for x in im0]) # resize - im = im[..., ::-1].transpose((0, 3, 1, 2)) # BGR to RGB, BHWC to BCHW - im = np.ascontiguousarray(im) # contiguous - - return self.sources, im, im0, None, '' - - def __len__(self): - return len(self.sources) # 1E12 frames = 32 streams at 30 FPS for 30 years - - -def img2label_paths(img_paths): - # Define label paths as a function of image paths - sa, sb = f'{os.sep}images{os.sep}', f'{os.sep}labels{os.sep}' # /images/, /labels/ substrings - return [sb.join(x.rsplit(sa, 1)).rsplit('.', 1)[0] + '.txt' for x in img_paths] - - -class LoadImagesAndLabels(Dataset): - # YOLOv5 train_loader/val_loader, loads images and labels for training and validation - cache_version = 0.6 # dataset labels *.cache version - rand_interp_methods = [cv2.INTER_NEAREST, cv2.INTER_LINEAR, cv2.INTER_CUBIC, cv2.INTER_AREA, cv2.INTER_LANCZOS4] - - def __init__(self, - path, - img_size=640, - batch_size=16, - augment=False, - hyp=None, - rect=False, - image_weights=False, - cache_images=False, - single_cls=False, - stride=32, - pad=0.0, - prefix=''): - self.img_size = img_size - self.augment = augment - self.hyp = hyp - self.image_weights = image_weights - self.rect = False if image_weights else rect - self.mosaic = self.augment and not self.rect # load 4 images at a time into a mosaic (only during training) - self.mosaic_border = [-img_size // 2, -img_size // 2] - self.stride = stride - self.path = path - self.albumentations = Albumentations() if augment else None - - try: - f = [] # image files - for p in path if isinstance(path, list) else [path]: - p = Path(p) # os-agnostic - if p.is_dir(): # dir - f += glob.glob(str(p / '**' / '*.*'), recursive=True) - # f = list(p.rglob('*.*')) # pathlib - elif p.is_file(): # file - with open(p) as t: - t = t.read().strip().splitlines() - parent = str(p.parent) + os.sep - f += [x.replace('./', parent) if x.startswith('./') else x for x in t] # local to global path - # f += [p.parent / x.lstrip(os.sep) for x in t] # local to global path (pathlib) - else: - raise FileNotFoundError(f'{prefix}{p} does not exist') - self.im_files = sorted(x.replace('/', os.sep) for x in f if x.split('.')[-1].lower() in IMG_FORMATS) - # self.img_files = sorted([x for x in f if x.suffix[1:].lower() in IMG_FORMATS]) # pathlib - assert self.im_files, f'{prefix}No images found' - except Exception as e: - raise Exception(f'{prefix}Error loading data from {path}: {e}\n{HELP_URL}') - - # Check cache - self.label_files = 
img2label_paths(self.im_files) # labels - cache_path = (p if p.is_file() else Path(self.label_files[0]).parent).with_suffix('.cache') - try: - cache, exists = np.load(cache_path, allow_pickle=True).item(), True # load dict - assert cache['version'] == self.cache_version # matches current version - assert cache['hash'] == get_hash(self.label_files + self.im_files) # identical hash - except Exception: - cache, exists = self.cache_labels(cache_path, prefix), False # run cache ops - - # Display cache - nf, nm, ne, nc, n = cache.pop('results') # found, missing, empty, corrupt, total - if exists and LOCAL_RANK in {-1, 0}: - d = f"Scanning '{cache_path}' images and labels... {nf} found, {nm} missing, {ne} empty, {nc} corrupt" - tqdm(None, desc=prefix + d, total=n, initial=n, bar_format=BAR_FORMAT) # display cache results - if cache['msgs']: - LOGGER.info('\n'.join(cache['msgs'])) # display warnings - assert nf > 0 or not augment, f'{prefix}No labels found in {cache_path}, can not start training. {HELP_URL}' - - # Read cache - [cache.pop(k) for k in ('hash', 'version', 'msgs')] # remove items - labels, shapes, self.segments = zip(*cache.values()) - nl = len(np.concatenate(labels, 0)) # number of labels - assert nl > 0 or not augment, f'{prefix}All labels empty in {cache_path}, can not start training. {HELP_URL}' - self.labels = list(labels) - self.shapes = np.array(shapes) - self.im_files = list(cache.keys()) # update - self.label_files = img2label_paths(cache.keys()) # update - n = len(shapes) # number of images - bi = np.floor(np.arange(n) / batch_size).astype(int) # batch index - nb = bi[-1] + 1 # number of batches - self.batch = bi # batch index of image - self.n = n - self.indices = range(n) - - # Update labels - include_class = [] # filter labels to include only these classes (optional) - include_class_array = np.array(include_class).reshape(1, -1) - for i, (label, segment) in enumerate(zip(self.labels, self.segments)): - if include_class: - j = (label[:, 0:1] == include_class_array).any(1) - self.labels[i] = label[j] - if segment: - self.segments[i] = segment[j] - if single_cls: # single-class training, merge all classes into 0 - self.labels[i][:, 0] = 0 - if segment: - self.segments[i][:, 0] = 0 - - # Rectangular Training - if self.rect: - # Sort by aspect ratio - s = self.shapes # wh - ar = s[:, 1] / s[:, 0] # aspect ratio - irect = ar.argsort() - self.im_files = [self.im_files[i] for i in irect] - self.label_files = [self.label_files[i] for i in irect] - self.labels = [self.labels[i] for i in irect] - self.shapes = s[irect] # wh - ar = ar[irect] - - # Set training image shapes - shapes = [[1, 1]] * nb - for i in range(nb): - ari = ar[bi == i] - mini, maxi = ari.min(), ari.max() - if maxi < 1: - shapes[i] = [maxi, 1] - elif mini > 1: - shapes[i] = [1, 1 / mini] - - self.batch_shapes = np.ceil(np.array(shapes) * img_size / stride + pad).astype(int) * stride - - # Cache images into RAM/disk for faster training (WARNING: large datasets may exceed system resources) - self.ims = [None] * n - self.npy_files = [Path(f).with_suffix('.npy') for f in self.im_files] - if cache_images: - gb = 0 # Gigabytes of cached images - self.im_hw0, self.im_hw = [None] * n, [None] * n - fcn = self.cache_images_to_disk if cache_images == 'disk' else self.load_image - results = ThreadPool(NUM_THREADS).imap(fcn, range(n)) - pbar = tqdm(enumerate(results), total=n, bar_format=BAR_FORMAT, disable=LOCAL_RANK > 0) - for i, x in pbar: - if cache_images == 'disk': - gb += self.npy_files[i].stat().st_size - else: # 'ram' 
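-                    # 'ram' path: keep the decoded (and already resized) array in memory; fastest on
-                    # re-reads, but costs roughly n * h * w * 3 bytes, hence the disk-cache
-                    # alternative above for datasets larger than available RAM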
- self.ims[i], self.im_hw0[i], self.im_hw[i] = x # im, hw_orig, hw_resized = load_image(self, i) - gb += self.ims[i].nbytes - pbar.desc = f'{prefix}Caching images ({gb / 1E9:.1f}GB {cache_images})' - pbar.close() - - def cache_labels(self, path=Path('./labels.cache'), prefix=''): - # Cache dataset labels, check images and read shapes - x = {} # dict - nm, nf, ne, nc, msgs = 0, 0, 0, 0, [] # number missing, found, empty, corrupt, messages - desc = f"{prefix}Scanning '{path.parent / path.stem}' images and labels..." - with Pool(NUM_THREADS) as pool: - pbar = tqdm(pool.imap(verify_image_label, zip(self.im_files, self.label_files, repeat(prefix))), - desc=desc, - total=len(self.im_files), - bar_format=BAR_FORMAT) - for im_file, lb, shape, segments, nm_f, nf_f, ne_f, nc_f, msg in pbar: - nm += nm_f - nf += nf_f - ne += ne_f - nc += nc_f - if im_file: - x[im_file] = [lb, shape, segments] - if msg: - msgs.append(msg) - pbar.desc = f"{desc}{nf} found, {nm} missing, {ne} empty, {nc} corrupt" - - pbar.close() - if msgs: - LOGGER.info('\n'.join(msgs)) - if nf == 0: - LOGGER.warning(f'{prefix}WARNING: No labels found in {path}. {HELP_URL}') - x['hash'] = get_hash(self.label_files + self.im_files) - x['results'] = nf, nm, ne, nc, len(self.im_files) - x['msgs'] = msgs # warnings - x['version'] = self.cache_version # cache version - try: - np.save(path, x) # save cache for next time - path.with_suffix('.cache.npy').rename(path) # remove .npy suffix - LOGGER.info(f'{prefix}New cache created: {path}') - except Exception as e: - LOGGER.warning(f'{prefix}WARNING: Cache directory {path.parent} is not writeable: {e}') # not writeable - return x - - def __len__(self): - return len(self.im_files) - - # def __iter__(self): - # self.count = -1 - # print('ran dataset iter') - # #self.shuffled_vector = np.random.permutation(self.nF) if self.augment else np.arange(self.nF) - # return self - - def __getitem__(self, index): - index = self.indices[index] # linear, shuffled, or image_weights - - hyp = self.hyp - mosaic = self.mosaic and random.random() < hyp['mosaic'] - if mosaic: - # Load mosaic - img, labels = self.load_mosaic(index) - shapes = None - - # MixUp augmentation - if random.random() < hyp['mixup']: - img, labels = mixup(img, labels, *self.load_mosaic(random.randint(0, self.n - 1))) - - else: - # Load image - img, (h0, w0), (h, w) = self.load_image(index) - - # Letterbox - shape = self.batch_shapes[self.batch[index]] if self.rect else self.img_size # final letterboxed shape - img, ratio, pad = letterbox(img, shape, auto=False, scaleup=self.augment) - shapes = (h0, w0), ((h / h0, w / w0), pad) # for COCO mAP rescaling - - labels = self.labels[index].copy() - if labels.size: # normalized xywh to pixel xyxy format - labels[:, 1:] = xywhn2xyxy(labels[:, 1:], ratio[0] * w, ratio[1] * h, padw=pad[0], padh=pad[1]) - - if self.augment: - img, labels = random_perspective(img, - labels, - degrees=hyp['degrees'], - translate=hyp['translate'], - scale=hyp['scale'], - shear=hyp['shear'], - perspective=hyp['perspective']) - - nl = len(labels) # number of labels - if nl: - labels[:, 1:5] = xyxy2xywhn(labels[:, 1:5], w=img.shape[1], h=img.shape[0], clip=True, eps=1E-3) - - if self.augment: - # Albumentations - img, labels = self.albumentations(img, labels) - nl = len(labels) # update after albumentations - - # HSV color-space - augment_hsv(img, hgain=hyp['hsv_h'], sgain=hyp['hsv_s'], vgain=hyp['hsv_v']) - - # Flip up-down - if random.random() < hyp['flipud']: - img = np.flipud(img) - if nl: - labels[:, 2] = 1 - labels[:, 2] 
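-                # labels are normalized xywh at this point, so the vertical flip above only needs
-                # the y-centre mirrored (y -> 1 - y); widths, heights and x-centres are unchanged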
- - # Flip left-right - if random.random() < hyp['fliplr']: - img = np.fliplr(img) - if nl: - labels[:, 1] = 1 - labels[:, 1] - - # Cutouts - # labels = cutout(img, labels, p=0.5) - # nl = len(labels) # update after cutout - - labels_out = torch.zeros((nl, 6)) - if nl: - labels_out[:, 1:] = torch.from_numpy(labels) - - # Convert - img = img.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB - img = np.ascontiguousarray(img) - - return torch.from_numpy(img), labels_out, self.im_files[index], shapes - - def load_image(self, i): - # Loads 1 image from dataset index 'i', returns (im, original hw, resized hw) - im, f, fn = self.ims[i], self.im_files[i], self.npy_files[i], - if im is None: # not cached in RAM - if fn.exists(): # load npy - im = np.load(fn) - else: # read image - im = cv2.imread(f) # BGR - assert im is not None, f'Image Not Found {f}' - h0, w0 = im.shape[:2] # orig hw - r = self.img_size / max(h0, w0) # ratio - if r != 1: # if sizes are not equal - interp = cv2.INTER_LINEAR if (self.augment or r > 1) else cv2.INTER_AREA - im = cv2.resize(im, (int(w0 * r), int(h0 * r)), interpolation=interp) - return im, (h0, w0), im.shape[:2] # im, hw_original, hw_resized - return self.ims[i], self.im_hw0[i], self.im_hw[i] # im, hw_original, hw_resized - - def cache_images_to_disk(self, i): - # Saves an image as an *.npy file for faster loading - f = self.npy_files[i] - if not f.exists(): - np.save(f.as_posix(), cv2.imread(self.im_files[i])) - - def load_mosaic(self, index): - # YOLOv5 4-mosaic loader. Loads 1 image + 3 random images into a 4-image mosaic - labels4, segments4 = [], [] - s = self.img_size - yc, xc = (int(random.uniform(-x, 2 * s + x)) for x in self.mosaic_border) # mosaic center x, y - indices = [index] + random.choices(self.indices, k=3) # 3 additional image indices - random.shuffle(indices) - for i, index in enumerate(indices): - # Load image - img, _, (h, w) = self.load_image(index) - - # place img in img4 - if i == 0: # top left - img4 = np.full((s * 2, s * 2, img.shape[2]), 114, dtype=np.uint8) # base image with 4 tiles - x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc # xmin, ymin, xmax, ymax (large image) - x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h # xmin, ymin, xmax, ymax (small image) - elif i == 1: # top right - x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc - x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h - elif i == 2: # bottom left - x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h) - x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, w, min(y2a - y1a, h) - elif i == 3: # bottom right - x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h) - x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h) - - img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b] # img4[ymin:ymax, xmin:xmax] - padw = x1a - x1b - padh = y1a - y1b - - # Labels - labels, segments = self.labels[index].copy(), self.segments[index].copy() - if labels.size: - labels[:, 1:] = xywhn2xyxy(labels[:, 1:], w, h, padw, padh) # normalized xywh to pixel xyxy format - segments = [xyn2xy(x, w, h, padw, padh) for x in segments] - labels4.append(labels) - segments4.extend(segments) - - # Concat/clip labels - labels4 = np.concatenate(labels4, 0) - for x in (labels4[:, 1:], *segments4): - np.clip(x, 0, 2 * s, out=x) # clip when using random_perspective() - # img4, labels4 = replicate(img4, labels4) # replicate - - # Augment - img4, labels4, segments4 = copy_paste(img4, labels4, segments4, p=self.hyp['copy_paste']) - img4, labels4 = 
random_perspective(img4, - labels4, - segments4, - degrees=self.hyp['degrees'], - translate=self.hyp['translate'], - scale=self.hyp['scale'], - shear=self.hyp['shear'], - perspective=self.hyp['perspective'], - border=self.mosaic_border) # border to remove - - return img4, labels4 - - def load_mosaic9(self, index): - # YOLOv5 9-mosaic loader. Loads 1 image + 8 random images into a 9-image mosaic - labels9, segments9 = [], [] - s = self.img_size - indices = [index] + random.choices(self.indices, k=8) # 8 additional image indices - random.shuffle(indices) - hp, wp = -1, -1 # height, width previous - for i, index in enumerate(indices): - # Load image - img, _, (h, w) = self.load_image(index) - - # place img in img9 - if i == 0: # center - img9 = np.full((s * 3, s * 3, img.shape[2]), 114, dtype=np.uint8) # base image with 4 tiles - h0, w0 = h, w - c = s, s, s + w, s + h # xmin, ymin, xmax, ymax (base) coordinates - elif i == 1: # top - c = s, s - h, s + w, s - elif i == 2: # top right - c = s + wp, s - h, s + wp + w, s - elif i == 3: # right - c = s + w0, s, s + w0 + w, s + h - elif i == 4: # bottom right - c = s + w0, s + hp, s + w0 + w, s + hp + h - elif i == 5: # bottom - c = s + w0 - w, s + h0, s + w0, s + h0 + h - elif i == 6: # bottom left - c = s + w0 - wp - w, s + h0, s + w0 - wp, s + h0 + h - elif i == 7: # left - c = s - w, s + h0 - h, s, s + h0 - elif i == 8: # top left - c = s - w, s + h0 - hp - h, s, s + h0 - hp - - padx, pady = c[:2] - x1, y1, x2, y2 = (max(x, 0) for x in c) # allocate coords - - # Labels - labels, segments = self.labels[index].copy(), self.segments[index].copy() - if labels.size: - labels[:, 1:] = xywhn2xyxy(labels[:, 1:], w, h, padx, pady) # normalized xywh to pixel xyxy format - segments = [xyn2xy(x, w, h, padx, pady) for x in segments] - labels9.append(labels) - segments9.extend(segments) - - # Image - img9[y1:y2, x1:x2] = img[y1 - pady:, x1 - padx:] # img9[ymin:ymax, xmin:xmax] - hp, wp = h, w # height, width previous - - # Offset - yc, xc = (int(random.uniform(0, s)) for _ in self.mosaic_border) # mosaic center x, y - img9 = img9[yc:yc + 2 * s, xc:xc + 2 * s] - - # Concat/clip labels - labels9 = np.concatenate(labels9, 0) - labels9[:, [1, 3]] -= xc - labels9[:, [2, 4]] -= yc - c = np.array([xc, yc]) # centers - segments9 = [x - c for x in segments9] - - for x in (labels9[:, 1:], *segments9): - np.clip(x, 0, 2 * s, out=x) # clip when using random_perspective() - # img9, labels9 = replicate(img9, labels9) # replicate - - # Augment - img9, labels9 = random_perspective(img9, - labels9, - segments9, - degrees=self.hyp['degrees'], - translate=self.hyp['translate'], - scale=self.hyp['scale'], - shear=self.hyp['shear'], - perspective=self.hyp['perspective'], - border=self.mosaic_border) # border to remove - - return img9, labels9 - - @staticmethod - def collate_fn(batch): - im, label, path, shapes = zip(*batch) # transposed - for i, lb in enumerate(label): - lb[:, 0] = i # add target image index for build_targets() - return torch.stack(im, 0), torch.cat(label, 0), path, shapes - - @staticmethod - def collate_fn4(batch): - im, label, path, shapes = zip(*batch) # transposed - n = len(shapes) // 4 - im4, label4, path4, shapes4 = [], [], path[:n], shapes[:n] - - ho = torch.tensor([[0.0, 0, 0, 1, 0, 0]]) - wo = torch.tensor([[0.0, 0, 1, 0, 0, 0]]) - s = torch.tensor([[1, 1, 0.5, 0.5, 0.5, 0.5]]) # scale - for i in range(n): # zidane torch.zeros(16,3,720,1280) # BCHW - i *= 4 - if random.random() < 0.5: - im1 = F.interpolate(im[i].unsqueeze(0).float(), scale_factor=2.0, 
mode='bilinear', - align_corners=False)[0].type(im[i].type()) - lb = label[i] - else: - im1 = torch.cat((torch.cat((im[i], im[i + 1]), 1), torch.cat((im[i + 2], im[i + 3]), 1)), 2) - lb = torch.cat((label[i], label[i + 1] + ho, label[i + 2] + wo, label[i + 3] + ho + wo), 0) * s - im4.append(im1) - label4.append(lb) - - for i, lb in enumerate(label4): - lb[:, 0] = i # add target image index for build_targets() - - return torch.stack(im4, 0), torch.cat(label4, 0), path4, shapes4 - - -# Ancillary functions -------------------------------------------------------------------------------------------------- -def flatten_recursive(path=DATASETS_DIR / 'coco128'): - # Flatten a recursive directory by bringing all files to top level - new_path = Path(f'{str(path)}_flat') - if os.path.exists(new_path): - shutil.rmtree(new_path) # delete output folder - os.makedirs(new_path) # make new output folder - for file in tqdm(glob.glob(f'{str(Path(path))}/**/*.*', recursive=True)): - shutil.copyfile(file, new_path / Path(file).name) - - -def extract_boxes(path=DATASETS_DIR / 'coco128'): # from utils.dataloaders import *; extract_boxes() - # Convert detection dataset into classification dataset, with one directory per class - path = Path(path) # images dir - shutil.rmtree(path / 'classification') if (path / 'classification').is_dir() else None # remove existing - files = list(path.rglob('*.*')) - n = len(files) # number of files - for im_file in tqdm(files, total=n): - if im_file.suffix[1:] in IMG_FORMATS: - # image - im = cv2.imread(str(im_file))[..., ::-1] # BGR to RGB - h, w = im.shape[:2] - - # labels - lb_file = Path(img2label_paths([str(im_file)])[0]) - if Path(lb_file).exists(): - with open(lb_file) as f: - lb = np.array([x.split() for x in f.read().strip().splitlines()], dtype=np.float32) # labels - - for j, x in enumerate(lb): - c = int(x[0]) # class - f = (path / 'classifier') / f'{c}' / f'{path.stem}_{im_file.stem}_{j}.jpg' # new filename - if not f.parent.is_dir(): - f.parent.mkdir(parents=True) - - b = x[1:] * [w, h, w, h] # box - # b[2:] = b[2:].max() # rectangle to square - b[2:] = b[2:] * 1.2 + 3 # pad - b = xywh2xyxy(b.reshape(-1, 4)).ravel().astype(int) - - b[[0, 2]] = np.clip(b[[0, 2]], 0, w) # clip boxes outside of image - b[[1, 3]] = np.clip(b[[1, 3]], 0, h) - assert cv2.imwrite(str(f), im[b[1]:b[3], b[0]:b[2]]), f'box failure in {f}' - - -def autosplit(path=DATASETS_DIR / 'coco128/images', weights=(0.9, 0.1, 0.0), annotated_only=False): - """ Autosplit a dataset into train/val/test splits and save path/autosplit_*.txt files - Usage: from utils.dataloaders import *; autosplit() - Arguments - path: Path to images directory - weights: Train, val, test weights (list, tuple) - annotated_only: Only use images with an annotated txt file - """ - path = Path(path) # images dir - files = sorted(x for x in path.rglob('*.*') if x.suffix[1:].lower() in IMG_FORMATS) # image files only - n = len(files) # number of files - random.seed(0) # for reproducibility - indices = random.choices([0, 1, 2], weights=weights, k=n) # assign each image to a split - - txt = ['autosplit_train.txt', 'autosplit_val.txt', 'autosplit_test.txt'] # 3 txt files - for x in txt: - if (path.parent / x).exists(): - (path.parent / x).unlink() # remove existing - - print(f'Autosplitting images from {path}' + ', using *.txt labeled images only' * annotated_only) - for i, img in tqdm(zip(indices, files), total=n): - if not annotated_only or Path(img2label_paths([str(img)])[0]).exists(): # check label - with open(path.parent / txt[i], 
'a') as f: - f.write(f'./{img.relative_to(path.parent).as_posix()}' + '\n') # add image to txt file - - -def verify_image_label(args): - # Verify one image-label pair - im_file, lb_file, prefix = args - nm, nf, ne, nc, msg, segments = 0, 0, 0, 0, '', [] # number (missing, found, empty, corrupt), message, segments - try: - # verify images - im = Image.open(im_file) - im.verify() # PIL verify - shape = exif_size(im) # image size - assert (shape[0] > 9) & (shape[1] > 9), f'image size {shape} <10 pixels' - assert im.format.lower() in IMG_FORMATS, f'invalid image format {im.format}' - if im.format.lower() in ('jpg', 'jpeg'): - with open(im_file, 'rb') as f: - f.seek(-2, 2) - if f.read() != b'\xff\xd9': # corrupt JPEG - ImageOps.exif_transpose(Image.open(im_file)).save(im_file, 'JPEG', subsampling=0, quality=100) - msg = f'{prefix}WARNING: {im_file}: corrupt JPEG restored and saved' - - # verify labels - if os.path.isfile(lb_file): - nf = 1 # label found - with open(lb_file) as f: - lb = [x.split() for x in f.read().strip().splitlines() if len(x)] - if any(len(x) > 6 for x in lb): # is segment - classes = np.array([x[0] for x in lb], dtype=np.float32) - segments = [np.array(x[1:], dtype=np.float32).reshape(-1, 2) for x in lb] # (cls, xy1...) - lb = np.concatenate((classes.reshape(-1, 1), segments2boxes(segments)), 1) # (cls, xywh) - lb = np.array(lb, dtype=np.float32) - nl = len(lb) - if nl: - assert lb.shape[1] == 5, f'labels require 5 columns, {lb.shape[1]} columns detected' - assert (lb >= 0).all(), f'negative label values {lb[lb < 0]}' - assert (lb[:, 1:] <= 1).all(), f'non-normalized or out of bounds coordinates {lb[:, 1:][lb[:, 1:] > 1]}' - _, i = np.unique(lb, axis=0, return_index=True) - if len(i) < nl: # duplicate row check - lb = lb[i] # remove duplicates - if segments: - segments = [segments[x] for x in i] - msg = f'{prefix}WARNING: {im_file}: {nl - len(i)} duplicate labels removed' - else: - ne = 1 # label empty - lb = np.zeros((0, 5), dtype=np.float32) - else: - nm = 1 # label missing - lb = np.zeros((0, 5), dtype=np.float32) - return im_file, lb, shape, segments, nm, nf, ne, nc, msg - except Exception as e: - nc = 1 - msg = f'{prefix}WARNING: {im_file}: ignoring corrupt image/label: {e}' - return [None, None, None, None, nm, nf, ne, nc, msg] - - -class HUBDatasetStats(): - """ Return dataset statistics dictionary with images and instances counts per split per class - To run in parent directory: export PYTHONPATH="$PWD/yolov5" - Usage1: from utils.dataloaders import *; HUBDatasetStats('coco128.yaml', autodownload=True) - Usage2: from utils.dataloaders import *; HUBDatasetStats('path/to/coco128_with_yaml.zip') - Arguments - path: Path to data.yaml or data.zip (with data.yaml inside data.zip) - autodownload: Attempt to download dataset if not found locally - """ - - def __init__(self, path='coco128.yaml', autodownload=False): - # Initialize class - zipped, data_dir, yaml_path = self._unzip(Path(path)) - try: - with open(check_yaml(yaml_path), errors='ignore') as f: - data = yaml.safe_load(f) # data dict - if zipped: - data['path'] = data_dir - except Exception as e: - raise Exception("error/HUB/dataset_stats/yaml_load") from e - - check_dataset(data, autodownload) # download dataset if missing - self.hub_dir = Path(data['path'] + '-hub') - self.im_dir = self.hub_dir / 'images' - self.im_dir.mkdir(parents=True, exist_ok=True) # makes /images - self.stats = {'nc': data['nc'], 'names': list(data['names'].values())} # statistics dictionary - self.data = data - - @staticmethod - def 
_find_yaml(dir): - # Return data.yaml file - files = list(dir.glob('*.yaml')) or list(dir.rglob('*.yaml')) # try root level first and then recursive - assert files, f'No *.yaml file found in {dir}' - if len(files) > 1: - files = [f for f in files if f.stem == dir.stem] # prefer *.yaml files that match dir name - assert files, f'Multiple *.yaml files found in {dir}, only 1 *.yaml file allowed' - assert len(files) == 1, f'Multiple *.yaml files found: {files}, only 1 *.yaml file allowed in {dir}' - return files[0] - - def _unzip(self, path): - # Unzip data.zip - if not str(path).endswith('.zip'): # path is data.yaml - return False, None, path - assert Path(path).is_file(), f'Error unzipping {path}, file not found' - ZipFile(path).extractall(path=path.parent) # unzip - dir = path.with_suffix('') # dataset directory == zip name - assert dir.is_dir(), f'Error unzipping {path}, {dir} not found. path/to/abc.zip MUST unzip to path/to/abc/' - return True, str(dir), self._find_yaml(dir) # zipped, data_dir, yaml_path - - def _hub_ops(self, f, max_dim=1920): - # HUB ops for 1 image 'f': resize and save at reduced quality in /dataset-hub for web/app viewing - f_new = self.im_dir / Path(f).name # dataset-hub image filename - try: # use PIL - im = Image.open(f) - r = max_dim / max(im.height, im.width) # ratio - if r < 1.0: # image too large - im = im.resize((int(im.width * r), int(im.height * r))) - im.save(f_new, 'JPEG', quality=50, optimize=True) # save - except Exception as e: # use OpenCV - print(f'WARNING: HUB ops PIL failure {f}: {e}') - im = cv2.imread(f) - im_height, im_width = im.shape[:2] - r = max_dim / max(im_height, im_width) # ratio - if r < 1.0: # image too large - im = cv2.resize(im, (int(im_width * r), int(im_height * r)), interpolation=cv2.INTER_AREA) - cv2.imwrite(str(f_new), im) - - def get_json(self, save=False, verbose=False): - # Return dataset JSON for Ultralytics HUB - def _round(labels): - # Update labels to integer class and 6 decimal place floats - return [[int(c), *(round(x, 4) for x in points)] for c, *points in labels] - - for split in 'train', 'val', 'test': - if self.data.get(split) is None: - self.stats[split] = None # i.e. no test set - continue - dataset = LoadImagesAndLabels(self.data[split]) # load dataset - x = np.array([ - np.bincount(label[:, 0].astype(int), minlength=self.data['nc']) - for label in tqdm(dataset.labels, total=dataset.n, desc='Statistics')]) # shape(128x80) - self.stats[split] = { - 'instance_stats': { - 'total': int(x.sum()), - 'per_class': x.sum(0).tolist()}, - 'image_stats': { - 'total': dataset.n, - 'unlabelled': int(np.all(x == 0, 1).sum()), - 'per_class': (x > 0).sum(0).tolist()}, - 'labels': [{ - str(Path(k).name): _round(v.tolist())} for k, v in zip(dataset.im_files, dataset.labels)]} - - # Save, print and return - if save: - stats_path = self.hub_dir / 'stats.json' - print(f'Saving {stats_path.resolve()}...') - with open(stats_path, 'w') as f: - json.dump(self.stats, f) # save stats.json - if verbose: - print(json.dumps(self.stats, indent=2, sort_keys=False)) - return self.stats - - def process_images(self): - # Compress images for Ultralytics HUB - for split in 'train', 'val', 'test': - if self.data.get(split) is None: - continue - dataset = LoadImagesAndLabels(self.data[split]) # load dataset - desc = f'{split} images' - for _ in tqdm(ThreadPool(NUM_THREADS).imap(self._hub_ops, dataset.im_files), total=dataset.n, desc=desc): - pass - print(f'Done. 
All images saved to {self.im_dir}') - return self.im_dir - - -# Classification dataloaders ------------------------------------------------------------------------------------------- -class ClassificationDataset(torchvision.datasets.ImageFolder): - """ - YOLOv5 Classification Dataset. - Arguments - root: Dataset path - transform: torchvision transforms, used by default - album_transform: Albumentations transforms, used if installed - """ - - def __init__(self, root, augment, imgsz, cache=False): - super().__init__(root=root) - self.torch_transforms = classify_transforms(imgsz) - self.album_transforms = classify_albumentations(augment, imgsz) if augment else None - self.cache_ram = cache is True or cache == 'ram' - self.cache_disk = cache == 'disk' - self.samples = [list(x) + [Path(x[0]).with_suffix('.npy'), None] for x in self.samples] # file, index, npy, im - - def __getitem__(self, i): - f, j, fn, im = self.samples[i] # filename, index, filename.with_suffix('.npy'), image - if self.cache_ram and im is None: - im = self.samples[i][3] = cv2.imread(f) - elif self.cache_disk: - if not fn.exists(): # load npy - np.save(fn.as_posix(), cv2.imread(f)) - im = np.load(fn) - else: # read image - im = cv2.imread(f) # BGR - if self.album_transforms: - sample = self.album_transforms(image=cv2.cvtColor(im, cv2.COLOR_BGR2RGB))["image"] - else: - sample = self.torch_transforms(im) - return sample, j - - -def create_classification_dataloader(path, - imgsz=224, - batch_size=16, - augment=True, - cache=False, - rank=-1, - workers=8, - shuffle=True): - # Returns Dataloader object to be used with YOLOv5 Classifier - with torch_distributed_zero_first(rank): # init dataset *.cache only once if DDP - dataset = ClassificationDataset(root=path, imgsz=imgsz, augment=augment, cache=cache) - batch_size = min(batch_size, len(dataset)) - nd = torch.cuda.device_count() - nw = min([os.cpu_count() // max(nd, 1), batch_size if batch_size > 1 else 0, workers]) - sampler = None if rank == -1 else distributed.DistributedSampler(dataset, shuffle=shuffle) - generator = torch.Generator() - generator.manual_seed(0) - return InfiniteDataLoader(dataset, - batch_size=batch_size, - shuffle=shuffle and sampler is None, - num_workers=nw, - sampler=sampler, - pin_memory=PIN_MEMORY, - worker_init_fn=seed_worker, - generator=generator) # or DataLoader(persistent_workers=True) diff --git a/src/yolov5_ros/src/yolov5/utils/docker/Dockerfile b/src/yolov5_ros/src/yolov5/utils/docker/Dockerfile deleted file mode 100644 index 4b9367c..0000000 --- a/src/yolov5_ros/src/yolov5/utils/docker/Dockerfile +++ /dev/null @@ -1,65 +0,0 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license -# Builds ultralytics/yolov5:latest image on DockerHub https://hub.docker.com/r/ultralytics/yolov5 -# Image is CUDA-optimized for YOLOv5 single/multi-GPU training and inference - -# Start FROM NVIDIA PyTorch image https://ngc.nvidia.com/catalog/containers/nvidia:pytorch -FROM nvcr.io/nvidia/pytorch:22.07-py3 -RUN rm -rf /opt/pytorch # remove 1.2GB dir - -# Downloads to user config dir -ADD https://ultralytics.com/assets/Arial.ttf https://ultralytics.com/assets/Arial.Unicode.ttf /root/.config/Ultralytics/ - -# Install linux packages -RUN apt update && apt install --no-install-recommends -y zip htop screen libgl1-mesa-glx - -# Install pip packages -COPY requirements.txt . 
-RUN python -m pip install --upgrade pip wheel -RUN pip uninstall -y Pillow torchtext torch torchvision -RUN pip install --no-cache -r requirements.txt albumentations wandb gsutil notebook Pillow>=9.1.0 \ - 'opencv-python<4.6.0.66' \ - --extra-index-url https://download.pytorch.org/whl/cu113 - -# Create working directory -RUN mkdir -p /usr/src/app -WORKDIR /usr/src/app - -# Copy contents -# COPY . /usr/src/app (issues as not a .git directory) -RUN git clone https://github.com/ultralytics/yolov5 /usr/src/app - -# Set environment variables -ENV OMP_NUM_THREADS=8 - - -# Usage Examples ------------------------------------------------------------------------------------------------------- - -# Build and Push -# t=ultralytics/yolov5:latest && sudo docker build -f utils/docker/Dockerfile -t $t . && sudo docker push $t - -# Pull and Run -# t=ultralytics/yolov5:latest && sudo docker pull $t && sudo docker run -it --ipc=host --gpus all $t - -# Pull and Run with local directory access -# t=ultralytics/yolov5:latest && sudo docker pull $t && sudo docker run -it --ipc=host --gpus all -v "$(pwd)"/datasets:/usr/src/datasets $t - -# Kill all -# sudo docker kill $(sudo docker ps -q) - -# Kill all image-based -# sudo docker kill $(sudo docker ps -qa --filter ancestor=ultralytics/yolov5:latest) - -# DockerHub tag update -# t=ultralytics/yolov5:latest tnew=ultralytics/yolov5:v6.2 && sudo docker pull $t && sudo docker tag $t $tnew && sudo docker push $tnew - -# Clean up -# docker system prune -a --volumes - -# Update Ubuntu drivers -# https://www.maketecheasier.com/install-nvidia-drivers-ubuntu/ - -# DDP test -# python -m torch.distributed.run --nproc_per_node 2 --master_port 1 train.py --epochs 3 - -# GCP VM from Image -# docker.io/ultralytics/yolov5:latest diff --git a/src/yolov5_ros/src/yolov5/utils/docker/Dockerfile-arm64 b/src/yolov5_ros/src/yolov5/utils/docker/Dockerfile-arm64 deleted file mode 100644 index 6e8ff77..0000000 --- a/src/yolov5_ros/src/yolov5/utils/docker/Dockerfile-arm64 +++ /dev/null @@ -1,41 +0,0 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license -# Builds ultralytics/yolov5:latest-arm64 image on DockerHub https://hub.docker.com/r/ultralytics/yolov5 -# Image is aarch64-compatible for Apple M1 and other ARM architectures i.e. Jetson Nano and Raspberry Pi - -# Start FROM Ubuntu image https://hub.docker.com/_/ubuntu -FROM arm64v8/ubuntu:20.04 - -# Downloads to user config dir -ADD https://ultralytics.com/assets/Arial.ttf https://ultralytics.com/assets/Arial.Unicode.ttf /root/.config/Ultralytics/ - -# Install linux packages -RUN apt update -RUN DEBIAN_FRONTEND=noninteractive TZ=Etc/UTC apt install -y tzdata -RUN apt install --no-install-recommends -y python3-pip git zip curl htop gcc libgl1-mesa-glx libglib2.0-0 libpython3-dev -# RUN alias python=python3 - -# Install pip packages -COPY requirements.txt . -RUN python3 -m pip install --upgrade pip wheel -RUN pip install --no-cache -r requirements.txt gsutil notebook \ - tensorflow-aarch64 - # tensorflowjs \ - # onnx onnx-simplifier onnxruntime \ - # coremltools openvino-dev \ - -# Create working directory -RUN mkdir -p /usr/src/app -WORKDIR /usr/src/app - -# Copy contents -# COPY . 
/usr/src/app (issues as not a .git directory) -RUN git clone https://github.com/ultralytics/yolov5 /usr/src/app - - -# Usage Examples ------------------------------------------------------------------------------------------------------- - -# Build and Push -# t=ultralytics/yolov5:latest-M1 && sudo docker build --platform linux/arm64 -f utils/docker/Dockerfile-arm64 -t $t . && sudo docker push $t - -# Pull and Run -# t=ultralytics/yolov5:latest-M1 && sudo docker pull $t && sudo docker run -it --ipc=host -v "$(pwd)"/datasets:/usr/src/datasets $t diff --git a/src/yolov5_ros/src/yolov5/utils/docker/Dockerfile-cpu b/src/yolov5_ros/src/yolov5/utils/docker/Dockerfile-cpu deleted file mode 100644 index d6fac64..0000000 --- a/src/yolov5_ros/src/yolov5/utils/docker/Dockerfile-cpu +++ /dev/null @@ -1,40 +0,0 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license -# Builds ultralytics/yolov5:latest-cpu image on DockerHub https://hub.docker.com/r/ultralytics/yolov5 -# Image is CPU-optimized for ONNX, OpenVINO and PyTorch YOLOv5 deployments - -# Start FROM Ubuntu image https://hub.docker.com/_/ubuntu -FROM ubuntu:20.04 - -# Downloads to user config dir -ADD https://ultralytics.com/assets/Arial.ttf https://ultralytics.com/assets/Arial.Unicode.ttf /root/.config/Ultralytics/ - -# Install linux packages -RUN apt update -RUN DEBIAN_FRONTEND=noninteractive TZ=Etc/UTC apt install -y tzdata -RUN apt install --no-install-recommends -y python3-pip git zip curl htop libgl1-mesa-glx libglib2.0-0 libpython3-dev -# RUN alias python=python3 - -# Install pip packages -COPY requirements.txt . -RUN python3 -m pip install --upgrade pip wheel -RUN pip install --no-cache -r requirements.txt albumentations gsutil notebook \ - coremltools onnx onnx-simplifier onnxruntime tensorflow-cpu tensorflowjs \ - # openvino-dev \ - --extra-index-url https://download.pytorch.org/whl/cpu - -# Create working directory -RUN mkdir -p /usr/src/app -WORKDIR /usr/src/app - -# Copy contents -# COPY . /usr/src/app (issues as not a .git directory) -RUN git clone https://github.com/ultralytics/yolov5 /usr/src/app - - -# Usage Examples ------------------------------------------------------------------------------------------------------- - -# Build and Push -# t=ultralytics/yolov5:latest-cpu && sudo docker build -f utils/docker/Dockerfile-cpu -t $t . 
&& sudo docker push $t - -# Pull and Run -# t=ultralytics/yolov5:latest-cpu && sudo docker pull $t && sudo docker run -it --ipc=host -v "$(pwd)"/datasets:/usr/src/datasets $t diff --git a/src/yolov5_ros/src/yolov5/utils/downloads.py b/src/yolov5_ros/src/yolov5/utils/downloads.py deleted file mode 100644 index dd2698f..0000000 --- a/src/yolov5_ros/src/yolov5/utils/downloads.py +++ /dev/null @@ -1,192 +0,0 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license -""" -Download utils -""" - -import logging -import os -import platform -import subprocess -import time -import urllib -from pathlib import Path -from zipfile import ZipFile - -import requests -import torch - - -def is_url(url, check_online=True): - # Check if online file exists - try: - url = str(url) - result = urllib.parse.urlparse(url) - assert all([result.scheme, result.netloc, result.path]) # check if is url - return (urllib.request.urlopen(url).getcode() == 200) if check_online else True # check if exists online - except (AssertionError, urllib.request.HTTPError): - return False - - -def gsutil_getsize(url=''): - # gs://bucket/file size https://cloud.google.com/storage/docs/gsutil/commands/du - s = subprocess.check_output(f'gsutil du {url}', shell=True).decode('utf-8') - return eval(s.split(' ')[0]) if len(s) else 0 # bytes - - -def url_getsize(url='https://ultralytics.com/images/bus.jpg'): - # Return downloadable file size in bytes - response = requests.head(url, allow_redirects=True) - return int(response.headers.get('content-length', -1)) - - -def safe_download(file, url, url2=None, min_bytes=1E0, error_msg=''): - # Attempts to download file from url or url2, checks and removes incomplete downloads < min_bytes - from utils.general import LOGGER - - file = Path(file) - assert_msg = f"Downloaded file '{file}' does not exist or size is < min_bytes={min_bytes}" - try: # url1 - LOGGER.info(f'Downloading {url} to {file}...') - torch.hub.download_url_to_file(url, str(file), progress=LOGGER.level <= logging.INFO) - assert file.exists() and file.stat().st_size > min_bytes, assert_msg # check - except Exception as e: # url2 - if file.exists(): - file.unlink() # remove partial downloads - LOGGER.info(f'ERROR: {e}\nRe-attempting {url2 or url} to {file}...') - os.system(f"curl -# -L '{url2 or url}' -o '{file}' --retry 3 -C -") # curl download, retry and resume on fail - finally: - if not file.exists() or file.stat().st_size < min_bytes: # check - if file.exists(): - file.unlink() # remove partial downloads - LOGGER.info(f"ERROR: {assert_msg}\n{error_msg}") - LOGGER.info('') - - -def attempt_download(file, repo='ultralytics/yolov5', release='v6.2'): - # Attempt file download from GitHub release assets if not found locally. release = 'latest', 'v6.2', etc. - from utils.general import LOGGER - - def github_assets(repository, version='latest'): - # Return GitHub repo tag (i.e. 'v6.2') and assets (i.e. ['yolov5s.pt', 'yolov5m.pt', ...]) - if version != 'latest': - version = f'tags/{version}' # i.e. tags/v6.2 - response = requests.get(f'https://api.github.com/repos/{repository}/releases/{version}').json() # github api - return response['tag_name'], [x['name'] for x in response['assets']] # tag, assets - - file = Path(str(file).strip().replace("'", '')) - if not file.exists(): - # URL specified - name = Path(urllib.parse.unquote(str(file))).name # decode '%2F' to '/' etc. 
- if str(file).startswith(('http:/', 'https:/')): # download - url = str(file).replace(':/', '://') # Pathlib turns :// -> :/ - file = name.split('?')[0] # parse authentication https://url.com/file.txt?auth... - if Path(file).is_file(): - LOGGER.info(f'Found {url} locally at {file}') # file already exists - else: - safe_download(file=file, url=url, min_bytes=1E5) - return file - - # GitHub assets - assets = [ - 'yolov5n.pt', 'yolov5s.pt', 'yolov5m.pt', 'yolov5l.pt', 'yolov5x.pt', 'yolov5n6.pt', 'yolov5s6.pt', - 'yolov5m6.pt', 'yolov5l6.pt', 'yolov5x6.pt'] - try: - tag, assets = github_assets(repo, release) - except Exception: - try: - tag, assets = github_assets(repo) # latest release - except Exception: - try: - tag = subprocess.check_output('git tag', shell=True, stderr=subprocess.STDOUT).decode().split()[-1] - except Exception: - tag = release - - file.parent.mkdir(parents=True, exist_ok=True) # make parent dir (if required) - if name in assets: - url3 = 'https://drive.google.com/drive/folders/1EFQTEUeXWSFww0luse2jB9M1QNZQGwNl' # backup gdrive mirror - safe_download( - file, - url=f'https://github.com/{repo}/releases/download/{tag}/{name}', - url2=f'https://storage.googleapis.com/{repo}/{tag}/{name}', # backup url (optional) - min_bytes=1E5, - error_msg=f'{file} missing, try downloading from https://github.com/{repo}/releases/{tag} or {url3}') - - return str(file) - - -def gdrive_download(id='16TiPfZj7htmTyhntwcZyEEAejOUxuT6m', file='tmp.zip'): - # Downloads a file from Google Drive. from yolov5.utils.downloads import *; gdrive_download() - t = time.time() - file = Path(file) - cookie = Path('cookie') # gdrive cookie - print(f'Downloading https://drive.google.com/uc?export=download&id={id} as {file}... ', end='') - if file.exists(): - file.unlink() # remove existing file - if cookie.exists(): - cookie.unlink() # remove existing cookie - - # Attempt file download - out = "NUL" if platform.system() == "Windows" else "/dev/null" - os.system(f'curl -c ./cookie -s -L "drive.google.com/uc?export=download&id={id}" > {out}') - if os.path.exists('cookie'): # large file - s = f'curl -Lb ./cookie "drive.google.com/uc?export=download&confirm={get_token()}&id={id}" -o {file}' - else: # small file - s = f'curl -s -L -o {file} "drive.google.com/uc?export=download&id={id}"' - r = os.system(s) # execute, capture return - if cookie.exists(): - cookie.unlink() # remove existing cookie - - # Error check - if r != 0: - if file.exists(): - file.unlink() # remove partial - print('Download error ') # raise Exception('Download error') - return r - - # Unzip if archive - if file.suffix == '.zip': - print('unzipping... 
', end='') - ZipFile(file).extractall(path=file.parent) # unzip - file.unlink() # remove zip - - print(f'Done ({time.time() - t:.1f}s)') - return r - - -def get_token(cookie="./cookie"): - with open(cookie) as f: - for line in f: - if "download" in line: - return line.split()[-1] - return "" - - -# Google utils: https://cloud.google.com/storage/docs/reference/libraries ---------------------------------------------- -# -# -# def upload_blob(bucket_name, source_file_name, destination_blob_name): -# # Uploads a file to a bucket -# # https://cloud.google.com/storage/docs/uploading-objects#storage-upload-object-python -# -# storage_client = storage.Client() -# bucket = storage_client.get_bucket(bucket_name) -# blob = bucket.blob(destination_blob_name) -# -# blob.upload_from_filename(source_file_name) -# -# print('File {} uploaded to {}.'.format( -# source_file_name, -# destination_blob_name)) -# -# -# def download_blob(bucket_name, source_blob_name, destination_file_name): -# # Uploads a blob from a bucket -# storage_client = storage.Client() -# bucket = storage_client.get_bucket(bucket_name) -# blob = bucket.blob(source_blob_name) -# -# blob.download_to_filename(destination_file_name) -# -# print('Blob {} downloaded to {}.'.format( -# source_blob_name, -# destination_file_name)) diff --git a/src/yolov5_ros/src/yolov5/utils/flask_rest_api/README.md b/src/yolov5_ros/src/yolov5/utils/flask_rest_api/README.md deleted file mode 100644 index a726acb..0000000 --- a/src/yolov5_ros/src/yolov5/utils/flask_rest_api/README.md +++ /dev/null @@ -1,73 +0,0 @@ -# Flask REST API - -[REST](https://en.wikipedia.org/wiki/Representational_state_transfer) [API](https://en.wikipedia.org/wiki/API)s are -commonly used to expose Machine Learning (ML) models to other services. This folder contains an example REST API -created using Flask to expose the YOLOv5s model from [PyTorch Hub](https://pytorch.org/hub/ultralytics_yolov5/). - -## Requirements - -[Flask](https://palletsprojects.com/p/flask/) is required. 
Install with: - -```shell -$ pip install Flask -``` - -## Run - -After Flask installation run: - -```shell -$ python3 restapi.py --port 5000 -``` - -Then use [curl](https://curl.se/) to perform a request: - -```shell -$ curl -X POST -F image=@zidane.jpg 'http://localhost:5000/v1/object-detection/yolov5s' -``` - -The model inference results are returned as a JSON response: - -```json -[ - { - "class": 0, - "confidence": 0.8900438547, - "height": 0.9318675399, - "name": "person", - "width": 0.3264600933, - "xcenter": 0.7438579798, - "ycenter": 0.5207948685 - }, - { - "class": 0, - "confidence": 0.8440024257, - "height": 0.7155083418, - "name": "person", - "width": 0.6546785235, - "xcenter": 0.427829951, - "ycenter": 0.6334488392 - }, - { - "class": 27, - "confidence": 0.3771208823, - "height": 0.3902671337, - "name": "tie", - "width": 0.0696444362, - "xcenter": 0.3675483763, - "ycenter": 0.7991207838 - }, - { - "class": 27, - "confidence": 0.3527112305, - "height": 0.1540903747, - "name": "tie", - "width": 0.0336618312, - "xcenter": 0.7814827561, - "ycenter": 0.5065554976 - } -] -``` - -An example python script to perform inference using [requests](https://docs.python-requests.org/en/master/) is given -in `example_request.py` diff --git a/src/yolov5_ros/src/yolov5/utils/flask_rest_api/example_request.py b/src/yolov5_ros/src/yolov5/utils/flask_rest_api/example_request.py deleted file mode 100644 index 773ad89..0000000 --- a/src/yolov5_ros/src/yolov5/utils/flask_rest_api/example_request.py +++ /dev/null @@ -1,19 +0,0 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license -""" -Perform test request -""" - -import pprint - -import requests - -DETECTION_URL = "http://localhost:5000/v1/object-detection/yolov5s" -IMAGE = "zidane.jpg" - -# Read image -with open(IMAGE, "rb") as f: - image_data = f.read() - -response = requests.post(DETECTION_URL, files={"image": image_data}).json() - -pprint.pprint(response) diff --git a/src/yolov5_ros/src/yolov5/utils/flask_rest_api/restapi.py b/src/yolov5_ros/src/yolov5/utils/flask_rest_api/restapi.py deleted file mode 100644 index 8482435..0000000 --- a/src/yolov5_ros/src/yolov5/utils/flask_rest_api/restapi.py +++ /dev/null @@ -1,48 +0,0 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license -""" -Run a Flask REST API exposing one or more YOLOv5s models -""" - -import argparse -import io - -import torch -from flask import Flask, request -from PIL import Image - -app = Flask(__name__) -models = {} - -DETECTION_URL = "/v1/object-detection/<model>" - - -@app.route(DETECTION_URL, methods=["POST"]) -def predict(model): - if request.method != "POST": - return - - if request.files.get("image"): - # Method 1 - # with request.files["image"] as f: - # im = Image.open(io.BytesIO(f.read())) - - # Method 2 - im_file = request.files["image"] - im_bytes = im_file.read() - im = Image.open(io.BytesIO(im_bytes)) - - if model in models: - results = models[model](im, size=640) # reduce size=320 for faster inference - return results.pandas().xyxy[0].to_json(orient="records") - - -if __name__ == "__main__": - parser = argparse.ArgumentParser(description="Flask API exposing YOLOv5 model") - parser.add_argument("--port", default=5000, type=int, help="port number") - parser.add_argument('--model', nargs='+', default=['yolov5s'], help='model(s) to run, i.e. 
--model yolov5n yolov5s') - opt = parser.parse_args() - - for m in opt.model: - models[m] = torch.hub.load("ultralytics/yolov5", m, force_reload=True, skip_validation=True) - - app.run(host="0.0.0.0", port=opt.port) # debug=True causes Restarting with stat diff --git a/src/yolov5_ros/src/yolov5/utils/general.py b/src/yolov5_ros/src/yolov5/utils/general.py deleted file mode 100755 index f5fb2c9..0000000 --- a/src/yolov5_ros/src/yolov5/utils/general.py +++ /dev/null @@ -1,1046 +0,0 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license -""" -General utils -""" - -import contextlib -import glob -import inspect -import logging -import math -import os -import platform -import random -import re -import shutil -import signal -import sys -import time -import urllib -from copy import deepcopy -from datetime import datetime -from itertools import repeat -from multiprocessing.pool import ThreadPool -from pathlib import Path -from subprocess import check_output -from typing import Optional -from zipfile import ZipFile - -import cv2 -import numpy as np -import pandas as pd -import pkg_resources as pkg -import torch -import torchvision -import yaml - -from utils import TryExcept -from utils.downloads import gsutil_getsize -from utils.metrics import box_iou, fitness - -FILE = Path(__file__).resolve() -ROOT = FILE.parents[1] # YOLOv5 root directory -RANK = int(os.getenv('RANK', -1)) - -# Settings -DATASETS_DIR = ROOT.parent / 'datasets' # YOLOv5 datasets directory -NUM_THREADS = min(8, max(1, os.cpu_count() - 1)) # number of YOLOv5 multiprocessing threads -AUTOINSTALL = str(os.getenv('YOLOv5_AUTOINSTALL', True)).lower() == 'true' # global auto-install mode -VERBOSE = str(os.getenv('YOLOv5_VERBOSE', True)).lower() == 'true' # global verbose mode -FONT = 'Arial.ttf' # https://ultralytics.com/assets/Arial.ttf - -torch.set_printoptions(linewidth=320, precision=5, profile='long') -np.set_printoptions(linewidth=320, formatter={'float_kind': '{:11.5g}'.format}) # format short g, %precision=5 -pd.options.display.max_columns = 10 -cv2.setNumThreads(0) # prevent OpenCV from multithreading (incompatible with PyTorch DataLoader) -os.environ['NUMEXPR_MAX_THREADS'] = str(NUM_THREADS) # NumExpr max threads -os.environ['OMP_NUM_THREADS'] = '1' if platform.system() == 'darwin' else str(NUM_THREADS) # OpenMP (PyTorch and SciPy) - - -def is_ascii(s=''): - # Is string composed of all ASCII (no UTF) characters? (note str().isascii() introduced in python 3.7) - s = str(s) # convert list, tuple, None, etc. to str - return len(s.encode().decode('ascii', 'ignore')) == len(s) - - -def is_chinese(s='人工智能'): - # Is string composed of any Chinese characters? - return bool(re.search('[\u4e00-\u9fff]', str(s))) - - -def is_colab(): - # Is environment a Google Colab instance? - return 'COLAB_GPU' in os.environ - - -def is_kaggle(): - # Is environment a Kaggle Notebook? 
- return os.environ.get('PWD') == '/kaggle/working' and os.environ.get('KAGGLE_URL_BASE') == 'https://www.kaggle.com' - - -def is_docker() -> bool: - """Check if the process runs inside a docker container.""" - if Path("/.dockerenv").exists(): - return True - try: # check if docker is in control groups - with open("/proc/self/cgroup") as file: - return any("docker" in line for line in file) - except OSError: - return False - - -def is_writeable(dir, test=False): - # Return True if directory has write permissions, test opening a file with write permissions if test=True - if not test: - return os.access(dir, os.W_OK) # possible issues on Windows - file = Path(dir) / 'tmp.txt' - try: - with open(file, 'w'): # open file with write permissions - pass - file.unlink() # remove file - return True - except OSError: - return False - - -def set_logging(name=None, verbose=VERBOSE): - # Sets level and returns logger - if is_kaggle() or is_colab(): - for h in logging.root.handlers: - logging.root.removeHandler(h) # remove all handlers associated with the root logger object - rank = int(os.getenv('RANK', -1)) # rank in world for Multi-GPU trainings - level = logging.INFO if verbose and rank in {-1, 0} else logging.ERROR - log = logging.getLogger(name) - log.setLevel(level) - handler = logging.StreamHandler() - handler.setFormatter(logging.Formatter("%(message)s")) - handler.setLevel(level) - log.addHandler(handler) - - -set_logging() # run before defining LOGGER -LOGGER = logging.getLogger("yolov5") # define globally (used in train.py, val.py, detect.py, etc.) -if platform.system() == 'Windows': - for fn in LOGGER.info, LOGGER.warning: - setattr(LOGGER, fn.__name__, lambda x: fn(emojis(x))) # emoji safe logging - - -def user_config_dir(dir='Ultralytics', env_var='YOLOV5_CONFIG_DIR'): - # Return path of user configuration directory. Prefer environment variable if exists. Make dir if required. - env = os.getenv(env_var) - if env: - path = Path(env) # use environment variable - else: - cfg = {'Windows': 'AppData/Roaming', 'Linux': '.config', 'Darwin': 'Library/Application Support'} # 3 OS dirs - path = Path.home() / cfg.get(platform.system(), '') # OS-specific config dir - path = (path if is_writeable(path) else Path('/tmp')) / dir # GCP and AWS lambda fix, only /tmp is writeable - path.mkdir(exist_ok=True) # make if required - return path - - -CONFIG_DIR = user_config_dir() # Ultralytics settings dir - - -class Profile(contextlib.ContextDecorator): - # YOLOv5 Profile class. Usage: @Profile() decorator or 'with Profile():' context manager - def __init__(self, t=0.0): - self.t = t - self.cuda = torch.cuda.is_available() - - def __enter__(self): - self.start = self.time() - return self - - def __exit__(self, type, value, traceback): - self.dt = self.time() - self.start # delta-time - self.t += self.dt # accumulate dt - - def time(self): - if self.cuda: - torch.cuda.synchronize() - return time.time() - - -class Timeout(contextlib.ContextDecorator): - # YOLOv5 Timeout class. 
Usage: @Timeout(seconds) decorator or 'with Timeout(seconds):' context manager - def __init__(self, seconds, *, timeout_msg='', suppress_timeout_errors=True): - self.seconds = int(seconds) - self.timeout_message = timeout_msg - self.suppress = bool(suppress_timeout_errors) - - def _timeout_handler(self, signum, frame): - raise TimeoutError(self.timeout_message) - - def __enter__(self): - if platform.system() != 'Windows': # not supported on Windows - signal.signal(signal.SIGALRM, self._timeout_handler) # Set handler for SIGALRM - signal.alarm(self.seconds) # start countdown for SIGALRM to be raised - - def __exit__(self, exc_type, exc_val, exc_tb): - if platform.system() != 'Windows': - signal.alarm(0) # Cancel SIGALRM if it's scheduled - if self.suppress and exc_type is TimeoutError: # Suppress TimeoutError - return True - - -class WorkingDirectory(contextlib.ContextDecorator): - # Usage: @WorkingDirectory(dir) decorator or 'with WorkingDirectory(dir):' context manager - def __init__(self, new_dir): - self.dir = new_dir # new dir - self.cwd = Path.cwd().resolve() # current dir - - def __enter__(self): - os.chdir(self.dir) - - def __exit__(self, exc_type, exc_val, exc_tb): - os.chdir(self.cwd) - - -def methods(instance): - # Get class/instance methods - return [f for f in dir(instance) if callable(getattr(instance, f)) and not f.startswith("__")] - - -def print_args(args: Optional[dict] = None, show_file=True, show_func=False): - # Print function arguments (optional args dict) - x = inspect.currentframe().f_back # previous frame - file, _, func, _, _ = inspect.getframeinfo(x) - if args is None: # get args automatically - args, _, _, frm = inspect.getargvalues(x) - args = {k: v for k, v in frm.items() if k in args} - try: - file = Path(file).resolve().relative_to(ROOT).with_suffix('') - except ValueError: - file = Path(file).stem - s = (f'{file}: ' if show_file else '') + (f'{func}: ' if show_func else '') - LOGGER.info(colorstr(s) + ', '.join(f'{k}={v}' for k, v in args.items())) - - -def init_seeds(seed=0, deterministic=False): - # Initialize random number generator (RNG) seeds https://pytorch.org/docs/stable/notes/randomness.html - random.seed(seed) - np.random.seed(seed) - torch.manual_seed(seed) - torch.cuda.manual_seed(seed) - torch.cuda.manual_seed_all(seed) # for Multi-GPU, exception safe - torch.backends.cudnn.benchmark = True # for faster training - if deterministic and check_version(torch.__version__, '1.12.0'): # https://github.com/ultralytics/yolov5/pull/8213 - torch.use_deterministic_algorithms(True) - torch.backends.cudnn.deterministic = True - os.environ['CUBLAS_WORKSPACE_CONFIG'] = ':4096:8' - os.environ['PYTHONHASHSEED'] = str(seed) - - -def intersect_dicts(da, db, exclude=()): - # Dictionary intersection of matching keys and shapes, omitting 'exclude' keys, using da values - return {k: v for k, v in da.items() if k in db and all(x not in k for x in exclude) and v.shape == db[k].shape} - - -def get_default_args(func): - # Get func() default arguments - signature = inspect.signature(func) - return {k: v.default for k, v in signature.parameters.items() if v.default is not inspect.Parameter.empty} - - -def get_latest_run(search_dir='.'): - # Return path to most recent 'last.pt' in /runs (i.e. 
to --resume from) - last_list = glob.glob(f'{search_dir}/**/last*.pt', recursive=True) - return max(last_list, key=os.path.getctime) if last_list else '' - - -def emojis(str=''): - # Return platform-dependent emoji-safe version of string - return str.encode().decode('ascii', 'ignore') if platform.system() == 'Windows' else str - - -def file_age(path=__file__): - # Return days since last file update - dt = (datetime.now() - datetime.fromtimestamp(Path(path).stat().st_mtime)) # delta - return dt.days # + dt.seconds / 86400 # fractional days - - -def file_date(path=__file__): - # Return human-readable file modification date, i.e. '2021-3-26' - t = datetime.fromtimestamp(Path(path).stat().st_mtime) - return f'{t.year}-{t.month}-{t.day}' - - -def file_size(path): - # Return file/dir size (MB) - mb = 1 << 20 # bytes to MiB (1024 ** 2) - path = Path(path) - if path.is_file(): - return path.stat().st_size / mb - elif path.is_dir(): - return sum(f.stat().st_size for f in path.glob('**/*') if f.is_file()) / mb - else: - return 0.0 - - -def check_online(): - # Check internet connectivity - import socket - try: - socket.create_connection(("1.1.1.1", 443), 5) # check host accessibility - return True - except OSError: - return False - - -def git_describe(path=ROOT): # path must be a directory - # Return human-readable git description, i.e. v5.0-5-g3e25f1e https://git-scm.com/docs/git-describe - try: - assert (Path(path) / '.git').is_dir() - return check_output(f'git -C {path} describe --tags --long --always', shell=True).decode()[:-1] - except Exception: - return '' - - -@TryExcept() -@WorkingDirectory(ROOT) -def check_git_status(repo='ultralytics/yolov5', branch='master'): - # YOLOv5 status check, recommend 'git pull' if code is out of date - url = f'https://github.com/{repo}' - msg = f', for updates see {url}' - s = colorstr('github: ') # string - assert Path('.git').exists(), s + 'skipping check (not a git repository)' + msg - assert check_online(), s + 'skipping check (offline)' + msg - - splits = re.split(pattern=r'\s', string=check_output('git remote -v', shell=True).decode()) - matches = [repo in s for s in splits] - if any(matches): - remote = splits[matches.index(True) - 1] - else: - remote = 'ultralytics' - check_output(f'git remote add {remote} {url}', shell=True) - check_output(f'git fetch {remote}', shell=True, timeout=5) # git fetch - local_branch = check_output('git rev-parse --abbrev-ref HEAD', shell=True).decode().strip() # checked out - n = int(check_output(f'git rev-list {local_branch}..{remote}/{branch} --count', shell=True)) # commits behind - if n > 0: - pull = 'git pull' if remote == 'origin' else f'git pull {remote} {branch}' - s += f"⚠️ YOLOv5 is out of date by {n} commit{'s' * (n > 1)}. Use `{pull}` or `git clone {url}` to update." - else: - s += f'up to date with {url} ✅' - LOGGER.info(s) - - -def check_python(minimum='3.7.0'): - # Check current python version vs. required python version - check_version(platform.python_version(), minimum, name='Python ', hard=True) - - -def check_version(current='0.0.0', minimum='0.0.0', name='version ', pinned=False, hard=False, verbose=False): - # Check version vs. 
required version - current, minimum = (pkg.parse_version(x) for x in (current, minimum)) - result = (current == minimum) if pinned else (current >= minimum) # bool - s = f'WARNING: ⚠️ {name}{minimum} is required by YOLOv5, but {name}{current} is currently installed' # string - if hard: - assert result, emojis(s) # assert min requirements met - if verbose and not result: - LOGGER.warning(s) - return result - - -@TryExcept() -def check_requirements(requirements=ROOT / 'requirements.txt', exclude=(), install=True, cmds=''): - # Check installed dependencies meet YOLOv5 requirements (pass *.txt file or list of packages or single package str) - prefix = colorstr('red', 'bold', 'requirements:') - check_python() # check python version - if isinstance(requirements, Path): # requirements.txt file - file = requirements.resolve() - assert file.exists(), f"{prefix} {file} not found, check failed." - with file.open() as f: - requirements = [f'{x.name}{x.specifier}' for x in pkg.parse_requirements(f) if x.name not in exclude] - elif isinstance(requirements, str): - requirements = [requirements] - - s = '' - n = 0 - for r in requirements: - try: - pkg.require(r) - except (pkg.VersionConflict, pkg.DistributionNotFound): # exception if requirements not met - s += f'"{r}" ' - n += 1 - - if s and install and AUTOINSTALL: # check environment variable - LOGGER.info(f"{prefix} YOLOv5 requirement{'s' * (n > 1)} {s}not found, attempting AutoUpdate...") - try: - assert check_online(), "AutoUpdate skipped (offline)" - LOGGER.info(check_output(f'pip install {s} {cmds}', shell=True).decode()) - source = file if 'file' in locals() else requirements - s = f"{prefix} {n} package{'s' * (n > 1)} updated per {source}\n" \ - f"{prefix} ⚠️ {colorstr('bold', 'Restart runtime or rerun command for updates to take effect')}\n" - LOGGER.info(s) - except Exception as e: - LOGGER.warning(f'{prefix} {e}') - - -def check_img_size(imgsz, s=32, floor=0): - # Verify image size is a multiple of stride s in each dimension - if isinstance(imgsz, int): # integer i.e. img_size=640 - new_size = max(make_divisible(imgsz, int(s)), floor) - else: # list i.e. 
img_size=[640, 480] - imgsz = list(imgsz) # convert to list if tuple - new_size = [max(make_divisible(x, int(s)), floor) for x in imgsz] - if new_size != imgsz: - LOGGER.warning(f'WARNING: --img-size {imgsz} must be multiple of max stride {s}, updating to {new_size}') - return new_size - - -def check_imshow(): - # Check if environment supports image displays - try: - assert not is_docker(), 'cv2.imshow() is disabled in Docker environments' - assert not is_colab(), 'cv2.imshow() is disabled in Google Colab environments' - cv2.imshow('test', np.zeros((1, 1, 3))) - cv2.waitKey(1) - cv2.destroyAllWindows() - cv2.waitKey(1) - return True - except Exception as e: - LOGGER.warning(f'WARNING: Environment does not support cv2.imshow() or PIL Image.show() image displays\n{e}') - return False - - -def check_suffix(file='yolov5s.pt', suffix=('.pt',), msg=''): - # Check file(s) for acceptable suffix - if file and suffix: - if isinstance(suffix, str): - suffix = [suffix] - for f in file if isinstance(file, (list, tuple)) else [file]: - s = Path(f).suffix.lower() # file suffix - if len(s): - assert s in suffix, f"{msg}{f} acceptable suffix is {suffix}" - - -def check_yaml(file, suffix=('.yaml', '.yml')): - # Search/download YAML file (if necessary) and return path, checking suffix - return check_file(file, suffix) - - -def check_file(file, suffix=''): - # Search/download file (if necessary) and return path - check_suffix(file, suffix) # optional - file = str(file) # convert to str() - if Path(file).is_file() or not file: # exists - return file - elif file.startswith(('http:/', 'https:/')): # download - url = file # warning: Pathlib turns :// -> :/ - file = Path(urllib.parse.unquote(file).split('?')[0]).name # '%2F' to '/', split https://url.com/file.txt?auth - if Path(file).is_file(): - LOGGER.info(f'Found {url} locally at {file}') # file already exists - else: - LOGGER.info(f'Downloading {url} to {file}...') - torch.hub.download_url_to_file(url, file) - assert Path(file).exists() and Path(file).stat().st_size > 0, f'File download failed: {url}' # check - return file - elif file.startswith('clearml://'): # ClearML Dataset ID - assert 'clearml' in sys.modules, "ClearML is not installed, so cannot use ClearML dataset. Try running 'pip install clearml'." - return file - else: # search - files = [] - for d in 'data', 'models', 'utils': # search directories - files.extend(glob.glob(str(ROOT / d / '**' / file), recursive=True)) # find file - assert len(files), f'File not found: {file}' # assert file was found - assert len(files) == 1, f"Multiple files match '{file}', specify exact path: {files}" # assert unique - return files[0] # return file - - -def check_font(font=FONT, progress=False): - # Download font to CONFIG_DIR if necessary - font = Path(font) - file = CONFIG_DIR / font.name - if not font.exists() and not file.exists(): - url = f'https://ultralytics.com/assets/{font.name}' - LOGGER.info(f'Downloading {url} to {file}...') - torch.hub.download_url_to_file(url, str(file), progress=progress) - - -def check_dataset(data, autodownload=True): - # Download, check and/or unzip dataset if not found locally - - # Download (optional) - extract_dir = '' - if isinstance(data, (str, Path)) and str(data).endswith('.zip'): # i.e. 
gs://bucket/dir/coco128.zip - download(data, dir=f'{DATASETS_DIR}/{Path(data).stem}', unzip=True, delete=False, curl=False, threads=1) - data = next((DATASETS_DIR / Path(data).stem).rglob('*.yaml')) - extract_dir, autodownload = data.parent, False - - # Read yaml (optional) - if isinstance(data, (str, Path)): - with open(data, errors='ignore') as f: - data = yaml.safe_load(f) # dictionary - - # Checks - for k in 'train', 'val', 'names': - assert k in data, f"data.yaml '{k}:' field missing ❌" - if isinstance(data['names'], (list, tuple)): # old array format - data['names'] = dict(enumerate(data['names'])) # convert to dict - data['nc'] = len(data['names']) - - # Resolve paths - path = Path(extract_dir or data.get('path') or '') # optional 'path' default to '.' - if not path.is_absolute(): - path = (ROOT / path).resolve() - for k in 'train', 'val', 'test': - if data.get(k): # prepend path - data[k] = str(path / data[k]) if isinstance(data[k], str) else [str(path / x) for x in data[k]] - - # Parse yaml - train, val, test, s = (data.get(x) for x in ('train', 'val', 'test', 'download')) - if val: - val = [Path(x).resolve() for x in (val if isinstance(val, list) else [val])] # val path - if not all(x.exists() for x in val): - LOGGER.info('\nDataset not found ⚠️, missing paths %s' % [str(x) for x in val if not x.exists()]) - if not s or not autodownload: - raise Exception('Dataset not found ❌') - t = time.time() - root = path.parent if 'path' in data else '..' # unzip directory i.e. '../' - if s.startswith('http') and s.endswith('.zip'): # URL - f = Path(s).name # filename - LOGGER.info(f'Downloading {s} to {f}...') - torch.hub.download_url_to_file(s, f) - Path(root).mkdir(parents=True, exist_ok=True) # create root - ZipFile(f).extractall(path=root) # unzip - Path(f).unlink() # remove zip - r = None # success - elif s.startswith('bash '): # bash script - LOGGER.info(f'Running {s} ...') - r = os.system(s) - else: # python script - r = exec(s, {'yaml': data}) # return None - dt = f'({round(time.time() - t, 1)}s)' - s = f"success ✅ {dt}, saved to {colorstr('bold', root)}" if r in (0, None) else f"failure {dt} ❌" - LOGGER.info(f"Dataset download {s}") - check_font('Arial.ttf' if is_ascii(data['names']) else 'Arial.Unicode.ttf', progress=True) # download fonts - return data # dictionary - - -def check_amp(model): - # Check PyTorch Automatic Mixed Precision (AMP) functionality. Return True on correct operation - from models.common import AutoShape, DetectMultiBackend - - def amp_allclose(model, im): - # All close FP32 vs AMP results - m = AutoShape(model, verbose=False) # model - a = m(im).xywhn[0] # FP32 inference - m.amp = True - b = m(im).xywhn[0] # AMP inference - return a.shape == b.shape and torch.allclose(a, b, atol=0.1) # close to 10% absolute tolerance - - prefix = colorstr('AMP: ') - device = next(model.parameters()).device # get model device - if device.type in ('cpu', 'mps'): - return False # AMP only used on CUDA devices - f = ROOT / 'data' / 'images' / 'bus.jpg' # image to check - im = f if f.exists() else 'https://ultralytics.com/images/bus.jpg' if check_online() else np.ones((640, 640, 3)) - try: - assert amp_allclose(deepcopy(model), im) or amp_allclose(DetectMultiBackend('yolov5n.pt', device), im) - LOGGER.info(f'{prefix}checks passed ✅') - return True - except Exception: - help_url = 'https://github.com/ultralytics/yolov5/issues/7908' - LOGGER.warning(f'{prefix}checks failed ❌, disabling Automatic Mixed Precision. 
See {help_url}') - return False - - -def yaml_load(file='data.yaml'): - # Single-line safe yaml loading - with open(file, errors='ignore') as f: - return yaml.safe_load(f) - - -def yaml_save(file='data.yaml', data={}): - # Single-line safe yaml saving - with open(file, 'w') as f: - yaml.safe_dump({k: str(v) if isinstance(v, Path) else v for k, v in data.items()}, f, sort_keys=False) - - -def url2file(url): - # Convert URL to filename, i.e. https://url.com/file.txt?auth -> file.txt - url = str(Path(url)).replace(':/', '://') # Pathlib turns :// -> :/ - return Path(urllib.parse.unquote(url)).name.split('?')[0] # '%2F' to '/', split https://url.com/file.txt?auth - - -def download(url, dir='.', unzip=True, delete=True, curl=False, threads=1, retry=3): - # Multithreaded file download and unzip function, used in data.yaml for autodownload - def download_one(url, dir): - # Download 1 file - success = True - f = dir / Path(url).name # filename - if Path(url).is_file(): # exists in current path - Path(url).rename(f) # move to dir - elif not f.exists(): - LOGGER.info(f'Downloading {url} to {f}...') - for i in range(retry + 1): - if curl: - s = 'sS' if threads > 1 else '' # silent - r = os.system( - f'curl -# -{s}L "{url}" -o "{f}" --retry 9 -C -') # curl download with retry, continue - success = r == 0 - else: - torch.hub.download_url_to_file(url, f, progress=threads == 1) # torch download - success = f.is_file() - if success: - break - elif i < retry: - LOGGER.warning(f'Download failure, retrying {i + 1}/{retry} {url}...') - else: - LOGGER.warning(f'Failed to download {url}...') - - if unzip and success and f.suffix in ('.zip', '.tar', '.gz'): - LOGGER.info(f'Unzipping {f}...') - if f.suffix == '.zip': - ZipFile(f).extractall(path=dir) # unzip - elif f.suffix == '.tar': - os.system(f'tar xf {f} --directory {f.parent}') # unzip - elif f.suffix == '.gz': - os.system(f'tar xfz {f} --directory {f.parent}') # unzip - if delete: - f.unlink() # remove zip - - dir = Path(dir) - dir.mkdir(parents=True, exist_ok=True) # make directory - if threads > 1: - pool = ThreadPool(threads) - pool.imap(lambda x: download_one(*x), zip(url, repeat(dir))) # multithreaded - pool.close() - pool.join() - else: - for u in [url] if isinstance(url, (str, Path)) else url: - download_one(u, dir) - - -def make_divisible(x, divisor): - # Returns nearest x divisible by divisor - if isinstance(divisor, torch.Tensor): - divisor = int(divisor.max()) # to int - return math.ceil(x / divisor) * divisor - - -def clean_str(s): - # Cleans a string by replacing special characters with underscore _ - return re.sub(pattern="[|@#!¡·$€%&()=?¿^*;:,¨´><+]", repl="_", string=s) - - -def one_cycle(y1=0.0, y2=1.0, steps=100): - # lambda function for sinusoidal ramp from y1 to y2 https://arxiv.org/pdf/1812.01187.pdf - return lambda x: ((1 - math.cos(x * math.pi / steps)) / 2) * (y2 - y1) + y1 - - -def colorstr(*input): - # Colors a string https://en.wikipedia.org/wiki/ANSI_escape_code, i.e. 
colorstr('blue', 'hello world') - *args, string = input if len(input) > 1 else ('blue', 'bold', input[0]) # color arguments, string - colors = { - 'black': '\033[30m', # basic colors - 'red': '\033[31m', - 'green': '\033[32m', - 'yellow': '\033[33m', - 'blue': '\033[34m', - 'magenta': '\033[35m', - 'cyan': '\033[36m', - 'white': '\033[37m', - 'bright_black': '\033[90m', # bright colors - 'bright_red': '\033[91m', - 'bright_green': '\033[92m', - 'bright_yellow': '\033[93m', - 'bright_blue': '\033[94m', - 'bright_magenta': '\033[95m', - 'bright_cyan': '\033[96m', - 'bright_white': '\033[97m', - 'end': '\033[0m', # misc - 'bold': '\033[1m', - 'underline': '\033[4m'} - return ''.join(colors[x] for x in args) + f'{string}' + colors['end'] - - -def labels_to_class_weights(labels, nc=80): - # Get class weights (inverse frequency) from training labels - if labels[0] is None: # no labels loaded - return torch.Tensor() - - labels = np.concatenate(labels, 0) # labels.shape = (866643, 5) for COCO - classes = labels[:, 0].astype(int) # labels = [class xywh] - weights = np.bincount(classes, minlength=nc) # occurrences per class - - # Prepend gridpoint count (for uCE training) - # gpi = ((320 / 32 * np.array([1, 2, 4])) ** 2 * 3).sum() # gridpoints per image - # weights = np.hstack([gpi * len(labels) - weights.sum() * 9, weights * 9]) ** 0.5 # prepend gridpoints to start - - weights[weights == 0] = 1 # replace empty bins with 1 - weights = 1 / weights # number of targets per class - weights /= weights.sum() # normalize - return torch.from_numpy(weights).float() - - -def labels_to_image_weights(labels, nc=80, class_weights=np.ones(80)): - # Produces image weights based on class_weights and image contents - # Usage: index = random.choices(range(n), weights=image_weights, k=1) # weighted image sample - class_counts = np.array([np.bincount(x[:, 0].astype(int), minlength=nc) for x in labels]) - return (class_weights.reshape(1, nc) * class_counts).sum(1) - - -def coco80_to_coco91_class(): # converts 80-index (val2014) to 91-index (paper) - # https://tech.amikelive.com/node-718/what-object-categories-labels-are-in-coco-dataset/ - # a = np.loadtxt('data/coco.names', dtype='str', delimiter='\n') - # b = np.loadtxt('data/coco_paper.names', dtype='str', delimiter='\n') - # x1 = [list(a[i] == b).index(True) + 1 for i in range(80)] # darknet to coco - # x2 = [list(b[i] == a).index(True) if any(b[i] == a) else None for i in range(91)] # coco to darknet - return [ - 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 27, 28, 31, 32, 33, 34, - 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, - 64, 65, 67, 70, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 84, 85, 86, 87, 88, 89, 90] - - -def xyxy2xywh(x): - # Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] where xy1=top-left, xy2=bottom-right - y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) - y[:, 0] = (x[:, 0] + x[:, 2]) / 2 # x center - y[:, 1] = (x[:, 1] + x[:, 3]) / 2 # y center - y[:, 2] = x[:, 2] - x[:, 0] # width - y[:, 3] = x[:, 3] - x[:, 1] # height - return y - - -def xywh2xyxy(x): - # Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right - y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) - y[:, 0] = x[:, 0] - x[:, 2] / 2 # top left x - y[:, 1] = x[:, 1] - x[:, 3] / 2 # top left y - y[:, 2] = x[:, 0] + x[:, 2] / 2 # bottom right x - y[:, 3] = x[:, 1] + x[:, 3] / 2 # bottom right y - return y - - 
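The box-format helpers below (`xyxy2xywh`, `xywh2xyxy` and the normalized variants) are easiest to sanity-check with a quick round trip. A minimal sketch, assuming it is run from the yolov5 repo root so `utils.general` is importable:

```python
# Round-trip sanity check for the box-format converters (illustrative only).
import numpy as np
from utils.general import xywh2xyxy, xyxy2xywh  # assumes the yolov5 repo root is on sys.path

boxes_xyxy = np.array([[10., 20., 110., 220.]])  # one box as x1, y1, x2, y2
boxes_xywh = xyxy2xywh(boxes_xyxy)               # -> [[60., 120., 100., 200.]] center-x, center-y, w, h
assert np.allclose(xywh2xyxy(boxes_xywh), boxes_xyxy)  # converting back recovers the corners
```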
-def xywhn2xyxy(x, w=640, h=640, padw=0, padh=0): - # Convert nx4 boxes from [x, y, w, h] normalized to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right - y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) - y[:, 0] = w * (x[:, 0] - x[:, 2] / 2) + padw # top left x - y[:, 1] = h * (x[:, 1] - x[:, 3] / 2) + padh # top left y - y[:, 2] = w * (x[:, 0] + x[:, 2] / 2) + padw # bottom right x - y[:, 3] = h * (x[:, 1] + x[:, 3] / 2) + padh # bottom right y - return y - - -def xyxy2xywhn(x, w=640, h=640, clip=False, eps=0.0): - # Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] normalized where xy1=top-left, xy2=bottom-right - if clip: - clip_coords(x, (h - eps, w - eps)) # warning: inplace clip - y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) - y[:, 0] = ((x[:, 0] + x[:, 2]) / 2) / w # x center - y[:, 1] = ((x[:, 1] + x[:, 3]) / 2) / h # y center - y[:, 2] = (x[:, 2] - x[:, 0]) / w # width - y[:, 3] = (x[:, 3] - x[:, 1]) / h # height - return y - - -def xyn2xy(x, w=640, h=640, padw=0, padh=0): - # Convert normalized segments into pixel segments, shape (n,2) - y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) - y[:, 0] = w * x[:, 0] + padw # top left x - y[:, 1] = h * x[:, 1] + padh # top left y - return y - - -def segment2box(segment, width=640, height=640): - # Convert 1 segment label to 1 box label, applying inside-image constraint, i.e. (xy1, xy2, ...) to (xyxy) - x, y = segment.T # segment xy - inside = (x >= 0) & (y >= 0) & (x <= width) & (y <= height) - x, y, = x[inside], y[inside] - return np.array([x.min(), y.min(), x.max(), y.max()]) if any(x) else np.zeros((1, 4)) # xyxy - - -def segments2boxes(segments): - # Convert segment labels to box labels, i.e. (cls, xy1, xy2, ...) to (cls, xywh) - boxes = [] - for s in segments: - x, y = s.T # segment xy - boxes.append([x.min(), y.min(), x.max(), y.max()]) # cls, xyxy - return xyxy2xywh(np.array(boxes)) # cls, xywh - - -def resample_segments(segments, n=1000): - # Up-sample an (n,2) segment - for i, s in enumerate(segments): - s = np.concatenate((s, s[0:1, :]), axis=0) - x = np.linspace(0, len(s) - 1, n) - xp = np.arange(len(s)) - segments[i] = np.concatenate([np.interp(x, xp, s[:, i]) for i in range(2)]).reshape(2, -1).T # segment xy - return segments - - -def scale_coords(img1_shape, coords, img0_shape, ratio_pad=None): - # Rescale coords (xyxy) from img1_shape to img0_shape - if ratio_pad is None: # calculate from img0_shape - gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1]) # gain = old / new - pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (img1_shape[0] - img0_shape[0] * gain) / 2 # wh padding - else: - gain = ratio_pad[0][0] - pad = ratio_pad[1] - - coords[:, [0, 2]] -= pad[0] # x padding - coords[:, [1, 3]] -= pad[1] # y padding - coords[:, :4] /= gain - clip_coords(coords, img0_shape) - return coords - - -def clip_coords(boxes, shape): - # Clip bounding xyxy bounding boxes to image shape (height, width) - if isinstance(boxes, torch.Tensor): # faster individually - boxes[:, 0].clamp_(0, shape[1]) # x1 - boxes[:, 1].clamp_(0, shape[0]) # y1 - boxes[:, 2].clamp_(0, shape[1]) # x2 - boxes[:, 3].clamp_(0, shape[0]) # y2 - else: # np.array (faster grouped) - boxes[:, [0, 2]] = boxes[:, [0, 2]].clip(0, shape[1]) # x1, x2 - boxes[:, [1, 3]] = boxes[:, [1, 3]].clip(0, shape[0]) # y1, y2 - - -def non_max_suppression(prediction, - conf_thres=0.25, - iou_thres=0.45, - classes=None, - agnostic=False, - multi_label=False, - labels=(), - max_det=300): - """Non-Maximum 
Suppression (NMS) on inference results to reject overlapping bounding boxes - - Returns: - list of detections, on (n,6) tensor per image [xyxy, conf, cls] - """ - - if isinstance(prediction, (list, tuple)): # YOLOv5 model in validation model, output = (inference_out, loss_out) - prediction = prediction[0] # select only inference output - - bs = prediction.shape[0] # batch size - nc = prediction.shape[2] - 5 # number of classes - xc = prediction[..., 4] > conf_thres # candidates - - # Checks - assert 0 <= conf_thres <= 1, f'Invalid Confidence threshold {conf_thres}, valid values are between 0.0 and 1.0' - assert 0 <= iou_thres <= 1, f'Invalid IoU {iou_thres}, valid values are between 0.0 and 1.0' - - # Settings - # min_wh = 2 # (pixels) minimum box width and height - max_wh = 7680 # (pixels) maximum box width and height - max_nms = 30000 # maximum number of boxes into torchvision.ops.nms() - time_limit = 0.3 + 0.03 * bs # seconds to quit after - redundant = True # require redundant detections - multi_label &= nc > 1 # multiple labels per box (adds 0.5ms/img) - merge = False # use merge-NMS - - t = time.time() - output = [torch.zeros((0, 6), device=prediction.device)] * bs - for xi, x in enumerate(prediction): # image index, image inference - # Apply constraints - # x[((x[..., 2:4] < min_wh) | (x[..., 2:4] > max_wh)).any(1), 4] = 0 # width-height - x = x[xc[xi]] # confidence - - # Cat apriori labels if autolabelling - if labels and len(labels[xi]): - lb = labels[xi] - v = torch.zeros((len(lb), nc + 5), device=x.device) - v[:, :4] = lb[:, 1:5] # box - v[:, 4] = 1.0 # conf - v[range(len(lb)), lb[:, 0].long() + 5] = 1.0 # cls - x = torch.cat((x, v), 0) - - # If none remain process next image - if not x.shape[0]: - continue - - # Compute conf - x[:, 5:] *= x[:, 4:5] # conf = obj_conf * cls_conf - - # Box (center x, center y, width, height) to (x1, y1, x2, y2) - box = xywh2xyxy(x[:, :4]) - - # Detections matrix nx6 (xyxy, conf, cls) - if multi_label: - i, j = (x[:, 5:] > conf_thres).nonzero(as_tuple=False).T - x = torch.cat((box[i], x[i, j + 5, None], j[:, None].float()), 1) - else: # best class only - conf, j = x[:, 5:].max(1, keepdim=True) - x = torch.cat((box, conf, j.float()), 1)[conf.view(-1) > conf_thres] - - # Filter by class - if classes is not None: - x = x[(x[:, 5:6] == torch.tensor(classes, device=x.device)).any(1)] - - # Apply finite constraint - # if not torch.isfinite(x).all(): - # x = x[torch.isfinite(x).all(1)] - - # Check shape - n = x.shape[0] # number of boxes - if not n: # no boxes - continue - elif n > max_nms: # excess boxes - x = x[x[:, 4].argsort(descending=True)[:max_nms]] # sort by confidence - - # Batched NMS - c = x[:, 5:6] * (0 if agnostic else max_wh) # classes - boxes, scores = x[:, :4] + c, x[:, 4] # boxes (offset by class), scores - i = torchvision.ops.nms(boxes, scores, iou_thres) # NMS - if i.shape[0] > max_det: # limit detections - i = i[:max_det] - if merge and (1 < n < 3E3): # Merge NMS (boxes merged using weighted mean) - # update boxes as boxes(i,4) = weights(i,n) * boxes(n,4) - iou = box_iou(boxes[i], boxes) > iou_thres # iou matrix - weights = iou * scores[None] # box weights - x[i, :4] = torch.mm(weights, x[:, :4]).float() / weights.sum(1, keepdim=True) # merged boxes - if redundant: - i = i[iou.sum(1) > 1] # require redundancy - - output[xi] = x[i] - if (time.time() - t) > time_limit: - LOGGER.warning(f'WARNING: NMS time limit {time_limit:.3f}s exceeded') - break # time limit exceeded - - return output - - -def strip_optimizer(f='best.pt', s=''): # from 
utils.general import *; strip_optimizer() - # Strip optimizer from 'f' to finalize training, optionally save as 's' - x = torch.load(f, map_location=torch.device('cpu')) - if x.get('ema'): - x['model'] = x['ema'] # replace model with ema - for k in 'optimizer', 'best_fitness', 'wandb_id', 'ema', 'updates': # keys - x[k] = None - x['epoch'] = -1 - x['model'].half() # to FP16 - for p in x['model'].parameters(): - p.requires_grad = False - torch.save(x, s or f) - mb = os.path.getsize(s or f) / 1E6 # filesize - LOGGER.info(f"Optimizer stripped from {f},{f' saved as {s},' if s else ''} {mb:.1f}MB") - - -def print_mutation(results, hyp, save_dir, bucket, prefix=colorstr('evolve: ')): - evolve_csv = save_dir / 'evolve.csv' - evolve_yaml = save_dir / 'hyp_evolve.yaml' - keys = ('metrics/precision', 'metrics/recall', 'metrics/mAP_0.5', 'metrics/mAP_0.5:0.95', 'val/box_loss', - 'val/obj_loss', 'val/cls_loss') + tuple(hyp.keys()) # [results + hyps] - keys = tuple(x.strip() for x in keys) - vals = results + tuple(hyp.values()) - n = len(keys) - - # Download (optional) - if bucket: - url = f'gs://{bucket}/evolve.csv' - if gsutil_getsize(url) > (evolve_csv.stat().st_size if evolve_csv.exists() else 0): - os.system(f'gsutil cp {url} {save_dir}') # download evolve.csv if larger than local - - # Log to evolve.csv - s = '' if evolve_csv.exists() else (('%20s,' * n % keys).rstrip(',') + '\n') # add header - with open(evolve_csv, 'a') as f: - f.write(s + ('%20.5g,' * n % vals).rstrip(',') + '\n') - - # Save yaml - with open(evolve_yaml, 'w') as f: - data = pd.read_csv(evolve_csv) - data = data.rename(columns=lambda x: x.strip()) # strip keys - i = np.argmax(fitness(data.values[:, :4])) # - generations = len(data) - f.write('# YOLOv5 Hyperparameter Evolution Results\n' + f'# Best generation: {i}\n' + - f'# Last generation: {generations - 1}\n' + '# ' + ', '.join(f'{x.strip():>20s}' for x in keys[:7]) + - '\n' + '# ' + ', '.join(f'{x:>20.5g}' for x in data.values[i, :7]) + '\n\n') - yaml.safe_dump(data.loc[i][7:].to_dict(), f, sort_keys=False) - - # Print to screen - LOGGER.info(prefix + f'{generations} generations finished, current result:\n' + prefix + - ', '.join(f'{x.strip():>20s}' for x in keys) + '\n' + prefix + ', '.join(f'{x:20.5g}' - for x in vals) + '\n\n') - - if bucket: - os.system(f'gsutil cp {evolve_csv} {evolve_yaml} gs://{bucket}') # upload - - -def apply_classifier(x, model, img, im0): - # Apply a second stage classifier to YOLO outputs - # Example model = torchvision.models.__dict__['efficientnet_b0'](pretrained=True).to(device).eval() - im0 = [im0] if isinstance(im0, np.ndarray) else im0 - for i, d in enumerate(x): # per image - if d is not None and len(d): - d = d.clone() - - # Reshape and pad cutouts - b = xyxy2xywh(d[:, :4]) # boxes - b[:, 2:] = b[:, 2:].max(1)[0].unsqueeze(1) # rectangle to square - b[:, 2:] = b[:, 2:] * 1.3 + 30 # pad - d[:, :4] = xywh2xyxy(b).long() - - # Rescale boxes from img_size to im0 size - scale_coords(img.shape[2:], d[:, :4], im0[i].shape) - - # Classes - pred_cls1 = d[:, 5].long() - ims = [] - for a in d: - cutout = im0[i][int(a[1]):int(a[3]), int(a[0]):int(a[2])] - im = cv2.resize(cutout, (224, 224)) # BGR - - im = im[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416 - im = np.ascontiguousarray(im, dtype=np.float32) # uint8 to float32 - im /= 255 # 0 - 255 to 0.0 - 1.0 - ims.append(im) - - pred_cls2 = model(torch.Tensor(ims).to(d.device)).argmax(1) # classifier prediction - x[i] = x[i][pred_cls1 == pred_cls2] # retain matching class detections - - 
return x - - -def increment_path(path, exist_ok=False, sep='', mkdir=False): - # Increment file or directory path, i.e. runs/exp --> runs/exp{sep}2, runs/exp{sep}3, ... etc. - path = Path(path) # os-agnostic - if path.exists() and not exist_ok: - path, suffix = (path.with_suffix(''), path.suffix) if path.is_file() else (path, '') - - # Method 1 - for n in range(2, 9999): - p = f'{path}{sep}{n}{suffix}' # increment path - if not os.path.exists(p): # - break - path = Path(p) - - # Method 2 (deprecated) - # dirs = glob.glob(f"{path}{sep}*") # similar paths - # matches = [re.search(rf"{path.stem}{sep}(\d+)", d) for d in dirs] - # i = [int(m.groups()[0]) for m in matches if m] # indices - # n = max(i) + 1 if i else 2 # increment number - # path = Path(f"{path}{sep}{n}{suffix}") # increment path - - if mkdir: - path.mkdir(parents=True, exist_ok=True) # make directory - - return path - - -# OpenCV Chinese-friendly functions ------------------------------------------------------------------------------------ -imshow_ = cv2.imshow # copy to avoid recursion errors - - -def imread(path, flags=cv2.IMREAD_COLOR): - return cv2.imdecode(np.fromfile(path, np.uint8), flags) - - -def imwrite(path, im): - try: - cv2.imencode(Path(path).suffix, im)[1].tofile(path) - return True - except Exception: - return False - - -def imshow(path, im): - imshow_(path.encode('unicode_escape').decode(), im) - - -cv2.imread, cv2.imwrite, cv2.imshow = imread, imwrite, imshow # redefine - -# Variables ------------------------------------------------------------------------------------------------------------ -NCOLS = 0 if is_docker() else shutil.get_terminal_size().columns # terminal window size for tqdm diff --git a/src/yolov5_ros/src/yolov5/utils/google_app_engine/Dockerfile b/src/yolov5_ros/src/yolov5/utils/google_app_engine/Dockerfile deleted file mode 100644 index 0155618..0000000 --- a/src/yolov5_ros/src/yolov5/utils/google_app_engine/Dockerfile +++ /dev/null @@ -1,25 +0,0 @@ -FROM gcr.io/google-appengine/python - -# Create a virtualenv for dependencies. This isolates these packages from -# system-level packages. -# Use -p python3 or -p python3.7 to select python version. Default is version 2. -RUN virtualenv /env -p python3 - -# Setting these environment variables are the same as running -# source /env/bin/activate. -ENV VIRTUAL_ENV /env -ENV PATH /env/bin:$PATH - -RUN apt-get update && apt-get install -y python-opencv - -# Copy the application's requirements.txt and run pip to install all -# dependencies into the virtualenv. -ADD requirements.txt /app/requirements.txt -RUN pip install -r /app/requirements.txt - -# Add the application source code. -ADD . /app - -# Run a WSGI server to serve the application. gunicorn must be declared as -# a dependency in requirements.txt. 
-CMD gunicorn -b :$PORT main:app diff --git a/src/yolov5_ros/src/yolov5/utils/google_app_engine/additional_requirements.txt b/src/yolov5_ros/src/yolov5/utils/google_app_engine/additional_requirements.txt deleted file mode 100644 index 42d7ffc..0000000 --- a/src/yolov5_ros/src/yolov5/utils/google_app_engine/additional_requirements.txt +++ /dev/null @@ -1,4 +0,0 @@ -# add these requirements in your app on top of the existing ones -pip==21.1 -Flask==1.0.2 -gunicorn==19.9.0 diff --git a/src/yolov5_ros/src/yolov5/utils/google_app_engine/app.yaml b/src/yolov5_ros/src/yolov5/utils/google_app_engine/app.yaml deleted file mode 100644 index 5056b7c..0000000 --- a/src/yolov5_ros/src/yolov5/utils/google_app_engine/app.yaml +++ /dev/null @@ -1,14 +0,0 @@ -runtime: custom -env: flex - -service: yolov5app - -liveness_check: - initial_delay_sec: 600 - -manual_scaling: - instances: 1 -resources: - cpu: 1 - memory_gb: 4 - disk_size_gb: 20 diff --git a/src/yolov5_ros/src/yolov5/utils/loggers/__init__.py b/src/yolov5_ros/src/yolov5/utils/loggers/__init__.py deleted file mode 100644 index f29debb..0000000 --- a/src/yolov5_ros/src/yolov5/utils/loggers/__init__.py +++ /dev/null @@ -1,404 +0,0 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license -""" -Logging utils -""" - -import os -import warnings -from pathlib import Path - -import pkg_resources as pkg -import torch -from torch.utils.tensorboard import SummaryWriter - -from utils.general import colorstr, cv2 -from utils.loggers.clearml.clearml_utils import ClearmlLogger -from utils.loggers.wandb.wandb_utils import WandbLogger -from utils.plots import plot_images, plot_labels, plot_results -from utils.torch_utils import de_parallel - -LOGGERS = ('csv', 'tb', 'wandb', 'clearml', 'comet') # *.csv, TensorBoard, Weights & Biases, ClearML -RANK = int(os.getenv('RANK', -1)) - -try: - import wandb - - assert hasattr(wandb, '__version__') # verify package import not local dir - if pkg.parse_version(wandb.__version__) >= pkg.parse_version('0.12.2') and RANK in {0, -1}: - try: - wandb_login_success = wandb.login(timeout=30) - except wandb.errors.UsageError: # known non-TTY terminal issue - wandb_login_success = False - if not wandb_login_success: - wandb = None -except (ImportError, AssertionError): - wandb = None - -try: - import clearml - - assert hasattr(clearml, '__version__') # verify package import not local dir -except (ImportError, AssertionError): - clearml = None - -try: - if RANK not in [0, -1]: - comet_ml = None - else: - import comet_ml - - assert hasattr(comet_ml, '__version__') # verify package import not local dir - from utils.loggers.comet import CometLogger - -except (ModuleNotFoundError, ImportError, AssertionError): - comet_ml = None - - -class Loggers(): - # YOLOv5 Loggers class - def __init__(self, save_dir=None, weights=None, opt=None, hyp=None, logger=None, include=LOGGERS): - self.save_dir = save_dir - self.weights = weights - self.opt = opt - self.hyp = hyp - self.plots = not opt.noplots # plot results - self.logger = logger # for printing results to console - self.include = include - self.keys = [ - 'train/box_loss', - 'train/obj_loss', - 'train/cls_loss', # train loss - 'metrics/precision', - 'metrics/recall', - 'metrics/mAP_0.5', - 'metrics/mAP_0.5:0.95', # metrics - 'val/box_loss', - 'val/obj_loss', - 'val/cls_loss', # val loss - 'x/lr0', - 'x/lr1', - 'x/lr2'] # params - self.best_keys = ['best/epoch', 'best/precision', 'best/recall', 'best/mAP_0.5', 'best/mAP_0.5:0.95'] - for k in LOGGERS: - setattr(self, k, None) # init empty logger dictionary - 
self.csv = True # always log to csv - - # Messages - if not wandb: - prefix = colorstr('Weights & Biases: ') - s = f"{prefix}run 'pip install wandb' to automatically track and visualize YOLOv5 🚀 runs in Weights & Biases" - self.logger.info(s) - if not clearml: - prefix = colorstr('ClearML: ') - s = f"{prefix}run 'pip install clearml' to automatically track, visualize and remotely train YOLOv5 🚀 in ClearML" - self.logger.info(s) - if not comet_ml: - prefix = colorstr('Comet: ') - s = f"{prefix}run 'pip install comet_ml' to automatically track and visualize YOLOv5 🚀 runs in Comet" - self.logger.info(s) - # TensorBoard - s = self.save_dir - if 'tb' in self.include and not self.opt.evolve: - prefix = colorstr('TensorBoard: ') - self.logger.info(f"{prefix}Start with 'tensorboard --logdir {s.parent}', view at http://localhost:6006/") - self.tb = SummaryWriter(str(s)) - - # W&B - if wandb and 'wandb' in self.include: - wandb_artifact_resume = isinstance(self.opt.resume, str) and self.opt.resume.startswith('wandb-artifact://') - run_id = torch.load(self.weights).get('wandb_id') if self.opt.resume and not wandb_artifact_resume else None - self.opt.hyp = self.hyp # add hyperparameters - self.wandb = WandbLogger(self.opt, run_id) - # temp warn. because nested artifacts not supported after 0.12.10 - if pkg.parse_version(wandb.__version__) >= pkg.parse_version('0.12.11'): - s = "YOLOv5 temporarily requires wandb version 0.12.10 or below. Some features may not work as expected." - self.logger.warning(s) - else: - self.wandb = None - - # ClearML - if clearml and 'clearml' in self.include: - self.clearml = ClearmlLogger(self.opt, self.hyp) - else: - self.clearml = None - - # Comet - if comet_ml and 'comet' in self.include: - if isinstance(self.opt.resume, str) and self.opt.resume.startswith("comet://"): - run_id = self.opt.resume.split("/")[-1] - self.comet_logger = CometLogger(self.opt, self.hyp, run_id=run_id) - - else: - self.comet_logger = CometLogger(self.opt, self.hyp) - - else: - self.comet_logger = None - - @property - def remote_dataset(self): - # Get data_dict if custom dataset artifact link is provided - data_dict = None - if self.clearml: - data_dict = self.clearml.data_dict - if self.wandb: - data_dict = self.wandb.data_dict - if self.comet_logger: - data_dict = self.comet_logger.data_dict - - return data_dict - - def on_train_start(self): - if self.comet_logger: - self.comet_logger.on_train_start() - - def on_pretrain_routine_start(self): - if self.comet_logger: - self.comet_logger.on_pretrain_routine_start() - - def on_pretrain_routine_end(self, labels, names): - # Callback runs on pre-train routine end - if self.plots: - plot_labels(labels, names, self.save_dir) - paths = self.save_dir.glob('*labels*.jpg') # training labels - if self.wandb: - self.wandb.log({"Labels": [wandb.Image(str(x), caption=x.name) for x in paths]}) - # if self.clearml: - # pass # ClearML saves these images automatically using hooks - if self.comet_logger: - self.comet_logger.on_pretrain_routine_end(paths) - - def on_train_batch_end(self, model, ni, imgs, targets, paths, vals): - log_dict = dict(zip(self.keys[0:3], vals)) - # Callback runs on train batch end - # ni: number integrated batches (since train start) - if self.plots: - if ni < 3: - f = self.save_dir / f'train_batch{ni}.jpg' # filename - plot_images(imgs, targets, paths, f) - if ni == 0 and self.tb and not self.opt.sync_bn: - log_tensorboard_graph(self.tb, model, imgsz=(self.opt.imgsz, self.opt.imgsz)) - if ni == 10 and (self.wandb or self.clearml): - files 
= sorted(self.save_dir.glob('train*.jpg')) - if self.wandb: - self.wandb.log({'Mosaics': [wandb.Image(str(f), caption=f.name) for f in files if f.exists()]}) - if self.clearml: - self.clearml.log_debug_samples(files, title='Mosaics') - - if self.comet_logger: - self.comet_logger.on_train_batch_end(log_dict, step=ni) - - def on_train_epoch_end(self, epoch): - # Callback runs on train epoch end - if self.wandb: - self.wandb.current_epoch = epoch + 1 - - if self.comet_logger: - self.comet_logger.on_train_epoch_end(epoch) - - def on_val_start(self): - if self.comet_logger: - self.comet_logger.on_val_start() - - def on_val_image_end(self, pred, predn, path, names, im): - # Callback runs on val image end - if self.wandb: - self.wandb.val_one_image(pred, predn, path, names, im) - if self.clearml: - self.clearml.log_image_with_boxes(path, pred, names, im) - - def on_val_batch_end(self, batch_i, im, targets, paths, shapes, out): - if self.comet_logger: - self.comet_logger.on_val_batch_end(batch_i, im, targets, paths, shapes, out) - - def on_val_end(self, nt, tp, fp, p, r, f1, ap, ap50, ap_class, confusion_matrix): - # Callback runs on val end - if self.wandb or self.clearml: - files = sorted(self.save_dir.glob('val*.jpg')) - if self.wandb: - self.wandb.log({"Validation": [wandb.Image(str(f), caption=f.name) for f in files]}) - if self.clearml: - self.clearml.log_debug_samples(files, title='Validation') - - if self.comet_logger: - self.comet_logger.on_val_end(nt, tp, fp, p, r, f1, ap, ap50, ap_class, confusion_matrix) - - def on_fit_epoch_end(self, vals, epoch, best_fitness, fi): - # Callback runs at the end of each fit (train+val) epoch - x = dict(zip(self.keys, vals)) - if self.csv: - file = self.save_dir / 'results.csv' - n = len(x) + 1 # number of cols - s = '' if file.exists() else (('%20s,' * n % tuple(['epoch'] + self.keys)).rstrip(',') + '\n') # add header - with open(file, 'a') as f: - f.write(s + ('%20.5g,' * n % tuple([epoch] + vals)).rstrip(',') + '\n') - - if self.tb: - for k, v in x.items(): - self.tb.add_scalar(k, v, epoch) - elif self.clearml: # log to ClearML if TensorBoard not used - for k, v in x.items(): - title, series = k.split('/') - self.clearml.task.get_logger().report_scalar(title, series, v, epoch) - - if self.wandb: - if best_fitness == fi: - best_results = [epoch] + vals[3:7] - for i, name in enumerate(self.best_keys): - self.wandb.wandb_run.summary[name] = best_results[i] # log best results in the summary - self.wandb.log(x) - self.wandb.end_epoch(best_result=best_fitness == fi) - - if self.clearml: - self.clearml.current_epoch_logged_images = set() # reset epoch image limit - self.clearml.current_epoch += 1 - - if self.comet_logger: - self.comet_logger.on_fit_epoch_end(x, epoch=epoch) - - def on_model_save(self, last, epoch, final_epoch, best_fitness, fi): - # Callback runs on model save event - if (epoch + 1) % self.opt.save_period == 0 and not final_epoch and self.opt.save_period != -1: - if self.wandb: - self.wandb.log_model(last.parent, self.opt, epoch, fi, best_model=best_fitness == fi) - if self.clearml: - self.clearml.task.update_output_model(model_path=str(last), - model_name='Latest Model', - auto_delete_file=False) - - if self.comet_logger: - self.comet_logger.on_model_save(last, epoch, final_epoch, best_fitness, fi) - - def on_train_end(self, last, best, epoch, results): - # Callback runs on training end, i.e. 
saving best model - if self.plots: - plot_results(file=self.save_dir / 'results.csv') # save results.png - files = ['results.png', 'confusion_matrix.png', *(f'{x}_curve.png' for x in ('F1', 'PR', 'P', 'R'))] - files = [(self.save_dir / f) for f in files if (self.save_dir / f).exists()] # filter - self.logger.info(f"Results saved to {colorstr('bold', self.save_dir)}") - - if self.tb and not self.clearml: # These images are already captured by ClearML by now, we don't want doubles - for f in files: - self.tb.add_image(f.stem, cv2.imread(str(f))[..., ::-1], epoch, dataformats='HWC') - - if self.wandb: - self.wandb.log(dict(zip(self.keys[3:10], results))) - self.wandb.log({"Results": [wandb.Image(str(f), caption=f.name) for f in files]}) - # Calling wandb.log. TODO: Refactor this into WandbLogger.log_model - if not self.opt.evolve: - wandb.log_artifact(str(best if best.exists() else last), - type='model', - name=f'run_{self.wandb.wandb_run.id}_model', - aliases=['latest', 'best', 'stripped']) - self.wandb.finish_run() - - if self.clearml and not self.opt.evolve: - self.clearml.task.update_output_model(model_path=str(best if best.exists() else last), - name='Best Model', - auto_delete_file=False) - - if self.comet_logger: - final_results = dict(zip(self.keys[3:10], results)) - self.comet_logger.on_train_end(files, self.save_dir, last, best, epoch, final_results) - - def on_params_update(self, params: dict): - # Update hyperparams or configs of the experiment - if self.wandb: - self.wandb.wandb_run.config.update(params, allow_val_change=True) - if self.comet_logger: - self.comet_logger.on_params_update(params) - - -class GenericLogger: - """ - YOLOv5 General purpose logger for non-task specific logging - Usage: from utils.loggers import GenericLogger; logger = GenericLogger(...) 
- Arguments - opt: Run arguments - console_logger: Console logger - include: loggers to include - """ - - def __init__(self, opt, console_logger, include=('tb', 'wandb')): - # init default loggers - self.save_dir = Path(opt.save_dir) - self.include = include - self.console_logger = console_logger - self.csv = self.save_dir / 'results.csv' # CSV logger - if 'tb' in self.include: - prefix = colorstr('TensorBoard: ') - self.console_logger.info( - f"{prefix}Start with 'tensorboard --logdir {self.save_dir.parent}', view at http://localhost:6006/") - self.tb = SummaryWriter(str(self.save_dir)) - - if wandb and 'wandb' in self.include: - self.wandb = wandb.init(project=web_project_name(str(opt.project)), - name=None if opt.name == "exp" else opt.name, - config=opt) - else: - self.wandb = None - - def log_metrics(self, metrics, epoch): - # Log metrics dictionary to all loggers - if self.csv: - keys, vals = list(metrics.keys()), list(metrics.values()) - n = len(metrics) + 1 # number of cols - s = '' if self.csv.exists() else (('%23s,' * n % tuple(['epoch'] + keys)).rstrip(',') + '\n') # header - with open(self.csv, 'a') as f: - f.write(s + ('%23.5g,' * n % tuple([epoch] + vals)).rstrip(',') + '\n') - - if self.tb: - for k, v in metrics.items(): - self.tb.add_scalar(k, v, epoch) - - if self.wandb: - self.wandb.log(metrics, step=epoch) - - def log_images(self, files, name='Images', epoch=0): - # Log images to all loggers - files = [Path(f) for f in (files if isinstance(files, (tuple, list)) else [files])] # to Path - files = [f for f in files if f.exists()] # filter by exists - - if self.tb: - for f in files: - self.tb.add_image(f.stem, cv2.imread(str(f))[..., ::-1], epoch, dataformats='HWC') - - if self.wandb: - self.wandb.log({name: [wandb.Image(str(f), caption=f.name) for f in files]}, step=epoch) - - def log_graph(self, model, imgsz=(640, 640)): - # Log model graph to all loggers - if self.tb: - log_tensorboard_graph(self.tb, model, imgsz) - - def log_model(self, model_path, epoch=0, metadata={}): - # Log model to all loggers - if self.wandb: - art = wandb.Artifact(name=f"run_{wandb.run.id}_model", type="model", metadata=metadata) - art.add_file(str(model_path)) - wandb.log_artifact(art) - - def update_params(self, params): - # Update the paramters logged - if self.wandb: - wandb.run.config.update(params, allow_val_change=True) - - -def log_tensorboard_graph(tb, model, imgsz=(640, 640)): - # Log model graph to TensorBoard - try: - p = next(model.parameters()) # for device, type - imgsz = (imgsz, imgsz) if isinstance(imgsz, int) else imgsz # expand - im = torch.zeros((1, 3, *imgsz)).to(p.device).type_as(p) # input image (WARNING: must be zeros, not empty) - with warnings.catch_warnings(): - warnings.simplefilter('ignore') # suppress jit trace warning - tb.add_graph(torch.jit.trace(de_parallel(model), im, strict=False), []) - except Exception as e: - print(f'WARNING: TensorBoard graph visualization failure {e}') - - -def web_project_name(project): - # Convert local project name to web project name - if not project.startswith('runs/train'): - return project - suffix = '-Classify' if project.endswith('-cls') else '-Segment' if project.endswith('-seg') else '' - return f'YOLOv5{suffix}' diff --git a/src/yolov5_ros/src/yolov5/utils/loggers/clearml/README.md b/src/yolov5_ros/src/yolov5/utils/loggers/clearml/README.md deleted file mode 100644 index 64eef6b..0000000 --- a/src/yolov5_ros/src/yolov5/utils/loggers/clearml/README.md +++ /dev/null @@ -1,222 +0,0 @@ -# ClearML Integration - -Clear|MLClear|ML - 
-## About ClearML - -[ClearML](https://cutt.ly/yolov5-tutorial-clearml) is an [open-source](https://github.com/allegroai/clearml) toolbox designed to save you time ⏱️. - -🔨 Track every YOLOv5 training run in the experiment manager - -🔧 Version and easily access your custom training data with the integrated ClearML Data Versioning Tool - -🔦 Remotely train and monitor your YOLOv5 training runs using ClearML Agent - -🔬 Get the very best mAP using ClearML Hyperparameter Optimization - -🔭 Turn your newly trained YOLOv5 model into an API with just a few commands using ClearML Serving - -
-And so much more. It's up to you how many of these tools you want to use: you can stick to the experiment manager, or chain them all together into an impressive pipeline! -
-
- -![ClearML scalars dashboard](https://github.com/thepycoder/clearml_screenshots/raw/main/experiment_manager_with_compare.gif) - - -
-
- -## 🦾 Setting Things Up - -To keep track of your experiments and/or data, ClearML needs to communicate to a server. You have 2 options to get one: - -Either sign up for free to the [ClearML Hosted Service](https://cutt.ly/yolov5-tutorial-clearml) or you can set up your own server, see [here](https://clear.ml/docs/latest/docs/deploying_clearml/clearml_server). Even the server is open-source, so even if you're dealing with sensitive data, you should be good to go! - -1. Install the `clearml` python package: - - ```bash - pip install clearml - ``` - -1. Connect the ClearML SDK to the server by [creating credentials](https://app.clear.ml/settings/workspace-configuration) (go right top to Settings -> Workspace -> Create new credentials), then execute the command below and follow the instructions: - - ```bash - clearml-init - ``` - -That's it! You're done 😎 - -
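Once `clearml-init` has stored your credentials, a quick way to confirm the SDK can reach the server is to create and close a throwaway task from Python (a minimal sketch; the project and task names here are arbitrary):

```python
# Minimal connectivity check, assuming `pip install clearml` and `clearml-init` have been run.
from clearml import Task

task = Task.init(project_name='YOLOv5', task_name='connectivity-check')
print(task.id)  # the new experiment should now be visible in the ClearML UI
task.close()    # mark the task as completed
```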
- -## 🚀 Training YOLOv5 With ClearML - -To enable ClearML experiment tracking, simply install the ClearML pip package. - -```bash -pip install clearml -``` - -This will enable integration with the YOLOv5 training script. Every training run from now on will be captured and stored by the ClearML experiment manager. If you want to change the `project_name` or `task_name`, head over to our custom logger, `utils/loggers/clearml/clearml_utils.py`, where you can change them (a sketch of that call is shown at the end of this section). - -```bash -python train.py --img 640 --batch 16 --epochs 3 --data coco128.yaml --weights yolov5s.pt --cache -``` - -This will capture: -- Source code + uncommitted changes -- Installed packages -- (Hyper)parameters -- Model files (use `--save-period n` to save a checkpoint every n epochs) -- Console output -- Scalars (mAP_0.5, mAP_0.5:0.95, precision, recall, losses, learning rates, ...) -- General info such as machine details, runtime, creation date etc. -- All produced plots such as label correlogram and confusion matrix -- Images with bounding boxes per epoch -- Mosaic per epoch -- Validation images per epoch -- ... - -That's a lot, right? 🤯 -Now, we can visualize all of this information in the ClearML UI to get an overview of our training progress. Add custom columns to the table view (such as mAP_0.5) so you can easily sort on the best performing model. Or select multiple experiments and directly compare them! - -There's even more we can do with all of this information, like hyperparameter optimization and remote execution, so keep reading if you want to see how that works! -
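For reference, the sketch below mirrors the `Task.init` call made in `utils/loggers/clearml/clearml_utils.py` (that file is reproduced in full later in this diff); `project_name` and `task_name` are the two arguments to edit:

```python
# How the ClearML logger creates its task (mirrors ClearmlLogger.__init__ in clearml_utils.py).
from clearml import Task

task = Task.init(
    project_name='YOLOv5',   # experiment project shown in the ClearML UI
    task_name='training',    # experiment name shown in the ClearML UI
    tags=['YOLOv5'],
    output_uri=True,         # upload model checkpoints to the ClearML server
    auto_connect_frameworks={'pytorch': False},  # checkpoints are logged manually instead of via hooks
)
```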
- -## 🔗 Dataset Version Management - -Versioning your data separately from your code is generally a good idea and makes it easy to acquire the latest version too. This repository supports supplying a dataset version ID, and it will make sure to get the data if it's not there yet. Next to that, this workflow also saves the used dataset ID as part of the task parameters, so you will always know for sure which data was used in which experiment! - -![ClearML Dataset Interface](https://github.com/thepycoder/clearml_screenshots/raw/main/clearml_data.gif) - -### Prepare Your Dataset - -The YOLOv5 repository supports a number of different datasets by using yaml files containing their information. By default, datasets are downloaded to the `../datasets` folder relative to the repository root folder. So if you downloaded the `coco128` dataset using the link in the yaml or with the scripts provided by yolov5, you get this folder structure: - -``` -.. -|_ yolov5 -|_ datasets - |_ coco128 - |_ images - |_ labels - |_ LICENSE - |_ README.txt -``` -But this can be any dataset you wish. Feel free to use your own, as long as you keep to this folder structure. - -Next, ⚠️**copy the corresponding yaml file to the root of the dataset folder**⚠️. This yaml file contains the information ClearML will need to properly use the dataset. You can make this yourself too, of course, just follow the structure of the example yamls. - -Basically we need the following keys: `path`, `train`, `test`, `val`, `nc`, `names`. - -``` -.. -|_ yolov5 -|_ datasets - |_ coco128 - |_ images - |_ labels - |_ coco128.yaml # <---- HERE! - |_ LICENSE - |_ README.txt -``` - -### Upload Your Dataset - -To get this dataset into ClearML as a versioned dataset, go to the dataset root folder and run the following command: -```bash -cd coco128 -clearml-data sync --project YOLOv5 --name coco128 --folder . -``` - -The command `clearml-data sync` is actually a shorthand command. You could also run these commands one after the other (or use the Python API sketched at the end of this section): -```bash -# Optionally add --parent if you want to base -# this version on another dataset version, so no duplicate files are uploaded! -clearml-data create --name coco128 --project YOLOv5 -clearml-data add --files . -clearml-data close -``` - -### Run Training Using A ClearML Dataset - -Now that you have a ClearML dataset, you can very simply use it to train custom YOLOv5 🚀 models! - -```bash -python train.py --img 640 --batch 16 --epochs 3 --data clearml:// --weights yolov5s.pt --cache -``` -
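If you prefer to stay in Python rather than use the CLI, the `clearml-data` commands above map onto the `clearml.Dataset` API. A minimal sketch, assuming it is run from inside the `coco128` folder (`Dataset.get(...).get_local_copy()` is what `construct_dataset` in `clearml_utils.py` uses on the consuming side):

```python
# Programmatic equivalent of `clearml-data sync --project YOLOv5 --name coco128 --folder .`
from clearml import Dataset

ds = Dataset.create(dataset_name='coco128', dataset_project='YOLOv5')
ds.add_files('.')  # stage everything in the current folder, including coco128.yaml
ds.upload()        # push the files to the ClearML server
ds.finalize()      # close this version; its ID is what the clearml:// data argument expects
print(ds.id)
```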
- -## 👀 Hyperparameter Optimization - -Now that we have our experiments and data versioned, it's time to take a look at what we can build on top! - -Using the code information, installed packages and environment details, the experiment itself is now **completely reproducible**. In fact, ClearML allows you to clone an experiment and even change its parameters. We can then just rerun it with these new parameters automatically; this is basically what HPO does! - -To **run hyperparameter optimization locally**, we've included a pre-made script for you. Just make sure a training task has been run at least once, so it is in the ClearML experiment manager; we will essentially clone it and change its hyperparameters. - -You'll need to fill in the ID of this `template task` in the script found at `utils/loggers/clearml/hpo.py` and then just run it :) You can change `task.execute_locally()` to `task.execute()` to put it in a ClearML queue and have a remote agent work on it instead. - -```bash -# To use optuna, install it first, otherwise you can change the optimizer to just be RandomSearch -pip install optuna -python utils/loggers/clearml/hpo.py -``` - -![HPO](https://github.com/thepycoder/clearml_screenshots/raw/main/hpo.png) - -## 🤯 Remote Execution (advanced) - -Running HPO locally is really handy, but what if we want to run our experiments on a remote machine instead? Maybe you have access to a very powerful GPU machine on-site, or you have some budget to use cloud GPUs. -This is where the ClearML Agent comes into play. Check out what the agent can do here: - -- [YouTube video](https://youtu.be/MX3BrXnaULs) -- [Documentation](https://clear.ml/docs/latest/docs/clearml_agent) - -In short: every experiment tracked by the experiment manager contains enough information to reproduce it on a different machine (installed packages, uncommitted changes etc.). So a ClearML agent does just that: it listens to a queue for incoming tasks and when it finds one, it recreates the environment and runs it while still reporting scalars, plots etc. to the experiment manager. - -You can turn any machine (a cloud VM, a local GPU machine, your own laptop ...) into a ClearML agent by simply running: -```bash -clearml-agent daemon --queue [--docker] -``` - -### Cloning, Editing And Enqueuing - -With our agent running, we can give it some work. Remember from the HPO section that we can clone a task and edit the hyperparameters? We can do that from the interface too! - -🪄 Clone the experiment by right-clicking it - -🎯 Edit the hyperparameters to what you wish them to be - -⏳ Enqueue the task to any of the queues by right-clicking it - -![Enqueue a task from the UI](https://github.com/thepycoder/clearml_screenshots/raw/main/enqueue.gif) - -### Executing A Task Remotely - -Now you can clone a task like we explained above, or simply mark your current script by adding `task.execute_remotely()`, and on execution it will be put into a queue for the agent to start working on! - -To run the YOLOv5 training script remotely, all you have to do is add this line to the train.py script after the ClearML logger has been instantiated: -```python -# ... -# Loggers -data_dict = None -if RANK in {-1, 0}: - loggers = Loggers(save_dir, weights, opt, hyp, LOGGER) # loggers instance - if loggers.clearml: - loggers.clearml.task.execute_remotely(queue='my_queue') # <------ ADD THIS LINE - # data_dict is either None if the user did not choose a ClearML dataset, or is filled in by ClearML - data_dict = loggers.clearml.data_dict -# ... 
-``` -When running the training script after this change, python will run the script up until that line, after which it will package the code and send it to the queue instead! - -### Autoscaling workers - -ClearML comes with autoscalers too! This tool will automatically spin up new remote machines in the cloud of your choice (AWS, GCP, Azure) and turn them into ClearML agents for you whenever there are experiments detected in the queue. Once the tasks are processed, the autoscaler will automatically shut down the remote machines and you stop paying! - -Check out the autoscalers getting started video below. - -[![Watch the video](https://img.youtube.com/vi/j4XVMAaUt3E/0.jpg)](https://youtu.be/j4XVMAaUt3E) diff --git a/src/yolov5_ros/src/yolov5/utils/loggers/clearml/__init__.py b/src/yolov5_ros/src/yolov5/utils/loggers/clearml/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/yolov5_ros/src/yolov5/utils/loggers/clearml/clearml_utils.py b/src/yolov5_ros/src/yolov5/utils/loggers/clearml/clearml_utils.py deleted file mode 100644 index 1e13690..0000000 --- a/src/yolov5_ros/src/yolov5/utils/loggers/clearml/clearml_utils.py +++ /dev/null @@ -1,156 +0,0 @@ -"""Main Logger class for ClearML experiment tracking.""" -import glob -import re -from pathlib import Path - -import numpy as np -import yaml - -from utils.plots import Annotator, colors - -try: - import clearml - from clearml import Dataset, Task - assert hasattr(clearml, '__version__') # verify package import not local dir -except (ImportError, AssertionError): - clearml = None - - -def construct_dataset(clearml_info_string): - """Load in a clearml dataset and fill the internal data_dict with its contents. - """ - dataset_id = clearml_info_string.replace('clearml://', '') - dataset = Dataset.get(dataset_id=dataset_id) - dataset_root_path = Path(dataset.get_local_copy()) - - # We'll search for the yaml file definition in the dataset - yaml_filenames = list(glob.glob(str(dataset_root_path / "*.yaml")) + glob.glob(str(dataset_root_path / "*.yml"))) - if len(yaml_filenames) > 1: - raise ValueError('More than one yaml file was found in the dataset root, cannot determine which one contains ' - 'the dataset definition this way.') - elif len(yaml_filenames) == 0: - raise ValueError('No yaml definition found in dataset root path, check that there is a correct yaml file ' - 'inside the dataset root path.') - with open(yaml_filenames[0]) as f: - dataset_definition = yaml.safe_load(f) - - assert set(dataset_definition.keys()).issuperset( - {'train', 'test', 'val', 'nc', 'names'} - ), "The right keys were not found in the yaml file, make sure it at least has the following keys: ('train', 'test', 'val', 'nc', 'names')" - - data_dict = dict() - data_dict['train'] = str( - (dataset_root_path / dataset_definition['train']).resolve()) if dataset_definition['train'] else None - data_dict['test'] = str( - (dataset_root_path / dataset_definition['test']).resolve()) if dataset_definition['test'] else None - data_dict['val'] = str( - (dataset_root_path / dataset_definition['val']).resolve()) if dataset_definition['val'] else None - data_dict['nc'] = dataset_definition['nc'] - data_dict['names'] = dataset_definition['names'] - - return data_dict - - -class ClearmlLogger: - """Log training runs, datasets, models, and predictions to ClearML. - - This logger sends information to ClearML at app.clear.ml or to your own hosted server. 
By default, - this information includes hyperparameters, system configuration and metrics, model metrics, code information and - basic data metrics and analyses. - - By providing additional command line arguments to train.py, datasets, - models and predictions can also be logged. - """ - - def __init__(self, opt, hyp): - """ - - Initialize ClearML Task, this object will capture the experiment - - Upload dataset version to ClearML Data if opt.upload_dataset is True - - arguments: - opt (namespace) -- Commandline arguments for this run - hyp (dict) -- Hyperparameters for this run - - """ - self.current_epoch = 0 - # Keep tracked of amount of logged images to enforce a limit - self.current_epoch_logged_images = set() - # Maximum number of images to log to clearML per epoch - self.max_imgs_to_log_per_epoch = 16 - # Get the interval of epochs when bounding box images should be logged - self.bbox_interval = opt.bbox_interval - self.clearml = clearml - self.task = None - self.data_dict = None - if self.clearml: - self.task = Task.init( - project_name='YOLOv5', - task_name='training', - tags=['YOLOv5'], - output_uri=True, - auto_connect_frameworks={'pytorch': False} - # We disconnect pytorch auto-detection, because we added manual model save points in the code - ) - # ClearML's hooks will already grab all general parameters - # Only the hyperparameters coming from the yaml config file - # will have to be added manually! - self.task.connect(hyp, name='Hyperparameters') - - # Get ClearML Dataset Version if requested - if opt.data.startswith('clearml://'): - # data_dict should have the following keys: - # names, nc (number of classes), test, train, val (all three relative paths to ../datasets) - self.data_dict = construct_dataset(opt.data) - # Set data to data_dict because wandb will crash without this information and opt is the best way - # to give it to them - opt.data = self.data_dict - - def log_debug_samples(self, files, title='Debug Samples'): - """ - Log files (images) as debug samples in the ClearML task. - - arguments: - files (List(PosixPath)) a list of file paths in PosixPath format - title (str) A title that groups together images with the same values - """ - for f in files: - if f.exists(): - it = re.search(r'_batch(\d+)', f.name) - iteration = int(it.groups()[0]) if it else 0 - self.task.get_logger().report_image(title=title, - series=f.name.replace(it.group(), ''), - local_path=str(f), - iteration=iteration) - - def log_image_with_boxes(self, image_path, boxes, class_names, image, conf_threshold=0.25): - """ - Draw the bounding boxes on a single image and report the result as a ClearML debug sample. 
- - arguments: - image_path (PosixPath) the path the original image file - boxes (list): list of scaled predictions in the format - [xmin, ymin, xmax, ymax, confidence, class] - class_names (dict): dict containing mapping of class int to class name - image (Tensor): A torch tensor containing the actual image data - """ - if len(self.current_epoch_logged_images) < self.max_imgs_to_log_per_epoch and self.current_epoch >= 0: - # Log every bbox_interval times and deduplicate for any intermittend extra eval runs - if self.current_epoch % self.bbox_interval == 0 and image_path not in self.current_epoch_logged_images: - im = np.ascontiguousarray(np.moveaxis(image.mul(255).clamp(0, 255).byte().cpu().numpy(), 0, 2)) - annotator = Annotator(im=im, pil=True) - for i, (conf, class_nr, box) in enumerate(zip(boxes[:, 4], boxes[:, 5], boxes[:, :4])): - color = colors(i) - - class_name = class_names[int(class_nr)] - confidence_percentage = round(float(conf) * 100, 2) - label = f"{class_name}: {confidence_percentage}%" - - if conf > conf_threshold: - annotator.rectangle(box.cpu().numpy(), outline=color) - annotator.box_label(box.cpu().numpy(), label=label, color=color) - - annotated_image = annotator.result() - self.task.get_logger().report_image(title='Bounding Boxes', - series=image_path.name, - iteration=self.current_epoch, - image=annotated_image) - self.current_epoch_logged_images.add(image_path) diff --git a/src/yolov5_ros/src/yolov5/utils/loggers/clearml/hpo.py b/src/yolov5_ros/src/yolov5/utils/loggers/clearml/hpo.py deleted file mode 100644 index ee518b0..0000000 --- a/src/yolov5_ros/src/yolov5/utils/loggers/clearml/hpo.py +++ /dev/null @@ -1,84 +0,0 @@ -from clearml import Task -# Connecting ClearML with the current process, -# from here on everything is logged automatically -from clearml.automation import HyperParameterOptimizer, UniformParameterRange -from clearml.automation.optuna import OptimizerOptuna - -task = Task.init(project_name='Hyper-Parameter Optimization', - task_name='YOLOv5', - task_type=Task.TaskTypes.optimizer, - reuse_last_task_id=False) - -# Example use case: -optimizer = HyperParameterOptimizer( - # This is the experiment we want to optimize - base_task_id='', - # here we define the hyper-parameters to optimize - # Notice: The parameter name should exactly match what you see in the UI: / - # For Example, here we see in the base experiment a section Named: "General" - # under it a parameter named "batch_size", this becomes "General/batch_size" - # If you have `argparse` for example, then arguments will appear under the "Args" section, - # and you should instead pass "Args/batch_size" - hyper_parameters=[ - UniformParameterRange('Hyperparameters/lr0', min_value=1e-5, max_value=1e-1), - UniformParameterRange('Hyperparameters/lrf', min_value=0.01, max_value=1.0), - UniformParameterRange('Hyperparameters/momentum', min_value=0.6, max_value=0.98), - UniformParameterRange('Hyperparameters/weight_decay', min_value=0.0, max_value=0.001), - UniformParameterRange('Hyperparameters/warmup_epochs', min_value=0.0, max_value=5.0), - UniformParameterRange('Hyperparameters/warmup_momentum', min_value=0.0, max_value=0.95), - UniformParameterRange('Hyperparameters/warmup_bias_lr', min_value=0.0, max_value=0.2), - UniformParameterRange('Hyperparameters/box', min_value=0.02, max_value=0.2), - UniformParameterRange('Hyperparameters/cls', min_value=0.2, max_value=4.0), - UniformParameterRange('Hyperparameters/cls_pw', min_value=0.5, max_value=2.0), - UniformParameterRange('Hyperparameters/obj', 
min_value=0.2, max_value=4.0), - UniformParameterRange('Hyperparameters/obj_pw', min_value=0.5, max_value=2.0), - UniformParameterRange('Hyperparameters/iou_t', min_value=0.1, max_value=0.7), - UniformParameterRange('Hyperparameters/anchor_t', min_value=2.0, max_value=8.0), - UniformParameterRange('Hyperparameters/fl_gamma', min_value=0.0, max_value=4.0), - UniformParameterRange('Hyperparameters/hsv_h', min_value=0.0, max_value=0.1), - UniformParameterRange('Hyperparameters/hsv_s', min_value=0.0, max_value=0.9), - UniformParameterRange('Hyperparameters/hsv_v', min_value=0.0, max_value=0.9), - UniformParameterRange('Hyperparameters/degrees', min_value=0.0, max_value=45.0), - UniformParameterRange('Hyperparameters/translate', min_value=0.0, max_value=0.9), - UniformParameterRange('Hyperparameters/scale', min_value=0.0, max_value=0.9), - UniformParameterRange('Hyperparameters/shear', min_value=0.0, max_value=10.0), - UniformParameterRange('Hyperparameters/perspective', min_value=0.0, max_value=0.001), - UniformParameterRange('Hyperparameters/flipud', min_value=0.0, max_value=1.0), - UniformParameterRange('Hyperparameters/fliplr', min_value=0.0, max_value=1.0), - UniformParameterRange('Hyperparameters/mosaic', min_value=0.0, max_value=1.0), - UniformParameterRange('Hyperparameters/mixup', min_value=0.0, max_value=1.0), - UniformParameterRange('Hyperparameters/copy_paste', min_value=0.0, max_value=1.0)], - # this is the objective metric we want to maximize/minimize - objective_metric_title='metrics', - objective_metric_series='mAP_0.5', - # now we decide if we want to maximize it or minimize it (accuracy we maximize) - objective_metric_sign='max', - # let us limit the number of concurrent experiments, - # this in turn will make sure we do dont bombard the scheduler with experiments. 
- # if we have an auto-scaler connected, this, by proxy, will limit the number of machines - max_number_of_concurrent_tasks=1, - # this is the optimizer class (actually doing the optimization) - # Currently, we can choose from GridSearch, RandomSearch or OptimizerBOHB (Bayesian optimization Hyper-Band) - optimizer_class=OptimizerOptuna, - # If specified, only the top K performing Tasks will be kept, the others will be automatically archived - save_top_k_tasks_only=5, - compute_time_limit=None, - total_max_jobs=20, - min_iteration_per_job=None, - max_iteration_per_job=None, -) - -# report every 10 seconds; this is way too often, but we are testing here -optimizer.set_report_period(10 / 60) -# You can also use the line below instead to run all the optimizer tasks locally, without using queues or agent -# an_optimizer.start_locally(job_complete_callback=job_complete_callback) -# set the time limit for the optimization process (2 hours) -optimizer.set_time_limit(in_minutes=120.0) -# Start the optimization process in the local environment -optimizer.start_locally() -# wait until process is done (notice we are controlling the optimization process in the background) -optimizer.wait() -# make sure background optimization stopped -optimizer.stop() - -print('We are done, goodbye') diff --git a/src/yolov5_ros/src/yolov5/utils/loggers/comet/README.md b/src/yolov5_ros/src/yolov5/utils/loggers/comet/README.md deleted file mode 100644 index 7b0b8e0..0000000 --- a/src/yolov5_ros/src/yolov5/utils/loggers/comet/README.md +++ /dev/null @@ -1,256 +0,0 @@ - - -# YOLOv5 with Comet - -This guide will cover how to use YOLOv5 with [Comet](https://www.comet.com/site/?ref=yolov5&utm_source=yolov5&utm_medium=affilliate&utm_campaign=yolov5_comet_integration). - -# About Comet - -Comet builds tools that help data scientists, engineers, and team leaders accelerate and optimize machine learning and deep learning models. - -Track and visualize model metrics in real time, save your hyperparameters, datasets, and model checkpoints, and visualize your model predictions with [Comet Custom Panels](https://www.comet.com/examples/comet-example-yolov5?shareable=YcwMiJaZSXfcEXpGOHDD12vA1&ref=yolov5&utm_source=yolov5&utm_medium=affilliate&utm_campaign=yolov5_comet_integration)! -Comet makes sure you never lose track of your work and makes it easy to share results and collaborate across teams of all sizes! - -# Getting Started - -## Install Comet - -```shell -pip install comet_ml -``` - -## Configure Comet Credentials - -There are two ways to configure Comet with YOLOv5. - -You can either set your credentials through environment variables: - -**Environment Variables** - -```shell -export COMET_API_KEY= -export COMET_PROJECT_NAME= # This will default to 'yolov5' -``` - -Or create a `.comet.config` file in your working directory and set your credentials there. - -**Comet Configuration File** - -``` -[comet] -api_key= -project_name= # This will default to 'yolov5' -``` - -## Run the Training Script - -```shell -# Train YOLOv5s on COCO128 for 5 epochs -python train.py --img 640 --batch 16 --epochs 5 --data coco128.yaml --weights yolov5s.pt -``` - -That's it! Comet will automatically log your hyperparameters, command line arguments, and training and validation metrics. You can visualize and analyze your runs in the Comet UI. - -yolo-ui - -# Try out an Example!
-Check out an example of a [completed run here](https://www.comet.com/examples/comet-example-yolov5/a0e29e0e9b984e4a822db2a62d0cb357?experiment-tab=chart&showOutliers=true&smoothing=0&transformY=smoothing&xAxis=step&ref=yolov5&utm_source=yolov5&utm_medium=affilliate&utm_campaign=yolov5_comet_integration)
-
-Or better yet, try it out yourself in this Colab Notebook
-
-[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1RG0WOQyxlDlo5Km8GogJpIEJlg_5lyYO?usp=sharing)
-
-# Log automatically
-
-By default, Comet will log the following items:
-
-## Metrics
-
-- Box Loss, Object Loss, Classification Loss for the training and validation data
-- mAP_0.5, mAP_0.5:0.95 metrics for the validation data
-- Precision and Recall for the validation data
-
-## Parameters
-
-- Model Hyperparameters
-- All parameters passed through the command line options
-
-## Visualizations
-
-- Confusion Matrix of the model predictions on the validation data
-- Plots for the PR and F1 curves across all classes
-- Correlogram of the Class Labels
-
-# Configure Comet Logging
-
-Comet can be configured to log additional data either through command line flags passed to the training script
-or through environment variables.
-
-```shell
-export COMET_MODE=online # Set whether to run Comet in 'online' or 'offline' mode. Defaults to online
-export COMET_MODEL_NAME=<your model name> # Set the name for the saved model. Defaults to yolov5
-export COMET_LOG_CONFUSION_MATRIX=false # Set to disable logging a Comet Confusion Matrix. Defaults to true
-export COMET_MAX_IMAGE_UPLOADS=<number of images> # Controls how many total image predictions to log to Comet. Defaults to 100.
-export COMET_LOG_PER_CLASS_METRICS=true # Set to log evaluation metrics for each detected class at the end of training. Defaults to false
-export COMET_DEFAULT_CHECKPOINT_FILENAME=<your checkpoint filename> # Set this if you would like to resume training from a different checkpoint. Defaults to 'last.pt'
-export COMET_LOG_BATCH_LEVEL_METRICS=true # Set this if you would like to log training metrics at the batch level. Defaults to false.
-export COMET_LOG_PREDICTIONS=true # Set this to false to disable logging model predictions
-```
-
-## Logging Checkpoints with Comet
-
-Logging Models to Comet is disabled by default. To enable it, pass the `save-period` argument to the training script. This will save the
-logged checkpoints to Comet based on the interval value provided by `save-period`.
-
-```shell
-python train.py \
---img 640 \
---batch 16 \
---epochs 5 \
---data coco128.yaml \
---weights yolov5s.pt \
---save-period 1
-```
-
-## Logging Model Predictions
-
-By default, model predictions (images, ground truth labels and bounding boxes) will be logged to Comet.
-
-You can control the frequency of logged predictions and the associated images by passing the `bbox_interval` command line argument. Predictions can be visualized using Comet's Object Detection Custom Panel. This frequency corresponds to every Nth batch of data per epoch. In the example below, we are logging every 2nd batch of data for each epoch.
-
-**Note:** The YOLOv5 validation dataloader will default to a batch size of 32, so you will have to set the logging frequency accordingly.
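To make the interplay between `bbox_interval` and the validation batch size concrete, here is a minimal, self-contained sketch of an every-Nth-batch check; the function and variable names are this example's inventions, not the integration's internals:

```python
# Hedged sketch: mirrors the "log every Nth validation batch" behaviour
# described above; `should_log` is a made-up helper, not YOLOv5 code.

def should_log(batch_idx: int, bbox_interval: int) -> bool:
    """True for every Nth batch, counting batches from 1."""
    return (batch_idx + 1) % bbox_interval == 0

num_batches = 8       # e.g. 256 validation images / batch size 32
bbox_interval = 2     # log every 2nd batch, as in the example below

logged = [b for b in range(num_batches) if should_log(b, bbox_interval)]
print(f"batch indices logged per epoch: {logged}")  # -> [1, 3, 5, 7]
```

With a batch size of 32, `--bbox_interval 2` would therefore select roughly 128 of those 256 images per epoch, still capped by `COMET_MAX_IMAGE_UPLOADS`.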
-
-Here is an [example project using the Panel](https://www.comet.com/examples/comet-example-yolov5?shareable=YcwMiJaZSXfcEXpGOHDD12vA1&ref=yolov5&utm_source=yolov5&utm_medium=affilliate&utm_campaign=yolov5_comet_integration)
-
-```shell
-python train.py \
---img 640 \
---batch 16 \
---epochs 5 \
---data coco128.yaml \
---weights yolov5s.pt \
---bbox_interval 2
-```
-
-### Controlling the number of Prediction Images logged to Comet
-
-When logging predictions from YOLOv5, Comet will log the images associated with each set of predictions. By default, a maximum of 100 validation images are logged. You can increase or decrease this number using the `COMET_MAX_IMAGE_UPLOADS` environment variable.
-
-```shell
-env COMET_MAX_IMAGE_UPLOADS=200 python train.py \
---img 640 \
---batch 16 \
---epochs 5 \
---data coco128.yaml \
---weights yolov5s.pt \
---bbox_interval 1
-```
-
-### Logging Class Level Metrics
-
-Use the `COMET_LOG_PER_CLASS_METRICS` environment variable to log mAP, precision, recall, and f1 for each class.
-
-```shell
-env COMET_LOG_PER_CLASS_METRICS=true python train.py \
---img 640 \
---batch 16 \
---epochs 5 \
---data coco128.yaml \
---weights yolov5s.pt
-```
-
-## Uploading a Dataset to Comet Artifacts
-
-If you would like to store your data using [Comet Artifacts](https://www.comet.com/docs/v2/guides/data-management/using-artifacts/#learn-more?ref=yolov5&utm_source=yolov5&utm_medium=affilliate&utm_campaign=yolov5_comet_integration), you can do so using the `upload_dataset` flag.
-
-The dataset must be organized in the way described in the [YOLOv5 documentation](https://docs.ultralytics.com/tutorials/train-custom-datasets/#3-organize-directories). The dataset config `yaml` file must follow the same format as that of the `coco128.yaml` file.
-
-```shell
-python train.py \
---img 640 \
---batch 16 \
---epochs 5 \
---data coco128.yaml \
---weights yolov5s.pt \
---upload_dataset
-```
-
-You can find the uploaded dataset in the Artifacts tab in your Comet Workspace
-artifact-1
-
-You can preview the data directly in the Comet UI.
-artifact-2
-
-Artifacts are versioned and also support adding metadata about the dataset. Comet will automatically log the metadata from your dataset `yaml` file.
-artifact-3
-
-### Using a saved Artifact
-
-If you would like to use a dataset from Comet Artifacts, set the `path` variable in your dataset `yaml` file to point to the following Artifact resource URL.
-
-```
-# contents of artifact.yaml file
-path: "comet://<workspace name>/<artifact name>:<artifact version or alias>"
-```
-Then pass this file to your training script in the following way:
-
-```shell
-python train.py \
---img 640 \
---batch 16 \
---epochs 5 \
---data artifact.yaml \
---weights yolov5s.pt
-```
-
-Artifacts also allow you to track the lineage of data as it flows through your experimentation workflow. Here you can see a graph that shows you all the experiments that have used your uploaded dataset.
-artifact-4
-
-## Resuming a Training Run
-
-If your training run is interrupted for any reason, e.g. a disrupted internet connection, you can resume the run using the `resume` flag and the Comet Run Path.
-
-The Run Path has the following format: `comet://<your workspace name>/<your project name>/<experiment id>`.
-
-This will restore the run to its state before the interruption, which includes restoring the model from a checkpoint, restoring all hyperparameters and training arguments, and downloading Comet dataset Artifacts if they were used in the original run.
The resumed run will continue logging to the existing Experiment in the Comet UI.
-
-```shell
-python train.py \
---resume "comet://<your run path>"
-```
-
-## Hyperparameter Search with the Comet Optimizer
-
-YOLOv5 is also integrated with Comet's Optimizer, making it simple to visualize hyperparameter sweeps in the Comet UI.
-
-### Configuring an Optimizer Sweep
-
-To configure the Comet Optimizer, you will have to create a JSON file with the information about the sweep. An example file has been provided in `utils/loggers/comet/optimizer_config.json`.
-
-```shell
-python utils/loggers/comet/hpo.py \
-  --comet_optimizer_config "utils/loggers/comet/optimizer_config.json"
-```
-
-The `hpo.py` script accepts the same arguments as `train.py`. If you wish to pass additional arguments to your sweep, simply add them after
-the script.
-
-```shell
-python utils/loggers/comet/hpo.py \
-  --comet_optimizer_config "utils/loggers/comet/optimizer_config.json" \
-  --save-period 1 \
-  --bbox_interval 1
-```
-
-### Running a Sweep in Parallel
-
-```shell
-comet optimizer -j <number of parallel workers> utils/loggers/comet/hpo.py \
-  utils/loggers/comet/optimizer_config.json
-```
-
-### Visualizing Results
-
-Comet provides a number of ways to visualize the results of your sweep. Take a look at a [project with a completed sweep here](https://www.comet.com/examples/comet-example-yolov5/view/PrlArHGuuhDTKC1UuBmTtOSXD/panels?ref=yolov5&utm_source=yolov5&utm_medium=affilliate&utm_campaign=yolov5_comet_integration)
-
-hyperparameter-yolo
\ No newline at end of file
diff --git a/src/yolov5_ros/src/yolov5/utils/loggers/comet/__init__.py b/src/yolov5_ros/src/yolov5/utils/loggers/comet/__init__.py
deleted file mode 100644
index 4ee86dd..0000000
--- a/src/yolov5_ros/src/yolov5/utils/loggers/comet/__init__.py
+++ /dev/null
@@ -1,496 +0,0 @@
-import glob
-import json
-import logging
-import os
-import sys
-from pathlib import Path
-
-logger = logging.getLogger(__name__)
-
-FILE = Path(__file__).resolve()
-ROOT = FILE.parents[3]  # YOLOv5 root directory
-if str(ROOT) not in sys.path:
-    sys.path.append(str(ROOT))  # add ROOT to PATH
-
-try:
-    import comet_ml
-
-    # Project Configuration
-    config = comet_ml.config.get_config()
-    COMET_PROJECT_NAME = config.get_string(os.getenv("COMET_PROJECT_NAME"), "comet.project_name", default="yolov5")
-except (ModuleNotFoundError, ImportError):
-    comet_ml = None
-    COMET_PROJECT_NAME = None
-
-import torch
-import torchvision.transforms as T
-import yaml
-
-from utils.dataloaders import img2label_paths
-from utils.general import check_dataset, scale_coords, xywh2xyxy
-from utils.metrics import box_iou
-
-COMET_PREFIX = "comet://"
-
-COMET_MODE = os.getenv("COMET_MODE", "online")
-
-# Model Saving Settings
-COMET_MODEL_NAME = os.getenv("COMET_MODEL_NAME", "yolov5")
-
-# Dataset Artifact Settings
-COMET_UPLOAD_DATASET = os.getenv("COMET_UPLOAD_DATASET", "false").lower() == "true"
-
-# Evaluation Settings
-COMET_LOG_CONFUSION_MATRIX = os.getenv("COMET_LOG_CONFUSION_MATRIX", "true").lower() == "true"
-COMET_LOG_PREDICTIONS = os.getenv("COMET_LOG_PREDICTIONS", "true").lower() == "true"
-COMET_MAX_IMAGE_UPLOADS = int(os.getenv("COMET_MAX_IMAGE_UPLOADS", 100))
-
-# Confusion Matrix Settings
-CONF_THRES = float(os.getenv("CONF_THRES", 0.001))
-IOU_THRES = float(os.getenv("IOU_THRES", 0.6))
-
-# Batch Logging Settings
-COMET_LOG_BATCH_METRICS = os.getenv("COMET_LOG_BATCH_METRICS", "false").lower() == "true"
-COMET_BATCH_LOGGING_INTERVAL = os.getenv("COMET_BATCH_LOGGING_INTERVAL", 1)
-COMET_PREDICTION_LOGGING_INTERVAL = 
os.getenv("COMET_PREDICTION_LOGGING_INTERVAL", 1) -COMET_LOG_PER_CLASS_METRICS = os.getenv("COMET_LOG_PER_CLASS_METRICS", "false").lower() == "true" - -RANK = int(os.getenv("RANK", -1)) - -to_pil = T.ToPILImage() - - -class CometLogger: - """Log metrics, parameters, source code, models and much more - with Comet - """ - - def __init__(self, opt, hyp, run_id=None, job_type="Training", **experiment_kwargs) -> None: - self.job_type = job_type - self.opt = opt - self.hyp = hyp - - # Comet Flags - self.comet_mode = COMET_MODE - - self.save_model = opt.save_period > -1 - self.model_name = COMET_MODEL_NAME - - # Batch Logging Settings - self.log_batch_metrics = COMET_LOG_BATCH_METRICS - self.comet_log_batch_interval = COMET_BATCH_LOGGING_INTERVAL - - # Dataset Artifact Settings - self.upload_dataset = self.opt.upload_dataset if self.opt.upload_dataset else COMET_UPLOAD_DATASET - self.resume = self.opt.resume - - # Default parameters to pass to Experiment objects - self.default_experiment_kwargs = { - "log_code": False, - "log_env_gpu": True, - "log_env_cpu": True, - "project_name": COMET_PROJECT_NAME,} - self.default_experiment_kwargs.update(experiment_kwargs) - self.experiment = self._get_experiment(self.comet_mode, run_id) - - self.data_dict = self.check_dataset(self.opt.data) - self.class_names = self.data_dict["names"] - self.num_classes = self.data_dict["nc"] - - self.logged_images_count = 0 - self.max_images = COMET_MAX_IMAGE_UPLOADS - - if run_id is None: - self.experiment.log_other("Created from", "YOLOv5") - if not isinstance(self.experiment, comet_ml.OfflineExperiment): - workspace, project_name, experiment_id = self.experiment.url.split("/")[-3:] - self.experiment.log_other( - "Run Path", - f"{workspace}/{project_name}/{experiment_id}", - ) - self.log_parameters(vars(opt)) - self.log_parameters(self.opt.hyp) - self.log_asset_data( - self.opt.hyp, - name="hyperparameters.json", - metadata={"type": "hyp-config-file"}, - ) - self.log_asset( - f"{self.opt.save_dir}/opt.yaml", - metadata={"type": "opt-config-file"}, - ) - - self.comet_log_confusion_matrix = COMET_LOG_CONFUSION_MATRIX - - if hasattr(self.opt, "conf_thres"): - self.conf_thres = self.opt.conf_thres - else: - self.conf_thres = CONF_THRES - if hasattr(self.opt, "iou_thres"): - self.iou_thres = self.opt.iou_thres - else: - self.iou_thres = IOU_THRES - - self.comet_log_predictions = COMET_LOG_PREDICTIONS - if self.opt.bbox_interval == -1: - self.comet_log_prediction_interval = 1 if self.opt.epochs < 10 else self.opt.epochs // 10 - else: - self.comet_log_prediction_interval = self.opt.bbox_interval - - if self.comet_log_predictions: - self.metadata_dict = {} - - self.comet_log_per_class_metrics = COMET_LOG_PER_CLASS_METRICS - - self.experiment.log_others({ - "comet_mode": COMET_MODE, - "comet_max_image_uploads": COMET_MAX_IMAGE_UPLOADS, - "comet_log_per_class_metrics": COMET_LOG_PER_CLASS_METRICS, - "comet_log_batch_metrics": COMET_LOG_BATCH_METRICS, - "comet_log_confusion_matrix": COMET_LOG_CONFUSION_MATRIX, - "comet_model_name": COMET_MODEL_NAME,}) - - # Check if running the Experiment with the Comet Optimizer - if hasattr(self.opt, "comet_optimizer_id"): - self.experiment.log_other("optimizer_id", self.opt.comet_optimizer_id) - self.experiment.log_other("optimizer_objective", self.opt.comet_optimizer_objective) - self.experiment.log_other("optimizer_metric", self.opt.comet_optimizer_metric) - self.experiment.log_other("optimizer_parameters", json.dumps(self.hyp)) - - def _get_experiment(self, mode, experiment_id=None): - if mode 
== "offline": - if experiment_id is not None: - return comet_ml.ExistingOfflineExperiment( - previous_experiment=experiment_id, - **self.default_experiment_kwargs, - ) - - return comet_ml.OfflineExperiment(**self.default_experiment_kwargs,) - - else: - try: - if experiment_id is not None: - return comet_ml.ExistingExperiment( - previous_experiment=experiment_id, - **self.default_experiment_kwargs, - ) - - return comet_ml.Experiment(**self.default_experiment_kwargs) - - except ValueError: - logger.warning("COMET WARNING: " - "Comet credentials have not been set. " - "Comet will default to offline logging. " - "Please set your credentials to enable online logging.") - return self._get_experiment("offline", experiment_id) - - return - - def log_metrics(self, log_dict, **kwargs): - self.experiment.log_metrics(log_dict, **kwargs) - - def log_parameters(self, log_dict, **kwargs): - self.experiment.log_parameters(log_dict, **kwargs) - - def log_asset(self, asset_path, **kwargs): - self.experiment.log_asset(asset_path, **kwargs) - - def log_asset_data(self, asset, **kwargs): - self.experiment.log_asset_data(asset, **kwargs) - - def log_image(self, img, **kwargs): - self.experiment.log_image(img, **kwargs) - - def log_model(self, path, opt, epoch, fitness_score, best_model=False): - if not self.save_model: - return - - model_metadata = { - "fitness_score": fitness_score[-1], - "epochs_trained": epoch + 1, - "save_period": opt.save_period, - "total_epochs": opt.epochs,} - - model_files = glob.glob(f"{path}/*.pt") - for model_path in model_files: - name = Path(model_path).name - - self.experiment.log_model( - self.model_name, - file_or_folder=model_path, - file_name=name, - metadata=model_metadata, - overwrite=True, - ) - - def check_dataset(self, data_file): - with open(data_file) as f: - data_config = yaml.safe_load(f) - - if data_config['path'].startswith(COMET_PREFIX): - path = data_config['path'].replace(COMET_PREFIX, "") - data_dict = self.download_dataset_artifact(path) - - return data_dict - - self.log_asset(self.opt.data, metadata={"type": "data-config-file"}) - - return check_dataset(data_file) - - def log_predictions(self, image, labelsn, path, shape, predn): - if self.logged_images_count >= self.max_images: - return - detections = predn[predn[:, 4] > self.conf_thres] - iou = box_iou(labelsn[:, 1:], detections[:, :4]) - mask, _ = torch.where(iou > self.iou_thres) - if len(mask) == 0: - return - - filtered_detections = detections[mask] - filtered_labels = labelsn[mask] - - processed_image = (image * 255).to(torch.uint8) - - image_id = path.split("/")[-1].split(".")[0] - image_name = f"{image_id}_curr_epoch_{self.experiment.curr_epoch}" - self.log_image(to_pil(processed_image), name=image_name) - - metadata = [] - for cls, *xyxy in filtered_labels.tolist(): - metadata.append({ - "label": f"{self.class_names[int(cls)]}-gt", - "score": 100, - "box": { - "x": xyxy[0], - "y": xyxy[1], - "x2": xyxy[2], - "y2": xyxy[3]},}) - for *xyxy, conf, cls in filtered_detections.tolist(): - metadata.append({ - "label": f"{self.class_names[int(cls)]}", - "score": conf * 100, - "box": { - "x": xyxy[0], - "y": xyxy[1], - "x2": xyxy[2], - "y2": xyxy[3]},}) - - self.metadata_dict[image_name] = metadata - self.logged_images_count += 1 - - return - - def preprocess_prediction(self, image, labels, shape, pred): - nl, _ = labels.shape[0], pred.shape[0] - - # Predictions - if self.opt.single_cls: - pred[:, 5] = 0 - - predn = pred.clone() - scale_coords(image.shape[1:], predn[:, :4], shape[0], shape[1]) - - labelsn = 
None - if nl: - tbox = xywh2xyxy(labels[:, 1:5]) # target boxes - scale_coords(image.shape[1:], tbox, shape[0], shape[1]) # native-space labels - labelsn = torch.cat((labels[:, 0:1], tbox), 1) # native-space labels - scale_coords(image.shape[1:], predn[:, :4], shape[0], shape[1]) # native-space pred - - return predn, labelsn - - def add_assets_to_artifact(self, artifact, path, asset_path, split): - img_paths = sorted(glob.glob(f"{asset_path}/*")) - label_paths = img2label_paths(img_paths) - - for image_file, label_file in zip(img_paths, label_paths): - image_logical_path, label_logical_path = map(lambda x: os.path.relpath(x, path), [image_file, label_file]) - - try: - artifact.add(image_file, logical_path=image_logical_path, metadata={"split": split}) - artifact.add(label_file, logical_path=label_logical_path, metadata={"split": split}) - except ValueError as e: - logger.error('COMET ERROR: Error adding file to Artifact. Skipping file.') - logger.error(f"COMET ERROR: {e}") - continue - - return artifact - - def upload_dataset_artifact(self): - dataset_name = self.data_dict.get("dataset_name", "yolov5-dataset") - path = str((ROOT / Path(self.data_dict["path"])).resolve()) - - metadata = self.data_dict.copy() - for key in ["train", "val", "test"]: - split_path = metadata.get(key) - if split_path is not None: - metadata[key] = split_path.replace(path, "") - - artifact = comet_ml.Artifact(name=dataset_name, artifact_type="dataset", metadata=metadata) - for key in metadata.keys(): - if key in ["train", "val", "test"]: - if isinstance(self.upload_dataset, str) and (key != self.upload_dataset): - continue - - asset_path = self.data_dict.get(key) - if asset_path is not None: - artifact = self.add_assets_to_artifact(artifact, path, asset_path, key) - - self.experiment.log_artifact(artifact) - - return - - def download_dataset_artifact(self, artifact_path): - logged_artifact = self.experiment.get_artifact(artifact_path) - artifact_save_dir = str(Path(self.opt.save_dir) / logged_artifact.name) - logged_artifact.download(artifact_save_dir) - - metadata = logged_artifact.metadata - data_dict = metadata.copy() - data_dict["path"] = artifact_save_dir - data_dict["names"] = {int(k): v for k, v in metadata.get("names").items()} - - data_dict = self.update_data_paths(data_dict) - return data_dict - - def update_data_paths(self, data_dict): - path = data_dict.get("path", "") - - for split in ["train", "val", "test"]: - if data_dict.get(split): - split_path = data_dict.get(split) - data_dict[split] = (f"{path}/{split_path}" if isinstance(split, str) else [ - f"{path}/{x}" for x in split_path]) - - return data_dict - - def on_pretrain_routine_end(self, paths): - if self.opt.resume: - return - - for path in paths: - self.log_asset(str(path)) - - if self.upload_dataset: - if not self.resume: - self.upload_dataset_artifact() - - return - - def on_train_start(self): - self.log_parameters(self.hyp) - - def on_train_epoch_start(self): - return - - def on_train_epoch_end(self, epoch): - self.experiment.curr_epoch = epoch - - return - - def on_train_batch_start(self): - return - - def on_train_batch_end(self, log_dict, step): - self.experiment.curr_step = step - if self.log_batch_metrics and (step % self.comet_log_batch_interval == 0): - self.log_metrics(log_dict, step=step) - - return - - def on_train_end(self, files, save_dir, last, best, epoch, results): - if self.comet_log_predictions: - curr_epoch = self.experiment.curr_epoch - self.experiment.log_asset_data(self.metadata_dict, "image-metadata.json", 
epoch=curr_epoch) - - for f in files: - self.log_asset(f, metadata={"epoch": epoch}) - self.log_asset(f"{save_dir}/results.csv", metadata={"epoch": epoch}) - - if not self.opt.evolve: - model_path = str(best if best.exists() else last) - name = Path(model_path).name - if self.save_model: - self.experiment.log_model( - self.model_name, - file_or_folder=model_path, - file_name=name, - overwrite=True, - ) - - # Check if running Experiment with Comet Optimizer - if hasattr(self.opt, 'comet_optimizer_id'): - metric = results.get(self.opt.comet_optimizer_metric) - self.experiment.log_other('optimizer_metric_value', metric) - - self.finish_run() - - def on_val_start(self): - return - - def on_val_batch_start(self): - return - - def on_val_batch_end(self, batch_i, images, targets, paths, shapes, outputs): - if not (self.comet_log_predictions and ((batch_i + 1) % self.comet_log_prediction_interval == 0)): - return - - for si, pred in enumerate(outputs): - if len(pred) == 0: - continue - - image = images[si] - labels = targets[targets[:, 0] == si, 1:] - shape = shapes[si] - path = paths[si] - predn, labelsn = self.preprocess_prediction(image, labels, shape, pred) - if labelsn is not None: - self.log_predictions(image, labelsn, path, shape, predn) - - return - - def on_val_end(self, nt, tp, fp, p, r, f1, ap, ap50, ap_class, confusion_matrix): - if self.comet_log_per_class_metrics: - if self.num_classes > 1: - for i, c in enumerate(ap_class): - class_name = self.class_names[c] - self.experiment.log_metrics( - { - 'mAP@.5': ap50[i], - 'mAP@.5:.95': ap[i], - 'precision': p[i], - 'recall': r[i], - 'f1': f1[i], - 'true_positives': tp[i], - 'false_positives': fp[i], - 'support': nt[c]}, - prefix=class_name) - - if self.comet_log_confusion_matrix: - epoch = self.experiment.curr_epoch - class_names = list(self.class_names.values()) - class_names.append("background") - num_classes = len(class_names) - - self.experiment.log_confusion_matrix( - matrix=confusion_matrix.matrix, - max_categories=num_classes, - labels=class_names, - epoch=epoch, - column_label='Actual Category', - row_label='Predicted Category', - file_name=f"confusion-matrix-epoch-{epoch}.json", - ) - - def on_fit_epoch_end(self, result, epoch): - self.log_metrics(result, epoch=epoch) - - def on_model_save(self, last, epoch, final_epoch, best_fitness, fi): - if ((epoch + 1) % self.opt.save_period == 0 and not final_epoch) and self.opt.save_period != -1: - self.log_model(last.parent, self.opt, epoch, fi, best_model=best_fitness == fi) - - def on_params_update(self, params): - self.log_parameters(params) - - def finish_run(self): - self.experiment.end() diff --git a/src/yolov5_ros/src/yolov5/utils/loggers/comet/comet_utils.py b/src/yolov5_ros/src/yolov5/utils/loggers/comet/comet_utils.py deleted file mode 100644 index 3cbd451..0000000 --- a/src/yolov5_ros/src/yolov5/utils/loggers/comet/comet_utils.py +++ /dev/null @@ -1,150 +0,0 @@ -import logging -import os -from urllib.parse import urlparse - -try: - import comet_ml -except (ModuleNotFoundError, ImportError): - comet_ml = None - -import yaml - -logger = logging.getLogger(__name__) - -COMET_PREFIX = "comet://" -COMET_MODEL_NAME = os.getenv("COMET_MODEL_NAME", "yolov5") -COMET_DEFAULT_CHECKPOINT_FILENAME = os.getenv("COMET_DEFAULT_CHECKPOINT_FILENAME", "last.pt") - - -def download_model_checkpoint(opt, experiment): - model_dir = f"{opt.project}/{experiment.name}" - os.makedirs(model_dir, exist_ok=True) - - model_name = COMET_MODEL_NAME - model_asset_list = experiment.get_model_asset_list(model_name) 
- - if len(model_asset_list) == 0: - logger.error(f"COMET ERROR: No checkpoints found for model name : {model_name}") - return - - model_asset_list = sorted( - model_asset_list, - key=lambda x: x["step"], - reverse=True, - ) - logged_checkpoint_map = {asset["fileName"]: asset["assetId"] for asset in model_asset_list} - - resource_url = urlparse(opt.weights) - checkpoint_filename = resource_url.query - - if checkpoint_filename: - asset_id = logged_checkpoint_map.get(checkpoint_filename) - else: - asset_id = logged_checkpoint_map.get(COMET_DEFAULT_CHECKPOINT_FILENAME) - checkpoint_filename = COMET_DEFAULT_CHECKPOINT_FILENAME - - if asset_id is None: - logger.error(f"COMET ERROR: Checkpoint {checkpoint_filename} not found in the given Experiment") - return - - try: - logger.info(f"COMET INFO: Downloading checkpoint {checkpoint_filename}") - asset_filename = checkpoint_filename - - model_binary = experiment.get_asset(asset_id, return_type="binary", stream=False) - model_download_path = f"{model_dir}/{asset_filename}" - with open(model_download_path, "wb") as f: - f.write(model_binary) - - opt.weights = model_download_path - - except Exception as e: - logger.warning("COMET WARNING: Unable to download checkpoint from Comet") - logger.exception(e) - - -def set_opt_parameters(opt, experiment): - """Update the opts Namespace with parameters - from Comet's ExistingExperiment when resuming a run - - Args: - opt (argparse.Namespace): Namespace of command line options - experiment (comet_ml.APIExperiment): Comet API Experiment object - """ - asset_list = experiment.get_asset_list() - resume_string = opt.resume - - for asset in asset_list: - if asset["fileName"] == "opt.yaml": - asset_id = asset["assetId"] - asset_binary = experiment.get_asset(asset_id, return_type="binary", stream=False) - opt_dict = yaml.safe_load(asset_binary) - for key, value in opt_dict.items(): - setattr(opt, key, value) - opt.resume = resume_string - - # Save hyperparameters to YAML file - # Necessary to pass checks in training script - save_dir = f"{opt.project}/{experiment.name}" - os.makedirs(save_dir, exist_ok=True) - - hyp_yaml_path = f"{save_dir}/hyp.yaml" - with open(hyp_yaml_path, "w") as f: - yaml.dump(opt.hyp, f) - opt.hyp = hyp_yaml_path - - -def check_comet_weights(opt): - """Downloads model weights from Comet and updates the - weights path to point to saved weights location - - Args: - opt (argparse.Namespace): Command Line arguments passed - to YOLOv5 training script - - Returns: - None/bool: Return True if weights are successfully downloaded - else return None - """ - if comet_ml is None: - return - - if isinstance(opt.weights, str): - if opt.weights.startswith(COMET_PREFIX): - api = comet_ml.API() - resource = urlparse(opt.weights) - experiment_path = f"{resource.netloc}{resource.path}" - experiment = api.get(experiment_path) - download_model_checkpoint(opt, experiment) - return True - - return None - - -def check_comet_resume(opt): - """Restores run parameters to its original state based on the model checkpoint - and logged Experiment parameters. 
- - Args: - opt (argparse.Namespace): Command Line arguments passed - to YOLOv5 training script - - Returns: - None/bool: Return True if the run is restored successfully - else return None - """ - if comet_ml is None: - return - - if isinstance(opt.resume, str): - if opt.resume.startswith(COMET_PREFIX): - api = comet_ml.API() - resource = urlparse(opt.resume) - experiment_path = f"{resource.netloc}{resource.path}" - experiment = api.get(experiment_path) - set_opt_parameters(opt, experiment) - download_model_checkpoint(opt, experiment) - - return True - - return None diff --git a/src/yolov5_ros/src/yolov5/utils/loggers/comet/hpo.py b/src/yolov5_ros/src/yolov5/utils/loggers/comet/hpo.py deleted file mode 100644 index eab4df9..0000000 --- a/src/yolov5_ros/src/yolov5/utils/loggers/comet/hpo.py +++ /dev/null @@ -1,118 +0,0 @@ -import argparse -import json -import logging -import os -import sys -from pathlib import Path - -import comet_ml - -logger = logging.getLogger(__name__) - -FILE = Path(__file__).resolve() -ROOT = FILE.parents[3] # YOLOv5 root directory -if str(ROOT) not in sys.path: - sys.path.append(str(ROOT)) # add ROOT to PATH - -from train import parse_opt, train -from utils.callbacks import Callbacks -from utils.general import increment_path -from utils.torch_utils import select_device - -# Project Configuration -config = comet_ml.config.get_config() -COMET_PROJECT_NAME = config.get_string(os.getenv("COMET_PROJECT_NAME"), "comet.project_name", default="yolov5") - - -def get_args(known=False): - parser = argparse.ArgumentParser() - parser.add_argument('--weights', type=str, default=ROOT / 'yolov5s.pt', help='initial weights path') - parser.add_argument('--cfg', type=str, default='', help='model.yaml path') - parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='dataset.yaml path') - parser.add_argument('--hyp', type=str, default=ROOT / 'data/hyps/hyp.scratch-low.yaml', help='hyperparameters path') - parser.add_argument('--epochs', type=int, default=300, help='total training epochs') - parser.add_argument('--batch-size', type=int, default=16, help='total batch size for all GPUs, -1 for autobatch') - parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=640, help='train, val image size (pixels)') - parser.add_argument('--rect', action='store_true', help='rectangular training') - parser.add_argument('--resume', nargs='?', const=True, default=False, help='resume most recent training') - parser.add_argument('--nosave', action='store_true', help='only save final checkpoint') - parser.add_argument('--noval', action='store_true', help='only validate final epoch') - parser.add_argument('--noautoanchor', action='store_true', help='disable AutoAnchor') - parser.add_argument('--noplots', action='store_true', help='save no plot files') - parser.add_argument('--evolve', type=int, nargs='?', const=300, help='evolve hyperparameters for x generations') - parser.add_argument('--bucket', type=str, default='', help='gsutil bucket') - parser.add_argument('--cache', type=str, nargs='?', const='ram', help='--cache images in "ram" (default) or "disk"') - parser.add_argument('--image-weights', action='store_true', help='use weighted image selection for training') - parser.add_argument('--device', default='', help='cuda device, i.e. 
0 or 0,1,2,3 or cpu') - parser.add_argument('--multi-scale', action='store_true', help='vary img-size +/- 50%%') - parser.add_argument('--single-cls', action='store_true', help='train multi-class data as single-class') - parser.add_argument('--optimizer', type=str, choices=['SGD', 'Adam', 'AdamW'], default='SGD', help='optimizer') - parser.add_argument('--sync-bn', action='store_true', help='use SyncBatchNorm, only available in DDP mode') - parser.add_argument('--workers', type=int, default=8, help='max dataloader workers (per RANK in DDP mode)') - parser.add_argument('--project', default=ROOT / 'runs/train', help='save to project/name') - parser.add_argument('--name', default='exp', help='save to project/name') - parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment') - parser.add_argument('--quad', action='store_true', help='quad dataloader') - parser.add_argument('--cos-lr', action='store_true', help='cosine LR scheduler') - parser.add_argument('--label-smoothing', type=float, default=0.0, help='Label smoothing epsilon') - parser.add_argument('--patience', type=int, default=100, help='EarlyStopping patience (epochs without improvement)') - parser.add_argument('--freeze', nargs='+', type=int, default=[0], help='Freeze layers: backbone=10, first3=0 1 2') - parser.add_argument('--save-period', type=int, default=-1, help='Save checkpoint every x epochs (disabled if < 1)') - parser.add_argument('--seed', type=int, default=0, help='Global training seed') - parser.add_argument('--local_rank', type=int, default=-1, help='Automatic DDP Multi-GPU argument, do not modify') - - # Weights & Biases arguments - parser.add_argument('--entity', default=None, help='W&B: Entity') - parser.add_argument('--upload_dataset', nargs='?', const=True, default=False, help='W&B: Upload data, "val" option') - parser.add_argument('--bbox_interval', type=int, default=-1, help='W&B: Set bounding-box image logging interval') - parser.add_argument('--artifact_alias', type=str, default='latest', help='W&B: Version of dataset artifact to use') - - # Comet Arguments - parser.add_argument("--comet_optimizer_config", type=str, help="Comet: Path to a Comet Optimizer Config File.") - parser.add_argument("--comet_optimizer_id", type=str, help="Comet: ID of the Comet Optimizer sweep.") - parser.add_argument("--comet_optimizer_objective", type=str, help="Comet: Set to 'minimize' or 'maximize'.") - parser.add_argument("--comet_optimizer_metric", type=str, help="Comet: Metric to Optimize.") - parser.add_argument("--comet_optimizer_workers", - type=int, - default=1, - help="Comet: Number of Parallel Workers to use with the Comet Optimizer.") - - return parser.parse_known_args()[0] if known else parser.parse_args() - - -def run(parameters, opt): - hyp_dict = {k: v for k, v in parameters.items() if k not in ["epochs", "batch_size"]} - - opt.save_dir = str(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok or opt.evolve)) - opt.batch_size = parameters.get("batch_size") - opt.epochs = parameters.get("epochs") - - device = select_device(opt.device, batch_size=opt.batch_size) - train(hyp_dict, opt, device, callbacks=Callbacks()) - - -if __name__ == "__main__": - opt = get_args(known=True) - - opt.weights = str(opt.weights) - opt.cfg = str(opt.cfg) - opt.data = str(opt.data) - opt.project = str(opt.project) - - optimizer_id = os.getenv("COMET_OPTIMIZER_ID") - if optimizer_id is None: - with open(opt.comet_optimizer_config) as f: - optimizer_config = json.load(f) - optimizer = 
comet_ml.Optimizer(optimizer_config)
-    else:
-        optimizer = comet_ml.Optimizer(optimizer_id)
-
-    opt.comet_optimizer_id = optimizer.id
-    status = optimizer.status()
-
-    opt.comet_optimizer_objective = status["spec"]["objective"]
-    opt.comet_optimizer_metric = status["spec"]["metric"]
-
-    logger.info("COMET INFO: Starting Hyperparameter Sweep")
-    for parameter in optimizer.get_parameters():
-        run(parameter["parameters"], opt)
diff --git a/src/yolov5_ros/src/yolov5/utils/loggers/wandb/README.md b/src/yolov5_ros/src/yolov5/utils/loggers/wandb/README.md
deleted file mode 100644
index d78324b..0000000
--- a/src/yolov5_ros/src/yolov5/utils/loggers/wandb/README.md
+++ /dev/null
@@ -1,162 +0,0 @@
-📚 This guide explains how to use **Weights & Biases** (W&B) with YOLOv5 🚀. UPDATED 29 September 2021.
-
-- [About Weights & Biases](#about-weights-&-biases)
-- [First-Time Setup](#first-time-setup)
-- [Viewing runs](#viewing-runs)
-- [Disabling wandb](#disabling-wandb)
-- [Advanced Usage: Dataset Versioning and Evaluation](#advanced-usage)
-- [Reports: Share your work with the world!](#reports)
-
-## About Weights & Biases
-
-Think of [W&B](https://wandb.ai/site?utm_campaign=repo_yolo_wandbtutorial) like GitHub for machine learning models. With a few lines of code, save everything you need to debug, compare and reproduce your models — architecture, hyperparameters, git commits, model weights, GPU usage, and even datasets and predictions.
-
-Used by top researchers including teams at OpenAI, Lyft, GitHub, and MILA, W&B is part of the new standard of best practices for machine learning. How W&B can help you optimize your machine learning workflows:
-
-- [Debug](https://wandb.ai/wandb/getting-started/reports/Visualize-Debug-Machine-Learning-Models--VmlldzoyNzY5MDk#Free-2) model performance in real time
-- [GPU usage](https://wandb.ai/wandb/getting-started/reports/Visualize-Debug-Machine-Learning-Models--VmlldzoyNzY5MDk#System-4) visualized automatically
-- [Custom charts](https://wandb.ai/wandb/customizable-charts/reports/Powerful-Custom-Charts-To-Debug-Model-Peformance--VmlldzoyNzY4ODI) for powerful, extensible visualization
-- [Share insights](https://wandb.ai/wandb/getting-started/reports/Visualize-Debug-Machine-Learning-Models--VmlldzoyNzY5MDk#Share-8) interactively with collaborators
-- [Optimize hyperparameters](https://docs.wandb.com/sweeps) efficiently
-- [Track](https://docs.wandb.com/artifacts) datasets, pipelines, and production models
-
-## First-Time Setup
-
-When you first train, W&B will prompt you to create a new account and will generate an **API key** for you. If you are an existing user you can retrieve your key from https://wandb.ai/authorize. This key is used to tell W&B where to log your data. You only need to supply your key once, and then it is remembered on the same device.
-
-W&B will create a cloud **project** (default is 'YOLOv5') for your training runs, and each new training run will be provided a unique run **name** within that project as project/name. You can also manually set your project and run name as:
-
-```shell
-$ python train.py --project ... --name ...
-```
-
-YOLOv5 notebook example: Open In Colab | Open In Kaggle
-
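If you prefer to supply the key non-interactively (for instance on a remote machine), here is a minimal sketch using the public `wandb` Python API; reading the key from the standard `WANDB_API_KEY` environment variable is this example's choice, not something `train.py` requires:

```python
import os

import wandb

# Authenticate once per machine; wandb caches the key afterwards.
# Assumes WANDB_API_KEY was exported beforehand (key from https://wandb.ai/authorize).
wandb.login(key=os.environ["WANDB_API_KEY"])

# Same project/name idea as the --project/--name flags above.
run = wandb.init(project="YOLOv5", name="setup-check")
run.log({"setup_check": 1})
run.finish()
```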
- -## Viewing Runs - -
-Run information streams from your environment to the W&B cloud console as you train. This allows you to monitor and even cancel runs in real time. All important information is logged:
-
-- Training & Validation losses
-- Metrics: Precision, Recall, mAP@0.5, mAP@0.5:0.95
-- Learning Rate over time
-- A bounding box debugging panel, showing the training progress over time
-- GPU: Type, **GPU Utilization**, power, temperature, **CUDA memory usage**
-- System: Disk I/O, CPU utilization, RAM memory usage
-- Your trained model as W&B Artifact
-- Environment: OS and Python types, Git repository and state, **training command**
-
-A minimal sketch of what this streaming looks like at the API level follows below.
-
-Weights & Biases dashboard
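As promised above, a rough, self-contained illustration of streaming run information at the API level; the metric keys and values here are invented for the example and are not the exact keys YOLOv5 logs:

```python
import wandb

run = wandb.init(project="YOLOv5", name="streaming-demo")

# Stand-in loop: the real integration streams losses, metrics, learning
# rate and system stats from inside train.py.
for epoch in range(3):
    run.log({
        "train/box_loss": 0.10 / (epoch + 1),   # invented values
        "metrics/mAP_0.5": 0.50 + 0.05 * epoch,
        "epoch": epoch,
    })

run.finish()
```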
-
-## Disabling wandb
-
-- Training after running `wandb disabled` inside that directory creates no wandb run
-  ![Screenshot (84)](https://user-images.githubusercontent.com/15766192/143441777-c780bdd7-7cb4-4404-9559-b4316030a985.png)
-
-- To enable wandb again, run `wandb online`
-  ![Screenshot (85)](https://user-images.githubusercontent.com/15766192/143441866-7191b2cb-22f0-4e0f-ae64-2dc47dc13078.png)
-
-## Advanced Usage
-
-You can leverage the W&B artifacts and Tables integration to easily visualize and manage your datasets, models and training evaluations. Here are some quick examples to get you started.
-
-

-**1: Train and Log Evaluation simultaneously**
-
-This is an extension of the previous section, but it will also start training after uploading the dataset. This also logs an Evaluation Table.
-The Evaluation Table compares your predictions and ground truths across the validation set for each epoch. It uses references to the already uploaded datasets,
-so no images will be uploaded from your system more than once. (A toy sketch of such a table follows this section.)
-
-**Usage:** `$ python train.py --upload_data val`
-
-![Screenshot from 2021-11-21 17-40-06](https://user-images.githubusercontent.com/15766192/142761183-c1696d8c-3f38-45ab-991a-bb0dfd98ae7d.png)
-
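For orientation, here is a toy sketch of the Evaluation Table idea using the public `wandb.Table` API. The column layout echoes the one built in `wandb_utils.py` later in this diff, but the rows (and the use of plain strings instead of logged images) are invented for the example:

```python
import wandb

run = wandb.init(project="YOLOv5", job_type="evaluation-demo")

# Same column layout as the logger's result table; rows are made up.
table = wandb.Table(columns=["epoch", "id", "ground truth", "prediction"])
table.add_data(0, "img_001", "person", "person")
table.add_data(0, "img_002", "car", "truck")  # a misclassification example

run.log({"evaluation": table})
run.finish()
```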

-**2: Visualize and Version Datasets**
-
-Log, visualize, dynamically query, and understand your data with W&B Tables. You can use the following command to log your dataset as a W&B Table. This will generate a `{dataset}_wandb.yaml` file which can be used to train from the dataset artifact. (A hand-rolled artifact sketch follows this section.)
-
-**Usage:** `$ python utils/loggers/wandb/log_dataset.py --project ... --name ... --data ..`
-
-![Screenshot (64)](https://user-images.githubusercontent.com/15766192/128486078-d8433890-98a3-4d12-8986-b6c0e3fc64b9.png)
-
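Under the hood this is a W&B dataset artifact. A minimal hand-rolled sketch with hypothetical local paths; `log_dataset.py` does considerably more, including writing the `_wandb.yaml` file:

```python
import wandb

run = wandb.init(project="YOLOv5", job_type="dataset-upload-demo")

# Hypothetical paths; point these at a real dataset before running.
artifact = wandb.Artifact("coco128-demo", type="dataset")
artifact.add_dir("datasets/coco128/images", name="data/images")
artifact.add_file("data/coco128.yaml")

run.log_artifact(artifact)
run.finish()
```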

-**3: Train using dataset artifact**
-
-When you upload a dataset as described in the first section, you get a new config file with `_wandb` added to its name. This file contains the information that
-can be used to train a model directly from the dataset artifact. This also logs the Evaluation Table.
-
-**Usage:** `$ python train.py --data {data}_wandb.yaml`
-
-![Screenshot (72)](https://user-images.githubusercontent.com/15766192/128979739-4cf63aeb-a76f-483f-8861-1c0100b938a5.png)
-

-**4: Save model checkpoints as artifacts**
-
-To enable saving and versioning checkpoints of your experiment, pass `--save_period n` with the base command, where `n` represents the checkpoint interval.
-You can also log both the dataset and model checkpoints simultaneously. If not passed, only the final model will be logged. (A sketch of the underlying artifact call follows this section.)
-
-**Usage:** `$ python train.py --save_period 1`
-
-![Screenshot (68)](https://user-images.githubusercontent.com/15766192/128726138-ec6c1f60-639d-437d-b4ee-3acd9de47ef3.png)
-
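The shape of the underlying call mirrors `WandbLogger.log_model` in `wandb_utils.py` (shown later in this diff); here is a hedged sketch with a hypothetical checkpoint path and made-up metadata:

```python
import wandb

run = wandb.init(project="YOLOv5", job_type="checkpoint-demo")

# Hypothetical path and metadata; the real logger fills these in from
# the training loop (epochs trained, fitness score, save period, ...).
ckpt = wandb.Artifact(f"run_{run.id}_model", type="model",
                      metadata={"epochs_trained": 1, "save_period": 1})
ckpt.add_file("runs/train/exp/weights/last.pt", name="last.pt")

run.log_artifact(ckpt, aliases=["latest", "last"])
run.finish()
```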
- -

-**5: Resume runs from checkpoint artifacts**
-
-Any run can be resumed using artifacts if the `--resume` argument starts with the `wandb-artifact://` prefix followed by the run path, i.e. `wandb-artifact://username/project/runid`. This doesn't require the model checkpoint to be present on the local system.
-
-**Usage:** `$ python train.py --resume wandb-artifact://{run_path}`
-
-![Screenshot (70)](https://user-images.githubusercontent.com/15766192/128728988-4e84b355-6c87-41ae-a591-14aecf45343e.png)
-

-**6: Resume runs from dataset artifact & checkpoint artifacts**
-
-Local dataset or model checkpoints are not required. This can be used to resume runs directly on a different device.
-The syntax is the same as in the previous section, but you'll need to log both the dataset and model checkpoints as artifacts, i.e. either set `--upload_dataset` or
-train from a `_wandb.yaml` file, and set `--save_period`.
-
-**Usage:** `$ python train.py --resume wandb-artifact://{run_path}`
-
-![Screenshot (70)](https://user-images.githubusercontent.com/15766192/128728988-4e84b355-6c87-41ae-a591-14aecf45343e.png)
-

-## Reports
-
-W&B Reports can be created from your saved runs for sharing online. Once a report is created you will receive a link you can use to publicly share your results. Here is an example report created from the COCO128 tutorial trainings of all four YOLOv5 models ([link](https://wandb.ai/glenn-jocher/yolov5_tutorial/reports/YOLOv5-COCO128-Tutorial-Results--VmlldzozMDI5OTY)).
-
-Weights & Biases Reports
-
-## Environments
-
-YOLOv5 may be run in any of the following up-to-date verified environments (with all dependencies including [CUDA](https://developer.nvidia.com/cuda)/[CUDNN](https://developer.nvidia.com/cudnn), [Python](https://www.python.org/) and [PyTorch](https://pytorch.org/) preinstalled):
-
-- **Google Colab and Kaggle** notebooks with free GPU: Open In Colab Open In Kaggle
-- **Google Cloud** Deep Learning VM. See [GCP Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/GCP-Quickstart)
-- **Amazon** Deep Learning AMI. See [AWS Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/AWS-Quickstart)
-- **Docker Image**. See [Docker Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/Docker-Quickstart) Docker Pulls
-
-## Status
-
-![CI CPU testing](https://github.com/ultralytics/yolov5/workflows/CI%20CPU%20testing/badge.svg)
-
-If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous Integration (CI) tests are currently passing. CI tests verify correct operation of YOLOv5 training ([train.py](https://github.com/ultralytics/yolov5/blob/master/train.py)), validation ([val.py](https://github.com/ultralytics/yolov5/blob/master/val.py)), inference ([detect.py](https://github.com/ultralytics/yolov5/blob/master/detect.py)) and export ([export.py](https://github.com/ultralytics/yolov5/blob/master/export.py)) on macOS, Windows, and Ubuntu every 24 hours and on every commit.
diff --git a/src/yolov5_ros/src/yolov5/utils/loggers/wandb/__init__.py b/src/yolov5_ros/src/yolov5/utils/loggers/wandb/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/yolov5_ros/src/yolov5/utils/loggers/wandb/log_dataset.py b/src/yolov5_ros/src/yolov5/utils/loggers/wandb/log_dataset.py deleted file mode 100644 index 06e81fb..0000000 --- a/src/yolov5_ros/src/yolov5/utils/loggers/wandb/log_dataset.py +++ /dev/null @@ -1,27 +0,0 @@ -import argparse - -from wandb_utils import WandbLogger - -from utils.general import LOGGER - -WANDB_ARTIFACT_PREFIX = 'wandb-artifact://' - - -def create_dataset_artifact(opt): - logger = WandbLogger(opt, None, job_type='Dataset Creation') # TODO: return value unused - if not logger.wandb: - LOGGER.info("install wandb using `pip install wandb` to log the dataset") - - -if __name__ == '__main__': - parser = argparse.ArgumentParser() - parser.add_argument('--data', type=str, default='data/coco128.yaml', help='data.yaml path') - parser.add_argument('--single-cls', action='store_true', help='train as single-class dataset') - parser.add_argument('--project', type=str, default='YOLOv5', help='name of W&B Project') - parser.add_argument('--entity', default=None, help='W&B entity') - parser.add_argument('--name', type=str, default='log dataset', help='name of W&B run') - - opt = parser.parse_args() - opt.resume = False # Explicitly disallow resume check for dataset upload job - - create_dataset_artifact(opt) diff --git a/src/yolov5_ros/src/yolov5/utils/loggers/wandb/sweep.py b/src/yolov5_ros/src/yolov5/utils/loggers/wandb/sweep.py deleted file mode 100644 index d49ea6f..0000000 --- a/src/yolov5_ros/src/yolov5/utils/loggers/wandb/sweep.py +++ /dev/null @@ -1,41 +0,0 @@ -import sys -from pathlib import Path - -import wandb - -FILE = Path(__file__).resolve() -ROOT = FILE.parents[3] # YOLOv5 root directory -if str(ROOT) not in sys.path: - sys.path.append(str(ROOT)) # add ROOT to PATH - -from train import parse_opt, train -from utils.callbacks import Callbacks -from utils.general import increment_path -from utils.torch_utils import select_device - - -def sweep(): - wandb.init() - # Get hyp dict from sweep agent. Copy because train() modifies parameters which confused wandb. - hyp_dict = vars(wandb.config).get("_items").copy() - - # Workaround: get necessary opt args - opt = parse_opt(known=True) - opt.batch_size = hyp_dict.get("batch_size") - opt.save_dir = str(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok or opt.evolve)) - opt.epochs = hyp_dict.get("epochs") - opt.nosave = True - opt.data = hyp_dict.get("data") - opt.weights = str(opt.weights) - opt.cfg = str(opt.cfg) - opt.data = str(opt.data) - opt.hyp = str(opt.hyp) - opt.project = str(opt.project) - device = select_device(opt.device, batch_size=opt.batch_size) - - # train - train(hyp_dict, opt, device, callbacks=Callbacks()) - - -if __name__ == "__main__": - sweep() diff --git a/src/yolov5_ros/src/yolov5/utils/loggers/wandb/sweep.yaml b/src/yolov5_ros/src/yolov5/utils/loggers/wandb/sweep.yaml deleted file mode 100644 index 688b1ea..0000000 --- a/src/yolov5_ros/src/yolov5/utils/loggers/wandb/sweep.yaml +++ /dev/null @@ -1,143 +0,0 @@ -# Hyperparameters for training -# To set range- -# Provide min and max values as: -# parameter: -# -# min: scalar -# max: scalar -# OR -# -# Set a specific list of search space- -# parameter: -# values: [scalar1, scalar2, scalar3...] 
-# -# You can use grid, bayesian and hyperopt search strategy -# For more info on configuring sweeps visit - https://docs.wandb.ai/guides/sweeps/configuration - -program: utils/loggers/wandb/sweep.py -method: random -metric: - name: metrics/mAP_0.5 - goal: maximize - -parameters: - # hyperparameters: set either min, max range or values list - data: - value: "data/coco128.yaml" - batch_size: - values: [64] - epochs: - values: [10] - - lr0: - distribution: uniform - min: 1e-5 - max: 1e-1 - lrf: - distribution: uniform - min: 0.01 - max: 1.0 - momentum: - distribution: uniform - min: 0.6 - max: 0.98 - weight_decay: - distribution: uniform - min: 0.0 - max: 0.001 - warmup_epochs: - distribution: uniform - min: 0.0 - max: 5.0 - warmup_momentum: - distribution: uniform - min: 0.0 - max: 0.95 - warmup_bias_lr: - distribution: uniform - min: 0.0 - max: 0.2 - box: - distribution: uniform - min: 0.02 - max: 0.2 - cls: - distribution: uniform - min: 0.2 - max: 4.0 - cls_pw: - distribution: uniform - min: 0.5 - max: 2.0 - obj: - distribution: uniform - min: 0.2 - max: 4.0 - obj_pw: - distribution: uniform - min: 0.5 - max: 2.0 - iou_t: - distribution: uniform - min: 0.1 - max: 0.7 - anchor_t: - distribution: uniform - min: 2.0 - max: 8.0 - fl_gamma: - distribution: uniform - min: 0.0 - max: 4.0 - hsv_h: - distribution: uniform - min: 0.0 - max: 0.1 - hsv_s: - distribution: uniform - min: 0.0 - max: 0.9 - hsv_v: - distribution: uniform - min: 0.0 - max: 0.9 - degrees: - distribution: uniform - min: 0.0 - max: 45.0 - translate: - distribution: uniform - min: 0.0 - max: 0.9 - scale: - distribution: uniform - min: 0.0 - max: 0.9 - shear: - distribution: uniform - min: 0.0 - max: 10.0 - perspective: - distribution: uniform - min: 0.0 - max: 0.001 - flipud: - distribution: uniform - min: 0.0 - max: 1.0 - fliplr: - distribution: uniform - min: 0.0 - max: 1.0 - mosaic: - distribution: uniform - min: 0.0 - max: 1.0 - mixup: - distribution: uniform - min: 0.0 - max: 1.0 - copy_paste: - distribution: uniform - min: 0.0 - max: 1.0 diff --git a/src/yolov5_ros/src/yolov5/utils/loggers/wandb/wandb_utils.py b/src/yolov5_ros/src/yolov5/utils/loggers/wandb/wandb_utils.py deleted file mode 100644 index e850d2a..0000000 --- a/src/yolov5_ros/src/yolov5/utils/loggers/wandb/wandb_utils.py +++ /dev/null @@ -1,584 +0,0 @@ -"""Utilities and tools for tracking runs with Weights & Biases.""" - -import logging -import os -import sys -from contextlib import contextmanager -from pathlib import Path -from typing import Dict - -import yaml -from tqdm import tqdm - -FILE = Path(__file__).resolve() -ROOT = FILE.parents[3] # YOLOv5 root directory -if str(ROOT) not in sys.path: - sys.path.append(str(ROOT)) # add ROOT to PATH - -from utils.dataloaders import LoadImagesAndLabels, img2label_paths -from utils.general import LOGGER, check_dataset, check_file - -try: - import wandb - - assert hasattr(wandb, '__version__') # verify package import not local dir -except (ImportError, AssertionError): - wandb = None - -RANK = int(os.getenv('RANK', -1)) -WANDB_ARTIFACT_PREFIX = 'wandb-artifact://' - - -def remove_prefix(from_string, prefix=WANDB_ARTIFACT_PREFIX): - return from_string[len(prefix):] - - -def check_wandb_config_file(data_config_file): - wandb_config = '_wandb.'.join(data_config_file.rsplit('.', 1)) # updated data.yaml path - if Path(wandb_config).is_file(): - return wandb_config - return data_config_file - - -def check_wandb_dataset(data_file): - is_trainset_wandb_artifact = False - is_valset_wandb_artifact = False - if 
isinstance(data_file, dict): - # In that case another dataset manager has already processed it and we don't have to - return data_file - if check_file(data_file) and data_file.endswith('.yaml'): - with open(data_file, errors='ignore') as f: - data_dict = yaml.safe_load(f) - is_trainset_wandb_artifact = isinstance(data_dict['train'], - str) and data_dict['train'].startswith(WANDB_ARTIFACT_PREFIX) - is_valset_wandb_artifact = isinstance(data_dict['val'], - str) and data_dict['val'].startswith(WANDB_ARTIFACT_PREFIX) - if is_trainset_wandb_artifact or is_valset_wandb_artifact: - return data_dict - else: - return check_dataset(data_file) - - -def get_run_info(run_path): - run_path = Path(remove_prefix(run_path, WANDB_ARTIFACT_PREFIX)) - run_id = run_path.stem - project = run_path.parent.stem - entity = run_path.parent.parent.stem - model_artifact_name = 'run_' + run_id + '_model' - return entity, project, run_id, model_artifact_name - - -def check_wandb_resume(opt): - process_wandb_config_ddp_mode(opt) if RANK not in [-1, 0] else None - if isinstance(opt.resume, str): - if opt.resume.startswith(WANDB_ARTIFACT_PREFIX): - if RANK not in [-1, 0]: # For resuming DDP runs - entity, project, run_id, model_artifact_name = get_run_info(opt.resume) - api = wandb.Api() - artifact = api.artifact(entity + '/' + project + '/' + model_artifact_name + ':latest') - modeldir = artifact.download() - opt.weights = str(Path(modeldir) / "last.pt") - return True - return None - - -def process_wandb_config_ddp_mode(opt): - with open(check_file(opt.data), errors='ignore') as f: - data_dict = yaml.safe_load(f) # data dict - train_dir, val_dir = None, None - if isinstance(data_dict['train'], str) and data_dict['train'].startswith(WANDB_ARTIFACT_PREFIX): - api = wandb.Api() - train_artifact = api.artifact(remove_prefix(data_dict['train']) + ':' + opt.artifact_alias) - train_dir = train_artifact.download() - train_path = Path(train_dir) / 'data/images/' - data_dict['train'] = str(train_path) - - if isinstance(data_dict['val'], str) and data_dict['val'].startswith(WANDB_ARTIFACT_PREFIX): - api = wandb.Api() - val_artifact = api.artifact(remove_prefix(data_dict['val']) + ':' + opt.artifact_alias) - val_dir = val_artifact.download() - val_path = Path(val_dir) / 'data/images/' - data_dict['val'] = str(val_path) - if train_dir or val_dir: - ddp_data_path = str(Path(val_dir) / 'wandb_local_data.yaml') - with open(ddp_data_path, 'w') as f: - yaml.safe_dump(data_dict, f) - opt.data = ddp_data_path - - -class WandbLogger(): - """Log training runs, datasets, models, and predictions to Weights & Biases. - - This logger sends information to W&B at wandb.ai. By default, this information - includes hyperparameters, system configuration and metrics, model metrics, - and basic data metrics and analyses. - - By providing additional command line arguments to train.py, datasets, - models and predictions can also be logged. 
- - For more on how this logger is used, see the Weights & Biases documentation: - https://docs.wandb.com/guides/integrations/yolov5 - """ - - def __init__(self, opt, run_id=None, job_type='Training'): - """ - - Initialize WandbLogger instance - - Upload dataset if opt.upload_dataset is True - - Setup training processes if job_type is 'Training' - - arguments: - opt (namespace) -- Commandline arguments for this run - run_id (str) -- Run ID of W&B run to be resumed - job_type (str) -- To set the job_type for this run - - """ - # Pre-training routine -- - self.job_type = job_type - self.wandb, self.wandb_run = wandb, None if not wandb else wandb.run - self.val_artifact, self.train_artifact = None, None - self.train_artifact_path, self.val_artifact_path = None, None - self.result_artifact = None - self.val_table, self.result_table = None, None - self.bbox_media_panel_images = [] - self.val_table_path_map = None - self.max_imgs_to_log = 16 - self.wandb_artifact_data_dict = None - self.data_dict = None - # It's more elegant to stick to 1 wandb.init call, - # but useful config data is overwritten in the WandbLogger's wandb.init call - if isinstance(opt.resume, str): # checks resume from artifact - if opt.resume.startswith(WANDB_ARTIFACT_PREFIX): - entity, project, run_id, model_artifact_name = get_run_info(opt.resume) - model_artifact_name = WANDB_ARTIFACT_PREFIX + model_artifact_name - assert wandb, 'install wandb to resume wandb runs' - # Resume wandb-artifact:// runs here| workaround for not overwriting wandb.config - self.wandb_run = wandb.init(id=run_id, - project=project, - entity=entity, - resume='allow', - allow_val_change=True) - opt.resume = model_artifact_name - elif self.wandb: - self.wandb_run = wandb.init(config=opt, - resume="allow", - project='YOLOv5' if opt.project == 'runs/train' else Path(opt.project).stem, - entity=opt.entity, - name=opt.name if opt.name != 'exp' else None, - job_type=job_type, - id=run_id, - allow_val_change=True) if not wandb.run else wandb.run - if self.wandb_run: - if self.job_type == 'Training': - if opt.upload_dataset: - if not opt.resume: - self.wandb_artifact_data_dict = self.check_and_upload_dataset(opt) - - if isinstance(opt.data, dict): - # This means another dataset manager has already processed the dataset info (e.g. ClearML) - # and they will have stored the already processed dict in opt.data - self.data_dict = opt.data - elif opt.resume: - # resume from artifact - if isinstance(opt.resume, str) and opt.resume.startswith(WANDB_ARTIFACT_PREFIX): - self.data_dict = dict(self.wandb_run.config.data_dict) - else: # local resume - self.data_dict = check_wandb_dataset(opt.data) - else: - self.data_dict = check_wandb_dataset(opt.data) - self.wandb_artifact_data_dict = self.wandb_artifact_data_dict or self.data_dict - - # write data_dict to config. useful for resuming from artifacts. Do this only when not resuming. - self.wandb_run.config.update({'data_dict': self.wandb_artifact_data_dict}, allow_val_change=True) - self.setup_training(opt) - - if self.job_type == 'Dataset Creation': - self.wandb_run.config.update({"upload_dataset": True}) - self.data_dict = self.check_and_upload_dataset(opt) - - def check_and_upload_dataset(self, opt): - """ - Check if the dataset format is compatible and upload it as W&B artifact - - arguments: - opt (namespace)-- Commandline arguments for current run - - returns: - Updated dataset info dictionary where local dataset paths are replaced by WAND_ARFACT_PREFIX links. 
- """ - assert wandb, 'Install wandb to upload dataset' - config_path = self.log_dataset_artifact(opt.data, opt.single_cls, - 'YOLOv5' if opt.project == 'runs/train' else Path(opt.project).stem) - with open(config_path, errors='ignore') as f: - wandb_data_dict = yaml.safe_load(f) - return wandb_data_dict - - def setup_training(self, opt): - """ - Setup the necessary processes for training YOLO models: - - Attempt to download model checkpoint and dataset artifacts if opt.resume stats with WANDB_ARTIFACT_PREFIX - - Update data_dict, to contain info of previous run if resumed and the paths of dataset artifact if downloaded - - Setup log_dict, initialize bbox_interval - - arguments: - opt (namespace) -- commandline arguments for this run - - """ - self.log_dict, self.current_epoch = {}, 0 - self.bbox_interval = opt.bbox_interval - if isinstance(opt.resume, str): - modeldir, _ = self.download_model_artifact(opt) - if modeldir: - self.weights = Path(modeldir) / "last.pt" - config = self.wandb_run.config - opt.weights, opt.save_period, opt.batch_size, opt.bbox_interval, opt.epochs, opt.hyp, opt.imgsz = str( - self.weights), config.save_period, config.batch_size, config.bbox_interval, config.epochs,\ - config.hyp, config.imgsz - data_dict = self.data_dict - if self.val_artifact is None: # If --upload_dataset is set, use the existing artifact, don't download - self.train_artifact_path, self.train_artifact = self.download_dataset_artifact( - data_dict.get('train'), opt.artifact_alias) - self.val_artifact_path, self.val_artifact = self.download_dataset_artifact( - data_dict.get('val'), opt.artifact_alias) - - if self.train_artifact_path is not None: - train_path = Path(self.train_artifact_path) / 'data/images/' - data_dict['train'] = str(train_path) - if self.val_artifact_path is not None: - val_path = Path(self.val_artifact_path) / 'data/images/' - data_dict['val'] = str(val_path) - - if self.val_artifact is not None: - self.result_artifact = wandb.Artifact("run_" + wandb.run.id + "_progress", "evaluation") - columns = ["epoch", "id", "ground truth", "prediction"] - columns.extend(self.data_dict['names']) - self.result_table = wandb.Table(columns) - self.val_table = self.val_artifact.get("val") - if self.val_table_path_map is None: - self.map_val_table_path() - if opt.bbox_interval == -1: - self.bbox_interval = opt.bbox_interval = (opt.epochs // 10) if opt.epochs > 10 else 1 - if opt.evolve or opt.noplots: - self.bbox_interval = opt.bbox_interval = opt.epochs + 1 # disable bbox_interval - train_from_artifact = self.train_artifact_path is not None and self.val_artifact_path is not None - # Update the the data_dict to point to local artifacts dir - if train_from_artifact: - self.data_dict = data_dict - - def download_dataset_artifact(self, path, alias): - """ - download the model checkpoint artifact if the path starts with WANDB_ARTIFACT_PREFIX - - arguments: - path -- path of the dataset to be used for training - alias (str)-- alias of the artifact to be download/used for training - - returns: - (str, wandb.Artifact) -- path of the downladed dataset and it's corresponding artifact object if dataset - is found otherwise returns (None, None) - """ - if isinstance(path, str) and path.startswith(WANDB_ARTIFACT_PREFIX): - artifact_path = Path(remove_prefix(path, WANDB_ARTIFACT_PREFIX) + ":" + alias) - dataset_artifact = wandb.use_artifact(artifact_path.as_posix().replace("\\", "/")) - assert dataset_artifact is not None, "'Error: W&B dataset artifact doesn\'t exist'" - datadir = 
dataset_artifact.download() - return datadir, dataset_artifact - return None, None - - def download_model_artifact(self, opt): - """ - download the model checkpoint artifact if the resume path starts with WANDB_ARTIFACT_PREFIX - - arguments: - opt (namespace) -- Commandline arguments for this run - """ - if opt.resume.startswith(WANDB_ARTIFACT_PREFIX): - model_artifact = wandb.use_artifact(remove_prefix(opt.resume, WANDB_ARTIFACT_PREFIX) + ":latest") - assert model_artifact is not None, 'Error: W&B model artifact doesn\'t exist' - modeldir = model_artifact.download() - # epochs_trained = model_artifact.metadata.get('epochs_trained') - total_epochs = model_artifact.metadata.get('total_epochs') - is_finished = total_epochs is None - assert not is_finished, 'training is finished, can only resume incomplete runs.' - return modeldir, model_artifact - return None, None - - def log_model(self, path, opt, epoch, fitness_score, best_model=False): - """ - Log the model checkpoint as W&B artifact - - arguments: - path (Path) -- Path of directory containing the checkpoints - opt (namespace) -- Command line arguments for this run - epoch (int) -- Current epoch number - fitness_score (float) -- fitness score for current epoch - best_model (boolean) -- Boolean representing if the current checkpoint is the best yet. - """ - model_artifact = wandb.Artifact('run_' + wandb.run.id + '_model', - type='model', - metadata={ - 'original_url': str(path), - 'epochs_trained': epoch + 1, - 'save period': opt.save_period, - 'project': opt.project, - 'total_epochs': opt.epochs, - 'fitness_score': fitness_score}) - model_artifact.add_file(str(path / 'last.pt'), name='last.pt') - wandb.log_artifact(model_artifact, - aliases=['latest', 'last', 'epoch ' + str(self.current_epoch), 'best' if best_model else '']) - LOGGER.info(f"Saving model artifact on epoch {epoch + 1}") - - def log_dataset_artifact(self, data_file, single_cls, project, overwrite_config=False): - """ - Log the dataset as W&B artifact and return the new data file with W&B links - - arguments: - data_file (str) -- the .yaml file with information about the dataset like - path, classes etc. - single_class (boolean) -- train multi-class data as single-class - project (str) -- project name. Used to construct the artifact path - overwrite_config (boolean) -- overwrites the data.yaml file if set to true otherwise creates a new - file with _wandb postfix. Eg -> data_wandb.yaml - - returns: - the new .yaml file with artifact links. 
it can be used to start training directly from artifacts - """ - upload_dataset = self.wandb_run.config.upload_dataset - log_val_only = isinstance(upload_dataset, str) and upload_dataset == 'val' - self.data_dict = check_dataset(data_file) # parse and check - data = dict(self.data_dict) - nc, names = (1, ['item']) if single_cls else (int(data['nc']), data['names']) - names = {k: v for k, v in enumerate(names)} # to index dictionary - - # log train set - if not log_val_only: - self.train_artifact = self.create_dataset_table(LoadImagesAndLabels(data['train'], rect=True, batch_size=1), - names, - name='train') if data.get('train') else None - if data.get('train'): - data['train'] = WANDB_ARTIFACT_PREFIX + str(Path(project) / 'train') - - self.val_artifact = self.create_dataset_table( - LoadImagesAndLabels(data['val'], rect=True, batch_size=1), names, name='val') if data.get('val') else None - if data.get('val'): - data['val'] = WANDB_ARTIFACT_PREFIX + str(Path(project) / 'val') - - path = Path(data_file) - # create a _wandb.yaml file with artifacts links if both train and test set are logged - if not log_val_only: - path = (path.stem if overwrite_config else path.stem + '_wandb') + '.yaml' # updated data.yaml path - path = ROOT / 'data' / path - data.pop('download', None) - data.pop('path', None) - with open(path, 'w') as f: - yaml.safe_dump(data, f) - LOGGER.info(f"Created dataset config file {path}") - - if self.job_type == 'Training': # builds correct artifact pipeline graph - if not log_val_only: - self.wandb_run.log_artifact( - self.train_artifact) # calling use_artifact downloads the dataset. NOT NEEDED! - self.wandb_run.use_artifact(self.val_artifact) - self.val_artifact.wait() - self.val_table = self.val_artifact.get('val') - self.map_val_table_path() - else: - self.wandb_run.log_artifact(self.train_artifact) - self.wandb_run.log_artifact(self.val_artifact) - return path - - def map_val_table_path(self): - """ - Map the validation dataset Table like name of file -> it's id in the W&B Table. - Useful for - referencing artifacts for evaluation. - """ - self.val_table_path_map = {} - LOGGER.info("Mapping dataset") - for i, data in enumerate(tqdm(self.val_table.data)): - self.val_table_path_map[data[3]] = data[0] - - def create_dataset_table(self, dataset: LoadImagesAndLabels, class_to_id: Dict[int, str], name: str = 'dataset'): - """ - Create and return W&B artifact containing W&B Table of the dataset. 
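
        Example (sketch; mirrors the call made from log_dataset_artifact above,
        with an illustrative two-class label map):

            ds = LoadImagesAndLabels(data['val'], rect=True, batch_size=1)
            val_artifact = self.create_dataset_table(ds, {0: 'person', 1: 'car'}, name='val')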
-
-        arguments:
-        dataset -- instance of LoadImagesAndLabels class used to iterate over the data to build the Table
-        class_to_id -- hash map that maps class ids to labels
-        name -- name of the artifact
-
-        returns:
-        dataset artifact to be logged or used
-        """
-        # TODO: Explore multiprocessing to split this loop across parallel workers; essential for speeding up the logging
-        artifact = wandb.Artifact(name=name, type="dataset")
-        img_files = tqdm([dataset.path]) if isinstance(dataset.path, str) and Path(dataset.path).is_dir() else None
-        img_files = tqdm(dataset.im_files) if not img_files else img_files
-        for img_file in img_files:
-            if Path(img_file).is_dir():
-                artifact.add_dir(img_file, name='data/images')
-                labels_path = 'labels'.join(dataset.path.rsplit('images', 1))
-                artifact.add_dir(labels_path, name='data/labels')
-            else:
-                artifact.add_file(img_file, name='data/images/' + Path(img_file).name)
-                label_file = Path(img2label_paths([img_file])[0])
-                artifact.add_file(str(label_file), name='data/labels/' +
-                                  label_file.name) if label_file.exists() else None
-        table = wandb.Table(columns=["id", "train_image", "Classes", "name"])
-        class_set = wandb.Classes([{'id': id, 'name': name} for id, name in class_to_id.items()])
-        for si, (img, labels, paths, shapes) in enumerate(tqdm(dataset)):
-            box_data, img_classes = [], {}
-            for cls, *xywh in labels[:, 1:].tolist():
-                cls = int(cls)
-                box_data.append({
-                    "position": {
-                        "middle": [xywh[0], xywh[1]],
-                        "width": xywh[2],
-                        "height": xywh[3]},
-                    "class_id": cls,
-                    "box_caption": "%s" % (class_to_id[cls])})
-                img_classes[cls] = class_to_id[cls]
-            boxes = {"ground_truth": {"box_data": box_data, "class_labels": class_to_id}}  # inference-space
-            table.add_data(si, wandb.Image(paths, classes=class_set, boxes=boxes), list(img_classes.values()),
-                           Path(paths).name)
-        artifact.add(table, name)
-        return artifact
-
-    def log_training_progress(self, predn, path, names):
-        """
-        Build evaluation Table. Uses reference from validation dataset table.
-
-        arguments:
-        predn (list): list of predictions in the native space in the format - [xmin, ymin, xmax, ymax, confidence, class]
-        path (str): local path of the current evaluation image
-        names (dict(int, str)): hash map that maps class ids to labels
-        """
-        class_set = wandb.Classes([{'id': id, 'name': name} for id, name in names.items()])
-        box_data = []
-        avg_conf_per_class = [0] * len(self.data_dict['names'])
-        pred_class_count = {}
-        for *xyxy, conf, cls in predn.tolist():
-            if conf >= 0.25:
-                cls = int(cls)
-                box_data.append({
-                    "position": {
-                        "minX": xyxy[0],
-                        "minY": xyxy[1],
-                        "maxX": xyxy[2],
-                        "maxY": xyxy[3]},
-                    "class_id": cls,
-                    "box_caption": f"{names[cls]} {conf:.3f}",
-                    "scores": {
-                        "class_score": conf},
-                    "domain": "pixel"})
-                avg_conf_per_class[cls] += conf
-
-                if cls in pred_class_count:
-                    pred_class_count[cls] += 1
-                else:
-                    pred_class_count[cls] = 1
-
-        for pred_class in pred_class_count.keys():
-            avg_conf_per_class[pred_class] = avg_conf_per_class[pred_class] / pred_class_count[pred_class]
-
-        boxes = {"predictions": {"box_data": box_data, "class_labels": names}}  # inference-space
-        id = self.val_table_path_map[Path(path).name]
-        self.result_table.add_data(self.current_epoch, id, self.val_table.data[id][1],
-                                   wandb.Image(self.val_table.data[id][1], boxes=boxes, classes=class_set),
-                                   *avg_conf_per_class)
-
-    def val_one_image(self, pred, predn, path, names, im):
-        """
-        Log validation data for one image.
updates the result Table if validation dataset is uploaded and log bbox media panel - - arguments: - pred (list): list of scaled predictions in the format - [xmin, ymin, xmax, ymax, confidence, class] - predn (list): list of predictions in the native space - [xmin, ymin, xmax, ymax, confidence, class] - path (str): local path of the current evaluation image - """ - if self.val_table and self.result_table: # Log Table if Val dataset is uploaded as artifact - self.log_training_progress(predn, path, names) - - if len(self.bbox_media_panel_images) < self.max_imgs_to_log and self.current_epoch > 0: - if self.current_epoch % self.bbox_interval == 0: - box_data = [{ - "position": { - "minX": xyxy[0], - "minY": xyxy[1], - "maxX": xyxy[2], - "maxY": xyxy[3]}, - "class_id": int(cls), - "box_caption": f"{names[int(cls)]} {conf:.3f}", - "scores": { - "class_score": conf}, - "domain": "pixel"} for *xyxy, conf, cls in pred.tolist()] - boxes = {"predictions": {"box_data": box_data, "class_labels": names}} # inference-space - self.bbox_media_panel_images.append(wandb.Image(im, boxes=boxes, caption=path.name)) - - def log(self, log_dict): - """ - save the metrics to the logging dictionary - - arguments: - log_dict (Dict) -- metrics/media to be logged in current step - """ - if self.wandb_run: - for key, value in log_dict.items(): - self.log_dict[key] = value - - def end_epoch(self, best_result=False): - """ - commit the log_dict, model artifacts and Tables to W&B and flush the log_dict. - - arguments: - best_result (boolean): Boolean representing if the result of this evaluation is best or not - """ - if self.wandb_run: - with all_logging_disabled(): - if self.bbox_media_panel_images: - self.log_dict["BoundingBoxDebugger"] = self.bbox_media_panel_images - try: - wandb.log(self.log_dict) - except BaseException as e: - LOGGER.info( - f"An error occurred in wandb logger. The training will proceed without interruption. More info\n{e}" - ) - self.wandb_run.finish() - self.wandb_run = None - - self.log_dict = {} - self.bbox_media_panel_images = [] - if self.result_artifact: - self.result_artifact.add(self.result_table, 'result') - wandb.log_artifact(self.result_artifact, - aliases=[ - 'latest', 'last', 'epoch ' + str(self.current_epoch), - ('best' if best_result else '')]) - - wandb.log({"evaluation": self.result_table}) - columns = ["epoch", "id", "ground truth", "prediction"] - columns.extend(self.data_dict['names']) - self.result_table = wandb.Table(columns) - self.result_artifact = wandb.Artifact("run_" + wandb.run.id + "_progress", "evaluation") - - def finish_run(self): - """ - Log metrics if any and finish the current W&B run - """ - if self.wandb_run: - if self.log_dict: - with all_logging_disabled(): - wandb.log(self.log_dict) - wandb.run.finish() - - -@contextmanager -def all_logging_disabled(highest_level=logging.CRITICAL): - """ source - https://gist.github.com/simon-weber/7853144 - A context manager that will prevent any logging messages triggered during the body from being processed. - :param highest_level: the maximum logging level in use. - This would only need to be changed if a custom level greater than CRITICAL is defined. 
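
    Example (sketch, mirroring its use in WandbLogger.end_epoch):

        with all_logging_disabled():
            wandb.log(log_dict)  # logging-module messages triggered here are suppressed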
- """ - previous_level = logging.root.manager.disable - logging.disable(highest_level) - try: - yield - finally: - logging.disable(previous_level) diff --git a/src/yolov5_ros/src/yolov5/utils/loss.py b/src/yolov5_ros/src/yolov5/utils/loss.py deleted file mode 100644 index 9b9c3d9..0000000 --- a/src/yolov5_ros/src/yolov5/utils/loss.py +++ /dev/null @@ -1,234 +0,0 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license -""" -Loss functions -""" - -import torch -import torch.nn as nn - -from utils.metrics import bbox_iou -from utils.torch_utils import de_parallel - - -def smooth_BCE(eps=0.1): # https://github.com/ultralytics/yolov3/issues/238#issuecomment-598028441 - # return positive, negative label smoothing BCE targets - return 1.0 - 0.5 * eps, 0.5 * eps - - -class BCEBlurWithLogitsLoss(nn.Module): - # BCEwithLogitLoss() with reduced missing label effects. - def __init__(self, alpha=0.05): - super().__init__() - self.loss_fcn = nn.BCEWithLogitsLoss(reduction='none') # must be nn.BCEWithLogitsLoss() - self.alpha = alpha - - def forward(self, pred, true): - loss = self.loss_fcn(pred, true) - pred = torch.sigmoid(pred) # prob from logits - dx = pred - true # reduce only missing label effects - # dx = (pred - true).abs() # reduce missing label and false label effects - alpha_factor = 1 - torch.exp((dx - 1) / (self.alpha + 1e-4)) - loss *= alpha_factor - return loss.mean() - - -class FocalLoss(nn.Module): - # Wraps focal loss around existing loss_fcn(), i.e. criteria = FocalLoss(nn.BCEWithLogitsLoss(), gamma=1.5) - def __init__(self, loss_fcn, gamma=1.5, alpha=0.25): - super().__init__() - self.loss_fcn = loss_fcn # must be nn.BCEWithLogitsLoss() - self.gamma = gamma - self.alpha = alpha - self.reduction = loss_fcn.reduction - self.loss_fcn.reduction = 'none' # required to apply FL to each element - - def forward(self, pred, true): - loss = self.loss_fcn(pred, true) - # p_t = torch.exp(-loss) - # loss *= self.alpha * (1.000001 - p_t) ** self.gamma # non-zero power for gradient stability - - # TF implementation https://github.com/tensorflow/addons/blob/v0.7.1/tensorflow_addons/losses/focal_loss.py - pred_prob = torch.sigmoid(pred) # prob from logits - p_t = true * pred_prob + (1 - true) * (1 - pred_prob) - alpha_factor = true * self.alpha + (1 - true) * (1 - self.alpha) - modulating_factor = (1.0 - p_t) ** self.gamma - loss *= alpha_factor * modulating_factor - - if self.reduction == 'mean': - return loss.mean() - elif self.reduction == 'sum': - return loss.sum() - else: # 'none' - return loss - - -class QFocalLoss(nn.Module): - # Wraps Quality focal loss around existing loss_fcn(), i.e. 
criteria = FocalLoss(nn.BCEWithLogitsLoss(), gamma=1.5) - def __init__(self, loss_fcn, gamma=1.5, alpha=0.25): - super().__init__() - self.loss_fcn = loss_fcn # must be nn.BCEWithLogitsLoss() - self.gamma = gamma - self.alpha = alpha - self.reduction = loss_fcn.reduction - self.loss_fcn.reduction = 'none' # required to apply FL to each element - - def forward(self, pred, true): - loss = self.loss_fcn(pred, true) - - pred_prob = torch.sigmoid(pred) # prob from logits - alpha_factor = true * self.alpha + (1 - true) * (1 - self.alpha) - modulating_factor = torch.abs(true - pred_prob) ** self.gamma - loss *= alpha_factor * modulating_factor - - if self.reduction == 'mean': - return loss.mean() - elif self.reduction == 'sum': - return loss.sum() - else: # 'none' - return loss - - -class ComputeLoss: - sort_obj_iou = False - - # Compute losses - def __init__(self, model, autobalance=False): - device = next(model.parameters()).device # get model device - h = model.hyp # hyperparameters - - # Define criteria - BCEcls = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['cls_pw']], device=device)) - BCEobj = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['obj_pw']], device=device)) - - # Class label smoothing https://arxiv.org/pdf/1902.04103.pdf eqn 3 - self.cp, self.cn = smooth_BCE(eps=h.get('label_smoothing', 0.0)) # positive, negative BCE targets - - # Focal loss - g = h['fl_gamma'] # focal loss gamma - if g > 0: - BCEcls, BCEobj = FocalLoss(BCEcls, g), FocalLoss(BCEobj, g) - - m = de_parallel(model).model[-1] # Detect() module - self.balance = {3: [4.0, 1.0, 0.4]}.get(m.nl, [4.0, 1.0, 0.25, 0.06, 0.02]) # P3-P7 - self.ssi = list(m.stride).index(16) if autobalance else 0 # stride 16 index - self.BCEcls, self.BCEobj, self.gr, self.hyp, self.autobalance = BCEcls, BCEobj, 1.0, h, autobalance - self.na = m.na # number of anchors - self.nc = m.nc # number of classes - self.nl = m.nl # number of layers - self.anchors = m.anchors - self.device = device - - def __call__(self, p, targets): # predictions, targets - lcls = torch.zeros(1, device=self.device) # class loss - lbox = torch.zeros(1, device=self.device) # box loss - lobj = torch.zeros(1, device=self.device) # object loss - tcls, tbox, indices, anchors = self.build_targets(p, targets) # targets - - # Losses - for i, pi in enumerate(p): # layer index, layer predictions - b, a, gj, gi = indices[i] # image, anchor, gridy, gridx - tobj = torch.zeros(pi.shape[:4], dtype=pi.dtype, device=self.device) # target obj - - n = b.shape[0] # number of targets - if n: - # pxy, pwh, _, pcls = pi[b, a, gj, gi].tensor_split((2, 4, 5), dim=1) # faster, requires torch 1.8.0 - pxy, pwh, _, pcls = pi[b, a, gj, gi].split((2, 2, 1, self.nc), 1) # target-subset of predictions - - # Regression - pxy = pxy.sigmoid() * 2 - 0.5 - pwh = (pwh.sigmoid() * 2) ** 2 * anchors[i] - pbox = torch.cat((pxy, pwh), 1) # predicted box - iou = bbox_iou(pbox, tbox[i], CIoU=True).squeeze() # iou(prediction, target) - lbox += (1.0 - iou).mean() # iou loss - - # Objectness - iou = iou.detach().clamp(0).type(tobj.dtype) - if self.sort_obj_iou: - j = iou.argsort() - b, a, gj, gi, iou = b[j], a[j], gj[j], gi[j], iou[j] - if self.gr < 1: - iou = (1.0 - self.gr) + self.gr * iou - tobj[b, a, gj, gi] = iou # iou ratio - - # Classification - if self.nc > 1: # cls loss (only if multiple classes) - t = torch.full_like(pcls, self.cn, device=self.device) # targets - t[range(n), tcls[i]] = self.cp - lcls += self.BCEcls(pcls, t) # BCE - - # Append targets to text file - # with open('targets.txt', 'a') as 
file: - # [file.write('%11.5g ' * 4 % tuple(x) + '\n') for x in torch.cat((txy[i], twh[i]), 1)] - - obji = self.BCEobj(pi[..., 4], tobj) - lobj += obji * self.balance[i] # obj loss - if self.autobalance: - self.balance[i] = self.balance[i] * 0.9999 + 0.0001 / obji.detach().item() - - if self.autobalance: - self.balance = [x / self.balance[self.ssi] for x in self.balance] - lbox *= self.hyp['box'] - lobj *= self.hyp['obj'] - lcls *= self.hyp['cls'] - bs = tobj.shape[0] # batch size - - return (lbox + lobj + lcls) * bs, torch.cat((lbox, lobj, lcls)).detach() - - def build_targets(self, p, targets): - # Build targets for compute_loss(), input targets(image,class,x,y,w,h) - na, nt = self.na, targets.shape[0] # number of anchors, targets - tcls, tbox, indices, anch = [], [], [], [] - gain = torch.ones(7, device=self.device) # normalized to gridspace gain - ai = torch.arange(na, device=self.device).float().view(na, 1).repeat(1, nt) # same as .repeat_interleave(nt) - targets = torch.cat((targets.repeat(na, 1, 1), ai[..., None]), 2) # append anchor indices - - g = 0.5 # bias - off = torch.tensor( - [ - [0, 0], - [1, 0], - [0, 1], - [-1, 0], - [0, -1], # j,k,l,m - # [1, 1], [1, -1], [-1, 1], [-1, -1], # jk,jm,lk,lm - ], - device=self.device).float() * g # offsets - - for i in range(self.nl): - anchors, shape = self.anchors[i], p[i].shape - gain[2:6] = torch.tensor(shape)[[3, 2, 3, 2]] # xyxy gain - - # Match targets to anchors - t = targets * gain # shape(3,n,7) - if nt: - # Matches - r = t[..., 4:6] / anchors[:, None] # wh ratio - j = torch.max(r, 1 / r).max(2)[0] < self.hyp['anchor_t'] # compare - # j = wh_iou(anchors, t[:, 4:6]) > model.hyp['iou_t'] # iou(3,n)=wh_iou(anchors(3,2), gwh(n,2)) - t = t[j] # filter - - # Offsets - gxy = t[:, 2:4] # grid xy - gxi = gain[[2, 3]] - gxy # inverse - j, k = ((gxy % 1 < g) & (gxy > 1)).T - l, m = ((gxi % 1 < g) & (gxi > 1)).T - j = torch.stack((torch.ones_like(j), j, k, l, m)) - t = t.repeat((5, 1, 1))[j] - offsets = (torch.zeros_like(gxy)[None] + off[:, None])[j] - else: - t = targets[0] - offsets = 0 - - # Define - bc, gxy, gwh, a = t.chunk(4, 1) # (image, class), grid xy, grid wh, anchors - a, (b, c) = a.long().view(-1), bc.long().T # anchors, image, class - gij = (gxy - offsets).long() - gi, gj = gij.T # grid indices - - # Append - indices.append((b, a, gj.clamp_(0, shape[2] - 1), gi.clamp_(0, shape[3] - 1))) # image, anchor, grid - tbox.append(torch.cat((gxy - gij, gwh), 1)) # box - anch.append(anchors[a]) # anchors - tcls.append(c) # class - - return tcls, tbox, indices, anch diff --git a/src/yolov5_ros/src/yolov5/utils/metrics.py b/src/yolov5_ros/src/yolov5/utils/metrics.py deleted file mode 100644 index ee7d339..0000000 --- a/src/yolov5_ros/src/yolov5/utils/metrics.py +++ /dev/null @@ -1,367 +0,0 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license -""" -Model validation metrics -""" - -import math -import warnings -from pathlib import Path - -import matplotlib.pyplot as plt -import numpy as np -import torch - -from utils import TryExcept, threaded - - -def fitness(x): - # Model fitness as a weighted combination of metrics - w = [0.0, 0.0, 0.1, 0.9] # weights for [P, R, mAP@0.5, mAP@0.5:0.95] - return (x[:, :4] * w).sum(1) - - -def smooth(y, f=0.05): - # Box filter of fraction f - nf = round(len(y) * f * 2) // 2 + 1 # number of filter elements (must be odd) - p = np.ones(nf // 2) # ones padding - yp = np.concatenate((p * y[0], y, p * y[-1]), 0) # y padded - return np.convolve(yp, np.ones(nf) / nf, mode='valid') # y-smoothed - - -def ap_per_class(tp, conf, 
pred_cls, target_cls, plot=False, save_dir='.', names=(), eps=1e-16): - """ Compute the average precision, given the recall and precision curves. - Source: https://github.com/rafaelpadilla/Object-Detection-Metrics. - # Arguments - tp: True positives (nparray, nx1 or nx10). - conf: Objectness value from 0-1 (nparray). - pred_cls: Predicted object classes (nparray). - target_cls: True object classes (nparray). - plot: Plot precision-recall curve at mAP@0.5 - save_dir: Plot save directory - # Returns - The average precision as computed in py-faster-rcnn. - """ - - # Sort by objectness - i = np.argsort(-conf) - tp, conf, pred_cls = tp[i], conf[i], pred_cls[i] - - # Find unique classes - unique_classes, nt = np.unique(target_cls, return_counts=True) - nc = unique_classes.shape[0] # number of classes, number of detections - - # Create Precision-Recall curve and compute AP for each class - px, py = np.linspace(0, 1, 1000), [] # for plotting - ap, p, r = np.zeros((nc, tp.shape[1])), np.zeros((nc, 1000)), np.zeros((nc, 1000)) - for ci, c in enumerate(unique_classes): - i = pred_cls == c - n_l = nt[ci] # number of labels - n_p = i.sum() # number of predictions - if n_p == 0 or n_l == 0: - continue - - # Accumulate FPs and TPs - fpc = (1 - tp[i]).cumsum(0) - tpc = tp[i].cumsum(0) - - # Recall - recall = tpc / (n_l + eps) # recall curve - r[ci] = np.interp(-px, -conf[i], recall[:, 0], left=0) # negative x, xp because xp decreases - - # Precision - precision = tpc / (tpc + fpc) # precision curve - p[ci] = np.interp(-px, -conf[i], precision[:, 0], left=1) # p at pr_score - - # AP from recall-precision curve - for j in range(tp.shape[1]): - ap[ci, j], mpre, mrec = compute_ap(recall[:, j], precision[:, j]) - if plot and j == 0: - py.append(np.interp(px, mrec, mpre)) # precision at mAP@0.5 - - # Compute F1 (harmonic mean of precision and recall) - f1 = 2 * p * r / (p + r + eps) - names = [v for k, v in names.items() if k in unique_classes] # list: only classes that have data - names = dict(enumerate(names)) # to dict - if plot: - plot_pr_curve(px, py, ap, Path(save_dir) / 'PR_curve.png', names) - plot_mc_curve(px, f1, Path(save_dir) / 'F1_curve.png', names, ylabel='F1') - plot_mc_curve(px, p, Path(save_dir) / 'P_curve.png', names, ylabel='Precision') - plot_mc_curve(px, r, Path(save_dir) / 'R_curve.png', names, ylabel='Recall') - - i = smooth(f1.mean(0), 0.1).argmax() # max F1 index - p, r, f1 = p[:, i], r[:, i], f1[:, i] - tp = (r * nt).round() # true positives - fp = (tp / (p + eps) - tp).round() # false positives - return tp, fp, p, r, f1, ap, unique_classes.astype(int) - - -def compute_ap(recall, precision): - """ Compute the average precision, given the recall and precision curves - # Arguments - recall: The recall curve (list) - precision: The precision curve (list) - # Returns - Average precision, precision curve, recall curve - """ - - # Append sentinel values to beginning and end - mrec = np.concatenate(([0.0], recall, [1.0])) - mpre = np.concatenate(([1.0], precision, [0.0])) - - # Compute the precision envelope - mpre = np.flip(np.maximum.accumulate(np.flip(mpre))) - - # Integrate area under curve - method = 'interp' # methods: 'continuous', 'interp' - if method == 'interp': - x = np.linspace(0, 1, 101) # 101-point interp (COCO) - ap = np.trapz(np.interp(x, mrec, mpre), x) # integrate - else: # 'continuous' - i = np.where(mrec[1:] != mrec[:-1])[0] # points where x axis (recall) changes - ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1]) # area under curve - - return ap, mpre, mrec - - -class 
ConfusionMatrix:
-    # Updated version of https://github.com/kaanakan/object_detection_confusion_matrix
-    def __init__(self, nc, conf=0.25, iou_thres=0.45):
-        self.matrix = np.zeros((nc + 1, nc + 1))
-        self.nc = nc  # number of classes
-        self.conf = conf
-        self.iou_thres = iou_thres
-
-    def process_batch(self, detections, labels):
-        """
-        Update the confusion matrix with one batch of detections and labels.
-        Both sets of boxes are expected to be in (x1, y1, x2, y2) format.
-        Arguments:
-            detections (Array[N, 6]), x1, y1, x2, y2, conf, class
-            labels (Array[M, 5]), class, x1, y1, x2, y2
-        Returns:
-            None, updates confusion matrix accordingly
-        """
-        if detections is None:
-            gt_classes = labels.int()
-            for gc in gt_classes:
-                self.matrix[self.nc, gc] += 1  # background FN
-            return
-
-        detections = detections[detections[:, 4] > self.conf]
-        gt_classes = labels[:, 0].int()
-        detection_classes = detections[:, 5].int()
-        iou = box_iou(labels[:, 1:], detections[:, :4])
-
-        x = torch.where(iou > self.iou_thres)
-        if x[0].shape[0]:
-            matches = torch.cat((torch.stack(x, 1), iou[x[0], x[1]][:, None]), 1).cpu().numpy()
-            if x[0].shape[0] > 1:
-                matches = matches[matches[:, 2].argsort()[::-1]]
-                matches = matches[np.unique(matches[:, 1], return_index=True)[1]]
-                matches = matches[matches[:, 2].argsort()[::-1]]
-                matches = matches[np.unique(matches[:, 0], return_index=True)[1]]
-        else:
-            matches = np.zeros((0, 3))
-
-        n = matches.shape[0] > 0
-        m0, m1, _ = matches.transpose().astype(int)
-        for i, gc in enumerate(gt_classes):
-            j = m0 == i
-            if n and sum(j) == 1:
-                self.matrix[detection_classes[m1[j]], gc] += 1  # correct
-            else:
-                self.matrix[self.nc, gc] += 1  # background FN (missed ground truth)
-
-        if n:
-            for i, dc in enumerate(detection_classes):
-                if not any(m1 == i):
-                    self.matrix[dc, self.nc] += 1  # background FP (unmatched detection)
-
-    def matrix(self):
-        return self.matrix
-
-    def tp_fp(self):
-        tp = self.matrix.diagonal()  # true positives
-        fp = self.matrix.sum(1) - tp  # false positives
-        # fn = self.matrix.sum(0) - tp  # false negatives (missed detections)
-        return tp[:-1], fp[:-1]  # remove background class
-
-    @TryExcept('WARNING: ConfusionMatrix plot failure: ')
-    def plot(self, normalize=True, save_dir='', names=()):
-        import seaborn as sn
-
-        array = self.matrix / ((self.matrix.sum(0).reshape(1, -1) + 1E-9) if normalize else 1)  # normalize columns
-        array[array < 0.005] = np.nan  # don't annotate (would appear as 0.00)
-
-        fig, ax = plt.subplots(1, 1, figsize=(12, 9), tight_layout=True)
-        nc, nn = self.nc, len(names)  # number of classes, names
-        sn.set(font_scale=1.0 if nc < 50 else 0.8)  # for label size
-        labels = (0 < nn < 99) and (nn == nc)  # apply names to ticklabels
-        with warnings.catch_warnings():
-            warnings.simplefilter('ignore')  # suppress empty matrix RuntimeWarning: All-NaN slice encountered
-            sn.heatmap(array,
-                       ax=ax,
-                       annot=nc < 30,
-                       annot_kws={
-                           "size": 8},
-                       cmap='Blues',
-                       fmt='.2f',
-                       square=True,
-                       vmin=0.0,
-                       xticklabels=names + ['background FP'] if labels else "auto",
-                       yticklabels=names + ['background FN'] if labels else "auto").set_facecolor((1, 1, 1))
-        ax.set_xlabel('True')
-        ax.set_ylabel('Predicted')
-        ax.set_title('Confusion Matrix')
-        fig.savefig(Path(save_dir) / 'confusion_matrix.png', dpi=250)
-        plt.close(fig)
-
-    def print(self):
-        for i in range(self.nc + 1):
-            print(' '.join(map(str, self.matrix[i])))
-
-
-def bbox_iou(box1, box2, xywh=True, GIoU=False, DIoU=False, CIoU=False, eps=1e-7):
-    # Returns Intersection over Union (IoU) of box1(1,4) to box2(n,4)
-
-    # Get the coordinates of bounding boxes
-    if xywh:  #
transform from xywh to xyxy - (x1, y1, w1, h1), (x2, y2, w2, h2) = box1.chunk(4, 1), box2.chunk(4, 1) - w1_, h1_, w2_, h2_ = w1 / 2, h1 / 2, w2 / 2, h2 / 2 - b1_x1, b1_x2, b1_y1, b1_y2 = x1 - w1_, x1 + w1_, y1 - h1_, y1 + h1_ - b2_x1, b2_x2, b2_y1, b2_y2 = x2 - w2_, x2 + w2_, y2 - h2_, y2 + h2_ - else: # x1, y1, x2, y2 = box1 - b1_x1, b1_y1, b1_x2, b1_y2 = box1.chunk(4, 1) - b2_x1, b2_y1, b2_x2, b2_y2 = box2.chunk(4, 1) - w1, h1 = b1_x2 - b1_x1, b1_y2 - b1_y1 - w2, h2 = b2_x2 - b2_x1, b2_y2 - b2_y1 - - # Intersection area - inter = (torch.min(b1_x2, b2_x2) - torch.max(b1_x1, b2_x1)).clamp(0) * \ - (torch.min(b1_y2, b2_y2) - torch.max(b1_y1, b2_y1)).clamp(0) - - # Union Area - union = w1 * h1 + w2 * h2 - inter + eps - - # IoU - iou = inter / union - if CIoU or DIoU or GIoU: - cw = torch.max(b1_x2, b2_x2) - torch.min(b1_x1, b2_x1) # convex (smallest enclosing box) width - ch = torch.max(b1_y2, b2_y2) - torch.min(b1_y1, b2_y1) # convex height - if CIoU or DIoU: # Distance or Complete IoU https://arxiv.org/abs/1911.08287v1 - c2 = cw ** 2 + ch ** 2 + eps # convex diagonal squared - rho2 = ((b2_x1 + b2_x2 - b1_x1 - b1_x2) ** 2 + (b2_y1 + b2_y2 - b1_y1 - b1_y2) ** 2) / 4 # center dist ** 2 - if CIoU: # https://github.com/Zzh-tju/DIoU-SSD-pytorch/blob/master/utils/box/box_utils.py#L47 - v = (4 / math.pi ** 2) * torch.pow(torch.atan(w2 / (h2 + eps)) - torch.atan(w1 / (h1 + eps)), 2) - with torch.no_grad(): - alpha = v / (v - iou + (1 + eps)) - return iou - (rho2 / c2 + v * alpha) # CIoU - return iou - rho2 / c2 # DIoU - c_area = cw * ch + eps # convex area - return iou - (c_area - union) / c_area # GIoU https://arxiv.org/pdf/1902.09630.pdf - return iou # IoU - - -def box_area(box): - # box = xyxy(4,n) - return (box[2] - box[0]) * (box[3] - box[1]) - - -def box_iou(box1, box2, eps=1e-7): - # https://github.com/pytorch/vision/blob/master/torchvision/ops/boxes.py - """ - Return intersection-over-union (Jaccard index) of boxes. - Both sets of boxes are expected to be in (x1, y1, x2, y2) format. - Arguments: - box1 (Tensor[N, 4]) - box2 (Tensor[M, 4]) - Returns: - iou (Tensor[N, M]): the NxM matrix containing the pairwise - IoU values for every element in boxes1 and boxes2 - """ - - # inter(N,M) = (rb(N,M,2) - lt(N,M,2)).clamp(0).prod(2) - (a1, a2), (b1, b2) = box1[:, None].chunk(2, 2), box2.chunk(2, 1) - inter = (torch.min(a2, b2) - torch.max(a1, b1)).clamp(0).prod(2) - - # IoU = inter / (area1 + area2 - inter) - return inter / (box_area(box1.T)[:, None] + box_area(box2.T) - inter + eps) - - -def bbox_ioa(box1, box2, eps=1e-7): - """ Returns the intersection over box2 area given box1, box2. Boxes are x1y1x2y2 - box1: np.array of shape(4) - box2: np.array of shape(nx4) - returns: np.array of shape(n) - """ - - # Get the coordinates of bounding boxes - b1_x1, b1_y1, b1_x2, b1_y2 = box1 - b2_x1, b2_y1, b2_x2, b2_y2 = box2.T - - # Intersection area - inter_area = (np.minimum(b1_x2, b2_x2) - np.maximum(b1_x1, b2_x1)).clip(0) * \ - (np.minimum(b1_y2, b2_y2) - np.maximum(b1_y1, b2_y1)).clip(0) - - # box2 area - box2_area = (b2_x2 - b2_x1) * (b2_y2 - b2_y1) + eps - - # Intersection over box2 area - return inter_area / box2_area - - -def wh_iou(wh1, wh2, eps=1e-7): - # Returns the nxm IoU matrix. 
wh1 is nx2, wh2 is mx2 - wh1 = wh1[:, None] # [N,1,2] - wh2 = wh2[None] # [1,M,2] - inter = torch.min(wh1, wh2).prod(2) # [N,M] - return inter / (wh1.prod(2) + wh2.prod(2) - inter + eps) # iou = inter / (area1 + area2 - inter) - - -# Plots ---------------------------------------------------------------------------------------------------------------- - - -@threaded -def plot_pr_curve(px, py, ap, save_dir=Path('pr_curve.png'), names=()): - # Precision-recall curve - fig, ax = plt.subplots(1, 1, figsize=(9, 6), tight_layout=True) - py = np.stack(py, axis=1) - - if 0 < len(names) < 21: # display per-class legend if < 21 classes - for i, y in enumerate(py.T): - ax.plot(px, y, linewidth=1, label=f'{names[i]} {ap[i, 0]:.3f}') # plot(recall, precision) - else: - ax.plot(px, py, linewidth=1, color='grey') # plot(recall, precision) - - ax.plot(px, py.mean(1), linewidth=3, color='blue', label='all classes %.3f mAP@0.5' % ap[:, 0].mean()) - ax.set_xlabel('Recall') - ax.set_ylabel('Precision') - ax.set_xlim(0, 1) - ax.set_ylim(0, 1) - ax.legend(bbox_to_anchor=(1.04, 1), loc="upper left") - ax.set_title('Precision-Recall Curve') - fig.savefig(save_dir, dpi=250) - plt.close(fig) - - -@threaded -def plot_mc_curve(px, py, save_dir=Path('mc_curve.png'), names=(), xlabel='Confidence', ylabel='Metric'): - # Metric-confidence curve - fig, ax = plt.subplots(1, 1, figsize=(9, 6), tight_layout=True) - - if 0 < len(names) < 21: # display per-class legend if < 21 classes - for i, y in enumerate(py): - ax.plot(px, y, linewidth=1, label=f'{names[i]}') # plot(confidence, metric) - else: - ax.plot(px, py.T, linewidth=1, color='grey') # plot(confidence, metric) - - y = smooth(py.mean(0), 0.05) - ax.plot(px, y, linewidth=3, color='blue', label=f'all classes {y.max():.2f} at {px[y.argmax()]:.3f}') - ax.set_xlabel(xlabel) - ax.set_ylabel(ylabel) - ax.set_xlim(0, 1) - ax.set_ylim(0, 1) - ax.legend(bbox_to_anchor=(1.04, 1), loc="upper left") - ax.set_title(f'{ylabel}-Confidence Curve') - fig.savefig(save_dir, dpi=250) - plt.close(fig) diff --git a/src/yolov5_ros/src/yolov5/utils/plots.py b/src/yolov5_ros/src/yolov5/utils/plots.py deleted file mode 100644 index 0530d0a..0000000 --- a/src/yolov5_ros/src/yolov5/utils/plots.py +++ /dev/null @@ -1,519 +0,0 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license -""" -Plotting utils -""" - -import contextlib -import math -import os -from copy import copy -from pathlib import Path -from urllib.error import URLError - -import cv2 -import matplotlib -import matplotlib.pyplot as plt -import numpy as np -import pandas as pd -import seaborn as sn -import torch -from PIL import Image, ImageDraw, ImageFont - -from utils import TryExcept, threaded -from utils.general import (CONFIG_DIR, FONT, LOGGER, check_font, check_requirements, clip_coords, increment_path, - is_ascii, xywh2xyxy, xyxy2xywh) -from utils.metrics import fitness - -# Settings -RANK = int(os.getenv('RANK', -1)) -matplotlib.rc('font', **{'size': 11}) -matplotlib.use('Agg') # for writing to files only - - -class Colors: - # Ultralytics color palette https://ultralytics.com/ - def __init__(self): - # hex = matplotlib.colors.TABLEAU_COLORS.values() - hexs = ('FF3838', 'FF9D97', 'FF701F', 'FFB21D', 'CFD231', '48F90A', '92CC17', '3DDB86', '1A9334', '00D4BB', - '2C99A8', '00C2FF', '344593', '6473FF', '0018EC', '8438FF', '520085', 'CB38FF', 'FF95C8', 'FF37C7') - self.palette = [self.hex2rgb(f'#{c}') for c in hexs] - self.n = len(self.palette) - - def __call__(self, i, bgr=False): - c = self.palette[int(i) % self.n] - return (c[2], c[1], 
c[0]) if bgr else c - - @staticmethod - def hex2rgb(h): # rgb order (PIL) - return tuple(int(h[1 + i:1 + i + 2], 16) for i in (0, 2, 4)) - - -colors = Colors() # create instance for 'from utils.plots import colors' - - -def check_pil_font(font=FONT, size=10): - # Return a PIL TrueType Font, downloading to CONFIG_DIR if necessary - font = Path(font) - font = font if font.exists() else (CONFIG_DIR / font.name) - try: - return ImageFont.truetype(str(font) if font.exists() else font.name, size) - except Exception: # download if missing - try: - check_font(font) - return ImageFont.truetype(str(font), size) - except TypeError: - check_requirements('Pillow>=8.4.0') # known issue https://github.com/ultralytics/yolov5/issues/5374 - except URLError: # not online - return ImageFont.load_default() - - -class Annotator: - # YOLOv5 Annotator for train/val mosaics and jpgs and detect/hub inference annotations - def __init__(self, im, line_width=None, font_size=None, font='Arial.ttf', pil=False, example='abc'): - assert im.data.contiguous, 'Image not contiguous. Apply np.ascontiguousarray(im) to Annotator() input images.' - non_ascii = not is_ascii(example) # non-latin labels, i.e. asian, arabic, cyrillic - self.pil = pil or non_ascii - if self.pil: # use PIL - self.im = im if isinstance(im, Image.Image) else Image.fromarray(im) - self.draw = ImageDraw.Draw(self.im) - self.font = check_pil_font(font='Arial.Unicode.ttf' if non_ascii else font, - size=font_size or max(round(sum(self.im.size) / 2 * 0.035), 12)) - else: # use cv2 - self.im = im - self.lw = line_width or max(round(sum(im.shape) / 2 * 0.003), 2) # line width - - def box_label(self, box, label='', color=(128, 128, 128), txt_color=(255, 255, 255)): - # Add one xyxy box to image with label - if self.pil or not is_ascii(label): - self.draw.rectangle(box, width=self.lw, outline=color) # box - if label: - w, h = self.font.getsize(label) # text width, height - outside = box[1] - h >= 0 # label fits outside box - self.draw.rectangle( - (box[0], box[1] - h if outside else box[1], box[0] + w + 1, - box[1] + 1 if outside else box[1] + h + 1), - fill=color, - ) - # self.draw.text((box[0], box[1]), label, fill=txt_color, font=self.font, anchor='ls') # for PIL>8.0 - self.draw.text((box[0], box[1] - h if outside else box[1]), label, fill=txt_color, font=self.font) - else: # cv2 - p1, p2 = (int(box[0]), int(box[1])), (int(box[2]), int(box[3])) - cv2.rectangle(self.im, p1, p2, color, thickness=self.lw, lineType=cv2.LINE_AA) - if label: - tf = max(self.lw - 1, 1) # font thickness - w, h = cv2.getTextSize(label, 0, fontScale=self.lw / 3, thickness=tf)[0] # text width, height - outside = p1[1] - h >= 3 - p2 = p1[0] + w, p1[1] - h - 3 if outside else p1[1] + h + 3 - cv2.rectangle(self.im, p1, p2, color, -1, cv2.LINE_AA) # filled - cv2.putText(self.im, - label, (p1[0], p1[1] - 2 if outside else p1[1] + h + 2), - 0, - self.lw / 3, - txt_color, - thickness=tf, - lineType=cv2.LINE_AA) - - def rectangle(self, xy, fill=None, outline=None, width=1): - # Add rectangle to image (PIL-only) - self.draw.rectangle(xy, fill, outline, width) - - def text(self, xy, text, txt_color=(255, 255, 255), anchor='top'): - # Add text to image (PIL-only) - if anchor == 'bottom': # start y from font bottom - w, h = self.font.getsize(text) # text width, height - xy[1] += 1 - h - self.draw.text(xy, text, fill=txt_color, font=self.font) - - def result(self): - # Return annotated image as array - return np.asarray(self.im) - - -def feature_visualization(x, module_type, stage, n=32, 
save_dir=Path('runs/detect/exp')): - """ - x: Features to be visualized - module_type: Module type - stage: Module stage within model - n: Maximum number of feature maps to plot - save_dir: Directory to save results - """ - if 'Detect' not in module_type: - batch, channels, height, width = x.shape # batch, channels, height, width - if height > 1 and width > 1: - f = save_dir / f"stage{stage}_{module_type.split('.')[-1]}_features.png" # filename - - blocks = torch.chunk(x[0].cpu(), channels, dim=0) # select batch index 0, block by channels - n = min(n, channels) # number of plots - fig, ax = plt.subplots(math.ceil(n / 8), 8, tight_layout=True) # 8 rows x n/8 cols - ax = ax.ravel() - plt.subplots_adjust(wspace=0.05, hspace=0.05) - for i in range(n): - ax[i].imshow(blocks[i].squeeze()) # cmap='gray' - ax[i].axis('off') - - LOGGER.info(f'Saving {f}... ({n}/{channels})') - plt.title('Features') - plt.savefig(f, dpi=300, bbox_inches='tight') - plt.close() - np.save(str(f.with_suffix('.npy')), x[0].cpu().numpy()) # npy save - - -def hist2d(x, y, n=100): - # 2d histogram used in labels.png and evolve.png - xedges, yedges = np.linspace(x.min(), x.max(), n), np.linspace(y.min(), y.max(), n) - hist, xedges, yedges = np.histogram2d(x, y, (xedges, yedges)) - xidx = np.clip(np.digitize(x, xedges) - 1, 0, hist.shape[0] - 1) - yidx = np.clip(np.digitize(y, yedges) - 1, 0, hist.shape[1] - 1) - return np.log(hist[xidx, yidx]) - - -def butter_lowpass_filtfilt(data, cutoff=1500, fs=50000, order=5): - from scipy.signal import butter, filtfilt - - # https://stackoverflow.com/questions/28536191/how-to-filter-smooth-with-scipy-numpy - def butter_lowpass(cutoff, fs, order): - nyq = 0.5 * fs - normal_cutoff = cutoff / nyq - return butter(order, normal_cutoff, btype='low', analog=False) - - b, a = butter_lowpass(cutoff, fs, order=order) - return filtfilt(b, a, data) # forward-backward filter - - -def output_to_target(output): - # Convert model output to target format [batch_id, class_id, x, y, w, h, conf] - targets = [] - for i, o in enumerate(output): - targets.extend([i, cls, *list(*xyxy2xywh(np.array(box)[None])), conf] for *box, conf, cls in o.cpu().numpy()) - return np.array(targets) - - -@threaded -def plot_images(images, targets, paths=None, fname='images.jpg', names=None, max_size=1920, max_subplots=16): - # Plot image grid with labels - if isinstance(images, torch.Tensor): - images = images.cpu().float().numpy() - if isinstance(targets, torch.Tensor): - targets = targets.cpu().numpy() - if np.max(images[0]) <= 1: - images *= 255 # de-normalise (optional) - bs, _, h, w = images.shape # batch size, _, height, width - bs = min(bs, max_subplots) # limit plot images - ns = np.ceil(bs ** 0.5) # number of subplots (square) - - # Build Image - mosaic = np.full((int(ns * h), int(ns * w), 3), 255, dtype=np.uint8) # init - for i, im in enumerate(images): - if i == max_subplots: # if last batch has fewer images than we expect - break - x, y = int(w * (i // ns)), int(h * (i % ns)) # block origin - im = im.transpose(1, 2, 0) - mosaic[y:y + h, x:x + w, :] = im - - # Resize (optional) - scale = max_size / ns / max(h, w) - if scale < 1: - h = math.ceil(scale * h) - w = math.ceil(scale * w) - mosaic = cv2.resize(mosaic, tuple(int(x * ns) for x in (w, h))) - - # Annotate - fs = int((h + w) * ns * 0.01) # font size - annotator = Annotator(mosaic, line_width=round(fs / 10), font_size=fs, pil=True, example=names) - for i in range(i + 1): - x, y = int(w * (i // ns)), int(h * (i % ns)) # block origin - annotator.rectangle([x, y, x 
+ w, y + h], None, (255, 255, 255), width=2) # borders - if paths: - annotator.text((x + 5, y + 5), text=Path(paths[i]).name[:40], txt_color=(220, 220, 220)) # filenames - if len(targets) > 0: - ti = targets[targets[:, 0] == i] # image targets - boxes = xywh2xyxy(ti[:, 2:6]).T - classes = ti[:, 1].astype('int') - labels = ti.shape[1] == 6 # labels if no conf column - conf = None if labels else ti[:, 6] # check for confidence presence (label vs pred) - - if boxes.shape[1]: - if boxes.max() <= 1.01: # if normalized with tolerance 0.01 - boxes[[0, 2]] *= w # scale to pixels - boxes[[1, 3]] *= h - elif scale < 1: # absolute coords need scale if image scales - boxes *= scale - boxes[[0, 2]] += x - boxes[[1, 3]] += y - for j, box in enumerate(boxes.T.tolist()): - cls = classes[j] - color = colors(cls) - cls = names[cls] if names else cls - if labels or conf[j] > 0.25: # 0.25 conf thresh - label = f'{cls}' if labels else f'{cls} {conf[j]:.1f}' - annotator.box_label(box, label, color=color) - annotator.im.save(fname) # save - - -def plot_lr_scheduler(optimizer, scheduler, epochs=300, save_dir=''): - # Plot LR simulating training for full epochs - optimizer, scheduler = copy(optimizer), copy(scheduler) # do not modify originals - y = [] - for _ in range(epochs): - scheduler.step() - y.append(optimizer.param_groups[0]['lr']) - plt.plot(y, '.-', label='LR') - plt.xlabel('epoch') - plt.ylabel('LR') - plt.grid() - plt.xlim(0, epochs) - plt.ylim(0) - plt.savefig(Path(save_dir) / 'LR.png', dpi=200) - plt.close() - - -def plot_val_txt(): # from utils.plots import *; plot_val() - # Plot val.txt histograms - x = np.loadtxt('val.txt', dtype=np.float32) - box = xyxy2xywh(x[:, :4]) - cx, cy = box[:, 0], box[:, 1] - - fig, ax = plt.subplots(1, 1, figsize=(6, 6), tight_layout=True) - ax.hist2d(cx, cy, bins=600, cmax=10, cmin=0) - ax.set_aspect('equal') - plt.savefig('hist2d.png', dpi=300) - - fig, ax = plt.subplots(1, 2, figsize=(12, 6), tight_layout=True) - ax[0].hist(cx, bins=600) - ax[1].hist(cy, bins=600) - plt.savefig('hist1d.png', dpi=200) - - -def plot_targets_txt(): # from utils.plots import *; plot_targets_txt() - # Plot targets.txt histograms - x = np.loadtxt('targets.txt', dtype=np.float32).T - s = ['x targets', 'y targets', 'width targets', 'height targets'] - fig, ax = plt.subplots(2, 2, figsize=(8, 8), tight_layout=True) - ax = ax.ravel() - for i in range(4): - ax[i].hist(x[i], bins=100, label=f'{x[i].mean():.3g} +/- {x[i].std():.3g}') - ax[i].legend() - ax[i].set_title(s[i]) - plt.savefig('targets.jpg', dpi=200) - - -def plot_val_study(file='', dir='', x=None): # from utils.plots import *; plot_val_study() - # Plot file=study.txt generated by val.py (or plot all study*.txt in dir) - save_dir = Path(file).parent if file else Path(dir) - plot2 = False # plot additional results - if plot2: - ax = plt.subplots(2, 4, figsize=(10, 6), tight_layout=True)[1].ravel() - - fig2, ax2 = plt.subplots(1, 1, figsize=(8, 4), tight_layout=True) - # for f in [save_dir / f'study_coco_{x}.txt' for x in ['yolov5n6', 'yolov5s6', 'yolov5m6', 'yolov5l6', 'yolov5x6']]: - for f in sorted(save_dir.glob('study*.txt')): - y = np.loadtxt(f, dtype=np.float32, usecols=[0, 1, 2, 3, 7, 8, 9], ndmin=2).T - x = np.arange(y.shape[1]) if x is None else np.array(x) - if plot2: - s = ['P', 'R', 'mAP@.5', 'mAP@.5:.95', 't_preprocess (ms/img)', 't_inference (ms/img)', 't_NMS (ms/img)'] - for i in range(7): - ax[i].plot(x, y[i], '.-', linewidth=2, markersize=8) - ax[i].set_title(s[i]) - - j = y[3].argmax() + 1 - ax2.plot(y[5, 1:j], - y[3, 
1:j] * 1E2, - '.-', - linewidth=2, - markersize=8, - label=f.stem.replace('study_coco_', '').replace('yolo', 'YOLO')) - - ax2.plot(1E3 / np.array([209, 140, 97, 58, 35, 18]), [34.6, 40.5, 43.0, 47.5, 49.7, 51.5], - 'k.-', - linewidth=2, - markersize=8, - alpha=.25, - label='EfficientDet') - - ax2.grid(alpha=0.2) - ax2.set_yticks(np.arange(20, 60, 5)) - ax2.set_xlim(0, 57) - ax2.set_ylim(25, 55) - ax2.set_xlabel('GPU Speed (ms/img)') - ax2.set_ylabel('COCO AP val') - ax2.legend(loc='lower right') - f = save_dir / 'study.png' - print(f'Saving {f}...') - plt.savefig(f, dpi=300) - - -@TryExcept() # known issue https://github.com/ultralytics/yolov5/issues/5395 -def plot_labels(labels, names=(), save_dir=Path('')): - # plot dataset labels - LOGGER.info(f"Plotting labels to {save_dir / 'labels.jpg'}... ") - c, b = labels[:, 0], labels[:, 1:].transpose() # classes, boxes - nc = int(c.max() + 1) # number of classes - x = pd.DataFrame(b.transpose(), columns=['x', 'y', 'width', 'height']) - - # seaborn correlogram - sn.pairplot(x, corner=True, diag_kind='auto', kind='hist', diag_kws=dict(bins=50), plot_kws=dict(pmax=0.9)) - plt.savefig(save_dir / 'labels_correlogram.jpg', dpi=200) - plt.close() - - # matplotlib labels - matplotlib.use('svg') # faster - ax = plt.subplots(2, 2, figsize=(8, 8), tight_layout=True)[1].ravel() - y = ax[0].hist(c, bins=np.linspace(0, nc, nc + 1) - 0.5, rwidth=0.8) - with contextlib.suppress(Exception): # color histogram bars by class - [y[2].patches[i].set_color([x / 255 for x in colors(i)]) for i in range(nc)] # known issue #3195 - ax[0].set_ylabel('instances') - if 0 < len(names) < 30: - ax[0].set_xticks(range(len(names))) - ax[0].set_xticklabels(list(names.values()), rotation=90, fontsize=10) - else: - ax[0].set_xlabel('classes') - sn.histplot(x, x='x', y='y', ax=ax[2], bins=50, pmax=0.9) - sn.histplot(x, x='width', y='height', ax=ax[3], bins=50, pmax=0.9) - - # rectangles - labels[:, 1:3] = 0.5 # center - labels[:, 1:] = xywh2xyxy(labels[:, 1:]) * 2000 - img = Image.fromarray(np.ones((2000, 2000, 3), dtype=np.uint8) * 255) - for cls, *box in labels[:1000]: - ImageDraw.Draw(img).rectangle(box, width=1, outline=colors(cls)) # plot - ax[1].imshow(img) - ax[1].axis('off') - - for a in [0, 1, 2, 3]: - for s in ['top', 'right', 'left', 'bottom']: - ax[a].spines[s].set_visible(False) - - plt.savefig(save_dir / 'labels.jpg', dpi=200) - matplotlib.use('Agg') - plt.close() - - -def imshow_cls(im, labels=None, pred=None, names=None, nmax=25, verbose=False, f=Path('images.jpg')): - # Show classification image grid with labels (optional) and predictions (optional) - from utils.augmentations import denormalize - - names = names or [f'class{i}' for i in range(1000)] - blocks = torch.chunk(denormalize(im.clone()).cpu().float(), len(im), - dim=0) # select batch index 0, block by channels - n = min(len(blocks), nmax) # number of plots - m = min(8, round(n ** 0.5)) # 8 x 8 default - fig, ax = plt.subplots(math.ceil(n / m), m) # 8 rows x n/8 cols - ax = ax.ravel() if m > 1 else [ax] - # plt.subplots_adjust(wspace=0.05, hspace=0.05) - for i in range(n): - ax[i].imshow(blocks[i].squeeze().permute((1, 2, 0)).numpy().clip(0.0, 1.0)) - ax[i].axis('off') - if labels is not None: - s = names[labels[i]] + (f'—{names[pred[i]]}' if pred is not None else '') - ax[i].set_title(s, fontsize=8, verticalalignment='top') - plt.savefig(f, dpi=300, bbox_inches='tight') - plt.close() - if verbose: - LOGGER.info(f"Saving {f}") - if labels is not None: - LOGGER.info('True: ' + ' '.join(f'{names[i]:3s}' for i in 
labels[:nmax])) - if pred is not None: - LOGGER.info('Predicted:' + ' '.join(f'{names[i]:3s}' for i in pred[:nmax])) - return f - - -def plot_evolve(evolve_csv='path/to/evolve.csv'): # from utils.plots import *; plot_evolve() - # Plot evolve.csv hyp evolution results - evolve_csv = Path(evolve_csv) - data = pd.read_csv(evolve_csv) - keys = [x.strip() for x in data.columns] - x = data.values - f = fitness(x) - j = np.argmax(f) # max fitness index - plt.figure(figsize=(10, 12), tight_layout=True) - matplotlib.rc('font', **{'size': 8}) - print(f'Best results from row {j} of {evolve_csv}:') - for i, k in enumerate(keys[7:]): - v = x[:, 7 + i] - mu = v[j] # best single result - plt.subplot(6, 5, i + 1) - plt.scatter(v, f, c=hist2d(v, f, 20), cmap='viridis', alpha=.8, edgecolors='none') - plt.plot(mu, f.max(), 'k+', markersize=15) - plt.title(f'{k} = {mu:.3g}', fontdict={'size': 9}) # limit to 40 characters - if i % 5 != 0: - plt.yticks([]) - print(f'{k:>15}: {mu:.3g}') - f = evolve_csv.with_suffix('.png') # filename - plt.savefig(f, dpi=200) - plt.close() - print(f'Saved {f}') - - -def plot_results(file='path/to/results.csv', dir=''): - # Plot training results.csv. Usage: from utils.plots import *; plot_results('path/to/results.csv') - save_dir = Path(file).parent if file else Path(dir) - fig, ax = plt.subplots(2, 5, figsize=(12, 6), tight_layout=True) - ax = ax.ravel() - files = list(save_dir.glob('results*.csv')) - assert len(files), f'No results.csv files found in {save_dir.resolve()}, nothing to plot.' - for f in files: - try: - data = pd.read_csv(f) - s = [x.strip() for x in data.columns] - x = data.values[:, 0] - for i, j in enumerate([1, 2, 3, 4, 5, 8, 9, 10, 6, 7]): - y = data.values[:, j].astype('float') - # y[y == 0] = np.nan # don't show zero values - ax[i].plot(x, y, marker='.', label=f.stem, linewidth=2, markersize=8) - ax[i].set_title(s[j], fontsize=12) - # if j in [8, 9, 10]: # share train and val loss y axes - # ax[i].get_shared_y_axes().join(ax[i], ax[i - 5]) - except Exception as e: - LOGGER.info(f'Warning: Plotting error for {f}: {e}') - ax[1].legend() - fig.savefig(save_dir / 'results.png', dpi=200) - plt.close() - - -def profile_idetection(start=0, stop=0, labels=(), save_dir=''): - # Plot iDetection '*.txt' per-image logs. 
from utils.plots import *; profile_idetection() - ax = plt.subplots(2, 4, figsize=(12, 6), tight_layout=True)[1].ravel() - s = ['Images', 'Free Storage (GB)', 'RAM Usage (GB)', 'Battery', 'dt_raw (ms)', 'dt_smooth (ms)', 'real-world FPS'] - files = list(Path(save_dir).glob('frames*.txt')) - for fi, f in enumerate(files): - try: - results = np.loadtxt(f, ndmin=2).T[:, 90:-30] # clip first and last rows - n = results.shape[1] # number of rows - x = np.arange(start, min(stop, n) if stop else n) - results = results[:, x] - t = (results[0] - results[0].min()) # set t0=0s - results[0] = x - for i, a in enumerate(ax): - if i < len(results): - label = labels[fi] if len(labels) else f.stem.replace('frames_', '') - a.plot(t, results[i], marker='.', label=label, linewidth=1, markersize=5) - a.set_title(s[i]) - a.set_xlabel('time (s)') - # if fi == len(files) - 1: - # a.set_ylim(bottom=0) - for side in ['top', 'right']: - a.spines[side].set_visible(False) - else: - a.remove() - except Exception as e: - print(f'Warning: Plotting error for {f}; {e}') - ax[1].legend() - plt.savefig(Path(save_dir) / 'idetection_profile.png', dpi=200) - - -def save_one_box(xyxy, im, file=Path('im.jpg'), gain=1.02, pad=10, square=False, BGR=False, save=True): - # Save image crop as {file} with crop size multiple {gain} and {pad} pixels. Save and/or return crop - xyxy = torch.tensor(xyxy).view(-1, 4) - b = xyxy2xywh(xyxy) # boxes - if square: - b[:, 2:] = b[:, 2:].max(1)[0].unsqueeze(1) # attempt rectangle to square - b[:, 2:] = b[:, 2:] * gain + pad # box wh * gain + pad - xyxy = xywh2xyxy(b).long() - clip_coords(xyxy, im.shape) - crop = im[int(xyxy[0, 1]):int(xyxy[0, 3]), int(xyxy[0, 0]):int(xyxy[0, 2]), ::(1 if BGR else -1)] - if save: - file.parent.mkdir(parents=True, exist_ok=True) # make directory - f = str(increment_path(file).with_suffix('.jpg')) - # cv2.imwrite(f, crop) # save BGR, https://github.com/ultralytics/yolov5/issues/7007 chroma subsampling issue - Image.fromarray(crop[..., ::-1]).save(f, quality=95, subsampling=0) # save RGB - return crop diff --git a/src/yolov5_ros/src/yolov5/utils/torch_utils.py b/src/yolov5_ros/src/yolov5/utils/torch_utils.py deleted file mode 100644 index 8a3366c..0000000 --- a/src/yolov5_ros/src/yolov5/utils/torch_utils.py +++ /dev/null @@ -1,430 +0,0 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license -""" -PyTorch utils -""" - -import math -import os -import platform -import subprocess -import time -import warnings -from contextlib import contextmanager -from copy import deepcopy -from pathlib import Path - -import torch -import torch.distributed as dist -import torch.nn as nn -import torch.nn.functional as F -from torch.nn.parallel import DistributedDataParallel as DDP - -from utils.general import LOGGER, check_version, colorstr, file_date, git_describe - -LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1)) # https://pytorch.org/docs/stable/elastic/run.html -RANK = int(os.getenv('RANK', -1)) -WORLD_SIZE = int(os.getenv('WORLD_SIZE', 1)) - -try: - import thop # for FLOPs computation -except ImportError: - thop = None - -# Suppress PyTorch warnings -warnings.filterwarnings('ignore', message='User provided device_type of \'cuda\', but CUDA is not available. 
Disabling') - - -def smart_inference_mode(torch_1_9=check_version(torch.__version__, '1.9.0')): - # Applies torch.inference_mode() decorator if torch>=1.9.0 else torch.no_grad() decorator - def decorate(fn): - return (torch.inference_mode if torch_1_9 else torch.no_grad)()(fn) - - return decorate - - -def smartCrossEntropyLoss(label_smoothing=0.0): - # Returns nn.CrossEntropyLoss with label smoothing enabled for torch>=1.10.0 - if check_version(torch.__version__, '1.10.0'): - return nn.CrossEntropyLoss(label_smoothing=label_smoothing) - if label_smoothing > 0: - LOGGER.warning(f'WARNING: label smoothing {label_smoothing} requires torch>=1.10.0') - return nn.CrossEntropyLoss() - - -def smart_DDP(model): - # Model DDP creation with checks - assert not check_version(torch.__version__, '1.12.0', pinned=True), \ - 'torch==1.12.0 torchvision==0.13.0 DDP training is not supported due to a known issue. ' \ - 'Please upgrade or downgrade torch to use DDP. See https://github.com/ultralytics/yolov5/issues/8395' - if check_version(torch.__version__, '1.11.0'): - return DDP(model, device_ids=[LOCAL_RANK], output_device=LOCAL_RANK, static_graph=True) - else: - return DDP(model, device_ids=[LOCAL_RANK], output_device=LOCAL_RANK) - - -def reshape_classifier_output(model, n=1000): - # Update a TorchVision classification model to class count 'n' if required - from models.common import Classify - name, m = list((model.model if hasattr(model, 'model') else model).named_children())[-1] # last module - if isinstance(m, Classify): # YOLOv5 Classify() head - if m.linear.out_features != n: - m.linear = nn.Linear(m.linear.in_features, n) - elif isinstance(m, nn.Linear): # ResNet, EfficientNet - if m.out_features != n: - setattr(model, name, nn.Linear(m.in_features, n)) - elif isinstance(m, nn.Sequential): - types = [type(x) for x in m] - if nn.Linear in types: - i = types.index(nn.Linear) # nn.Linear index - if m[i].out_features != n: - m[i] = nn.Linear(m[i].in_features, n) - elif nn.Conv2d in types: - i = types.index(nn.Conv2d) # nn.Conv2d index - if m[i].out_channels != n: - m[i] = nn.Conv2d(m[i].in_channels, n, m[i].kernel_size, m[i].stride, bias=m[i].bias is not None) - - -@contextmanager -def torch_distributed_zero_first(local_rank: int): - # Context manager to make all processes in distributed training wait for each local_master to do something - if local_rank not in [-1, 0]: - dist.barrier(device_ids=[local_rank]) - yield - if local_rank == 0: - dist.barrier(device_ids=[0]) - - -def device_count(): - # Returns number of CUDA devices available. Safe version of torch.cuda.device_count(). 
Supports Linux and Windows - assert platform.system() in ('Linux', 'Windows'), 'device_count() only supported on Linux or Windows' - try: - cmd = 'nvidia-smi -L | wc -l' if platform.system() == 'Linux' else 'nvidia-smi -L | find /c /v ""' # Windows - return int(subprocess.run(cmd, shell=True, capture_output=True, check=True).stdout.decode().split()[-1]) - except Exception: - return 0 - - -def select_device(device='', batch_size=0, newline=True): - # device = None or 'cpu' or 0 or '0' or '0,1,2,3' - s = f'YOLOv5 🚀 {git_describe() or file_date()} Python-{platform.python_version()} torch-{torch.__version__} ' - device = str(device).strip().lower().replace('cuda:', '').replace('none', '') # to string, 'cuda:0' to '0' - cpu = device == 'cpu' - mps = device == 'mps' # Apple Metal Performance Shaders (MPS) - if cpu or mps: - os.environ['CUDA_VISIBLE_DEVICES'] = '-1' # force torch.cuda.is_available() = False - elif device: # non-cpu device requested - os.environ['CUDA_VISIBLE_DEVICES'] = device # set environment variable - must be before assert is_available() - assert torch.cuda.is_available() and torch.cuda.device_count() >= len(device.replace(',', '')), \ - f"Invalid CUDA '--device {device}' requested, use '--device cpu' or pass valid CUDA device(s)" - - if not cpu and not mps and torch.cuda.is_available(): # prefer GPU if available - devices = device.split(',') if device else '0' # range(torch.cuda.device_count()) # i.e. 0,1,6,7 - n = len(devices) # device count - if n > 1 and batch_size > 0: # check batch_size is divisible by device_count - assert batch_size % n == 0, f'batch-size {batch_size} not multiple of GPU count {n}' - space = ' ' * (len(s) + 1) - for i, d in enumerate(devices): - p = torch.cuda.get_device_properties(i) - s += f"{'' if i == 0 else space}CUDA:{d} ({p.name}, {p.total_memory / (1 << 20):.0f}MiB)\n" # bytes to MB - arg = 'cuda:0' - elif mps and getattr(torch, 'has_mps', False) and torch.backends.mps.is_available(): # prefer MPS if available - s += 'MPS\n' - arg = 'mps' - else: # revert to CPU - s += 'CPU\n' - arg = 'cpu' - - if not newline: - s = s.rstrip() - LOGGER.info(s) - return torch.device(arg) - - -def time_sync(): - # PyTorch-accurate time - if torch.cuda.is_available(): - torch.cuda.synchronize() - return time.time() - - -def profile(input, ops, n=10, device=None): - """ YOLOv5 speed/memory/FLOPs profiler - Usage: - input = torch.randn(16, 3, 640, 640) - m1 = lambda x: x * torch.sigmoid(x) - m2 = nn.SiLU() - profile(input, [m1, m2], n=100) # profile over 100 iterations - """ - results = [] - if not isinstance(device, torch.device): - device = select_device(device) - print(f"{'Params':>12s}{'GFLOPs':>12s}{'GPU_mem (GB)':>14s}{'forward (ms)':>14s}{'backward (ms)':>14s}" - f"{'input':>24s}{'output':>24s}") - - for x in input if isinstance(input, list) else [input]: - x = x.to(device) - x.requires_grad = True - for m in ops if isinstance(ops, list) else [ops]: - m = m.to(device) if hasattr(m, 'to') else m # device - m = m.half() if hasattr(m, 'half') and isinstance(x, torch.Tensor) and x.dtype is torch.float16 else m - tf, tb, t = 0, 0, [0, 0, 0] # dt forward, backward - try: - flops = thop.profile(m, inputs=(x,), verbose=False)[0] / 1E9 * 2 # GFLOPs - except Exception: - flops = 0 - - try: - for _ in range(n): - t[0] = time_sync() - y = m(x) - t[1] = time_sync() - try: - _ = (sum(yi.sum() for yi in y) if isinstance(y, list) else y).sum().backward() - t[2] = time_sync() - except Exception: # no backward method - # print(e) # for debug - t[2] = float('nan') - tf += 
(t[1] - t[0]) * 1000 / n # ms per op forward - tb += (t[2] - t[1]) * 1000 / n # ms per op backward - mem = torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0 # (GB) - s_in, s_out = (tuple(x.shape) if isinstance(x, torch.Tensor) else 'list' for x in (x, y)) # shapes - p = sum(x.numel() for x in m.parameters()) if isinstance(m, nn.Module) else 0 # parameters - print(f'{p:12}{flops:12.4g}{mem:>14.3f}{tf:14.4g}{tb:14.4g}{str(s_in):>24s}{str(s_out):>24s}') - results.append([p, flops, mem, tf, tb, s_in, s_out]) - except Exception as e: - print(e) - results.append(None) - torch.cuda.empty_cache() - return results - - -def is_parallel(model): - # Returns True if model is of type DP or DDP - return type(model) in (nn.parallel.DataParallel, nn.parallel.DistributedDataParallel) - - -def de_parallel(model): - # De-parallelize a model: returns single-GPU model if model is of type DP or DDP - return model.module if is_parallel(model) else model - - -def initialize_weights(model): - for m in model.modules(): - t = type(m) - if t is nn.Conv2d: - pass # nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') - elif t is nn.BatchNorm2d: - m.eps = 1e-3 - m.momentum = 0.03 - elif t in [nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6, nn.SiLU]: - m.inplace = True - - -def find_modules(model, mclass=nn.Conv2d): - # Finds layer indices matching module class 'mclass' - return [i for i, m in enumerate(model.module_list) if isinstance(m, mclass)] - - -def sparsity(model): - # Return global model sparsity - a, b = 0, 0 - for p in model.parameters(): - a += p.numel() - b += (p == 0).sum() - return b / a - - -def prune(model, amount=0.3): - # Prune model to requested global sparsity - import torch.nn.utils.prune as prune - for name, m in model.named_modules(): - if isinstance(m, nn.Conv2d): - prune.l1_unstructured(m, name='weight', amount=amount) # prune - prune.remove(m, 'weight') # make permanent - LOGGER.info(f'Model pruned to {sparsity(model):.3g} global sparsity') - - -def fuse_conv_and_bn(conv, bn): - # Fuse Conv2d() and BatchNorm2d() layers https://tehnokv.com/posts/fusing-batchnorm-and-conv/ - fusedconv = nn.Conv2d(conv.in_channels, - conv.out_channels, - kernel_size=conv.kernel_size, - stride=conv.stride, - padding=conv.padding, - dilation=conv.dilation, - groups=conv.groups, - bias=True).requires_grad_(False).to(conv.weight.device) - - # Prepare filters - w_conv = conv.weight.clone().view(conv.out_channels, -1) - w_bn = torch.diag(bn.weight.div(torch.sqrt(bn.eps + bn.running_var))) - fusedconv.weight.copy_(torch.mm(w_bn, w_conv).view(fusedconv.weight.shape)) - - # Prepare spatial bias - b_conv = torch.zeros(conv.weight.size(0), device=conv.weight.device) if conv.bias is None else conv.bias - b_bn = bn.bias - bn.weight.mul(bn.running_mean).div(torch.sqrt(bn.running_var + bn.eps)) - fusedconv.bias.copy_(torch.mm(w_bn, b_conv.reshape(-1, 1)).reshape(-1) + b_bn) - - return fusedconv - - -def model_info(model, verbose=False, imgsz=640): - # Model information. img_size may be int or list, i.e. 
img_size=640 or img_size=[640, 320] - n_p = sum(x.numel() for x in model.parameters()) # number parameters - n_g = sum(x.numel() for x in model.parameters() if x.requires_grad) # number gradients - if verbose: - print(f"{'layer':>5} {'name':>40} {'gradient':>9} {'parameters':>12} {'shape':>20} {'mu':>10} {'sigma':>10}") - for i, (name, p) in enumerate(model.named_parameters()): - name = name.replace('module_list.', '') - print('%5g %40s %9s %12g %20s %10.3g %10.3g' % - (i, name, p.requires_grad, p.numel(), list(p.shape), p.mean(), p.std())) - - try: # FLOPs - p = next(model.parameters()) - stride = max(int(model.stride.max()), 32) if hasattr(model, 'stride') else 32 # max stride - im = torch.empty((1, p.shape[1], stride, stride), device=p.device) # input image in BCHW format - flops = thop.profile(deepcopy(model), inputs=(im,), verbose=False)[0] / 1E9 * 2 # stride GFLOPs - imgsz = imgsz if isinstance(imgsz, list) else [imgsz, imgsz] # expand if int/float - fs = f', {flops * imgsz[0] / stride * imgsz[1] / stride:.1f} GFLOPs' # 640x640 GFLOPs - except Exception: - fs = '' - - name = Path(model.yaml_file).stem.replace('yolov5', 'YOLOv5') if hasattr(model, 'yaml_file') else 'Model' - LOGGER.info(f"{name} summary: {len(list(model.modules()))} layers, {n_p} parameters, {n_g} gradients{fs}") - - -def scale_img(img, ratio=1.0, same_shape=False, gs=32): # img(16,3,256,416) - # Scales img(bs,3,y,x) by ratio constrained to gs-multiple - if ratio == 1.0: - return img - h, w = img.shape[2:] - s = (int(h * ratio), int(w * ratio)) # new size - img = F.interpolate(img, size=s, mode='bilinear', align_corners=False) # resize - if not same_shape: # pad/crop img - h, w = (math.ceil(x * ratio / gs) * gs for x in (h, w)) - return F.pad(img, [0, w - s[1], 0, h - s[0]], value=0.447) # value = imagenet mean - - -def copy_attr(a, b, include=(), exclude=()): - # Copy attributes from b to a, options to only include [...] and to exclude [...] - for k, v in b.__dict__.items(): - if (len(include) and k not in include) or k.startswith('_') or k in exclude: - continue - else: - setattr(a, k, v) - - -def smart_optimizer(model, name='Adam', lr=0.001, momentum=0.9, decay=1e-5): - # YOLOv5 3-param group optimizer: 0) weights with decay, 1) weights no decay, 2) biases no decay - g = [], [], [] # optimizer parameter groups - bn = tuple(v for k, v in nn.__dict__.items() if 'Norm' in k) # normalization layers, i.e. 
BatchNorm2d() - for v in model.modules(): - if hasattr(v, 'bias') and isinstance(v.bias, nn.Parameter): # bias (no decay) - g[2].append(v.bias) - if isinstance(v, bn): # weight (no decay) - g[1].append(v.weight) - elif hasattr(v, 'weight') and isinstance(v.weight, nn.Parameter): # weight (with decay) - g[0].append(v.weight) - - if name == 'Adam': - optimizer = torch.optim.Adam(g[2], lr=lr, betas=(momentum, 0.999)) # adjust beta1 to momentum - elif name == 'AdamW': - optimizer = torch.optim.AdamW(g[2], lr=lr, betas=(momentum, 0.999), weight_decay=0.0) - elif name == 'RMSProp': - optimizer = torch.optim.RMSprop(g[2], lr=lr, momentum=momentum) - elif name == 'SGD': - optimizer = torch.optim.SGD(g[2], lr=lr, momentum=momentum, nesterov=True) - else: - raise NotImplementedError(f'Optimizer {name} not implemented.') - - optimizer.add_param_group({'params': g[0], 'weight_decay': decay}) # add g0 with weight_decay - optimizer.add_param_group({'params': g[1], 'weight_decay': 0.0}) # add g1 (BatchNorm2d weights) - LOGGER.info(f"{colorstr('optimizer:')} {type(optimizer).__name__}(lr={lr}) with parameter groups " - f"{len(g[1])} weight(decay=0.0), {len(g[0])} weight(decay={decay}), {len(g[2])} bias") - return optimizer - - -def smart_hub_load(repo='ultralytics/yolov5', model='yolov5s', **kwargs): - # YOLOv5 torch.hub.load() wrapper with smart error/issue handling - if check_version(torch.__version__, '1.9.1'): - kwargs['skip_validation'] = True # validation causes GitHub API rate limit errors - if check_version(torch.__version__, '1.12.0'): - kwargs['trust_repo'] = True # argument required starting in torch 1.12 - try: - return torch.hub.load(repo, model, **kwargs) - except Exception: - return torch.hub.load(repo, model, force_reload=True, **kwargs) - - -def smart_resume(ckpt, optimizer, ema=None, weights='yolov5s.pt', epochs=300, resume=True): - # Resume training from a partially trained checkpoint - best_fitness = 0.0 - start_epoch = ckpt['epoch'] + 1 - if ckpt['optimizer'] is not None: - optimizer.load_state_dict(ckpt['optimizer']) # optimizer - best_fitness = ckpt['best_fitness'] - if ema and ckpt.get('ema'): - ema.ema.load_state_dict(ckpt['ema'].float().state_dict()) # EMA - ema.updates = ckpt['updates'] - if resume: - assert start_epoch > 0, f'{weights} training to {epochs} epochs is finished, nothing to resume.\n' \ - f"Start a new training without --resume, i.e. 'python train.py --weights {weights}'" - LOGGER.info(f'Resuming training from {weights} from epoch {start_epoch} to {epochs} total epochs') - if epochs < start_epoch: - LOGGER.info(f"{weights} has been trained for {ckpt['epoch']} epochs. Fine-tuning for {epochs} more epochs.") - epochs += ckpt['epoch'] # finetune additional epochs - return best_fitness, start_epoch, epochs - - -class EarlyStopping: - # YOLOv5 simple early stopper - def __init__(self, patience=30): - self.best_fitness = 0.0 # i.e. 
mAP - self.best_epoch = 0 - self.patience = patience or float('inf') # epochs to wait after fitness stops improving to stop - self.possible_stop = False # possible stop may occur next epoch - - def __call__(self, epoch, fitness): - if fitness >= self.best_fitness: # >= 0 to allow for early zero-fitness stage of training - self.best_epoch = epoch - self.best_fitness = fitness - delta = epoch - self.best_epoch # epochs without improvement - self.possible_stop = delta >= (self.patience - 1) # possible stop may occur next epoch - stop = delta >= self.patience # stop training if patience exceeded - if stop: - LOGGER.info(f'Stopping training early as no improvement observed in last {self.patience} epochs. ' - f'Best results observed at epoch {self.best_epoch}, best model saved as best.pt.\n' - f'To update EarlyStopping(patience={self.patience}) pass a new patience value, ' - f'i.e. `python train.py --patience 300` or use `--patience 0` to disable EarlyStopping.') - return stop - - -class ModelEMA: - """ Updated Exponential Moving Average (EMA) from https://github.com/rwightman/pytorch-image-models - Keeps a moving average of everything in the model state_dict (parameters and buffers) - For EMA details see https://www.tensorflow.org/api_docs/python/tf/train/ExponentialMovingAverage - """ - - def __init__(self, model, decay=0.9999, tau=2000, updates=0): - # Create EMA - self.ema = deepcopy(de_parallel(model)).eval() # FP32 EMA - self.updates = updates # number of EMA updates - self.decay = lambda x: decay * (1 - math.exp(-x / tau)) # decay exponential ramp (to help early epochs) - for p in self.ema.parameters(): - p.requires_grad_(False) - - def update(self, model): - # Update EMA parameters - self.updates += 1 - d = self.decay(self.updates) - - msd = de_parallel(model).state_dict() # model state_dict - for k, v in self.ema.state_dict().items(): - if v.dtype.is_floating_point: # true for FP16 and FP32 - v *= d - v += (1 - d) * msd[k].detach() - # assert v.dtype == msd[k].dtype == torch.float32, f'{k}: EMA {v.dtype} and model {msd[k].dtype} must be FP32' - - def update_attr(self, model, include=(), exclude=('process_group', 'reducer')): - # Update EMA attributes - copy_attr(self.ema, model, include, exclude) diff --git a/src/yolov5_ros/src/yolov5/val.py b/src/yolov5_ros/src/yolov5/val.py deleted file mode 100644 index 4b0bddd..0000000 --- a/src/yolov5_ros/src/yolov5/val.py +++ /dev/null @@ -1,398 +0,0 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license -""" -Validate a trained YOLOv5 detection model on a detection dataset - -Usage: - $ python val.py --weights yolov5s.pt --data coco128.yaml --img 640 - -Usage - formats: - $ python val.py --weights yolov5s.pt # PyTorch - yolov5s.torchscript # TorchScript - yolov5s.onnx # ONNX Runtime or OpenCV DNN with --dnn - yolov5s.xml # OpenVINO - yolov5s.engine # TensorRT - yolov5s.mlmodel # CoreML (macOS-only) - yolov5s_saved_model # TensorFlow SavedModel - yolov5s.pb # TensorFlow GraphDef - yolov5s.tflite # TensorFlow Lite - yolov5s_edgetpu.tflite # TensorFlow Edge TPU - yolov5s_paddle_model # PaddlePaddle -""" - -import argparse -import json -import os -import sys -from pathlib import Path - -import numpy as np -import torch -from tqdm import tqdm - -FILE = Path(__file__).resolve() -ROOT = FILE.parents[0] # YOLOv5 root directory -if str(ROOT) not in sys.path: - sys.path.append(str(ROOT)) # add ROOT to PATH -ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative - -from models.common import DetectMultiBackend -from utils.callbacks import Callbacks -from 
utils.dataloaders import create_dataloader -from utils.general import (LOGGER, Profile, check_dataset, check_img_size, check_requirements, check_yaml, - coco80_to_coco91_class, colorstr, increment_path, non_max_suppression, print_args, - scale_coords, xywh2xyxy, xyxy2xywh) -from utils.metrics import ConfusionMatrix, ap_per_class, box_iou -from utils.plots import output_to_target, plot_images, plot_val_study -from utils.torch_utils import select_device, smart_inference_mode - - -def save_one_txt(predn, save_conf, shape, file): - # Save one txt result - gn = torch.tensor(shape)[[1, 0, 1, 0]] # normalization gain whwh - for *xyxy, conf, cls in predn.tolist(): - xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh - line = (cls, *xywh, conf) if save_conf else (cls, *xywh) # label format - with open(file, 'a') as f: - f.write(('%g ' * len(line)).rstrip() % line + '\n') - - -def save_one_json(predn, jdict, path, class_map): - # Save one JSON result {"image_id": 42, "category_id": 18, "bbox": [258.15, 41.29, 348.26, 243.78], "score": 0.236} - image_id = int(path.stem) if path.stem.isnumeric() else path.stem - box = xyxy2xywh(predn[:, :4]) # xywh - box[:, :2] -= box[:, 2:] / 2 # xy center to top-left corner - for p, b in zip(predn.tolist(), box.tolist()): - jdict.append({ - 'image_id': image_id, - 'category_id': class_map[int(p[5])], - 'bbox': [round(x, 3) for x in b], - 'score': round(p[4], 5)}) - - -def process_batch(detections, labels, iouv): - """ - Return correct predictions matrix. Both sets of boxes are in (x1, y1, x2, y2) format. - Arguments: - detections (Array[N, 6]), x1, y1, x2, y2, conf, class - labels (Array[M, 5]), class, x1, y1, x2, y2 - Returns: - correct (Array[N, 10]), for 10 IoU levels - """ - correct = np.zeros((detections.shape[0], iouv.shape[0])).astype(bool) - iou = box_iou(labels[:, 1:], detections[:, :4]) - correct_class = labels[:, 0:1] == detections[:, 5] - for i in range(len(iouv)): - x = torch.where((iou >= iouv[i]) & correct_class) # IoU > threshold and classes match - if x[0].shape[0]: - matches = torch.cat((torch.stack(x, 1), iou[x[0], x[1]][:, None]), 1).cpu().numpy() # [label, detect, iou] - if x[0].shape[0] > 1: - matches = matches[matches[:, 2].argsort()[::-1]] - matches = matches[np.unique(matches[:, 1], return_index=True)[1]] - # matches = matches[matches[:, 2].argsort()[::-1]] - matches = matches[np.unique(matches[:, 0], return_index=True)[1]] - correct[matches[:, 1].astype(int), i] = True - return torch.tensor(correct, dtype=torch.bool, device=iouv.device) - - -@smart_inference_mode() -def run( - data, - weights=None, # model.pt path(s) - batch_size=32, # batch size - imgsz=640, # inference size (pixels) - conf_thres=0.001, # confidence threshold - iou_thres=0.6, # NMS IoU threshold - task='val', # train, val, test, speed or study - device='', # cuda device, i.e. 
0 or 0,1,2,3 or cpu - workers=8, # max dataloader workers (per RANK in DDP mode) - single_cls=False, # treat as single-class dataset - augment=False, # augmented inference - verbose=False, # verbose output - save_txt=False, # save results to *.txt - save_hybrid=False, # save label+prediction hybrid results to *.txt - save_conf=False, # save confidences in --save-txt labels - save_json=False, # save a COCO-JSON results file - project=ROOT / 'runs/val', # save to project/name - name='exp', # save to project/name - exist_ok=False, # existing project/name ok, do not increment - half=True, # use FP16 half-precision inference - dnn=False, # use OpenCV DNN for ONNX inference - model=None, - dataloader=None, - save_dir=Path(''), - plots=True, - callbacks=Callbacks(), - compute_loss=None, -): - # Initialize/load model and set device - training = model is not None - if training: # called by train.py - device, pt, jit, engine = next(model.parameters()).device, True, False, False # get model device, PyTorch model - half &= device.type != 'cpu' # half precision only supported on CUDA - model.half() if half else model.float() - else: # called directly - device = select_device(device, batch_size=batch_size) - - # Directories - save_dir = increment_path(Path(project) / name, exist_ok=exist_ok) # increment run - (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir - - # Load model - model = DetectMultiBackend(weights, device=device, dnn=dnn, data=data, fp16=half) - stride, pt, jit, engine = model.stride, model.pt, model.jit, model.engine - imgsz = check_img_size(imgsz, s=stride) # check image size - half = model.fp16 # FP16 supported on limited backends with CUDA - if engine: - batch_size = model.batch_size - else: - device = model.device - if not (pt or jit): - batch_size = 1 # export.py models default to batch-size 1 - LOGGER.info(f'Forcing --batch-size 1 square inference (1,3,{imgsz},{imgsz}) for non-PyTorch models') - - # Data - data = check_dataset(data) # check - - # Configure - model.eval() - cuda = device.type != 'cpu' - is_coco = isinstance(data.get('val'), str) and data['val'].endswith(f'coco{os.sep}val2017.txt') # COCO dataset - nc = 1 if single_cls else int(data['nc']) # number of classes - iouv = torch.linspace(0.5, 0.95, 10, device=device) # iou vector for mAP@0.5:0.95 - niou = iouv.numel() - - # Dataloader - if not training: - if pt and not single_cls: # check --weights are trained on --data - ncm = model.model.nc - assert ncm == nc, f'{weights} ({ncm} classes) trained on different --data than what you passed ({nc} ' \ - f'classes). Pass correct combination of --weights and --data that are trained together.' 
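
Note (illustrative, not from the deleted sources): a minimal runnable sketch of the ten-threshold matching that iouv and process_batch() above implement. The boxes and confidence are made-up values; box_iou is imported exactly as val.py imports it.

import torch
from utils.metrics import box_iou  # same import val.py uses

iouv = torch.linspace(0.5, 0.95, 10)  # IoU thresholds 0.50, 0.55, ..., 0.95 for mAP@0.5:0.95
labels = torch.tensor([[0, 10., 10., 50., 50.]])            # one GT box: class, x1, y1, x2, y2
detections = torch.tensor([[12., 12., 50., 50., 0.9, 0.]])  # one prediction: x1, y1, x2, y2, conf, class
iou = box_iou(labels[:, 1:], detections[:, :4])             # pairwise IoU matrix, here ~0.903
correct = iou >= iouv  # broadcasts to shape (1, 10): True at 0.50..0.90, False at 0.95
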
- model.warmup(imgsz=(1 if pt else batch_size, 3, imgsz, imgsz)) # warmup - pad = 0.0 if task in ('speed', 'benchmark') else 0.5 - rect = False if task == 'benchmark' else pt # square inference for benchmarks - task = task if task in ('train', 'val', 'test') else 'val' # path to train/val/test images - dataloader = create_dataloader(data[task], - imgsz, - batch_size, - stride, - single_cls, - pad=pad, - rect=rect, - workers=workers, - prefix=colorstr(f'{task}: '))[0] - - seen = 0 - confusion_matrix = ConfusionMatrix(nc=nc) - names = model.names if hasattr(model, 'names') else model.module.names # get class names - if isinstance(names, (list, tuple)): # old format - names = dict(enumerate(names)) - class_map = coco80_to_coco91_class() if is_coco else list(range(1000)) - s = ('%22s' + '%11s' * 6) % ('Class', 'Images', 'Instances', 'P', 'R', 'mAP@.5', 'mAP@.5:.95') - dt, p, r, f1, mp, mr, map50, map = (Profile(), Profile(), Profile()), 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 - loss = torch.zeros(3, device=device) - jdict, stats, ap, ap_class = [], [], [], [] - callbacks.run('on_val_start') - pbar = tqdm(dataloader, desc=s, bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}') # progress bar - for batch_i, (im, targets, paths, shapes) in enumerate(pbar): - callbacks.run('on_val_batch_start') - with dt[0]: - if cuda: - im = im.to(device, non_blocking=True) - targets = targets.to(device) - im = im.half() if half else im.float() # uint8 to fp16/32 - im /= 255 # 0 - 255 to 0.0 - 1.0 - nb, _, height, width = im.shape # batch size, channels, height, width - - # Inference - with dt[1]: - out, train_out = model(im) if compute_loss else (model(im, augment=augment), None) - - # Loss - if compute_loss: - loss += compute_loss(train_out, targets)[1] # box, obj, cls - - # NMS - targets[:, 2:] *= torch.tensor((width, height, width, height), device=device) # to pixels - lb = [targets[targets[:, 0] == i, 1:] for i in range(nb)] if save_hybrid else [] # for autolabelling - with dt[2]: - out = non_max_suppression(out, conf_thres, iou_thres, labels=lb, multi_label=True, agnostic=single_cls) - - # Metrics - for si, pred in enumerate(out): - labels = targets[targets[:, 0] == si, 1:] - nl, npr = labels.shape[0], pred.shape[0] # number of labels, predictions - path, shape = Path(paths[si]), shapes[si][0] - correct = torch.zeros(npr, niou, dtype=torch.bool, device=device) # init - seen += 1 - - if npr == 0: - if nl: - stats.append((correct, *torch.zeros((2, 0), device=device), labels[:, 0])) - if plots: - confusion_matrix.process_batch(detections=None, labels=labels[:, 0]) - continue - - # Predictions - if single_cls: - pred[:, 5] = 0 - predn = pred.clone() - scale_coords(im[si].shape[1:], predn[:, :4], shape, shapes[si][1]) # native-space pred - - # Evaluate - if nl: - tbox = xywh2xyxy(labels[:, 1:5]) # target boxes - scale_coords(im[si].shape[1:], tbox, shape, shapes[si][1]) # native-space labels - labelsn = torch.cat((labels[:, 0:1], tbox), 1) # native-space labels - correct = process_batch(predn, labelsn, iouv) - if plots: - confusion_matrix.process_batch(predn, labelsn) - stats.append((correct, pred[:, 4], pred[:, 5], labels[:, 0])) # (correct, conf, pcls, tcls) - - # Save/log - if save_txt: - save_one_txt(predn, save_conf, shape, file=save_dir / 'labels' / f'{path.stem}.txt') - if save_json: - save_one_json(predn, jdict, path, class_map) # append to COCO-JSON dictionary - callbacks.run('on_val_image_end', pred, predn, path, names, im[si]) - - # Plot images - if plots and batch_i < 3: - plot_images(im, targets, paths, 
save_dir / f'val_batch{batch_i}_labels.jpg', names) # labels - plot_images(im, output_to_target(out), paths, save_dir / f'val_batch{batch_i}_pred.jpg', names) # pred - - callbacks.run('on_val_batch_end', batch_i, im, targets, paths, shapes, out) - - # Compute metrics - stats = [torch.cat(x, 0).cpu().numpy() for x in zip(*stats)] # to numpy - if len(stats) and stats[0].any(): - tp, fp, p, r, f1, ap, ap_class = ap_per_class(*stats, plot=plots, save_dir=save_dir, names=names) - ap50, ap = ap[:, 0], ap.mean(1) # AP@0.5, AP@0.5:0.95 - mp, mr, map50, map = p.mean(), r.mean(), ap50.mean(), ap.mean() - nt = np.bincount(stats[3].astype(int), minlength=nc) # number of targets per class - - # Print results - pf = '%22s' + '%11i' * 2 + '%11.3g' * 4 # print format - LOGGER.info(pf % ('all', seen, nt.sum(), mp, mr, map50, map)) - if nt.sum() == 0: - LOGGER.warning(f'WARNING: no labels found in {task} set, can not compute metrics without labels ⚠️') - - # Print results per class - if (verbose or (nc < 50 and not training)) and nc > 1 and len(stats): - for i, c in enumerate(ap_class): - LOGGER.info(pf % (names[c], seen, nt[c], p[i], r[i], ap50[i], ap[i])) - - # Print speeds - t = tuple(x.t / seen * 1E3 for x in dt) # speeds per image - if not training: - shape = (batch_size, 3, imgsz, imgsz) - LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {shape}' % t) - - # Plots - if plots: - confusion_matrix.plot(save_dir=save_dir, names=list(names.values())) - callbacks.run('on_val_end', nt, tp, fp, p, r, f1, ap, ap50, ap_class, confusion_matrix) - - # Save JSON - if save_json and len(jdict): - w = Path(weights[0] if isinstance(weights, list) else weights).stem if weights is not None else '' # weights - anno_json = str(Path(data.get('path', '../coco')) / 'annotations/instances_val2017.json') # annotations json - pred_json = str(save_dir / f"{w}_predictions.json") # predictions json - LOGGER.info(f'\nEvaluating pycocotools mAP... 
saving {pred_json}...') - with open(pred_json, 'w') as f: - json.dump(jdict, f) - - try: # https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoEvalDemo.ipynb - check_requirements('pycocotools') - from pycocotools.coco import COCO - from pycocotools.cocoeval import COCOeval - - anno = COCO(anno_json) # init annotations api - pred = anno.loadRes(pred_json) # init predictions api - eval = COCOeval(anno, pred, 'bbox') - if is_coco: - eval.params.imgIds = [int(Path(x).stem) for x in dataloader.dataset.im_files] # image IDs to evaluate - eval.evaluate() - eval.accumulate() - eval.summarize() - map, map50 = eval.stats[:2] # update results (mAP@0.5:0.95, mAP@0.5) - except Exception as e: - LOGGER.info(f'pycocotools unable to run: {e}') - - # Return results - model.float() # for training - if not training: - s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else '' - LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}{s}") - maps = np.zeros(nc) + map - for i, c in enumerate(ap_class): - maps[c] = ap[i] - return (mp, mr, map50, map, *(loss.cpu() / len(dataloader)).tolist()), maps, t - - -def parse_opt(): - parser = argparse.ArgumentParser() - parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='dataset.yaml path') - parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'yolov5s.pt', help='model.pt path(s)') - parser.add_argument('--batch-size', type=int, default=32, help='batch size') - parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=640, help='inference size (pixels)') - parser.add_argument('--conf-thres', type=float, default=0.001, help='confidence threshold') - parser.add_argument('--iou-thres', type=float, default=0.6, help='NMS IoU threshold') - parser.add_argument('--task', default='val', help='train, val, test, speed or study') - parser.add_argument('--device', default='', help='cuda device, i.e. 
0 or 0,1,2,3 or cpu') - parser.add_argument('--workers', type=int, default=8, help='max dataloader workers (per RANK in DDP mode)') - parser.add_argument('--single-cls', action='store_true', help='treat as single-class dataset') - parser.add_argument('--augment', action='store_true', help='augmented inference') - parser.add_argument('--verbose', action='store_true', help='report mAP by class') - parser.add_argument('--save-txt', action='store_true', help='save results to *.txt') - parser.add_argument('--save-hybrid', action='store_true', help='save label+prediction hybrid results to *.txt') - parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels') - parser.add_argument('--save-json', action='store_true', help='save a COCO-JSON results file') - parser.add_argument('--project', default=ROOT / 'runs/val', help='save to project/name') - parser.add_argument('--name', default='exp', help='save to project/name') - parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment') - parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference') - parser.add_argument('--dnn', action='store_true', help='use OpenCV DNN for ONNX inference') - opt = parser.parse_args() - opt.data = check_yaml(opt.data) # check YAML - opt.save_json |= opt.data.endswith('coco.yaml') - opt.save_txt |= opt.save_hybrid - print_args(vars(opt)) - return opt - - -def main(opt): - check_requirements(exclude=('tensorboard', 'thop')) - - if opt.task in ('train', 'val', 'test'): # run normally - if opt.conf_thres > 0.001: # https://github.com/ultralytics/yolov5/issues/1466 - LOGGER.info(f'WARNING: confidence threshold {opt.conf_thres} > 0.001 produces invalid results ⚠️') - if opt.save_hybrid: - LOGGER.info('WARNING: --save-hybrid will return high mAP from hybrid labels, not from predictions alone ⚠️') - run(**vars(opt)) - - else: - weights = opt.weights if isinstance(opt.weights, list) else [opt.weights] - opt.half = True # FP16 for fastest results - if opt.task == 'speed': # speed benchmarks - # python val.py --task speed --data coco.yaml --batch 1 --weights yolov5n.pt yolov5s.pt... - opt.conf_thres, opt.iou_thres, opt.save_json = 0.25, 0.45, False - for opt.weights in weights: - run(**vars(opt), plots=False) - - elif opt.task == 'study': # speed vs mAP benchmarks - # python val.py --task study --data coco.yaml --iou 0.7 --weights yolov5n.pt yolov5s.pt... - for opt.weights in weights: - f = f'study_{Path(opt.data).stem}_{Path(opt.weights).stem}.txt' # filename to save to - x, y = list(range(256, 1536 + 128, 128)), [] # x axis (image sizes), y axis - for opt.imgsz in x: # img-size - LOGGER.info(f'\nRunning {f} --imgsz {opt.imgsz}...') - r, _, t = run(**vars(opt), plots=False) - y.append(r + t) # results and times - np.savetxt(f, y, fmt='%10.4g') # save - os.system('zip -r study.zip study_*.txt') - plot_val_study(x=x) # plot - - -if __name__ == "__main__": - opt = parse_opt() - main(opt)
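
Note (illustrative, not from the deleted sources): since val.run() exposes the same options as the CLI flags above, the evaluation can also be driven from Python. A hedged sketch; the dataset and weights paths are placeholders.

import val  # the module deleted above

# run() returns (mp, mr, mAP@0.5, mAP@0.5:0.95, *val losses), per-class mAPs and per-image timings
(mp, mr, map50, map50_95, *losses), maps, t = val.run(
    data='data/coco128.yaml',  # placeholder dataset yaml
    weights='yolov5s.pt',      # placeholder checkpoint
    imgsz=640,
    conf_thres=0.001,          # keep low for valid mAP, per the warning in main()
    iou_thres=0.6,
    half=False)                # FP16 only pays off on CUDA
print(f'mAP@0.5 {map50:.3f}, mAP@0.5:0.95 {map50_95:.3f}')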
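
Note (illustrative, not from the deleted sources): fuse_conv_and_bn() in torch_utils.py above folds eval-mode BatchNorm into the preceding convolution, rewriting W' = diag(gamma / sqrt(var + eps)) W and b' = gamma (b - mean) / sqrt(var + eps) + beta, so inference outputs are unchanged. A quick equivalence check:

import torch
import torch.nn as nn
from utils.torch_utils import fuse_conv_and_bn  # as defined in the diff above

conv = nn.Conv2d(3, 8, 3, padding=1, bias=False)
bn = nn.BatchNorm2d(8).eval()  # fusion assumes inference-mode (running) statistics
with torch.no_grad():          # give BN non-trivial statistics for a meaningful test
    bn.running_mean.uniform_(-1, 1)
    bn.running_var.uniform_(0.5, 2.0)
    bn.weight.uniform_(0.5, 1.5)
    bn.bias.uniform_(-0.5, 0.5)
x = torch.randn(1, 3, 16, 16)
fused = fuse_conv_and_bn(conv, bn)
print(torch.allclose(bn(conv(x)), fused(x), atol=1e-5))  # True: fused conv matches conv+BN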
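
Note (illustrative, not from the deleted sources): ModelEMA in torch_utils.py above ramps its effective decay as decay * (1 - exp(-updates / tau)), so the average tracks the raw weights closely early in training and smooths heavily later. With the class defaults:

import math

decay, tau = 0.9999, 2000  # ModelEMA defaults from the diff above
d = lambda x: decay * (1 - math.exp(-x / tau))  # effective decay after x updates
for updates in (1, 100, 2000, 10000):
    print(f'{updates:>6} updates -> decay {d(updates):.4f}')
# prints ~0.0005, 0.0488, 0.6321, 0.9932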