On branch master

Initial commit
	new file:   .gitignore
	new file:   .vscode/c_cpp_properties.json
	new file:   .vscode/settings.json
	new file:   .vscode/tasks.json
	new file:   src/detection_msgs/CMakeLists.txt
	new file:   src/detection_msgs/msg/BoundingBox.msg
	new file:   src/detection_msgs/msg/BoundingBoxes.msg
	new file:   src/detection_msgs/package.xml
	new file:   src/maintain/CMakeLists.txt
	new file:   src/maintain/launch/maintain.launch
	new file:   src/maintain/package.xml
	new file:   src/maintain/scripts/test.py
	new file:   src/vision_opencv/README.rst
	new file:   src/vision_opencv/cv_bridge/CHANGELOG.rst
	new file:   src/vision_opencv/cv_bridge/CMakeLists.txt
	new file:   src/vision_opencv/cv_bridge/cmake/cv_bridge-extras.cmake.in
	new file:   src/vision_opencv/cv_bridge/doc/conf.py
	new file:   src/vision_opencv/cv_bridge/doc/index.rst
	new file:   src/vision_opencv/cv_bridge/doc/mainpage.dox
	new file:   src/vision_opencv/cv_bridge/include/cv_bridge/cv_bridge.h
	new file:   src/vision_opencv/cv_bridge/include/cv_bridge/rgb_colors.h
	new file:   src/vision_opencv/cv_bridge/package.xml
	new file:   src/vision_opencv/cv_bridge/python/CMakeLists.txt
	new file:   src/vision_opencv/cv_bridge/python/__init__.py.plain.in
	new file:   src/vision_opencv/cv_bridge/python/cv_bridge/__init__.py
	new file:   src/vision_opencv/cv_bridge/python/cv_bridge/__pycache__/core.cpython-38.pyc
	new file:   src/vision_opencv/cv_bridge/python/cv_bridge/core.py
	new file:   src/vision_opencv/cv_bridge/rosdoc.yaml
	new file:   src/vision_opencv/cv_bridge/setup.py
	new file:   src/vision_opencv/cv_bridge/src/CMakeLists.txt
	new file:   src/vision_opencv/cv_bridge/src/boost/README
	new file:   src/vision_opencv/cv_bridge/src/boost/core/scoped_enum.hpp
	new file:   src/vision_opencv/cv_bridge/src/boost/endian/conversion.hpp
	new file:   src/vision_opencv/cv_bridge/src/boost/endian/detail/intrinsic.hpp
	new file:   src/vision_opencv/cv_bridge/src/boost/predef/detail/_cassert.h
	new file:   src/vision_opencv/cv_bridge/src/boost/predef/detail/endian_compat.h
	new file:   src/vision_opencv/cv_bridge/src/boost/predef/detail/test.h
	new file:   src/vision_opencv/cv_bridge/src/boost/predef/library/c/_prefix.h
	new file:   src/vision_opencv/cv_bridge/src/boost/predef/library/c/gnu.h
	new file:   src/vision_opencv/cv_bridge/src/boost/predef/make.h
	new file:   src/vision_opencv/cv_bridge/src/boost/predef/os/android.h
	new file:   src/vision_opencv/cv_bridge/src/boost/predef/os/bsd.h
	new file:   src/vision_opencv/cv_bridge/src/boost/predef/os/bsd/bsdi.h
	new file:   src/vision_opencv/cv_bridge/src/boost/predef/os/bsd/dragonfly.h
	new file:   src/vision_opencv/cv_bridge/src/boost/predef/os/bsd/free.h
	new file:   src/vision_opencv/cv_bridge/src/boost/predef/os/bsd/net.h
	new file:   src/vision_opencv/cv_bridge/src/boost/predef/os/bsd/open.h
	new file:   src/vision_opencv/cv_bridge/src/boost/predef/os/ios.h
	new file:   src/vision_opencv/cv_bridge/src/boost/predef/os/macos.h
	new file:   src/vision_opencv/cv_bridge/src/boost/predef/other/endian.h
	new file:   src/vision_opencv/cv_bridge/src/boost/predef/version_number.h
	new file:   src/vision_opencv/cv_bridge/src/cv_bridge.cpp
	new file:   src/vision_opencv/cv_bridge/src/module.cpp
	new file:   src/vision_opencv/cv_bridge/src/module.hpp
	new file:   src/vision_opencv/cv_bridge/src/module_opencv2.cpp
	new file:   src/vision_opencv/cv_bridge/src/module_opencv3.cpp
	new file:   src/vision_opencv/cv_bridge/src/pycompat.hpp
	new file:   src/vision_opencv/cv_bridge/src/rgb_colors.cpp
	new file:   src/vision_opencv/cv_bridge/test/CMakeLists.txt
	new file:   src/vision_opencv/cv_bridge/test/conversions.py
	new file:   src/vision_opencv/cv_bridge/test/enumerants.py
	new file:   src/vision_opencv/cv_bridge/test/python_bindings.py
	new file:   src/vision_opencv/cv_bridge/test/test_compression.cpp
	new file:   src/vision_opencv/cv_bridge/test/test_endian.cpp
	new file:   src/vision_opencv/cv_bridge/test/test_rgb_colors.cpp
	new file:   src/vision_opencv/cv_bridge/test/utest.cpp
	new file:   src/vision_opencv/cv_bridge/test/utest2.cpp
	new file:   src/vision_opencv/image_geometry/CHANGELOG.rst
	new file:   src/vision_opencv/image_geometry/CMakeLists.txt
	new file:   src/vision_opencv/image_geometry/doc/conf.py
	new file:   src/vision_opencv/image_geometry/doc/index.rst
	new file:   src/vision_opencv/image_geometry/doc/mainpage.dox
	new file:   src/vision_opencv/image_geometry/include/image_geometry/exports.h
	new file:   src/vision_opencv/image_geometry/include/image_geometry/pinhole_camera_model.h
	new file:   src/vision_opencv/image_geometry/include/image_geometry/stereo_camera_model.h
	new file:   src/vision_opencv/image_geometry/package.xml
	new file:   src/vision_opencv/image_geometry/rosdoc.yaml
	new file:   src/vision_opencv/image_geometry/setup.py
	new file:   src/vision_opencv/image_geometry/src/image_geometry/__init__.py
	new file:   src/vision_opencv/image_geometry/src/image_geometry/cameramodels.py
	new file:   src/vision_opencv/image_geometry/src/pinhole_camera_model.cpp
	new file:   src/vision_opencv/image_geometry/src/stereo_camera_model.cpp
	new file:   src/vision_opencv/image_geometry/test/CMakeLists.txt
	new file:   src/vision_opencv/image_geometry/test/directed.py
	new file:   src/vision_opencv/image_geometry/test/utest.cpp
	new file:   src/vision_opencv/opencv_tests/CHANGELOG.rst
	new file:   src/vision_opencv/opencv_tests/CMakeLists.txt
	new file:   src/vision_opencv/opencv_tests/launch/pong.launch
	new file:   src/vision_opencv/opencv_tests/mainpage.dox
	new file:   src/vision_opencv/opencv_tests/nodes/broadcast.py
	new file:   src/vision_opencv/opencv_tests/nodes/rosfacedetect.py
	new file:   src/vision_opencv/opencv_tests/nodes/source.py
	new file:   src/vision_opencv/opencv_tests/package.xml
	new file:   src/vision_opencv/vision_opencv/CHANGELOG.rst
	new file:   src/vision_opencv/vision_opencv/CMakeLists.txt
	new file:   src/vision_opencv/vision_opencv/package.xml
	new file:   src/yolov5_ros/CMakeLists.txt
	new file:   src/yolov5_ros/LICENSE
	new file:   src/yolov5_ros/README.md
	new file:   src/yolov5_ros/launch/yolov5.launch
	new file:   src/yolov5_ros/launch/yolov5_d435.launch
	new file:   src/yolov5_ros/package.xml
	new file:   src/yolov5_ros/src/detect.py
	new file:   src/yolov5_ros/src/yolov5/.dockerignore
	new file:   src/yolov5_ros/src/yolov5/.gitattributes
	new file:   src/yolov5_ros/src/yolov5/.gitignore
	new file:   src/yolov5_ros/src/yolov5/.pre-commit-config.yaml
	new file:   src/yolov5_ros/src/yolov5/CONTRIBUTING.md
	new file:   src/yolov5_ros/src/yolov5/LICENSE
	new file:   src/yolov5_ros/src/yolov5/README.md
	new file:   src/yolov5_ros/src/yolov5/classify/predict.py
	new file:   src/yolov5_ros/src/yolov5/classify/train.py
	new file:   src/yolov5_ros/src/yolov5/classify/val.py
	new file:   src/yolov5_ros/src/yolov5/data/Argoverse.yaml
	new file:   src/yolov5_ros/src/yolov5/data/GlobalWheat2020.yaml
	new file:   src/yolov5_ros/src/yolov5/data/ImageNet.yaml
	new file:   src/yolov5_ros/src/yolov5/data/Objects365.yaml
	new file:   src/yolov5_ros/src/yolov5/data/SKU-110K.yaml
	new file:   src/yolov5_ros/src/yolov5/data/VOC.yaml
	new file:   src/yolov5_ros/src/yolov5/data/VisDrone.yaml
	new file:   src/yolov5_ros/src/yolov5/data/coco.yaml
	new file:   src/yolov5_ros/src/yolov5/data/coco128.yaml
	new file:   src/yolov5_ros/src/yolov5/data/hyps/hyp.Objects365.yaml
	new file:   src/yolov5_ros/src/yolov5/data/hyps/hyp.VOC.yaml
	new file:   src/yolov5_ros/src/yolov5/data/hyps/hyp.scratch-high.yaml
	new file:   src/yolov5_ros/src/yolov5/data/hyps/hyp.scratch-low.yaml
	new file:   src/yolov5_ros/src/yolov5/data/hyps/hyp.scratch-med.yaml
	new file:   src/yolov5_ros/src/yolov5/data/images/bus.jpg
	new file:   src/yolov5_ros/src/yolov5/data/images/zidane.jpg
	new file:   src/yolov5_ros/src/yolov5/data/mydata.yaml
	new file:   src/yolov5_ros/src/yolov5/data/scripts/download_weights.sh
	new file:   src/yolov5_ros/src/yolov5/data/scripts/get_coco.sh
	new file:   src/yolov5_ros/src/yolov5/data/scripts/get_coco128.sh
	new file:   src/yolov5_ros/src/yolov5/data/scripts/get_imagenet.sh
	new file:   src/yolov5_ros/src/yolov5/data/xView.yaml
	new file:   src/yolov5_ros/src/yolov5/detect.py
	new file:   src/yolov5_ros/src/yolov5/export.py
	new file:   src/yolov5_ros/src/yolov5/hubconf.py
	new file:   src/yolov5_ros/src/yolov5/models/__init__.py
	new file:   src/yolov5_ros/src/yolov5/models/common.py
	new file:   src/yolov5_ros/src/yolov5/models/experimental.py
	new file:   src/yolov5_ros/src/yolov5/models/hub/anchors.yaml
	new file:   src/yolov5_ros/src/yolov5/models/hub/yolov3-spp.yaml
	new file:   src/yolov5_ros/src/yolov5/models/hub/yolov3-tiny.yaml
	new file:   src/yolov5_ros/src/yolov5/models/hub/yolov3.yaml
	new file:   src/yolov5_ros/src/yolov5/models/hub/yolov5-bifpn.yaml
	new file:   src/yolov5_ros/src/yolov5/models/hub/yolov5-fpn.yaml
	new file:   src/yolov5_ros/src/yolov5/models/hub/yolov5-p2.yaml
	new file:   src/yolov5_ros/src/yolov5/models/hub/yolov5-p34.yaml
	new file:   src/yolov5_ros/src/yolov5/models/hub/yolov5-p6.yaml
	new file:   src/yolov5_ros/src/yolov5/models/hub/yolov5-p7.yaml
	new file:   src/yolov5_ros/src/yolov5/models/hub/yolov5-panet.yaml
	new file:   src/yolov5_ros/src/yolov5/models/hub/yolov5l6.yaml
	new file:   src/yolov5_ros/src/yolov5/models/hub/yolov5m6.yaml
	new file:   src/yolov5_ros/src/yolov5/models/hub/yolov5n6.yaml
	new file:   src/yolov5_ros/src/yolov5/models/hub/yolov5s-ghost.yaml
	new file:   src/yolov5_ros/src/yolov5/models/hub/yolov5s-transformer.yaml
	new file:   src/yolov5_ros/src/yolov5/models/hub/yolov5s6.yaml
	new file:   src/yolov5_ros/src/yolov5/models/hub/yolov5x6.yaml
	new file:   src/yolov5_ros/src/yolov5/models/tf.py
	new file:   src/yolov5_ros/src/yolov5/models/yolo.py
	new file:   src/yolov5_ros/src/yolov5/models/yolov5l.yaml
	new file:   src/yolov5_ros/src/yolov5/models/yolov5m.yaml
	new file:   src/yolov5_ros/src/yolov5/models/yolov5n.yaml
	new file:   src/yolov5_ros/src/yolov5/models/yolov5s.yaml
	new file:   src/yolov5_ros/src/yolov5/models/yolov5x.yaml
	new file:   src/yolov5_ros/src/yolov5/requirements.txt
	new file:   src/yolov5_ros/src/yolov5/setup.cfg
	new file:   src/yolov5_ros/src/yolov5/train.py
	new file:   src/yolov5_ros/src/yolov5/tutorial.ipynb
	new file:   src/yolov5_ros/src/yolov5/utils/__init__.py
	new file:   src/yolov5_ros/src/yolov5/utils/activations.py
	new file:   src/yolov5_ros/src/yolov5/utils/augmentations.py
	new file:   src/yolov5_ros/src/yolov5/utils/autoanchor.py
	new file:   src/yolov5_ros/src/yolov5/utils/autobatch.py
	new file:   src/yolov5_ros/src/yolov5/utils/aws/__init__.py
	new file:   src/yolov5_ros/src/yolov5/utils/aws/mime.sh
	new file:   src/yolov5_ros/src/yolov5/utils/aws/resume.py
	new file:   src/yolov5_ros/src/yolov5/utils/aws/userdata.sh
	new file:   src/yolov5_ros/src/yolov5/utils/benchmarks.py
	new file:   src/yolov5_ros/src/yolov5/utils/callbacks.py
	new file:   src/yolov5_ros/src/yolov5/utils/dataloaders.py
	new file:   src/yolov5_ros/src/yolov5/utils/docker/Dockerfile
	new file:   src/yolov5_ros/src/yolov5/utils/docker/Dockerfile-arm64
	new file:   src/yolov5_ros/src/yolov5/utils/docker/Dockerfile-cpu
	new file:   src/yolov5_ros/src/yolov5/utils/downloads.py
	new file:   src/yolov5_ros/src/yolov5/utils/flask_rest_api/README.md
	new file:   src/yolov5_ros/src/yolov5/utils/flask_rest_api/example_request.py
	new file:   src/yolov5_ros/src/yolov5/utils/flask_rest_api/restapi.py
	new file:   src/yolov5_ros/src/yolov5/utils/general.py
	new file:   src/yolov5_ros/src/yolov5/utils/google_app_engine/Dockerfile
	new file:   src/yolov5_ros/src/yolov5/utils/google_app_engine/additional_requirements.txt
	new file:   src/yolov5_ros/src/yolov5/utils/google_app_engine/app.yaml
	new file:   src/yolov5_ros/src/yolov5/utils/loggers/__init__.py
	new file:   src/yolov5_ros/src/yolov5/utils/loggers/clearml/README.md
	new file:   src/yolov5_ros/src/yolov5/utils/loggers/clearml/__init__.py
	new file:   src/yolov5_ros/src/yolov5/utils/loggers/clearml/clearml_utils.py
	new file:   src/yolov5_ros/src/yolov5/utils/loggers/clearml/hpo.py
	new file:   src/yolov5_ros/src/yolov5/utils/loggers/comet/README.md
	new file:   src/yolov5_ros/src/yolov5/utils/loggers/comet/__init__.py
	new file:   src/yolov5_ros/src/yolov5/utils/loggers/comet/comet_utils.py
	new file:   src/yolov5_ros/src/yolov5/utils/loggers/comet/hpo.py
	new file:   src/yolov5_ros/src/yolov5/utils/loggers/wandb/README.md
	new file:   src/yolov5_ros/src/yolov5/utils/loggers/wandb/__init__.py
	new file:   src/yolov5_ros/src/yolov5/utils/loggers/wandb/log_dataset.py
	new file:   src/yolov5_ros/src/yolov5/utils/loggers/wandb/sweep.py
	new file:   src/yolov5_ros/src/yolov5/utils/loggers/wandb/sweep.yaml
	new file:   src/yolov5_ros/src/yolov5/utils/loggers/wandb/wandb_utils.py
	new file:   src/yolov5_ros/src/yolov5/utils/loss.py
	new file:   src/yolov5_ros/src/yolov5/utils/metrics.py
	new file:   src/yolov5_ros/src/yolov5/utils/plots.py
	new file:   src/yolov5_ros/src/yolov5/utils/torch_utils.py
	new file:   src/yolov5_ros/src/yolov5/val.py
commit 7f5901bed3
Author: wxchen
Date:   2023-04-26 16:37:20 +08:00

213 changed files with 30971 additions and 0 deletions

.gitignore

@@ -0,0 +1,4 @@
.catkin_tools
/build
/devel
/logs

.vscode/c_cpp_properties.json

@@ -0,0 +1,20 @@
{
    "configurations": [
        {
            "browse": {
                "databaseFilename": "${default}",
                "limitSymbolsToIncludedHeaders": false
            },
            "includePath": [
                "/opt/ros/melodic/include/**",
                "/usr/include/**"
            ],
            "name": "ROS",
            "intelliSenseMode": "gcc-x64",
            "compilerPath": "/usr/bin/gcc",
            "cStandard": "gnu11",
            "cppStandard": "c++14"
        }
    ],
    "version": 4
}

.vscode/settings.json

@@ -0,0 +1,8 @@
{
    "python.autoComplete.extraPaths": [
        "/opt/ros/melodic/lib/python2.7/dist-packages"
    ],
    "python.analysis.extraPaths": [
        "/opt/ros/melodic/lib/python2.7/dist-packages"
    ]
}

.vscode/tasks.json

@@ -0,0 +1,17 @@
{
    "tasks": [
        {
            "type": "shell",
            "command": "catkin",
            "args": [
                "build",
                "-DPYTHON_EXECUTABLE=/home/da/miniconda3/envs/gsmini/bin/python"
            ],
            "problemMatcher": [
                "$catkin-gcc"
            ],
            "group": "build",
            "label": "catkin: build"
        }
    ]
}
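
Run outside VS Code, the args array above assembles the equivalent shell command (the hard-coded interpreter path is specific to the original author's machine, so treat it as an example):

catkin build -DPYTHON_EXECUTABLE=/home/da/miniconda3/envs/gsmini/bin/python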

src/detection_msgs/CMakeLists.txt

@@ -0,0 +1,205 @@
cmake_minimum_required(VERSION 3.0.2)
project(detection_msgs)
## Compile as C++11, supported in ROS Kinetic and newer
# add_compile_options(-std=c++11)
## Find catkin macros and libraries
## if COMPONENTS list like find_package(catkin REQUIRED COMPONENTS xyz)
## is used, also find other catkin packages
find_package(catkin REQUIRED COMPONENTS
  std_msgs
  message_generation
)
## System dependencies are found with CMake's conventions
# find_package(Boost REQUIRED COMPONENTS system)
## Uncomment this if the package has a setup.py. This macro ensures
## modules and global scripts declared therein get installed
## See http://ros.org/doc/api/catkin/html/user_guide/setup_dot_py.html
# catkin_python_setup()
################################################
## Declare ROS messages, services and actions ##
################################################
## To declare and build messages, services or actions from within this
## package, follow these steps:
## * Let MSG_DEP_SET be the set of packages whose message types you use in
## your messages/services/actions (e.g. std_msgs, actionlib_msgs, ...).
## * In the file package.xml:
## * add a build_depend tag for "message_generation"
## * add a build_depend and a exec_depend tag for each package in MSG_DEP_SET
## * If MSG_DEP_SET isn't empty the following dependency has been pulled in
## but can be declared for certainty nonetheless:
## * add a exec_depend tag for "message_runtime"
## * In this file (CMakeLists.txt):
## * add "message_generation" and every package in MSG_DEP_SET to
## find_package(catkin REQUIRED COMPONENTS ...)
## * add "message_runtime" and every package in MSG_DEP_SET to
## catkin_package(CATKIN_DEPENDS ...)
## * uncomment the add_*_files sections below as needed
## and list every .msg/.srv/.action file to be processed
## * uncomment the generate_messages entry below
## * add every package in MSG_DEP_SET to generate_messages(DEPENDENCIES ...)
## Generate messages in the 'msg' folder
add_message_files(
  FILES
  BoundingBox.msg
  BoundingBoxes.msg
)
## Generate services in the 'srv' folder
# add_service_files(
# FILES
# Service1.srv
# Service2.srv
# )
## Generate actions in the 'action' folder
# add_action_files(
# FILES
# Action1.action
# Action2.action
# )
## Generate added messages and services with any dependencies listed here
generate_messages(
  DEPENDENCIES
  std_msgs
)
################################################
## Declare ROS dynamic reconfigure parameters ##
################################################
## To declare and build dynamic reconfigure parameters within this
## package, follow these steps:
## * In the file package.xml:
## * add a build_depend and a exec_depend tag for "dynamic_reconfigure"
## * In this file (CMakeLists.txt):
## * add "dynamic_reconfigure" to
## find_package(catkin REQUIRED COMPONENTS ...)
## * uncomment the "generate_dynamic_reconfigure_options" section below
## and list every .cfg file to be processed
## Generate dynamic reconfigure parameters in the 'cfg' folder
# generate_dynamic_reconfigure_options(
# cfg/DynReconf1.cfg
# cfg/DynReconf2.cfg
# )
###################################
## catkin specific configuration ##
###################################
## The catkin_package macro generates cmake config files for your package
## Declare things to be passed to dependent projects
## INCLUDE_DIRS: uncomment this if your package contains header files
## LIBRARIES: libraries you create in this project that dependent projects also need
## CATKIN_DEPENDS: catkin_packages dependent projects also need
## DEPENDS: system dependencies of this project that dependent projects also need
catkin_package(
  # INCLUDE_DIRS include
  # LIBRARIES detection_msgs
  CATKIN_DEPENDS std_msgs message_runtime
  # DEPENDS system_lib
)
###########
## Build ##
###########
## Specify additional locations of header files
## Your package locations should be listed before other locations
include_directories(
  # include
  ${catkin_INCLUDE_DIRS}
)
## Declare a C++ library
# add_library(${PROJECT_NAME}
# src/${PROJECT_NAME}/detection_msgs.cpp
# )
## Add cmake target dependencies of the library
## as an example, code may need to be generated before libraries
## either from message generation or dynamic reconfigure
# add_dependencies(${PROJECT_NAME} ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS})
## Declare a C++ executable
## With catkin_make all packages are built within a single CMake context
## The recommended prefix ensures that target names across packages don't collide
# add_executable(${PROJECT_NAME}_node src/detection_msgs_node.cpp)
## Rename C++ executable without prefix
## The above recommended prefix causes long target names, the following renames the
## target back to the shorter version for ease of user use
## e.g. "rosrun someones_pkg node" instead of "rosrun someones_pkg someones_pkg_node"
# set_target_properties(${PROJECT_NAME}_node PROPERTIES OUTPUT_NAME node PREFIX "")
## Add cmake target dependencies of the executable
## same as for the library above
# add_dependencies(${PROJECT_NAME}_node ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS})
## Specify libraries to link a library or executable target against
# target_link_libraries(${PROJECT_NAME}_node
# ${catkin_LIBRARIES}
# )
#############
## Install ##
#############
# all install targets should use catkin DESTINATION variables
# See http://ros.org/doc/api/catkin/html/adv_user_guide/variables.html
## Mark executable scripts (Python etc.) for installation
## in contrast to setup.py, you can choose the destination
# catkin_install_python(PROGRAMS
# scripts/my_python_script
# DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION}
# )
## Mark executables for installation
## See http://docs.ros.org/melodic/api/catkin/html/howto/format1/building_executables.html
# install(TARGETS ${PROJECT_NAME}_node
# RUNTIME DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION}
# )
## Mark libraries for installation
## See http://docs.ros.org/melodic/api/catkin/html/howto/format1/building_libraries.html
# install(TARGETS ${PROJECT_NAME}
# ARCHIVE DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION}
# LIBRARY DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION}
# RUNTIME DESTINATION ${CATKIN_GLOBAL_BIN_DESTINATION}
# )
## Mark cpp header files for installation
# install(DIRECTORY include/${PROJECT_NAME}/
# DESTINATION ${CATKIN_PACKAGE_INCLUDE_DESTINATION}
# FILES_MATCHING PATTERN "*.h"
# PATTERN ".svn" EXCLUDE
# )
## Mark other files for installation (e.g. launch and bag files, etc.)
# install(FILES
# # myfile1
# # myfile2
# DESTINATION ${CATKIN_PACKAGE_SHARE_DESTINATION}
# )
#############
## Testing ##
#############
## Add gtest based cpp test target and link libraries
# catkin_add_gtest(${PROJECT_NAME}-test test/test_detection_msgs.cpp)
# if(TARGET ${PROJECT_NAME}-test)
# target_link_libraries(${PROJECT_NAME}-test ${PROJECT_NAME})
# endif()
## Add folders to be run by python nosetests
# catkin_add_nosetests(test)

src/detection_msgs/msg/BoundingBox.msg

@@ -0,0 +1,31 @@
# Copyright (c) 2017, Marko Bjelonic, Robotic Systems Lab, ETH Zurich
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
string Class
float64 probability
int64 xmin
int64 ymin
int64 xmax
int64 ymax

src/detection_msgs/msg/BoundingBoxes.msg

@@ -0,0 +1,28 @@
# Copyright (c) 2017, Marko Bjelonic, Robotic Systems Lab, ETH Zurich
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Header header
Header image_header
BoundingBox[] bounding_boxes
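
The two message definitions above are plain data carriers. As a minimal sketch of how a rospy node could fill and publish them once detection_msgs is built (the node and topic names here are illustrative, not part of this commit):

#!/usr/bin/env python
import rospy
from std_msgs.msg import Header
from detection_msgs.msg import BoundingBox, BoundingBoxes

rospy.init_node('bbox_publisher_example')  # hypothetical node name
pub = rospy.Publisher('/detections', BoundingBoxes, queue_size=1)  # hypothetical topic

box = BoundingBox()
box.Class = 'person'  # capitalized field name, exactly as declared in BoundingBox.msg
box.probability = 0.9
box.xmin, box.ymin, box.xmax, box.ymax = 10, 20, 110, 220

msg = BoundingBoxes()
msg.header = Header(stamp=rospy.Time.now())
msg.image_header = msg.header  # normally carries the stamp of the source image
msg.bounding_boxes = [box]
pub.publish(msg)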

src/detection_msgs/package.xml

@@ -0,0 +1,63 @@
<?xml version="1.0"?>
<package format="2">
  <name>detection_msgs</name>
  <version>0.0.0</version>
  <description>The detection_msgs package</description>

  <!-- One maintainer tag required, multiple allowed, one person per tag -->
  <!-- Example: -->
  <!-- <maintainer email="jane.doe@example.com">Jane Doe</maintainer> -->
  <maintainer email="nle17@todo.todo">nle17</maintainer>

  <!-- One license tag required, multiple allowed, one license per tag -->
  <!-- Commonly used license strings: -->
  <!--   BSD, MIT, Boost Software License, GPLv2, GPLv3, LGPLv2.1, LGPLv3 -->
  <license>TODO</license>

  <!-- Url tags are optional, but multiple are allowed, one per tag -->
  <!-- Optional attribute type can be: website, bugtracker, or repository -->
  <!-- Example: -->
  <!-- <url type="website">http://wiki.ros.org/detection_msgs</url> -->

  <!-- Author tags are optional, multiple are allowed, one per tag -->
  <!-- Authors do not have to be maintainers, but could be -->
  <!-- Example: -->
  <!-- <author email="jane.doe@example.com">Jane Doe</author> -->

  <!-- The *depend tags are used to specify dependencies -->
  <!-- Dependencies can be catkin packages or system dependencies -->
  <!-- Examples: -->
  <!-- Use depend as a shortcut for packages that are both build and exec dependencies -->
  <!--   <depend>roscpp</depend> -->
  <!--   Note that this is equivalent to the following: -->
  <!--   <build_depend>roscpp</build_depend> -->
  <!--   <exec_depend>roscpp</exec_depend> -->
  <!-- Use build_depend for packages you need at compile time: -->
  <build_depend>message_generation</build_depend>
  <!-- Use build_export_depend for packages you need in order to build against this package: -->
  <!-- <build_export_depend>message_generation</build_export_depend> -->
  <!-- Use buildtool_depend for build tool packages: -->
  <!-- <buildtool_depend>catkin</buildtool_depend> -->
  <!-- Use exec_depend for packages you need at runtime: -->
  <exec_depend>message_runtime</exec_depend>
  <!-- Use test_depend for packages you need only for testing: -->
  <!-- <test_depend>gtest</test_depend> -->
  <!-- Use doc_depend for packages you need only for building documentation: -->
  <!-- <doc_depend>doxygen</doc_depend> -->
  <buildtool_depend>catkin</buildtool_depend>
  <build_depend>std_msgs</build_depend>
  <build_export_depend>std_msgs</build_export_depend>
  <exec_depend>std_msgs</exec_depend>

  <!-- The export tag contains other, unspecified, tags -->
  <export>
    <!-- Other tools can request additional information be placed here -->
  </export>
</package>

src/maintain/CMakeLists.txt

@@ -0,0 +1,202 @@
cmake_minimum_required(VERSION 3.0.2)
project(maintain)
## Compile as C++11, supported in ROS Kinetic and newer
# add_compile_options(-std=c++11)
## Find catkin macros and libraries
## if COMPONENTS list like find_package(catkin REQUIRED COMPONENTS xyz)
## is used, also find other catkin packages
find_package(catkin REQUIRED)
## System dependencies are found with CMake's conventions
# find_package(Boost REQUIRED COMPONENTS system)
## Uncomment this if the package has a setup.py. This macro ensures
## modules and global scripts declared therein get installed
## See http://ros.org/doc/api/catkin/html/user_guide/setup_dot_py.html
# catkin_python_setup()
################################################
## Declare ROS messages, services and actions ##
################################################
## To declare and build messages, services or actions from within this
## package, follow these steps:
## * Let MSG_DEP_SET be the set of packages whose message types you use in
## your messages/services/actions (e.g. std_msgs, actionlib_msgs, ...).
## * In the file package.xml:
## * add a build_depend tag for "message_generation"
## * add a build_depend and a exec_depend tag for each package in MSG_DEP_SET
## * If MSG_DEP_SET isn't empty the following dependency has been pulled in
## but can be declared for certainty nonetheless:
## * add a exec_depend tag for "message_runtime"
## * In this file (CMakeLists.txt):
## * add "message_generation" and every package in MSG_DEP_SET to
## find_package(catkin REQUIRED COMPONENTS ...)
## * add "message_runtime" and every package in MSG_DEP_SET to
## catkin_package(CATKIN_DEPENDS ...)
## * uncomment the add_*_files sections below as needed
## and list every .msg/.srv/.action file to be processed
## * uncomment the generate_messages entry below
## * add every package in MSG_DEP_SET to generate_messages(DEPENDENCIES ...)
## Generate messages in the 'msg' folder
# add_message_files(
# FILES
# Message1.msg
# Message2.msg
# )
## Generate services in the 'srv' folder
# add_service_files(
# FILES
# Service1.srv
# Service2.srv
# )
## Generate actions in the 'action' folder
# add_action_files(
# FILES
# Action1.action
# Action2.action
# )
## Generate added messages and services with any dependencies listed here
# generate_messages(
# DEPENDENCIES
# std_msgs # Or other packages containing msgs
# )
################################################
## Declare ROS dynamic reconfigure parameters ##
################################################
## To declare and build dynamic reconfigure parameters within this
## package, follow these steps:
## * In the file package.xml:
## * add a build_depend and a exec_depend tag for "dynamic_reconfigure"
## * In this file (CMakeLists.txt):
## * add "dynamic_reconfigure" to
## find_package(catkin REQUIRED COMPONENTS ...)
## * uncomment the "generate_dynamic_reconfigure_options" section below
## and list every .cfg file to be processed
## Generate dynamic reconfigure parameters in the 'cfg' folder
# generate_dynamic_reconfigure_options(
# cfg/DynReconf1.cfg
# cfg/DynReconf2.cfg
# )
###################################
## catkin specific configuration ##
###################################
## The catkin_package macro generates cmake config files for your package
## Declare things to be passed to dependent projects
## INCLUDE_DIRS: uncomment this if your package contains header files
## LIBRARIES: libraries you create in this project that dependent projects also need
## CATKIN_DEPENDS: catkin_packages dependent projects also need
## DEPENDS: system dependencies of this project that dependent projects also need
catkin_package(
  # INCLUDE_DIRS include
  # LIBRARIES maintain
  # CATKIN_DEPENDS other_catkin_pkg
  # DEPENDS system_lib
)
###########
## Build ##
###########
## Specify additional locations of header files
## Your package locations should be listed before other locations
include_directories(
  # include
  # ${catkin_INCLUDE_DIRS}
)
## Declare a C++ library
# add_library(${PROJECT_NAME}
# src/${PROJECT_NAME}/maintain.cpp
# )
## Add cmake target dependencies of the library
## as an example, code may need to be generated before libraries
## either from message generation or dynamic reconfigure
# add_dependencies(${PROJECT_NAME} ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS})
## Declare a C++ executable
## With catkin_make all packages are built within a single CMake context
## The recommended prefix ensures that target names across packages don't collide
# add_executable(${PROJECT_NAME}_node src/maintain_node.cpp)
## Rename C++ executable without prefix
## The above recommended prefix causes long target names, the following renames the
## target back to the shorter version for ease of user use
## e.g. "rosrun someones_pkg node" instead of "rosrun someones_pkg someones_pkg_node"
# set_target_properties(${PROJECT_NAME}_node PROPERTIES OUTPUT_NAME node PREFIX "")
## Add cmake target dependencies of the executable
## same as for the library above
# add_dependencies(${PROJECT_NAME}_node ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS})
## Specify libraries to link a library or executable target against
# target_link_libraries(${PROJECT_NAME}_node
# ${catkin_LIBRARIES}
# )
#############
## Install ##
#############
# all install targets should use catkin DESTINATION variables
# See http://ros.org/doc/api/catkin/html/adv_user_guide/variables.html
## Mark executable scripts (Python etc.) for installation
## in contrast to setup.py, you can choose the destination
# catkin_install_python(PROGRAMS
# scripts/my_python_script
# DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION}
# )
## Mark executables for installation
## See http://docs.ros.org/melodic/api/catkin/html/howto/format1/building_executables.html
# install(TARGETS ${PROJECT_NAME}_node
# RUNTIME DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION}
# )
## Mark libraries for installation
## See http://docs.ros.org/melodic/api/catkin/html/howto/format1/building_libraries.html
# install(TARGETS ${PROJECT_NAME}
# ARCHIVE DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION}
# LIBRARY DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION}
# RUNTIME DESTINATION ${CATKIN_GLOBAL_BIN_DESTINATION}
# )
## Mark cpp header files for installation
# install(DIRECTORY include/${PROJECT_NAME}/
# DESTINATION ${CATKIN_PACKAGE_INCLUDE_DESTINATION}
# FILES_MATCHING PATTERN "*.h"
# PATTERN ".svn" EXCLUDE
# )
## Mark other files for installation (e.g. launch and bag files, etc.)
# install(FILES
# # myfile1
# # myfile2
# DESTINATION ${CATKIN_PACKAGE_SHARE_DESTINATION}
# )
#############
## Testing ##
#############
## Add gtest based cpp test target and link libraries
# catkin_add_gtest(${PROJECT_NAME}-test test/test_maintain.cpp)
# if(TARGET ${PROJECT_NAME}-test)
# target_link_libraries(${PROJECT_NAME}-test ${PROJECT_NAME})
# endif()
## Add folders to be run by python nosetests
# catkin_add_nosetests(test)

src/maintain/launch/maintain.launch

@@ -0,0 +1,4 @@
<launch>
  <node pkg="maintain" type="test.py" name="maintain" output="screen">
  </node>
</launch>
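
With the workspace built and sourced, the launch file above starts scripts/test.py as a node named maintain with its output echoed to the terminal; the usual invocation would be:

roslaunch maintain maintain.launch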

src/maintain/package.xml

@@ -0,0 +1,59 @@
<?xml version="1.0"?>
<package format="2">
  <name>maintain</name>
  <version>0.0.0</version>
  <description>The maintain package</description>

  <!-- One maintainer tag required, multiple allowed, one person per tag -->
  <!-- Example: -->
  <!-- <maintainer email="jane.doe@example.com">Jane Doe</maintainer> -->
  <maintainer email="wxchen@todo.todo">wxchen</maintainer>

  <!-- One license tag required, multiple allowed, one license per tag -->
  <!-- Commonly used license strings: -->
  <!--   BSD, MIT, Boost Software License, GPLv2, GPLv3, LGPLv2.1, LGPLv3 -->
  <license>TODO</license>

  <!-- Url tags are optional, but multiple are allowed, one per tag -->
  <!-- Optional attribute type can be: website, bugtracker, or repository -->
  <!-- Example: -->
  <!-- <url type="website">http://wiki.ros.org/maintain</url> -->

  <!-- Author tags are optional, multiple are allowed, one per tag -->
  <!-- Authors do not have to be maintainers, but could be -->
  <!-- Example: -->
  <!-- <author email="jane.doe@example.com">Jane Doe</author> -->

  <!-- The *depend tags are used to specify dependencies -->
  <!-- Dependencies can be catkin packages or system dependencies -->
  <!-- Examples: -->
  <!-- Use depend as a shortcut for packages that are both build and exec dependencies -->
  <!--   <depend>roscpp</depend> -->
  <!--   Note that this is equivalent to the following: -->
  <!--   <build_depend>roscpp</build_depend> -->
  <!--   <exec_depend>roscpp</exec_depend> -->
  <!-- Use build_depend for packages you need at compile time: -->
  <!--   <build_depend>message_generation</build_depend> -->
  <!-- Use build_export_depend for packages you need in order to build against this package: -->
  <!--   <build_export_depend>message_generation</build_export_depend> -->
  <!-- Use buildtool_depend for build tool packages: -->
  <!--   <buildtool_depend>catkin</buildtool_depend> -->
  <!-- Use exec_depend for packages you need at runtime: -->
  <!--   <exec_depend>message_runtime</exec_depend> -->
  <!-- Use test_depend for packages you need only for testing: -->
  <!--   <test_depend>gtest</test_depend> -->
  <!-- Use doc_depend for packages you need only for building documentation: -->
  <!--   <doc_depend>doxygen</doc_depend> -->
  <buildtool_depend>catkin</buildtool_depend>

  <!-- The export tag contains other, unspecified, tags -->
  <export>
    <!-- Other tools can request additional information be placed here -->
  </export>
</package>

src/maintain/scripts/test.py (executable)

@@ -0,0 +1,93 @@
#! /home/wxchen/.conda/envs/gsmini/bin/python
import numpy as np
import cv2 as cv
from matplotlib import pyplot as plt
import rospy
from sensor_msgs.msg import Image
import message_filters
from cv_bridge import CvBridge, CvBridgeError
import rospkg

MIN_MATCH_COUNT = 10
pkg_path = rospkg.RosPack().get_path('maintain')
rospy.loginfo(pkg_path)
# template image that the matcher looks for in the live camera feed
img_template = cv.imread(pkg_path + '/scripts/tt.png', 0)


def callback(rgb, depth):
    rospy.loginfo("callback")
    bridge = CvBridge()
    # rospy.loginfo(rgb.header.stamp)
    # rospy.loginfo(depth.header.stamp)
    try:
        rgb_image = bridge.imgmsg_to_cv2(rgb, 'bgr8')
        depth_image = bridge.imgmsg_to_cv2(depth, '16UC1')
        img_matcher = matcher(rgb_image)
        cv.imshow("img_matcher", img_matcher)
        cv.waitKey(1000)
    except CvBridgeError as e:
        print(e)


def matcher(img):
    try:
        # Initiate SIFT detector
        sift = cv.SIFT_create()
        # find the keypoints and descriptors with SIFT
        kp1, des1 = sift.detectAndCompute(img_template, None)
        kp2, des2 = sift.detectAndCompute(img, None)
        FLANN_INDEX_KDTREE = 1
        index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
        search_params = dict(checks=50)
        flann = cv.FlannBasedMatcher(index_params, search_params)
        matches = flann.knnMatch(des1, des2, k=2)
        # store all the good matches as per Lowe's ratio test.
        good = []
        for m, n in matches:
            if m.distance < 0.7 * n.distance:
                good.append(m)
        if len(good) > MIN_MATCH_COUNT:
            src_pts = np.float32([kp1[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
            dst_pts = np.float32([kp2[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)
            M, mask = cv.findHomography(src_pts, dst_pts, cv.RANSAC, 5.0)
            matchesMask = mask.ravel().tolist()
            h, w = img_template.shape
            pts = np.float32([[0, 0], [0, h - 1], [w - 1, h - 1], [w - 1, 0]]).reshape(-1, 1, 2)
            # project the template corners into the camera frame and crop that region
            dst = cv.perspectiveTransform(pts, M)
            roi = img[np.int32(dst)[0][0][1]:np.int32(dst)[2][0][1], np.int32(dst)[0][0][0]:np.int32(dst)[2][0][0]]
            # roi = detect_black(roi)
            # img2 = cv.polylines(img2,[np.int32(dst)],True,255,3, cv.LINE_AA)
        else:
            # roi is only bound in the branch above; with too few matches the
            # return below raises NameError, which the except clause swallows
            print("Not enough matches are found - {}/{}".format(len(good), MIN_MATCH_COUNT))
        return roi
    except Exception as e:
        print(e)


if __name__ == "__main__":
    rospy.init_node("maintain")
    rospy.loginfo("maintain task start ......")
    rgb_sub = message_filters.Subscriber("/camera/color/image_raw", Image)
    depth_sub = message_filters.Subscriber("/camera/aligned_depth_to_color/image_raw", Image)
    ts = message_filters.TimeSynchronizer([rgb_sub, depth_sub], 1)
    ts.registerCallback(callback)
    rospy.spin()

src/vision_opencv/README.rst

@@ -0,0 +1,7 @@
vision_opencv
=============

.. image:: https://travis-ci.org/ros-perception/vision_opencv.svg?branch=indigo
   :target: https://travis-ci.org/ros-perception/vision_opencv

Packages for interfacing ROS with OpenCV, a library of programming functions for real time computer vision.

src/vision_opencv/cv_bridge/CHANGELOG.rst

@@ -0,0 +1,431 @@
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Changelog for package cv_bridge
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
1.13.1 (2022-10-03)
-------------------
* Fix endian mismatch issue per boostorg/python PR `#218 <https://github.com/ros-perception/vision_opencv/issues/218>`_
* Update CMakeLists.txt for Windows build environment (`#265 <https://github.com/ros-perception/vision_opencv/issues/265>`_)
* Remove path slash separator from 'package_dir' (`#267 <https://github.com/ros-perception/vision_opencv/issues/267>`_)
* Fix travis. (`#269 <https://github.com/ros-perception/vision_opencv/issues/269>`_)
* Contributors: Duan Yutong, James Xu, Kenji Brameld, Sean Yen
1.13.0 (2018-04-30)
-------------------
* Use rosdep OpenCV and not ROS one.
We definitely don't need the whole OpenCV.
We need to clean the rosdep keys.
* Contributors: Vincent Rabaud
1.12.8 (2018-04-17)
-------------------
* Merge pull request `#191 <https://github.com/ros-perception/vision_opencv/issues/191>`_ from patrickelectric/kinetic
cv2_to_imgmsg: step must be int
* cv2_to_imgmsg: step must be int
Signed-off-by: Patrick José Pereira <patrickelectric@gmail.com>
* Contributors: Patrick José Pereira, Vincent Rabaud
1.12.7 (2017-11-12)
-------------------
1.12.6 (2017-11-11)
-------------------
* fix endianness issues
* Contributors: Vincent Rabaud
1.12.5 (2017-11-05)
-------------------
* add version_gte for opencv3
@vrabaud If you'll update the opencv3 version as discussed in https://discourse.ros.org/t/opencv-3-3/2674/4, I think we'd better add a 'version_gte' tag so that apt-get install ros-kinetic-cv-bridge also pulls opencv3.3 from the repository, to avoid API breakage between opencv2 and opencv3.
* Simplify the dependency components of cv_bridge
Fixes `#183 <https://github.com/ros-perception/vision_opencv/issues/183>`_
* Fixes `#177 <https://github.com/ros-perception/vision_opencv/issues/177>`_
The Python bridge was wrong on OpenCV2 with mono8 (and any Mat
with only two dimensions btw). Took the official Python bridge
from OpenCV.
* Add missing test file
This fixes `#171 <https://github.com/ros-perception/vision_opencv/issues/171>`_
* Properly deal with alpha in image compression.
That fixes `#169 <https://github.com/ros-perception/vision_opencv/issues/169>`_
* Silence warnings about un-used variables
* export OpenCV variables
* Contributors: Kei Okada, Victor Lamoine, Vincent Rabaud
1.12.4 (2017-01-29)
-------------------
* properly find Boost Python 2 or 3
This fixes `#158 <https://github.com/ros-perception/vision_opencv/issues/158>`_
* Contributors: Vincent Rabaud
1.12.3 (2016-12-04)
-------------------
* Use api in sensor_msgs to get byte_depth and num channels
* Implement cpp conversion of N channel image
This is cpp version of https://github.com/ros-perception/vision_opencv/pull/141,
which is one for python.
* Fill black color to depth nan region
* address gcc6 build error in cv_bridge and tune
With gcc6, compiling fails with `stdlib.h: No such file or directory`,
as including '-isystem /usr/include' breaks with gcc6, cf.,
https://gcc.gnu.org/bugzilla/show_bug.cgi?id=70129
This commit addresses this issue for cv_bridge in the same way
it was done in the commit ead421b8 [1] for image_geometry.
This issue was also addressed in various other ROS packages.
A list of related commits and pull requests is at:
https://github.com/ros/rosdistro/issues/12783
[1] https://github.com/ros-perception/vision_opencv/commit/ead421b85eeb750cbf7988657015296ed6789bcf
Signed-off-by: Lukas Bulwahn <lukas.bulwahn@oss.bmw-carit.de>
* cv_bridge: Add missing test_depend on numpy
* Contributors: Kentaro Wada, Lukas Bulwahn, Maarten de Vries
1.12.2 (2016-09-24)
-------------------
* Specify background label when colorizing label image
* Adjust to arbitrary image channels like 32FC40
Proper fix for `#141 <https://github.com/ros-perception/vision_opencv/issues/141>`_
* Remove unexpectedly included print statement
* Contributors: Kentaro Wada, Vincent Rabaud
1.12.1 (2016-07-11)
-------------------
* split the conversion tests out of enumerants
* support is_bigendian in Python
Fixes `#114 <https://github.com/ros-perception/vision_opencv/issues/114>`_
Also fixes mono16 test
* Support compressed Images messages in python for indigo
- Add cv2_to_compressed_imgmsg: Convert from cv2 image to compressed image ros msg.
- Add compressed_imgmsg_to_cv2: Convert the compressed message to a new image.
- Add compressed image tests.
- Add time to msgs (compressed and regular).
add enumerants test for compressed image.
merge the compressed tests with the regular ones.
better comment explanation. I will squash this commit.
Fix indentation
fix typo: from .imgmsg_to_compressed_cv2 to .compressed_imgmsg_to_cv2.
remove cv2.CV_8UC1
remove rospy and time dependency.
change from IMREAD_COLOR to IMREAD_ANYCOLOR.
- make indentation of 4.
- remove space trailer.
- remove space from empty lines.
- another set of for loops, it will make things easier to track. In that new set, just have the number of channels in ([],1,3,4) (ignore two for jpg). from: https://github.com/ros-perception/vision_opencv/pull/132#discussion_r66721943
- keep the OpenCV error message. from: https://github.com/ros-perception/vision_opencv/pull/132#discussion_r66721013
add debug print for test.
add case for 4 channels in test.
remove 4 channels case from compressed test.
add debug print for test.
change typo of format.
fix typo in format. change from dip to dib.
change to IMREAD_ANYCOLOR as python code. (as it should).
rename TIFF to tiff
Separate the tests: one for regular images and one for compressed.
update comment
* Add CvtColorForDisplayOptions with new colormap param
* fix doc jobs
* Add python binding for cv_bridge::cvtColorForDisplay
* Don't colorize float image as label image
This is a bug and image whose encoding is other than 32SC1 should not be
colorized. (currently, depth images with 32FC1 is also colorized.)
* Fix compilation of cv_bridge with opencv3 and python3.
* Contributors: Kentaro Wada, Maarten de Vries, Vincent Rabaud, talregev
1.12.0 (2016-03-18)
-------------------
* depend on OpenCV3 only
* Contributors: Vincent Rabaud
1.11.12 (2016-03-10)
--------------------
* Fix my typo
* Remove another eval
Because `cvtype2_to_dtype_with_channels('8UCimport os; os.system("rm -rf /")')` should never have a chance of happening.
* Remove eval, and other fixes
Also, extend from object, so as not to get a python 2.2-style class, and use the new-style raise statement
* Contributors: Eric Wieser
1.11.11 (2016-01-31)
--------------------
* clean up the doc files
* fix a few warnings in doc jobs
* Contributors: Vincent Rabaud
1.11.10 (2016-01-16)
--------------------
* fix OpenCV3 build
* Describe about converting label to bgr image in cvtColorForDisplay
* Convert label to BGR image to display
* Add test for rgb_colors.cpp
* Add rgb_colors util
* Update doc for converting to BGR in cvtColorForDisplay
* Convert to BGR from any encoding
* Refactor: sensor_msgs::image_encodings -> enc
* Contributors: Kentaro Wada, Vincent Rabaud
1.11.9 (2015-11-29)
-------------------
* deal with endianness
* add cvtColorForDisplay
* Improved efficiency by using toCvShare instead of toCvCopy.
* Add format enum for easy use and choose format.
* fix compilation warnings
* start to extend the cv_bridge with cvCompressedImage class, that will convert from cv::Mat opencv images to CompressedImage ros messages and vice versa
* Contributors: Carlos Costa, Vincent Rabaud, talregev
1.11.8 (2015-07-15)
-------------------
* Simplify some OpenCV3 distinction
* fix tests
* fix test under OpenCV3
* Remove Python for Android
* Contributors: Gary Servin, Vincent Rabaud
1.11.7 (2014-12-14)
-------------------
* check that the type is indeed a Numpy one
This is in response to `#51 <https://github.com/ros-perception/vision_opencv/issues/51>`_
* Contributors: Vincent Rabaud
1.11.6 (2014-11-16)
-------------------
* change the behavior when there is only one channel
* cleanup tests
* Contributors: Vincent Rabaud
1.11.5 (2014-09-21)
-------------------
* get code to work with OpenCV3
actually fixes `#46 <https://github.com/ros-perception/vision_opencv/issues/46>`_ properly
* Contributors: Vincent Rabaud
1.11.4 (2014-07-27)
-------------------
* Fix `#42 <https://github.com/ros-perception/vision_opencv/issues/42>`_
* Contributors: Libor Wagner
1.11.3 (2014-06-08)
-------------------
* Correct dependency from non-existent package to cv_bridge
* Contributors: Isaac Isao Saito
1.11.2 (2014-04-28)
-------------------
* Add depend on python for cv_bridge
* Contributors: Scott K Logan
1.11.1 (2014-04-16)
-------------------
* fixes `#34 <https://github.com/ros-perception/vision_opencv/issues/34>`_
* Contributors: Vincent Rabaud
1.11.0 (2014-02-15)
-------------------
* remove deprecated API and fixes `#33 <https://github.com/ros-perception/vision_opencv/issues/33>`_
* fix OpenCV dependencies
* Contributors: Vincent Rabaud
1.10.15 (2014-02-07)
--------------------
* fix python 3 error at configure time
* Contributors: Dirk Thomas
1.10.14 (2013-11-23 16:17)
--------------------------
* update changelog
* Find NumPy include directory
* Contributors: Brian Jensen, Vincent Rabaud
1.10.13 (2013-11-23 09:19)
--------------------------
* fix compilation on older NumPy
* Contributors: Vincent Rabaud
1.10.12 (2013-11-22)
--------------------
* bump changelog
* Fixed issue with image message step size
* fix crash for non char data
* fix `#26 <https://github.com/ros-perception/vision_opencv/issues/26>`_
* Contributors: Brian Jensen, Vincent Rabaud
1.10.11 (2013-10-23)
--------------------
* fix bad image check and improve it too
* Contributors: Vincent Rabaud
1.10.10 (2013-10-19)
--------------------
* fixes `#25 <https://github.com/ros-perception/vision_opencv/issues/25>`_
* Contributors: Vincent Rabaud
1.10.9 (2013-10-07)
-------------------
* fixes `#20 <https://github.com/ros-perception/vision_opencv/issues/20>`_
* Contributors: Vincent Rabaud
1.10.8 (2013-09-09)
-------------------
* fixes `#22 <https://github.com/ros-perception/vision_opencv/issues/22>`_
* fixes `#17 <https://github.com/ros-perception/vision_opencv/issues/17>`_
* check for CATKIN_ENABLE_TESTING
* fixes `#16 <https://github.com/ros-perception/vision_opencv/issues/16>`_
* update email address
* Contributors: Lukas Bulwahn, Vincent Rabaud
1.10.7 (2013-07-17)
-------------------
1.10.6 (2013-03-01)
-------------------
* make sure conversion are applied for depth differences
* Contributors: Vincent Rabaud
1.10.5 (2013-02-11)
-------------------
1.10.4 (2013-02-02)
-------------------
* fix installation of the boost package
* Contributors: Vincent Rabaud
1.10.3 (2013-01-17)
-------------------
* Link against PYTHON_LIBRARIES
* Contributors: William Woodall
1.10.2 (2013-01-13)
-------------------
* use CATKIN_DEVEL_PREFIX instead of obsolete CATKIN_BUILD_PREFIX
* Contributors: Dirk Thomas
1.10.1 (2013-01-10)
-------------------
* add licenses
* fixes `#5 <https://github.com/ros-perception/vision_opencv/issues/5>`_ by removing the logic from Python and using wrapped C++ and adding a test for it
* fix a bug discovered when running the opencv_tests
* use some C++ logic
* add a Boost Python module to have the C++ logic used directly in Python
* Contributors: Vincent Rabaud
1.10.0 (2013-01-03)
-------------------
* add conversion from Bayer to gray
* Contributors: Vincent Rabaud
1.9.15 (2013-01-02)
-------------------
* use the reverted isColor behavior
* Contributors: Vincent Rabaud
1.9.14 (2012-12-30)
-------------------
1.9.13 (2012-12-15)
-------------------
* use the catkin macros for the setup.py
* fix `#3 <https://github.com/ros-perception/vision_opencv/issues/3>`_
* Contributors: Vincent Rabaud
1.9.12 (2012-12-14)
-------------------
* buildtool_depend catkin fix
* CMakeLists.txt clean up.
* Contributors: William Woodall
1.9.11 (2012-12-10)
-------------------
* fix issue `#1 <https://github.com/ros-perception/vision_opencv/issues/1>`_
* Cleanup of package.xml
* Contributors: Vincent Rabaud, William Woodall
1.9.10 (2012-10-04)
-------------------
* fix the bad include folder
* Contributors: Vincent Rabaud
1.9.9 (2012-10-01)
------------------
* fix dependencies
* Contributors: Vincent Rabaud
1.9.8 (2012-09-30)
------------------
* fix some dependencies
* add rosconsole as a dependency
* fix missing Python at install and fix some dependencies
* Contributors: Vincent Rabaud
1.9.7 (2012-09-28 21:07)
------------------------
* add missing stuff
* make sure we find catkin
* Contributors: Vincent Rabaud
1.9.6 (2012-09-28 15:17)
------------------------
* move the test to where it belongs
* fix the tests and the API to not handle conversion from CV_TYPE to Color type (does not make sense)
* comply to the new Catkin API
* backport the YUV422 bug fix from Fuerte
* apply patch from https://code.ros.org/trac/ros-pkg/ticket/5556
* Contributors: Vincent Rabaud
1.9.5 (2012-09-15)
------------------
* remove dependencies to the opencv2 ROS package
* Contributors: Vincent Rabaud
1.9.4 (2012-09-13)
------------------
* make sure the include folders are copied to the right place
* Contributors: Vincent Rabaud
1.9.3 (2012-09-12)
------------------
1.9.2 (2012-09-07)
------------------
* be more compliant to the latest catkin
* added catkin_project() to cv_bridge, image_geometry, and opencv_tests
* Contributors: Jonathan Binney, Vincent Rabaud
1.9.1 (2012-08-28 22:06)
------------------------
* remove things that were marked as ROS_DEPRECATED
* Contributors: Vincent Rabaud
1.9.0 (2012-08-28 14:29)
------------------------
* catkinized opencv_tests by Jon Binney
* catkinized cv_bridge package... others disable for now by Jon Binney
* remove the version check, let's trust OpenCV :)
* revert the removal of opencv2
* vision_opencv: Export OpenCV flags in manifests for image_geometry, cv_bridge.
* finally get rid of opencv2 as it is a system dependency now
* bump REQUIRED version of OpenCV to 2.3.2, which is what's in ros-fuerte-opencv
* switch rosdep name to opencv2, to refer to ros-fuerte-opencv2
* added missing header
* Added constructor to CvImage to make converting a cv::Mat to sensor_msgs::Image less verbose.
* cv_bridge: Added unit test for `#5206 <https://github.com/ros-perception/vision_opencv/issues/5206>`_
* cv_bridge: Applied patch from mdesnoyer to fix handling of non-continuous OpenCV images. `#5206 <https://github.com/ros-perception/vision_opencv/issues/5206>`_
* Adding opencv2 to all manifests, so that client packages may
not break when using them.
* baking in opencv debs and attempting a pre-release
* cv_bridge: Support for new 16-bit encodings.
* cv_bridge: Deprecate old C++ cv_bridge API.
* cv_bridge: Correctly scale for MONO8 <-> MONO16 conversions.
* cv_bridge: Fixed issue where pointer version to toCvCopy would ignore the requested encoding (http://answers.ros.org/question/258/converting-kinect-rgb-image-to-opencv-gives-wrong).
* fixed doc build by taking a static snapshot
* cv_bridge: Marking doc reviewed.
* cv_bridge: Tweaks to make docs look better.
* cv_bridge: Added cvtColor(). License notices. Documented that CvBridge class is obsolete.
* cv_bridge: Added redesigned C++ cv_bridge.
* Doc cleanup
* Trigger doc rebuild
* mono16 -> bgr conversion tested and fixed in C
* Added Ubuntu platform tags to manifest
* Handle mono16 properly
* Raise exception when imgMsgToCv() gets an image encoding it does not recognise, `#3489 <https://github.com/ros-perception/vision_opencv/issues/3489>`_
* Remove use of deprecated rosbuild macros
* Fixed example
* cv_bridge split from opencv2
* Contributors: Vincent Rabaud, ethanrublee, gerkey, jamesb, mihelich, vrabaud, wheeler

src/vision_opencv/cv_bridge/CMakeLists.txt

@@ -0,0 +1,48 @@
cmake_minimum_required(VERSION 2.8)
project(cv_bridge)

find_package(catkin REQUIRED COMPONENTS rosconsole sensor_msgs)

if(NOT ANDROID)
  find_package(PythonLibs)
  if(PYTHONLIBS_VERSION_STRING VERSION_LESS 3)
    find_package(Boost REQUIRED python)
  else()
    find_package(Boost REQUIRED python3)
  endif()
else()
  find_package(Boost REQUIRED)
endif()

find_package(OpenCV 3 REQUIRED
  COMPONENTS
    opencv_core
    opencv_imgproc
    opencv_imgcodecs
  CONFIG
)

catkin_package(
  INCLUDE_DIRS include
  LIBRARIES ${PROJECT_NAME}
  CATKIN_DEPENDS rosconsole sensor_msgs
  DEPENDS OpenCV
  CFG_EXTRAS cv_bridge-extras.cmake
)

catkin_python_setup()

include_directories(include ${Boost_INCLUDE_DIRS} ${OpenCV_INCLUDE_DIRS} ${catkin_INCLUDE_DIRS})

if(NOT ANDROID)
  add_subdirectory(python)
endif()
add_subdirectory(src)
if(CATKIN_ENABLE_TESTING)
  add_subdirectory(test)
endif()

# install the include folder
install(
  DIRECTORY include/${PROJECT_NAME}/
  DESTINATION ${CATKIN_PACKAGE_INCLUDE_DESTINATION}
)

src/vision_opencv/cv_bridge/cmake/cv_bridge-extras.cmake.in

@@ -0,0 +1,12 @@
set(OpenCV_VERSION @OpenCV_VERSION@)
set(OpenCV_VERSION_MAJOR @OpenCV_VERSION_MAJOR@)
set(OpenCV_VERSION_MINOR @OpenCV_VERSION_MINOR@)
set(OpenCV_VERSION_PATCH @OpenCV_VERSION_PATCH@)
set(OpenCV_SHARED @OpenCV_SHARED@)
set(OpenCV_CONFIG_PATH @OpenCV_CONFIG_PATH@)
set(OpenCV_INSTALL_PATH @OpenCV_INSTALL_PATH@)
set(OpenCV_LIB_COMPONENTS @OpenCV_LIB_COMPONENTS@)
set(OpenCV_USE_MANGLED_PATHS @OpenCV_USE_MANGLED_PATHS@)
set(OpenCV_MODULES_SUFFIX @OpenCV_MODULES_SUFFIX@)
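
The @VAR@ placeholders above are substituted at configure time, and because the file is listed under CFG_EXTRAS in catkin_package() earlier, the generated cv_bridgeConfig.cmake includes it automatically. A minimal sketch of a downstream package reading the exported values (the downstream package itself is hypothetical):

# CMakeLists.txt of a hypothetical package depending on cv_bridge
find_package(catkin REQUIRED COMPONENTS cv_bridge)
# cv_bridge-extras.cmake has now been pulled in via cv_bridgeConfig.cmake,
# so the OpenCV version cv_bridge was built against is visible:
message(STATUS "cv_bridge built against OpenCV ${OpenCV_VERSION}")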

src/vision_opencv/cv_bridge/doc/conf.py

@@ -0,0 +1,201 @@
# -*- coding: utf-8 -*-
#
# cv_bridge documentation build configuration file, created by
# sphinx-quickstart on Mon Jun 1 14:21:53 2009.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.append(os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.intersphinx', 'sphinx.ext.pngmath']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'cv_bridge'
copyright = u'2009, Willow Garage, Inc.'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'cv_bridgedoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'cv_bridge.tex', u'cv\\_bridge Documentation',
u'James Bowman', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
'http://docs.python.org/': None,
'http://docs.scipy.org/doc/numpy' : None,
}

View File

@@ -0,0 +1,18 @@
cv_bridge
=========
``cv_bridge`` contains a single class :class:`CvBridge` that converts ROS Image messages to
OpenCV images.

.. module:: cv_bridge

.. autoclass:: cv_bridge.CvBridge
    :members:

.. autoclass:: cv_bridge.CvBridgeError

Indices and tables
==================

* :ref:`genindex`
* :ref:`search`

View File

@@ -0,0 +1,14 @@
/**
\mainpage
\htmlinclude manifest.html
\b cv_bridge contains classes for easily converting between ROS
sensor_msgs/Image messages and OpenCV images.
\section codeapi Code API
- cv_bridge::CvImage
- toCvCopy()
- toCvShare()
*/
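To make the API above concrete, here is a minimal sketch (not part of the package)
of the typical receive-side usage; the callback and its wiring are assumptions for
illustration:

  #include <cv_bridge/cv_bridge.h>
  #include <sensor_msgs/Image.h>
  #include <opencv2/imgproc/imgproc.hpp>

  void imageCallback(const sensor_msgs::ImageConstPtr& msg)
  {
    // toCvCopy always copies, so the cv::Mat may be modified freely.
    cv_bridge::CvImagePtr cv = cv_bridge::toCvCopy(msg, "bgr8");
    cv::blur(cv->image, cv->image, cv::Size(3, 3));

    // toCvShare avoids the copy when the encoding already matches;
    // the resulting image is read-only.
    cv_bridge::CvImageConstPtr shared = cv_bridge::toCvShare(msg, "bgr8");
  }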

View File

@@ -0,0 +1,429 @@
/*********************************************************************
* Software License Agreement (BSD License)
*
* Copyright (c) 2011, Willow Garage, Inc,
* Copyright (c) 2015, Tal Regev.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of the Willow Garage nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*********************************************************************/
#ifndef CV_BRIDGE_CV_BRIDGE_H
#define CV_BRIDGE_CV_BRIDGE_H
#include <sensor_msgs/Image.h>
#include <sensor_msgs/CompressedImage.h>
#include <sensor_msgs/image_encodings.h>
#include <ros/static_assert.h>
#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/imgproc/types_c.h>
#include <stdexcept>
namespace cv_bridge {
class Exception : public std::runtime_error
{
public:
Exception(const std::string& description) : std::runtime_error(description) {}
};
class CvImage;
typedef boost::shared_ptr<CvImage> CvImagePtr;
typedef boost::shared_ptr<CvImage const> CvImageConstPtr;
//from: http://docs.opencv.org/modules/highgui/doc/reading_and_writing_images_and_video.html#Mat imread(const string& filename, int flags)
typedef enum {
BMP, DIB,
JPG, JPEG, JPE,
JP2,
PNG,
PBM, PGM, PPM,
SR, RAS,
TIFF, TIF,
} Format;
/**
* \brief Image message class that is interoperable with sensor_msgs/Image but uses a
* more convenient cv::Mat representation for the image data.
*/
class CvImage
{
public:
std_msgs::Header header; //!< ROS header
std::string encoding; //!< Image encoding ("mono8", "bgr8", etc.)
cv::Mat image; //!< Image data for use with OpenCV
/**
* \brief Empty constructor.
*/
CvImage() {}
/**
* \brief Constructor.
*/
CvImage(const std_msgs::Header& header, const std::string& encoding,
const cv::Mat& image = cv::Mat())
: header(header), encoding(encoding), image(image)
{
}
/**
* \brief Convert this message to a ROS sensor_msgs::Image message.
*
* The returned sensor_msgs::Image message contains a copy of the image data.
*/
sensor_msgs::ImagePtr toImageMsg() const;
/**
* \brief Convert this message to a ROS sensor_msgs::CompressedImage message.
*
* dst_format specifies the format to compress the image to; it defaults to JPG.
* It can currently be jpg, jp2, bmp, png, or tif, i.e. the formats OpenCV supports:
* http://docs.opencv.org/modules/highgui/doc/reading_and_writing_images_and_video.html
*/
sensor_msgs::CompressedImagePtr toCompressedImageMsg(const Format dst_format = JPG) const;
/**
* \brief Copy the message data to a ROS sensor_msgs::Image message.
*
* This overload is intended mainly for aggregate messages such as stereo_msgs::DisparityImage,
* which contains a sensor_msgs::Image as a data member.
*/
void toImageMsg(sensor_msgs::Image& ros_image) const;
/**
* \brief Copy the message data to a ROS sensor_msgs::CompressedImage message.
*
* dst_format specifies the format to compress the image to; it defaults to JPG.
* It can currently be jpg, jp2, bmp, png, or tif, i.e. the formats OpenCV supports:
* http://docs.opencv.org/modules/highgui/doc/reading_and_writing_images_and_video.html
*/
void toCompressedImageMsg(sensor_msgs::CompressedImage& ros_image, const Format dst_format = JPG) const;
typedef boost::shared_ptr<CvImage> Ptr;
typedef boost::shared_ptr<CvImage const> ConstPtr;
protected:
boost::shared_ptr<void const> tracked_object_; // for sharing ownership
/// @cond DOXYGEN_IGNORE
friend
CvImageConstPtr toCvShare(const sensor_msgs::Image& source,
const boost::shared_ptr<void const>& tracked_object,
const std::string& encoding);
/// @endcond
};
/**
* \brief Convert a sensor_msgs::Image message to an OpenCV-compatible CvImage, copying the
* image data.
*
* \param source A shared_ptr to a sensor_msgs::Image message
* \param encoding The desired encoding of the image data, one of the following strings:
* - \c "mono8"
* - \c "bgr8"
* - \c "bgra8"
* - \c "rgb8"
* - \c "rgba8"
* - \c "mono16"
*
* If \a encoding is the empty string (the default), the returned CvImage has the same encoding
* as \a source.
*/
CvImagePtr toCvCopy(const sensor_msgs::ImageConstPtr& source,
const std::string& encoding = std::string());
CvImagePtr toCvCopy(const sensor_msgs::CompressedImageConstPtr& source,
const std::string& encoding = std::string());
/**
* \brief Convert a sensor_msgs::Image message to an OpenCV-compatible CvImage, copying the
* image data.
*
* \param source A sensor_msgs::Image message
* \param encoding The desired encoding of the image data, one of the following strings:
* - \c "mono8"
* - \c "bgr8"
* - \c "bgra8"
* - \c "rgb8"
* - \c "rgba8"
* - \c "mono16"
*
* If \a encoding is the empty string (the default), the returned CvImage has the same encoding
* as \a source.
* If the source is 8-bit and the desired encoding 16-bit or vice versa, a scaling is applied
* (65535/255 and 255/65535 respectively). Otherwise, no scaling is applied and the rules of the
* OpenCV convertTo function apply (saturation): http://docs.opencv.org/modules/core/doc/basic_structures.html#mat-convertto
*/
CvImagePtr toCvCopy(const sensor_msgs::Image& source,
const std::string& encoding = std::string());
CvImagePtr toCvCopy(const sensor_msgs::CompressedImage& source,
const std::string& encoding = std::string());
/**
* \brief Convert an immutable sensor_msgs::Image message to an OpenCV-compatible CvImage, sharing
* the image data if possible.
*
* If the source encoding and desired encoding are the same, the returned CvImage will share
* the image data with \a source without copying it. The returned CvImage cannot be modified, as that
* could modify the \a source data.
*
* \param source A shared_ptr to a sensor_msgs::Image message
* \param encoding The desired encoding of the image data, one of the following strings:
* - \c "mono8"
* - \c "bgr8"
* - \c "bgra8"
* - \c "rgb8"
* - \c "rgba8"
* - \c "mono16"
*
* If \a encoding is the empty string (the default), the returned CvImage has the same encoding
* as \a source.
*/
CvImageConstPtr toCvShare(const sensor_msgs::ImageConstPtr& source,
const std::string& encoding = std::string());
/**
* \brief Convert an immutable sensor_msgs::Image message to an OpenCV-compatible CvImage, sharing
* the image data if possible.
*
* If the source encoding and desired encoding are the same, the returned CvImage will share
* the image data with \a source without copying it. The returned CvImage cannot be modified, as that
* could modify the \a source data.
*
* This overload is useful when you have a shared_ptr to a message that contains a
* sensor_msgs::Image, and wish to share ownership with the containing message.
*
* \param source The sensor_msgs::Image message
* \param tracked_object A shared_ptr to an object owning the sensor_msgs::Image
* \param encoding The desired encoding of the image data, one of the following strings:
* - \c "mono8"
* - \c "bgr8"
* - \c "bgra8"
* - \c "rgb8"
* - \c "rgba8"
* - \c "mono16"
*
* If \a encoding is the empty string (the default), the returned CvImage has the same encoding
* as \a source.
*/
CvImageConstPtr toCvShare(const sensor_msgs::Image& source,
const boost::shared_ptr<void const>& tracked_object,
const std::string& encoding = std::string());
/**
* \brief Convert a CvImage to another encoding using the same rules as toCvCopy
*/
CvImagePtr cvtColor(const CvImageConstPtr& source,
const std::string& encoding);
struct CvtColorForDisplayOptions {
CvtColorForDisplayOptions() :
do_dynamic_scaling(false),
min_image_value(0.0),
max_image_value(0.0),
colormap(-1),
bg_label(-1) {}
bool do_dynamic_scaling;
double min_image_value;
double max_image_value;
int colormap;
int bg_label;
};
/**
* \brief Converts an immutable sensor_msgs::Image message to another CvImage for display purposes,
* using practical conversion rules if needed.
*
* Data will be shared between input and output if possible.
*
* Recall: sensor_msgs::image_encodings::isColor and isMono tell whether an image contains R,G,B,A, mono
* (or any combination/subset) with 8 or 16 bit depth.
*
* The following rules apply:
* - if the output encoding is empty, the fact that the input image is mono or multi-channel is
*   preserved in the output image. The bit depth will be 8; multi-channel images are converted
*   to BGR regardless of the input encoding.
* - if the output encoding is not empty, it must be one for which
*   sensor_msgs::image_encodings::isColor or isMono returns true. It must also be 8 bit in depth.
* - if the input encoding is an OpenCV format (e.g. 8UC1) with 1, 3 or 4 channels, it is
*   respectively converted to mono, BGR or BGRA.
* - if the input encoding is 32SC1, the image is treated as a label image and converted to a
*   BGR image with a different color for each label.
*
* \param source A shared_ptr to a sensor_msgs::Image message
* \param encoding Either an encoding string for which sensor_msgs::image_encodings::isColor or
*   isMono returns true, or the empty string as explained above.
* \param options (cv_bridge::CvtColorForDisplayOptions) Options to convert the source image with.
*   - do_dynamic_scaling If true, the image is dynamically scaled between its minimum and maximum value
*     before being converted to its final encoding.
*   - min_image_value Independently from do_dynamic_scaling, if min_image_value and max_image_value are
*     different, the image is scaled between these two values before being converted to its final encoding.
*   - max_image_value Maximum image value
*   - colormap Colormap with which the source image is converted.
*/
CvImageConstPtr cvtColorForDisplay(const CvImageConstPtr& source,
const std::string& encoding = std::string(),
const CvtColorForDisplayOptions options = CvtColorForDisplayOptions());
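// Illustrative sketch (not part of this header): displaying a depth image with
// dynamic scaling; the message handling around it is assumed.
//
//   cv_bridge::CvtColorForDisplayOptions opts;
//   opts.do_dynamic_scaling = true;  // scale between the image's min and max
//   cv_bridge::CvImageConstPtr display =
//       cv_bridge::cvtColorForDisplay(cv_bridge::toCvShare(msg), "", opts);
//   cv::imshow("depth", display->image);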
/**
* \brief Get the OpenCV type enum corresponding to the encoding.
*
* For example, "bgr8" -> CV_8UC3, "32FC1" -> CV_32FC1, and "32FC10" -> CV_32FC10.
*/
int getCvType(const std::string& encoding);
} // namespace cv_bridge
// CvImage as a first class message type
// The rest of this file hooks into the roscpp serialization API to make CvImage
// a first-class message type you can publish and subscribe to directly.
// Unfortunately this doesn't yet work with image_transport, so don't rewrite all
// your callbacks to use CvImage! It might be useful for specific tasks, like
// processing bag files.
/// @cond DOXYGEN_IGNORE
namespace ros {
namespace message_traits {
template<> struct MD5Sum<cv_bridge::CvImage>
{
static const char* value() { return MD5Sum<sensor_msgs::Image>::value(); }
static const char* value(const cv_bridge::CvImage&) { return value(); }
static const uint64_t static_value1 = MD5Sum<sensor_msgs::Image>::static_value1;
static const uint64_t static_value2 = MD5Sum<sensor_msgs::Image>::static_value2;
// If the definition of sensor_msgs/Image changes, we'll get a compile error here.
ROS_STATIC_ASSERT(MD5Sum<sensor_msgs::Image>::static_value1 == 0x060021388200f6f0ULL);
ROS_STATIC_ASSERT(MD5Sum<sensor_msgs::Image>::static_value2 == 0xf447d0fcd9c64743ULL);
};
template<> struct DataType<cv_bridge::CvImage>
{
static const char* value() { return DataType<sensor_msgs::Image>::value(); }
static const char* value(const cv_bridge::CvImage&) { return value(); }
};
template<> struct Definition<cv_bridge::CvImage>
{
static const char* value() { return Definition<sensor_msgs::Image>::value(); }
static const char* value(const cv_bridge::CvImage&) { return value(); }
};
template<> struct HasHeader<cv_bridge::CvImage> : TrueType {};
} // namespace ros::message_traits
namespace serialization {
template<> struct Serializer<cv_bridge::CvImage>
{
/// @todo Still ignoring endianness...
template<typename Stream>
inline static void write(Stream& stream, const cv_bridge::CvImage& m)
{
stream.next(m.header);
stream.next((uint32_t)m.image.rows); // height
stream.next((uint32_t)m.image.cols); // width
stream.next(m.encoding);
uint8_t is_bigendian = 0;
stream.next(is_bigendian);
stream.next((uint32_t)m.image.step);
size_t data_size = m.image.step*m.image.rows;
stream.next((uint32_t)data_size);
if (data_size > 0)
memcpy(stream.advance(data_size), m.image.data, data_size);
}
template<typename Stream>
inline static void read(Stream& stream, cv_bridge::CvImage& m)
{
stream.next(m.header);
uint32_t height, width;
stream.next(height);
stream.next(width);
stream.next(m.encoding);
uint8_t is_bigendian;
stream.next(is_bigendian);
uint32_t step, data_size;
stream.next(step);
stream.next(data_size);
int type = cv_bridge::getCvType(m.encoding);
// Construct matrix pointing to the stream data, then copy it to m.image
cv::Mat tmp((int)height, (int)width, type, stream.advance(data_size), (size_t)step);
tmp.copyTo(m.image);
}
inline static uint32_t serializedLength(const cv_bridge::CvImage& m)
{
size_t data_size = m.image.step*m.image.rows;
return serializationLength(m.header) + serializationLength(m.encoding) + 17 + data_size;
}
};
} // namespace ros::serialization
namespace message_operations {
template<> struct Printer<cv_bridge::CvImage>
{
template<typename Stream>
static void stream(Stream&, const std::string&, const cv_bridge::CvImage&)
{
/// @todo Replicate printing for sensor_msgs::Image
}
};
} // namespace ros::message_operations
} // namespace ros
namespace cv_bridge {
inline std::ostream& operator<<(std::ostream& s, const CvImage& m)
{
ros::message_operations::Printer<CvImage>::stream(s, "", m);
return s;
}
} // namespace cv_bridge
/// @endcond
#endif
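For the send side, a minimal sketch (assuming an already-advertised ros::Publisher
named pub and a cv::Mat named mat; both are illustrative, not part of this header):

  #include <cv_bridge/cv_bridge.h>
  #include <sensor_msgs/image_encodings.h>

  void publishMat(ros::Publisher& pub, const cv::Mat& mat)
  {
    cv_bridge::CvImage cv_img;
    cv_img.header.stamp = ros::Time::now();                  // stamp the outgoing image
    cv_img.encoding = sensor_msgs::image_encodings::BGR8;
    cv_img.image = mat;                                      // cv::Mat header copy only
    pub.publish(cv_img.toImageMsg());                        // data is copied here
  }

Note that the serialization hooks above also make it possible to publish a CvImage
directly, but as the comment in the header warns, that does not work with
image_transport.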

View File

@@ -0,0 +1,211 @@
// -*- mode: c++ -*-
/*********************************************************************
* Original color definition is at scikit-image distributed with
* following license disclaimer:
*
* Copyright (C) 2011, the scikit-image team
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* 3. Neither the name of skimage nor the names of its contributors may be
* used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
* INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*********************************************************************/
#ifndef CV_BRIDGE_RGB_COLORS_H_
#define CV_BRIDGE_RGB_COLORS_H_
#include <opencv2/opencv.hpp>
namespace cv_bridge
{
namespace rgb_colors
{
/**
* @brief
* 146 rgb colors
*/
enum Colors {
ALICEBLUE,
ANTIQUEWHITE,
AQUA,
AQUAMARINE,
AZURE,
BEIGE,
BISQUE,
BLACK,
BLANCHEDALMOND,
BLUE,
BLUEVIOLET,
BROWN,
BURLYWOOD,
CADETBLUE,
CHARTREUSE,
CHOCOLATE,
CORAL,
CORNFLOWERBLUE,
CORNSILK,
CRIMSON,
CYAN,
DARKBLUE,
DARKCYAN,
DARKGOLDENROD,
DARKGRAY,
DARKGREEN,
DARKGREY,
DARKKHAKI,
DARKMAGENTA,
DARKOLIVEGREEN,
DARKORANGE,
DARKORCHID,
DARKRED,
DARKSALMON,
DARKSEAGREEN,
DARKSLATEBLUE,
DARKSLATEGRAY,
DARKSLATEGREY,
DARKTURQUOISE,
DARKVIOLET,
DEEPPINK,
DEEPSKYBLUE,
DIMGRAY,
DIMGREY,
DODGERBLUE,
FIREBRICK,
FLORALWHITE,
FORESTGREEN,
FUCHSIA,
GAINSBORO,
GHOSTWHITE,
GOLD,
GOLDENROD,
GRAY,
GREEN,
GREENYELLOW,
GREY,
HONEYDEW,
HOTPINK,
INDIANRED,
INDIGO,
IVORY,
KHAKI,
LAVENDER,
LAVENDERBLUSH,
LAWNGREEN,
LEMONCHIFFON,
LIGHTBLUE,
LIGHTCORAL,
LIGHTCYAN,
LIGHTGOLDENRODYELLOW,
LIGHTGRAY,
LIGHTGREEN,
LIGHTGREY,
LIGHTPINK,
LIGHTSALMON,
LIGHTSEAGREEN,
LIGHTSKYBLUE,
LIGHTSLATEGRAY,
LIGHTSLATEGREY,
LIGHTSTEELBLUE,
LIGHTYELLOW,
LIME,
LIMEGREEN,
LINEN,
MAGENTA,
MAROON,
MEDIUMAQUAMARINE,
MEDIUMBLUE,
MEDIUMORCHID,
MEDIUMPURPLE,
MEDIUMSEAGREEN,
MEDIUMSLATEBLUE,
MEDIUMSPRINGGREEN,
MEDIUMTURQUOISE,
MEDIUMVIOLETRED,
MIDNIGHTBLUE,
MINTCREAM,
MISTYROSE,
MOCCASIN,
NAVAJOWHITE,
NAVY,
OLDLACE,
OLIVE,
OLIVEDRAB,
ORANGE,
ORANGERED,
ORCHID,
PALEGOLDENROD,
PALEGREEN,
PALEVIOLETRED,
PAPAYAWHIP,
PEACHPUFF,
PERU,
PINK,
PLUM,
POWDERBLUE,
PURPLE,
RED,
ROSYBROWN,
ROYALBLUE,
SADDLEBROWN,
SALMON,
SANDYBROWN,
SEAGREEN,
SEASHELL,
SIENNA,
SILVER,
SKYBLUE,
SLATEBLUE,
SLATEGRAY,
SLATEGREY,
SNOW,
SPRINGGREEN,
STEELBLUE,
TAN,
TEAL,
THISTLE,
TOMATO,
TURQUOISE,
VIOLET,
WHEAT,
WHITE,
WHITESMOKE,
YELLOW,
YELLOWGREEN,
};
/**
* @brief
* Get the RGB color corresponding to the given enum value.
*/
cv::Vec3d getRGBColor(const int color);
} // namespace rgb_colors
} // namespace cv_bridge
#endif
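A short usage sketch (illustrative, not part of this header); getRGBColor returns a
cv::Vec3d with each channel normalized to [0, 1], so scale to 255 for 8-bit drawing:

  #include <cv_bridge/rgb_colors.h>

  cv::Vec3d c = cv_bridge::rgb_colors::getRGBColor(cv_bridge::rgb_colors::TEAL);
  cv::Scalar bgr(255 * c[2], 255 * c[1], 255 * c[0]);  // reorder RGB -> BGR for OpenCV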

View File

@@ -0,0 +1,41 @@
<package format="2">
  <name>cv_bridge</name>
  <version>1.13.1</version>
  <description>
    This contains CvBridge, which converts between ROS
    Image messages and OpenCV images.
  </description>
  <author>Patrick Mihelich</author>
  <author>James Bowman</author>
  <maintainer email="vincent.rabaud@gmail.com">Vincent Rabaud</maintainer>
  <license>BSD</license>

  <url type="website">http://www.ros.org/wiki/cv_bridge</url>
  <url type="repository">https://github.com/ros-perception/vision_opencv</url>
  <url type="bugtracker">https://github.com/ros-perception/vision_opencv/issues</url>

  <export>
    <rosdoc config="rosdoc.yaml" />
  </export>

  <buildtool_depend>catkin</buildtool_depend>

  <build_depend>boost</build_depend>
  <build_depend>libopencv-dev</build_depend>
  <build_depend>python</build_depend>
  <build_depend>python-opencv</build_depend>
  <build_depend>rosconsole</build_depend>
  <build_depend>sensor_msgs</build_depend>

  <exec_depend>boost</exec_depend>
  <exec_depend>libopencv-dev</exec_depend>
  <exec_depend>python</exec_depend>
  <exec_depend>python-opencv</exec_depend>
  <exec_depend>rosconsole</exec_depend>

  <build_export_depend>libopencv-dev</build_export_depend>
  <build_export_depend>sensor_msgs</build_export_depend>

  <test_depend>rostest</test_depend>
  <test_depend>python-numpy</test_depend>

  <doc_depend>dvipng</doc_depend>
</package>

View File

@@ -0,0 +1,8 @@
configure_file(__init__.py.plain.in
               ${CATKIN_DEVEL_PREFIX}/${CATKIN_PACKAGE_PYTHON_DESTINATION}/boost/__init__.py
               @ONLY
)

install(FILES ${CATKIN_DEVEL_PREFIX}/${CATKIN_PACKAGE_PYTHON_DESTINATION}/boost/__init__.py
        DESTINATION ${CATKIN_PACKAGE_PYTHON_DESTINATION}/boost/
)

View File

@@ -0,0 +1,8 @@
from .core import CvBridge, CvBridgeError

# python bindings
try:
    # This try is just to satisfy doc jobs that are built differently.
    from cv_bridge.boost.cv_bridge_boost import cvtColorForDisplay, getCvType
except ImportError:
    pass

View File

@@ -0,0 +1,266 @@
# Software License Agreement (BSD License)
#
# Copyright (c) 2011, Willow Garage, Inc.
# Copyright (c) 2016, Tal Regev.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import sensor_msgs.msg
import sys
class CvBridgeError(TypeError):
    """
    This is the error raised by :class:`cv_bridge.CvBridge` methods when they fail.
    """
    pass


class CvBridge(object):
    """
    The CvBridge is an object that converts between OpenCV Images and ROS Image messages.

    .. doctest::
        :options: -ELLIPSIS, +NORMALIZE_WHITESPACE

        >>> import cv2
        >>> import numpy as np
        >>> from cv_bridge import CvBridge
        >>> br = CvBridge()
        >>> dtype, n_channels = br.encoding_to_cvtype2('8UC3')
        >>> im = np.ndarray(shape=(480, 640, n_channels), dtype=dtype)
        >>> msg = br.cv2_to_imgmsg(im)  # Convert the image to a message
        >>> im2 = br.imgmsg_to_cv2(msg)  # Convert the message to a new image
        >>> cmprsmsg = br.cv2_to_compressed_imgmsg(im)  # Convert the image to a compressed message
        >>> im22 = br.compressed_imgmsg_to_cv2(cmprsmsg)  # Convert the compressed message to a new image
        >>> cv2.imwrite("this_was_a_message_briefly.png", im2)
    """

    def __init__(self):
        import cv2
        self.cvtype_to_name = {}
        self.cvdepth_to_numpy_depth = {cv2.CV_8U: 'uint8', cv2.CV_8S: 'int8', cv2.CV_16U: 'uint16',
                                       cv2.CV_16S: 'int16', cv2.CV_32S: 'int32', cv2.CV_32F: 'float32',
                                       cv2.CV_64F: 'float64'}
        for t in ["8U", "8S", "16U", "16S", "32S", "32F", "64F"]:
            for c in [1, 2, 3, 4]:
                nm = "%sC%d" % (t, c)
                self.cvtype_to_name[getattr(cv2, "CV_%s" % nm)] = nm
        self.numpy_type_to_cvtype = {'uint8': '8U', 'int8': '8S', 'uint16': '16U',
                                     'int16': '16S', 'int32': '32S', 'float32': '32F',
                                     'float64': '64F'}
        self.numpy_type_to_cvtype.update(dict((v, k) for (k, v) in self.numpy_type_to_cvtype.items()))

    def dtype_with_channels_to_cvtype2(self, dtype, n_channels):
        return '%sC%d' % (self.numpy_type_to_cvtype[dtype.name], n_channels)

    def cvtype2_to_dtype_with_channels(self, cvtype):
        from cv_bridge.boost.cv_bridge_boost import CV_MAT_CNWrap, CV_MAT_DEPTHWrap
        return self.cvdepth_to_numpy_depth[CV_MAT_DEPTHWrap(cvtype)], CV_MAT_CNWrap(cvtype)

    def encoding_to_cvtype2(self, encoding):
        from cv_bridge.boost.cv_bridge_boost import getCvType
        try:
            return getCvType(encoding)
        except RuntimeError as e:
            raise CvBridgeError(e)

    def encoding_to_dtype_with_channels(self, encoding):
        return self.cvtype2_to_dtype_with_channels(self.encoding_to_cvtype2(encoding))

    def compressed_imgmsg_to_cv2(self, cmprs_img_msg, desired_encoding="passthrough"):
        """
        Convert a sensor_msgs::CompressedImage message to an OpenCV :cpp:type:`cv::Mat`.

        :param cmprs_img_msg: A :cpp:type:`sensor_msgs::CompressedImage` message
        :param desired_encoding: The encoding of the image data, one of the following strings:

            * ``"passthrough"``
            * one of the standard strings in sensor_msgs/image_encodings.h

        :rtype: :cpp:type:`cv::Mat`
        :raises CvBridgeError: when conversion is not possible.

        If desired_encoding is ``"passthrough"``, then the returned image has the same format
        as cmprs_img_msg. Otherwise desired_encoding must be one of the standard image encodings.

        This function returns an OpenCV :cpp:type:`cv::Mat` on success, or raises
        :exc:`cv_bridge.CvBridgeError` on failure.

        If the image only has one channel, the shape has size 2 (height and width).
        """
        import cv2
        import numpy as np

        str_msg = cmprs_img_msg.data
        buf = np.ndarray(shape=(1, len(str_msg)),
                         dtype=np.uint8, buffer=cmprs_img_msg.data)
        im = cv2.imdecode(buf, cv2.IMREAD_ANYCOLOR)

        if desired_encoding == "passthrough":
            return im

        from cv_bridge.boost.cv_bridge_boost import cvtColor2

        try:
            res = cvtColor2(im, "bgr8", desired_encoding)
        except RuntimeError as e:
            raise CvBridgeError(e)

        return res

    def imgmsg_to_cv2(self, img_msg, desired_encoding="passthrough"):
        """
        Convert a sensor_msgs::Image message to an OpenCV :cpp:type:`cv::Mat`.

        :param img_msg: A :cpp:type:`sensor_msgs::Image` message
        :param desired_encoding: The encoding of the image data, one of the following strings:

            * ``"passthrough"``
            * one of the standard strings in sensor_msgs/image_encodings.h

        :rtype: :cpp:type:`cv::Mat`
        :raises CvBridgeError: when conversion is not possible.

        If desired_encoding is ``"passthrough"``, then the returned image has the same format
        as img_msg. Otherwise desired_encoding must be one of the standard image encodings.

        This function returns an OpenCV :cpp:type:`cv::Mat` on success, or raises
        :exc:`cv_bridge.CvBridgeError` on failure.

        If the image only has one channel, the shape has size 2 (height and width).
        """
        import cv2
        import numpy as np
        dtype, n_channels = self.encoding_to_dtype_with_channels(img_msg.encoding)
        dtype = np.dtype(dtype)
        dtype = dtype.newbyteorder('>' if img_msg.is_bigendian else '<')
        if n_channels == 1:
            im = np.ndarray(shape=(img_msg.height, img_msg.width),
                            dtype=dtype, buffer=img_msg.data)
        else:
            im = np.ndarray(shape=(img_msg.height, img_msg.width, n_channels),
                            dtype=dtype, buffer=img_msg.data)
        # If the byte order differs between the message and the system, swap it.
        if img_msg.is_bigendian == (sys.byteorder == 'little'):
            im = im.byteswap().newbyteorder()

        if desired_encoding == "passthrough":
            return im

        from cv_bridge.boost.cv_bridge_boost import cvtColor2

        try:
            res = cvtColor2(im, img_msg.encoding, desired_encoding)
        except RuntimeError as e:
            raise CvBridgeError(e)

        return res

    def cv2_to_compressed_imgmsg(self, cvim, dst_format="jpg"):
        """
        Convert an OpenCV :cpp:type:`cv::Mat` to a ROS sensor_msgs::CompressedImage message.

        :param cvim: An OpenCV :cpp:type:`cv::Mat`
        :param dst_format: The format of the image data, one of the following strings
            (see http://docs.opencv.org/2.4/modules/highgui/doc/reading_and_writing_images_and_video.html):

            * bmp, dib
            * jpeg, jpg, jpe
            * jp2
            * png
            * pbm, pgm, ppm
            * sr, ras
            * tiff, tif

        :rtype: A sensor_msgs.msg.CompressedImage message
        :raises CvBridgeError: when the ``cvim`` has a type that is incompatible with ``dst_format``

        This function returns a sensor_msgs::CompressedImage message on success,
        or raises :exc:`cv_bridge.CvBridgeError` on failure.
        """
        import cv2
        import numpy as np
        if not isinstance(cvim, (np.ndarray, np.generic)):
            raise TypeError('Your input type is not a numpy array')
        cmprs_img_msg = sensor_msgs.msg.CompressedImage()
        cmprs_img_msg.format = dst_format
        ext_format = '.' + dst_format
        try:
            cmprs_img_msg.data = np.array(cv2.imencode(ext_format, cvim)[1]).tostring()
        except RuntimeError as e:
            raise CvBridgeError(e)

        return cmprs_img_msg

    def cv2_to_imgmsg(self, cvim, encoding="passthrough"):
        """
        Convert an OpenCV :cpp:type:`cv::Mat` to a ROS sensor_msgs::Image message.

        :param cvim: An OpenCV :cpp:type:`cv::Mat`
        :param encoding: The encoding of the image data, one of the following strings:

            * ``"passthrough"``
            * one of the standard strings in sensor_msgs/image_encodings.h

        :rtype: A sensor_msgs.msg.Image message
        :raises CvBridgeError: when the ``cvim`` has a type that is incompatible with ``encoding``

        If encoding is ``"passthrough"``, then the message has the same encoding as the image's
        OpenCV type. Otherwise encoding must be one of the standard image encodings.

        This function returns a sensor_msgs::Image message on success,
        or raises :exc:`cv_bridge.CvBridgeError` on failure.
        """
        import cv2
        import numpy as np
        if not isinstance(cvim, (np.ndarray, np.generic)):
            raise TypeError('Your input type is not a numpy array')
        img_msg = sensor_msgs.msg.Image()
        img_msg.height = cvim.shape[0]
        img_msg.width = cvim.shape[1]
        if len(cvim.shape) < 3:
            cv_type = self.dtype_with_channels_to_cvtype2(cvim.dtype, 1)
        else:
            cv_type = self.dtype_with_channels_to_cvtype2(cvim.dtype, cvim.shape[2])
        if encoding == "passthrough":
            img_msg.encoding = cv_type
        else:
            img_msg.encoding = encoding
            # Verify that the supplied encoding is compatible with the type of the OpenCV image
            if self.cvtype_to_name[self.encoding_to_cvtype2(encoding)] != cv_type:
                raise CvBridgeError("encoding specified as %s, but image has incompatible type %s" % (encoding, cv_type))
        if cvim.dtype.byteorder == '>':
            img_msg.is_bigendian = True
        img_msg.data = cvim.tostring()
        img_msg.step = len(img_msg.data) // img_msg.height
        return img_msg

View File

@@ -0,0 +1,8 @@
- builder: doxygen
  name: C++ API
  output_dir: c++
  file_patterns: '*.c *.cpp *.h *.cc *.hh *.dox'
- builder: sphinx
  name: Python API
  output_dir: python
  sphinx_root_dir: doc

View File

@@ -0,0 +1,10 @@
#!/usr/bin/env python
from distutils.core import setup
from catkin_pkg.python_setup import generate_distutils_setup
d = generate_distutils_setup()
d['packages'] = ['cv_bridge']
d['package_dir'] = {'' : 'python'}
setup(**d)

View File

@@ -0,0 +1,61 @@
# add library
include_directories(./)
add_library(${PROJECT_NAME} cv_bridge.cpp rgb_colors.cpp)
add_dependencies(${PROJECT_NAME} ${catkin_EXPORTED_TARGETS})
target_link_libraries(${PROJECT_NAME} ${OpenCV_LIBRARIES} ${catkin_LIBRARIES})

install(TARGETS ${PROJECT_NAME} DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION})

if(NOT ANDROID)
  # add a Boost Python library
  find_package(PythonInterp REQUIRED)
  find_package(PythonLibs "${PYTHON_VERSION_MAJOR}.${PYTHON_VERSION_MINOR}")

  # Get the numpy include directory from its python module
  if(NOT PYTHON_NUMPY_INCLUDE_DIR)
    execute_process(COMMAND ${PYTHON_EXECUTABLE} -c "import numpy; print(numpy.get_include())"
                    RESULT_VARIABLE PYTHON_NUMPY_PROCESS
                    OUTPUT_VARIABLE PYTHON_NUMPY_INCLUDE_DIR
                    OUTPUT_STRIP_TRAILING_WHITESPACE)
    if(PYTHON_NUMPY_PROCESS EQUAL 0)
      file(TO_CMAKE_PATH "${PYTHON_NUMPY_INCLUDE_DIR}" PYTHON_NUMPY_INCLUDE_CMAKE_PATH)
      set(PYTHON_NUMPY_INCLUDE_DIR ${PYTHON_NUMPY_INCLUDE_CMAKE_PATH} CACHE PATH "Numpy include directory")
    else()
      message(SEND_ERROR "Could not determine the NumPy include directory, verify that NumPy was installed correctly.")
    endif()
  endif()

  include_directories(${PYTHON_INCLUDE_PATH} ${Boost_INCLUDE_DIRS} ${PYTHON_NUMPY_INCLUDE_DIR})

  if(PYTHON_VERSION_MAJOR VERSION_EQUAL 3)
    add_definitions(-DPYTHON3)
  endif()

  if(OpenCV_VERSION_MAJOR VERSION_EQUAL 3)
    add_library(${PROJECT_NAME}_boost module.cpp module_opencv3.cpp)
  else()
    add_library(${PROJECT_NAME}_boost module.cpp module_opencv2.cpp)
  endif()

  target_link_libraries(${PROJECT_NAME}_boost ${Boost_LIBRARIES}
                        ${catkin_LIBRARIES}
                        ${PYTHON_LIBRARIES}
                        ${PROJECT_NAME}
  )

  set_target_properties(${PROJECT_NAME}_boost PROPERTIES
                        LIBRARY_OUTPUT_DIRECTORY ${CATKIN_DEVEL_PREFIX}/${CATKIN_GLOBAL_PYTHON_DESTINATION}/${PROJECT_NAME}/boost/
                        RUNTIME_OUTPUT_DIRECTORY ${CATKIN_DEVEL_PREFIX}/${CATKIN_GLOBAL_PYTHON_DESTINATION}/${PROJECT_NAME}/boost/
                        PREFIX ""
  )
  if(APPLE)
    set_target_properties(${PROJECT_NAME}_boost PROPERTIES SUFFIX ".so")
  endif()
  if(MSVC)
    set_target_properties(${PROJECT_NAME}_boost PROPERTIES SUFFIX ".pyd")
  endif()

  install(TARGETS ${PROJECT_NAME}_boost DESTINATION ${CATKIN_PACKAGE_PYTHON_DESTINATION}/boost/)
endif()

View File

@@ -0,0 +1,2 @@
This code is taken from Boost at https://github.com/boostorg/endian.git
We should remove this folder once Boost 1.58 or above is the default.

View File

@@ -0,0 +1,192 @@
// scoped_enum.hpp ---------------------------------------------------------//
// Copyright Beman Dawes, 2009
// Copyright (C) 2011-2012 Vicente J. Botet Escriba
// Copyright (C) 2012 Anthony Williams
// Distributed under the Boost Software License, Version 1.0.
// See http://www.boost.org/LICENSE_1_0.txt
#ifndef BOOST_CORE_SCOPED_ENUM_HPP
#define BOOST_CORE_SCOPED_ENUM_HPP
#include <boost/config.hpp>
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
#endif
namespace boost
{
#ifdef BOOST_NO_CXX11_SCOPED_ENUMS
/**
* Meta-function to get the native enum type associated to an enum class or its emulation.
*/
template <typename EnumType>
struct native_type
{
/**
* The member typedef type names the native enum type associated to the scoped enum,
* which is itself if the compiler supports scoped enums, or EnumType::enum_type if it is an emulated scoped enum.
*/
typedef typename EnumType::enum_type type;
};
/**
* Casts a scoped enum to its underlying type.
*
* This function is useful when working with scoped enum classes, which don't implicitly convert to the underlying type.
* @param v A scoped enum.
* @returns The underlying value.
* @throws Nothing.
*/
template <typename UnderlyingType, typename EnumType>
UnderlyingType underlying_cast(EnumType v)
{
return v.get_underlying_value_();
}
/**
* Casts a scoped enum to its native enum type.
*
* This function is useful for making programs portable where the scoped enum emulation cannot be used while native enums can.
*
* EnumType: the scoped enum type.
*
* @param v A scoped enum.
* @returns The native enum value.
* @throws Nothing.
*/
template <typename EnumType>
inline
typename EnumType::enum_type native_value(EnumType e)
{
return e.get_native_value_();
}
#else // BOOST_NO_CXX11_SCOPED_ENUMS
template <typename EnumType>
struct native_type
{
typedef EnumType type;
};
template <typename UnderlyingType, typename EnumType>
UnderlyingType underlying_cast(EnumType v)
{
return static_cast<UnderlyingType>(v);
}
template <typename EnumType>
inline
EnumType native_value(EnumType e)
{
return e;
}
#endif // BOOST_NO_CXX11_SCOPED_ENUMS
}
#ifdef BOOST_NO_CXX11_SCOPED_ENUMS
#ifndef BOOST_NO_CXX11_EXPLICIT_CONVERSION_OPERATORS
#define BOOST_SCOPED_ENUM_UT_DECLARE_CONVERSION_OPERATOR \
explicit operator underlying_type() const BOOST_NOEXCEPT { return get_underlying_value_(); }
#else
#define BOOST_SCOPED_ENUM_UT_DECLARE_CONVERSION_OPERATOR
#endif
/**
* Start a declaration of a scoped enum.
*
* @param EnumType The new scoped enum.
* @param UnderlyingType The underlying type.
*/
#define BOOST_SCOPED_ENUM_UT_DECLARE_BEGIN(EnumType, UnderlyingType) \
struct EnumType { \
typedef void is_boost_scoped_enum_tag; \
typedef UnderlyingType underlying_type; \
EnumType() BOOST_NOEXCEPT {} \
explicit EnumType(underlying_type v) BOOST_NOEXCEPT : v_(v) {} \
underlying_type get_underlying_value_() const BOOST_NOEXCEPT { return v_; } \
BOOST_SCOPED_ENUM_UT_DECLARE_CONVERSION_OPERATOR \
private: \
underlying_type v_; \
typedef EnumType self_type; \
public: \
enum enum_type
#define BOOST_SCOPED_ENUM_DECLARE_END2() \
enum_type get_native_value_() const BOOST_NOEXCEPT { return enum_type(v_); } \
friend bool operator ==(self_type lhs, self_type rhs) BOOST_NOEXCEPT { return enum_type(lhs.v_)==enum_type(rhs.v_); } \
friend bool operator ==(self_type lhs, enum_type rhs) BOOST_NOEXCEPT { return enum_type(lhs.v_)==rhs; } \
friend bool operator ==(enum_type lhs, self_type rhs) BOOST_NOEXCEPT { return lhs==enum_type(rhs.v_); } \
friend bool operator !=(self_type lhs, self_type rhs) BOOST_NOEXCEPT { return enum_type(lhs.v_)!=enum_type(rhs.v_); } \
friend bool operator !=(self_type lhs, enum_type rhs) BOOST_NOEXCEPT { return enum_type(lhs.v_)!=rhs; } \
friend bool operator !=(enum_type lhs, self_type rhs) BOOST_NOEXCEPT { return lhs!=enum_type(rhs.v_); } \
friend bool operator <(self_type lhs, self_type rhs) BOOST_NOEXCEPT { return enum_type(lhs.v_)<enum_type(rhs.v_); } \
friend bool operator <(self_type lhs, enum_type rhs) BOOST_NOEXCEPT { return enum_type(lhs.v_)<rhs; } \
friend bool operator <(enum_type lhs, self_type rhs) BOOST_NOEXCEPT { return lhs<enum_type(rhs.v_); } \
friend bool operator <=(self_type lhs, self_type rhs) BOOST_NOEXCEPT { return enum_type(lhs.v_)<=enum_type(rhs.v_); } \
friend bool operator <=(self_type lhs, enum_type rhs) BOOST_NOEXCEPT { return enum_type(lhs.v_)<=rhs; } \
friend bool operator <=(enum_type lhs, self_type rhs) BOOST_NOEXCEPT { return lhs<=enum_type(rhs.v_); } \
friend bool operator >(self_type lhs, self_type rhs) BOOST_NOEXCEPT { return enum_type(lhs.v_)>enum_type(rhs.v_); } \
friend bool operator >(self_type lhs, enum_type rhs) BOOST_NOEXCEPT { return enum_type(lhs.v_)>rhs; } \
friend bool operator >(enum_type lhs, self_type rhs) BOOST_NOEXCEPT { return lhs>enum_type(rhs.v_); } \
friend bool operator >=(self_type lhs, self_type rhs) BOOST_NOEXCEPT { return enum_type(lhs.v_)>=enum_type(rhs.v_); } \
friend bool operator >=(self_type lhs, enum_type rhs) BOOST_NOEXCEPT { return enum_type(lhs.v_)>=rhs; } \
friend bool operator >=(enum_type lhs, self_type rhs) BOOST_NOEXCEPT { return lhs>=enum_type(rhs.v_); } \
};
#define BOOST_SCOPED_ENUM_DECLARE_END(EnumType) \
; \
EnumType(enum_type v) BOOST_NOEXCEPT : v_(v) {} \
BOOST_SCOPED_ENUM_DECLARE_END2()
/**
* Starts a declaration of a scoped enum with the default int underlying type.
*
* @param EnumType The new scoped enum.
*/
#define BOOST_SCOPED_ENUM_DECLARE_BEGIN(EnumType) \
BOOST_SCOPED_ENUM_UT_DECLARE_BEGIN(EnumType,int)
/**
* Name of the native enum type.
*
* @param EnumType The new scoped enum.
*/
#define BOOST_SCOPED_ENUM_NATIVE(EnumType) EnumType::enum_type
/**
* Forward declares a scoped enum.
*
* @param EnumType The scoped enum.
*/
#define BOOST_SCOPED_ENUM_FORWARD_DECLARE(EnumType) struct EnumType
#else // BOOST_NO_CXX11_SCOPED_ENUMS
#define BOOST_SCOPED_ENUM_UT_DECLARE_BEGIN(EnumType,UnderlyingType) enum class EnumType : UnderlyingType
#define BOOST_SCOPED_ENUM_DECLARE_BEGIN(EnumType) enum class EnumType
#define BOOST_SCOPED_ENUM_DECLARE_END2()
#define BOOST_SCOPED_ENUM_DECLARE_END(EnumType) ;
#define BOOST_SCOPED_ENUM_NATIVE(EnumType) EnumType
#define BOOST_SCOPED_ENUM_FORWARD_DECLARE(EnumType) enum class EnumType
#endif // BOOST_NO_CXX11_SCOPED_ENUMS
// Deprecated macros
#define BOOST_SCOPED_ENUM_START(name) BOOST_SCOPED_ENUM_DECLARE_BEGIN(name)
#define BOOST_SCOPED_ENUM_END BOOST_SCOPED_ENUM_DECLARE_END2()
#define BOOST_SCOPED_ENUM(name) BOOST_SCOPED_ENUM_NATIVE(name)
#endif // BOOST_CORE_SCOPED_ENUM_HPP
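A usage sketch of the emulation macros (illustrative, not part of this header); the
declared enum behaves like a C++11 scoped enum on both old and new compilers:

  #include <boost/core/scoped_enum.hpp>

  BOOST_SCOPED_ENUM_UT_DECLARE_BEGIN(algae, char)
  {
    green, red, cyan
  }
  BOOST_SCOPED_ENUM_DECLARE_END(algae)

  void f(algae x)
  {
    if (x == algae::red)   // enumerators stay scoped in both modes
    { /* ... */ }
  }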

View File

@@ -0,0 +1,488 @@
// boost/endian/conversion.hpp -------------------------------------------------------//
// Copyright Beman Dawes 2010, 2011, 2014
// Distributed under the Boost Software License, Version 1.0.
// http://www.boost.org/LICENSE_1_0.txt
#ifndef BOOST_ENDIAN_CONVERSION_HPP
#define BOOST_ENDIAN_CONVERSION_HPP
#include <boost/config.hpp>
#include <boost/predef/detail/endian_compat.h>
#include <boost/cstdint.hpp>
#include <boost/endian/detail/intrinsic.hpp>
#include <boost/core/scoped_enum.hpp>
#include <boost/static_assert.hpp>
#include <algorithm>
#include <cstring> // for memcpy
//------------------------------------- synopsis ---------------------------------------//
namespace boost
{
namespace endian
{
BOOST_SCOPED_ENUM_START(order)
{
big, little,
# ifdef BOOST_BIG_ENDIAN
native = big
# else
native = little
# endif
}; BOOST_SCOPED_ENUM_END
//--------------------------------------------------------------------------------------//
// //
// return-by-value interfaces //
// suggested by Phil Endecott //
// //
// user-defined types (UDTs) //
// //
// All return-by-value conversion function templates are required to be implemented in //
// terms of an unqualified call to "endian_reverse(x)", a function returning the //
// value of x with endianness reversed. This provides a customization point for any //
// UDT that provides a "endian_reverse" free-function meeting the requirements. //
// It must be defined in the same namespace as the UDT itself so that it will be found //
// by argument dependent lookup (ADL). //
// //
//--------------------------------------------------------------------------------------//
// customization for exact-length arithmetic types. See doc/conversion.html/#FAQ.
// Note: The omission of overloads for arithmetic types (typically long, or
// long long) not assigned to one of the exact-length typedefs is a deliberate
// design decision. Such overloads would be non-portable and thus error prone.
inline int8_t endian_reverse(int8_t x) BOOST_NOEXCEPT;
inline int16_t endian_reverse(int16_t x) BOOST_NOEXCEPT;
inline int32_t endian_reverse(int32_t x) BOOST_NOEXCEPT;
inline int64_t endian_reverse(int64_t x) BOOST_NOEXCEPT;
inline uint8_t endian_reverse(uint8_t x) BOOST_NOEXCEPT;
inline uint16_t endian_reverse(uint16_t x) BOOST_NOEXCEPT;
inline uint32_t endian_reverse(uint32_t x) BOOST_NOEXCEPT;
inline uint64_t endian_reverse(uint64_t x) BOOST_NOEXCEPT;
// reverse byte order unless native endianness is big
template <class EndianReversible >
inline EndianReversible big_to_native(EndianReversible x) BOOST_NOEXCEPT;
// Returns: x if native endian order is big, otherwise endian_reverse(x)
template <class EndianReversible >
inline EndianReversible native_to_big(EndianReversible x) BOOST_NOEXCEPT;
// Returns: x if native endian order is big, otherwise endian_reverse(x)
// reverse byte order unless native endianness is little
template <class EndianReversible >
inline EndianReversible little_to_native(EndianReversible x) BOOST_NOEXCEPT;
// Returns: x if native endian order is little, otherwise endian_reverse(x)
template <class EndianReversible >
inline EndianReversible native_to_little(EndianReversible x) BOOST_NOEXCEPT;
// Returns: x if native endian order is little, otherwise endian_reverse(x)
// generic conditional reverse byte order
template <BOOST_SCOPED_ENUM(order) From, BOOST_SCOPED_ENUM(order) To,
class EndianReversible>
inline EndianReversible conditional_reverse(EndianReversible from) BOOST_NOEXCEPT;
// Returns: from if From and To are the same, otherwise endian_reverse(from).
// Remarks: The From == To test, and as a consequence which form the return takes,
// is determined at compile time.
// runtime conditional reverse byte order
template <class EndianReversible >
inline EndianReversible conditional_reverse(EndianReversible from,
BOOST_SCOPED_ENUM(order) from_order, BOOST_SCOPED_ENUM(order) to_order)
BOOST_NOEXCEPT;
// Returns: from_order == to_order ? from : endian_reverse(from).
//------------------------------------------------------------------------------------//
// Q: What happened to bswap, htobe, and the other synonym functions based on names
// popularized by BSD, OS X, and Linux?
// A: Turned out these may be implemented as macros on some systems. Ditto POSIX names
// for such functionality. Since macros would cause endless problems with functions
// of the same names, and these functions are just synonyms anyhow, they have been
// removed.
//------------------------------------------------------------------------------------//
// //
// reverse in place interfaces //
// //
// user-defined types (UDTs) //
// //
// All reverse in place function templates are required to be implemented in terms //
// of an unqualified call to "endian_reverse_inplace(x)", a function reversing //
// the endianness of x, which is a non-const reference. This provides a //
// customization point for any UDT that provides an "endian_reverse_inplace" free-function //
// meeting the requirements. The free-function must be declared in the same //
// namespace as the UDT itself so that it will be found by argument-dependent //
// lookup (ADL). //
// //
//------------------------------------------------------------------------------------//
// reverse in place
template <class EndianReversible>
inline void endian_reverse_inplace(EndianReversible& x) BOOST_NOEXCEPT;
// Effects: x = endian_reverse(x)
// reverse in place unless native endianness is big
template <class EndianReversibleInplace>
inline void big_to_native_inplace(EndianReversibleInplace& x) BOOST_NOEXCEPT;
// Effects: none if native byte-order is big, otherwise endian_reverse_inplace(x)
template <class EndianReversibleInplace>
inline void native_to_big_inplace(EndianReversibleInplace& x) BOOST_NOEXCEPT;
// Effects: none if native byte-order is big, otherwise endian_reverse_inplace(x)
// reverse in place unless native endianness is little
template <class EndianReversibleInplace>
inline void little_to_native_inplace(EndianReversibleInplace& x) BOOST_NOEXCEPT;
// Effects: none if native byte-order is little, otherwise endian_reverse_inplace(x);
template <class EndianReversibleInplace>
inline void native_to_little_inplace(EndianReversibleInplace& x) BOOST_NOEXCEPT;
// Effects: none if native byte-order is little, otherwise endian_reverse_inplace(x);
// generic conditional reverse in place
template <BOOST_SCOPED_ENUM(order) From, BOOST_SCOPED_ENUM(order) To,
class EndianReversibleInplace>
inline void conditional_reverse_inplace(EndianReversibleInplace& x) BOOST_NOEXCEPT;
// runtime reverse in place
template <class EndianReversibleInplace>
inline void conditional_reverse_inplace(EndianReversibleInplace& x,
BOOST_SCOPED_ENUM(order) from_order, BOOST_SCOPED_ENUM(order) to_order)
BOOST_NOEXCEPT;
//----------------------------------- end synopsis -------------------------------------//
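// Illustrative sketch (not part of this header) of the ADL customization point
// described in the synopsis: a UDT becomes usable with the conversion functions
// by providing its own endian_reverse in its own namespace.
//
//   namespace user {
//     struct UdtPair { boost::int32_t a; boost::int16_t b; };
//     inline UdtPair endian_reverse(UdtPair x) BOOST_NOEXCEPT
//     {
//       x.a = boost::endian::endian_reverse(x.a);
//       x.b = boost::endian::endian_reverse(x.b);
//       return x;
//     }
//   }
//
// After this, boost::endian::native_to_big(p) and the other conversion functions
// work for user::UdtPair via argument-dependent lookup.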
namespace detail
{
// generic reverse function template implementation approach using std::reverse
// suggested by Mathias Gaunard. Primary motivation for inclusion is to have an
// independent implementation to test against.
template <class T>
inline T std_endian_reverse(T x) BOOST_NOEXCEPT
{
T tmp(x);
std::reverse(
reinterpret_cast<unsigned char*>(&tmp),
reinterpret_cast<unsigned char*>(&tmp) + sizeof(T));
return tmp;
}
// conditional unaligned reverse copy, patterned after std::reverse_copy
template <class T>
inline void big_reverse_copy(T from, char* to) BOOST_NOEXCEPT;
template <class T>
inline void big_reverse_copy(const char* from, T& to) BOOST_NOEXCEPT;
template <class T>
inline void little_reverse_copy(T from, char* to) BOOST_NOEXCEPT;
template <class T>
inline void little_reverse_copy(const char* from, T& to) BOOST_NOEXCEPT;
} // namespace detail
//--------------------------------------------------------------------------------------//
// //
// return-by-value implementation //
// //
// -- portable approach suggested by tymofey, with avoidance of undefined behavior //
// as suggested by Giovanni Piero Deretta, with a further refinement suggested //
// by Pyry Jahkola. //
// -- intrinsic approach suggested by reviewers, and by David Stone, who provided //
// his Boost licensed macro implementation (detail/intrinsic.hpp) //
// //
//--------------------------------------------------------------------------------------//
inline int8_t endian_reverse(int8_t x) BOOST_NOEXCEPT
{
return x;
}
inline int16_t endian_reverse(int16_t x) BOOST_NOEXCEPT
{
# ifdef BOOST_ENDIAN_NO_INTRINSICS
return (static_cast<uint16_t>(x) << 8)
| (static_cast<uint16_t>(x) >> 8);
# else
return BOOST_ENDIAN_INTRINSIC_BYTE_SWAP_2(static_cast<uint16_t>(x));
# endif
}
inline int32_t endian_reverse(int32_t x) BOOST_NOEXCEPT
{
# ifdef BOOST_ENDIAN_NO_INTRINSICS
uint32_t step16;
step16 = static_cast<uint32_t>(x) << 16 | static_cast<uint32_t>(x) >> 16;
return
((static_cast<uint32_t>(step16) << 8) & 0xff00ff00)
| ((static_cast<uint32_t>(step16) >> 8) & 0x00ff00ff);
# else
return BOOST_ENDIAN_INTRINSIC_BYTE_SWAP_4(static_cast<uint32_t>(x));
# endif
}
inline int64_t endian_reverse(int64_t x) BOOST_NOEXCEPT
{
# ifdef BOOST_ENDIAN_NO_INTRINSICS
uint64_t step32, step16;
step32 = static_cast<uint64_t>(x) << 32 | static_cast<uint64_t>(x) >> 32;
step16 = (step32 & 0x0000FFFF0000FFFFULL) << 16
| (step32 & 0xFFFF0000FFFF0000ULL) >> 16;
return static_cast<int64_t>((step16 & 0x00FF00FF00FF00FFULL) << 8
| (step16 & 0xFF00FF00FF00FF00ULL) >> 8);
# else
return BOOST_ENDIAN_INTRINSIC_BYTE_SWAP_8(static_cast<uint64_t>(x));
# endif
}
inline uint8_t endian_reverse(uint8_t x) BOOST_NOEXCEPT
{
return x;
}
inline uint16_t endian_reverse(uint16_t x) BOOST_NOEXCEPT
{
# ifdef BOOST_ENDIAN_NO_INTRINSICS
return (x << 8)
| (x >> 8);
# else
return BOOST_ENDIAN_INTRINSIC_BYTE_SWAP_2(x);
# endif
}
inline uint32_t endian_reverse(uint32_t x) BOOST_NOEXCEPT
{
# ifdef BOOST_ENDIAN_NO_INTRINSICS
uint32_t step16;
step16 = x << 16 | x >> 16;
return
((step16 << 8) & 0xff00ff00)
| ((step16 >> 8) & 0x00ff00ff);
# else
return BOOST_ENDIAN_INTRINSIC_BYTE_SWAP_4(x);
# endif
}
inline uint64_t endian_reverse(uint64_t x) BOOST_NOEXCEPT
{
# ifdef BOOST_ENDIAN_NO_INTRINSICS
uint64_t step32, step16;
step32 = x << 32 | x >> 32;
step16 = (step32 & 0x0000FFFF0000FFFFULL) << 16
| (step32 & 0xFFFF0000FFFF0000ULL) >> 16;
return (step16 & 0x00FF00FF00FF00FFULL) << 8
| (step16 & 0xFF00FF00FF00FF00ULL) >> 8;
# else
return BOOST_ENDIAN_INTRINSIC_BYTE_SWAP_8(x);
# endif
}
template <class EndianReversible >
inline EndianReversible big_to_native(EndianReversible x) BOOST_NOEXCEPT
{
# ifdef BOOST_BIG_ENDIAN
return x;
# else
return endian_reverse(x);
# endif
}
template <class EndianReversible >
inline EndianReversible native_to_big(EndianReversible x) BOOST_NOEXCEPT
{
# ifdef BOOST_BIG_ENDIAN
return x;
# else
return endian_reverse(x);
# endif
}
template <class EndianReversible >
inline EndianReversible little_to_native(EndianReversible x) BOOST_NOEXCEPT
{
# ifdef BOOST_LITTLE_ENDIAN
return x;
# else
return endian_reverse(x);
# endif
}
template <class EndianReversible >
inline EndianReversible native_to_little(EndianReversible x) BOOST_NOEXCEPT
{
# ifdef BOOST_LITTLE_ENDIAN
return x;
# else
return endian_reverse(x);
# endif
}
namespace detail
{
// Primary template and specializations to support endian_reverse().
// See rationale in endian_reverse() below.
template <BOOST_SCOPED_ENUM(order) From, BOOST_SCOPED_ENUM(order) To,
class EndianReversible>
class value_converter ; // primary template
template <class T> class value_converter <order::big, order::big, T>
{public: T operator()(T x) BOOST_NOEXCEPT {return x;}};
template <class T> class value_converter <order::little, order::little, T>
{public: T operator()(T x) BOOST_NOEXCEPT {return x;}};
template <class T> class value_converter <order::big, order::little, T>
{public: T operator()(T x) BOOST_NOEXCEPT {return endian_reverse(x);}};
template <class T> class value_converter <order::little, order::big, T>
{public: T operator()(T x) BOOST_NOEXCEPT {return endian_reverse(x);}};
}
// generic conditional reverse
template <BOOST_SCOPED_ENUM(order) From, BOOST_SCOPED_ENUM(order) To,
class EndianReversible>
inline EndianReversible conditional_reverse(EndianReversible from) BOOST_NOEXCEPT {
// work around lack of function template partial specialization by instantiating
// a function object of a class that is partially specialized on the two order
// template parameters, and then calling its operator().
detail::value_converter <From, To, EndianReversible> tmp;
return tmp(from);
}
// runtime conditional reverse
template <class EndianReversible >
inline EndianReversible conditional_reverse(EndianReversible from,
BOOST_SCOPED_ENUM(order) from_order, BOOST_SCOPED_ENUM(order) to_order) BOOST_NOEXCEPT
{
return from_order == to_order ? from : endian_reverse(from);
}
//--------------------------------------------------------------------------------------//
// reverse-in-place implementation //
//--------------------------------------------------------------------------------------//
// reverse in place
template <class EndianReversible>
inline void endian_reverse_inplace(EndianReversible& x) BOOST_NOEXCEPT
{
x = endian_reverse(x);
}
template <class EndianReversibleInplace>
# ifdef BOOST_BIG_ENDIAN
inline void big_to_native_inplace(EndianReversibleInplace&) BOOST_NOEXCEPT {}
# else
inline void big_to_native_inplace(EndianReversibleInplace& x) BOOST_NOEXCEPT
{ endian_reverse_inplace(x); }
# endif
template <class EndianReversibleInplace>
# ifdef BOOST_BIG_ENDIAN
inline void native_to_big_inplace(EndianReversibleInplace&) BOOST_NOEXCEPT {}
# else
inline void native_to_big_inplace(EndianReversibleInplace& x) BOOST_NOEXCEPT
{
endian_reverse_inplace(x);
}
# endif
template <class EndianReversibleInplace>
# ifdef BOOST_LITTLE_ENDIAN
inline void little_to_native_inplace(EndianReversibleInplace&) BOOST_NOEXCEPT {}
# else
inline void little_to_native_inplace(EndianReversibleInplace& x) BOOST_NOEXCEPT
{ endian_reverse_inplace(x); }
# endif
template <class EndianReversibleInplace>
# ifdef BOOST_LITTLE_ENDIAN
inline void native_to_little_inplace(EndianReversibleInplace&) BOOST_NOEXCEPT {}
# else
inline void native_to_little_inplace(EndianReversibleInplace& x) BOOST_NOEXCEPT
{
endian_reverse_inplace(x);
}
# endif
namespace detail
{
// Primary template and specializations support generic
// endian_reverse_inplace().
// See rationale in endian_reverse_inplace() below.
template <BOOST_SCOPED_ENUM(order) From, BOOST_SCOPED_ENUM(order) To,
class EndianReversibleInplace>
class converter; // primary template
template <class T> class converter<order::big, order::big, T>
{public: void operator()(T&) BOOST_NOEXCEPT {/*no effect*/}};
template <class T> class converter<order::little, order::little, T>
{public: void operator()(T&) BOOST_NOEXCEPT {/*no effect*/}};
template <class T> class converter<order::big, order::little, T>
{public: void operator()(T& x) BOOST_NOEXCEPT { endian_reverse_inplace(x); }};
template <class T> class converter<order::little, order::big, T>
{public: void operator()(T& x) BOOST_NOEXCEPT { endian_reverse_inplace(x); }};
} // namespace detail
// generic conditional reverse in place
template <BOOST_SCOPED_ENUM(order) From, BOOST_SCOPED_ENUM(order) To,
class EndianReversibleInplace>
inline void conditional_reverse_inplace(EndianReversibleInplace& x) BOOST_NOEXCEPT
{
// work around lack of function template partial specialization by instantiating
// a function object of a class that is partially specialized on the two order
// template parameters, and then calling its operator().
detail::converter<From, To, EndianReversibleInplace> tmp;
tmp(x); // call operator ()
}
// runtime reverse in place
template <class EndianReversibleInplace>
inline void conditional_reverse_inplace(EndianReversibleInplace& x,
BOOST_SCOPED_ENUM(order) from_order, BOOST_SCOPED_ENUM(order) to_order)
BOOST_NOEXCEPT
{
if (from_order != to_order)
endian_reverse_inplace(x);
}
namespace detail
{
template <class T>
inline void big_reverse_copy(T from, char* to) BOOST_NOEXCEPT
{
# ifdef BOOST_BIG_ENDIAN
std::memcpy(to, reinterpret_cast<const char*>(&from), sizeof(T));
# else
std::reverse_copy(reinterpret_cast<const char*>(&from),
reinterpret_cast<const char*>(&from) + sizeof(T), to);
# endif
}
template <class T>
inline void big_reverse_copy(const char* from, T& to) BOOST_NOEXCEPT
{
# ifdef BOOST_BIG_ENDIAN
std::memcpy(reinterpret_cast<char*>(&to), from, sizeof(T));
# else
std::reverse_copy(from, from + sizeof(T), reinterpret_cast<char*>(&to));
# endif
}
template <class T>
inline void little_reverse_copy(T from, char* to) BOOST_NOEXCEPT
{
# ifdef BOOST_LITTLE_ENDIAN
std::memcpy(to, reinterpret_cast<const char*>(&from), sizeof(T));
# else
std::reverse_copy(reinterpret_cast<const char*>(&from),
reinterpret_cast<const char*>(&from) + sizeof(T), to);
# endif
}
template <class T>
inline void little_reverse_copy(const char* from, T& to) BOOST_NOEXCEPT
{
# ifdef BOOST_LITTLE_ENDIAN
std::memcpy(reinterpret_cast<char*>(&to), from, sizeof(T));
# else
std::reverse_copy(from, from + sizeof(T), reinterpret_cast<char*>(&to));
# endif
}
} // namespace detail
} // namespace endian
} // namespace boost
#endif // BOOST_ENDIAN_CONVERSION_HPP
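A minimal usage sketch of the functions above (illustrative only; the include path assumes the vendored layout used elsewhere in this tree, and the commented values assume a little-endian host):

#include <cstdint>
#include <cstdio>
#include "boost/endian/conversion.hpp"

int main()
{
    std::uint32_t x = 0x01020304;
    std::uint32_t r = boost::endian::endian_reverse(x);  // unconditionally 0x04030201
    std::uint32_t b = boost::endian::native_to_big(x);   // 0x04030201 on little-endian hosts
    boost::endian::native_to_big_inplace(x);             // same conversion, in place
    std::printf("%#x %#x %#x\n", r, b, x);
    return 0;
}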

View File

@@ -0,0 +1,64 @@
// endian/detail/intrinsic.hpp -------------------------------------------------------//
// Copyright (C) 2012 David Stone
// Copyright Beman Dawes 2013
// Distributed under the Boost Software License, Version 1.0.
// http://www.boost.org/LICENSE_1_0.txt
#ifndef BOOST_ENDIAN_INTRINSIC_HPP
#define BOOST_ENDIAN_INTRINSIC_HPP
// Allow user to force BOOST_ENDIAN_NO_INTRINSICS in case they aren't available for a
// particular platform/compiler combination. Please report such platform/compiler
// combinations to the Boost mailing list.
#ifndef BOOST_ENDIAN_NO_INTRINSICS
#ifndef __has_builtin // Optional of course
#define __has_builtin(x) 0 // Compatibility with non-clang compilers
#endif
// Recent versions of GCC and Clang provide intrinsic byte swaps via builtins
#if (defined(__clang__) && __has_builtin(__builtin_bswap32) && __has_builtin(__builtin_bswap64)) \
|| (defined(__GNUC__ ) && \
(__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3)))
# define BOOST_ENDIAN_INTRINSIC_MSG "__builtin_bswap16, etc."
// prior to 4.8, gcc did not provide __builtin_bswap16 on some platforms so we emulate it
// see http://gcc.gnu.org/bugzilla/show_bug.cgi?id=52624
// Clang has a similar problem, but their feature test macros make it easier to detect
# if (defined(__clang__) && __has_builtin(__builtin_bswap16)) \
|| (defined(__GNUC__) &&(__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)))
# define BOOST_ENDIAN_INTRINSIC_BYTE_SWAP_2(x) __builtin_bswap16(x)
# else
# define BOOST_ENDIAN_INTRINSIC_BYTE_SWAP_2(x) __builtin_bswap32((x) << 16)
# endif
# define BOOST_ENDIAN_INTRINSIC_BYTE_SWAP_4(x) __builtin_bswap32(x)
# define BOOST_ENDIAN_INTRINSIC_BYTE_SWAP_8(x) __builtin_bswap64(x)
// Linux systems provide the byteswap.h header, with bswap_16, bswap_32 and bswap_64
#elif defined(__linux__)
// don't check for obsolete forms defined(linux) and defined(__linux) on the theory that
// compilers that predefine only these are so old that byteswap.h probably isn't present.
# define BOOST_ENDIAN_INTRINSIC_MSG "byteswap.h bswap_16, etc."
# include <byteswap.h>
# define BOOST_ENDIAN_INTRINSIC_BYTE_SWAP_2(x) bswap_16(x)
# define BOOST_ENDIAN_INTRINSIC_BYTE_SWAP_4(x) bswap_32(x)
# define BOOST_ENDIAN_INTRINSIC_BYTE_SWAP_8(x) bswap_64(x)
#elif defined(_MSC_VER)
// Microsoft documents these as being compatible since Windows 95 and specifically
// lists runtime library support since Visual Studio 2003 (aka 7.1).
# define BOOST_ENDIAN_INTRINSIC_MSG "cstdlib _byteswap_ushort, etc."
# include <cstdlib>
# define BOOST_ENDIAN_INTRINSIC_BYTE_SWAP_2(x) _byteswap_ushort(x)
# define BOOST_ENDIAN_INTRINSIC_BYTE_SWAP_4(x) _byteswap_ulong(x)
# define BOOST_ENDIAN_INTRINSIC_BYTE_SWAP_8(x) _byteswap_uint64(x)
#else
# define BOOST_ENDIAN_NO_INTRINSICS
# define BOOST_ENDIAN_INTRINSIC_MSG "no byte swap intrinsics"
#endif
#elif !defined(BOOST_ENDIAN_INTRINSIC_MSG)
# define BOOST_ENDIAN_INTRINSIC_MSG "no byte swap intrinsics"
#endif // BOOST_ENDIAN_NO_INTRINSICS
#endif // BOOST_ENDIAN_INTRINSIC_HPP
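A small sketch of how these macros are consumed (illustrative; BOOST_ENDIAN_INTRINSIC_MSG reports which backend was selected at compile time):

#include <cstdio>
#include "boost/endian/detail/intrinsic.hpp"

int main()
{
    std::printf("byte swap backend: %s\n", BOOST_ENDIAN_INTRINSIC_MSG);
#ifndef BOOST_ENDIAN_NO_INTRINSICS
    std::printf("%#x\n", BOOST_ENDIAN_INTRINSIC_BYTE_SWAP_4(0x11223344u));  // 0x44332211
#endif
    return 0;
}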

View File

@@ -0,0 +1,17 @@
/*
Copyright Rene Rivera 2011-2012
Distributed under the Boost Software License, Version 1.0.
(See accompanying file LICENSE_1_0.txt or copy at
http://www.boost.org/LICENSE_1_0.txt)
*/
#ifndef BOOST_PREDEF_DETAIL__CASSERT_H
#define BOOST_PREDEF_DETAIL__CASSERT_H
#if defined(__cplusplus)
#include <cassert>
#else
#include <assert.h>
#endif
#endif

View File

@@ -0,0 +1,26 @@
/*
Copyright Rene Rivera 2013
Distributed under the Boost Software License, Version 1.0.
(See accompanying file LICENSE_1_0.txt or copy at
http://www.boost.org/LICENSE_1_0.txt)
*/
#ifndef BOOST_PREDEF_DETAIL_ENDIAN_COMPAT_H
#define BOOST_PREDEF_DETAIL_ENDIAN_COMPAT_H
#include <boost/predef/other/endian.h>
#if BOOST_ENDIAN_BIG_BYTE
# define BOOST_BIG_ENDIAN
# define BOOST_BYTE_ORDER 4321
#endif
#if BOOST_ENDIAN_LITTLE_BYTE
# define BOOST_LITTLE_ENDIAN
# define BOOST_BYTE_ORDER 1234
#endif
#if BOOST_ENDIAN_LITTLE_WORD
# define BOOST_PDP_ENDIAN
# define BOOST_BYTE_ORDER 2134
#endif
#endif
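These compatibility macros support the classic BYTE_ORDER idiom; a minimal sketch:

#include <boost/predef/detail/endian_compat.h>

#if defined(BOOST_LITTLE_ENDIAN)   /* BOOST_BYTE_ORDER == 1234 */
/* little-endian code path */
#elif defined(BOOST_BIG_ENDIAN)    /* BOOST_BYTE_ORDER == 4321 */
/* big-endian code path */
#endif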

View File

@@ -0,0 +1,17 @@
/*
Copyright Rene Rivera 2011-2012
Distributed under the Boost Software License, Version 1.0.
(See accompanying file LICENSE_1_0.txt or copy at
http://www.boost.org/LICENSE_1_0.txt)
*/
#ifndef BOOST_PREDEF_DETAIL_TEST_H
#define BOOST_PREDEF_DETAIL_TEST_H
#if !defined(BOOST_PREDEF_INTERNAL_GENERATE_TESTS)
#define BOOST_PREDEF_DECLARE_TEST(x,s)
#endif
#endif

View File

@@ -0,0 +1,13 @@
/*
Copyright Rene Rivera 2008-2013
Distributed under the Boost Software License, Version 1.0.
(See accompanying file LICENSE_1_0.txt or copy at
http://www.boost.org/LICENSE_1_0.txt)
*/
#ifndef BOOST_PREDEF_LIBRARY_C__PREFIX_H
#define BOOST_PREDEF_LIBRARY_C__PREFIX_H
#include <boost/predef/detail/_cassert.h>
#endif

View File

@@ -0,0 +1,61 @@
/*
Copyright Rene Rivera 2008-2015
Distributed under the Boost Software License, Version 1.0.
(See accompanying file LICENSE_1_0.txt or copy at
http://www.boost.org/LICENSE_1_0.txt)
*/
#ifndef BOOST_PREDEF_LIBRARY_C_GNU_H
#define BOOST_PREDEF_LIBRARY_C_GNU_H
#include <boost/predef/version_number.h>
#include <boost/predef/make.h>
#include <boost/predef/library/c/_prefix.h>
#if defined(__STDC__)
#include <stddef.h>
#elif defined(__cplusplus)
#include <cstddef>
#endif
/*`
[heading `BOOST_LIB_C_GNU`]
[@http://en.wikipedia.org/wiki/Glibc GNU glibc] Standard C library.
Version number available as major, and minor.
[table
[[__predef_symbol__] [__predef_version__]]
[[`__GLIBC__`] [__predef_detection__]]
[[`__GNU_LIBRARY__`] [__predef_detection__]]
[[`__GLIBC__`, `__GLIBC_MINOR__`] [V.R.0]]
[[`__GNU_LIBRARY__`, `__GNU_LIBRARY_MINOR__`] [V.R.0]]
]
*/
#define BOOST_LIB_C_GNU BOOST_VERSION_NUMBER_NOT_AVAILABLE
#if defined(__GLIBC__) || defined(__GNU_LIBRARY__)
# undef BOOST_LIB_C_GNU
# if defined(__GLIBC__)
# define BOOST_LIB_C_GNU \
BOOST_VERSION_NUMBER(__GLIBC__,__GLIBC_MINOR__,0)
# else
# define BOOST_LIB_C_GNU \
BOOST_VERSION_NUMBER(__GNU_LIBRARY__,__GNU_LIBRARY_MINOR__,0)
# endif
#endif
#if BOOST_LIB_C_GNU
# define BOOST_LIB_C_GNU_AVAILABLE
#endif
#define BOOST_LIB_C_GNU_NAME "GNU"
#endif
#include <boost/predef/detail/test.h>
BOOST_PREDEF_DECLARE_TEST(BOOST_LIB_C_GNU,BOOST_LIB_C_GNU_NAME)
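Typical use is a compile-time version gate (illustrative; the 2.17 cutoff is only an example value):

#include <boost/predef/library/c/gnu.h>

#if BOOST_LIB_C_GNU >= BOOST_VERSION_NUMBER(2,17,0)
/* building against glibc 2.17 or newer */
#endif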

View File

@@ -0,0 +1,89 @@
/*
Copyright Rene Rivera 2008-2015
Distributed under the Boost Software License, Version 1.0.
(See accompanying file LICENSE_1_0.txt or copy at
http://www.boost.org/LICENSE_1_0.txt)
*/
#include <boost/predef/detail/test.h>
#ifndef BOOST_PREDEF_MAKE_H
#define BOOST_PREDEF_MAKE_H
/*
Shorthands for the common version number formats used by vendors...
*/
/*`
[heading `BOOST_PREDEF_MAKE_..` macros]
This set of macros decomposes common vendor version number
macros, which are composed of version, revision, and patch digits.
The naming convention indicates:
* The base of the specified version number. "`BOOST_PREDEF_MAKE_0X`" for
hexadecimal digits, and "`BOOST_PREDEF_MAKE_10`" for decimal digits.
* The format of the vendor version number. Where "`V`" indicates the version digits,
"`R`" indicates the revision digits, "`P`" indicates the patch digits, and "`0`"
indicates an ignored digit.
Macros are:
*/
/*` `BOOST_PREDEF_MAKE_0X_VRP(V)` */
#define BOOST_PREDEF_MAKE_0X_VRP(V) BOOST_VERSION_NUMBER((V&0xF00)>>8,(V&0xF0)>>4,(V&0xF))
/*` `BOOST_PREDEF_MAKE_0X_VVRP(V)` */
#define BOOST_PREDEF_MAKE_0X_VVRP(V) BOOST_VERSION_NUMBER((V&0xFF00)>>8,(V&0xF0)>>4,(V&0xF))
/*` `BOOST_PREDEF_MAKE_0X_VRPP(V)` */
#define BOOST_PREDEF_MAKE_0X_VRPP(V) BOOST_VERSION_NUMBER((V&0xF000)>>12,(V&0xF00)>>8,(V&0xFF))
/*` `BOOST_PREDEF_MAKE_0X_VVRR(V)` */
#define BOOST_PREDEF_MAKE_0X_VVRR(V) BOOST_VERSION_NUMBER((V&0xFF00)>>8,(V&0xFF),0)
/*` `BOOST_PREDEF_MAKE_0X_VRRPPPP(V)` */
#define BOOST_PREDEF_MAKE_0X_VRRPPPP(V) BOOST_VERSION_NUMBER((V&0xF000000)>>24,(V&0xFF0000)>>16,(V&0xFFFF))
/*` `BOOST_PREDEF_MAKE_0X_VVRRP(V)` */
#define BOOST_PREDEF_MAKE_0X_VVRRP(V) BOOST_VERSION_NUMBER((V&0xFF000)>>12,(V&0xFF0)>>4,(V&0xF))
/*` `BOOST_PREDEF_MAKE_0X_VRRPP000(V)` */
#define BOOST_PREDEF_MAKE_0X_VRRPP000(V) BOOST_VERSION_NUMBER((V&0xF0000000)>>28,(V&0xFF00000)>>20,(V&0xFF000)>>12)
/*` `BOOST_PREDEF_MAKE_0X_VVRRPP(V)` */
#define BOOST_PREDEF_MAKE_0X_VVRRPP(V) BOOST_VERSION_NUMBER((V&0xFF0000)>>16,(V&0xFF00)>>8,(V&0xFF))
/*` `BOOST_PREDEF_MAKE_10_VPPP(V)` */
#define BOOST_PREDEF_MAKE_10_VPPP(V) BOOST_VERSION_NUMBER(((V)/1000)%10,0,(V)%1000)
/*` `BOOST_PREDEF_MAKE_10_VRP(V)` */
#define BOOST_PREDEF_MAKE_10_VRP(V) BOOST_VERSION_NUMBER(((V)/100)%10,((V)/10)%10,(V)%10)
/*` `BOOST_PREDEF_MAKE_10_VRP000(V)` */
#define BOOST_PREDEF_MAKE_10_VRP000(V) BOOST_VERSION_NUMBER(((V)/100000)%10,((V)/10000)%10,((V)/1000)%10)
/*` `BOOST_PREDEF_MAKE_10_VRPP(V)` */
#define BOOST_PREDEF_MAKE_10_VRPP(V) BOOST_VERSION_NUMBER(((V)/1000)%10,((V)/100)%10,(V)%100)
/*` `BOOST_PREDEF_MAKE_10_VRR(V)` */
#define BOOST_PREDEF_MAKE_10_VRR(V) BOOST_VERSION_NUMBER(((V)/100)%10,(V)%100,0)
/*` `BOOST_PREDEF_MAKE_10_VRRPP(V)` */
#define BOOST_PREDEF_MAKE_10_VRRPP(V) BOOST_VERSION_NUMBER(((V)/10000)%10,((V)/100)%100,(V)%100)
/*` `BOOST_PREDEF_MAKE_10_VRR000(V)` */
#define BOOST_PREDEF_MAKE_10_VRR000(V) BOOST_VERSION_NUMBER(((V)/100000)%10,((V)/1000)%100,0)
/*` `BOOST_PREDEF_MAKE_10_VV00(V)` */
#define BOOST_PREDEF_MAKE_10_VV00(V) BOOST_VERSION_NUMBER(((V)/100)%100,0,0)
/*` `BOOST_PREDEF_MAKE_10_VVRR(V)` */
#define BOOST_PREDEF_MAKE_10_VVRR(V) BOOST_VERSION_NUMBER(((V)/100)%100,(V)%100,0)
/*` `BOOST_PREDEF_MAKE_10_VVRRPP(V)` */
#define BOOST_PREDEF_MAKE_10_VVRRPP(V) BOOST_VERSION_NUMBER(((V)/10000)%100,((V)/100)%100,(V)%100)
/*` `BOOST_PREDEF_MAKE_10_VVRR0PP00(V)` */
#define BOOST_PREDEF_MAKE_10_VVRR0PP00(V) BOOST_VERSION_NUMBER(((V)/10000000)%100,((V)/100000)%100,((V)/100)%100)
/*` `BOOST_PREDEF_MAKE_10_VVRR0PPPP(V)` */
#define BOOST_PREDEF_MAKE_10_VVRR0PPPP(V) BOOST_VERSION_NUMBER(((V)/10000000)%100,((V)/100000)%100,(V)%10000)
/*` `BOOST_PREDEF_MAKE_10_VVRR00PP00(V)` */
#define BOOST_PREDEF_MAKE_10_VVRR00PP00(V) BOOST_VERSION_NUMBER(((V)/100000000)%100,((V)/1000000)%100,((V)/100)%100)
/*`
[heading `BOOST_PREDEF_MAKE_*..` date macros]
Date decomposition macros return a date relative to the 1970
Epoch. If the month is not available, January 1st is used as the month and day.
If the day is not available, but the month is, the 1st of the month is used as the day.
*/
/*` `BOOST_PREDEF_MAKE_DATE(Y,M,D)` */
#define BOOST_PREDEF_MAKE_DATE(Y,M,D) BOOST_VERSION_NUMBER((Y)%10000-1970,(M)%100,(D)%100)
/*` `BOOST_PREDEF_MAKE_YYYYMMDD(V)` */
#define BOOST_PREDEF_MAKE_YYYYMMDD(V) BOOST_PREDEF_MAKE_DATE(((V)/10000)%10000,((V)/100)%100,(V)%100)
/*` `BOOST_PREDEF_MAKE_YYYY(V)` */
#define BOOST_PREDEF_MAKE_YYYY(V) BOOST_PREDEF_MAKE_DATE(V,1,1)
/*` `BOOST_PREDEF_MAKE_YYYYMM(V)` */
#define BOOST_PREDEF_MAKE_YYYYMM(V) BOOST_PREDEF_MAKE_DATE((V)/100,(V)%100,1)
#endif
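A worked decomposition (illustrative): BOOST_PREDEF_MAKE_10_VVRRPP(40602) computes (40602/10000)%100 = 4, (40602/100)%100 = 6 and 40602%100 = 2, i.e. version 4.6.2:

#include <boost/predef/version_number.h>
#include <boost/predef/make.h>

static_assert(BOOST_PREDEF_MAKE_10_VVRRPP(40602) == BOOST_VERSION_NUMBER(4,6,2),
              "40602 decomposes to the 4.6.2 triplet");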

View File

@@ -0,0 +1,45 @@
/*
Copyright Rene Rivera 2015
Distributed under the Boost Software License, Version 1.0.
(See accompanying file LICENSE_1_0.txt or copy at
http://www.boost.org/LICENSE_1_0.txt)
*/
#ifndef BOOST_PREDEF_OS_ANDROID_H
#define BOOST_PREDEF_OS_ANDROID_H
#include <boost/predef/version_number.h>
#include <boost/predef/make.h>
/*`
[heading `BOOST_OS_ANDROID`]
[@http://en.wikipedia.org/wiki/Android_%28operating_system%29 Android] operating system.
[table
[[__predef_symbol__] [__predef_version__]]
[[`__ANDROID__`] [__predef_detection__]]
]
*/
#define BOOST_OS_ANDROID BOOST_VERSION_NUMBER_NOT_AVAILABLE
#if !defined(BOOST_PREDEF_DETAIL_OS_DETECTED) && ( \
defined(__ANDROID__) \
)
# undef BOOST_OS_ANDROID
# define BOOST_OS_ANDROID BOOST_VERSION_NUMBER_AVAILABLE
#endif
#if BOOST_OS_ANDROID
# define BOOST_OS_ANDROID_AVAILABLE
# include <boost/predef/detail/os_detected.h>
#endif
#define BOOST_OS_ANDROID_NAME "Android"
#endif
#include <boost/predef/detail/test.h>
BOOST_PREDEF_DECLARE_TEST(BOOST_OS_ANDROID,BOOST_OS_ANDROID_NAME)

View File

@@ -0,0 +1,103 @@
/*
Copyright Rene Rivera 2008-2015
Distributed under the Boost Software License, Version 1.0.
(See accompanying file LICENSE_1_0.txt or copy at
http://www.boost.org/LICENSE_1_0.txt)
*/
#ifndef BOOST_PREDEF_OS_BSD_H
#define BOOST_PREDEF_OS_BSD_H
/* Special case: OSX will define BSD predefs if the sys/param.h
* header is included. We can guard against that, but only if we
* detect OSX first. Hence we will force include OSX detection
* before doing any BSD detection.
*/
#include <boost/predef/os/macos.h>
#include <boost/predef/version_number.h>
#include <boost/predef/make.h>
/*`
[heading `BOOST_OS_BSD`]
[@http://en.wikipedia.org/wiki/Berkeley_Software_Distribution BSD] operating system.
BSD has various branch operating systems, and each is detected
individually. This detects the following variations and sets a specific
version number macro to match:
* `BOOST_OS_BSD_DRAGONFLY` [@http://en.wikipedia.org/wiki/DragonFly_BSD DragonFly BSD]
* `BOOST_OS_BSD_FREE` [@http://en.wikipedia.org/wiki/Freebsd FreeBSD]
* `BOOST_OS_BSD_BSDI` [@http://en.wikipedia.org/wiki/BSD/OS BSDi BSD/OS]
* `BOOST_OS_BSD_NET` [@http://en.wikipedia.org/wiki/Netbsd NetBSD]
* `BOOST_OS_BSD_OPEN` [@http://en.wikipedia.org/wiki/Openbsd OpenBSD]
[note The general `BOOST_OS_BSD` is set in all cases to indicate some form
of BSD. If one of the above variants is detected, the corresponding macro is also set.]
[table
[[__predef_symbol__] [__predef_version__]]
[[`BSD`] [__predef_detection__]]
[[`_SYSTYPE_BSD`] [__predef_detection__]]
[[`BSD4_2`] [4.2.0]]
[[`BSD4_3`] [4.3.0]]
[[`BSD4_4`] [4.4.0]]
[[`BSD`] [V.R.0]]
]
*/
#include <boost/predef/os/bsd/bsdi.h>
#include <boost/predef/os/bsd/dragonfly.h>
#include <boost/predef/os/bsd/free.h>
#include <boost/predef/os/bsd/open.h>
#include <boost/predef/os/bsd/net.h>
#ifndef BOOST_OS_BSD
#define BOOST_OS_BSD BOOST_VERSION_NUMBER_NOT_AVAILABLE
#endif
#if !defined(BOOST_PREDEF_DETAIL_OS_DETECTED) && ( \
defined(BSD) || \
defined(_SYSTYPE_BSD) \
)
# undef BOOST_OS_BSD
# include <sys/param.h>
# if !defined(BOOST_OS_BSD) && defined(BSD4_4)
# define BOOST_OS_BSD BOOST_VERSION_NUMBER(4,4,0)
# endif
# if !defined(BOOST_OS_BSD) && defined(BSD4_3)
# define BOOST_OS_BSD BOOST_VERSION_NUMBER(4,3,0)
# endif
# if !defined(BOOST_OS_BSD) && defined(BSD4_2)
# define BOOST_OS_BSD BOOST_VERSION_NUMBER(4,2,0)
# endif
# if !defined(BOOST_OS_BSD) && defined(BSD)
# define BOOST_OS_BSD BOOST_PREDEF_MAKE_10_VVRR(BSD)
# endif
# if !defined(BOOST_OS_BSD)
# define BOOST_OS_BSD BOOST_VERSION_NUMBER_AVAILABLE
# endif
#endif
#if BOOST_OS_BSD
# define BOOST_OS_BSD_AVAILABLE
# include <boost/predef/detail/os_detected.h>
#endif
#define BOOST_OS_BSD_NAME "BSD"
#else
#include <boost/predef/os/bsd/bsdi.h>
#include <boost/predef/os/bsd/dragonfly.h>
#include <boost/predef/os/bsd/free.h>
#include <boost/predef/os/bsd/open.h>
#include <boost/predef/os/bsd/net.h>
#endif
#include <boost/predef/detail/test.h>
BOOST_PREDEF_DECLARE_TEST(BOOST_OS_BSD,BOOST_OS_BSD_NAME)
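A dispatch sketch over the variant macros (illustrative):

#include <cstdio>
#include <boost/predef/os/bsd.h>

int main()
{
#if BOOST_OS_BSD_FREE
    std::puts("FreeBSD");
#elif BOOST_OS_BSD_NET
    std::puts("NetBSD");
#elif BOOST_OS_BSD
    std::puts("some other BSD variant");
#else
    std::puts("not a BSD");
#endif
    return 0;
}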

View File

@@ -0,0 +1,48 @@
/*
Copyright Rene Rivera 2012-2015
Distributed under the Boost Software License, Version 1.0.
(See accompanying file LICENSE_1_0.txt or copy at
http://www.boost.org/LICENSE_1_0.txt)
*/
#ifndef BOOST_PREDEF_OS_BSD_BSDI_H
#define BOOST_PREDEF_OS_BSD_BSDI_H
#include <boost/predef/os/bsd.h>
/*`
[heading `BOOST_OS_BSD_BSDI`]
[@http://en.wikipedia.org/wiki/BSD/OS BSDi BSD/OS] operating system.
[table
[[__predef_symbol__] [__predef_version__]]
[[`__bsdi__`] [__predef_detection__]]
]
*/
#define BOOST_OS_BSD_BSDI BOOST_VERSION_NUMBER_NOT_AVAILABLE
#if !defined(BOOST_PREDEF_DETAIL_OS_DETECTED) && ( \
defined(__bsdi__) \
)
# ifndef BOOST_OS_BSD_AVAILABLE
# define BOOST_OS_BSD BOOST_VERSION_NUMBER_AVAILABLE
# define BOOST_OS_BSD_AVAILABLE
# endif
# undef BOOST_OS_BSD_BSDI
# define BOOST_OS_BSD_BSDI BOOST_VERSION_NUMBER_AVAILABLE
#endif
#if BOOST_OS_BSD_BSDI
# define BOOST_OS_BSD_BSDI_AVAILABLE
# include <boost/predef/detail/os_detected.h>
#endif
#define BOOST_OS_BSD_BSDI_NAME "BSDi BSD/OS"
#endif
#include <boost/predef/detail/test.h>
BOOST_PREDEF_DECLARE_TEST(BOOST_OS_BSD_BSDI,BOOST_OS_BSD_BSDI_NAME)

View File

@@ -0,0 +1,50 @@
/*
Copyright Rene Rivera 2012-2015
Distributed under the Boost Software License, Version 1.0.
(See accompanying file LICENSE_1_0.txt or copy at
http://www.boost.org/LICENSE_1_0.txt)
*/
#ifndef BOOST_PREDEF_OS_BSD_DRAGONFLY_H
#define BOOST_PREDEF_OS_BSD_DRAGONFLY_H
#include <boost/predef/os/bsd.h>
/*`
[heading `BOOST_OS_BSD_DRAGONFLY`]
[@http://en.wikipedia.org/wiki/DragonFly_BSD DragonFly BSD] operating system.
[table
[[__predef_symbol__] [__predef_version__]]
[[`__DragonFly__`] [__predef_detection__]]
]
*/
#define BOOST_OS_BSD_DRAGONFLY BOOST_VERSION_NUMBER_NOT_AVAILABLE
#if !defined(BOOST_PREDEF_DETAIL_OS_DETECTED) && ( \
defined(__DragonFly__) \
)
# ifndef BOOST_OS_BSD_AVAILABLE
# define BOOST_OS_BSD BOOST_VERSION_NUMBER_AVAILABLE
# define BOOST_OS_BSD_AVAILABLE
# endif
# undef BOOST_OS_BSD_DRAGONFLY
# define BOOST_OS_BSD_DRAGONFLY BOOST_VERSION_NUMBER_AVAILABLE
#endif
#if BOOST_OS_BSD_DRAGONFLY
# define BOOST_OS_BSD_DRAGONFLY_AVAILABLE
# include <boost/predef/detail/os_detected.h>
#endif
#define BOOST_OS_BSD_DRAGONFLY_NAME "DragonFly BSD"
#endif
#include <boost/predef/detail/test.h>
BOOST_PREDEF_DECLARE_TEST(BOOST_OS_BSD_DRAGONFLY,BOOST_OS_BSD_DRAGONFLY_NAME)

View File

@@ -0,0 +1,60 @@
/*
Copyright Rene Rivera 2012-2015
Distributed under the Boost Software License, Version 1.0.
(See accompanying file LICENSE_1_0.txt or copy at
http://www.boost.org/LICENSE_1_0.txt)
*/
#ifndef BOOST_PREDEF_OS_BSD_FREE_H
#define BOOST_PREDEF_OS_BSD_FREE_H
#include <boost/predef/os/bsd.h>
/*`
[heading `BOOST_OS_BSD_FREE`]
[@http://en.wikipedia.org/wiki/Freebsd FreeBSD] operating system.
[table
[[__predef_symbol__] [__predef_version__]]
[[`__FreeBSD__`] [__predef_detection__]]
[[`__FreeBSD_version`] [V.R.P]]
]
*/
#define BOOST_OS_BSD_FREE BOOST_VERSION_NUMBER_NOT_AVAILABLE
#if !defined(BOOST_PREDEF_DETAIL_OS_DETECTED) && ( \
defined(__FreeBSD__) \
)
# ifndef BOOST_OS_BSD_AVAILABLE
# define BOOST_OS_BSD BOOST_VERSION_NUMBER_AVAILABLE
# define BOOST_OS_BSD_AVAILABLE
# endif
# undef BOOST_OS_BSD_FREE
# if defined(__FreeBSD_version)
# if __FreeBSD_version < 500000
# define BOOST_OS_BSD_FREE \
BOOST_PREDEF_MAKE_10_VRP000(__FreeBSD_version)
# else
# define BOOST_OS_BSD_FREE \
BOOST_PREDEF_MAKE_10_VRR000(__FreeBSD_version)
# endif
# else
# define BOOST_OS_BSD_FREE BOOST_VERSION_NUMBER_AVAILABLE
# endif
#endif
#if BOOST_OS_BSD_FREE
# define BOOST_OS_BSD_FREE_AVAILABLE
# include <boost/predef/detail/os_detected.h>
#endif
#define BOOST_OS_BSD_FREE_NAME "Free BSD"
#endif
#include <boost/predef/detail/test.h>
BOOST_PREDEF_DECLARE_TEST(BOOST_OS_BSD_FREE,BOOST_OS_BSD_FREE_NAME)

View File

@@ -0,0 +1,84 @@
/*
Copyright Rene Rivera 2012-2015
Distributed under the Boost Software License, Version 1.0.
(See accompanying file LICENSE_1_0.txt or copy at
http://www.boost.org/LICENSE_1_0.txt)
*/
#ifndef BOOST_PREDEF_OS_BSD_NET_H
#define BOOST_PREDEF_OS_BSD_NET_H
#include <boost/predef/os/bsd.h>
/*`
[heading `BOOST_OS_BSD_NET`]
[@http://en.wikipedia.org/wiki/Netbsd NetBSD] operating system.
[table
[[__predef_symbol__] [__predef_version__]]
[[`__NETBSD__`] [__predef_detection__]]
[[`__NetBSD__`] [__predef_detection__]]
[[`__NETBSD_version`] [V.R.P]]
[[`NetBSD0_8`] [0.8.0]]
[[`NetBSD0_9`] [0.9.0]]
[[`NetBSD1_0`] [1.0.0]]
[[`__NetBSD_Version`] [V.R.P]]
]
*/
#define BOOST_OS_BSD_NET BOOST_VERSION_NUMBER_NOT_AVAILABLE
#if !defined(BOOST_PREDEF_DETAIL_OS_DETECTED) && ( \
defined(__NETBSD__) || defined(__NetBSD__) \
)
# ifndef BOOST_OS_BSD_AVAILABLE
# define BOOST_OS_BSD BOOST_VERSION_NUMBER_AVAILABLE
# define BOOST_OS_BSD_AVAILABLE
# endif
# undef BOOST_OS_BSD_NET
# if defined(__NETBSD__)
# if defined(__NETBSD_version)
# if __NETBSD_version < 500000
# define BOOST_OS_BSD_NET \
BOOST_PREDEF_MAKE_10_VRP000(__NETBSD_version)
# else
# define BOOST_OS_BSD_NET \
BOOST_PREDEF_MAKE_10_VRR000(__NETBSD_version)
# endif
# else
# define BOOST_OS_BSD_NET BOOST_VERSION_NUMBER_AVAILABLE
# endif
# elif defined(__NetBSD__)
# if !defined(BOOST_OS_BSD_NET) && defined(NetBSD0_8)
# define BOOST_OS_BSD_NET BOOST_VERSION_NUMBER(0,8,0)
# endif
# if !defined(BOOST_OS_BSD_NET) && defined(NetBSD0_9)
# define BOOST_OS_BSD_NET BOOST_VERSION_NUMBER(0,9,0)
# endif
# if !defined(BOOST_OS_BSD_NET) && defined(NetBSD1_0)
# define BOOST_OS_BSD_NET BOOST_VERSION_NUMBER(1,0,0)
# endif
# if !defined(BOOST_OS_BSD_NET) && defined(__NetBSD_Version)
# define BOOST_OS_BSD_NET \
BOOST_PREDEF_MAKE_10_VVRR00PP00(__NetBSD_Version)
# endif
# if !defined(BOOST_OS_BSD_NET)
# define BOOST_OS_BSD_NET BOOST_VERSION_NUMBER_AVAILABLE
# endif
# endif
#endif
#if BOOST_OS_BSD_NET
# define BOOST_OS_BSD_NET_AVAILABLE
# include <boost/predef/detail/os_detected.h>
#endif
#define BOOST_OS_BSD_NET_NAME "NetBSD"
#endif
#include <boost/predef/detail/test.h>
BOOST_PREDEF_DECLARE_TEST(BOOST_OS_BSD_NET,BOOST_OS_BSD_NET_NAME)

View File

@@ -0,0 +1,171 @@
/*
Copyright Rene Rivera 2012-2015
Distributed under the Boost Software License, Version 1.0.
(See accompanying file LICENSE_1_0.txt or copy at
http://www.boost.org/LICENSE_1_0.txt)
*/
#ifndef BOOST_PREDEF_OS_BSD_OPEN_H
#define BOOST_PREDEF_OS_BSD_OPEN_H
#include <boost/predef/os/bsd.h>
/*`
[heading `BOOST_OS_BSD_OPEN`]
[@http://en.wikipedia.org/wiki/Openbsd OpenBSD] operating system.
[table
[[__predef_symbol__] [__predef_version__]]
[[`__OpenBSD__`] [__predef_detection__]]
[[`OpenBSD2_0`] [2.0.0]]
[[`OpenBSD2_1`] [2.1.0]]
[[`OpenBSD2_2`] [2.2.0]]
[[`OpenBSD2_3`] [2.3.0]]
[[`OpenBSD2_4`] [2.4.0]]
[[`OpenBSD2_5`] [2.5.0]]
[[`OpenBSD2_6`] [2.6.0]]
[[`OpenBSD2_7`] [2.7.0]]
[[`OpenBSD2_8`] [2.8.0]]
[[`OpenBSD2_9`] [2.9.0]]
[[`OpenBSD3_0`] [3.0.0]]
[[`OpenBSD3_1`] [3.1.0]]
[[`OpenBSD3_2`] [3.2.0]]
[[`OpenBSD3_3`] [3.3.0]]
[[`OpenBSD3_4`] [3.4.0]]
[[`OpenBSD3_5`] [3.5.0]]
[[`OpenBSD3_6`] [3.6.0]]
[[`OpenBSD3_7`] [3.7.0]]
[[`OpenBSD3_8`] [3.8.0]]
[[`OpenBSD3_9`] [3.9.0]]
[[`OpenBSD4_0`] [4.0.0]]
[[`OpenBSD4_1`] [4.1.0]]
[[`OpenBSD4_2`] [4.2.0]]
[[`OpenBSD4_3`] [4.3.0]]
[[`OpenBSD4_4`] [4.4.0]]
[[`OpenBSD4_5`] [4.5.0]]
[[`OpenBSD4_6`] [4.6.0]]
[[`OpenBSD4_7`] [4.7.0]]
[[`OpenBSD4_8`] [4.8.0]]
[[`OpenBSD4_9`] [4.9.0]]
]
*/
#define BOOST_OS_BSD_OPEN BOOST_VERSION_NUMBER_NOT_AVAILABLE
#if !defined(BOOST_PREDEF_DETAIL_OS_DETECTED) && ( \
defined(__OpenBSD__) \
)
# ifndef BOOST_OS_BSD_AVAILABLE
# define BOOST_OS_BSD BOOST_VERSION_NUMBER_AVAILABLE
# define BOOST_OS_BSD_AVAILABLE
# endif
# undef BOOST_OS_BSD_OPEN
# if !defined(BOOST_OS_BSD_OPEN) && defined(OpenBSD2_0)
# define BOOST_OS_BSD_OPEN BOOST_VERSION_NUMBER(2,0,0)
# endif
# if !defined(BOOST_OS_BSD_OPEN) && defined(OpenBSD2_1)
# define BOOST_OS_BSD_OPEN BOOST_VERSION_NUMBER(2,1,0)
# endif
# if !defined(BOOST_OS_BSD_OPEN) && defined(OpenBSD2_2)
# define BOOST_OS_BSD_OPEN BOOST_VERSION_NUMBER(2,2,0)
# endif
# if !defined(BOOST_OS_BSD_OPEN) && defined(OpenBSD2_3)
# define BOOST_OS_BSD_OPEN BOOST_VERSION_NUMBER(2,3,0)
# endif
# if !defined(BOOST_OS_BSD_OPEN) && defined(OpenBSD2_4)
# define BOOST_OS_BSD_OPEN BOOST_VERSION_NUMBER(2,4,0)
# endif
# if !defined(BOOST_OS_BSD_OPEN) && defined(OpenBSD2_5)
# define BOOST_OS_BSD_OPEN BOOST_VERSION_NUMBER(2,5,0)
# endif
# if !defined(BOOST_OS_BSD_OPEN) && defined(OpenBSD2_6)
# define BOOST_OS_BSD_OPEN BOOST_VERSION_NUMBER(2,6,0)
# endif
# if !defined(BOOST_OS_BSD_OPEN) && defined(OpenBSD2_7)
# define BOOST_OS_BSD_OPEN BOOST_VERSION_NUMBER(2,7,0)
# endif
# if !defined(BOOST_OS_BSD_OPEN) && defined(OpenBSD2_8)
# define BOOST_OS_BSD_OPEN BOOST_VERSION_NUMBER(2,8,0)
# endif
# if !defined(BOOST_OS_BSD_OPEN) && defined(OpenBSD2_9)
# define BOOST_OS_BSD_OPEN BOOST_VERSION_NUMBER(2,9,0)
# endif
# if !defined(BOOST_OS_BSD_OPEN) && defined(OpenBSD3_0)
# define BOOST_OS_BSD_OPEN BOOST_VERSION_NUMBER(3,0,0)
# endif
# if !defined(BOOST_OS_BSD_OPEN) && defined(OpenBSD3_1)
# define BOOST_OS_BSD_OPEN BOOST_VERSION_NUMBER(3,1,0)
# endif
# if !defined(BOOST_OS_BSD_OPEN) && defined(OpenBSD3_2)
# define BOOST_OS_BSD_OPEN BOOST_VERSION_NUMBER(3,2,0)
# endif
# if !defined(BOOST_OS_BSD_OPEN) && defined(OpenBSD3_3)
# define BOOST_OS_BSD_OPEN BOOST_VERSION_NUMBER(3,3,0)
# endif
# if !defined(BOOST_OS_BSD_OPEN) && defined(OpenBSD3_4)
# define BOOST_OS_BSD_OPEN BOOST_VERSION_NUMBER(3,4,0)
# endif
# if !defined(BOOST_OS_BSD_OPEN) && defined(OpenBSD3_5)
# define BOOST_OS_BSD_OPEN BOOST_VERSION_NUMBER(3,5,0)
# endif
# if !defined(BOOST_OS_BSD_OPEN) && defined(OpenBSD3_6)
# define BOOST_OS_BSD_OPEN BOOST_VERSION_NUMBER(3,6,0)
# endif
# if !defined(BOOST_OS_BSD_OPEN) && defined(OpenBSD3_7)
# define BOOST_OS_BSD_OPEN BOOST_VERSION_NUMBER(3,7,0)
# endif
# if !defined(BOOST_OS_BSD_OPEN) && defined(OpenBSD3_8)
# define BOOST_OS_BSD_OPEN BOOST_VERSION_NUMBER(3,8,0)
# endif
# if !defined(BOOST_OS_BSD_OPEN) && defined(OpenBSD3_9)
# define BOOST_OS_BSD_OPEN BOOST_VERSION_NUMBER(3,9,0)
# endif
# if !defined(BOOST_OS_BSD_OPEN) && defined(OpenBSD4_0)
# define BOOST_OS_BSD_OPEN BOOST_VERSION_NUMBER(4,0,0)
# endif
# if !defined(BOOST_OS_BSD_OPEN) && defined(OpenBSD4_1)
# define BOOST_OS_BSD_OPEN BOOST_VERSION_NUMBER(4,1,0)
# endif
# if !defined(BOOST_OS_BSD_OPEN) && defined(OpenBSD4_2)
# define BOOST_OS_BSD_OPEN BOOST_VERSION_NUMBER(4,2,0)
# endif
# if !defined(BOOST_OS_BSD_OPEN) && defined(OpenBSD4_3)
# define BOOST_OS_BSD_OPEN BOOST_VERSION_NUMBER(4,3,0)
# endif
# if !defined(BOOST_OS_BSD_OPEN) && defined(OpenBSD4_4)
# define BOOST_OS_BSD_OPEN BOOST_VERSION_NUMBER(4,4,0)
# endif
# if !defined(BOOST_OS_BSD_OPEN) && defined(OpenBSD4_5)
# define BOOST_OS_BSD_OPEN BOOST_VERSION_NUMBER(4,5,0)
# endif
# if !defined(BOOST_OS_BSD_OPEN) && defined(OpenBSD4_6)
# define BOOST_OS_BSD_OPEN BOOST_VERSION_NUMBER(4,6,0)
# endif
# if !defined(BOOST_OS_BSD_OPEN) && defined(OpenBSD4_7)
# define BOOST_OS_BSD_OPEN BOOST_VERSION_NUMBER(4,7,0)
# endif
# if !defined(BOOST_OS_BSD_OPEN) && defined(OpenBSD4_8)
# define BOOST_OS_BSD_OPEN BOOST_VERSION_NUMBER(4,8,0)
# endif
# if !defined(BOOST_OS_BSD_OPEN) && defined(OpenBSD4_9)
# define BOOST_OS_BSD_OPEN BOOST_VERSION_NUMBER(4,9,0)
# endif
# if !defined(BOOST_OS_BSD_OPEN)
# define BOOST_OS_BSD_OPEN BOOST_VERSION_NUMBER_AVAILABLE
# endif
#endif
#if BOOST_OS_BSD_OPEN
# define BOOST_OS_BSD_OPEN_AVAILABLE
# include <boost/predef/detail/os_detected.h>
#endif
#define BOOST_OS_BSD_OPEN_NAME "OpenBSD"
#endif
#include <boost/predef/detail/test.h>
BOOST_PREDEF_DECLARE_TEST(BOOST_OS_BSD_OPEN,BOOST_OS_BSD_OPEN_NAME)

View File

@@ -0,0 +1,51 @@
/*
Copyright Franz Detro 2014
Copyright Rene Rivera 2015
Distributed under the Boost Software License, Version 1.0.
(See accompanying file LICENSE_1_0.txt or copy at
http://www.boost.org/LICENSE_1_0.txt)
*/
#ifndef BOOST_PREDEF_OS_IOS_H
#define BOOST_PREDEF_OS_IOS_H
#include <boost/predef/version_number.h>
#include <boost/predef/make.h>
/*`
[heading `BOOST_OS_IOS`]
[@http://en.wikipedia.org/wiki/iOS iOS] operating system.
[table
[[__predef_symbol__] [__predef_version__]]
[[`__APPLE__`] [__predef_detection__]]
[[`__MACH__`] [__predef_detection__]]
[[`__ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__`] [__predef_detection__]]
[[`__ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__`] [__ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__*1000]]
]
*/
#define BOOST_OS_IOS BOOST_VERSION_NUMBER_NOT_AVAILABLE
#if !defined(BOOST_PREDEF_DETAIL_OS_DETECTED) && ( \
defined(__APPLE__) && defined(__MACH__) && \
defined(__ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__) \
)
# undef BOOST_OS_IOS
# define BOOST_OS_IOS (__ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__*1000)
#endif
#if BOOST_OS_IOS
# define BOOST_OS_IOS_AVAILABLE
# include <boost/predef/detail/os_detected.h>
#endif
#define BOOST_OS_IOS_NAME "iOS"
#endif
#include <boost/predef/detail/test.h>
BOOST_PREDEF_DECLARE_TEST(BOOST_OS_IOS,BOOST_OS_IOS_NAME)
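The *1000 scaling lines up with BOOST_VERSION_NUMBER: the toolchain encodes e.g. iOS 9.0 as __ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__ = 90000, and 90000*1000 = 90000000 = BOOST_VERSION_NUMBER(9,0,0), so ordinary comparisons work (illustrative):

#include <boost/predef/os/ios.h>

#if BOOST_OS_IOS >= BOOST_VERSION_NUMBER(9,0,0)
/* building for an iOS 9.0+ deployment target */
#endif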

View File

@@ -0,0 +1,65 @@
/*
Copyright Rene Rivera 2008-2015
Copyright Franz Detro 2014
Distributed under the Boost Software License, Version 1.0.
(See accompanying file LICENSE_1_0.txt or copy at
http://www.boost.org/LICENSE_1_0.txt)
*/
#ifndef BOOST_PREDEF_OS_MACOS_H
#define BOOST_PREDEF_OS_MACOS_H
/* Special case: iOS will define the same predefs as MacOS, and additionally
 * '__ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__'. We can guard against that,
 * but only if we detect iOS first. Hence we will force include iOS detection
 * before doing any MacOS detection.
 */
#include <boost/predef/os/ios.h>
#include <boost/predef/version_number.h>
#include <boost/predef/make.h>
/*`
[heading `BOOST_OS_MACOS`]
[@http://en.wikipedia.org/wiki/Mac_OS Mac OS] operating system.
[table
[[__predef_symbol__] [__predef_version__]]
[[`macintosh`] [__predef_detection__]]
[[`Macintosh`] [__predef_detection__]]
[[`__APPLE__`] [__predef_detection__]]
[[`__MACH__`] [__predef_detection__]]
[[`__APPLE__`, `__MACH__`] [10.0.0]]
[[ /otherwise/ ] [9.0.0]]
]
*/
#define BOOST_OS_MACOS BOOST_VERSION_NUMBER_NOT_AVAILABLE
#if !defined(BOOST_PREDEF_DETAIL_OS_DETECTED) && ( \
defined(macintosh) || defined(Macintosh) || \
(defined(__APPLE__) && defined(__MACH__)) \
)
# undef BOOST_OS_MACOS
# if !defined(BOOST_OS_MACOS) && defined(__APPLE__) && defined(__MACH__)
# define BOOST_OS_MACOS BOOST_VERSION_NUMBER(10,0,0)
# endif
# if !defined(BOOST_OS_MACOS)
# define BOOST_OS_MACOS BOOST_VERSION_NUMBER(9,0,0)
# endif
#endif
#if BOOST_OS_MACOS
# define BOOST_OS_MACOS_AVAILABLE
# include <boost/predef/detail/os_detected.h>
#endif
#define BOOST_OS_MACOS_NAME "Mac OS"
#endif
#include <boost/predef/detail/test.h>
BOOST_PREDEF_DECLARE_TEST(BOOST_OS_MACOS,BOOST_OS_MACOS_NAME)

View File

@@ -0,0 +1,204 @@
/*
Copyright Rene Rivera 2013-2015
Distributed under the Boost Software License, Version 1.0.
(See accompanying file LICENSE_1_0.txt or copy at
http://www.boost.org/LICENSE_1_0.txt)
*/
#ifndef BOOST_PREDEF_ENDIAN_H
#define BOOST_PREDEF_ENDIAN_H
#include <boost/predef/version_number.h>
#include <boost/predef/make.h>
#include <boost/predef/library/c/gnu.h>
#include <boost/predef/os/macos.h>
#include <boost/predef/os/bsd.h>
#include <boost/predef/os/android.h>
/*`
[heading `BOOST_ENDIAN_*`]
Detection of endian memory ordering. There are four defined macros
in this header that define the various generally possible endian
memory orderings:
* `BOOST_ENDIAN_BIG_BYTE`, byte-swapped big-endian.
* `BOOST_ENDIAN_BIG_WORD`, word-swapped big-endian.
* `BOOST_ENDIAN_LITTLE_BYTE`, byte-swapped little-endian.
* `BOOST_ENDIAN_LITTLE_WORD`, word-swapped little-endian.
The detection is conservative in that it only identifies endianness
that it knows for certain. In particular bi-endianness is not
indicated as it is not practically possible to determine the
endianness from anything but an operating system provided
header. And the currently known headers do not define that
programmatic bi-endianness is available.
This implementation is a compilation of various publicly available
information and acquired knowledge:
# The indispensable documentation of "Pre-defined Compiler Macros"
[@http://sourceforge.net/p/predef/wiki/Endianness Endianness].
# The various endian specifications available in the
[@http://wikipedia.org/ Wikipedia] computer architecture pages.
# Generally available searches for headers that define endianness.
*/
#define BOOST_ENDIAN_BIG_BYTE BOOST_VERSION_NUMBER_NOT_AVAILABLE
#define BOOST_ENDIAN_BIG_WORD BOOST_VERSION_NUMBER_NOT_AVAILABLE
#define BOOST_ENDIAN_LITTLE_BYTE BOOST_VERSION_NUMBER_NOT_AVAILABLE
#define BOOST_ENDIAN_LITTLE_WORD BOOST_VERSION_NUMBER_NOT_AVAILABLE
/* GNU libc provides a header defining __BYTE_ORDER, or _BYTE_ORDER.
 * And some OSs provide some form of endian header also.
 */
#if !BOOST_ENDIAN_BIG_BYTE && !BOOST_ENDIAN_BIG_WORD && \
!BOOST_ENDIAN_LITTLE_BYTE && !BOOST_ENDIAN_LITTLE_WORD
# if BOOST_LIB_C_GNU || BOOST_OS_ANDROID
# include <endian.h>
# else
# if BOOST_OS_MACOS
# include <machine/endian.h>
# else
# if BOOST_OS_BSD
# if BOOST_OS_BSD_OPEN
# include <machine/endian.h>
# else
# include <sys/endian.h>
# endif
# endif
# endif
# endif
# if defined(__BYTE_ORDER)
# if defined(__BIG_ENDIAN) && (__BYTE_ORDER == __BIG_ENDIAN)
# undef BOOST_ENDIAN_BIG_BYTE
# define BOOST_ENDIAN_BIG_BYTE BOOST_VERSION_NUMBER_AVAILABLE
# endif
# if defined(__LITTLE_ENDIAN) && (__BYTE_ORDER == __LITTLE_ENDIAN)
# undef BOOST_ENDIAN_LITTLE_BYTE
# define BOOST_ENDIAN_LITTLE_BYTE BOOST_VERSION_NUMBER_AVAILABLE
# endif
# if defined(__PDP_ENDIAN) && (__BYTE_ORDER == __PDP_ENDIAN)
# undef BOOST_ENDIAN_LITTLE_WORD
# define BOOST_ENDIAN_LITTLE_WORD BOOST_VERSION_NUMBER_AVAILABLE
# endif
# endif
# if !defined(__BYTE_ORDER) && defined(_BYTE_ORDER)
# if defined(_BIG_ENDIAN) && (_BYTE_ORDER == _BIG_ENDIAN)
# undef BOOST_ENDIAN_BIG_BYTE
# define BOOST_ENDIAN_BIG_BYTE BOOST_VERSION_NUMBER_AVAILABLE
# endif
# if defined(_LITTLE_ENDIAN) && (_BYTE_ORDER == _LITTLE_ENDIAN)
# undef BOOST_ENDIAN_LITTLE_BYTE
# define BOOST_ENDIAN_LITTLE_BYTE BOOST_VERSION_NUMBER_AVAILABLE
# endif
# if defined(_PDP_ENDIAN) && (_BYTE_ORDER == _PDP_ENDIAN)
# undef BOOST_ENDIAN_LITTLE_WORD
# define BOOST_ENDIAN_LITTLE_WORD BOOST_VERSION_NUMBER_AVAILABLE
# endif
# endif
#endif
/* Built-in byte-swapped big-endian macros.
 */
#if !BOOST_ENDIAN_BIG_BYTE && !BOOST_ENDIAN_BIG_WORD && \
!BOOST_ENDIAN_LITTLE_BYTE && !BOOST_ENDIAN_LITTLE_WORD
# if (defined(__BIG_ENDIAN__) && !defined(__LITTLE_ENDIAN__)) || \
(defined(_BIG_ENDIAN) && !defined(_LITTLE_ENDIAN)) || \
defined(__ARMEB__) || \
defined(__THUMBEB__) || \
defined(__AARCH64EB__) || \
defined(_MIPSEB) || \
defined(__MIPSEB) || \
defined(__MIPSEB__)
# undef BOOST_ENDIAN_BIG_BYTE
# define BOOST_ENDIAN_BIG_BYTE BOOST_VERSION_NUMBER_AVAILABLE
# endif
#endif
/* Built-in byte-swapped little-endian macros.
 */
#if !BOOST_ENDIAN_BIG_BYTE && !BOOST_ENDIAN_BIG_WORD && \
!BOOST_ENDIAN_LITTLE_BYTE && !BOOST_ENDIAN_LITTLE_WORD
# if (defined(__LITTLE_ENDIAN__) && !defined(__BIG_ENDIAN__)) || \
(defined(_LITTLE_ENDIAN) && !defined(_BIG_ENDIAN)) || \
defined(__ARMEL__) || \
defined(__THUMBEL__) || \
defined(__AARCH64EL__) || \
defined(_MIPSEL) || \
defined(__MIPSEL) || \
defined(__MIPSEL__)
# undef BOOST_ENDIAN_LITTLE_BYTE
# define BOOST_ENDIAN_LITTLE_BYTE BOOST_VERSION_NUMBER_AVAILABLE
# endif
#endif
/* Some architectures are strictly one endianness (as opposed
 * to the currently common bi-endianness).
 */
#if !BOOST_ENDIAN_BIG_BYTE && !BOOST_ENDIAN_BIG_WORD && \
!BOOST_ENDIAN_LITTLE_BYTE && !BOOST_ENDIAN_LITTLE_WORD
# include <boost/predef/architecture.h>
# if BOOST_ARCH_M68K || \
BOOST_ARCH_PARISC || \
BOOST_ARCH_SPARC || \
BOOST_ARCH_SYS370 || \
BOOST_ARCH_SYS390 || \
BOOST_ARCH_Z
# undef BOOST_ENDIAN_BIG_BYTE
# define BOOST_ENDIAN_BIG_BYTE BOOST_VERSION_NUMBER_AVAILABLE
# endif
# if BOOST_ARCH_AMD64 || \
BOOST_ARCH_IA64 || \
BOOST_ARCH_X86 || \
BOOST_ARCH_BLACKFIN
# undef BOOST_ENDIAN_LITTLE_BYTE
# define BOOST_ENDIAN_LITTLE_BYTE BOOST_VERSION_NUMBER_AVAILABLE
# endif
#endif
/* Windows on ARM, if not otherwise detected/specified, is always
 * byte-swapped little-endian.
 */
#if !BOOST_ENDIAN_BIG_BYTE && !BOOST_ENDIAN_BIG_WORD && \
!BOOST_ENDIAN_LITTLE_BYTE && !BOOST_ENDIAN_LITTLE_WORD
# if BOOST_ARCH_ARM
# include <boost/predef/os/windows.h>
# if BOOST_OS_WINDOWS
# undef BOOST_ENDIAN_LITTLE_BYTE
# define BOOST_ENDIAN_LITTLE_BYTE BOOST_VERSION_NUMBER_AVAILABLE
# endif
# endif
#endif
#if BOOST_ENDIAN_BIG_BYTE
# define BOOST_ENDIAN_BIG_BYTE_AVAILABLE
#endif
#if BOOST_ENDIAN_BIG_WORD
# define BOOST_ENDIAN_BIG_WORD_BYTE_AVAILABLE
#endif
#if BOOST_ENDIAN_LITTLE_BYTE
# define BOOST_ENDIAN_LITTLE_BYTE_AVAILABLE
#endif
#if BOOST_ENDIAN_LITTLE_WORD
# define BOOST_ENDIAN_LITTLE_WORD_BYTE_AVAILABLE
#endif
#define BOOST_ENDIAN_BIG_BYTE_NAME "Byte-Swapped Big-Endian"
#define BOOST_ENDIAN_BIG_WORD_NAME "Word-Swapped Big-Endian"
#define BOOST_ENDIAN_LITTLE_BYTE_NAME "Byte-Swapped Little-Endian"
#define BOOST_ENDIAN_LITTLE_WORD_NAME "Word-Swapped Little-Endian"
#endif
#include <boost/predef/detail/test.h>
BOOST_PREDEF_DECLARE_TEST(BOOST_ENDIAN_BIG_BYTE,BOOST_ENDIAN_BIG_BYTE_NAME)
#include <boost/predef/detail/test.h>
BOOST_PREDEF_DECLARE_TEST(BOOST_ENDIAN_BIG_WORD,BOOST_ENDIAN_BIG_WORD_NAME)
#include <boost/predef/detail/test.h>
BOOST_PREDEF_DECLARE_TEST(BOOST_ENDIAN_LITTLE_BYTE,BOOST_ENDIAN_LITTLE_BYTE_NAME)
#include <boost/predef/detail/test.h>
BOOST_PREDEF_DECLARE_TEST(BOOST_ENDIAN_LITTLE_WORD,BOOST_ENDIAN_LITTLE_WORD_NAME)
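A typical consumer branches only on the byte-swapped cases (illustrative):

#include <boost/predef/other/endian.h>

#if BOOST_ENDIAN_LITTLE_BYTE
/* serialize without swapping */
#elif BOOST_ENDIAN_BIG_BYTE
/* swap to little-endian on the wire */
#else
#error "byte order could not be detected"
#endif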

View File

@@ -0,0 +1,53 @@
/*
Copyright Rene Rivera 2005, 2008-2013
Distributed under the Boost Software License, Version 1.0.
(See accompanying file LICENSE_1_0.txt or copy at
http://www.boost.org/LICENSE_1_0.txt)
*/
#ifndef BOOST_PREDEF_VERSION_NUMBER_H
#define BOOST_PREDEF_VERSION_NUMBER_H
/*`
[heading `BOOST_VERSION_NUMBER`]
``
BOOST_VERSION_NUMBER(major,minor,patch)
``
Defines standard version numbers, with these properties:
* Decimal base whole numbers in the range \[0,1000000000).
The number range is designed to allow for a (2,2,5) digit triplet,
which fits within a 32 bit value.
* The `major` number can be in the \[0,99\] range.
* The `minor` number can be in the \[0,99\] range.
* The `patch` number can be in the \[0,99999\] range.
* Values can be specified in any base, as the defined value
is a constant expression.
* Values can be used directly in both preprocessor and compiler
expressions for comparison to other similarly defined values.
* The implementation enforces the individual ranges for the
major, minor, and patch numbers, and values over the ranges
are truncated (modulo).
*/
#define BOOST_VERSION_NUMBER(major,minor,patch) \
( (((major)%100)*10000000) + (((minor)%100)*100000) + ((patch)%100000) )
#define BOOST_VERSION_NUMBER_MAX \
BOOST_VERSION_NUMBER(99,99,99999)
#define BOOST_VERSION_NUMBER_ZERO \
BOOST_VERSION_NUMBER(0,0,0)
#define BOOST_VERSION_NUMBER_MIN \
BOOST_VERSION_NUMBER(0,0,1)
#define BOOST_VERSION_NUMBER_AVAILABLE \
BOOST_VERSION_NUMBER_MIN
#define BOOST_VERSION_NUMBER_NOT_AVAILABLE \
BOOST_VERSION_NUMBER_ZERO
#endif
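Worked example: BOOST_VERSION_NUMBER(4,6,2) = 4*10000000 + 6*100000 + 2 = 40600002, so plain integer comparison orders versions correctly:

#include <boost/predef/version_number.h>

static_assert(BOOST_VERSION_NUMBER(4,6,2) == 40600002, "encoding");
static_assert(BOOST_VERSION_NUMBER(4,6,2) < BOOST_VERSION_NUMBER(4,10,0), "ordering");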

View File

@@ -0,0 +1,699 @@
/*********************************************************************
* Software License Agreement (BSD License)
*
* Copyright (c) 2011, Willow Garage, Inc.
* Copyright (c) 2015, Tal Regev.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of the Willow Garage nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*********************************************************************/
#include "boost/endian/conversion.hpp"
#include <map>
#include <boost/make_shared.hpp>
#include <boost/regex.hpp>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <sensor_msgs/image_encodings.h>
#include <cv_bridge/cv_bridge.h>
#include <cv_bridge/rgb_colors.h>
namespace enc = sensor_msgs::image_encodings;
namespace cv_bridge {
static int depthStrToInt(const std::string depth) {
if (depth == "8U") {
return 0;
} else if (depth == "8S") {
return 1;
} else if (depth == "16U") {
return 2;
} else if (depth == "16S") {
return 3;
} else if (depth == "32S") {
return 4;
} else if (depth == "32F") {
return 5;
}
return 6;
}
int getCvType(const std::string& encoding)
{
// Check for the most common encodings first
if (encoding == enc::BGR8) return CV_8UC3;
if (encoding == enc::MONO8) return CV_8UC1;
if (encoding == enc::RGB8) return CV_8UC3;
if (encoding == enc::MONO16) return CV_16UC1;
if (encoding == enc::BGR16) return CV_16UC3;
if (encoding == enc::RGB16) return CV_16UC3;
if (encoding == enc::BGRA8) return CV_8UC4;
if (encoding == enc::RGBA8) return CV_8UC4;
if (encoding == enc::BGRA16) return CV_16UC4;
if (encoding == enc::RGBA16) return CV_16UC4;
// For bayer, return one-channel
if (encoding == enc::BAYER_RGGB8) return CV_8UC1;
if (encoding == enc::BAYER_BGGR8) return CV_8UC1;
if (encoding == enc::BAYER_GBRG8) return CV_8UC1;
if (encoding == enc::BAYER_GRBG8) return CV_8UC1;
if (encoding == enc::BAYER_RGGB16) return CV_16UC1;
if (encoding == enc::BAYER_BGGR16) return CV_16UC1;
if (encoding == enc::BAYER_GBRG16) return CV_16UC1;
if (encoding == enc::BAYER_GRBG16) return CV_16UC1;
// Miscellaneous
if (encoding == enc::YUV422) return CV_8UC2;
// Check all the generic content encodings
boost::cmatch m;
if (boost::regex_match(encoding.c_str(), m,
boost::regex("(8U|8S|16U|16S|32S|32F|64F)C([0-9]+)"))) {
return CV_MAKETYPE(depthStrToInt(m[1].str()), atoi(m[2].str().c_str()));
}
if (boost::regex_match(encoding.c_str(), m,
boost::regex("(8U|8S|16U|16S|32S|32F|64F)"))) {
return CV_MAKETYPE(depthStrToInt(m[1].str()), 1);
}
throw Exception("Unrecognized image encoding [" + encoding + "]");
}
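// Worked example (illustrative): getCvType("32FC3") matches the first regex
// with depth "32F" (depthStrToInt returns 5) and 3 channels, so it returns
// CV_MAKETYPE(5, 3) == CV_32FC3; getCvType("mono8") never reaches the regexes
// because it is caught by the enc::MONO8 fast path above.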
/// @cond DOXYGEN_IGNORE
enum Encoding { INVALID = -1, GRAY = 0, RGB, BGR, RGBA, BGRA, YUV422, BAYER_RGGB, BAYER_BGGR, BAYER_GBRG, BAYER_GRBG};
Encoding getEncoding(const std::string& encoding)
{
if ((encoding == enc::MONO8) || (encoding == enc::MONO16)) return GRAY;
if ((encoding == enc::BGR8) || (encoding == enc::BGR16)) return BGR;
if ((encoding == enc::RGB8) || (encoding == enc::RGB16)) return RGB;
if ((encoding == enc::BGRA8) || (encoding == enc::BGRA16)) return BGRA;
if ((encoding == enc::RGBA8) || (encoding == enc::RGBA16)) return RGBA;
if (encoding == enc::YUV422) return YUV422;
if ((encoding == enc::BAYER_RGGB8) || (encoding == enc::BAYER_RGGB16)) return BAYER_RGGB;
if ((encoding == enc::BAYER_BGGR8) || (encoding == enc::BAYER_BGGR16)) return BAYER_BGGR;
if ((encoding == enc::BAYER_GBRG8) || (encoding == enc::BAYER_GBRG16)) return BAYER_GBRG;
if ((encoding == enc::BAYER_GRBG8) || (encoding == enc::BAYER_GRBG16)) return BAYER_GRBG;
// We don't support conversions to/from other types
return INVALID;
}
static const int SAME_FORMAT = -1;
/** Return a list of OpenCV conversion codes to get from one Format to the other
 * The key is a pair <FromFormat, ToFormat> and the value is a succession of OpenCV conversion codes
 * It's not efficient code but it is only called once and the structure is small enough
 */
std::map<std::pair<Encoding, Encoding>, std::vector<int> > getConversionCodes() {
std::map<std::pair<Encoding, Encoding>, std::vector<int> > res;
for(int i=0; i<=5; ++i)
res[std::pair<Encoding, Encoding>(Encoding(i),Encoding(i))].push_back(SAME_FORMAT);
res[std::make_pair(GRAY, RGB)].push_back(cv::COLOR_GRAY2RGB);
res[std::make_pair(GRAY, BGR)].push_back(cv::COLOR_GRAY2BGR);
res[std::make_pair(GRAY, RGBA)].push_back(cv::COLOR_GRAY2RGBA);
res[std::make_pair(GRAY, BGRA)].push_back(cv::COLOR_GRAY2BGRA);
res[std::make_pair(RGB, GRAY)].push_back(cv::COLOR_RGB2GRAY);
res[std::make_pair(RGB, BGR)].push_back(cv::COLOR_RGB2BGR);
res[std::make_pair(RGB, RGBA)].push_back(cv::COLOR_RGB2RGBA);
res[std::make_pair(RGB, BGRA)].push_back(cv::COLOR_RGB2BGRA);
res[std::make_pair(BGR, GRAY)].push_back(cv::COLOR_BGR2GRAY);
res[std::make_pair(BGR, RGB)].push_back(cv::COLOR_BGR2RGB);
res[std::make_pair(BGR, RGBA)].push_back(cv::COLOR_BGR2RGBA);
res[std::make_pair(BGR, BGRA)].push_back(cv::COLOR_BGR2BGRA);
res[std::make_pair(RGBA, GRAY)].push_back(cv::COLOR_RGBA2GRAY);
res[std::make_pair(RGBA, RGB)].push_back(cv::COLOR_RGBA2RGB);
res[std::make_pair(RGBA, BGR)].push_back(cv::COLOR_RGBA2BGR);
res[std::make_pair(RGBA, BGRA)].push_back(cv::COLOR_RGBA2BGRA);
res[std::make_pair(BGRA, GRAY)].push_back(cv::COLOR_BGRA2GRAY);
res[std::make_pair(BGRA, RGB)].push_back(cv::COLOR_BGRA2RGB);
res[std::make_pair(BGRA, BGR)].push_back(cv::COLOR_BGRA2BGR);
res[std::make_pair(BGRA, RGBA)].push_back(cv::COLOR_BGRA2RGBA);
res[std::make_pair(YUV422, GRAY)].push_back(cv::COLOR_YUV2GRAY_UYVY);
res[std::make_pair(YUV422, RGB)].push_back(cv::COLOR_YUV2RGB_UYVY);
res[std::make_pair(YUV422, BGR)].push_back(cv::COLOR_YUV2BGR_UYVY);
res[std::make_pair(YUV422, RGBA)].push_back(cv::COLOR_YUV2RGBA_UYVY);
res[std::make_pair(YUV422, BGRA)].push_back(cv::COLOR_YUV2BGRA_UYVY);
// Deal with Bayer
res[std::make_pair(BAYER_RGGB, GRAY)].push_back(cv::COLOR_BayerBG2GRAY);
res[std::make_pair(BAYER_RGGB, RGB)].push_back(cv::COLOR_BayerBG2RGB);
res[std::make_pair(BAYER_RGGB, BGR)].push_back(cv::COLOR_BayerBG2BGR);
res[std::make_pair(BAYER_BGGR, GRAY)].push_back(cv::COLOR_BayerRG2GRAY);
res[std::make_pair(BAYER_BGGR, RGB)].push_back(cv::COLOR_BayerRG2RGB);
res[std::make_pair(BAYER_BGGR, BGR)].push_back(cv::COLOR_BayerRG2BGR);
res[std::make_pair(BAYER_GBRG, GRAY)].push_back(cv::COLOR_BayerGR2GRAY);
res[std::make_pair(BAYER_GBRG, RGB)].push_back(cv::COLOR_BayerGR2RGB);
res[std::make_pair(BAYER_GBRG, BGR)].push_back(cv::COLOR_BayerGR2BGR);
res[std::make_pair(BAYER_GRBG, GRAY)].push_back(cv::COLOR_BayerGB2GRAY);
res[std::make_pair(BAYER_GRBG, RGB)].push_back(cv::COLOR_BayerGB2RGB);
res[std::make_pair(BAYER_GRBG, BGR)].push_back(cv::COLOR_BayerGB2BGR);
return res;
}
const std::vector<int> getConversionCode(std::string src_encoding, std::string dst_encoding)
{
Encoding src_encod = getEncoding(src_encoding);
Encoding dst_encod = getEncoding(dst_encoding);
bool is_src_color_format = enc::isColor(src_encoding) || enc::isMono(src_encoding) ||
enc::isBayer(src_encoding) || (src_encoding == enc::YUV422);
bool is_dst_color_format = enc::isColor(dst_encoding) || enc::isMono(dst_encoding) ||
enc::isBayer(dst_encoding) || (dst_encoding == enc::YUV422);
bool is_num_channels_the_same = (enc::numChannels(src_encoding) == enc::numChannels(dst_encoding));
// If we have no color info in the source, we can only convert to the same format which
// was resolved in the previous condition. Otherwise, fail
if (!is_src_color_format) {
if (is_dst_color_format)
throw Exception("[" + src_encoding + "] is not a color format. but [" + dst_encoding +
"] is. The conversion does not make sense");
if (!is_num_channels_the_same)
throw Exception("[" + src_encoding + "] and [" + dst_encoding + "] do not have the same number of channel");
return std::vector<int>(1, SAME_FORMAT);
}
// If we are converting from a color type to a non color type, we can only do so if we stick
// to the number of channels
if (!is_dst_color_format) {
if (!is_num_channels_the_same)
throw Exception("[" + src_encoding + "] is a color format but [" + dst_encoding + "] " +
"is not so they must have the same OpenCV type, CV_8UC3, CV16UC1 ....");
return std::vector<int>(1, SAME_FORMAT);
}
// If we are converting from a color type to another type, then everything is fine
static const std::map<std::pair<Encoding, Encoding>, std::vector<int> > CONVERSION_CODES = getConversionCodes();
std::pair<Encoding, Encoding> key(src_encod, dst_encod);
std::map<std::pair<Encoding, Encoding>, std::vector<int> >::const_iterator val = CONVERSION_CODES.find(key);
if (val == CONVERSION_CODES.end())
throw Exception("Unsupported conversion from [" + src_encoding +
"] to [" + dst_encoding + "]");
// And deal with depth differences if the colors are different
std::vector<int> res = val->second;
if ((enc::bitDepth(src_encoding) != enc::bitDepth(dst_encoding)) && (getEncoding(src_encoding) != getEncoding(dst_encoding)))
res.push_back(SAME_FORMAT);
return res;
}
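// Worked example (illustrative): converting BAYER_RGGB8 to RGB16 returns
// {cv::COLOR_BayerBG2RGB, SAME_FORMAT}: the map lookup yields the debayer
// code, and because the bit depths differ (8 vs 16) while the encodings
// differ too, a trailing SAME_FORMAT is appended so the caller follows the
// color conversion with a scaling convertTo() from [0,255] to [0,65535].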
/////////////////////////////////////// Image ///////////////////////////////////////////
// Converts a ROS Image to a cv::Mat by sharing the data or changing its endianness if needed
cv::Mat matFromImage(const sensor_msgs::Image& source)
{
int source_type = getCvType(source.encoding);
int byte_depth = enc::bitDepth(source.encoding) / 8;
int num_channels = enc::numChannels(source.encoding);
if (source.step < source.width * byte_depth * num_channels)
{
std::stringstream ss;
ss << "Image is wrongly formed: step < width * byte_depth * num_channels or " << source.step << " != " <<
source.width << " * " << byte_depth << " * " << num_channels;
throw Exception(ss.str());
}
if (source.height * source.step != source.data.size())
{
std::stringstream ss;
ss << "Image is wrongly formed: height * step != size or " << source.height << " * " <<
source.step << " != " << source.data.size();
throw Exception(ss.str());
}
// If the endianness is the same as locally, share the data
cv::Mat mat(source.height, source.width, source_type, const_cast<uchar*>(&source.data[0]), source.step);
if ((boost::endian::order::native == boost::endian::order::big && source.is_bigendian) ||
(boost::endian::order::native == boost::endian::order::little && !source.is_bigendian) ||
byte_depth == 1)
return mat;
// Otherwise, reinterpret the data as bytes and switch the channels accordingly
mat = cv::Mat(source.height, source.width, CV_MAKETYPE(CV_8U, num_channels*byte_depth),
const_cast<uchar*>(&source.data[0]), source.step);
cv::Mat mat_swap(source.height, source.width, mat.type());
std::vector<int> fromTo;
fromTo.reserve(num_channels*byte_depth);
for(int i = 0; i < num_channels; ++i)
for(int j = 0; j < byte_depth; ++j)
{
fromTo.push_back(byte_depth*i + j);
fromTo.push_back(byte_depth*i + byte_depth - 1 - j);
}
cv::mixChannels(std::vector<cv::Mat>(1, mat), std::vector<cv::Mat>(1, mat_swap), fromTo);
// Reinterpret the swapped bytes as the proper type: build a header of the
// original type over the swapped buffer and clone it so the returned Mat
// owns its data (a plain reshape() cannot do this, as it keeps the 8-bit depth)
return cv::Mat(source.height, source.width, source_type,
mat_swap.data, mat_swap.step).clone();
}
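// Worked example (illustrative): for a 16-bit, 3-channel image (byte_depth=2,
// num_channels=3) the loop above builds fromTo = {0,1, 1,0, 2,3, 3,2, 4,5, 5,4};
// each (src,dst) index pair swaps the two bytes inside every 16-bit channel
// while leaving the channel order itself untouched.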
// Internal, used by toCvCopy and cvtColor
CvImagePtr toCvCopyImpl(const cv::Mat& source,
const std_msgs::Header& src_header,
const std::string& src_encoding,
const std::string& dst_encoding)
{
// Copy metadata
CvImagePtr ptr = boost::make_shared<CvImage>();
ptr->header = src_header;
// Copy to new buffer if same encoding requested
if (dst_encoding.empty() || dst_encoding == src_encoding)
{
ptr->encoding = src_encoding;
source.copyTo(ptr->image);
}
else
{
// Convert the source data to the desired encoding
const std::vector<int> conversion_codes = getConversionCode(src_encoding, dst_encoding);
cv::Mat image1 = source;
cv::Mat image2;
for(size_t i=0; i<conversion_codes.size(); ++i) {
int conversion_code = conversion_codes[i];
if (conversion_code == SAME_FORMAT)
{
// Same number of channels, but different bit depth
int src_depth = enc::bitDepth(src_encoding);
int dst_depth = enc::bitDepth(dst_encoding);
// Keep the number of channels for now but change to the final depth
int image2_type = CV_MAKETYPE(CV_MAT_DEPTH(getCvType(dst_encoding)), image1.channels());
// Do scaling between CV_8U [0,255] and CV_16U [0,65535] images.
if (src_depth == 8 && dst_depth == 16)
image1.convertTo(image2, image2_type, 65535. / 255.);
else if (src_depth == 16 && dst_depth == 8)
image1.convertTo(image2, image2_type, 255. / 65535.);
else
image1.convertTo(image2, image2_type);
}
else
{
// Perform color conversion
cv::cvtColor(image1, image2, conversion_code);
}
image1 = image2;
}
ptr->image = image2;
ptr->encoding = dst_encoding;
}
return ptr;
}
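// Example of the SAME_FORMAT scaling above: toCvCopy of a "mono16" message with
// dst_encoding "mono8" converts a pixel value of 65535 to 255 (scaled by 255/65535)
// rather than truncating to the low byte.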
/// @endcond
sensor_msgs::ImagePtr CvImage::toImageMsg() const
{
sensor_msgs::ImagePtr ptr = boost::make_shared<sensor_msgs::Image>();
toImageMsg(*ptr);
return ptr;
}
void CvImage::toImageMsg(sensor_msgs::Image& ros_image) const
{
ros_image.header = header;
ros_image.height = image.rows;
ros_image.width = image.cols;
ros_image.encoding = encoding;
ros_image.is_bigendian = (boost::endian::order::native == boost::endian::order::big);
ros_image.step = image.cols * image.elemSize();
size_t size = ros_image.step * image.rows;
ros_image.data.resize(size);
if (image.isContinuous())
{
memcpy((char*)(&ros_image.data[0]), image.data, size);
}
else
{
// Copy row by row
uchar* ros_data_ptr = (uchar*)(&ros_image.data[0]);
uchar* cv_data_ptr = image.data;
for (int i = 0; i < image.rows; ++i)
{
memcpy(ros_data_ptr, cv_data_ptr, ros_image.step);
ros_data_ptr += ros_image.step;
cv_data_ptr += image.step;
}
}
}
// Deep copy data; the returned image is mutable
CvImagePtr toCvCopy(const sensor_msgs::ImageConstPtr& source,
const std::string& encoding)
{
return toCvCopy(*source, encoding);
}
CvImagePtr toCvCopy(const sensor_msgs::Image& source,
const std::string& encoding)
{
// Construct matrix pointing to source data
return toCvCopyImpl(matFromImage(source), source.header, source.encoding, encoding);
}
// Share const data; the returned image is immutable
CvImageConstPtr toCvShare(const sensor_msgs::ImageConstPtr& source,
const std::string& encoding)
{
return toCvShare(*source, source, encoding);
}
CvImageConstPtr toCvShare(const sensor_msgs::Image& source,
const boost::shared_ptr<void const>& tracked_object,
const std::string& encoding)
{
// If the encoding or the endianness differs, the data has to be copied
if ((!encoding.empty() && source.encoding != encoding) || (source.is_bigendian &&
(boost::endian::order::native != boost::endian::order::big)))
return toCvCopy(source, encoding);
CvImagePtr ptr = boost::make_shared<CvImage>();
ptr->header = source.header;
ptr->encoding = source.encoding;
ptr->tracked_object_ = tracked_object;
ptr->image = matFromImage(source);
return ptr;
}
CvImagePtr cvtColor(const CvImageConstPtr& source,
const std::string& encoding)
{
return toCvCopyImpl(source->image, source->header, source->encoding, encoding);
}
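// Typical usage from an image callback (sketch; "enc" = sensor_msgs::image_encodings):
//   void callback(const sensor_msgs::ImageConstPtr& msg)
//   {
//     CvImageConstPtr shared = toCvShare(msg, enc::BGR8);  // zero-copy when encoding and endianness match
//     CvImagePtr owned = toCvCopy(msg, enc::BGR8);         // deep copy, safe to modify
//     CvImagePtr mono = cvtColor(shared, enc::MONO8);      // re-encode an already-converted image
//   }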
/////////////////////////////////////// CompressedImage ///////////////////////////////////////////
sensor_msgs::CompressedImagePtr CvImage::toCompressedImageMsg(const Format dst_format) const
{
sensor_msgs::CompressedImagePtr ptr = boost::make_shared<sensor_msgs::CompressedImage>();
toCompressedImageMsg(*ptr,dst_format);
return ptr;
}
std::string getFormat(Format format) {
switch (format) {
case DIB:
return "dib";
case BMP:
return "bmp";
case JPG:
return "jpg";
case JPEG:
return "jpeg";
case JPE:
return "jpe";
case JP2:
return "jp2";
case PNG:
return "png";
case PBM:
return "pbm";
case PGM:
return "pgm";
case PPM:
return "ppm";
case RAS:
return "ras";
case SR:
return "sr";
case TIF:
return "tif";
case TIFF:
return "tiff";
}
throw Exception("Unrecognized image format");
}
void CvImage::toCompressedImageMsg(sensor_msgs::CompressedImage& ros_image, const Format dst_format) const
{
ros_image.header = header;
cv::Mat image;
if (encoding == enc::BGR8 || encoding == enc::BGRA8)
{
image = this->image;
}
else
{
CvImagePtr tempThis = boost::make_shared<CvImage>(*this);
CvImagePtr temp;
if (enc::hasAlpha(encoding))
{
temp = cvtColor(tempThis, enc::BGRA8);
}
else
{
temp = cvtColor(tempThis, enc::BGR8);
}
image = temp->image;
}
std::string format = getFormat(dst_format);
ros_image.format = format;
cv::imencode("." + format, image, ros_image.data);
}
// Deep copy data; the returned image is mutable
CvImagePtr toCvCopy(const sensor_msgs::CompressedImageConstPtr& source,
const std::string& encoding)
{
return toCvCopy(*source, encoding);
}
CvImagePtr toCvCopy(const sensor_msgs::CompressedImage& source, const std::string& encoding)
{
// Construct matrix pointing to source data
const cv::Mat_<uchar> in(1, source.data.size(), const_cast<uchar*>(&source.data[0]));
// Loads as BGR or BGRA.
const cv::Mat rgb_a = cv::imdecode(in, cv::IMREAD_UNCHANGED);
switch (rgb_a.channels())
{
case 4:
return toCvCopyImpl(rgb_a, source.header, enc::BGRA8, encoding);
break;
case 3:
return toCvCopyImpl(rgb_a, source.header, enc::BGR8, encoding);
break;
case 1:
return toCvCopyImpl(rgb_a, source.header, enc::MONO8, encoding);
break;
default:
return CvImagePtr();
}
}
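// Round-trip sketch (PNG is lossless, so the decoded matrix equals the input;
// compare test_compression.cpp further below):
//   CvImage cvi(std_msgs::Header(), "bgr8", some_bgr_mat);
//   sensor_msgs::CompressedImagePtr msg = cvi.toCompressedImageMsg(cv_bridge::PNG);
//   CvImagePtr back = toCvCopy(msg, "bgr8");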
CvImageConstPtr cvtColorForDisplay(const CvImageConstPtr& source,
const std::string& encoding_out,
const CvtColorForDisplayOptions options)
{
double min_image_value = options.min_image_value;
double max_image_value = options.max_image_value;
if (!source)
throw Exception("cv_bridge.cvtColorForDisplay() called with empty image.");
// let's figure out what to do with the empty encoding
std::string encoding = encoding_out;
if (encoding.empty())
{
try
{
// Let's decide upon an output format
if (enc::numChannels(source->encoding) == 1)
{
if ((source->encoding == enc::TYPE_32SC1) ||
(enc::bitDepth(source->encoding) == 8) ||
(enc::bitDepth(source->encoding) == 16) ||
(enc::bitDepth(source->encoding) == 32))
encoding = enc::BGR8;
else
throw std::runtime_error("Unsupported depth of the source encoding " + source->encoding);
}
else
{
// We choose BGR by default here as we assume people will use OpenCV
if ((enc::bitDepth(source->encoding) == 8) ||
(enc::bitDepth(source->encoding) == 16))
encoding = enc::BGR8;
else
throw std::runtime_error("Unsupported depth of the source encoding " + source->encoding);
}
}
// We could have cv_bridge exception or std_runtime_error from sensor_msgs::image_codings routines
catch (const std::runtime_error& e)
{
throw Exception("cv_bridge.cvtColorForDisplay() output encoding is empty and cannot be guessed.");
}
}
else
{
if ((!enc::isColor(encoding_out) && !enc::isMono(encoding_out)) ||
(enc::bitDepth(encoding) != 8))
throw Exception("cv_bridge.cvtColorForDisplay() requires an output encoding that is color or mono, with a bit depth of 8");
}
// Convert label to bgr image
if (encoding == sensor_msgs::image_encodings::BGR8 &&
source->encoding == enc::TYPE_32SC1)
{
CvImagePtr result(new CvImage());
result->header = source->header;
result->encoding = encoding;
result->image = cv::Mat(source->image.rows, source->image.cols, CV_8UC3);
for (size_t j = 0; j < source->image.rows; ++j) {
for (size_t i = 0; i < source->image.cols; ++i) {
int label = source->image.at<int>(j, i);
if (label == options.bg_label) { // background label
result->image.at<cv::Vec3b>(j, i) = cv::Vec3b(0, 0, 0);
}
else
{
cv::Vec3d rgb = rgb_colors::getRGBColor(label);
// result image should be BGR
result->image.at<cv::Vec3b>(j, i) = cv::Vec3b(int(rgb[2] * 255), int(rgb[1] * 255), int(rgb[0] * 255));
}
}
}
return result;
}
// Perform scaling if asked for
if (options.do_dynamic_scaling)
{
cv::minMaxLoc(source->image, &min_image_value, &max_image_value);
if (min_image_value == max_image_value)
{
CvImagePtr result(new CvImage());
result->header = source->header;
result->encoding = encoding;
if (enc::numChannels(encoding) == 1)
{
result->image = cv::Mat(source->image.size(), CV_8UC1);
result->image.setTo(255./2.);
} else {
result->image = cv::Mat(source->image.size(), CV_8UC3);
result->image.setTo(cv::Scalar(1., 1., 1.)*255./2.);
}
return result;
}
}
if (min_image_value != max_image_value)
{
if (enc::numChannels(source->encoding) != 1)
throw Exception("cv_bridge.cvtColorForDisplay() scaling for images with more than one channel is unsupported");
CvImagePtr img_scaled(new CvImage());
img_scaled->header = source->header;
if (options.colormap == -1) {
img_scaled->encoding = enc::MONO8;
cv::Mat(source->image-min_image_value).convertTo(img_scaled->image, CV_8UC1, 255.0 /
(max_image_value - min_image_value));
} else {
img_scaled->encoding = enc::BGR8;
cv::Mat(source->image-min_image_value).convertTo(img_scaled->image, CV_8UC3, 255.0 /
(max_image_value - min_image_value));
cv::applyColorMap(img_scaled->image, img_scaled->image, options.colormap);
// Fill black color to the nan region.
if (source->encoding == enc::TYPE_32FC1) {
for (size_t j = 0; j < source->image.rows; ++j) {
for (size_t i = 0; i < source->image.cols; ++i) {
float float_value = source->image.at<float>(j, i);
if (std::isnan(float_value)) {
img_scaled->image.at<cv::Vec3b>(j, i) = cv::Vec3b(0, 0, 0);
}
}
}
}
}
return cvtColor(img_scaled, encoding);
}
// If no color conversion is possible, we must "guess" the input format
CvImagePtr source_typed(new CvImage());
source_typed->image = source->image;
source_typed->header = source->header;
source_typed->encoding = source->encoding;
// If we are handed a raw OpenCV type string with 1, 3 or 4 channels, assume mono, BGR or BGRA respectively
if (source->encoding == "CV_8UC1")
source_typed->encoding = enc::MONO8;
else if (source->encoding == "16UC1")
source_typed->encoding = enc::MONO16;
else if (source->encoding == "CV_8UC3")
source_typed->encoding = enc::BGR8;
else if (source->encoding == "CV_8UC4")
source_typed->encoding = enc::BGRA8;
else if (source->encoding == "CV_16UC3")
source_typed->encoding = enc::BGR8;
else if (source->encoding == "CV_16UC4")
source_typed->encoding = enc::BGRA8;
// If no conversion is needed, don't convert
if (source_typed->encoding == encoding)
return source;
try
{
// Now that the output is a proper color format, try to see if any conversion is possible
return cvtColor(source_typed, encoding);
}
catch (cv_bridge::Exception& e)
{
throw Exception("cv_bridge.cvtColorForDisplay() while trying to convert image from '" + source->encoding + "' to '" + encoding + "' an exception was thrown (" + e.what() + ")");
}
}
} //namespace cv_bridge
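
A usage sketch for the display helper above (not part of the file; the function name is illustrative): a 32FC1 depth image plus dynamic scaling and an OpenCV colormap yields a displayable bgr8 image.

#include <cv_bridge/cv_bridge.h>
#include <opencv2/imgproc/imgproc.hpp>

// Sketch: visualize a 32FC1 depth image via cvtColorForDisplay.
cv_bridge::CvImageConstPtr depthToDisplay(const cv_bridge::CvImageConstPtr& depth)
{
  cv_bridge::CvtColorForDisplayOptions options;
  options.do_dynamic_scaling = true;    // stretch [min, max] of the image to [0, 255]
  options.colormap = cv::COLORMAP_JET;  // any OpenCV colormap id; -1 keeps mono8 output
  return cv_bridge::cvtColorForDisplay(depth, "bgr8", options);
}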


@@ -0,0 +1,110 @@ src/vision_opencv/cv_bridge/src/module.cpp
/*********************************************************************
* Software License Agreement (BSD License)
*
* Copyright (c) 2012, Willow Garage, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of the Willow Garage nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*********************************************************************/
#include "module.hpp"
PyObject *mod_opencv;
bp::object
cvtColor2Wrap(bp::object obj_in, const std::string & encoding_in, const std::string & encoding_out) {
// Convert the Python input to an image
cv::Mat mat_in;
convert_to_CvMat2(obj_in.ptr(), mat_in);
// Call cv_bridge for color conversion
cv_bridge::CvImagePtr cv_image(new cv_bridge::CvImage(std_msgs::Header(), encoding_in, mat_in));
cv::Mat mat = cv_bridge::cvtColor(cv_image, encoding_out)->image;
return bp::object(boost::python::handle<>(pyopencv_from(mat)));
}
bp::object
cvtColorForDisplayWrap(bp::object obj_in,
const std::string & encoding_in,
const std::string & encoding_out,
bool do_dynamic_scaling = false,
double min_image_value = 0.0,
double max_image_value = 0.0) {
// Convert the Python input to an image
cv::Mat mat_in;
convert_to_CvMat2(obj_in.ptr(), mat_in);
cv_bridge::CvImagePtr cv_image(new cv_bridge::CvImage(std_msgs::Header(), encoding_in, mat_in));
cv_bridge::CvtColorForDisplayOptions options;
options.do_dynamic_scaling = do_dynamic_scaling;
options.min_image_value = min_image_value;
options.max_image_value = max_image_value;
cv::Mat mat = cv_bridge::cvtColorForDisplay(/*source=*/cv_image,
/*encoding_out=*/encoding_out,
/*options=*/options)->image;
return bp::object(boost::python::handle<>(pyopencv_from(mat)));
}
BOOST_PYTHON_FUNCTION_OVERLOADS(cvtColorForDisplayWrap_overloads, cvtColorForDisplayWrap, 3, 6)
int CV_MAT_CNWrap(int i) {
return CV_MAT_CN(i);
}
int CV_MAT_DEPTHWrap(int i) {
return CV_MAT_DEPTH(i);
}
BOOST_PYTHON_MODULE(cv_bridge_boost)
{
do_numpy_import();
mod_opencv = PyImport_ImportModule("cv2");
// Wrap the function to get encodings as OpenCV types
boost::python::def("getCvType", cv_bridge::getCvType);
boost::python::def("cvtColor2", cvtColor2Wrap);
boost::python::def("CV_MAT_CNWrap", CV_MAT_CNWrap);
boost::python::def("CV_MAT_DEPTHWrap", CV_MAT_DEPTHWrap);
boost::python::def("cvtColorForDisplay", cvtColorForDisplayWrap,
cvtColorForDisplayWrap_overloads(
boost::python::args("source", "encoding_in", "encoding_out", "do_dynamic_scaling",
"min_image_value", "max_image_value"),
"Convert image to display with specified encodings.\n\n"
"Args:\n"
" - source (numpy.ndarray): input image\n"
" - encoding_in (str): input image encoding\n"
" - encoding_out (str): encoding to which the image conveted\n"
" - do_dynamic_scaling (bool): flag to do dynamic scaling with min/max value\n"
" - min_image_value (float): minimum pixel value for dynamic scaling\n"
" - max_image_value (float): maximum pixel value for dynamic scaling\n"
));
}
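
On the Python side these bindings are what cv_bridge's CvBridge class calls internally; a minimal sketch of driving them directly (import path as used by cv_bridge's core.py):

import numpy as np
from cv_bridge.boost.cv_bridge_boost import cvtColor2, getCvType

rgb = np.zeros((480, 640, 3), dtype=np.uint8)  # any 8-bit, 3-channel image
bgr = cvtColor2(rgb, "rgb8", "bgr8")           # color conversion through the C++ bridge
print(getCvType("bgr8"))                       # prints 16, i.e. CV_8UC3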


@@ -0,0 +1,49 @@ src/vision_opencv/cv_bridge/src/module.hpp
/*
* Copyright 2014 Open Source Robotics Foundation, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef CV_BRIDGE_MODULE_HPP_
#define CV_BRIDGE_MODULE_HPP_
#include <iostream>
#include <boost/python.hpp>
#include <cv_bridge/cv_bridge.h>
#include <Python.h>
#define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION
#include <numpy/ndarrayobject.h>
#include <opencv2/core/core.hpp>
namespace bp = boost::python;
int convert_to_CvMat2(const PyObject* o, cv::Mat& m);
PyObject* pyopencv_from(const cv::Mat& m);
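// import_array() is a numpy macro: under Python 3 it expands to a statement that
// returns NULL on failure, so the wrapper must return a pointer; under Python 2 it
// expands to a bare "return" and the wrapper must be void.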
#if PYTHON3
static void * do_numpy_import( )
{
import_array( );
return nullptr;
}
#else
static void do_numpy_import( )
{
import_array( );
}
#endif
#endif


@@ -0,0 +1,262 @@ src/vision_opencv/cv_bridge/src/module_opencv2.cpp
/*********************************************************************
* Software License Agreement (BSD License)
*
* Copyright (c) 2012, Willow Garage, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of the Willow Garage nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*********************************************************************/
#include "module.hpp"
using namespace cv;
// These are sucky, sketchy versions of the real things in OpenCV Python,
// inferior in every way.
static int failmsg(const char *fmt, ...)
{
char str[1000];
va_list ap;
va_start(ap, fmt);
vsnprintf(str, sizeof(str), fmt, ap);
va_end(ap);
PyErr_SetString(PyExc_TypeError, str);
return 0;
}
static PyObject* opencv_error = 0;
class PyAllowThreads
{
public:
PyAllowThreads() : _state(PyEval_SaveThread()) {}
~PyAllowThreads()
{
PyEval_RestoreThread(_state);
}
private:
PyThreadState* _state;
};
#define ERRWRAP2(expr) \
try \
{ \
PyAllowThreads allowThreads; \
expr; \
} \
catch (const cv::Exception &e) \
{ \
PyErr_SetString(opencv_error, e.what()); \
return 0; \
}
// Taken from http://stackoverflow.com/questions/19136944/call-c-opencv-functions-from-python-send-a-cv-mat-to-c-dll-which-is-usi
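// REFCOUNT_OFFSET is the byte offset of ob_refcnt within PyObject. The refcount is
// accessed below through an int*, so on big-endian platforms where size_t is wider
// than int the offset must also skip the high-order bytes; the string-literal probe
// detects the byte order at run time.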
static size_t REFCOUNT_OFFSET = ( size_t )&((( PyObject* )0)->ob_refcnt ) +
( 0x12345678 != *( const size_t* )"\x78\x56\x34\x12\0\0\0\0\0" )*sizeof( int );
static inline PyObject* pyObjectFromRefcount( const int* refcount )
{
return ( PyObject* )(( size_t )refcount - REFCOUNT_OFFSET );
}
static inline int* refcountFromPyObject( const PyObject* obj )
{
return ( int* )(( size_t )obj + REFCOUNT_OFFSET );
}
class NumpyAllocator : public cv::MatAllocator
{
public:
NumpyAllocator( ) { }
~NumpyAllocator( ) { }
void allocate( int dims, const int* sizes, int type, int*& refcount,
uchar*& datastart, uchar*& data, size_t* step );
void deallocate( int* refcount, uchar* datastart, uchar* data );
};
void NumpyAllocator::allocate( int dims, const int* sizes, int type, int*& refcount, uchar*& datastart, uchar*& data, size_t* step )
{
int depth = CV_MAT_DEPTH( type );
int cn = CV_MAT_CN( type );
const int f = ( int )( sizeof( size_t )/8 );
int typenum = depth == CV_8U ? NPY_UBYTE : depth == CV_8S ? NPY_BYTE :
depth == CV_16U ? NPY_USHORT : depth == CV_16S ? NPY_SHORT :
depth == CV_32S ? NPY_INT : depth == CV_32F ? NPY_FLOAT :
depth == CV_64F ? NPY_DOUBLE : f*NPY_ULONGLONG + (f^1)*NPY_UINT;
int i;
npy_intp _sizes[CV_MAX_DIM+1];
for( i = 0; i < dims; i++ )
_sizes[i] = sizes[i];
if( cn > 1 )
{
/*if( _sizes[dims-1] == 1 )
_sizes[dims-1] = cn;
else*/
_sizes[dims++] = cn;
}
PyObject* o = PyArray_SimpleNew( dims, _sizes, typenum );
if( !o )
CV_Error_(CV_StsError, ("The numpy array of typenum=%d, ndims=%d can not be created", typenum, dims));
refcount = refcountFromPyObject(o);
npy_intp* _strides = PyArray_STRIDES((PyArrayObject*) o);
for( i = 0; i < dims - (cn > 1); i++ )
step[i] = (size_t)_strides[i];
datastart = data = (uchar*)PyArray_DATA((PyArrayObject*)o);
}
void NumpyAllocator::deallocate( int* refcount, uchar* datastart, uchar* data )
{
if( !refcount )
return;
PyObject* o = pyObjectFromRefcount(refcount);
Py_INCREF(o);
Py_DECREF(o);
}
// Declare the object
NumpyAllocator g_numpyAllocator;
int convert_to_CvMat2(const PyObject* o, cv::Mat& m)
{
// to avoid PyArray_Check() to crash even with valid array
do_numpy_import();
if(!o || o == Py_None)
{
if( !m.data )
m.allocator = &g_numpyAllocator;
return true;
}
if( !PyArray_Check(o) )
{
failmsg("Not a numpy array");
return false;
}
// NPY_LONG (64 bit) is converted to CV_32S (32 bit)
int typenum = PyArray_TYPE((PyArrayObject*) o);
int type = typenum == NPY_UBYTE ? CV_8U : typenum == NPY_BYTE ? CV_8S :
typenum == NPY_USHORT ? CV_16U : typenum == NPY_SHORT ? CV_16S :
typenum == NPY_INT || typenum == NPY_LONG ? CV_32S :
typenum == NPY_FLOAT ? CV_32F :
typenum == NPY_DOUBLE ? CV_64F : -1;
if( type < 0 )
{
failmsg("data type = %d is not supported", typenum);
return false;
}
int ndims = PyArray_NDIM((PyArrayObject*) o);
if(ndims >= CV_MAX_DIM)
{
failmsg("dimensionality (=%d) is too high", ndims);
return false;
}
int size[CV_MAX_DIM+1];
size_t step[CV_MAX_DIM+1], elemsize = CV_ELEM_SIZE1(type);
const npy_intp* _sizes = PyArray_DIMS((PyArrayObject*) o);
const npy_intp* _strides = PyArray_STRIDES((PyArrayObject*) o);
bool transposed = false;
for(int i = 0; i < ndims; i++)
{
size[i] = (int)_sizes[i];
step[i] = (size_t)_strides[i];
}
if( ndims == 0 || step[ndims-1] > elemsize ) {
size[ndims] = 1;
step[ndims] = elemsize;
ndims++;
}
if( ndims >= 2 && step[0] < step[1] )
{
std::swap(size[0], size[1]);
std::swap(step[0], step[1]);
transposed = true;
}
if( ndims == 3 && size[2] <= CV_CN_MAX && step[1] == elemsize*size[2] )
{
ndims--;
type |= CV_MAKETYPE(0, size[2]);
}
if( ndims > 2 )
{
failmsg("more than 2 dimensions");
return false;
}
m = cv::Mat(ndims, size, type, PyArray_DATA((PyArrayObject*) o), step);
if( m.data )
{
m.refcount = refcountFromPyObject(o);
m.addref(); // protect the original numpy array from deallocation
// (since Mat destructor will decrement the reference counter)
};
m.allocator = &g_numpyAllocator;
if( transposed )
{
cv::Mat tmp;
tmp.allocator = &g_numpyAllocator;
transpose(m, tmp);
m = tmp;
}
return true;
}
PyObject* pyopencv_from(const Mat& m)
{
if( !m.data )
Py_RETURN_NONE;
Mat temp, *p = (Mat*)&m;
if(!p->refcount || p->allocator != &g_numpyAllocator)
{
temp.allocator = &g_numpyAllocator;
ERRWRAP2(m.copyTo(temp));
p = &temp;
}
p->addref();
return pyObjectFromRefcount(p->refcount);
}


@@ -0,0 +1,367 @@ src/vision_opencv/cv_bridge/src/module_opencv3.cpp
// Taken from opencv/modules/python/src2/cv2.cpp
#include "module.hpp"
#include "opencv2/core/types_c.h"
#include "opencv2/opencv_modules.hpp"
#include "pycompat.hpp"
static PyObject* opencv_error = 0;
static int failmsg(const char *fmt, ...)
{
char str[1000];
va_list ap;
va_start(ap, fmt);
vsnprintf(str, sizeof(str), fmt, ap);
va_end(ap);
PyErr_SetString(PyExc_TypeError, str);
return 0;
}
struct ArgInfo
{
const char * name;
bool outputarg;
// more fields may be added if necessary
ArgInfo(const char * name_, bool outputarg_)
: name(name_)
, outputarg(outputarg_) {}
// to match with older pyopencv_to function signature
operator const char *() const { return name; }
};
class PyAllowThreads
{
public:
PyAllowThreads() : _state(PyEval_SaveThread()) {}
~PyAllowThreads()
{
PyEval_RestoreThread(_state);
}
private:
PyThreadState* _state;
};
class PyEnsureGIL
{
public:
PyEnsureGIL() : _state(PyGILState_Ensure()) {}
~PyEnsureGIL()
{
PyGILState_Release(_state);
}
private:
PyGILState_STATE _state;
};
#define ERRWRAP2(expr) \
try \
{ \
PyAllowThreads allowThreads; \
expr; \
} \
catch (const cv::Exception &e) \
{ \
PyErr_SetString(opencv_error, e.what()); \
return 0; \
}
using namespace cv;
static PyObject* failmsgp(const char *fmt, ...)
{
char str[1000];
va_list ap;
va_start(ap, fmt);
vsnprintf(str, sizeof(str), fmt, ap);
va_end(ap);
PyErr_SetString(PyExc_TypeError, str);
return 0;
}
class NumpyAllocator : public MatAllocator
{
public:
NumpyAllocator() { stdAllocator = Mat::getStdAllocator(); }
~NumpyAllocator() {}
UMatData* allocate(PyObject* o, int dims, const int* sizes, int type, size_t* step) const
{
UMatData* u = new UMatData(this);
u->data = u->origdata = (uchar*)PyArray_DATA((PyArrayObject*) o);
npy_intp* _strides = PyArray_STRIDES((PyArrayObject*) o);
for( int i = 0; i < dims - 1; i++ )
step[i] = (size_t)_strides[i];
step[dims-1] = CV_ELEM_SIZE(type);
u->size = sizes[0]*step[0];
u->userdata = o;
return u;
}
UMatData* allocate(int dims0, const int* sizes, int type, void* data, size_t* step, int flags, UMatUsageFlags usageFlags) const
{
if( data != 0 )
{
CV_Error(Error::StsAssert, "The data should normally be NULL!");
// probably this is safe to do in such extreme case
return stdAllocator->allocate(dims0, sizes, type, data, step, flags, usageFlags);
}
PyEnsureGIL gil;
int depth = CV_MAT_DEPTH(type);
int cn = CV_MAT_CN(type);
const int f = (int)(sizeof(size_t)/8);
int typenum = depth == CV_8U ? NPY_UBYTE : depth == CV_8S ? NPY_BYTE :
depth == CV_16U ? NPY_USHORT : depth == CV_16S ? NPY_SHORT :
depth == CV_32S ? NPY_INT : depth == CV_32F ? NPY_FLOAT :
depth == CV_64F ? NPY_DOUBLE : f*NPY_ULONGLONG + (f^1)*NPY_UINT;
int i, dims = dims0;
cv::AutoBuffer<npy_intp> _sizes(dims + 1);
for( i = 0; i < dims; i++ )
_sizes[i] = sizes[i];
if( cn > 1 )
_sizes[dims++] = cn;
PyObject* o = PyArray_SimpleNew(dims, _sizes, typenum);
if(!o)
CV_Error_(Error::StsError, ("The numpy array of typenum=%d, ndims=%d can not be created", typenum, dims));
return allocate(o, dims0, sizes, type, step);
}
bool allocate(UMatData* u, int accessFlags, UMatUsageFlags usageFlags) const
{
return stdAllocator->allocate(u, accessFlags, usageFlags);
}
void deallocate(UMatData* u) const
{
if(u)
{
PyEnsureGIL gil;
PyObject* o = (PyObject*)u->userdata;
Py_XDECREF(o);
delete u;
}
}
const MatAllocator* stdAllocator;
};
NumpyAllocator g_numpyAllocator;
template<typename T> static
bool pyopencv_to(PyObject* obj, T& p, const char* name = "<unknown>");
template<typename T> static
PyObject* pyopencv_from(const T& src);
enum { ARG_NONE = 0, ARG_MAT = 1, ARG_SCALAR = 2 };
// special case, when the convertor needs full ArgInfo structure
static bool pyopencv_to(PyObject* o, Mat& m, const ArgInfo info)
{
// to avoid PyArray_Check() to crash even with valid array
do_numpy_import( );
bool allowND = true;
if(!o || o == Py_None)
{
if( !m.data )
m.allocator = &g_numpyAllocator;
return true;
}
if( PyInt_Check(o) )
{
double v[] = {(double)PyInt_AsLong((PyObject*)o), 0., 0., 0.};
m = Mat(4, 1, CV_64F, v).clone();
return true;
}
if( PyFloat_Check(o) )
{
double v[] = {PyFloat_AsDouble((PyObject*)o), 0., 0., 0.};
m = Mat(4, 1, CV_64F, v).clone();
return true;
}
if( PyTuple_Check(o) )
{
int i, sz = (int)PyTuple_Size((PyObject*)o);
m = Mat(sz, 1, CV_64F);
for( i = 0; i < sz; i++ )
{
PyObject* oi = PyTuple_GET_ITEM(o, i);
if( PyInt_Check(oi) )
m.at<double>(i) = (double)PyInt_AsLong(oi);
else if( PyFloat_Check(oi) )
m.at<double>(i) = (double)PyFloat_AsDouble(oi);
else
{
failmsg("%s is not a numerical tuple", info.name);
m.release();
return false;
}
}
return true;
}
if( !PyArray_Check(o) )
{
failmsg("%s is not a numpy array, neither a scalar", info.name);
return false;
}
PyArrayObject* oarr = (PyArrayObject*) o;
bool needcopy = false, needcast = false;
int typenum = PyArray_TYPE(oarr), new_typenum = typenum;
int type = typenum == NPY_UBYTE ? CV_8U :
typenum == NPY_BYTE ? CV_8S :
typenum == NPY_USHORT ? CV_16U :
typenum == NPY_SHORT ? CV_16S :
typenum == NPY_INT ? CV_32S :
typenum == NPY_INT32 ? CV_32S :
typenum == NPY_FLOAT ? CV_32F :
typenum == NPY_DOUBLE ? CV_64F : -1;
if( type < 0 )
{
if( typenum == NPY_INT64 || typenum == NPY_UINT64 || typenum == NPY_LONG )
{
needcopy = needcast = true;
new_typenum = NPY_INT;
type = CV_32S;
}
else
{
failmsg("%s data type = %d is not supported", info.name, typenum);
return false;
}
}
#ifndef CV_MAX_DIM
const int CV_MAX_DIM = 32;
#endif
int ndims = PyArray_NDIM(oarr);
if(ndims >= CV_MAX_DIM)
{
failmsg("%s dimensionality (=%d) is too high", info.name, ndims);
return false;
}
int size[CV_MAX_DIM+1];
size_t step[CV_MAX_DIM+1];
size_t elemsize = CV_ELEM_SIZE1(type);
const npy_intp* _sizes = PyArray_DIMS(oarr);
const npy_intp* _strides = PyArray_STRIDES(oarr);
bool ismultichannel = ndims == 3 && _sizes[2] <= CV_CN_MAX;
for( int i = ndims-1; i >= 0 && !needcopy; i-- )
{
// these checks handle cases of
// a) multi-dimensional (ndims > 2) arrays, as well as simpler 1- and 2-dimensional cases
// b) transposed arrays, where _strides[] elements go in non-descending order
// c) flipped arrays, where some of _strides[] elements are negative
if( (i == ndims-1 && (size_t)_strides[i] != elemsize) ||
(i < ndims-1 && _strides[i] < _strides[i+1]) )
needcopy = true;
}
if( ismultichannel && _strides[1] != (npy_intp)elemsize*_sizes[2] )
needcopy = true;
if (needcopy)
{
if (info.outputarg)
{
failmsg("Layout of the output array %s is incompatible with cv::Mat (step[ndims-1] != elemsize or step[1] != elemsize*nchannels)", info.name);
return false;
}
if( needcast ) {
o = PyArray_Cast(oarr, new_typenum);
oarr = (PyArrayObject*) o;
}
else {
oarr = PyArray_GETCONTIGUOUS(oarr);
o = (PyObject*) oarr;
}
_strides = PyArray_STRIDES(oarr);
}
for(int i = 0; i < ndims; i++)
{
size[i] = (int)_sizes[i];
step[i] = (size_t)_strides[i];
}
// handle degenerate case
if( ndims == 0) {
size[ndims] = 1;
step[ndims] = elemsize;
ndims++;
}
if( ismultichannel )
{
ndims--;
type |= CV_MAKETYPE(0, size[2]);
}
if( ndims > 2 && !allowND )
{
failmsg("%s has more than 2 dimensions", info.name);
return false;
}
m = Mat(ndims, size, type, PyArray_DATA(oarr), step);
m.u = g_numpyAllocator.allocate(o, ndims, size, type, step);
m.addref();
if( !needcopy )
{
Py_INCREF(o);
}
m.allocator = &g_numpyAllocator;
return true;
}
template<>
bool pyopencv_to(PyObject* o, Mat& m, const char* name)
{
return pyopencv_to(o, m, ArgInfo(name, 0));
}
PyObject* pyopencv_from(const Mat& m)
{
if( !m.data )
Py_RETURN_NONE;
Mat temp, *p = (Mat*)&m;
if(!p->u || p->allocator != &g_numpyAllocator)
{
temp.allocator = &g_numpyAllocator;
ERRWRAP2(m.copyTo(temp));
p = &temp;
}
PyObject* o = (PyObject*)p->u->userdata;
Py_INCREF(o);
return o;
}
int convert_to_CvMat2(const PyObject* o, cv::Mat& m)
{
pyopencv_to(const_cast<PyObject*>(o), m, "unknown");
return 0;
}


@@ -0,0 +1,70 @@ src/vision_opencv/cv_bridge/src/pycompat.hpp
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
// Defines for Python 2/3 compatibility.
#ifndef __PYCOMPAT_HPP__
#define __PYCOMPAT_HPP__
#if PY_MAJOR_VERSION >= 3
// Python3 treats all ints as longs, PyInt_X functions have been removed.
#define PyInt_Check PyLong_Check
#define PyInt_CheckExact PyLong_CheckExact
#define PyInt_AsLong PyLong_AsLong
#define PyInt_AS_LONG PyLong_AS_LONG
#define PyInt_FromLong PyLong_FromLong
#define PyNumber_Int PyNumber_Long
// Python3 strings are unicode, these defines mimic the Python2 functionality.
#define PyString_Check PyUnicode_Check
#define PyString_FromString PyUnicode_FromString
#define PyString_FromStringAndSize PyUnicode_FromStringAndSize
#define PyString_Size PyUnicode_GET_SIZE
// PyUnicode_AsUTF8 isn't available until Python 3.3
#if (PY_VERSION_HEX < 0x03030000)
#define PyString_AsString _PyUnicode_AsString
#else
#define PyString_AsString PyUnicode_AsUTF8
#endif
#endif
#endif // END HEADER GUARD


@@ -0,0 +1,202 @@ src/vision_opencv/cv_bridge/src/rgb_colors.cpp
// -*- mode: c++ -*-
/*********************************************************************
* Original color definition is at scikit-image distributed with
* following license disclaimer:
*
* Copyright (C) 2011, the scikit-image team
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* 3. Neither the name of skimage nor the names of its contributors may be
* used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
* INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*********************************************************************/
#include "cv_bridge/rgb_colors.h"
#include <opencv2/core/core.hpp>
namespace cv_bridge
{
namespace rgb_colors
{
cv::Vec3d getRGBColor(const int color)
{
cv::Vec3d c;
switch (color % 146) {
case ALICEBLUE: c = cv::Vec3d(0.941, 0.973, 1); break;
case ANTIQUEWHITE: c = cv::Vec3d(0.98, 0.922, 0.843); break;
case AQUA: c = cv::Vec3d(0, 1, 1); break;
case AQUAMARINE: c = cv::Vec3d(0.498, 1, 0.831); break;
case AZURE: c = cv::Vec3d(0.941, 1, 1); break;
case BEIGE: c = cv::Vec3d(0.961, 0.961, 0.863); break;
case BISQUE: c = cv::Vec3d(1, 0.894, 0.769); break;
case BLACK: c = cv::Vec3d(0, 0, 0); break;
case BLANCHEDALMOND: c = cv::Vec3d(1, 0.922, 0.804); break;
case BLUE: c = cv::Vec3d(0, 0, 1); break;
case BLUEVIOLET: c = cv::Vec3d(0.541, 0.169, 0.886); break;
case BROWN: c = cv::Vec3d(0.647, 0.165, 0.165); break;
case BURLYWOOD: c = cv::Vec3d(0.871, 0.722, 0.529); break;
case CADETBLUE: c = cv::Vec3d(0.373, 0.62, 0.627); break;
case CHARTREUSE: c = cv::Vec3d(0.498, 1, 0); break;
case CHOCOLATE: c = cv::Vec3d(0.824, 0.412, 0.118); break;
case CORAL: c = cv::Vec3d(1, 0.498, 0.314); break;
case CORNFLOWERBLUE: c = cv::Vec3d(0.392, 0.584, 0.929); break;
case CORNSILK: c = cv::Vec3d(1, 0.973, 0.863); break;
case CRIMSON: c = cv::Vec3d(0.863, 0.0784, 0.235); break;
case CYAN: c = cv::Vec3d(0, 1, 1); break;
case DARKBLUE: c = cv::Vec3d(0, 0, 0.545); break;
case DARKCYAN: c = cv::Vec3d(0, 0.545, 0.545); break;
case DARKGOLDENROD: c = cv::Vec3d(0.722, 0.525, 0.0431); break;
case DARKGRAY: c = cv::Vec3d(0.663, 0.663, 0.663); break;
case DARKGREEN: c = cv::Vec3d(0, 0.392, 0); break;
case DARKGREY: c = cv::Vec3d(0.663, 0.663, 0.663); break;
case DARKKHAKI: c = cv::Vec3d(0.741, 0.718, 0.42); break;
case DARKMAGENTA: c = cv::Vec3d(0.545, 0, 0.545); break;
case DARKOLIVEGREEN: c = cv::Vec3d(0.333, 0.42, 0.184); break;
case DARKORANGE: c = cv::Vec3d(1, 0.549, 0); break;
case DARKORCHID: c = cv::Vec3d(0.6, 0.196, 0.8); break;
case DARKRED: c = cv::Vec3d(0.545, 0, 0); break;
case DARKSALMON: c = cv::Vec3d(0.914, 0.588, 0.478); break;
case DARKSEAGREEN: c = cv::Vec3d(0.561, 0.737, 0.561); break;
case DARKSLATEBLUE: c = cv::Vec3d(0.282, 0.239, 0.545); break;
case DARKSLATEGRAY: c = cv::Vec3d(0.184, 0.31, 0.31); break;
case DARKSLATEGREY: c = cv::Vec3d(0.184, 0.31, 0.31); break;
case DARKTURQUOISE: c = cv::Vec3d(0, 0.808, 0.82); break;
case DARKVIOLET: c = cv::Vec3d(0.58, 0, 0.827); break;
case DEEPPINK: c = cv::Vec3d(1, 0.0784, 0.576); break;
case DEEPSKYBLUE: c = cv::Vec3d(0, 0.749, 1); break;
case DIMGRAY: c = cv::Vec3d(0.412, 0.412, 0.412); break;
case DIMGREY: c = cv::Vec3d(0.412, 0.412, 0.412); break;
case DODGERBLUE: c = cv::Vec3d(0.118, 0.565, 1); break;
case FIREBRICK: c = cv::Vec3d(0.698, 0.133, 0.133); break;
case FLORALWHITE: c = cv::Vec3d(1, 0.98, 0.941); break;
case FORESTGREEN: c = cv::Vec3d(0.133, 0.545, 0.133); break;
case FUCHSIA: c = cv::Vec3d(1, 0, 1); break;
case GAINSBORO: c = cv::Vec3d(0.863, 0.863, 0.863); break;
case GHOSTWHITE: c = cv::Vec3d(0.973, 0.973, 1); break;
case GOLD: c = cv::Vec3d(1, 0.843, 0); break;
case GOLDENROD: c = cv::Vec3d(0.855, 0.647, 0.125); break;
case GRAY: c = cv::Vec3d(0.502, 0.502, 0.502); break;
case GREEN: c = cv::Vec3d(0, 0.502, 0); break;
case GREENYELLOW: c = cv::Vec3d(0.678, 1, 0.184); break;
case GREY: c = cv::Vec3d(0.502, 0.502, 0.502); break;
case HONEYDEW: c = cv::Vec3d(0.941, 1, 0.941); break;
case HOTPINK: c = cv::Vec3d(1, 0.412, 0.706); break;
case INDIANRED: c = cv::Vec3d(0.804, 0.361, 0.361); break;
case INDIGO: c = cv::Vec3d(0.294, 0, 0.51); break;
case IVORY: c = cv::Vec3d(1, 1, 0.941); break;
case KHAKI: c = cv::Vec3d(0.941, 0.902, 0.549); break;
case LAVENDER: c = cv::Vec3d(0.902, 0.902, 0.98); break;
case LAVENDERBLUSH: c = cv::Vec3d(1, 0.941, 0.961); break;
case LAWNGREEN: c = cv::Vec3d(0.486, 0.988, 0); break;
case LEMONCHIFFON: c = cv::Vec3d(1, 0.98, 0.804); break;
case LIGHTBLUE: c = cv::Vec3d(0.678, 0.847, 0.902); break;
case LIGHTCORAL: c = cv::Vec3d(0.941, 0.502, 0.502); break;
case LIGHTCYAN: c = cv::Vec3d(0.878, 1, 1); break;
case LIGHTGOLDENRODYELLOW: c = cv::Vec3d(0.98, 0.98, 0.824); break;
case LIGHTGRAY: c = cv::Vec3d(0.827, 0.827, 0.827); break;
case LIGHTGREEN: c = cv::Vec3d(0.565, 0.933, 0.565); break;
case LIGHTGREY: c = cv::Vec3d(0.827, 0.827, 0.827); break;
case LIGHTPINK: c = cv::Vec3d(1, 0.714, 0.757); break;
case LIGHTSALMON: c = cv::Vec3d(1, 0.627, 0.478); break;
case LIGHTSEAGREEN: c = cv::Vec3d(0.125, 0.698, 0.667); break;
case LIGHTSKYBLUE: c = cv::Vec3d(0.529, 0.808, 0.98); break;
case LIGHTSLATEGRAY: c = cv::Vec3d(0.467, 0.533, 0.6); break;
case LIGHTSLATEGREY: c = cv::Vec3d(0.467, 0.533, 0.6); break;
case LIGHTSTEELBLUE: c = cv::Vec3d(0.69, 0.769, 0.871); break;
case LIGHTYELLOW: c = cv::Vec3d(1, 1, 0.878); break;
case LIME: c = cv::Vec3d(0, 1, 0); break;
case LIMEGREEN: c = cv::Vec3d(0.196, 0.804, 0.196); break;
case LINEN: c = cv::Vec3d(0.98, 0.941, 0.902); break;
case MAGENTA: c = cv::Vec3d(1, 0, 1); break;
case MAROON: c = cv::Vec3d(0.502, 0, 0); break;
case MEDIUMAQUAMARINE: c = cv::Vec3d(0.4, 0.804, 0.667); break;
case MEDIUMBLUE: c = cv::Vec3d(0, 0, 0.804); break;
case MEDIUMORCHID: c = cv::Vec3d(0.729, 0.333, 0.827); break;
case MEDIUMPURPLE: c = cv::Vec3d(0.576, 0.439, 0.859); break;
case MEDIUMSEAGREEN: c = cv::Vec3d(0.235, 0.702, 0.443); break;
case MEDIUMSLATEBLUE: c = cv::Vec3d(0.482, 0.408, 0.933); break;
case MEDIUMSPRINGGREEN: c = cv::Vec3d(0, 0.98, 0.604); break;
case MEDIUMTURQUOISE: c = cv::Vec3d(0.282, 0.82, 0.8); break;
case MEDIUMVIOLETRED: c = cv::Vec3d(0.78, 0.0824, 0.522); break;
case MIDNIGHTBLUE: c = cv::Vec3d(0.098, 0.098, 0.439); break;
case MINTCREAM: c = cv::Vec3d(0.961, 1, 0.98); break;
case MISTYROSE: c = cv::Vec3d(1, 0.894, 0.882); break;
case MOCCASIN: c = cv::Vec3d(1, 0.894, 0.71); break;
case NAVAJOWHITE: c = cv::Vec3d(1, 0.871, 0.678); break;
case NAVY: c = cv::Vec3d(0, 0, 0.502); break;
case OLDLACE: c = cv::Vec3d(0.992, 0.961, 0.902); break;
case OLIVE: c = cv::Vec3d(0.502, 0.502, 0); break;
case OLIVEDRAB: c = cv::Vec3d(0.42, 0.557, 0.137); break;
case ORANGE: c = cv::Vec3d(1, 0.647, 0); break;
case ORANGERED: c = cv::Vec3d(1, 0.271, 0); break;
case ORCHID: c = cv::Vec3d(0.855, 0.439, 0.839); break;
case PALEGOLDENROD: c = cv::Vec3d(0.933, 0.91, 0.667); break;
case PALEGREEN: c = cv::Vec3d(0.596, 0.984, 0.596); break;
case PALEVIOLETRED: c = cv::Vec3d(0.686, 0.933, 0.933); break;
case PAPAYAWHIP: c = cv::Vec3d(1, 0.937, 0.835); break;
case PEACHPUFF: c = cv::Vec3d(1, 0.855, 0.725); break;
case PERU: c = cv::Vec3d(0.804, 0.522, 0.247); break;
case PINK: c = cv::Vec3d(1, 0.753, 0.796); break;
case PLUM: c = cv::Vec3d(0.867, 0.627, 0.867); break;
case POWDERBLUE: c = cv::Vec3d(0.69, 0.878, 0.902); break;
case PURPLE: c = cv::Vec3d(0.502, 0, 0.502); break;
case RED: c = cv::Vec3d(1, 0, 0); break;
case ROSYBROWN: c = cv::Vec3d(0.737, 0.561, 0.561); break;
case ROYALBLUE: c = cv::Vec3d(0.255, 0.412, 0.882); break;
case SADDLEBROWN: c = cv::Vec3d(0.545, 0.271, 0.0745); break;
case SALMON: c = cv::Vec3d(0.98, 0.502, 0.447); break;
case SANDYBROWN: c = cv::Vec3d(0.98, 0.643, 0.376); break;
case SEAGREEN: c = cv::Vec3d(0.18, 0.545, 0.341); break;
case SEASHELL: c = cv::Vec3d(1, 0.961, 0.933); break;
case SIENNA: c = cv::Vec3d(0.627, 0.322, 0.176); break;
case SILVER: c = cv::Vec3d(0.753, 0.753, 0.753); break;
case SKYBLUE: c = cv::Vec3d(0.529, 0.808, 0.922); break;
case SLATEBLUE: c = cv::Vec3d(0.416, 0.353, 0.804); break;
case SLATEGRAY: c = cv::Vec3d(0.439, 0.502, 0.565); break;
case SLATEGREY: c = cv::Vec3d(0.439, 0.502, 0.565); break;
case SNOW: c = cv::Vec3d(1, 0.98, 0.98); break;
case SPRINGGREEN: c = cv::Vec3d(0, 1, 0.498); break;
case STEELBLUE: c = cv::Vec3d(0.275, 0.51, 0.706); break;
case TAN: c = cv::Vec3d(0.824, 0.706, 0.549); break;
case TEAL: c = cv::Vec3d(0, 0.502, 0.502); break;
case THISTLE: c = cv::Vec3d(0.847, 0.749, 0.847); break;
case TOMATO: c = cv::Vec3d(1, 0.388, 0.278); break;
case TURQUOISE: c = cv::Vec3d(0.251, 0.878, 0.816); break;
case VIOLET: c = cv::Vec3d(0.933, 0.51, 0.933); break;
case WHEAT: c = cv::Vec3d(0.961, 0.871, 0.702); break;
case WHITE: c = cv::Vec3d(1, 1, 1); break;
case WHITESMOKE: c = cv::Vec3d(0.961, 0.961, 0.961); break;
case YELLOW: c = cv::Vec3d(1, 1, 0); break;
case YELLOWGREEN: c = cv::Vec3d(0.604, 0.804, 0.196); break;
} // switch
return c;
}
} // namespace rgb_colors
} // namespace cv_bridge
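
A small sketch of how this palette is consumed (mirroring the label-colorization loop in cvtColorForDisplay above; the helper name is illustrative):

#include <cv_bridge/rgb_colors.h>
#include <opencv2/core/core.hpp>

// Map a label id to an 8-bit BGR pixel; getRGBColor returns RGB components
// in [0, 1] and the palette repeats every 146 labels.
cv::Vec3b labelToBgr(int label)
{
  const cv::Vec3d rgb = cv_bridge::rgb_colors::getRGBColor(label);
  return cv::Vec3b(static_cast<uchar>(rgb[2] * 255),
                   static_cast<uchar>(rgb[1] * 255),
                   static_cast<uchar>(rgb[0] * 255));
}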


@@ -0,0 +1,15 @@ src/vision_opencv/cv_bridge/test/CMakeLists.txt
# add the tests
# add boost directories for now
include_directories("../src")
catkin_add_gtest(${PROJECT_NAME}-utest test_endian.cpp test_compression.cpp utest.cpp utest2.cpp test_rgb_colors.cpp)
target_link_libraries(${PROJECT_NAME}-utest
${PROJECT_NAME}
${OpenCV_LIBRARIES}
${catkin_LIBRARIES}
)
catkin_add_nosetests(enumerants.py)
catkin_add_nosetests(conversions.py)
catkin_add_nosetests(python_bindings.py)


@@ -0,0 +1,87 @@ src/vision_opencv/cv_bridge/test/conversions.py
#!/usr/bin/env python
import unittest

import numpy as np
import rosunit
import sensor_msgs.msg

from cv_bridge import CvBridge, CvBridgeError


class TestConversions(unittest.TestCase):

    def test_mono16_cv2(self):
        br = CvBridge()
        im = np.uint8(np.random.randint(0, 255, size=(480, 640, 3)))
        self.assertRaises(CvBridgeError, lambda: br.imgmsg_to_cv2(br.cv2_to_imgmsg(im), "mono16"))
        br.imgmsg_to_cv2(br.cv2_to_imgmsg(im, "rgb8"), "mono16")

    def test_encode_decode_cv2(self):
        import cv2
        fmts = [cv2.CV_8U, cv2.CV_8S, cv2.CV_16U, cv2.CV_16S, cv2.CV_32S, cv2.CV_32F, cv2.CV_64F]
        cvb_en = CvBridge()
        cvb_de = CvBridge()
        for w in range(100, 800, 100):
            for h in range(100, 800, 100):
                for f in fmts:
                    for channels in ([], 1, 2, 3, 4, 5):
                        if channels == []:
                            original = np.uint8(np.random.randint(0, 255, size=(h, w)))
                        else:
                            original = np.uint8(np.random.randint(0, 255, size=(h, w, channels)))
                        rosmsg = cvb_en.cv2_to_imgmsg(original)
                        newimg = cvb_de.imgmsg_to_cv2(rosmsg)
                        self.assertTrue(original.dtype == newimg.dtype)
                        if channels == 1:
                            # in that case, a gray image has a shape of size 2
                            self.assertTrue(original.shape[:2] == newimg.shape[:2])
                        else:
                            self.assertTrue(original.shape == newimg.shape)
                        self.assertTrue(len(original.tobytes()) == len(newimg.tobytes()))

    def test_encode_decode_cv2_compressed(self):
        # From http://docs.opencv.org/2.4/modules/highgui/doc/reading_and_writing_images_and_video.html
        # NOTE: jp2 (a.k.a. JPEG2000) is removed since its JASPER codec is disabled in the Ubuntu
        # OpenCV packages due to security issues; it works once OpenCV is rebuilt with JASPER enabled.
        formats = ["jpg", "jpeg", "jpe", "png", "bmp", "dib", "ppm", "pgm", "pbm",
                   "sr", "ras", "tif", "tiff"]  # formats rviz does not support
        cvb_en = CvBridge()
        cvb_de = CvBridge()
        for w in range(100, 800, 100):
            for h in range(100, 800, 100):
                for f in formats:
                    for channels in ([], 1, 3):
                        if channels == []:
                            original = np.uint8(np.random.randint(0, 255, size=(h, w)))
                        else:
                            original = np.uint8(np.random.randint(0, 255, size=(h, w, channels)))
                        compress_rosmsg = cvb_en.cv2_to_compressed_imgmsg(original, f)
                        newimg = cvb_de.compressed_imgmsg_to_cv2(compress_rosmsg)
                        self.assertTrue(original.dtype == newimg.dtype)
                        if channels == 1:
                            # in that case, a gray image has a shape of size 2
                            self.assertTrue(original.shape[:2] == newimg.shape[:2])
                        else:
                            self.assertTrue(original.shape == newimg.shape)
                        self.assertTrue(len(original.tobytes()) == len(newimg.tobytes()))

    def test_endianness(self):
        br = CvBridge()
        dtype = np.dtype('int32')
        # Set to big endian.
        dtype = dtype.newbyteorder('>')
        img = np.random.randint(0, 255, size=(30, 40))
        msg = br.cv2_to_imgmsg(img.astype(dtype))
        self.assertTrue(msg.is_bigendian)
        self.assertTrue((br.imgmsg_to_cv2(msg) == img).all())


if __name__ == '__main__':
    rosunit.unitrun('opencv_tests', 'conversions', TestConversions)


@@ -0,0 +1,47 @@ src/vision_opencv/cv_bridge/test/enumerants.py
#!/usr/bin/env python
import sys
import unittest

import cv2
import numpy as np
import rosunit
import sensor_msgs.msg

from cv_bridge import CvBridge, CvBridgeError, getCvType


class TestEnumerants(unittest.TestCase):

    def test_enumerants_cv2(self):
        img_msg = sensor_msgs.msg.Image()
        img_msg.width = 640
        img_msg.height = 480
        img_msg.encoding = "rgba8"
        img_msg.step = 640 * 4
        img_msg.data = (640 * 480) * b"1234"
        bridge_ = CvBridge()
        cvim = bridge_.imgmsg_to_cv2(img_msg, "rgb8")
        # cvim should be the sole owner of the converted image
        # (getrefcount itself holds the extra reference)
        self.assertEqual(sys.getrefcount(cvim), 2)
        # A 3 channel image cannot be sent as an rgba8
        self.assertRaises(CvBridgeError, lambda: bridge_.cv2_to_imgmsg(cvim, "rgba8"))
        # but it can be sent as rgb8 and bgr8
        bridge_.cv2_to_imgmsg(cvim, "rgb8")
        bridge_.cv2_to_imgmsg(cvim, "bgr8")
        self.assertEqual(getCvType("32FC4"), cv2.CV_32FC4)
        self.assertEqual(getCvType("8UC1"), cv2.CV_8UC1)
        self.assertEqual(getCvType("8U"), cv2.CV_8UC1)

    def test_numpy_types(self):
        bridge_ = CvBridge()
        self.assertRaises(TypeError, lambda: bridge_.cv2_to_imgmsg(1, "rgba8"))
        if hasattr(cv2, 'cv'):
            self.assertRaises(TypeError, lambda: bridge_.cv2_to_imgmsg(cv2.cv(), "rgba8"))


if __name__ == '__main__':
    rosunit.unitrun('opencv_tests', 'enumerants', TestEnumerants)


@@ -0,0 +1,35 @@ src/vision_opencv/cv_bridge/test/python_bindings.py
import cv_bridge
import numpy as np
from nose.tools import assert_equal


def test_cvtColorForDisplay():
    # convert label image to display
    label = np.zeros((480, 640), dtype=np.int32)
    height, width = label.shape[:2]
    label_value = 0
    grid_num_y, grid_num_x = 3, 4
    for grid_row in range(grid_num_y):
        grid_size_y = height // grid_num_y
        min_y = grid_size_y * grid_row
        max_y = min_y + grid_size_y
        for grid_col in range(grid_num_x):
            grid_size_x = width // grid_num_x
            min_x = grid_size_x * grid_col
            max_x = min_x + grid_size_x
            label[min_y:max_y, min_x:max_x] = label_value
            label_value += 1
    label_viz = cv_bridge.cvtColorForDisplay(label, '32SC1', 'bgr8')
    assert_equal(label_viz.dtype, np.uint8)
    assert_equal(label_viz.min(), 0)
    assert_equal(label_viz.max(), 255)
    # Check that mono8 conversion returns the right shape.
    bridge = cv_bridge.CvBridge()
    mono = np.random.random((100, 100)) * 255
    mono = mono.astype(np.uint8)
    input_msg = bridge.cv2_to_imgmsg(mono, encoding='mono8')
    output = bridge.imgmsg_to_cv2(input_msg, desired_encoding='mono8')
    assert_equal(output.shape, (100, 100))


@@ -0,0 +1,36 @@ src/vision_opencv/cv_bridge/test/test_compression.cpp
#include <cv_bridge/cv_bridge.h>
#include <stdio.h>
#include <gtest/gtest.h>
TEST(CvBridgeTest, compression)
{
cv::RNG rng(0);
std_msgs::Header header;
// Test 3 channel images.
for (int i = 0; i < 2; ++i)
{
const std::string format = (i == 0) ? "bgr8" : "rgb8";
cv::Mat_<cv::Vec3b> in(10, 10);
rng.fill(in, cv::RNG::UNIFORM, 0, 256);
sensor_msgs::CompressedImagePtr msg = cv_bridge::CvImage(header, format, in).toCompressedImageMsg(cv_bridge::PNG);
const cv_bridge::CvImageConstPtr out = cv_bridge::toCvCopy(msg, format);
EXPECT_EQ(out->image.channels(), 3);
EXPECT_EQ(cv::norm(out->image, in), 0);
}
// Test 4 channel images.
for (int i = 0; i < 2; ++i)
{
const std::string format = (i == 0) ? "bgra8" : "rgba8";
cv::Mat_<cv::Vec4b> in(10, 10);
rng.fill(in, cv::RNG::UNIFORM, 0, 256);
sensor_msgs::CompressedImagePtr msg = cv_bridge::CvImage(header, format, in).toCompressedImageMsg(cv_bridge::PNG);
const cv_bridge::CvImageConstPtr out = cv_bridge::toCvCopy(msg, format);
EXPECT_EQ(out->image.channels(), 4);
EXPECT_EQ(cv::norm(out->image, in), 0);
}
}


@@ -0,0 +1,38 @@ src/vision_opencv/cv_bridge/test/test_endian.cpp
#include "boost/endian/conversion.hpp"
#include <boost/make_shared.hpp>
#include <cv_bridge/cv_bridge.h>
#include <gtest/gtest.h>
TEST(CvBridgeTest, endianness)
{
using namespace boost::endian;
// Create an image of the type opposite to the platform
sensor_msgs::Image msg;
msg.height = 1;
msg.width = 1;
msg.encoding = "32SC2";
msg.step = 8;
msg.data.resize(msg.step);
int32_t* data = reinterpret_cast<int32_t*>(&msg.data[0]);
// Write 1 and 2 in order, but with an endianness opposite to the platform
if (order::native == order::little)
{
msg.is_bigendian = true;
*(data++) = native_to_big(static_cast<int32_t>(1));
*data = native_to_big(static_cast<int32_t>(2));
} else {
msg.is_bigendian = false;
*(data++) = native_to_little(static_cast<int32_t>(1));
*data = native_to_little(static_cast<int32_t>(2));
}
// Make sure the values are still the same
cv_bridge::CvImageConstPtr img = cv_bridge::toCvShare(boost::make_shared<sensor_msgs::Image>(msg));
EXPECT_EQ(img->image.at<cv::Vec2i>(0, 0)[0], 1);
EXPECT_EQ(img->image.at<cv::Vec2i>(0, 0)[1], 2);
// Make sure we cannot share data
EXPECT_NE(img->image.data, &msg.data[0]);
}


@@ -0,0 +1,19 @@ src/vision_opencv/cv_bridge/test/test_rgb_colors.cpp
#include "cv_bridge/rgb_colors.h"
#include <opencv2/opencv.hpp>
#include <gtest/gtest.h>
TEST(RGBColors, testGetRGBColor)
{
cv::Vec3d color;
// red
color = cv_bridge::rgb_colors::getRGBColor(cv_bridge::rgb_colors::RED);
EXPECT_EQ(1, color[0]);
EXPECT_EQ(0, color[1]);
EXPECT_EQ(0, color[2]);
// gray
color = cv_bridge::rgb_colors::getRGBColor(cv_bridge::rgb_colors::GRAY);
EXPECT_EQ(0.502, color[0]);
EXPECT_EQ(0.502, color[1]);
EXPECT_EQ(0.502, color[2]);
}


@@ -0,0 +1,141 @@ src/vision_opencv/cv_bridge/test/utest.cpp
#include "cv_bridge/cv_bridge.h"
#include <sensor_msgs/image_encodings.h>
#include <gtest/gtest.h>
// Tests conversion of non-continuous cv::Mat. #5206
TEST(CvBridgeTest, NonContinuous)
{
cv::Mat full = cv::Mat::eye(8, 8, CV_16U);
cv::Mat partial = full.colRange(2, 5);
cv_bridge::CvImage cvi;
cvi.encoding = sensor_msgs::image_encodings::MONO16;
cvi.image = partial;
sensor_msgs::ImagePtr msg = cvi.toImageMsg();
EXPECT_EQ(msg->height, 8);
EXPECT_EQ(msg->width, 3);
EXPECT_EQ(msg->encoding, cvi.encoding);
EXPECT_EQ(msg->step, 6);
}
TEST(CvBridgeTest, ChannelOrder)
{
cv::Mat_<uint16_t> mat(200, 200);
mat.setTo(cv::Scalar(1000,0,0,0));
sensor_msgs::ImagePtr image(new sensor_msgs::Image());
image = cv_bridge::CvImage(image->header, sensor_msgs::image_encodings::MONO16, mat).toImageMsg();
cv_bridge::CvImageConstPtr cv_ptr = cv_bridge::toCvShare(image);
cv_bridge::CvImagePtr res = cv_bridge::cvtColor(cv_ptr, sensor_msgs::image_encodings::BGR8);
EXPECT_EQ(res->encoding, sensor_msgs::image_encodings::BGR8);
EXPECT_EQ(res->image.type(), cv_bridge::getCvType(res->encoding));
EXPECT_EQ(res->image.channels(), sensor_msgs::image_encodings::numChannels(res->encoding));
EXPECT_EQ(res->image.depth(), CV_8U);
// The matrix should be the following
cv::Mat_<cv::Vec3b> gt(200, 200);
gt.setTo(cv::Scalar(1, 1, 1)*1000.*255./65535.);
ASSERT_EQ(res->image.type(), gt.type());
EXPECT_EQ(cv::norm(res->image, gt, cv::NORM_INF), 0);
}
TEST(CvBridgeTest, initialization)
{
sensor_msgs::Image image;
cv_bridge::CvImagePtr cv_ptr;
image.encoding = "bgr8";
image.height = 200;
image.width = 200;
try {
cv_ptr = cv_bridge::toCvCopy(image, "mono8");
// Before the fix, it would never get here, as it would segfault
ADD_FAILURE() << "Expected a cv_bridge::Exception for a message without data";
} catch (cv_bridge::Exception& e) {
// expected: the malformed message is rejected instead of crashing
}
// Check some normal images with different ratios
for(int height = 100; height <= 300; ++height) {
image.height = height;
image.encoding = sensor_msgs::image_encodings::RGB8;
image.step = image.width*3;
image.data.resize(image.height*image.step);
cv_ptr = cv_bridge::toCvCopy(image, "mono8");
}
}
TEST(CvBridgeTest, imageMessageStep)
{
// Test 1: image step is padded
sensor_msgs::Image image;
cv_bridge::CvImagePtr cv_ptr;
image.encoding = "mono8";
image.height = 220;
image.width = 200;
image.is_bigendian = false;
image.step = 208;
image.data.resize(image.height*image.step);
ASSERT_NO_THROW(cv_ptr = cv_bridge::toCvCopy(image, "mono8"));
ASSERT_EQ(220, cv_ptr->image.rows);
ASSERT_EQ(200, cv_ptr->image.cols);
// OpenCV copyTo() removes the step padding
ASSERT_EQ(200, cv_ptr->image.step[0]);
//Test 2: image step is invalid
image.step = 199;
ASSERT_THROW(cv_ptr = cv_bridge::toCvCopy(image, "mono8"), cv_bridge::Exception);
//Test 3: image step == image.width * element size * number of channels
image.step = 200;
image.data.resize(image.height*image.step);
ASSERT_NO_THROW(cv_ptr = cv_bridge::toCvCopy(image, "mono8"));
ASSERT_EQ(220, cv_ptr->image.rows);
ASSERT_EQ(200, cv_ptr->image.cols);
ASSERT_EQ(200, cv_ptr->image.step[0]);
}
TEST(CvBridgeTest, imageMessageConversion)
{
sensor_msgs::Image imgmsg;
cv_bridge::CvImagePtr cv_ptr;
imgmsg.height = 220;
imgmsg.width = 200;
imgmsg.is_bigendian = false;
// image with data type float32 and 1 channels
imgmsg.encoding = "32FC1";
imgmsg.step = imgmsg.width * 32 / 8 * 1;
imgmsg.data.resize(imgmsg.height * imgmsg.step);
ASSERT_NO_THROW(cv_ptr = cv_bridge::toCvCopy(imgmsg, imgmsg.encoding));
ASSERT_EQ(imgmsg.height, cv_ptr->image.rows);
ASSERT_EQ(imgmsg.width, cv_ptr->image.cols);
ASSERT_EQ(1, cv_ptr->image.channels());
ASSERT_EQ(imgmsg.step, cv_ptr->image.step[0]);
// image with data type float32 and 10 channels
imgmsg.encoding = "32FC10";
imgmsg.step = imgmsg.width * 32 / 8 * 10;
imgmsg.data.resize(imgmsg.height * imgmsg.step);
ASSERT_NO_THROW(cv_ptr = cv_bridge::toCvCopy(imgmsg, imgmsg.encoding));
ASSERT_EQ(imgmsg.height, cv_ptr->image.rows);
ASSERT_EQ(imgmsg.width, cv_ptr->image.cols);
ASSERT_EQ(10, cv_ptr->image.channels());
ASSERT_EQ(imgmsg.step, cv_ptr->image.step[0]);
}
int main(int argc, char** argv)
{
testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
}


@@ -0,0 +1,150 @@ src/vision_opencv/cv_bridge/test/utest2.cpp
/*********************************************************************
* Software License Agreement (BSD License)
*
* Copyright (c) 2009, Willow Garage, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of the Willow Garage nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*********************************************************************/
#include <string>
#include <vector>
#include <gtest/gtest.h>
#include "opencv2/core/core.hpp"
#include "cv_bridge/cv_bridge.h"
#include <sensor_msgs/Image.h>
#include <sensor_msgs/image_encodings.h>
using namespace sensor_msgs::image_encodings;
bool isUnsigned(const std::string & encoding) {
return encoding == RGB8 || encoding == RGBA8 || encoding == RGB16 || encoding == RGBA16 ||
encoding == BGR8 || encoding == BGRA8 || encoding == BGR16 || encoding == BGRA16 ||
encoding == MONO8 || encoding == MONO16 ||
encoding == TYPE_8UC1 || encoding == TYPE_8UC2 || encoding == TYPE_8UC3 || encoding == TYPE_8UC4 ||
encoding == TYPE_16UC1 || encoding == TYPE_16UC2 || encoding == TYPE_16UC3 || encoding == TYPE_16UC4;
//BAYER_RGGB8, BAYER_BGGR8, BAYER_GBRG8, BAYER_GRBG8, BAYER_RGGB16, BAYER_BGGR16, BAYER_GBRG16, BAYER_GRBG16,
//YUV422
}
std::vector<std::string>
getEncodings() {
// TODO for Groovy, the following types should be uncommented
std::string encodings[] = { RGB8, RGBA8, RGB16, RGBA16, BGR8, BGRA8, BGR16, BGRA16, MONO8, MONO16,
TYPE_8UC1, /*TYPE_8UC2,*/ TYPE_8UC3, TYPE_8UC4,
TYPE_8SC1, /*TYPE_8SC2,*/ TYPE_8SC3, TYPE_8SC4,
TYPE_16UC1, /*TYPE_16UC2,*/ TYPE_16UC3, TYPE_16UC4,
TYPE_16SC1, /*TYPE_16SC2,*/ TYPE_16SC3, TYPE_16SC4,
TYPE_32SC1, /*TYPE_32SC2,*/ TYPE_32SC3, TYPE_32SC4,
TYPE_32FC1, /*TYPE_32FC2,*/ TYPE_32FC3, TYPE_32FC4,
TYPE_64FC1, /*TYPE_64FC2,*/ TYPE_64FC3, TYPE_64FC4,
//BAYER_RGGB8, BAYER_BGGR8, BAYER_GBRG8, BAYER_GRBG8, BAYER_RGGB16, BAYER_BGGR16, BAYER_GBRG16, BAYER_GRBG16,
YUV422
};
// Use the whole array; the 2-channel and Bayer encodings are commented out above
return std::vector<std::string>(encodings, encodings + sizeof(encodings) / sizeof(encodings[0]));
}
TEST(OpencvTests, testCase_encode_decode)
{
std::vector<std::string> encodings = getEncodings();
for(size_t i=0; i<encodings.size(); ++i) {
std::string src_encoding = encodings[i];
bool is_src_color_format = isColor(src_encoding) || isMono(src_encoding) || (src_encoding == sensor_msgs::image_encodings::YUV422);
cv::Mat image_original(cv::Size(400, 400), cv_bridge::getCvType(src_encoding));
cv::RNG r(77);
r.fill(image_original, cv::RNG::UNIFORM, 0, 127);
sensor_msgs::Image image_message;
cv_bridge::CvImage image_bridge(std_msgs::Header(), src_encoding, image_original);
// Convert to a sensor_msgs::Image
sensor_msgs::ImagePtr image_msg = image_bridge.toImageMsg();
for(size_t j=0; j<encodings.size(); ++j) {
std::string dst_encoding = encodings[j];
bool is_dst_color_format = isColor(dst_encoding) || isMono(dst_encoding) || (dst_encoding == sensor_msgs::image_encodings::YUV422);
bool is_num_channels_the_same = (numChannels(src_encoding) == numChannels(dst_encoding));
cv_bridge::CvImageConstPtr cv_image;
cv::Mat image_back;
// If the source encoding does not contain any color information
if (!is_src_color_format) {
// Converting from a non-color type to a color type makes no sense
if (is_dst_color_format) {
EXPECT_THROW(cv_bridge::toCvShare(image_msg, dst_encoding), cv_bridge::Exception);
continue;
}
// We can only convert non-color types with the same number of channels
if (!is_num_channels_the_same) {
EXPECT_THROW(cv_bridge::toCvShare(image_msg, dst_encoding), cv_bridge::Exception);
continue;
}
cv_image = cv_bridge::toCvShare(image_msg, dst_encoding);
} else {
// When converting to a non-color type, the number of channels must match
if (!is_dst_color_format) {
if (!is_num_channels_the_same) {
EXPECT_THROW(cv_bridge::toCvShare(image_msg, dst_encoding), cv_bridge::Exception);
continue;
}
cv_image = cv_bridge::toCvShare(image_msg, dst_encoding);
// We cannot convert from non-color to color
EXPECT_THROW(cvtColor(cv_image, src_encoding)->image, cv_bridge::Exception);
continue;
}
// We do not support conversion to YUV422 for now, except from YUV422
if ((dst_encoding == YUV422) && (src_encoding != YUV422)) {
EXPECT_THROW(cv_bridge::toCvShare(image_msg, dst_encoding), cv_bridge::Exception);
continue;
}
cv_image = cv_bridge::toCvShare(image_msg, dst_encoding);
// We do not support conversion to YUV422 for now, except from YUV422
if ((src_encoding == YUV422) && (dst_encoding != YUV422)) {
EXPECT_THROW(cvtColor(cv_image, src_encoding)->image, cv_bridge::Exception);
continue;
}
}
// And convert back to a cv::Mat
image_back = cvtColor(cv_image, src_encoding)->image;
// If the number of channels differs, some information was lost along the way, so no test is possible
if (!is_num_channels_the_same)
continue;
if (bitDepth(src_encoding) >= 32) {
// In the case where the input has floats, we will lose precision but no more than 1
EXPECT_LT(cv::norm(image_original, image_back, cv::NORM_INF), 1) << "problem converting from " << src_encoding << " to " << dst_encoding << " and back.";
} else if ((bitDepth(src_encoding) == 16) && (bitDepth(dst_encoding) == 8)) {
// When going from 16 bits down to 8 bits and back, the low-order bits are lost, so allow an error of up to 128
EXPECT_LT(cv::norm(image_original, image_back, cv::NORM_INF), 128) << "problem converting from " << src_encoding << " to " << dst_encoding << " and back.";
} else {
EXPECT_EQ(cv::norm(image_original, image_back, cv::NORM_INF), 0) << "problem converting from " << src_encoding << " to " << dst_encoding << " and back.";
}
}
}
}

View File

@@ -0,0 +1,383 @@
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Changelog for package image_geometry
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
1.13.1 (2022-10-03)
-------------------
* Update CMakeLists.txt for Windows build environment (`#265 <https://github.com/ros-perception/vision_opencv/issues/265>`_)
* Windows bringup
* Correct binary locations for shared libraries.
* Fix build break.
* Fix cv_bridge_boost.pyd.
* remove hard-coded STATIC (`#3 <https://github.com/ros-perception/vision_opencv/issues/3>`_)
* remove WINDOWS_EXPORT_ALL_SYMBOLS property (`#4 <https://github.com/ros-perception/vision_opencv/issues/4>`_)
* add DLL import/export macros (`#266 <https://github.com/ros-perception/vision_opencv/issues/266>`_)
* update macro names (`#2 <https://github.com/ros-perception/vision_opencv/issues/2>`_)
* add exports.h and dll import/export macros
* Contributors: James Xu
1.13.0 (2018-04-30)
-------------------
* Use rosdep OpenCV and not ROS one.
We definitely don't need the whole OpenCV.
We need to clean the rosdep keys.
* Contributors: Vincent Rabaud
1.12.8 (2018-04-17)
-------------------
* Merge pull request `#189 <https://github.com/ros-perception/vision_opencv/issues/189>`_ from ros2/python3_support_in_test
python 3 compatibility in test
* python 3 compatibility in test
* fix doc job
* Contributors: Mikael Arguedas, Vincent Rabaud
1.12.7 (2017-11-12)
-------------------
* get shared_ptr from boost or C++11
* Contributors: Vincent Rabaud
1.12.6 (2017-11-11)
-------------------
* missing STL includes
* Contributors: Mikael Arguedas, Vincent Rabaud
1.12.5 (2017-11-05)
-------------------
* Fix compilation issues.
Fix suggested by `#173 <https://github.com/ros-perception/vision_opencv/issues/173>`_ comment
* Make sure to initialize the distorted_image Mat.
Otherwise, valgrind throws errors about accessing uninitialized
memory.
Signed-off-by: Chris Lalancette <clalancette@osrfoundation.org>
* Remove the last remnants of boost from image_geometry.
All of its functionality can be had from std:: in C++11, so
use that instead. This also requires us to add the -std=c++11
flag.
Signed-off-by: Chris Lalancette <clalancette@osrfoundation.org>
* Contributors: Chris Lalancette, Vincent Rabaud
1.12.4 (2017-01-29)
-------------------
* Import using __future_\_ for python 3 compatibility.
* Contributors: Hans Gaiser
1.12.3 (2016-12-04)
-------------------
1.12.2 (2016-09-24)
-------------------
* Fix "stdlib.h: No such file or directory" errors in GCC-6
Including '-isystem /usr/include' breaks building with GCC-6.
See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=70129
* Merge pull request `#142 <https://github.com/ros-perception/vision_opencv/issues/142>`_ from YuOhara/remap_with_nan_border_value
remap with nan border if mat value is float or double
* remap with nan border if mat value is float or double
* Contributors: Hodorgasm, Vincent Rabaud, YuOhara
1.12.1 (2016-07-11)
-------------------
* Add fullResolution getter to PinholeCameraModel
* add a missing dependency when building the doc
* fix sphinx doc path
* Contributors: Jacob Panikulam, Vincent Rabaud
1.12.0 (2016-03-18)
-------------------
* depend on OpenCV3 only
* Contributors: Vincent Rabaud
1.11.12 (2016-03-10)
--------------------
* issue `#117 <https://github.com/ros-perception/vision_opencv/issues/117>`_ pull request `#118 <https://github.com/ros-perception/vision_opencv/issues/118>`_ check all distortion coefficients to see if rectification ought to be done
* Contributors: Lucas Walter
1.11.11 (2016-01-31)
--------------------
* clean up the doc files
* fix a few warnings in doc jobs
* Contributors: Vincent Rabaud
1.11.10 (2016-01-16)
--------------------
1.11.9 (2015-11-29)
-------------------
* add a condition if D=None
* fix compilation warnings
* Contributors: Vincent Rabaud, YuOhara
1.11.8 (2015-07-15)
-------------------
* fixes `#62 <https://github.com/ros-perception/vision_opencv/issues/62>`_, bug in Python rectifyPoint old opencv1 API
* Simplify some OpenCV3 distinction
* Contributors: Basheer Subei, Vincent Rabaud
1.11.7 (2014-12-14)
-------------------
* Merge pull request `#53 <https://github.com/ros-perception/vision_opencv/issues/53>`_ from carnegieroboticsllc/patch-1
Update stereo_camera_model.cpp
* Updated inline math for reprojecting a single disparity
* Update stereo_camera_model.cpp
Correct slight error in the Q matrix derivation
* Updated Q matrix to account for cameras with different Fx and Fy values
* Contributors: Carnegie Robotics LLC, Matt Alvarado, Vincent Rabaud
1.11.6 (2014-11-16)
-------------------
* Fixes in image_geometry for Python cv2 API
* Fixed typo: np -> numpy
* Added missing tfFrame method to Python PinholeCameraModel.
* Removed trailing whitespace.
* Contributors: Daniel Maturana
1.11.5 (2014-09-21)
-------------------
* get code to work with OpenCV3
actually fixes `#46 <https://github.com/ros-perception/vision_opencv/issues/46>`_ properly
* Contributors: Vincent Rabaud
1.11.4 (2014-07-27)
-------------------
1.11.3 (2014-06-08)
-------------------
* pinhole_camera_model: fix implicit shared_ptr cast to bool for C++11
In C++11 boost::shared_ptr does not provide the implicit bool conversion
operator anymore, so make the cast in pinhole_camera_model.h explicit.
That does not hurt in older C++ standards and makes compilation with C++11
possible.
* Contributors: Max Schwarz
1.11.2 (2014-04-28)
-------------------
1.11.1 (2014-04-16)
-------------------
1.11.0 (2014-02-15)
-------------------
* remove OpenCV 1 API
* fixes `#6 <https://github.com/ros-perception/vision_opencv/issues/6>`_
* fix OpenCV dependencies
* Contributors: Vincent Rabaud
1.10.15 (2014-02-07)
--------------------
* add assignment operator for StereoCameraModel
* fixed Python rectifyImage implementation in PinholeCameraModel
* Added operator= for the PinholeCameraModel.
Added the operator= for the PinholeCameraModel. I am not sure if the
PinholeCameraModel needs to have a destructor, too. To follow the
'rule of three' it should actually have one.
* Contributors: Tobias Bar, Valsamis Ntouskos, Vincent Rabaud
1.10.14 (2013-11-23 16:17)
--------------------------
* Contributors: Vincent Rabaud
1.10.13 (2013-11-23 09:19)
--------------------------
* Contributors: Vincent Rabaud
1.10.12 (2013-11-22)
--------------------
* "1.10.12"
* Contributors: Vincent Rabaud
1.10.11 (2013-10-23)
--------------------
* Contributors: Vincent Rabaud
1.10.10 (2013-10-19)
--------------------
* Contributors: Vincent Rabaud
1.10.9 (2013-10-07)
-------------------
* fixes `#23 <https://github.com/ros-perception/vision_opencv/issues/23>`_
* Contributors: Vincent Rabaud
1.10.8 (2013-09-09)
-------------------
* check for CATKIN_ENABLE_TESTING
* update email address
* Contributors: Lukas Bulwahn, Vincent Rabaud
1.10.7 (2013-07-17)
-------------------
1.10.6 (2013-03-01)
-------------------
1.10.5 (2013-02-11)
-------------------
* Add dependency on generated messages
Catkin requires explicit enumeration of dependencies on generated messages.
Add this declaration to properly flatten the dependency graph and force Catkin
to generate geometry_msgs before compiling image_geometry.
* Contributors: Adam Hachey
1.10.4 (2013-02-02)
-------------------
1.10.3 (2013-01-17)
-------------------
1.10.2 (2013-01-13)
-------------------
* fix ticket 4253
* Contributors: Vincent Rabaud
1.10.1 (2013-01-10)
-------------------
1.10.0 (2013-01-03)
-------------------
1.9.15 (2013-01-02)
-------------------
1.9.14 (2012-12-30)
-------------------
* add feature for https://code.ros.org/trac/ros-pkg/ticket/5592
* CMake cleanups
* fix a failing test
* Contributors: Vincent Rabaud
1.9.13 (2012-12-15)
-------------------
* use the catkin macros for the setup.py
* Contributors: Vincent Rabaud
1.9.12 (2012-12-14)
-------------------
* buildtool_depend catkin fix
* Contributors: William Woodall
1.9.11 (2012-12-10)
-------------------
* Fixing image_geometry package.xml
* fix https://code.ros.org/trac/ros-pkg/ticket/5570
* Contributors: Vincent Rabaud, William Woodall
1.9.10 (2012-10-04)
-------------------
1.9.9 (2012-10-01)
------------------
* fix dependencies
* Contributors: Vincent Rabaud
1.9.8 (2012-09-30)
------------------
* fix some dependencies
* fix missing Python at install and fix some dependencies
* Contributors: Vincent Rabaud
1.9.7 (2012-09-28 21:07)
------------------------
* add missing stuff
* make sure we find catkin
* Contributors: Vincent Rabaud
1.9.6 (2012-09-28 15:17)
------------------------
* make all the tests pass
* comply to the new Catkin API
* Contributors: Vincent Rabaud
1.9.5 (2012-09-15)
------------------
* remove dependencies to the opencv2 ROS package
* Contributors: Vincent Rabaud
1.9.4 (2012-09-13)
------------------
* make sure the include folders are copied to the right place
* Contributors: Vincent Rabaud
1.9.3 (2012-09-12)
------------------
1.9.2 (2012-09-07)
------------------
* be more compliant to the latest catkin
* added catkin_project() to cv_bridge, image_geometry, and opencv_tests
* Contributors: Jonathan Binney, Vincent Rabaud
1.9.1 (2012-08-28 22:06)
------------------------
* remove things that were marked as ROS_DEPRECATED
* Contributors: Vincent Rabaud
1.9.0 (2012-08-28 14:29)
------------------------
* catkinized opencv_tests by Jon Binney
* fix ticket 5449
* use OpenCV's projectPoints
* remove the version check, let's trust OpenCV :)
* revert the removal of opencv2
* vision_opencv: Export OpenCV flags in manifests for image_geometry, cv_bridge.
* finally get rid of opencv2 as it is a system dependency now
* bump REQUIRED version of OpenCV to 2.3.2, which is what's in ros-fuerte-opencv
* switch rosdep name to opencv2, to refer to ros-fuerte-opencv2
* Adding a few missing headers so that client code may compile against pinhole camera model.
* Adding opencv2 to all manifests, so that client packages may
not break when using them.
* baking in opencv debs and attempting a pre-release
* image_geometry: (Python) Adjust K and P for ROI/binning. Also expose full resolution K and P. Add raw_roi property.
* image_geometry: Add Tx, Ty getters (Python).
* image_geometry: Added tf_frame and stamp properties. Only generate undistort maps when rectifyImage is called.
* image_geometry: Fix for when D is empty (Python).
* image_geometry: Take all D coefficients, not just the first 4 (Python).
* image_geometry: Fix rectification in the presence of binning (`#4848 <https://github.com/ros-perception/vision_opencv/issues/4848>`_).
* image_geometry: Fixed wg-ros-pkg `#5019 <https://github.com/ros-perception/vision_opencv/issues/5019>`_, error updating StereoCameraModel. Removed manifest dependency on cv_bridge.
* image_geometry: fromCameraInfo() returns bool, true if parameters have changed since last call.
* image_geometry: Accessors for full-res K, P.
* image_geometry: Implemented toFullResolution(), toReducedResolution().
* image_geometry: Implemented reducedResolution().
* image_geometry: Implemented rectifiedRoi() with caching. Fixed bug that would cause rectification maps to be regenerated every time.
* image_geometry: Implemented rectifyRoi().
* image_geometry: Overloads of projection functions that return the output directly instead of through a reference parameter. Implemented unrectifyRoi(). Added fullResolution(), rawRoi().
* image_geometry: Library-specific exception class.
* image_geometry: PIMPL pattern for cached data, so I can change in patch releases if necessary. Changed projectPixelTo3dRay() to normalize to z=1.
* image_geometry (rep0104): Added binning. Partially fixed ROI (not finding rectified ROI yet). Now interpreting distortion_model. Lots of code cleanup.
* image_geometry (rep0104): Got tests passing again, were issues with D becoming variable-length.
* image_geometry: Fixed swapped width/height in computing ROI undistort maps. Partially fixes `#4206 <https://github.com/ros-perception/vision_opencv/issues/4206>`_.
* image_geometry: getDelta functions, getZ and getDisparity in C++ and Python. Docs and tests for them. Changed Python fx() and friends to pull values out of P instead of K.
* image_geometry: Added C++ getDeltaU and getDeltaV.
* `#4201 <https://github.com/ros-perception/vision_opencv/issues/4201>`_, implement/doc/test for getDeltaU getDeltaX getDeltaV getDeltaY
* Added Ubuntu platform tags to manifest
* `#4083 <https://github.com/ros-perception/vision_opencv/issues/4083>`_, projectPixelTo3dRay implemented
* image_geometry: Added PinholeCameraModel::stamp() returning the time stamp.
* image_geometry: Fixed bugs related to ignoring Tx & Ty in projectPixelTo3dRay and unrectifyPoint. Added Tx() and Ty() accessors.
* image_geometry: Fixed `#4063 <https://github.com/ros-perception/vision_opencv/issues/4063>`_, PinholeCameraModel ignores Tx term in P matrix.
* image_geometry: Implemented projectDisparityTo3d, `#4019 <https://github.com/ros-perception/vision_opencv/issues/4019>`_.
* image_geometry: Implemented unrectifyPoint, with unit tests.
* image_geometry: Fixed bug in rectifyPoint due to cv::undistortPoints not accepting double pt data, `#4053 <https://github.com/ros-perception/vision_opencv/issues/4053>`_.
* image_geometry: Tweaked manifest.
* image_geometry: Better manifest description.
* Removed tfFrame sample
* image_geometry: Doxygen main page, manifest updates.
* image_geometry: Doxygen for StereoCameraModel.
* image_geometry: Made Q calculation match old stereoproc one.
* image_geometry: Tweaked projectDisparityImageTo3D API for handling missing values.
* image_geometry: Added method to project disparity image to 3d. Added ConstPtr version of fromCameraInfo in StereoCameraModel.
* image_geometry: Export linker flags. Fixed bug that could cause rectification maps to not be initialized before use.
* Fixed path-handling on gtest for CMake 2.6
* image_geometry: Added missing source file.
* image_geometry: Added some C++ docs.
* image_geometry: Minor cleanup of StereoCameraModel, added it to build. Put in copy constructors.
* image_geometry: Switched pinhole_camera_model to use new C++ OpenCV types and functions.
* Remove use of deprecated rosbuild macros
* image_geometry (C++): Unit test for projecting points uv <-> xyz.
* image_geometry (C++): Implemented more projection functions, added beginnings of the unit tests.
* trigger rebuild
* Enable rosdoc.yaml
* docs
* image_geometry: Started C++ API. PinholeCameraModel is in theory (untested) able to track state efficiently and rectify images.
* First stereo test
* Checkpoint
* Skeleton of test
* First cut
* Contributors: Vincent Rabaud, ethanrublee, gerkey, jamesb, mihelich, vrabaud, wheeler

View File

@@ -0,0 +1,37 @@
cmake_minimum_required(VERSION 2.8)
project(image_geometry)
find_package(catkin REQUIRED sensor_msgs)
find_package(OpenCV REQUIRED)
catkin_package(CATKIN_DEPENDS sensor_msgs
DEPENDS OpenCV
INCLUDE_DIRS include
LIBRARIES ${PROJECT_NAME}
)
catkin_python_setup()
include_directories(include)
include_directories(${catkin_INCLUDE_DIRS} ${OpenCV_INCLUDE_DIRS})
# add a library
add_library(${PROJECT_NAME} src/pinhole_camera_model.cpp src/stereo_camera_model.cpp)
target_link_libraries(${PROJECT_NAME} ${OpenCV_LIBRARIES})
add_dependencies(${PROJECT_NAME} ${catkin_EXPORTED_TARGETS})
install(DIRECTORY include/${PROJECT_NAME}/
DESTINATION ${CATKIN_PACKAGE_INCLUDE_DESTINATION}/
)
# install library
install(TARGETS ${PROJECT_NAME}
ARCHIVE DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION}
LIBRARY DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION}
RUNTIME DESTINATION ${CATKIN_GLOBAL_BIN_DESTINATION}
)
# add tests
if(CATKIN_ENABLE_TESTING)
add_subdirectory(test)
endif()

View File

@@ -0,0 +1,201 @@
# -*- coding: utf-8 -*-
#
# image_geometry documentation build configuration file, created by
# sphinx-quickstart on Mon Jun 1 14:21:53 2009.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.append(os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.intersphinx', 'sphinx.ext.pngmath']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'image_geometry'
copyright = u'2009, Willow Garage, Inc.'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'image_geometrydoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'image_geometry.tex', u'stereo\\_utils Documentation',
u'James Bowman', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
'http://docs.python.org/': None,
'http://docs.scipy.org/doc/numpy' : None,
'http://docs.ros.org/api/tf/html/python/' : None,
}

View File

@@ -0,0 +1,21 @@
image_geometry
==============
image_geometry simplifies interpreting images geometrically using the
parameters from sensor_msgs/CameraInfo.
.. module:: image_geometry
.. autoclass:: image_geometry.PinholeCameraModel
:members: fromCameraInfo, rectifyImage, rectifyPoint, tfFrame, project3dToPixel, projectPixelTo3dRay, distortionCoeffs, intrinsicMatrix, projectionMatrix, rotationMatrix, cx, cy, fx, fy
.. autoclass:: image_geometry.StereoCameraModel
:members:
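
A minimal usage sketch (illustrative; ``info_msg`` stands for a
``sensor_msgs.msg.CameraInfo`` message received from a camera driver)::

    from image_geometry import PinholeCameraModel

    model = PinholeCameraModel()
    model.fromCameraInfo(info_msg)
    # Project a 3d point in the camera frame to rectified pixel coordinates
    u, v = model.project3dToPixel((1.0, 0.0, 5.0))
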
Indices and tables
==================
* :ref:`genindex`
* :ref:`search`

View File

@@ -0,0 +1,29 @@
/**
\mainpage
\htmlinclude manifest.html
\b image_geometry contains camera model classes that simplify interpreting
images geometrically using the calibration parameters from
sensor_msgs/CameraInfo messages. They may be efficiently updated in your
image callback:
\code
image_geometry::PinholeCameraModel model_;
void imageCb(const sensor_msgs::ImageConstPtr& raw_image,
const sensor_msgs::CameraInfoConstPtr& cam_info)
{
// Update the camera model (usually a no-op)
model_.fromCameraInfo(cam_info);
// Do processing...
}
\endcode
\section codeapi Code API
\b image_geometry contains two classes:
- image_geometry::PinholeCameraModel - models a pinhole camera with distortion.
- image_geometry::StereoCameraModel - models a stereo pair of pinhole cameras.
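For example, once the model has been updated, a 3d point in the camera
frame can be projected to rectified pixel coordinates (a minimal sketch;
the point value is illustrative):
\code
cv::Point3d pt(1.0, 0.0, 5.0);
cv::Point2d uv = model_.project3dToPixel(pt);
\endcode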
*/

View File

@@ -0,0 +1,18 @@
#ifndef IMAGE_GEOMETRY_EXPORTS_H
#define IMAGE_GEOMETRY_EXPORTS_H
#include <ros/macros.h>
// Import/export for windows dll's and visibility for gcc shared libraries.
#ifdef ROS_BUILD_SHARED_LIBS // ros is being built around shared libraries
#ifdef image_geometry_EXPORTS // we are building a shared lib/dll
#define IMAGE_GEOMETRY_DECL ROS_HELPER_EXPORT
#else // we are using shared lib/dll
#define IMAGE_GEOMETRY_DECL ROS_HELPER_IMPORT
#endif
#else // ros is being built around static libraries
#define IMAGE_GEOMETRY_DECL
#endif
#endif // IMAGE_GEOMETRY_EXPORTS_H

View File

@@ -0,0 +1,344 @@
#ifndef IMAGE_GEOMETRY_PINHOLE_CAMERA_MODEL_H
#define IMAGE_GEOMETRY_PINHOLE_CAMERA_MODEL_H
#include <sensor_msgs/CameraInfo.h>
#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/calib3d/calib3d.hpp>
#include <stdexcept>
#include <string>
#include "exports.h"
namespace image_geometry {
class Exception : public std::runtime_error
{
public:
Exception(const std::string& description) : std::runtime_error(description) {}
};
/**
* \brief Simplifies interpreting images geometrically using the parameters from
* sensor_msgs/CameraInfo.
*/
class IMAGE_GEOMETRY_DECL PinholeCameraModel
{
public:
PinholeCameraModel();
PinholeCameraModel(const PinholeCameraModel& other);
PinholeCameraModel& operator=(const PinholeCameraModel& other);
/**
* \brief Set the camera parameters from the sensor_msgs/CameraInfo message.
*/
bool fromCameraInfo(const sensor_msgs::CameraInfo& msg);
/**
* \brief Set the camera parameters from the sensor_msgs/CameraInfo message.
*/
bool fromCameraInfo(const sensor_msgs::CameraInfoConstPtr& msg);
/**
* \brief Get the name of the camera coordinate frame in tf.
*/
std::string tfFrame() const;
/**
* \brief Get the time stamp associated with this camera model.
*/
ros::Time stamp() const;
/**
* \brief The resolution at which the camera was calibrated.
*
* The maximum resolution at which the camera can be used with the current
* calibration; normally this is the same as the imager resolution.
*/
cv::Size fullResolution() const;
/**
* \brief The resolution of the current rectified image.
*
* The size of the rectified image associated with the latest CameraInfo, as
* reduced by binning/ROI and affected by distortion. If binning and ROI are
* not in use, this is the same as fullResolution().
*/
cv::Size reducedResolution() const;
cv::Point2d toFullResolution(const cv::Point2d& uv_reduced) const;
cv::Rect toFullResolution(const cv::Rect& roi_reduced) const;
cv::Point2d toReducedResolution(const cv::Point2d& uv_full) const;
cv::Rect toReducedResolution(const cv::Rect& roi_full) const;
/**
* \brief The current raw ROI, as used for capture by the camera driver.
*/
cv::Rect rawRoi() const;
/**
* \brief The current rectified ROI, which best fits the raw ROI.
*/
cv::Rect rectifiedRoi() const;
/**
* \brief Project a 3d point to rectified pixel coordinates.
*
* This is the inverse of projectPixelTo3dRay().
*
* \param xyz 3d point in the camera coordinate frame
* \return (u,v) in rectified pixel coordinates
*/
cv::Point2d project3dToPixel(const cv::Point3d& xyz) const;
/**
* \brief Project a rectified pixel to a 3d ray.
*
* Returns the unit vector in the camera coordinate frame in the direction of rectified
* pixel (u,v) in the image plane. This is the inverse of project3dToPixel().
*
* In 1.4.x, the vector has z = 1.0. Previously, this function returned a unit vector.
*
* \param uv_rect Rectified pixel coordinates
* \return 3d ray passing through (u,v)
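*
* A minimal sketch (`model` and `depth` are illustrative): in 1.4.x the
* returned vector has z = 1.0, so scaling it by a known depth recovers the
* corresponding 3d point:
* \code
* cv::Point3d ray = model.projectPixelTo3dRay(cv::Point2d(320, 240));
* cv::Point3d xyz = ray * depth;
* \endcode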
*/
cv::Point3d projectPixelTo3dRay(const cv::Point2d& uv_rect) const;
/**
* \brief Rectify a raw camera image.
*/
void rectifyImage(const cv::Mat& raw, cv::Mat& rectified,
int interpolation = cv::INTER_LINEAR) const;
/**
* \brief Apply camera distortion to a rectified image.
*/
void unrectifyImage(const cv::Mat& rectified, cv::Mat& raw,
int interpolation = cv::INTER_LINEAR) const;
/**
* \brief Compute the rectified image coordinates of a pixel in the raw image.
*/
cv::Point2d rectifyPoint(const cv::Point2d& uv_raw) const;
/**
* \brief Compute the raw image coordinates of a pixel in the rectified image.
*/
cv::Point2d unrectifyPoint(const cv::Point2d& uv_rect) const;
/**
* \brief Compute the rectified ROI best fitting a raw ROI.
*/
cv::Rect rectifyRoi(const cv::Rect& roi_raw) const;
/**
* \brief Compute the raw ROI best fitting a rectified ROI.
*/
cv::Rect unrectifyRoi(const cv::Rect& roi_rect) const;
/**
* \brief Returns the camera info message held internally
*/
const sensor_msgs::CameraInfo& cameraInfo() const;
/**
* \brief Returns the original camera matrix.
*/
const cv::Matx33d& intrinsicMatrix() const;
/**
* \brief Returns the distortion coefficients.
*/
const cv::Mat_<double>& distortionCoeffs() const;
/**
* \brief Returns the rotation matrix.
*/
const cv::Matx33d& rotationMatrix() const;
/**
* \brief Returns the projection matrix.
*/
const cv::Matx34d& projectionMatrix() const;
/**
* \brief Returns the original camera matrix for full resolution.
*/
const cv::Matx33d& fullIntrinsicMatrix() const;
/**
* \brief Returns the projection matrix for full resolution.
*/
const cv::Matx34d& fullProjectionMatrix() const;
/**
* \brief Returns the focal length (pixels) in x direction of the rectified image.
*/
double fx() const;
/**
* \brief Returns the focal length (pixels) in y direction of the rectified image.
*/
double fy() const;
/**
* \brief Returns the x coordinate of the optical center.
*/
double cx() const;
/**
* \brief Returns the y coordinate of the optical center.
*/
double cy() const;
/**
* \brief Returns the x-translation term of the projection matrix.
*/
double Tx() const;
/**
* \brief Returns the y-translation term of the projection matrix.
*/
double Ty() const;
/**
* \brief Returns the number of columns in each bin.
*/
uint32_t binningX() const;
/**
* \brief Returns the number of rows in each bin.
*/
uint32_t binningY() const;
/**
* \brief Compute delta u, given Z and delta X in Cartesian space.
*
* For given Z, this is the inverse of getDeltaX().
*
* \param deltaX Delta X, in Cartesian space
* \param Z Z (depth), in Cartesian space
*/
double getDeltaU(double deltaX, double Z) const;
/**
* \brief Compute delta v, given Z and delta Y in Cartesian space.
*
* For given Z, this is the inverse of getDeltaY().
*
* \param deltaY Delta Y, in Cartesian space
* \param Z Z (depth), in Cartesian space
*/
double getDeltaV(double deltaY, double Z) const;
/**
* \brief Compute delta X, given Z in Cartesian space and delta u in pixels.
*
* For given Z, this is the inverse of getDeltaU().
*
* \param deltaU Delta u, in pixels
* \param Z Z (depth), in Cartesian space
*/
double getDeltaX(double deltaU, double Z) const;
/**
* \brief Compute delta Y, given Z in Cartesian space and delta v in pixels.
*
* For given Z, this is the inverse of getDeltaV().
*
* \param deltaV Delta v, in pixels
* \param Z Z (depth), in Cartesian space
*/
double getDeltaY(double deltaV, double Z) const;
/**
* \brief Returns true if the camera has been initialized
*/
bool initialized() const { return (bool)cache_; }
protected:
sensor_msgs::CameraInfo cam_info_;
cv::Mat_<double> D_; // Unaffected by binning, ROI
cv::Matx33d R_; // Unaffected by binning, ROI
cv::Matx33d K_; // Describe current image (includes binning, ROI)
cv::Matx34d P_; // Describe current image (includes binning, ROI)
cv::Matx33d K_full_; // Describe full-res image, needed for full maps
cv::Matx34d P_full_; // Describe full-res image, needed for full maps
// Use PIMPL here so we can change internals in patch updates if needed
struct Cache;
#ifdef BOOST_SHARED_PTR_HPP_INCLUDED
boost::shared_ptr<Cache> cache_; // Holds cached data for internal use
#else
std::shared_ptr<Cache> cache_; // Holds cached data for internal use
#endif
void initRectificationMaps() const;
friend class StereoCameraModel;
};
/* Trivial inline functions */
inline std::string PinholeCameraModel::tfFrame() const
{
assert( initialized() );
return cam_info_.header.frame_id;
}
inline ros::Time PinholeCameraModel::stamp() const
{
assert( initialized() );
return cam_info_.header.stamp;
}
inline const sensor_msgs::CameraInfo& PinholeCameraModel::cameraInfo() const { return cam_info_; }
inline const cv::Matx33d& PinholeCameraModel::intrinsicMatrix() const { return K_; }
inline const cv::Mat_<double>& PinholeCameraModel::distortionCoeffs() const { return D_; }
inline const cv::Matx33d& PinholeCameraModel::rotationMatrix() const { return R_; }
inline const cv::Matx34d& PinholeCameraModel::projectionMatrix() const { return P_; }
inline const cv::Matx33d& PinholeCameraModel::fullIntrinsicMatrix() const { return K_full_; }
inline const cv::Matx34d& PinholeCameraModel::fullProjectionMatrix() const { return P_full_; }
inline double PinholeCameraModel::fx() const { return P_(0,0); }
inline double PinholeCameraModel::fy() const { return P_(1,1); }
inline double PinholeCameraModel::cx() const { return P_(0,2); }
inline double PinholeCameraModel::cy() const { return P_(1,2); }
inline double PinholeCameraModel::Tx() const { return P_(0,3); }
inline double PinholeCameraModel::Ty() const { return P_(1,3); }
inline uint32_t PinholeCameraModel::binningX() const { return cam_info_.binning_x; }
inline uint32_t PinholeCameraModel::binningY() const { return cam_info_.binning_y; }
inline double PinholeCameraModel::getDeltaU(double deltaX, double Z) const
{
assert( initialized() );
return fx() * deltaX / Z;
}
inline double PinholeCameraModel::getDeltaV(double deltaY, double Z) const
{
assert( initialized() );
return fy() * deltaY / Z;
}
inline double PinholeCameraModel::getDeltaX(double deltaU, double Z) const
{
assert( initialized() );
return Z * deltaU / fx();
}
inline double PinholeCameraModel::getDeltaY(double deltaV, double Z) const
{
assert( initialized() );
return Z * deltaV / fy();
}
} //namespace image_geometry
#endif

View File

@@ -0,0 +1,131 @@
#ifndef IMAGE_GEOMETRY_STEREO_CAMERA_MODEL_H
#define IMAGE_GEOMETRY_STEREO_CAMERA_MODEL_H
#include "image_geometry/pinhole_camera_model.h"
#include "exports.h"
namespace image_geometry {
/**
* \brief Simplifies interpreting stereo image pairs geometrically using the
* parameters from the left and right sensor_msgs/CameraInfo.
*/
class IMAGE_GEOMETRY_DECL StereoCameraModel
{
public:
StereoCameraModel();
StereoCameraModel(const StereoCameraModel& other);
StereoCameraModel& operator=(const StereoCameraModel& other);
/**
* \brief Set the camera parameters from the sensor_msgs/CameraInfo messages.
*/
bool fromCameraInfo(const sensor_msgs::CameraInfo& left,
const sensor_msgs::CameraInfo& right);
/**
* \brief Set the camera parameters from the sensor_msgs/CameraInfo messages.
*/
bool fromCameraInfo(const sensor_msgs::CameraInfoConstPtr& left,
const sensor_msgs::CameraInfoConstPtr& right);
/**
* \brief Get the left monocular camera model.
*/
const PinholeCameraModel& left() const;
/**
* \brief Get the right monocular camera model.
*/
const PinholeCameraModel& right() const;
/**
* \brief Get the name of the camera coordinate frame in tf.
*
* For stereo cameras, both the left and right CameraInfo should be in the left
* optical frame.
*/
std::string tfFrame() const;
/**
* \brief Project a rectified pixel with disparity to a 3d point.
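*
* A minimal sketch (illustrative names):
* \code
* cv::Point3d xyz;
* stereo_model.projectDisparityTo3d(cv::Point2d(320, 240), 15.0f, xyz);
* \endcode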
*/
void projectDisparityTo3d(const cv::Point2d& left_uv_rect, float disparity, cv::Point3d& xyz) const;
/**
* \brief Project a disparity image to a 3d point cloud.
*
* If handleMissingValues = true, all points with minimal disparity (outliers) have
* Z set to MISSING_Z (currently 10000.0).
*/
void projectDisparityImageTo3d(const cv::Mat& disparity, cv::Mat& point_cloud,
bool handleMissingValues = false) const;
static const double MISSING_Z;
/**
* \brief Returns the disparity reprojection matrix.
*/
const cv::Matx44d& reprojectionMatrix() const;
/**
* \brief Returns the horizontal baseline in world coordinates.
*/
double baseline() const;
/**
* \brief Returns the depth at which a point is observed with a given disparity.
*
* This is the inverse of getDisparity().
*/
double getZ(double disparity) const;
/**
* \brief Returns the disparity observed for a point at depth Z.
*
* This is the inverse of getZ().
*/
double getDisparity(double Z) const;
/**
* \brief Returns true if the camera has been initialized
*/
bool initialized() const { return left_.initialized() && right_.initialized(); }
protected:
PinholeCameraModel left_, right_;
cv::Matx44d Q_;
void updateQ();
};
/* Trivial inline functions */
inline const PinholeCameraModel& StereoCameraModel::left() const { return left_; }
inline const PinholeCameraModel& StereoCameraModel::right() const { return right_; }
inline std::string StereoCameraModel::tfFrame() const { return left_.tfFrame(); }
inline const cv::Matx44d& StereoCameraModel::reprojectionMatrix() const { return Q_; }
inline double StereoCameraModel::baseline() const
{
/// @todo Currently assuming horizontal baseline
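// For a horizontal stereo pair, P_right(0,3) = -fx * baseline,
// hence baseline = -Tx / fx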
return -right_.Tx() / right_.fx();
}
inline double StereoCameraModel::getZ(double disparity) const
{
assert( initialized() );
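// Since -right_.Tx() = fx * baseline, this evaluates to
// Z = fx * baseline / d, with d the disparity corrected for any
// difference between the two rectified principal points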
return -right_.Tx() / (disparity - (left().cx() - right().cx()));
}
inline double StereoCameraModel::getDisparity(double Z) const
{
assert( initialized() );
return -right_.Tx() / Z + (left().cx() - right().cx());
}
} //namespace image_geometry
#endif

View File

@@ -0,0 +1,30 @@
<package format="2">
<name>image_geometry</name>
<version>1.13.1</version>
<description>
`image_geometry` contains C++ and Python libraries for interpreting images
geometrically. It interfaces the calibration parameters in sensor_msgs/CameraInfo
messages with OpenCV functions such as image rectification, much as cv_bridge
interfaces ROS sensor_msgs/Image with OpenCV data types.
</description>
<author>Patrick Mihelich</author>
<maintainer email="vincent.rabaud@gmail.com">Vincent Rabaud</maintainer>
<license>BSD</license>
<url>http://www.ros.org/wiki/image_geometry</url>
<export>
<rosdoc config="rosdoc.yaml" />
</export>
<buildtool_depend>catkin</buildtool_depend>
<build_depend>libopencv-dev</build_depend>
<build_depend>sensor_msgs</build_depend>
<exec_depend>libopencv-dev</exec_depend>
<build_export_depend>libopencv-dev</build_export_depend>
<build_export_depend>sensor_msgs</build_export_depend>
<doc_depend>dvipng</doc_depend>
<doc_depend>texlive-latex-extra</doc_depend>
</package>

View File

@@ -0,0 +1,8 @@
- builder: sphinx
name: Python API
output_dir: python
sphinx_root_dir: doc
- builder: doxygen
name: C++ API
output_dir: c++
file_patterns: '*.c *.cpp *.h *.cc *.hh *.dox'

View File

@@ -0,0 +1,10 @@
#!/usr/bin/env python
from distutils.core import setup
from catkin_pkg.python_setup import generate_distutils_setup
d = generate_distutils_setup()
d['packages'] = ['image_geometry']
d['package_dir'] = {'' : 'src'}
setup(**d)

View File

@@ -0,0 +1,2 @@
from __future__ import absolute_import
from .cameramodels import PinholeCameraModel, StereoCameraModel

View File

@@ -0,0 +1,373 @@
import array
import cv2
import sensor_msgs.msg
import math
import copy
import numpy
def mkmat(rows, cols, L):
mat = numpy.matrix(L, dtype='float64')
mat.resize((rows,cols))
return mat
class PinholeCameraModel:
"""
A pinhole camera is an idealized monocular camera.
"""
def __init__(self):
self.K = None
self.D = None
self.R = None
self.P = None
self.full_K = None
self.full_P = None
self.width = None
self.height = None
self.binning_x = None
self.binning_y = None
self.raw_roi = None
self.tf_frame = None
self.stamp = None
def fromCameraInfo(self, msg):
"""
:param msg: camera parameters
:type msg: sensor_msgs.msg.CameraInfo
Set the camera parameters from the :class:`sensor_msgs.msg.CameraInfo` message.
"""
self.K = mkmat(3, 3, msg.K)
if msg.D:
self.D = mkmat(len(msg.D), 1, msg.D)
else:
self.D = None
self.R = mkmat(3, 3, msg.R)
self.P = mkmat(3, 4, msg.P)
self.full_K = mkmat(3, 3, msg.K)
self.full_P = mkmat(3, 4, msg.P)
self.width = msg.width
self.height = msg.height
self.binning_x = max(1, msg.binning_x)
self.binning_y = max(1, msg.binning_y)
self.resolution = (msg.width, msg.height)
self.raw_roi = copy.copy(msg.roi)
# ROI all zeros is considered the same as full resolution
if (self.raw_roi.x_offset == 0 and self.raw_roi.y_offset == 0 and
self.raw_roi.width == 0 and self.raw_roi.height == 0):
self.raw_roi.width = self.width
self.raw_roi.height = self.height
self.tf_frame = msg.header.frame_id
self.stamp = msg.header.stamp
# Adjust K and P for binning and ROI
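# Binning scales the focal lengths and principal point; an ROI offset
# shifts the principal point into the cropped image's coordinates.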
self.K[0,0] /= self.binning_x
self.K[1,1] /= self.binning_y
self.K[0,2] = (self.K[0,2] - self.raw_roi.x_offset) / self.binning_x
self.K[1,2] = (self.K[1,2] - self.raw_roi.y_offset) / self.binning_y
self.P[0,0] /= self.binning_x
self.P[1,1] /= self.binning_y
self.P[0,2] = (self.P[0,2] - self.raw_roi.x_offset) / self.binning_x
self.P[1,2] = (self.P[1,2] - self.raw_roi.y_offset) / self.binning_y
def rectifyImage(self, raw, rectified):
"""
:param raw: input image
:type raw: :class:`CvMat` or :class:`IplImage`
:param rectified: rectified output image
:type rectified: :class:`CvMat` or :class:`IplImage`
Applies the rectification specified by camera parameters :math:`K` and :math:`D` to image `raw` and writes the resulting image to `rectified`.
"""
self.mapx = numpy.ndarray(shape=(self.height, self.width, 1),
dtype='float32')
self.mapy = numpy.ndarray(shape=(self.height, self.width, 1),
dtype='float32')
cv2.initUndistortRectifyMap(self.K, self.D, self.R, self.P,
(self.width, self.height), cv2.CV_32FC1, self.mapx, self.mapy)
cv2.remap(raw, self.mapx, self.mapy, cv2.INTER_CUBIC, rectified)
def rectifyPoint(self, uv_raw):
"""
:param uv_raw: pixel coordinates
:type uv_raw: (u, v)
Applies the rectification specified by camera parameters
:math:`K` and :math:`D` to point (u, v) and returns the
pixel coordinates of the rectified point.
"""
src = mkmat(1, 2, list(uv_raw))
src.resize((1,1,2))
dst = cv2.undistortPoints(src, self.K, self.D, R=self.R, P=self.P)
return dst[0,0]
def project3dToPixel(self, point):
"""
:param point: 3D point
:type point: (x, y, z)
Returns the rectified pixel coordinates (u, v) of the 3D point,
using the camera :math:`P` matrix.
This is the inverse of :meth:`projectPixelTo3dRay`.
"""
src = mkmat(4, 1, [point[0], point[1], point[2], 1.0])
dst = self.P * src
x = dst[0,0]
y = dst[1,0]
w = dst[2,0]
if w != 0:
return (x / w, y / w)
else:
return (float('nan'), float('nan'))
def projectPixelTo3dRay(self, uv):
"""
:param uv: rectified pixel coordinates
:type uv: (u, v)
Returns the unit vector which passes from the camera center through rectified pixel (u, v),
using the camera :math:`P` matrix.
This is the inverse of :meth:`project3dToPixel`.
"""
x = (uv[0] - self.cx()) / self.fx()
y = (uv[1] - self.cy()) / self.fy()
norm = math.sqrt(x*x + y*y + 1)
x /= norm
y /= norm
z = 1.0 / norm
return (x, y, z)
def getDeltaU(self, deltaX, Z):
"""
:param deltaX: delta X, in cartesian space
:type deltaX: float
:param Z: Z, in cartesian space
:type Z: float
:rtype: float
Compute delta u, given Z and delta X in Cartesian space.
For given Z, this is the inverse of :meth:`getDeltaX`.
"""
fx = self.P[0, 0]
if Z == 0:
return float('inf')
else:
return fx * deltaX / Z
def getDeltaV(self, deltaY, Z):
"""
:param deltaY: delta Y, in cartesian space
:type deltaY: float
:param Z: Z, in cartesian space
:type Z: float
:rtype: float
Compute delta v, given Z and delta Y in Cartesian space.
For given Z, this is the inverse of :meth:`getDeltaY`.
"""
fy = self.P[1, 1]
if Z == 0:
return float('inf')
else:
return fy * deltaY / Z
def getDeltaX(self, deltaU, Z):
"""
:param deltaU: delta u in pixels
:type deltaU: float
:param Z: Z, in cartesian space
:type Z: float
:rtype: float
Compute delta X, given Z in cartesian space and delta u in pixels.
For given Z, this is the inverse of :meth:`getDeltaU`.
"""
fx = self.P[0, 0]
return Z * deltaU / fx
def getDeltaY(self, deltaV, Z):
"""
:param deltaV: delta v in pixels
:type deltaV: float
:param Z: Z, in cartesian space
:type Z: float
:rtype: float
Compute delta Y, given Z in cartesian space and delta v in pixels.
For given Z, this is the inverse of :meth:`getDeltaV`.
"""
fy = self.P[1, 1]
return Z * deltaV / fy
def fullResolution(self):
"""Returns the full resolution of the camera"""
return self.resolution
def intrinsicMatrix(self):
""" Returns :math:`K`, also called camera_matrix in cv docs """
return self.K
def distortionCoeffs(self):
""" Returns :math:`D` """
return self.D
def rotationMatrix(self):
""" Returns :math:`R` """
return self.R
def projectionMatrix(self):
""" Returns :math:`P` """
return self.P
def fullIntrinsicMatrix(self):
""" Return the original camera matrix for full resolution """
return self.full_K
def fullProjectionMatrix(self):
""" Return the projection matrix for full resolution """
return self.full_P
def cx(self):
""" Returns x center """
return self.P[0,2]
def cy(self):
""" Returns y center """
return self.P[1,2]
def fx(self):
""" Returns x focal length """
return self.P[0,0]
def fy(self):
""" Returns y focal length """
return self.P[1,1]
def Tx(self):
""" Return the x-translation term of the projection matrix """
return self.P[0,3]
def Ty(self):
""" Return the y-translation term of the projection matrix """
return self.P[1,3]
def tfFrame(self):
""" Returns the tf frame name - a string - of the camera.
This is the frame of the :class:`sensor_msgs.msg.CameraInfo` message.
"""
return self.tf_frame
class StereoCameraModel:
"""
An idealized stereo camera.
"""
def __init__(self):
self.left = PinholeCameraModel()
self.right = PinholeCameraModel()
def fromCameraInfo(self, left_msg, right_msg):
"""
:param left_msg: left camera parameters
:type left_msg: sensor_msgs.msg.CameraInfo
:param right_msg: right camera parameters
:type right_msg: sensor_msgs.msg.CameraInfo
Set the camera parameters from the :class:`sensor_msgs.msg.CameraInfo` messages.
"""
self.left.fromCameraInfo(left_msg)
self.right.fromCameraInfo(right_msg)
# [ Fx, 0, Cx, Fx*-Tx ]
# [ 0, Fy, Cy, 0 ]
# [ 0, 0, 1, 0 ]
fx = self.right.P[0, 0]
fy = self.right.P[1, 1]
cx = self.right.P[0, 2]
cy = self.right.P[1, 2]
tx = -self.right.P[0, 3] / fx
# Q is:
# [ 1, 0, 0, -Clx ]
# [ 0, 1, 0, -Cy ]
# [ 0, 0, 0, Fx ]
# [ 0, 0, 1 / Tx, (Crx-Clx)/Tx ]
self.Q = numpy.zeros((4, 4), dtype='float64')
self.Q[0, 0] = 1.0
self.Q[0, 3] = -cx
self.Q[1, 1] = 1.0
self.Q[1, 3] = -cy
self.Q[2, 3] = fx
self.Q[3, 2] = 1 / tx
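# Q[3, 3] = (Crx - Clx) / Tx is omitted, i.e. the rectified principal
# points of the two cameras are assumed to coincide.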
def tfFrame(self):
"""
Returns the tf frame name - a string - of the 3d points. This is
the frame of the :class:`sensor_msgs.msg.CameraInfo` message. It
may be used as a source frame in :class:`tf.TransformListener`.
"""
return self.left.tfFrame()
def project3dToPixel(self, point):
"""
:param point: 3D point
:type point: (x, y, z)
Returns the rectified pixel coordinates (u, v) of the 3D point, for each camera, as ((u_left, v_left), (u_right, v_right))
using the cameras' :math:`P` matrices.
This is the inverse of :meth:`projectPixelTo3d`.
"""
l = self.left.project3dToPixel(point)
r = self.right.project3dToPixel(point)
return (l, r)
def projectPixelTo3d(self, left_uv, disparity):
"""
:param left_uv: rectified pixel coordinates
:type left_uv: (u, v)
:param disparity: disparity, in pixels
:type disparity: float
Returns the 3D point (x, y, z) for the given pixel position,
using the cameras' :math:`P` matrices.
This is the inverse of :meth:`project3dToPixel`.
Note that a disparity of zero implies that the 3D point is at infinity.
"""
src = mkmat(4, 1, [left_uv[0], left_uv[1], disparity, 1.0])
dst = self.Q * src
x = dst[0, 0]
y = dst[1, 0]
z = dst[2, 0]
w = dst[3, 0]
if w != 0:
return (x / w, y / w, z / w)
else:
return (0.0, 0.0, 0.0)
def getZ(self, disparity):
"""
:param disparity: disparity, in pixels
:type disparity: float
Returns the depth at which a point is observed with a given disparity.
This is the inverse of :meth:`getDisparity`.
Note that a disparity of zero implies Z is infinite.
"""
if disparity == 0:
return float('inf')
Tx = -self.right.P[0, 3]
return Tx / disparity
def getDisparity(self, Z):
"""
:param Z: Z (depth), in cartesian space
:type Z: float
Returns the disparity observed for a point at depth Z.
This is the inverse of :meth:`getZ`.
"""
if Z == 0:
return float('inf')
Tx = -self.right.P[0, 3]
return Tx / Z

View File

@@ -0,0 +1,484 @@
#include "image_geometry/pinhole_camera_model.h"
#include <sensor_msgs/distortion_models.h>
#ifdef BOOST_SHARED_PTR_HPP_INCLUDED
#include <boost/make_shared.hpp>
#endif
namespace image_geometry {
enum DistortionState { NONE, CALIBRATED, UNKNOWN };
struct PinholeCameraModel::Cache
{
DistortionState distortion_state;
cv::Mat_<double> K_binned, P_binned; // Binning applied, but not cropping
mutable bool full_maps_dirty;
mutable cv::Mat full_map1, full_map2;
mutable bool reduced_maps_dirty;
mutable cv::Mat reduced_map1, reduced_map2;
mutable bool rectified_roi_dirty;
mutable cv::Rect rectified_roi;
Cache()
: full_maps_dirty(true),
reduced_maps_dirty(true),
rectified_roi_dirty(true)
{
}
};
PinholeCameraModel::PinholeCameraModel()
{
}
PinholeCameraModel& PinholeCameraModel::operator=(const PinholeCameraModel& other)
{
if (other.initialized())
this->fromCameraInfo(other.cameraInfo());
return *this;
}
PinholeCameraModel::PinholeCameraModel(const PinholeCameraModel& other)
{
if (other.initialized())
fromCameraInfo(other.cam_info_);
}
// For uint32_t, string, bool...
template<typename T>
bool update(const T& new_val, T& my_val)
{
if (my_val == new_val)
return false;
my_val = new_val;
return true;
}
// For std::vector
template<typename MatT>
bool updateMat(const MatT& new_mat, MatT& my_mat, cv::Mat_<double>& cv_mat, int rows, int cols)
{
if ((my_mat == new_mat) && (my_mat.size() == cv_mat.rows*cv_mat.cols))
return false;
my_mat = new_mat;
// D may be empty if camera is uncalibrated or distortion model is non-standard
cv_mat = (my_mat.size() == 0) ? cv::Mat_<double>() : cv::Mat_<double>(rows, cols, &my_mat[0]);
return true;
}
template<typename MatT, typename MatU>
bool updateMat(const MatT& new_mat, MatT& my_mat, MatU& cv_mat)
{
if ((my_mat == new_mat) && (my_mat.size() == cv_mat.rows*cv_mat.cols))
return false;
my_mat = new_mat;
// K, R and P have a fixed size, so simply rebuild the matrix header over the new data
cv_mat = MatU(&my_mat[0]);
return true;
}
bool PinholeCameraModel::fromCameraInfo(const sensor_msgs::CameraInfo& msg)
{
// Create our repository of cached data (rectification maps, etc.)
if (!cache_)
#ifdef BOOST_SHARED_PTR_HPP_INCLUDED
cache_ = boost::make_shared<Cache>();
#else
cache_ = std::make_shared<Cache>();
#endif
// Binning = 0 is considered the same as binning = 1 (no binning).
uint32_t binning_x = msg.binning_x ? msg.binning_x : 1;
uint32_t binning_y = msg.binning_y ? msg.binning_y : 1;
// ROI all zeros is considered the same as full resolution.
sensor_msgs::RegionOfInterest roi = msg.roi;
if (roi.x_offset == 0 && roi.y_offset == 0 && roi.width == 0 && roi.height == 0) {
roi.width = msg.width;
roi.height = msg.height;
}
// Update time stamp (and frame_id if that changes for some reason)
cam_info_.header = msg.header;
// Update any parameters that have changed. The full rectification maps are
// invalidated by any change in the calibration parameters OR binning.
bool &full_dirty = cache_->full_maps_dirty;
full_dirty |= update(msg.height, cam_info_.height);
full_dirty |= update(msg.width, cam_info_.width);
full_dirty |= update(msg.distortion_model, cam_info_.distortion_model);
full_dirty |= updateMat(msg.D, cam_info_.D, D_, 1, msg.D.size());
full_dirty |= updateMat(msg.K, cam_info_.K, K_full_);
full_dirty |= updateMat(msg.R, cam_info_.R, R_);
full_dirty |= updateMat(msg.P, cam_info_.P, P_full_);
full_dirty |= update(binning_x, cam_info_.binning_x);
full_dirty |= update(binning_y, cam_info_.binning_y);
// The reduced rectification maps are invalidated by any of the above or a
// change in ROI.
bool &reduced_dirty = cache_->reduced_maps_dirty;
reduced_dirty = full_dirty;
reduced_dirty |= update(roi.x_offset, cam_info_.roi.x_offset);
reduced_dirty |= update(roi.y_offset, cam_info_.roi.y_offset);
reduced_dirty |= update(roi.height, cam_info_.roi.height);
reduced_dirty |= update(roi.width, cam_info_.roi.width);
reduced_dirty |= update(roi.do_rectify, cam_info_.roi.do_rectify);
// As is the rectified ROI
cache_->rectified_roi_dirty = reduced_dirty;
// Figure out how to handle the distortion
if (cam_info_.distortion_model == sensor_msgs::distortion_models::PLUMB_BOB ||
cam_info_.distortion_model == sensor_msgs::distortion_models::RATIONAL_POLYNOMIAL) {
// If any distortion coefficient is non-zero, then need to apply the distortion
cache_->distortion_state = NONE;
for (size_t i = 0; i < cam_info_.D.size(); ++i)
{
if (cam_info_.D[i] != 0)
{
cache_->distortion_state = CALIBRATED;
break;
}
}
}
else
cache_->distortion_state = UNKNOWN;
// If necessary, create new K_ and P_ adjusted for binning and ROI
/// @todo Calculate and use rectified ROI
bool adjust_binning = (binning_x > 1) || (binning_y > 1);
bool adjust_roi = (roi.x_offset != 0) || (roi.y_offset != 0);
if (!adjust_binning && !adjust_roi) {
K_ = K_full_;
P_ = P_full_;
}
else {
K_ = K_full_;
P_ = P_full_;
// ROI is in full image coordinates, so change it first
if (adjust_roi) {
// Move principal point by the offset
/// @todo Adjust P by rectified ROI instead
K_(0,2) -= roi.x_offset;
K_(1,2) -= roi.y_offset;
P_(0,2) -= roi.x_offset;
P_(1,2) -= roi.y_offset;
}
if (binning_x > 1) {
double scale_x = 1.0 / binning_x;
K_(0,0) *= scale_x;
K_(0,2) *= scale_x;
P_(0,0) *= scale_x;
P_(0,2) *= scale_x;
P_(0,3) *= scale_x;
}
if (binning_y > 1) {
double scale_y = 1.0 / binning_y;
K_(1,1) *= scale_y;
K_(1,2) *= scale_y;
P_(1,1) *= scale_y;
P_(1,2) *= scale_y;
P_(1,3) *= scale_y;
}
}
return reduced_dirty;
}
bool PinholeCameraModel::fromCameraInfo(const sensor_msgs::CameraInfoConstPtr& msg)
{
return fromCameraInfo(*msg);
}
cv::Size PinholeCameraModel::fullResolution() const
{
assert( initialized() );
return cv::Size(cam_info_.width, cam_info_.height);
}
cv::Size PinholeCameraModel::reducedResolution() const
{
assert( initialized() );
cv::Rect roi = rectifiedRoi();
return cv::Size(roi.width / binningX(), roi.height / binningY());
}
cv::Point2d PinholeCameraModel::toFullResolution(const cv::Point2d& uv_reduced) const
{
cv::Rect roi = rectifiedRoi();
return cv::Point2d(uv_reduced.x * binningX() + roi.x,
uv_reduced.y * binningY() + roi.y);
}
cv::Rect PinholeCameraModel::toFullResolution(const cv::Rect& roi_reduced) const
{
cv::Rect roi = rectifiedRoi();
return cv::Rect(roi_reduced.x * binningX() + roi.x,
roi_reduced.y * binningY() + roi.y,
roi_reduced.width * binningX(),
roi_reduced.height * binningY());
}
cv::Point2d PinholeCameraModel::toReducedResolution(const cv::Point2d& uv_full) const
{
cv::Rect roi = rectifiedRoi();
return cv::Point2d((uv_full.x - roi.x) / binningX(),
(uv_full.y - roi.y) / binningY());
}
cv::Rect PinholeCameraModel::toReducedResolution(const cv::Rect& roi_full) const
{
cv::Rect roi = rectifiedRoi();
return cv::Rect((roi_full.x - roi.x) / binningX(),
(roi_full.y - roi.y) / binningY(),
roi_full.width / binningX(),
roi_full.height / binningY());
}
cv::Rect PinholeCameraModel::rawRoi() const
{
assert( initialized() );
return cv::Rect(cam_info_.roi.x_offset, cam_info_.roi.y_offset,
cam_info_.roi.width, cam_info_.roi.height);
}
cv::Rect PinholeCameraModel::rectifiedRoi() const
{
assert( initialized() );
if (cache_->rectified_roi_dirty)
{
if (!cam_info_.roi.do_rectify)
cache_->rectified_roi = rawRoi();
else
cache_->rectified_roi = rectifyRoi(rawRoi());
cache_->rectified_roi_dirty = false;
}
return cache_->rectified_roi;
}
cv::Point2d PinholeCameraModel::project3dToPixel(const cv::Point3d& xyz) const
{
assert( initialized() );
assert(P_(2, 3) == 0.0); // Calibrated stereo cameras should be in the same plane
// [U V W]^T = P * [X Y Z 1]^T
// u = U/W
// v = V/W
cv::Point2d uv_rect;
uv_rect.x = (fx()*xyz.x + Tx()) / xyz.z + cx();
uv_rect.y = (fy()*xyz.y + Ty()) / xyz.z + cy();
return uv_rect;
}
cv::Point3d PinholeCameraModel::projectPixelTo3dRay(const cv::Point2d& uv_rect) const
{
assert( initialized() );
cv::Point3d ray;
ray.x = (uv_rect.x - cx() - Tx()) / fx();
ray.y = (uv_rect.y - cy() - Ty()) / fy();
ray.z = 1.0;
return ray;
}
void PinholeCameraModel::rectifyImage(const cv::Mat& raw, cv::Mat& rectified, int interpolation) const
{
assert( initialized() );
switch (cache_->distortion_state) {
case NONE:
raw.copyTo(rectified);
break;
case CALIBRATED:
initRectificationMaps();
if (raw.depth() == CV_32F || raw.depth() == CV_64F)
{
cv::remap(raw, rectified, cache_->reduced_map1, cache_->reduced_map2, interpolation, cv::BORDER_CONSTANT, std::numeric_limits<float>::quiet_NaN());
}
else {
cv::remap(raw, rectified, cache_->reduced_map1, cache_->reduced_map2, interpolation);
}
break;
default:
assert(cache_->distortion_state == UNKNOWN);
throw Exception("Cannot call rectifyImage when distortion is unknown.");
}
}
void PinholeCameraModel::unrectifyImage(const cv::Mat& rectified, cv::Mat& raw, int interpolation) const
{
assert( initialized() );
throw Exception("PinholeCameraModel::unrectifyImage is unimplemented.");
/// @todo Implement unrectifyImage()
// Similar to rectifyImage, but need to build separate set of inverse maps (raw->rectified)...
// - Build src_pt Mat with all the raw pixel coordinates (or do it one row at a time)
// - Do cv::undistortPoints(src_pt, dst_pt, K_, D_, R_, P_)
// - Use convertMaps() to convert dst_pt to fast fixed-point maps
// - cv::remap(rectified, raw, ...)
// Need interpolation argument. Same caching behavior?
}
cv::Point2d PinholeCameraModel::rectifyPoint(const cv::Point2d& uv_raw) const
{
assert( initialized() );
if (cache_->distortion_state == NONE)
return uv_raw;
if (cache_->distortion_state == UNKNOWN)
throw Exception("Cannot call rectifyPoint when distortion is unknown.");
assert(cache_->distortion_state == CALIBRATED);
/// @todo cv::undistortPoints requires the point data to be float, should allow double
cv::Point2f raw32 = uv_raw, rect32;
const cv::Mat src_pt(1, 1, CV_32FC2, &raw32.x);
cv::Mat dst_pt(1, 1, CV_32FC2, &rect32.x);
cv::undistortPoints(src_pt, dst_pt, K_, D_, R_, P_);
return rect32;
}
cv::Point2d PinholeCameraModel::unrectifyPoint(const cv::Point2d& uv_rect) const
{
assert( initialized() );
if (cache_->distortion_state == NONE)
return uv_rect;
if (cache_->distortion_state == UNKNOWN)
throw Exception("Cannot call unrectifyPoint when distortion is unknown.");
assert(cache_->distortion_state == CALIBRATED);
// Convert to a ray
cv::Point3d ray = projectPixelTo3dRay(uv_rect);
// Project the ray on the image
cv::Mat r_vec, t_vec = cv::Mat_<double>::zeros(3, 1);
cv::Rodrigues(R_.t(), r_vec);
std::vector<cv::Point2d> image_point;
cv::projectPoints(std::vector<cv::Point3d>(1, ray), r_vec, t_vec, K_, D_, image_point);
return image_point[0];
}
cv::Rect PinholeCameraModel::rectifyRoi(const cv::Rect& roi_raw) const
{
assert( initialized() );
/// @todo Actually implement "best fit" as described by REP 104.
// For now, just unrectify the four corners and take the bounding box.
cv::Point2d rect_tl = rectifyPoint(cv::Point2d(roi_raw.x, roi_raw.y));
cv::Point2d rect_tr = rectifyPoint(cv::Point2d(roi_raw.x + roi_raw.width, roi_raw.y));
cv::Point2d rect_br = rectifyPoint(cv::Point2d(roi_raw.x + roi_raw.width,
roi_raw.y + roi_raw.height));
cv::Point2d rect_bl = rectifyPoint(cv::Point2d(roi_raw.x, roi_raw.y + roi_raw.height));
cv::Point roi_tl(std::ceil (std::min(rect_tl.x, rect_bl.x)),
std::ceil (std::min(rect_tl.y, rect_tr.y)));
cv::Point roi_br(std::floor(std::max(rect_tr.x, rect_br.x)),
std::floor(std::max(rect_bl.y, rect_br.y)));
return cv::Rect(roi_tl.x, roi_tl.y, roi_br.x - roi_tl.x, roi_br.y - roi_tl.y);
}
cv::Rect PinholeCameraModel::unrectifyRoi(const cv::Rect& roi_rect) const
{
assert( initialized() );
/// @todo Actually implement "best fit" as described by REP 104.
// For now, just unrectify the four corners and take the bounding box.
cv::Point2d raw_tl = unrectifyPoint(cv::Point2d(roi_rect.x, roi_rect.y));
cv::Point2d raw_tr = unrectifyPoint(cv::Point2d(roi_rect.x + roi_rect.width, roi_rect.y));
cv::Point2d raw_br = unrectifyPoint(cv::Point2d(roi_rect.x + roi_rect.width,
roi_rect.y + roi_rect.height));
cv::Point2d raw_bl = unrectifyPoint(cv::Point2d(roi_rect.x, roi_rect.y + roi_rect.height));
cv::Point roi_tl(std::floor(std::min(raw_tl.x, raw_bl.x)),
std::floor(std::min(raw_tl.y, raw_tr.y)));
cv::Point roi_br(std::ceil (std::max(raw_tr.x, raw_br.x)),
std::ceil (std::max(raw_bl.y, raw_br.y)));
return cv::Rect(roi_tl.x, roi_tl.y, roi_br.x - roi_tl.x, roi_br.y - roi_tl.y);
}
void PinholeCameraModel::initRectificationMaps() const
{
/// @todo For large binning settings, can drop extra rows/cols at bottom/right boundary.
/// Make sure we're handling that 100% correctly.
if (cache_->full_maps_dirty) {
// Create the full-size map at the binned resolution
/// @todo Should binned resolution, K, P be part of public API?
cv::Size binned_resolution = fullResolution();
binned_resolution.width /= binningX();
binned_resolution.height /= binningY();
cv::Matx33d K_binned;
cv::Matx34d P_binned;
if (binningX() == 1 && binningY() == 1) {
K_binned = K_full_;
P_binned = P_full_;
}
else {
K_binned = K_full_;
P_binned = P_full_;
if (binningX() > 1) {
double scale_x = 1.0 / binningX();
K_binned(0,0) *= scale_x;
K_binned(0,2) *= scale_x;
P_binned(0,0) *= scale_x;
P_binned(0,2) *= scale_x;
P_binned(0,3) *= scale_x;
}
if (binningY() > 1) {
double scale_y = 1.0 / binningY();
K_binned(1,1) *= scale_y;
K_binned(1,2) *= scale_y;
P_binned(1,1) *= scale_y;
P_binned(1,2) *= scale_y;
P_binned(1,3) *= scale_y;
}
}
// Note: m1type=CV_16SC2 to use fast fixed-point maps (see cv::remap)
cv::initUndistortRectifyMap(K_binned, D_, R_, P_binned, binned_resolution,
CV_16SC2, cache_->full_map1, cache_->full_map2);
cache_->full_maps_dirty = false;
}
if (cache_->reduced_maps_dirty) {
/// @todo Use rectified ROI
cv::Rect roi(cam_info_.roi.x_offset, cam_info_.roi.y_offset,
cam_info_.roi.width, cam_info_.roi.height);
if (roi.x != 0 || roi.y != 0 ||
roi.height != (int)cam_info_.height ||
roi.width != (int)cam_info_.width) {
// map1 contains integer (x,y) offsets, which we adjust by the ROI offset
// map2 contains LUT index for subpixel interpolation, which we can leave as-is
roi.x /= binningX();
roi.y /= binningY();
roi.width /= binningX();
roi.height /= binningY();
cache_->reduced_map1 = cache_->full_map1(roi) - cv::Scalar(roi.x, roi.y);
cache_->reduced_map2 = cache_->full_map2(roi);
}
else {
// Otherwise we're rectifying the full image
cache_->reduced_map1 = cache_->full_map1;
cache_->reduced_map2 = cache_->full_map2;
}
cache_->reduced_maps_dirty = false;
}
}
} //namespace image_geometry
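
The unrectifyImage() stub above describes the missing inverse-map construction only in
comments. A rough Python sketch of those same steps, assuming K, D, R, P are numpy
calibration arrays and rect_img is an already-rectified image (an illustration under
those assumptions, not part of the library):

    import cv2
    import numpy as np

    h, w = 480, 640  # assumed image size
    # One (x, y) source point per raw pixel.
    xs, ys = np.meshgrid(np.arange(w, dtype=np.float32),
                         np.arange(h, dtype=np.float32))
    raw_pts = np.stack([xs, ys], axis=-1).reshape(-1, 1, 2)
    # Map each raw pixel into rectified image coordinates.
    rect_pts = cv2.undistortPoints(raw_pts, K, D, R=R, P=P)
    # Convert to fast fixed-point maps, then remap rectified -> raw.
    map1, map2 = cv2.convertMaps(rect_pts.reshape(h, w, 2), None, cv2.CV_16SC2)
    raw_img = cv2.remap(rect_img, map1, map2, cv2.INTER_LINEAR)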

View File

@@ -0,0 +1,140 @@
#include "image_geometry/stereo_camera_model.h"
namespace image_geometry {
StereoCameraModel::StereoCameraModel()
: Q_(0.0)
{
Q_(0,0) = Q_(1,1) = 1.0;
}
StereoCameraModel::StereoCameraModel(const StereoCameraModel& other)
: left_(other.left_), right_(other.right_),
Q_(0.0)
{
Q_(0,0) = Q_(1,1) = 1.0;
if (other.initialized())
updateQ();
}
StereoCameraModel& StereoCameraModel::operator=(const StereoCameraModel& other)
{
if (other.initialized())
this->fromCameraInfo(other.left_.cameraInfo(), other.right_.cameraInfo());
return *this;
}
bool StereoCameraModel::fromCameraInfo(const sensor_msgs::CameraInfo& left,
const sensor_msgs::CameraInfo& right)
{
bool changed_left = left_.fromCameraInfo(left);
bool changed_right = right_.fromCameraInfo(right);
bool changed = changed_left || changed_right;
// Note: don't require identical time stamps to allow imperfectly synced stereo.
assert( left_.tfFrame() == right_.tfFrame() );
assert( left_.fx() == right_.fx() );
assert( left_.fy() == right_.fy() );
assert( left_.cy() == right_.cy() );
// cx may differ for verged cameras
if (changed)
updateQ();
return changed;
}
bool StereoCameraModel::fromCameraInfo(const sensor_msgs::CameraInfoConstPtr& left,
const sensor_msgs::CameraInfoConstPtr& right)
{
return fromCameraInfo(*left, *right);
}
void StereoCameraModel::updateQ()
{
// Update variable fields of reprojection matrix
/*
From Springer Handbook of Robotics, p. 524:

     [ Fx  0   Cx   0    ]
P  = [ 0   Fy  Cy   0    ]
     [ 0   0   1    0    ]

     [ Fx  0   Cx'  FxTx ]
P' = [ 0   Fy  Cy   0    ]
     [ 0   0   1    0    ]

where primed parameters are from the left projection matrix, unprimed from the right.

[u   v 1]^T = P  * [x y z 1]^T
[u-d v 1]^T = P' * [x y z 1]^T

Combining the two equations above results in the following equation

[u v u-d 1]^T = [ Fx  0   Cx   0    ] * [x y z 1]^T
                [ 0   Fy  Cy   0    ]
                [ Fx  0   Cx'  FxTx ]
                [ 0   0   1    0    ]

Subtracting the 3rd row from the first and inverting the expression
results in the following equation.

[x y z 1]^T = Q * [u v d 1]^T

where Q is defined as

    [ FyTx  0     0   -FyCxTx     ]
Q = [ 0     FxTx  0   -FxCyTx     ]
    [ 0     0     0    FxFyTx     ]
    [ 0     0    -Fy   Fy(Cx-Cx') ]

Using the assumption Fx = Fy, Q can be simplified to

    [ 1  0   0     -Cx         ]
Q = [ 0  1   0     -Cy         ]
    [ 0  0   0      Fx         ]
    [ 0  0  -1/Tx  (Cx-Cx')/Tx ]

but for compatibility with stereo cameras with different focal lengths we use
the full Q matrix.

Disparity = x_left - x_right
*/
double Tx = -baseline(); // The baseline member negates our Tx. Undo this negation
Q_(0,0) = left_.fy() * Tx;
Q_(0,3) = -left_.fy() * left_.cx() * Tx;
Q_(1,1) = left_.fx() * Tx;
Q_(1,3) = -left_.fx() * left_.cy() * Tx;
Q_(2,3) = left_.fx() * left_.fy() * Tx;
Q_(3,2) = -left_.fy();
Q_(3,3) = left_.fy() * (left_.cx() - right_.cx()); // zero when disparities are pre-adjusted
}
void StereoCameraModel::projectDisparityTo3d(const cv::Point2d& left_uv_rect, float disparity,
cv::Point3d& xyz) const
{
assert( initialized() );
// Do the math inline:
// [X Y Z W]^T = Q * [u v d 1]^T
// Point = (X/W, Y/W, Z/W)
// cv::perspectiveTransform could be used but with more overhead.
double u = left_uv_rect.x, v = left_uv_rect.y;
cv::Point3d XYZ( (Q_(0,0) * u) + Q_(0,3), (Q_(1,1) * v) + Q_(1,3), Q_(2,3));
double W = Q_(3,2)*disparity + Q_(3,3);
xyz = XYZ * (1.0/W);
}
const double StereoCameraModel::MISSING_Z = 10000.;
void StereoCameraModel::projectDisparityImageTo3d(const cv::Mat& disparity, cv::Mat& point_cloud,
bool handleMissingValues) const
{
assert( initialized() );
cv::reprojectImageTo3D(disparity, point_cloud, Q_, handleMissingValues);
}
} //namespace image_geometry
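
A small numpy sanity check of the Q matrix assembled in updateQ() above: reproject
[u v d 1]^T and confirm the perspective divide yields the textbook depth fx*B/d. The
calibration numbers are hypothetical:

    import numpy as np

    fx = fy = 295.5; cx = cxp = 285.6; cy = 223.3  # made-up intrinsics
    B = 0.09                                       # baseline in meters
    Tx = -B                                        # sign convention used in updateQ()
    Q = np.array([[fy*Tx, 0.0,   0.0, -fy*cx*Tx],
                  [0.0,   fx*Tx, 0.0, -fx*cy*Tx],
                  [0.0,   0.0,   0.0,  fx*fy*Tx],
                  [0.0,   0.0,  -fy,   fy*(cx - cxp)]])
    u, v, d = 320.0, 240.0, 8.0
    X, Y, Z, W = Q @ np.array([u, v, d, 1.0])
    assert abs(Z / W - fx * B / d) < 1e-9  # recovered depth matches fx*B/d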

View File

@@ -0,0 +1,4 @@
catkin_add_nosetests(directed.py)
catkin_add_gtest(${PROJECT_NAME}-utest utest.cpp)
target_link_libraries(${PROJECT_NAME}-utest ${PROJECT_NAME} ${OpenCV_LIBS})

View File

@@ -0,0 +1,77 @@
from __future__ import print_function
import rostest
import rospy
import unittest
import sensor_msgs.msg
from image_geometry import PinholeCameraModel, StereoCameraModel
class TestDirected(unittest.TestCase):
    def setUp(self):
        pass

    def test_monocular(self):
        ci = sensor_msgs.msg.CameraInfo()
        ci.width = 640
        ci.height = 480
        print(ci)
        cam = PinholeCameraModel()
        cam.fromCameraInfo(ci)
        print(cam.rectifyPoint((0, 0)))
        print(cam.project3dToPixel((0, 0, 0)))

    def test_stereo(self):
        lmsg = sensor_msgs.msg.CameraInfo()
        rmsg = sensor_msgs.msg.CameraInfo()
        for m in (lmsg, rmsg):
            m.width = 640
            m.height = 480

        # These parameters taken from a real camera calibration
        lmsg.D = [-0.363528858080088, 0.16117037733986861, -8.1109585007538829e-05, -0.00044776712298447841, 0.0]
        lmsg.K = [430.15433020105519, 0.0, 311.71339830549732, 0.0, 430.60920415473657, 221.06824942698509, 0.0, 0.0, 1.0]
        lmsg.R = [0.99806560714807102, 0.0068562422224214027, 0.061790256276695904, -0.0067522959054715113, 0.99997541519165112, -0.0018909025066874664, -0.061801701660692349, 0.0014700186639396652, 0.99808736527268516]
        lmsg.P = [295.53402059708782, 0.0, 285.55760765075684, 0.0, 0.0, 295.53402059708782, 223.29617881774902, 0.0, 0.0, 0.0, 1.0, 0.0]
        rmsg.D = [-0.3560641041112021, 0.15647260261553159, -0.00016442960757099968, -0.00093175810713916221]
        rmsg.K = [428.38163131344191, 0.0, 327.95553847249192, 0.0, 428.85728580588329, 217.54828640915309, 0.0, 0.0, 1.0]
        rmsg.R = [0.9982082576219119, 0.0067433328293516528, 0.059454199832973849, -0.0068433268864187356, 0.99997549128605434, 0.0014784127772287513, -0.059442773257581252, -0.0018826283666309878, 0.99822993965212292]
        rmsg.P = [295.53402059708782, 0.0, 285.55760765075684, -26.507895206214123, 0.0, 295.53402059708782, 223.29617881774902, 0.0, 0.0, 0.0, 1.0, 0.0]

        cam = StereoCameraModel()
        cam.fromCameraInfo(lmsg, rmsg)

        for x in (16, 320, m.width - 16):
            for y in (16, 240, m.height - 16):
                for d in range(1, 10):
                    pt3d = cam.projectPixelTo3d((x, y), d)
                    ((lx, ly), (rx, ry)) = cam.project3dToPixel(pt3d)
                    self.assertAlmostEqual(y, ly, 3)
                    self.assertAlmostEqual(y, ry, 3)
                    self.assertAlmostEqual(x, lx, 3)
                    self.assertAlmostEqual(x, rx + d, 3)

        u = 100.0
        v = 200.0
        du = 17.0
        dv = 23.0
        Z = 2.0
        xyz0 = cam.left.projectPixelTo3dRay((u, v))
        xyz0 = (xyz0[0] * (Z / xyz0[2]), xyz0[1] * (Z / xyz0[2]), Z)
        xyz1 = cam.left.projectPixelTo3dRay((u + du, v + dv))
        xyz1 = (xyz1[0] * (Z / xyz1[2]), xyz1[1] * (Z / xyz1[2]), Z)
        self.assertAlmostEqual(cam.left.getDeltaU(xyz1[0] - xyz0[0], Z), du, 3)
        self.assertAlmostEqual(cam.left.getDeltaV(xyz1[1] - xyz0[1], Z), dv, 3)
        self.assertAlmostEqual(cam.left.getDeltaX(du, Z), xyz1[0] - xyz0[0], 3)
        self.assertAlmostEqual(cam.left.getDeltaY(dv, Z), xyz1[1] - xyz0[1], 3)

if __name__ == '__main__':
    if 1:
        rostest.unitrun('image_geometry', 'directed', TestDirected)
    else:
        suite = unittest.TestSuite()
        suite.addTest(TestDirected('test_stereo'))
        unittest.TextTestRunner(verbosity=2).run(suite)

View File

@@ -0,0 +1,259 @@
#include "image_geometry/pinhole_camera_model.h"
#include <sensor_msgs/distortion_models.h>
#include <gtest/gtest.h>
/// @todo Tests with simple values (R = identity, D = 0, P = K or simple scaling)
/// @todo Test projection functions for right stereo values, P(:,3) != 0
/// @todo Tests for [un]rectifyImage
/// @todo Tests using ROI, needs support from PinholeCameraModel
/// @todo Tests for StereoCameraModel
class PinholeTest : public testing::Test
{
protected:
virtual void SetUp()
{
/// @todo Just load these from file
// These parameters taken from a real camera calibration
double D[] = {-0.363528858080088, 0.16117037733986861, -8.1109585007538829e-05, -0.00044776712298447841, 0.0};
double K[] = {430.15433020105519, 0.0, 311.71339830549732,
0.0, 430.60920415473657, 221.06824942698509,
0.0, 0.0, 1.0};
double R[] = {0.99806560714807102, 0.0068562422224214027, 0.061790256276695904,
-0.0067522959054715113, 0.99997541519165112, -0.0018909025066874664,
-0.061801701660692349, 0.0014700186639396652, 0.99808736527268516};
double P[] = {295.53402059708782, 0.0, 285.55760765075684, 0.0,
0.0, 295.53402059708782, 223.29617881774902, 0.0,
0.0, 0.0, 1.0, 0.0};
cam_info_.header.frame_id = "tf_frame";
cam_info_.height = 480;
cam_info_.width = 640;
// No ROI
cam_info_.D.resize(5);
std::copy(D, D+5, cam_info_.D.begin());
std::copy(K, K+9, cam_info_.K.begin());
std::copy(R, R+9, cam_info_.R.begin());
std::copy(P, P+12, cam_info_.P.begin());
cam_info_.distortion_model = sensor_msgs::distortion_models::PLUMB_BOB;
model_.fromCameraInfo(cam_info_);
}
sensor_msgs::CameraInfo cam_info_;
image_geometry::PinholeCameraModel model_;
};
TEST_F(PinholeTest, accessorsCorrect)
{
EXPECT_STREQ("tf_frame", model_.tfFrame().c_str());
EXPECT_EQ(cam_info_.P[0], model_.fx());
EXPECT_EQ(cam_info_.P[5], model_.fy());
EXPECT_EQ(cam_info_.P[2], model_.cx());
EXPECT_EQ(cam_info_.P[6], model_.cy());
}
TEST_F(PinholeTest, projectPoint)
{
// Spot test an arbitrary point.
{
cv::Point2d uv(100, 100);
cv::Point3d xyz = model_.projectPixelTo3dRay(uv);
EXPECT_NEAR(-0.62787224048135637, xyz.x, 1e-8);
EXPECT_NEAR(-0.41719792045817677, xyz.y, 1e-8);
EXPECT_DOUBLE_EQ(1.0, xyz.z);
}
// Principal point should project straight out.
{
cv::Point2d uv(model_.cx(), model_.cy());
cv::Point3d xyz = model_.projectPixelTo3dRay(uv);
EXPECT_DOUBLE_EQ(0.0, xyz.x);
EXPECT_DOUBLE_EQ(0.0, xyz.y);
EXPECT_DOUBLE_EQ(1.0, xyz.z);
}
// Check projecting to 3d and back over entire image is accurate.
const size_t step = 10;
for (size_t row = 0; row <= cam_info_.height; row += step) {
for (size_t col = 0; col <= cam_info_.width; col += step) {
cv::Point2d uv(row, col), uv_back;
cv::Point3d xyz = model_.projectPixelTo3dRay(uv);
uv_back = model_.project3dToPixel(xyz);
// Measured max error at 1.13687e-13
EXPECT_NEAR(uv.x, uv_back.x, 1.14e-13) << "at (" << row << ", " << col << ")";
EXPECT_NEAR(uv.y, uv_back.y, 1.14e-13) << "at (" << row << ", " << col << ")";
}
}
}
TEST_F(PinholeTest, rectifyPoint)
{
// Spot test an arbitrary point.
{
cv::Point2d uv_raw(100, 100), uv_rect;
uv_rect = model_.rectifyPoint(uv_raw);
EXPECT_DOUBLE_EQ(142.30311584472656, uv_rect.x);
EXPECT_DOUBLE_EQ(132.061065673828, uv_rect.y);
}
/// @todo Need R = identity for the principal point tests.
#if 0
// Test rectifyPoint takes (c'x, c'y) [from K] -> (cx, cy) [from P].
double cxp = model_.intrinsicMatrix()(0,2), cyp = model_.intrinsicMatrix()(1,2);
{
cv::Point2d uv_raw(cxp, cyp), uv_rect;
model_.rectifyPoint(uv_raw, uv_rect);
EXPECT_NEAR(uv_rect.x, model_.cx(), 1e-4);
EXPECT_NEAR(uv_rect.y, model_.cy(), 1e-4);
}
// Test unrectifyPoint takes (cx, cy) [from P] -> (c'x, c'y) [from K].
{
cv::Point2d uv_rect(model_.cx(), model_.cy()), uv_raw;
model_.unrectifyPoint(uv_rect, uv_raw);
EXPECT_NEAR(uv_raw.x, cxp, 1e-4);
EXPECT_NEAR(uv_raw.y, cyp, 1e-4);
}
#endif
// Check rectifying then unrectifying over most of the image is accurate.
const size_t step = 5;
const size_t border = 65; // Expect bad accuracy far from the center of the image.
for (size_t row = border; row <= cam_info_.height - border; row += step) {
for (size_t col = border; col <= cam_info_.width - border; col += step) {
cv::Point2d uv_raw(row, col), uv_rect, uv_unrect;
uv_rect = model_.rectifyPoint(uv_raw);
uv_unrect = model_.unrectifyPoint(uv_rect);
// Check that we're at least within a pixel...
EXPECT_NEAR(uv_raw.x, uv_unrect.x, 1.0);
EXPECT_NEAR(uv_raw.y, uv_unrect.y, 1.0);
}
}
}
TEST_F(PinholeTest, getDeltas)
{
double u = 100.0, v = 200.0, du = 17.0, dv = 23.0, Z = 2.0;
cv::Point2d uv0(u, v), uv1(u + du, v + dv);
cv::Point3d xyz0, xyz1;
xyz0 = model_.projectPixelTo3dRay(uv0);
xyz0 *= (Z / xyz0.z);
xyz1 = model_.projectPixelTo3dRay(uv1);
xyz1 *= (Z / xyz1.z);
EXPECT_NEAR(model_.getDeltaU(xyz1.x - xyz0.x, Z), du, 1e-4);
EXPECT_NEAR(model_.getDeltaV(xyz1.y - xyz0.y, Z), dv, 1e-4);
EXPECT_NEAR(model_.getDeltaX(du, Z), xyz1.x - xyz0.x, 1e-4);
EXPECT_NEAR(model_.getDeltaY(dv, Z), xyz1.y - xyz0.y, 1e-4);
}
TEST_F(PinholeTest, initialization)
{
sensor_msgs::CameraInfo info;
image_geometry::PinholeCameraModel camera;
camera.fromCameraInfo(info);
EXPECT_EQ(camera.initialized(), 1);
EXPECT_EQ(camera.projectionMatrix().rows, 3);
EXPECT_EQ(camera.projectionMatrix().cols, 4);
}
TEST_F(PinholeTest, rectifyIfCalibrated)
{
/// @todo use forward distortion for a better test
// Ideally this test would have two images stored on disk
// one which is distorted and the other which is rectified,
// and then rectification would take place here and the output
// image compared to the one on disk (which would mean if
// the distortion coefficients above can't change once paired with
// an image).
// Later could incorporate distort code
// (https://github.com/lucasw/vimjay/blob/master/src/standalone/distort_image.cpp)
// to take any image distort it, then undistort with rectifyImage,
// and given the distortion coefficients are consistent the input image
// and final output image should be mostly the same (though some
// interpolation error
// creeps in), except for outside a masked region where information was lost.
// The masked region can be generated with a pure white image that
// goes through the same process (if it comes out completely black
// then the distortion parameters are problematic).
// For now generate an image and pass the test simply if
// the rectified image does not match the distorted image.
// Then zero out the first distortion coefficient and run
// the test again.
// Then zero out all the distortion coefficients and test
// that the output image is the same as the input.
cv::Mat distorted_image(cv::Size(cam_info_.width, cam_info_.height), CV_8UC3, cv::Scalar(0, 0, 0));
// draw a grid
const cv::Scalar color = cv::Scalar(255, 255, 255);
// draw the lines thick so the proportion of error due to
// interpolation is reduced
const int thickness = 7;
const int type = 8;
for (size_t y = 0; y <= cam_info_.height; y += cam_info_.height/10)
{
// cv::line takes thickness before line type
cv::line(distorted_image,
cv::Point(0, y), cv::Point(cam_info_.width, y),
color, thickness, type);
}
for (size_t x = 0; x <= cam_info_.width; x += cam_info_.width/10)
{
// draw the lines thick so the proportion of interpolation error is reduced
cv::line(distorted_image,
cv::Point(x, 0), cv::Point(x, cam_info_.height),
color, thickness, type);
}
cv::Mat rectified_image;
// Just making this number up, maybe ought to be larger
// since a completely different image would be on the order of
// width * height * 255 = 78e6
const double diff_threshold = 10000.0;
double error;
// Test that rectified image is sufficiently different
// using default distortion
model_.rectifyImage(distorted_image, rectified_image);
error = cv::norm(distorted_image, rectified_image, cv::NORM_L1);
// Just making this number up, maybe ought to be larger
EXPECT_GT(error, diff_threshold);
// Test that rectified image is sufficiently different
// using default distortion but with first element zeroed
// out.
sensor_msgs::CameraInfo cam_info_2 = cam_info_;
cam_info_2.D[0] = 0.0;
model_.fromCameraInfo(cam_info_2);
model_.rectifyImage(distorted_image, rectified_image);
error = cv::norm(distorted_image, rectified_image, cv::NORM_L1);
EXPECT_GT(error, diff_threshold);
// Test that rectified image is the same using zero distortion
cam_info_2.D.assign(cam_info_2.D.size(), 0);
model_.fromCameraInfo(cam_info_2);
model_.rectifyImage(distorted_image, rectified_image);
error = cv::norm(distorted_image, rectified_image, cv::NORM_L1);
EXPECT_EQ(error, 0);
// Test that rectified image is the same using empty distortion
cam_info_2.D.clear();
model_.fromCameraInfo(cam_info_2);
model_.rectifyImage(distorted_image, rectified_image);
error = cv::norm(distorted_image, rectified_image, cv::NORM_L1);
EXPECT_EQ(error, 0);
// restore original distortion
model_.fromCameraInfo(cam_info_);
}
int main(int argc, char** argv)
{
testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
}

View File

@@ -0,0 +1,281 @@
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Changelog for package opencv_tests
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
1.13.1 (2022-10-03)
-------------------
1.13.0 (2018-04-30)
-------------------
1.12.8 (2018-04-17)
-------------------
1.12.7 (2017-11-12)
-------------------
1.12.6 (2017-11-11)
-------------------
1.12.5 (2017-11-05)
-------------------
1.12.4 (2017-01-29)
-------------------
1.12.3 (2016-12-04)
-------------------
1.12.2 (2016-09-24)
-------------------
1.12.1 (2016-07-11)
-------------------
* Support compressed Image messages in python for indigo
- Add cv2_to_compressed_imgmsg: Convert from cv2 image to compressed image ros msg.
- Add compressed_imgmsg_to_cv2: Convert the compressed message to a new image.
- Add compressed image tests.
- Add time to msgs (compressed and regular).
add enumerants test for compressed image.
merge the compressed tests with the regular ones.
better comment explanation. I will squash this commit.
Fix indentation
fix typo mistake: from .imgmsg_to_compressed_cv2 to .compressed_imgmsg_to_cv2.
remove cv2.CV_8UC1
remove rospy and time dependency.
change from IMREAD_COLOR to IMREAD_ANYCOLOR.
- make indentation of 4.
- remove space trailer.
- remove space from empty lines.
- another set of for loops, it will make things easier to track. In that new set, just have the number of channels in ([],1,3,4) (ignore two for jpg). from: https://github.com/ros-perception/vision_opencv/pull/132#discussion_r66721943
- keep the OpenCV error message. from: https://github.com/ros-perception/vision_opencv/pull/132#discussion_r66721013
add debug print for test.
add case for 4 channels in test.
remove 4 channels case from compressed test.
add debug print for test.
change typo of format.
fix typo in format. change from dip to dib.
change to IMREAD_ANYCOLOR as python code. (as it should).
rename TIFF to tiff
Separate the tests, one for regular images and one for compressed.
update comment
* Contributors: talregev
1.12.0 (2016-03-18)
-------------------
1.11.12 (2016-03-10)
--------------------
1.11.11 (2016-01-31)
--------------------
* fix a few warnings in doc jobs
* Contributors: Vincent Rabaud
1.11.10 (2016-01-16)
--------------------
1.11.9 (2015-11-29)
-------------------
1.11.8 (2015-07-15)
-------------------
* simplify dependencies
* Contributors: Vincent Rabaud
1.11.7 (2014-12-14)
-------------------
1.11.6 (2014-11-16)
-------------------
1.11.5 (2014-09-21)
-------------------
1.11.4 (2014-07-27)
-------------------
1.11.3 (2014-06-08)
-------------------
* remove file whose functionality is now in cv_bridge
* remove references to cv (use cv2)
* Correct dependency from non-existent package to cv_bridge
* Contributors: Isaac Isao Saito, Vincent Rabaud
1.11.2 (2014-04-28)
-------------------
1.11.1 (2014-04-16)
-------------------
1.11.0 (2014-02-15)
-------------------
1.10.15 (2014-02-07)
--------------------
1.10.14 (2013-11-23 16:17)
--------------------------
* Contributors: Vincent Rabaud
1.10.13 (2013-11-23 09:19)
--------------------------
* Contributors: Vincent Rabaud
1.10.12 (2013-11-22)
--------------------
* Contributors: Vincent Rabaud
1.10.11 (2013-10-23)
--------------------
* Contributors: Vincent Rabaud
1.10.10 (2013-10-19)
--------------------
* Contributors: Vincent Rabaud
1.10.9 (2013-10-07)
-------------------
* Contributors: Vincent Rabaud
1.10.8 (2013-09-09)
-------------------
* update email address
* Contributors: Vincent Rabaud
1.10.7 (2013-07-17)
-------------------
1.10.6 (2013-03-01)
-------------------
1.10.5 (2013-02-11)
-------------------
1.10.4 (2013-02-02)
-------------------
1.10.3 (2013-01-17)
-------------------
1.10.2 (2013-01-13)
-------------------
1.10.1 (2013-01-10)
-------------------
* fixes `#5 <https://github.com/ros-perception/vision_opencv/issues/5>`_ by removing the logic from Python and using wrapped C++ and adding a test for it
* Contributors: Vincent Rabaud
1.10.0 (2013-01-03)
-------------------
1.9.15 (2013-01-02)
-------------------
1.9.14 (2012-12-30)
-------------------
1.9.13 (2012-12-15)
-------------------
1.9.12 (2012-12-14)
-------------------
* Removed brief tag
Conflicts:
opencv_tests/package.xml
* buildtool_depend catkin fix
* Contributors: William Woodall
1.9.11 (2012-12-10)
-------------------
1.9.10 (2012-10-04)
-------------------
1.9.9 (2012-10-01)
------------------
1.9.8 (2012-09-30)
------------------
1.9.7 (2012-09-28 21:07)
------------------------
* add missing stuff
* make sure we find catkin
* Contributors: Vincent Rabaud
1.9.6 (2012-09-28 15:17)
------------------------
* move the test to where it belongs
* fix the tests and the API to not handle conversion from CV_TYPE to Color type (does not make sense)
* make all the tests pass
* comply to the new Catkin API
* backport the C++ test from Fuerte
* Contributors: Vincent Rabaud
1.9.5 (2012-09-15)
------------------
* remove dependencies to the opencv2 ROS package
* Contributors: Vincent Rabaud
1.9.4 (2012-09-13)
------------------
1.9.3 (2012-09-12)
------------------
* update to nosetests
* Contributors: Vincent Rabaud
1.9.2 (2012-09-07)
------------------
* be more compliant to the latest catkin
* added catkin_project() to cv_bridge, image_geometry, and opencv_tests
* Contributors: Jonathan Binney, Vincent Rabaud
1.9.1 (2012-08-28 22:06)
------------------------
* remove a deprecated header
* Contributors: Vincent Rabaud
1.9.0 (2012-08-28 14:29)
------------------------
* cleanup by Jon Binney
* catkinized opencv_tests by Jon Binney
* remove the version check, let's trust OpenCV :)
* revert the removal of opencv2
* finally get rid of opencv2 as it is a system dependency now
* bump REQUIRED version of OpenCV to 2.3.2, which is what's in ros-fuerte-opencv
* switch rosdep name to opencv2, to refer to ros-fuerte-opencv2
* Fixing link lines for gtest against opencv.
* Adding opencv2 to all manifests, so that client packages may
not break when using them.
* baking in opencv debs and attempting a pre-release
* Another hack for prerelease to quiet test failures.
* Disable a dubious opencv test. Temporary HACK.
* Changing to expect for more verbose failure.
* Minor change to test.
* Making this depend on libopencv-2.3-dev debian available in ros-shadow.
* mono16 -> bgr conversion tested and fixed in C
* Added Ubuntu platform tags to manifest
* Tuned for parc loop
* Demo of ROS node face detection
* mono16 support, ticket `#2890 <https://github.com/ros-perception/vision_opencv/issues/2890>`_
* Remove use of deprecated rosbuild macros
* cv_bridge split from opencv2
* Name changes for opencv -> vision_opencv
* Validation for image message encoding
* utest changed to reflect rosimgtocv change to imgmsgtocv
* Add opencvpython as empty package
* New methods for cv image conversion
* Disabling tests on OSX, `#2769 <https://github.com/ros-perception/vision_opencv/issues/2769>`_
* New Python CvBridge, rewrote C CvBridge, regression test for C and Python CvBridge
* Fix underscore problem, test 8UC3->BGR8, fix 8UC3->BGR8
* New image format
* Image message and CvBridge change
* Rename rows,cols to height,width in Image message
* New node bbc for image testing
* Make executable
* Pong demo
* Missing utest.cpp
* New sensor_msgs::Image message
* Contributors: Vincent Rabaud, ethanrublee, gerkey, jamesb, jamesbowman, pantofaru, vrabaud, wheeler

View File

@@ -0,0 +1,5 @@
cmake_minimum_required(VERSION 2.8)
project(opencv_tests)
find_package(catkin REQUIRED)
catkin_package()

View File

@@ -0,0 +1,5 @@
<launch>
<master auto="start"/>
<node pkg="image_view" type="image_view" args="image:=/opencv_tests/images" respawn="false" output="screen"/>
<node pkg="opencv_tests" type="source.py" respawn="false" />
</launch>

View File

@@ -0,0 +1,119 @@
/**
\mainpage
\htmlinclude manifest.html
\b opencv_tests is ...
<!--
In addition to providing an overview of your package,
this is the section where the specification and design/architecture
should be detailed. While the original specification may be done on the
wiki, it should be transferred here once your package starts to take shape.
You can then link to this documentation page from the Wiki.
-->
\section codeapi Code API
<!--
Provide links to specific auto-generated API documentation within your
package that is of particular interest to a reader. Doxygen will
document pretty much every part of your code, so do your best here to
point the reader to the actual API.
If your codebase is fairly large or has different sets of APIs, you
should use the doxygen 'group' tag to keep these APIs together. For
example, the roscpp documentation has 'libros' group.
-->
\section rosapi ROS API
<!--
Names are very important in ROS because they can be remapped on the
command-line, so it is VERY IMPORTANT THAT YOU LIST NAMES AS THEY
APPEAR IN THE CODE. You should list names of every topic, service and
parameter used in your code. There is a template below that you can
use to document each node separately.
List of nodes:
- \b node_name1
- \b node_name2
-->
<!-- START: copy from here to 'END' for each node
<hr>
\subsection node_name node_name
node_name does (provide a basic description of your node)
\subsubsection Usage
\verbatim
$ node_type1 [standard ROS args]
\endverbatim
\par Example
\verbatim
$ node_type1
\endverbatim
\subsubsection topics ROS topics
Subscribes to:
- \b "in": [std_msgs/FooType] description of in
Publishes to:
- \b "out": [std_msgs/FooType] description of out
\subsubsection parameters ROS parameters
Reads the following parameters from the parameter server
- \b "~param_name" : \b [type] description of param_name
- \b "~my_param" : \b [string] description of my_param
Sets the following parameters on the parameter server
- \b "~param_name" : \b [type] description of param_name
\subsubsection services ROS services
- \b "foo_service": [std_srvs/FooType] description of foo_service
END: copy for each node -->
<!-- START: Uncomment if you have any command-line tools
\section commandline Command-line tools
This section is a catch-all for any additional tools that your package
provides or uses that may be of use to the reader. For example:
- tools/scripts (e.g. rospack, roscd)
- roslaunch .launch files
- xmlparam files
\subsection script_name script_name
Description of what this script/file does.
\subsubsection Usage
\verbatim
$ ./script_name [args]
\endverbatim
\par Example
\verbatim
$ ./script_name foo bar
\endverbatim
END: Command-Line Tools Section -->
*/

View File

@@ -0,0 +1,78 @@
#!/usr/bin/env python
# Software License Agreement (BSD License)
#
# Copyright (c) 2008, Willow Garage, Inc.
# Copyright (c) 2016, Tal Regev.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of the Willow Garage nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import sys
import time
import math
import rospy
import cv2
import sensor_msgs.msg
from cv_bridge import CvBridge
# Iterate over a given list of image files, publishing each one to a given
# topic as both a regular and a compressed ROS Image message.
class Source:
    def __init__(self, topic, filenames):
        self.pub = rospy.Publisher(topic, sensor_msgs.msg.Image)
        self.pub_compressed = rospy.Publisher(topic + "/compressed", sensor_msgs.msg.CompressedImage)
        self.filenames = filenames

    def spin(self):
        time.sleep(1.0)
        cvb = CvBridge()
        while not rospy.core.is_shutdown():
            cvim = cv2.imread(self.filenames[0])
            self.pub.publish(cvb.cv2_to_imgmsg(cvim))
            self.pub_compressed.publish(cvb.cv2_to_compressed_imgmsg(cvim))
            # rotate the list so the next file is published on the next pass
            self.filenames = self.filenames[1:] + [self.filenames[0]]
            time.sleep(1)

def main(args):
    s = Source(args[1], args[2:])
    rospy.init_node('Source')
    try:
        s.spin()
        rospy.spin()
        outcome = 'test completed'
    except KeyboardInterrupt:
        print("shutting down")
        outcome = 'keyboard interrupt'
    rospy.core.signal_shutdown(outcome)

if __name__ == '__main__':
    main(sys.argv)

View File

@@ -0,0 +1,105 @@
#!/usr/bin/python
"""
This program is demonstration for face and object detection using haar-like features.
The program finds faces in a camera image or video stream and displays a red box around them.
Original C implementation by: ?
Python implementation by: Roman Stanchak, James Bowman
Updated: Copyright (c) 2016, Tal Regev.
"""
import sys
import os
from optparse import OptionParser
import rospy
import sensor_msgs.msg
from cv_bridge import CvBridge
import cv2
import numpy
# Parameters for haar detection
# From the API:
# The default parameters (scale_factor=2, min_neighbors=3, flags=0) are tuned
# for accurate yet slow object detection. For a faster operation on real video
# images the settings are:
# scale_factor=1.2, min_neighbors=2, flags=CV_HAAR_DO_CANNY_PRUNING,
# min_size=<minimum possible face size>
min_size = (10, 10)
image_scale = 2
haar_scale = 1.2
min_neighbors = 2
haar_flags = 0
if __name__ == '__main__':
    # TODO: add the cascade file to the repository and make the path relative
    # to this python script (not everyone will run this from linux).
    haarfile = '/usr/share/opencv/haarcascades/haarcascade_frontalface_alt.xml'

    parser = OptionParser(usage="usage: %prog [options]")
    parser.add_option("-c", "--cascade", action="store", dest="cascade", type="str", help="Haar cascade file, default %default", default=haarfile)
    parser.add_option("-t", "--topic", action="store", dest="topic", type="str", help="Topic to find a face on, default %default", default='/camera/rgb/image_raw')
    # optparse only allows single-character short options, so --ctopic gets no short form
    parser.add_option("--ctopic", action="store", dest="ctopic", type="str", help="Compressed topic to find a face on, default %default", default='/camera/rgb/image/compressed')
    (options, args) = parser.parse_args()

    cascade = cv2.CascadeClassifier()
    cascade.load(options.cascade)
    br = CvBridge()

    def detect_and_draw(imgmsg):
        img = br.imgmsg_to_cv2(imgmsg, "bgr8")
        # allocate temporary images
        new_size = (int(img.shape[1] / image_scale), int(img.shape[0] / image_scale))
        # convert color input image to grayscale
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        # scale input image for faster processing
        small_img = cv2.resize(gray, new_size, interpolation=cv2.INTER_LINEAR)
        small_img = cv2.equalizeHist(small_img)
        if cascade:
            faces = cascade.detectMultiScale(small_img, haar_scale, min_neighbors, haar_flags, min_size)
            if faces is not None:
                for (x, y, w, h) in faces:
                    # the input to detectMultiScale was resized, so scale the
                    # bounding box of each face and convert it to two CvPoints
                    pt1 = (int(x * image_scale), int(y * image_scale))
                    pt2 = (int((x + w) * image_scale), int((y + h) * image_scale))
                    cv2.rectangle(img, pt1, pt2, (255, 0, 0), 3, 8, 0)
        cv2.imshow("result", img)
        cv2.waitKey(6)

    def compressed_detect_and_draw(compressed_imgmsg):
        img = br.compressed_imgmsg_to_cv2(compressed_imgmsg, "bgr8")
        # allocate temporary images
        new_size = (int(img.shape[1] / image_scale), int(img.shape[0] / image_scale))
        # convert color input image to grayscale
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        # scale input image for faster processing
        small_img = cv2.resize(gray, new_size, interpolation=cv2.INTER_LINEAR)
        small_img = cv2.equalizeHist(small_img)
        if cascade:
            faces = cascade.detectMultiScale(small_img, haar_scale, min_neighbors, haar_flags, min_size)
            if faces is not None:
                for (x, y, w, h) in faces:
                    # the input to detectMultiScale was resized, so scale the
                    # bounding box of each face and convert it to two CvPoints
                    pt1 = (int(x * image_scale), int(y * image_scale))
                    pt2 = (int((x + w) * image_scale), int((y + h) * image_scale))
                    cv2.rectangle(img, pt1, pt2, (255, 0, 0), 3, 8, 0)
        cv2.imshow("compressed_result", img)
        cv2.waitKey(6)

    rospy.init_node('rosfacedetect')
    rospy.Subscriber(options.topic, sensor_msgs.msg.Image, detect_and_draw)
    rospy.Subscriber(options.ctopic, sensor_msgs.msg.CompressedImage, compressed_detect_and_draw)
    rospy.spin()

View File

@@ -0,0 +1,95 @@
#!/usr/bin/env python
# Software License Agreement (BSD License)
#
# Copyright (c) 2008, Willow Garage, Inc.
# Copyright (c) 2016, Tal Regev.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of the Willow Garage nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import sys
import time
import math
import rospy
import numpy
import cv2
import sensor_msgs.msg
from cv_bridge import CvBridge
# Publish a black image with a bouncing circle, as both regular and
# compressed ROS Image messages.
class Source:
    def __init__(self):
        self.pub = rospy.Publisher("/opencv_tests/images", sensor_msgs.msg.Image)
        self.pub_compressed = rospy.Publisher("/opencv_tests/images/compressed", sensor_msgs.msg.CompressedImage)

    def spin(self):
        time.sleep(1.0)
        started = time.time()
        counter = 0
        cvim = numpy.zeros((480, 640, 1), numpy.uint8)
        ball_xv = 10
        ball_yv = 10
        ball_x = 100
        ball_y = 100
        cvb = CvBridge()
        while not rospy.core.is_shutdown():
            cvim.fill(0)
            cv2.circle(cvim, (ball_x, ball_y), 10, 255, -1)
            ball_x += ball_xv
            ball_y += ball_yv
            # bounce off the image borders
            if ball_x in [10, 630]:
                ball_xv = -ball_xv
            if ball_y in [10, 470]:
                ball_yv = -ball_yv
            self.pub.publish(cvb.cv2_to_imgmsg(cvim))
            self.pub_compressed.publish(cvb.cv2_to_compressed_imgmsg(cvim))
            time.sleep(0.03)

def main(args):
    s = Source()
    rospy.init_node('Source')
    try:
        s.spin()
        rospy.spin()
        outcome = 'test completed'
    except KeyboardInterrupt:
        print("shutting down")
        outcome = 'keyboard interrupt'
    rospy.core.signal_shutdown(outcome)

if __name__ == '__main__':
    main(sys.argv)

View File

@@ -0,0 +1,15 @@
<package>
<name>opencv_tests</name>
<version>1.13.1</version>
<description>
Tests the enumerants of the ROS Image message, and functionally tests the Python and C++ implementations of CvBridge.
</description>
<author>James Bowman</author>
<maintainer email="vincent.rabaud@gmail.com">Vincent Rabaud</maintainer>
<license>BSD</license>
<url>http://wiki.ros.org/opencv_tests</url>
<buildtool_depend>catkin</buildtool_depend>
<run_depend>cv_bridge</run_depend>
</package>

View File

@@ -0,0 +1,189 @@
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Changelog for package vision_opencv
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
1.13.1 (2022-10-03)
-------------------
1.13.0 (2018-04-30)
-------------------
1.12.8 (2018-04-17)
-------------------
1.12.7 (2017-11-12)
-------------------
1.12.6 (2017-11-11)
-------------------
1.12.5 (2017-11-05)
-------------------
1.12.4 (2017-01-29)
-------------------
1.12.3 (2016-12-04)
-------------------
1.12.2 (2016-09-24)
-------------------
1.12.1 (2016-07-11)
-------------------
1.12.0 (2016-03-18)
-------------------
* remove opencv_apps from vision_opencv
* Contributors: Vincent Rabaud
1.11.12 (2016-03-10)
--------------------
1.11.11 (2016-01-31)
--------------------
1.11.10 (2016-01-16)
--------------------
1.11.9 (2015-11-29)
-------------------
* Add opencv_apps to vision_opencv dependency
* Contributors: Ryohei Ueda
1.11.8 (2015-07-15)
-------------------
1.11.7 (2014-12-14)
-------------------
1.11.6 (2014-11-16)
-------------------
1.11.5 (2014-09-21)
-------------------
1.11.4 (2014-07-27)
-------------------
1.11.3 (2014-06-08)
-------------------
1.11.2 (2014-04-28)
-------------------
1.11.1 (2014-04-16)
-------------------
1.11.0 (2014-02-15)
-------------------
1.10.15 (2014-02-07)
--------------------
1.10.14 (2013-11-23 16:17)
--------------------------
* Contributors: Vincent Rabaud
1.10.13 (2013-11-23 09:19)
--------------------------
* Contributors: Vincent Rabaud
1.10.12 (2013-11-22)
--------------------
* Contributors: Vincent Rabaud
1.10.11 (2013-10-23)
--------------------
* Contributors: Vincent Rabaud
1.10.10 (2013-10-19)
--------------------
* Contributors: Vincent Rabaud
1.10.9 (2013-10-07)
-------------------
* Contributors: Vincent Rabaud
1.10.8 (2013-09-09)
-------------------
* update email address
* Contributors: Vincent Rabaud
1.10.7 (2013-07-17)
-------------------
* update to REP 0127
* Contributors: Vincent Rabaud
1.10.6 (2013-03-01)
-------------------
1.10.5 (2013-02-11)
-------------------
1.10.4 (2013-02-02)
-------------------
1.10.3 (2013-01-17)
-------------------
1.10.2 (2013-01-13)
-------------------
1.10.1 (2013-01-10)
-------------------
1.10.0 (2013-01-03)
-------------------
1.9.15 (2013-01-02)
-------------------
1.9.14 (2012-12-30)
-------------------
1.9.13 (2012-12-15)
-------------------
1.9.12 (2012-12-14)
-------------------
1.9.11 (2012-12-10)
-------------------
1.9.10 (2012-10-04)
-------------------
* the CMake file is useless
* add the missing CMake file
* re-add the meta-package
* Contributors: Vincent Rabaud
1.9.9 (2012-10-01)
------------------
1.9.8 (2012-09-30)
------------------
1.9.7 (2012-09-28 21:07)
------------------------
1.9.6 (2012-09-28 15:17)
------------------------
1.9.5 (2012-09-15)
------------------
1.9.4 (2012-09-13)
------------------
1.9.3 (2012-09-12)
------------------
1.9.2 (2012-09-07)
------------------
1.9.1 (2012-08-28 22:06)
------------------------
1.9.0 (2012-08-28 14:29)
------------------------

View File

@@ -0,0 +1,4 @@
cmake_minimum_required(VERSION 2.8.3)
project(vision_opencv)
find_package(catkin REQUIRED)
catkin_metapackage()

View File

@@ -0,0 +1,22 @@
<package>
<name>vision_opencv</name>
<version>1.13.1</version>
<description>Packages for interfacing ROS with OpenCV, a library of programming functions for real time computer vision.</description>
<author>Patrick Mihelich</author>
<author>James Bowman</author>
<maintainer email="vincent.rabaud@gmail.com">Vincent Rabaud</maintainer>
<license>BSD</license>
<url type="website">http://www.ros.org/wiki/vision_opencv</url>
<url type="bugtracker">https://github.com/ros-perception/vision_opencv/issues</url>
<url type="repository">https://github.com/ros-perception/vision_opencv</url>
<buildtool_depend>catkin</buildtool_depend>
<run_depend>cv_bridge</run_depend>
<run_depend>image_geometry</run_depend>
<export>
<metapackage/>
</export>
</package>

View File

@@ -0,0 +1,207 @@
cmake_minimum_required(VERSION 3.0.2)
project(yolov5_ros)
## Compile as C++11, supported in ROS Kinetic and newer
# add_compile_options(-std=c++11)
## Find catkin macros and libraries
## if COMPONENTS list like find_package(catkin REQUIRED COMPONENTS xyz)
## is used, also find other catkin packages
find_package(catkin REQUIRED COMPONENTS
rospy
sensor_msgs
std_msgs
detection_msgs
)
## System dependencies are found with CMake's conventions
# find_package(Boost REQUIRED COMPONENTS system)
## Uncomment this if the package has a setup.py. This macro ensures
## modules and global scripts declared therein get installed
## See http://ros.org/doc/api/catkin/html/user_guide/setup_dot_py.html
# catkin_python_setup()
################################################
## Declare ROS messages, services and actions ##
################################################
## To declare and build messages, services or actions from within this
## package, follow these steps:
## * Let MSG_DEP_SET be the set of packages whose message types you use in
## your messages/services/actions (e.g. std_msgs, actionlib_msgs, ...).
## * In the file package.xml:
## * add a build_depend tag for "message_generation"
## * add a build_depend and a exec_depend tag for each package in MSG_DEP_SET
## * If MSG_DEP_SET isn't empty the following dependency has been pulled in
## but can be declared for certainty nonetheless:
## * add a exec_depend tag for "message_runtime"
## * In this file (CMakeLists.txt):
## * add "message_generation" and every package in MSG_DEP_SET to
## find_package(catkin REQUIRED COMPONENTS ...)
## * add "message_runtime" and every package in MSG_DEP_SET to
## catkin_package(CATKIN_DEPENDS ...)
## * uncomment the add_*_files sections below as needed
## and list every .msg/.srv/.action file to be processed
## * uncomment the generate_messages entry below
## * add every package in MSG_DEP_SET to generate_messages(DEPENDENCIES ...)
## Generate messages in the 'msg' folder
# add_message_files(
# FILES
# Message1.msg
# Message2.msg
# )
## Generate services in the 'srv' folder
# add_service_files(
# FILES
# Service1.srv
# Service2.srv
# )
## Generate actions in the 'action' folder
# add_action_files(
# FILES
# Action1.action
# Action2.action
# )
## Generate added messages and services with any dependencies listed here
# generate_messages(
# DEPENDENCIES
# sensor_msgs# std_msgs
# )
################################################
## Declare ROS dynamic reconfigure parameters ##
################################################
## To declare and build dynamic reconfigure parameters within this
## package, follow these steps:
## * In the file package.xml:
## * add a build_depend and a exec_depend tag for "dynamic_reconfigure"
## * In this file (CMakeLists.txt):
## * add "dynamic_reconfigure" to
## find_package(catkin REQUIRED COMPONENTS ...)
## * uncomment the "generate_dynamic_reconfigure_options" section below
## and list every .cfg file to be processed
## Generate dynamic reconfigure parameters in the 'cfg' folder
# generate_dynamic_reconfigure_options(
# cfg/DynReconf1.cfg
# cfg/DynReconf2.cfg
# )
###################################
## catkin specific configuration ##
###################################
## The catkin_package macro generates cmake config files for your package
## Declare things to be passed to dependent projects
## INCLUDE_DIRS: uncomment this if your package contains header files
## LIBRARIES: libraries you create in this project that dependent projects also need
## CATKIN_DEPENDS: catkin_packages dependent projects also need
## DEPENDS: system dependencies of this project that dependent projects also need
catkin_package(
# INCLUDE_DIRS include
# LIBRARIES yolov5_ros
# CATKIN_DEPENDS rospy sensor_msgs std_msgs
# DEPENDS system_lib
)
###########
## Build ##
###########
## Specify additional locations of header files
## Your package locations should be listed before other locations
include_directories(
# include
${catkin_INCLUDE_DIRS}
)
## Declare a C++ library
# add_library(${PROJECT_NAME}
# src/${PROJECT_NAME}/yolov5_ros.cpp
# )
## Add cmake target dependencies of the library
## as an example, code may need to be generated before libraries
## either from message generation or dynamic reconfigure
# add_dependencies(${PROJECT_NAME} ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS})
## Declare a C++ executable
## With catkin_make all packages are built within a single CMake context
## The recommended prefix ensures that target names across packages don't collide
# add_executable(${PROJECT_NAME}_node src/yolov5_ros_node.cpp)
## Rename C++ executable without prefix
## The above recommended prefix causes long target names, the following renames the
## target back to the shorter version for ease of user use
## e.g. "rosrun someones_pkg node" instead of "rosrun someones_pkg someones_pkg_node"
# set_target_properties(${PROJECT_NAME}_node PROPERTIES OUTPUT_NAME node PREFIX "")
## Add cmake target dependencies of the executable
## same as for the library above
# add_dependencies(${PROJECT_NAME}_node ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS})
## Specify libraries to link a library or executable target against
# target_link_libraries(${PROJECT_NAME}_node
#   ${catkin_LIBRARIES}
# )
#############
## Install ##
#############
# all install targets should use catkin DESTINATION variables
# See http://ros.org/doc/api/catkin/html/adv_user_guide/variables.html
## Mark executable scripts (Python etc.) for installation
## in contrast to setup.py, you can choose the destination
catkin_install_python(PROGRAMS
  src/detect.py
  DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION}
)
## Mark executables for installation
## See http://docs.ros.org/melodic/api/catkin/html/howto/format1/building_executables.html
# install(TARGETS ${PROJECT_NAME}_node
#   RUNTIME DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION}
# )
## Mark libraries for installation
## See http://docs.ros.org/melodic/api/catkin/html/howto/format1/building_libraries.html
# install(TARGETS ${PROJECT_NAME}
#   ARCHIVE DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION}
#   LIBRARY DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION}
#   RUNTIME DESTINATION ${CATKIN_GLOBAL_BIN_DESTINATION}
# )
## Mark cpp header files for installation
# install(DIRECTORY include/${PROJECT_NAME}/
#   DESTINATION ${CATKIN_PACKAGE_INCLUDE_DESTINATION}
#   FILES_MATCHING PATTERN "*.h"
#   PATTERN ".svn" EXCLUDE
# )
## Mark other files for installation (e.g. launch and bag files, etc.)
# install(FILES
#   # myfile1
#   # myfile2
#   DESTINATION ${CATKIN_PACKAGE_SHARE_DESTINATION}
# )
#############
## Testing ##
#############
## Add gtest based cpp test target and link libraries
# catkin_add_gtest(${PROJECT_NAME}-test test/test_yolov5_ros.cpp)
# if(TARGET ${PROJECT_NAME}-test)
#   target_link_libraries(${PROJECT_NAME}-test ${PROJECT_NAME})
# endif()
## Add folders to be run by python nosetests
# catkin_add_nosetests(test)

src/yolov5_ros/LICENSE

@@ -0,0 +1,674 @@
GNU GENERAL PUBLIC LICENSE
Version 3, 29 June 2007
Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
Preamble
The GNU General Public License is a free, copyleft license for
software and other kinds of works.
The licenses for most software and other practical works are designed
to take away your freedom to share and change the works. By contrast,
the GNU General Public License is intended to guarantee your freedom to
share and change all versions of a program--to make sure it remains free
software for all its users. We, the Free Software Foundation, use the
GNU General Public License for most of our software; it applies also to
any other work released this way by its authors. You can apply it to
your programs, too.
When we speak of free software, we are referring to freedom, not
price. Our General Public Licenses are designed to make sure that you
have the freedom to distribute copies of free software (and charge for
them if you wish), that you receive source code or can get it if you
want it, that you can change the software or use pieces of it in new
free programs, and that you know you can do these things.
To protect your rights, we need to prevent others from denying you
these rights or asking you to surrender the rights. Therefore, you have
certain responsibilities if you distribute copies of the software, or if
you modify it: responsibilities to respect the freedom of others.
For example, if you distribute copies of such a program, whether
gratis or for a fee, you must pass on to the recipients the same
freedoms that you received. You must make sure that they, too, receive
or can get the source code. And you must show them these terms so they
know their rights.
Developers that use the GNU GPL protect your rights with two steps:
(1) assert copyright on the software, and (2) offer you this License
giving you legal permission to copy, distribute and/or modify it.
For the developers' and authors' protection, the GPL clearly explains
that there is no warranty for this free software. For both users' and
authors' sake, the GPL requires that modified versions be marked as
changed, so that their problems will not be attributed erroneously to
authors of previous versions.
Some devices are designed to deny users access to install or run
modified versions of the software inside them, although the manufacturer
can do so. This is fundamentally incompatible with the aim of
protecting users' freedom to change the software. The systematic
pattern of such abuse occurs in the area of products for individuals to
use, which is precisely where it is most unacceptable. Therefore, we
have designed this version of the GPL to prohibit the practice for those
products. If such problems arise substantially in other domains, we
stand ready to extend this provision to those domains in future versions
of the GPL, as needed to protect the freedom of users.
Finally, every program is threatened constantly by software patents.
States should not allow patents to restrict development and use of
software on general-purpose computers, but in those that do, we wish to
avoid the special danger that patents applied to a free program could
make it effectively proprietary. To prevent this, the GPL assures that
patents cannot be used to render the program non-free.
The precise terms and conditions for copying, distribution and
modification follow.
TERMS AND CONDITIONS
0. Definitions.
"This License" refers to version 3 of the GNU General Public License.
"Copyright" also means copyright-like laws that apply to other kinds of
works, such as semiconductor masks.
"The Program" refers to any copyrightable work licensed under this
License. Each licensee is addressed as "you". "Licensees" and
"recipients" may be individuals or organizations.
To "modify" a work means to copy from or adapt all or part of the work
in a fashion requiring copyright permission, other than the making of an
exact copy. The resulting work is called a "modified version" of the
earlier work or a work "based on" the earlier work.
A "covered work" means either the unmodified Program or a work based
on the Program.
To "propagate" a work means to do anything with it that, without
permission, would make you directly or secondarily liable for
infringement under applicable copyright law, except executing it on a
computer or modifying a private copy. Propagation includes copying,
distribution (with or without modification), making available to the
public, and in some countries other activities as well.
To "convey" a work means any kind of propagation that enables other
parties to make or receive copies. Mere interaction with a user through
a computer network, with no transfer of a copy, is not conveying.
An interactive user interface displays "Appropriate Legal Notices"
to the extent that it includes a convenient and prominently visible
feature that (1) displays an appropriate copyright notice, and (2)
tells the user that there is no warranty for the work (except to the
extent that warranties are provided), that licensees may convey the
work under this License, and how to view a copy of this License. If
the interface presents a list of user commands or options, such as a
menu, a prominent item in the list meets this criterion.
1. Source Code.
The "source code" for a work means the preferred form of the work
for making modifications to it. "Object code" means any non-source
form of a work.
A "Standard Interface" means an interface that either is an official
standard defined by a recognized standards body, or, in the case of
interfaces specified for a particular programming language, one that
is widely used among developers working in that language.
The "System Libraries" of an executable work include anything, other
than the work as a whole, that (a) is included in the normal form of
packaging a Major Component, but which is not part of that Major
Component, and (b) serves only to enable use of the work with that
Major Component, or to implement a Standard Interface for which an
implementation is available to the public in source code form. A
"Major Component", in this context, means a major essential component
(kernel, window system, and so on) of the specific operating system
(if any) on which the executable work runs, or a compiler used to
produce the work, or an object code interpreter used to run it.
The "Corresponding Source" for a work in object code form means all
the source code needed to generate, install, and (for an executable
work) run the object code and to modify the work, including scripts to
control those activities. However, it does not include the work's
System Libraries, or general-purpose tools or generally available free
programs which are used unmodified in performing those activities but
which are not part of the work. For example, Corresponding Source
includes interface definition files associated with source files for
the work, and the source code for shared libraries and dynamically
linked subprograms that the work is specifically designed to require,
such as by intimate data communication or control flow between those
subprograms and other parts of the work.
The Corresponding Source need not include anything that users
can regenerate automatically from other parts of the Corresponding
Source.
The Corresponding Source for a work in source code form is that
same work.
2. Basic Permissions.
All rights granted under this License are granted for the term of
copyright on the Program, and are irrevocable provided the stated
conditions are met. This License explicitly affirms your unlimited
permission to run the unmodified Program. The output from running a
covered work is covered by this License only if the output, given its
content, constitutes a covered work. This License acknowledges your
rights of fair use or other equivalent, as provided by copyright law.
You may make, run and propagate covered works that you do not
convey, without conditions so long as your license otherwise remains
in force. You may convey covered works to others for the sole purpose
of having them make modifications exclusively for you, or provide you
with facilities for running those works, provided that you comply with
the terms of this License in conveying all material for which you do
not control copyright. Those thus making or running the covered works
for you must do so exclusively on your behalf, under your direction
and control, on terms that prohibit them from making any copies of
your copyrighted material outside their relationship with you.
Conveying under any other circumstances is permitted solely under
the conditions stated below. Sublicensing is not allowed; section 10
makes it unnecessary.
3. Protecting Users' Legal Rights From Anti-Circumvention Law.
No covered work shall be deemed part of an effective technological
measure under any applicable law fulfilling obligations under article
11 of the WIPO copyright treaty adopted on 20 December 1996, or
similar laws prohibiting or restricting circumvention of such
measures.
When you convey a covered work, you waive any legal power to forbid
circumvention of technological measures to the extent such circumvention
is effected by exercising rights under this License with respect to
the covered work, and you disclaim any intention to limit operation or
modification of the work as a means of enforcing, against the work's
users, your or third parties' legal rights to forbid circumvention of
technological measures.
4. Conveying Verbatim Copies.
You may convey verbatim copies of the Program's source code as you
receive it, in any medium, provided that you conspicuously and
appropriately publish on each copy an appropriate copyright notice;
keep intact all notices stating that this License and any
non-permissive terms added in accord with section 7 apply to the code;
keep intact all notices of the absence of any warranty; and give all
recipients a copy of this License along with the Program.
You may charge any price or no price for each copy that you convey,
and you may offer support or warranty protection for a fee.
5. Conveying Modified Source Versions.
You may convey a work based on the Program, or the modifications to
produce it from the Program, in the form of source code under the
terms of section 4, provided that you also meet all of these conditions:
a) The work must carry prominent notices stating that you modified
it, and giving a relevant date.
b) The work must carry prominent notices stating that it is
released under this License and any conditions added under section
7. This requirement modifies the requirement in section 4 to
"keep intact all notices".
c) You must license the entire work, as a whole, under this
License to anyone who comes into possession of a copy. This
License will therefore apply, along with any applicable section 7
additional terms, to the whole of the work, and all its parts,
regardless of how they are packaged. This License gives no
permission to license the work in any other way, but it does not
invalidate such permission if you have separately received it.
d) If the work has interactive user interfaces, each must display
Appropriate Legal Notices; however, if the Program has interactive
interfaces that do not display Appropriate Legal Notices, your
work need not make them do so.
A compilation of a covered work with other separate and independent
works, which are not by their nature extensions of the covered work,
and which are not combined with it such as to form a larger program,
in or on a volume of a storage or distribution medium, is called an
"aggregate" if the compilation and its resulting copyright are not
used to limit the access or legal rights of the compilation's users
beyond what the individual works permit. Inclusion of a covered work
in an aggregate does not cause this License to apply to the other
parts of the aggregate.
6. Conveying Non-Source Forms.
You may convey a covered work in object code form under the terms
of sections 4 and 5, provided that you also convey the
machine-readable Corresponding Source under the terms of this License,
in one of these ways:
a) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by the
Corresponding Source fixed on a durable physical medium
customarily used for software interchange.
b) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by a
written offer, valid for at least three years and valid for as
long as you offer spare parts or customer support for that product
model, to give anyone who possesses the object code either (1) a
copy of the Corresponding Source for all the software in the
product that is covered by this License, on a durable physical
medium customarily used for software interchange, for a price no
more than your reasonable cost of physically performing this
conveying of source, or (2) access to copy the
Corresponding Source from a network server at no charge.
c) Convey individual copies of the object code with a copy of the
written offer to provide the Corresponding Source. This
alternative is allowed only occasionally and noncommercially, and
only if you received the object code with such an offer, in accord
with subsection 6b.
d) Convey the object code by offering access from a designated
place (gratis or for a charge), and offer equivalent access to the
Corresponding Source in the same way through the same place at no
further charge. You need not require recipients to copy the
Corresponding Source along with the object code. If the place to
copy the object code is a network server, the Corresponding Source
may be on a different server (operated by you or a third party)
that supports equivalent copying facilities, provided you maintain
clear directions next to the object code saying where to find the
Corresponding Source. Regardless of what server hosts the
Corresponding Source, you remain obligated to ensure that it is
available for as long as needed to satisfy these requirements.
e) Convey the object code using peer-to-peer transmission, provided
you inform other peers where the object code and Corresponding
Source of the work are being offered to the general public at no
charge under subsection 6d.
A separable portion of the object code, whose source code is excluded
from the Corresponding Source as a System Library, need not be
included in conveying the object code work.
A "User Product" is either (1) a "consumer product", which means any
tangible personal property which is normally used for personal, family,
or household purposes, or (2) anything designed or sold for incorporation
into a dwelling. In determining whether a product is a consumer product,
doubtful cases shall be resolved in favor of coverage. For a particular
product received by a particular user, "normally used" refers to a
typical or common use of that class of product, regardless of the status
of the particular user or of the way in which the particular user
actually uses, or expects or is expected to use, the product. A product
is a consumer product regardless of whether the product has substantial
commercial, industrial or non-consumer uses, unless such uses represent
the only significant mode of use of the product.
"Installation Information" for a User Product means any methods,
procedures, authorization keys, or other information required to install
and execute modified versions of a covered work in that User Product from
a modified version of its Corresponding Source. The information must
suffice to ensure that the continued functioning of the modified object
code is in no case prevented or interfered with solely because
modification has been made.
If you convey an object code work under this section in, or with, or
specifically for use in, a User Product, and the conveying occurs as
part of a transaction in which the right of possession and use of the
User Product is transferred to the recipient in perpetuity or for a
fixed term (regardless of how the transaction is characterized), the
Corresponding Source conveyed under this section must be accompanied
by the Installation Information. But this requirement does not apply
if neither you nor any third party retains the ability to install
modified object code on the User Product (for example, the work has
been installed in ROM).
The requirement to provide Installation Information does not include a
requirement to continue to provide support service, warranty, or updates
for a work that has been modified or installed by the recipient, or for
the User Product in which it has been modified or installed. Access to a
network may be denied when the modification itself materially and
adversely affects the operation of the network or violates the rules and
protocols for communication across the network.
Corresponding Source conveyed, and Installation Information provided,
in accord with this section must be in a format that is publicly
documented (and with an implementation available to the public in
source code form), and must require no special password or key for
unpacking, reading or copying.
7. Additional Terms.
"Additional permissions" are terms that supplement the terms of this
License by making exceptions from one or more of its conditions.
Additional permissions that are applicable to the entire Program shall
be treated as though they were included in this License, to the extent
that they are valid under applicable law. If additional permissions
apply only to part of the Program, that part may be used separately
under those permissions, but the entire Program remains governed by
this License without regard to the additional permissions.
When you convey a copy of a covered work, you may at your option
remove any additional permissions from that copy, or from any part of
it. (Additional permissions may be written to require their own
removal in certain cases when you modify the work.) You may place
additional permissions on material, added by you to a covered work,
for which you have or can give appropriate copyright permission.
Notwithstanding any other provision of this License, for material you
add to a covered work, you may (if authorized by the copyright holders of
that material) supplement the terms of this License with terms:
a) Disclaiming warranty or limiting liability differently from the
terms of sections 15 and 16 of this License; or
b) Requiring preservation of specified reasonable legal notices or
author attributions in that material or in the Appropriate Legal
Notices displayed by works containing it; or
c) Prohibiting misrepresentation of the origin of that material, or
requiring that modified versions of such material be marked in
reasonable ways as different from the original version; or
d) Limiting the use for publicity purposes of names of licensors or
authors of the material; or
e) Declining to grant rights under trademark law for use of some
trade names, trademarks, or service marks; or
f) Requiring indemnification of licensors and authors of that
material by anyone who conveys the material (or modified versions of
it) with contractual assumptions of liability to the recipient, for
any liability that these contractual assumptions directly impose on
those licensors and authors.
All other non-permissive additional terms are considered "further
restrictions" within the meaning of section 10. If the Program as you
received it, or any part of it, contains a notice stating that it is
governed by this License along with a term that is a further
restriction, you may remove that term. If a license document contains
a further restriction but permits relicensing or conveying under this
License, you may add to a covered work material governed by the terms
of that license document, provided that the further restriction does
not survive such relicensing or conveying.
If you add terms to a covered work in accord with this section, you
must place, in the relevant source files, a statement of the
additional terms that apply to those files, or a notice indicating
where to find the applicable terms.
Additional terms, permissive or non-permissive, may be stated in the
form of a separately written license, or stated as exceptions;
the above requirements apply either way.
8. Termination.
You may not propagate or modify a covered work except as expressly
provided under this License. Any attempt otherwise to propagate or
modify it is void, and will automatically terminate your rights under
this License (including any patent licenses granted under the third
paragraph of section 11).
However, if you cease all violation of this License, then your
license from a particular copyright holder is reinstated (a)
provisionally, unless and until the copyright holder explicitly and
finally terminates your license, and (b) permanently, if the copyright
holder fails to notify you of the violation by some reasonable means
prior to 60 days after the cessation.
Moreover, your license from a particular copyright holder is
reinstated permanently if the copyright holder notifies you of the
violation by some reasonable means, this is the first time you have
received notice of violation of this License (for any work) from that
copyright holder, and you cure the violation prior to 30 days after
your receipt of the notice.
Termination of your rights under this section does not terminate the
licenses of parties who have received copies or rights from you under
this License. If your rights have been terminated and not permanently
reinstated, you do not qualify to receive new licenses for the same
material under section 10.
9. Acceptance Not Required for Having Copies.
You are not required to accept this License in order to receive or
run a copy of the Program. Ancillary propagation of a covered work
occurring solely as a consequence of using peer-to-peer transmission
to receive a copy likewise does not require acceptance. However,
nothing other than this License grants you permission to propagate or
modify any covered work. These actions infringe copyright if you do
not accept this License. Therefore, by modifying or propagating a
covered work, you indicate your acceptance of this License to do so.
10. Automatic Licensing of Downstream Recipients.
Each time you convey a covered work, the recipient automatically
receives a license from the original licensors, to run, modify and
propagate that work, subject to this License. You are not responsible
for enforcing compliance by third parties with this License.
An "entity transaction" is a transaction transferring control of an
organization, or substantially all assets of one, or subdividing an
organization, or merging organizations. If propagation of a covered
work results from an entity transaction, each party to that
transaction who receives a copy of the work also receives whatever
licenses to the work the party's predecessor in interest had or could
give under the previous paragraph, plus a right to possession of the
Corresponding Source of the work from the predecessor in interest, if
the predecessor has it or can get it with reasonable efforts.
You may not impose any further restrictions on the exercise of the
rights granted or affirmed under this License. For example, you may
not impose a license fee, royalty, or other charge for exercise of
rights granted under this License, and you may not initiate litigation
(including a cross-claim or counterclaim in a lawsuit) alleging that
any patent claim is infringed by making, using, selling, offering for
sale, or importing the Program or any portion of it.
11. Patents.
A "contributor" is a copyright holder who authorizes use under this
License of the Program or a work on which the Program is based. The
work thus licensed is called the contributor's "contributor version".
A contributor's "essential patent claims" are all patent claims
owned or controlled by the contributor, whether already acquired or
hereafter acquired, that would be infringed by some manner, permitted
by this License, of making, using, or selling its contributor version,
but do not include claims that would be infringed only as a
consequence of further modification of the contributor version. For
purposes of this definition, "control" includes the right to grant
patent sublicenses in a manner consistent with the requirements of
this License.
Each contributor grants you a non-exclusive, worldwide, royalty-free
patent license under the contributor's essential patent claims, to
make, use, sell, offer for sale, import and otherwise run, modify and
propagate the contents of its contributor version.
In the following three paragraphs, a "patent license" is any express
agreement or commitment, however denominated, not to enforce a patent
(such as an express permission to practice a patent or covenant not to
sue for patent infringement). To "grant" such a patent license to a
party means to make such an agreement or commitment not to enforce a
patent against the party.
If you convey a covered work, knowingly relying on a patent license,
and the Corresponding Source of the work is not available for anyone
to copy, free of charge and under the terms of this License, through a
publicly available network server or other readily accessible means,
then you must either (1) cause the Corresponding Source to be so
available, or (2) arrange to deprive yourself of the benefit of the
patent license for this particular work, or (3) arrange, in a manner
consistent with the requirements of this License, to extend the patent
license to downstream recipients. "Knowingly relying" means you have
actual knowledge that, but for the patent license, your conveying the
covered work in a country, or your recipient's use of the covered work
in a country, would infringe one or more identifiable patents in that
country that you have reason to believe are valid.
If, pursuant to or in connection with a single transaction or
arrangement, you convey, or propagate by procuring conveyance of, a
covered work, and grant a patent license to some of the parties
receiving the covered work authorizing them to use, propagate, modify
or convey a specific copy of the covered work, then the patent license
you grant is automatically extended to all recipients of the covered
work and works based on it.
A patent license is "discriminatory" if it does not include within
the scope of its coverage, prohibits the exercise of, or is
conditioned on the non-exercise of one or more of the rights that are
specifically granted under this License. You may not convey a covered
work if you are a party to an arrangement with a third party that is
in the business of distributing software, under which you make payment
to the third party based on the extent of your activity of conveying
the work, and under which the third party grants, to any of the
parties who would receive the covered work from you, a discriminatory
patent license (a) in connection with copies of the covered work
conveyed by you (or copies made from those copies), or (b) primarily
for and in connection with specific products or compilations that
contain the covered work, unless you entered into that arrangement,
or that patent license was granted, prior to 28 March 2007.
Nothing in this License shall be construed as excluding or limiting
any implied license or other defenses to infringement that may
otherwise be available to you under applicable patent law.
12. No Surrender of Others' Freedom.
If conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License. If you cannot convey a
covered work so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you may
not convey it at all. For example, if you agree to terms that obligate you
to collect a royalty for further conveying from those to whom you convey
the Program, the only way you could satisfy both those terms and this
License would be to refrain entirely from conveying the Program.
13. Use with the GNU Affero General Public License.
Notwithstanding any other provision of this License, you have
permission to link or combine any covered work with a work licensed
under version 3 of the GNU Affero General Public License into a single
combined work, and to convey the resulting work. The terms of this
License will continue to apply to the part which is the covered work,
but the special requirements of the GNU Affero General Public License,
section 13, concerning interaction through a network will apply to the
combination as such.
14. Revised Versions of this License.
The Free Software Foundation may publish revised and/or new versions of
the GNU General Public License from time to time. Such new versions will
be similar in spirit to the present version, but may differ in detail to
address new problems or concerns.
Each version is given a distinguishing version number. If the
Program specifies that a certain numbered version of the GNU General
Public License "or any later version" applies to it, you have the
option of following the terms and conditions either of that numbered
version or of any later version published by the Free Software
Foundation. If the Program does not specify a version number of the
GNU General Public License, you may choose any version ever published
by the Free Software Foundation.
If the Program specifies that a proxy can decide which future
versions of the GNU General Public License can be used, that proxy's
public statement of acceptance of a version permanently authorizes you
to choose that version for the Program.
Later license versions may give you additional or different
permissions. However, no additional obligations are imposed on any
author or copyright holder as a result of your choosing to follow a
later version.
15. Disclaimer of Warranty.
THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
16. Limitation of Liability.
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
SUCH DAMAGES.
17. Interpretation of Sections 15 and 16.
If the disclaimer of warranty and limitation of liability provided
above cannot be given local legal effect according to their terms,
reviewing courts shall apply local law that most closely approximates
an absolute waiver of all civil liability in connection with the
Program, unless a warranty or assumption of liability accompanies a
copy of the Program in return for a fee.
END OF TERMS AND CONDITIONS
How to Apply These Terms to Your New Programs
If you develop a new program, and you want it to be of the greatest
possible use to the public, the best way to achieve this is to make it
free software which everyone can redistribute and change under these terms.
To do so, attach the following notices to the program. It is safest
to attach them to the start of each source file to most effectively
state the exclusion of warranty; and each file should have at least
the "copyright" line and a pointer to where the full notice is found.
<one line to give the program's name and a brief idea of what it does.>
Copyright (C) <year> <name of author>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
Also add information on how to contact you by electronic and paper mail.
If the program does terminal interaction, make it output a short
notice like this when it starts in an interactive mode:
<program> Copyright (C) <year> <name of author>
This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
This is free software, and you are welcome to redistribute it
under certain conditions; type `show c' for details.
The hypothetical commands `show w' and `show c' should show the appropriate
parts of the General Public License. Of course, your program's commands
might be different; for a GUI interface, you would use an "about box".
You should also get your employer (if you work as a programmer) or school,
if any, to sign a "copyright disclaimer" for the program, if necessary.
For more information on this, and how to apply and follow the GNU GPL, see
<https://www.gnu.org/licenses/>.
The GNU General Public License does not permit incorporating your program
into proprietary programs. If your program is a subroutine library, you
may consider it more useful to permit linking proprietary applications with
the library. If this is what you want to do, use the GNU Lesser General
Public License instead of this License. But first, please read
<https://www.gnu.org/licenses/why-not-lgpl.html>.

src/yolov5_ros/README.md

@@ -0,0 +1,44 @@
# YOLOv5 ROS
This is a ROS interface for using YOLOv5 for real-time object detection on a ROS image topic. It supports inference with the multiple deep learning frameworks used in the [official YOLOv5 repository](https://github.com/ultralytics/yolov5).
## Installation
### Dependencies
This package is built and tested on Ubuntu 20.04 LTS and ROS Noetic with Python 3.8.
* Clone the packages into your ROS workspace and install the requirements for the YOLOv5 submodule:
```bash
cd <ros_workspace>/src
git clone https://github.com/mats-robotics/detection_msgs.git
git clone --recurse-submodules https://github.com/mats-robotics/yolov5_ros.git
cd yolov5_ros/src/yolov5
pip install -r requirements.txt # install the requirements for yolov5
```
* Build the ROS package:
```bash
cd <ros_workspace>
catkin build yolov5_ros # build the ROS package
```
* Make the Python script executable:
```bash
cd <ros_workspace>/src/yolov5_ros/src
chmod +x detect.py
```
## Basic usage
Set the `input_image_topic` parameter in launch/yolov5.launch to any ROS topic with message type `sensor_msgs/Image` or `sensor_msgs/CompressedImage`. The other parameters can be modified or left at their defaults.
* Launch the node:
```bash
roslaunch yolov5_ros yolov5.launch
```
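Launch arguments can also be overridden from the command line without editing the launch file. A minimal sketch, assuming a camera that publishes `sensor_msgs/Image` on a hypothetical `/usb_cam/image_raw` topic:
```bash
# Override the input topic and enable the annotated image output (topic names are illustrative)
roslaunch yolov5_ros yolov5.launch input_image_topic:=/usb_cam/image_raw publish_image:=true

# In another terminal, inspect the published detections (default output_topic)
rostopic echo /yolov5/detections
```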
## Using custom weights and dataset (work in progress)
* Put your weights into `yolov5_ros/src/yolov5`
* Put the yaml file for your dataset classes into `yolov5_ros/src/yolov5/data`
* Change the related ROS parameters in yolov5.launch: `weights`, `data` (see the example below)
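For example, assuming hypothetical files `custom.pt` in `yolov5_ros/src/yolov5` and `custom.yaml` in `yolov5_ros/src/yolov5/data`, the corresponding launch arguments can be set at launch time:
```bash
# File names are placeholders; point these at your own weights and dataset yaml
roslaunch yolov5_ros yolov5.launch \
    weights:=$(rospack find yolov5_ros)/src/yolov5/custom.pt \
    data:=$(rospack find yolov5_ros)/src/yolov5/data/custom.yaml
```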
## Reference
* YOLOv5 official repository: https://github.com/ultralytics/yolov5
* YOLOv3 ROS PyTorch: https://github.com/eriklindernoren/PyTorch-YOLOv3
* Darknet ROS: https://github.com/leggedrobotics/darknet_ros

src/yolov5_ros/launch/yolov5.launch

@@ -0,0 +1,56 @@
<launch>
  <!-- Detection configuration -->
  <arg name="weights" default="$(find yolov5_ros)/src/yolov5/best.pt"/>
  <arg name="data" default="$(find yolov5_ros)/src/yolov5/data/mydata.yaml"/>
  <arg name="confidence_threshold" default="0.75"/>
  <arg name="iou_threshold" default="0.45"/>
  <arg name="maximum_detections" default="1000"/>
  <arg name="device" default="0"/>
  <arg name="agnostic_nms" default="true"/>
  <arg name="line_thickness" default="3"/>
  <arg name="dnn" default="true"/>
  <arg name="half" default="false"/>
  <!-- replace imgsz -->
  <arg name="inference_size_h" default="640"/>
  <arg name="inference_size_w" default="640"/>
  <!-- Visualize using OpenCV window -->
  <arg name="view_image" default="true"/>
  <!-- ROS topics -->
  <arg name="input_image_topic" default="/camera/color/image_raw"/>
  <arg name="output_topic" default="/yolov5/detections"/>
  <!-- Optional topic (publishing annotated image) -->
  <arg name="publish_image" default="false"/>
  <arg name="output_image_topic" default="/yolov5/image_out"/>

  <node pkg="yolov5_ros" name="detect" type="detect.py" output="screen">
    <param name="weights" value="$(arg weights)"/>
    <param name="data" value="$(arg data)"/>
    <param name="confidence_threshold" value="$(arg confidence_threshold)"/>
    <param name="iou_threshold" value="$(arg iou_threshold)"/>
    <param name="maximum_detections" value="$(arg maximum_detections)"/>
    <param name="device" value="$(arg device)"/>
    <param name="agnostic_nms" value="$(arg agnostic_nms)"/>
    <param name="line_thickness" value="$(arg line_thickness)"/>
    <param name="dnn" value="$(arg dnn)"/>
    <param name="half" value="$(arg half)"/>
    <param name="inference_size_h" value="$(arg inference_size_h)"/>
    <param name="inference_size_w" value="$(arg inference_size_w)"/>
    <param name="input_image_topic" value="$(arg input_image_topic)"/>
    <param name="output_topic" value="$(arg output_topic)"/>
    <param name="view_image" value="$(arg view_image)"/>
    <param name="publish_image" value="$(arg publish_image)"/>
    <param name="output_image_topic" value="$(arg output_image_topic)"/>
  </node>

  <!-- <include file="$(find camera_launch)/launch/d435.launch"/> -->
</launch>

Some files were not shown because too many files have changed in this diff.