From ea1e22e36a9559448919d7ae2dd24a25743861ec Mon Sep 17 00:00:00 2001
From: siddhu8990
Date: Thu, 25 Aug 2016 10:29:32 +0530
Subject: Basic image processing working for RPi

---
 src/c/hardware/rasberrypi/libraries/libjasper.a | Bin 0 -> 1029234 bytes src/c/hardware/rasberrypi/libraries/libjpeg.a | Bin 0 -> 1119888 bytes src/c/hardware/rasberrypi/libraries/libtiff.a | Bin 0 -> 1380758 bytes .../libraries/opencv/libopencv_calib3d.so | Bin 0 -> 3629128 bytes .../libraries/opencv/libopencv_contrib.so | Bin 0 -> 6065088 bytes .../rasberrypi/libraries/opencv/libopencv_core.so | Bin 0 -> 7376312 bytes .../libraries/opencv/libopencv_features2d.so | Bin 0 -> 4200032 bytes .../rasberrypi/libraries/opencv/libopencv_flann.so | Bin 0 -> 2697296 bytes .../rasberrypi/libraries/opencv/libopencv_gpu.so | Bin 0 -> 2619680 bytes .../libraries/opencv/libopencv_highgui.so | Bin 0 -> 7838352 bytes .../libraries/opencv/libopencv_imgproc.so | Bin 0 -> 7284984 bytes .../libraries/opencv/libopencv_legacy.so | Bin 0 -> 6518888 bytes .../rasberrypi/libraries/opencv/libopencv_ml.so | Bin 0 -> 1971600 bytes .../libraries/opencv/libopencv_nonfree.so | Bin 0 -> 1091828 bytes .../libraries/opencv/libopencv_objdetect.so | Bin 0 -> 3734856 bytes .../rasberrypi/libraries/opencv/libopencv_ocl.so | Bin 0 -> 7169952 bytes .../rasberrypi/libraries/opencv/libopencv_photo.so | Bin 0 -> 410344 bytes .../libraries/opencv/libopencv_stitching.so | Bin 0 -> 3714560 bytes .../libraries/opencv/libopencv_superres.so | Bin 0 -> 1213364 bytes .../rasberrypi/libraries/opencv/libopencv_ts.a | Bin 0 -> 8438392 bytes .../rasberrypi/libraries/opencv/libopencv_video.so | Bin 0 -> 1385940 bytes .../libraries/opencv/libopencv_videostab.so | Bin 0 -> 1473068 bytes .../libraries/opencv/opencv2/calib3d.hpp | 43 + .../libraries/opencv/opencv2/calib3d/calib3d.hpp | 811 + .../libraries/opencv/opencv2/contrib/contrib.hpp | 998 + .../opencv2/contrib/detection_based_tracker.hpp | 106 + .../opencv/opencv2/contrib/hybridtracker.hpp | 220 + .../opencv/opencv2/contrib/openfabmap.hpp | 405 + .../libraries/opencv/opencv2/contrib/retina.hpp | 354 + .../rasberrypi/libraries/opencv/opencv2/core.hpp | 43 + .../libraries/opencv/opencv2/core/affine.hpp | 513 + .../libraries/opencv/opencv2/core/core.hpp | 4924 +++++ .../libraries/opencv/opencv2/core/core_c.h | 1886 ++ .../libraries/opencv/opencv2/core/cuda_devptrs.hpp | 199 + .../libraries/opencv/opencv2/core/devmem2d.hpp | 43 + .../libraries/opencv/opencv2/core/eigen.hpp | 280 + .../libraries/opencv/opencv2/core/gpumat.hpp | 564 + .../libraries/opencv/opencv2/core/internal.hpp | 795 + .../libraries/opencv/opencv2/core/mat.hpp | 2625 +++ .../opencv/opencv2/core/opengl_interop.hpp | 284 + .../opencv2/core/opengl_interop_deprecated.hpp | 300 + .../libraries/opencv/opencv2/core/operations.hpp | 4123 ++++ .../libraries/opencv/opencv2/core/types_c.h | 1923 ++ .../libraries/opencv/opencv2/core/version.hpp | 72 + .../libraries/opencv/opencv2/core/wimage.hpp | 621 + .../libraries/opencv/opencv2/features2d.hpp | 43 + .../opencv/opencv2/features2d/features2d.hpp | 1616 ++ .../rasberrypi/libraries/opencv/opencv2/flann.hpp | 43 + .../libraries/opencv/opencv2/flann/all_indices.h | 155 + .../libraries/opencv/opencv2/flann/allocator.h | 188 + .../libraries/opencv/opencv2/flann/any.h | 318 + .../opencv/opencv2/flann/autotuned_index.h | 595 + .../opencv/opencv2/flann/composite_index.h | 201 + .../libraries/opencv/opencv2/flann/config.h | 38 + .../libraries/opencv/opencv2/flann/defines.h | 176 + .../libraries/opencv/opencv2/flann/dist.h | 
937 + .../libraries/opencv/opencv2/flann/dummy.h | 45 + .../opencv/opencv2/flann/dynamic_bitset.h | 159 + .../libraries/opencv/opencv2/flann/flann.hpp | 427 + .../libraries/opencv/opencv2/flann/flann_base.hpp | 301 + .../libraries/opencv/opencv2/flann/general.h | 52 + .../libraries/opencv/opencv2/flann/ground_truth.h | 94 + .../libraries/opencv/opencv2/flann/hdf5.h | 231 + .../libraries/opencv/opencv2/flann/heap.h | 165 + .../opencv2/flann/hierarchical_clustering_index.h | 776 + .../libraries/opencv/opencv2/flann/index_testing.h | 318 + .../libraries/opencv/opencv2/flann/kdtree_index.h | 628 + .../opencv/opencv2/flann/kdtree_single_index.h | 641 + .../libraries/opencv/opencv2/flann/kmeans_index.h | 1133 ++ .../libraries/opencv/opencv2/flann/linear_index.h | 139 + .../libraries/opencv/opencv2/flann/logger.h | 130 + .../libraries/opencv/opencv2/flann/lsh_index.h | 420 + .../libraries/opencv/opencv2/flann/lsh_table.h | 497 + .../libraries/opencv/opencv2/flann/matrix.h | 116 + .../libraries/opencv/opencv2/flann/miniflann.hpp | 163 + .../libraries/opencv/opencv2/flann/nn_index.h | 184 + .../opencv/opencv2/flann/object_factory.h | 91 + .../libraries/opencv/opencv2/flann/params.h | 99 + .../libraries/opencv/opencv2/flann/random.h | 133 + .../libraries/opencv/opencv2/flann/result_set.h | 543 + .../libraries/opencv/opencv2/flann/sampling.h | 81 + .../libraries/opencv/opencv2/flann/saving.h | 187 + .../opencv/opencv2/flann/simplex_downhill.h | 186 + .../libraries/opencv/opencv2/flann/timer.h | 93 + .../libraries/opencv/opencv2/gpu/device/block.hpp | 203 + .../opencv2/gpu/device/border_interpolate.hpp | 714 + .../libraries/opencv/opencv2/gpu/device/color.hpp | 301 + .../libraries/opencv/opencv2/gpu/device/common.hpp | 118 + .../opencv/opencv2/gpu/device/datamov_utils.hpp | 105 + .../opencv2/gpu/device/detail/color_detail.hpp | 2219 ++ .../opencv/opencv2/gpu/device/detail/reduce.hpp | 361 + .../opencv2/gpu/device/detail/reduce_key_val.hpp | 498 + .../opencv2/gpu/device/detail/transform_detail.hpp | 395 + .../gpu/device/detail/type_traits_detail.hpp | 187 + .../gpu/device/detail/vec_distance_detail.hpp | 117 + .../opencv/opencv2/gpu/device/dynamic_smem.hpp | 80 + .../opencv/opencv2/gpu/device/emulation.hpp | 138 + .../opencv/opencv2/gpu/device/filters.hpp | 278 + .../opencv/opencv2/gpu/device/funcattrib.hpp | 71 + .../opencv/opencv2/gpu/device/functional.hpp | 789 + .../libraries/opencv/opencv2/gpu/device/limits.hpp | 122 + .../libraries/opencv/opencv2/gpu/device/reduce.hpp | 197 + .../opencv/opencv2/gpu/device/saturate_cast.hpp | 284 + .../libraries/opencv/opencv2/gpu/device/scan.hpp | 250 + .../opencv/opencv2/gpu/device/simd_functions.hpp | 909 + .../opencv/opencv2/gpu/device/static_check.hpp | 67 + .../opencv/opencv2/gpu/device/transform.hpp | 67 + .../opencv/opencv2/gpu/device/type_traits.hpp | 82 + .../opencv/opencv2/gpu/device/utility.hpp | 213 + .../opencv/opencv2/gpu/device/vec_distance.hpp | 224 + .../opencv/opencv2/gpu/device/vec_math.hpp | 922 + .../opencv/opencv2/gpu/device/vec_traits.hpp | 280 + .../libraries/opencv/opencv2/gpu/device/warp.hpp | 131 + .../opencv/opencv2/gpu/device/warp_reduce.hpp | 68 + .../opencv/opencv2/gpu/device/warp_shuffle.hpp | 145 + .../libraries/opencv/opencv2/gpu/devmem2d.hpp | 43 + .../libraries/opencv/opencv2/gpu/gpu.hpp | 2530 +++ .../libraries/opencv/opencv2/gpu/gpumat.hpp | 43 + .../opencv/opencv2/gpu/stream_accessor.hpp | 65 + .../libraries/opencv/opencv2/highgui.hpp | 43 + .../libraries/opencv/opencv2/highgui/cap_ios.h | 171 + 
.../libraries/opencv/opencv2/highgui/highgui.hpp | 255 + .../libraries/opencv/opencv2/highgui/highgui_c.h | 660 + .../libraries/opencv/opencv2/highgui/ios.h | 49 + .../libraries/opencv/opencv2/imgproc.hpp | 43 + .../libraries/opencv/opencv2/imgproc/imgproc.hpp | 1299 ++ .../libraries/opencv/opencv2/imgproc/imgproc_c.h | 623 + .../libraries/opencv/opencv2/imgproc/types_c.h | 640 + .../libraries/opencv/opencv2/legacy/blobtrack.hpp | 948 + .../libraries/opencv/opencv2/legacy/compat.hpp | 740 + .../libraries/opencv/opencv2/legacy/legacy.hpp | 3436 ++++ .../libraries/opencv/opencv2/legacy/streams.hpp | 92 + .../rasberrypi/libraries/opencv/opencv2/ml.hpp | 41 + .../rasberrypi/libraries/opencv/opencv2/ml/ml.hpp | 2147 ++ .../opencv/opencv2/nonfree/features2d.hpp | 155 + .../libraries/opencv/opencv2/nonfree/gpu.hpp | 128 + .../libraries/opencv/opencv2/nonfree/nonfree.hpp | 57 + .../libraries/opencv/opencv2/nonfree/ocl.hpp | 140 + .../libraries/opencv/opencv2/objdetect.hpp | 43 + .../opencv/opencv2/objdetect/objdetect.hpp | 1073 + .../opencv/opencv2/ocl/matrix_operations.hpp | 490 + .../libraries/opencv/opencv2/ocl/ocl.hpp | 1998 ++ .../rasberrypi/libraries/opencv/opencv2/opencv.hpp | 83 + .../libraries/opencv/opencv2/opencv_modules.hpp | 29 + .../rasberrypi/libraries/opencv/opencv2/photo.hpp | 43 + .../libraries/opencv/opencv2/photo/photo.hpp | 91 + .../libraries/opencv/opencv2/photo/photo_c.h | 69 + .../libraries/opencv/opencv2/stitching.hpp | 43 + .../opencv/opencv2/stitching/detail/autocalib.hpp | 65 + .../opencv/opencv2/stitching/detail/blenders.hpp | 137 + .../opencv/opencv2/stitching/detail/camera.hpp | 69 + .../stitching/detail/exposure_compensate.hpp | 106 + .../opencv/opencv2/stitching/detail/matchers.hpp | 192 + .../opencv2/stitching/detail/motion_estimators.hpp | 205 + .../opencv2/stitching/detail/seam_finders.hpp | 267 + .../opencv/opencv2/stitching/detail/util.hpp | 162 + .../opencv/opencv2/stitching/detail/util_inl.hpp | 127 + .../opencv/opencv2/stitching/detail/warpers.hpp | 510 + .../opencv2/stitching/detail/warpers_inl.hpp | 765 + .../opencv/opencv2/stitching/stitcher.hpp | 174 + .../libraries/opencv/opencv2/stitching/warpers.hpp | 170 + .../libraries/opencv/opencv2/superres.hpp | 43 + .../opencv/opencv2/superres/optical_flow.hpp | 76 + .../libraries/opencv/opencv2/superres/superres.hpp | 99 + .../rasberrypi/libraries/opencv/opencv2/ts.hpp | 43 + .../libraries/opencv/opencv2/ts/gpu_perf.hpp | 115 + .../libraries/opencv/opencv2/ts/gpu_test.hpp | 360 + .../rasberrypi/libraries/opencv/opencv2/ts/ts.hpp | 638 + .../libraries/opencv/opencv2/ts/ts_gtest.h | 20125 +++++++++++++++++++ .../libraries/opencv/opencv2/ts/ts_perf.hpp | 618 + .../rasberrypi/libraries/opencv/opencv2/video.hpp | 43 + .../opencv/opencv2/video/background_segm.hpp | 263 + .../libraries/opencv/opencv2/video/tracking.hpp | 373 + .../libraries/opencv/opencv2/video/video.hpp | 58 + .../libraries/opencv/opencv2/videostab.hpp | 43 + .../opencv/opencv2/videostab/deblurring.hpp | 110 + .../opencv/opencv2/videostab/fast_marching.hpp | 103 + .../opencv/opencv2/videostab/fast_marching_inl.hpp | 166 + .../opencv/opencv2/videostab/frame_source.hpp | 91 + .../opencv/opencv2/videostab/global_motion.hpp | 141 + .../opencv/opencv2/videostab/inpainting.hpp | 200 + .../libraries/opencv/opencv2/videostab/log.hpp | 75 + .../opencv2/videostab/motion_stabilizing.hpp | 106 + .../opencv/opencv2/videostab/optical_flow.hpp | 120 + .../opencv/opencv2/videostab/stabilizer.hpp | 187 + .../opencv/opencv2/videostab/videostab.hpp | 48 + 186 files 
changed, 90512 insertions(+) create mode 100644 src/c/hardware/rasberrypi/libraries/libjasper.a create mode 100644 src/c/hardware/rasberrypi/libraries/libjpeg.a create mode 100644 src/c/hardware/rasberrypi/libraries/libtiff.a create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/libopencv_calib3d.so create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/libopencv_contrib.so create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/libopencv_core.so create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/libopencv_features2d.so create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/libopencv_flann.so create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/libopencv_gpu.so create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/libopencv_highgui.so create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/libopencv_imgproc.so create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/libopencv_legacy.so create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/libopencv_ml.so create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/libopencv_nonfree.so create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/libopencv_objdetect.so create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/libopencv_ocl.so create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/libopencv_photo.so create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/libopencv_stitching.so create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/libopencv_superres.so create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/libopencv_ts.a create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/libopencv_video.so create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/libopencv_videostab.so create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/opencv2/calib3d.hpp create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/opencv2/calib3d/calib3d.hpp create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/opencv2/contrib/contrib.hpp create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/opencv2/contrib/detection_based_tracker.hpp create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/opencv2/contrib/hybridtracker.hpp create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/opencv2/contrib/openfabmap.hpp create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/opencv2/contrib/retina.hpp create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/opencv2/core.hpp create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/opencv2/core/affine.hpp create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/opencv2/core/core.hpp create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/opencv2/core/core_c.h create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/opencv2/core/cuda_devptrs.hpp create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/opencv2/core/devmem2d.hpp create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/opencv2/core/eigen.hpp create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/opencv2/core/gpumat.hpp create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/opencv2/core/internal.hpp create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/opencv2/core/mat.hpp create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/opencv2/core/opengl_interop.hpp create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/opencv2/core/opengl_interop_deprecated.hpp create mode 100644 
src/c/hardware/rasberrypi/libraries/opencv/opencv2/core/operations.hpp create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/opencv2/core/types_c.h create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/opencv2/core/version.hpp create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/opencv2/core/wimage.hpp create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/opencv2/features2d.hpp create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/opencv2/features2d/features2d.hpp create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann.hpp create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/all_indices.h create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/allocator.h create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/any.h create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/autotuned_index.h create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/composite_index.h create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/config.h create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/defines.h create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/dist.h create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/dummy.h create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/dynamic_bitset.h create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/flann.hpp create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/flann_base.hpp create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/general.h create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/ground_truth.h create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/hdf5.h create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/heap.h create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/hierarchical_clustering_index.h create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/index_testing.h create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/kdtree_index.h create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/kdtree_single_index.h create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/kmeans_index.h create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/linear_index.h create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/logger.h create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/lsh_index.h create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/lsh_table.h create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/matrix.h create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/miniflann.hpp create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/nn_index.h create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/object_factory.h create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/params.h create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/random.h create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/result_set.h create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/sampling.h create mode 100644 
src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/saving.h create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/simplex_downhill.h create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/timer.h create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/block.hpp create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/border_interpolate.hpp create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/color.hpp create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/common.hpp create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/datamov_utils.hpp create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/detail/color_detail.hpp create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/detail/reduce.hpp create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/detail/reduce_key_val.hpp create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/detail/transform_detail.hpp create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/detail/type_traits_detail.hpp create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/detail/vec_distance_detail.hpp create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/dynamic_smem.hpp create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/emulation.hpp create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/filters.hpp create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/funcattrib.hpp create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/functional.hpp create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/limits.hpp create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/reduce.hpp create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/saturate_cast.hpp create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/scan.hpp create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/simd_functions.hpp create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/static_check.hpp create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/transform.hpp create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/type_traits.hpp create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/utility.hpp create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/vec_distance.hpp create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/vec_math.hpp create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/vec_traits.hpp create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/warp.hpp create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/warp_reduce.hpp create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/warp_shuffle.hpp create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/devmem2d.hpp create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/gpu.hpp create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/gpumat.hpp create mode 100644 
src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/stream_accessor.hpp create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/opencv2/highgui.hpp create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/opencv2/highgui/cap_ios.h create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/opencv2/highgui/highgui.hpp create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/opencv2/highgui/highgui_c.h create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/opencv2/highgui/ios.h create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/opencv2/imgproc.hpp create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/opencv2/imgproc/imgproc.hpp create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/opencv2/imgproc/imgproc_c.h create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/opencv2/imgproc/types_c.h create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/opencv2/legacy/blobtrack.hpp create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/opencv2/legacy/compat.hpp create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/opencv2/legacy/legacy.hpp create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/opencv2/legacy/streams.hpp create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/opencv2/ml.hpp create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/opencv2/ml/ml.hpp create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/opencv2/nonfree/features2d.hpp create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/opencv2/nonfree/gpu.hpp create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/opencv2/nonfree/nonfree.hpp create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/opencv2/nonfree/ocl.hpp create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/opencv2/objdetect.hpp create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/opencv2/objdetect/objdetect.hpp create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/opencv2/ocl/matrix_operations.hpp create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/opencv2/ocl/ocl.hpp create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/opencv2/opencv.hpp create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/opencv2/opencv_modules.hpp create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/opencv2/photo.hpp create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/opencv2/photo/photo.hpp create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/opencv2/photo/photo_c.h create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/opencv2/stitching.hpp create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/opencv2/stitching/detail/autocalib.hpp create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/opencv2/stitching/detail/blenders.hpp create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/opencv2/stitching/detail/camera.hpp create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/opencv2/stitching/detail/exposure_compensate.hpp create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/opencv2/stitching/detail/matchers.hpp create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/opencv2/stitching/detail/motion_estimators.hpp create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/opencv2/stitching/detail/seam_finders.hpp create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/opencv2/stitching/detail/util.hpp create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/opencv2/stitching/detail/util_inl.hpp create mode 100644 
src/c/hardware/rasberrypi/libraries/opencv/opencv2/stitching/detail/warpers.hpp create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/opencv2/stitching/detail/warpers_inl.hpp create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/opencv2/stitching/stitcher.hpp create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/opencv2/stitching/warpers.hpp create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/opencv2/superres.hpp create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/opencv2/superres/optical_flow.hpp create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/opencv2/superres/superres.hpp create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/opencv2/ts.hpp create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/opencv2/ts/gpu_perf.hpp create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/opencv2/ts/gpu_test.hpp create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/opencv2/ts/ts.hpp create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/opencv2/ts/ts_gtest.h create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/opencv2/ts/ts_perf.hpp create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/opencv2/video.hpp create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/opencv2/video/background_segm.hpp create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/opencv2/video/tracking.hpp create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/opencv2/video/video.hpp create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/opencv2/videostab.hpp create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/opencv2/videostab/deblurring.hpp create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/opencv2/videostab/fast_marching.hpp create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/opencv2/videostab/fast_marching_inl.hpp create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/opencv2/videostab/frame_source.hpp create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/opencv2/videostab/global_motion.hpp create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/opencv2/videostab/inpainting.hpp create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/opencv2/videostab/log.hpp create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/opencv2/videostab/motion_stabilizing.hpp create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/opencv2/videostab/optical_flow.hpp create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/opencv2/videostab/stabilizer.hpp create mode 100644 src/c/hardware/rasberrypi/libraries/opencv/opencv2/videostab/videostab.hpp (limited to 'src/c/hardware/rasberrypi') diff --git a/src/c/hardware/rasberrypi/libraries/libjasper.a b/src/c/hardware/rasberrypi/libraries/libjasper.a new file mode 100644 index 0000000..6867f83 Binary files /dev/null and b/src/c/hardware/rasberrypi/libraries/libjasper.a differ diff --git a/src/c/hardware/rasberrypi/libraries/libjpeg.a b/src/c/hardware/rasberrypi/libraries/libjpeg.a new file mode 100644 index 0000000..4716fd8 Binary files /dev/null and b/src/c/hardware/rasberrypi/libraries/libjpeg.a differ diff --git a/src/c/hardware/rasberrypi/libraries/libtiff.a b/src/c/hardware/rasberrypi/libraries/libtiff.a new file mode 100644 index 0000000..743428e Binary files /dev/null and b/src/c/hardware/rasberrypi/libraries/libtiff.a differ diff --git a/src/c/hardware/rasberrypi/libraries/opencv/libopencv_calib3d.so b/src/c/hardware/rasberrypi/libraries/opencv/libopencv_calib3d.so new file mode 100644 index 
0000000..357be83 Binary files /dev/null and b/src/c/hardware/rasberrypi/libraries/opencv/libopencv_calib3d.so differ diff --git a/src/c/hardware/rasberrypi/libraries/opencv/libopencv_contrib.so b/src/c/hardware/rasberrypi/libraries/opencv/libopencv_contrib.so new file mode 100644 index 0000000..642ed00 Binary files /dev/null and b/src/c/hardware/rasberrypi/libraries/opencv/libopencv_contrib.so differ diff --git a/src/c/hardware/rasberrypi/libraries/opencv/libopencv_core.so b/src/c/hardware/rasberrypi/libraries/opencv/libopencv_core.so new file mode 100644 index 0000000..900f46d Binary files /dev/null and b/src/c/hardware/rasberrypi/libraries/opencv/libopencv_core.so differ diff --git a/src/c/hardware/rasberrypi/libraries/opencv/libopencv_features2d.so b/src/c/hardware/rasberrypi/libraries/opencv/libopencv_features2d.so new file mode 100644 index 0000000..b4d4296 Binary files /dev/null and b/src/c/hardware/rasberrypi/libraries/opencv/libopencv_features2d.so differ diff --git a/src/c/hardware/rasberrypi/libraries/opencv/libopencv_flann.so b/src/c/hardware/rasberrypi/libraries/opencv/libopencv_flann.so new file mode 100644 index 0000000..b4af830 Binary files /dev/null and b/src/c/hardware/rasberrypi/libraries/opencv/libopencv_flann.so differ diff --git a/src/c/hardware/rasberrypi/libraries/opencv/libopencv_gpu.so b/src/c/hardware/rasberrypi/libraries/opencv/libopencv_gpu.so new file mode 100644 index 0000000..c30af67 Binary files /dev/null and b/src/c/hardware/rasberrypi/libraries/opencv/libopencv_gpu.so differ diff --git a/src/c/hardware/rasberrypi/libraries/opencv/libopencv_highgui.so b/src/c/hardware/rasberrypi/libraries/opencv/libopencv_highgui.so new file mode 100644 index 0000000..9a50229 Binary files /dev/null and b/src/c/hardware/rasberrypi/libraries/opencv/libopencv_highgui.so differ diff --git a/src/c/hardware/rasberrypi/libraries/opencv/libopencv_imgproc.so b/src/c/hardware/rasberrypi/libraries/opencv/libopencv_imgproc.so new file mode 100644 index 0000000..28bd161 Binary files /dev/null and b/src/c/hardware/rasberrypi/libraries/opencv/libopencv_imgproc.so differ diff --git a/src/c/hardware/rasberrypi/libraries/opencv/libopencv_legacy.so b/src/c/hardware/rasberrypi/libraries/opencv/libopencv_legacy.so new file mode 100644 index 0000000..37d231f Binary files /dev/null and b/src/c/hardware/rasberrypi/libraries/opencv/libopencv_legacy.so differ diff --git a/src/c/hardware/rasberrypi/libraries/opencv/libopencv_ml.so b/src/c/hardware/rasberrypi/libraries/opencv/libopencv_ml.so new file mode 100644 index 0000000..b31bf42 Binary files /dev/null and b/src/c/hardware/rasberrypi/libraries/opencv/libopencv_ml.so differ diff --git a/src/c/hardware/rasberrypi/libraries/opencv/libopencv_nonfree.so b/src/c/hardware/rasberrypi/libraries/opencv/libopencv_nonfree.so new file mode 100644 index 0000000..53fdba2 Binary files /dev/null and b/src/c/hardware/rasberrypi/libraries/opencv/libopencv_nonfree.so differ diff --git a/src/c/hardware/rasberrypi/libraries/opencv/libopencv_objdetect.so b/src/c/hardware/rasberrypi/libraries/opencv/libopencv_objdetect.so new file mode 100644 index 0000000..2fd4468 Binary files /dev/null and b/src/c/hardware/rasberrypi/libraries/opencv/libopencv_objdetect.so differ diff --git a/src/c/hardware/rasberrypi/libraries/opencv/libopencv_ocl.so b/src/c/hardware/rasberrypi/libraries/opencv/libopencv_ocl.so new file mode 100644 index 0000000..6543e94 Binary files /dev/null and b/src/c/hardware/rasberrypi/libraries/opencv/libopencv_ocl.so differ diff --git 
a/src/c/hardware/rasberrypi/libraries/opencv/libopencv_photo.so b/src/c/hardware/rasberrypi/libraries/opencv/libopencv_photo.so new file mode 100644 index 0000000..4ef7a4e Binary files /dev/null and b/src/c/hardware/rasberrypi/libraries/opencv/libopencv_photo.so differ diff --git a/src/c/hardware/rasberrypi/libraries/opencv/libopencv_stitching.so b/src/c/hardware/rasberrypi/libraries/opencv/libopencv_stitching.so new file mode 100644 index 0000000..8e4ed8e Binary files /dev/null and b/src/c/hardware/rasberrypi/libraries/opencv/libopencv_stitching.so differ diff --git a/src/c/hardware/rasberrypi/libraries/opencv/libopencv_superres.so b/src/c/hardware/rasberrypi/libraries/opencv/libopencv_superres.so new file mode 100644 index 0000000..0233531 Binary files /dev/null and b/src/c/hardware/rasberrypi/libraries/opencv/libopencv_superres.so differ diff --git a/src/c/hardware/rasberrypi/libraries/opencv/libopencv_ts.a b/src/c/hardware/rasberrypi/libraries/opencv/libopencv_ts.a new file mode 100644 index 0000000..aea7e97 Binary files /dev/null and b/src/c/hardware/rasberrypi/libraries/opencv/libopencv_ts.a differ diff --git a/src/c/hardware/rasberrypi/libraries/opencv/libopencv_video.so b/src/c/hardware/rasberrypi/libraries/opencv/libopencv_video.so new file mode 100644 index 0000000..9c05c5d Binary files /dev/null and b/src/c/hardware/rasberrypi/libraries/opencv/libopencv_video.so differ diff --git a/src/c/hardware/rasberrypi/libraries/opencv/libopencv_videostab.so b/src/c/hardware/rasberrypi/libraries/opencv/libopencv_videostab.so new file mode 100644 index 0000000..a44efbc Binary files /dev/null and b/src/c/hardware/rasberrypi/libraries/opencv/libopencv_videostab.so differ diff --git a/src/c/hardware/rasberrypi/libraries/opencv/opencv2/calib3d.hpp b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/calib3d.hpp new file mode 100644 index 0000000..7356c15 --- /dev/null +++ b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/calib3d.hpp @@ -0,0 +1,43 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. 
+// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#include "opencv2/calib3d/calib3d.hpp" diff --git a/src/c/hardware/rasberrypi/libraries/opencv/opencv2/calib3d/calib3d.hpp b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/calib3d/calib3d.hpp new file mode 100644 index 0000000..5e9cde8 --- /dev/null +++ b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/calib3d/calib3d.hpp @@ -0,0 +1,811 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. 
+// +//M*/ + +#ifndef __OPENCV_CALIB3D_HPP__ +#define __OPENCV_CALIB3D_HPP__ + +#include "opencv2/core/core.hpp" +#include "opencv2/features2d/features2d.hpp" +#include "opencv2/core/affine.hpp" + +#ifdef __cplusplus +extern "C" { +#endif + +/****************************************************************************************\ +* Camera Calibration, Pose Estimation and Stereo * +\****************************************************************************************/ + +typedef struct CvPOSITObject CvPOSITObject; + +/* Allocates and initializes CvPOSITObject structure before doing cvPOSIT */ +CVAPI(CvPOSITObject*) cvCreatePOSITObject( CvPoint3D32f* points, int point_count ); + + +/* Runs POSIT (POSe from ITeration) algorithm for determining 3d position of + an object given its model and projection in a weak-perspective case */ +CVAPI(void) cvPOSIT( CvPOSITObject* posit_object, CvPoint2D32f* image_points, + double focal_length, CvTermCriteria criteria, + float* rotation_matrix, float* translation_vector); + +/* Releases CvPOSITObject structure */ +CVAPI(void) cvReleasePOSITObject( CvPOSITObject** posit_object ); + +/* updates the number of RANSAC iterations */ +CVAPI(int) cvRANSACUpdateNumIters( double p, double err_prob, + int model_points, int max_iters ); + +CVAPI(void) cvConvertPointsHomogeneous( const CvMat* src, CvMat* dst ); + +/* Calculates fundamental matrix given a set of corresponding points */ +#define CV_FM_7POINT 1 +#define CV_FM_8POINT 2 + +#define CV_LMEDS 4 +#define CV_RANSAC 8 + +#define CV_FM_LMEDS_ONLY CV_LMEDS +#define CV_FM_RANSAC_ONLY CV_RANSAC +#define CV_FM_LMEDS CV_LMEDS +#define CV_FM_RANSAC CV_RANSAC + +enum +{ + CV_ITERATIVE = 0, + CV_EPNP = 1, // F.Moreno-Noguer, V.Lepetit and P.Fua "EPnP: Efficient Perspective-n-Point Camera Pose Estimation" + CV_P3P = 2 // X.S. Gao, X.-R. Hou, J. Tang, H.-F. 
Chang; "Complete Solution Classification for the Perspective-Three-Point Problem" +}; + +CVAPI(int) cvFindFundamentalMat( const CvMat* points1, const CvMat* points2, + CvMat* fundamental_matrix, + int method CV_DEFAULT(CV_FM_RANSAC), + double param1 CV_DEFAULT(3.), double param2 CV_DEFAULT(0.99), + CvMat* status CV_DEFAULT(NULL) ); + +/* For each input point on one of images + computes parameters of the corresponding + epipolar line on the other image */ +CVAPI(void) cvComputeCorrespondEpilines( const CvMat* points, + int which_image, + const CvMat* fundamental_matrix, + CvMat* correspondent_lines ); + +/* Triangulation functions */ + +CVAPI(void) cvTriangulatePoints(CvMat* projMatr1, CvMat* projMatr2, + CvMat* projPoints1, CvMat* projPoints2, + CvMat* points4D); + +CVAPI(void) cvCorrectMatches(CvMat* F, CvMat* points1, CvMat* points2, + CvMat* new_points1, CvMat* new_points2); + + +/* Computes the optimal new camera matrix according to the free scaling parameter alpha: + alpha=0 - only valid pixels will be retained in the undistorted image + alpha=1 - all the source image pixels will be retained in the undistorted image +*/ +CVAPI(void) cvGetOptimalNewCameraMatrix( const CvMat* camera_matrix, + const CvMat* dist_coeffs, + CvSize image_size, double alpha, + CvMat* new_camera_matrix, + CvSize new_imag_size CV_DEFAULT(cvSize(0,0)), + CvRect* valid_pixel_ROI CV_DEFAULT(0), + int center_principal_point CV_DEFAULT(0)); + +/* Converts rotation vector to rotation matrix or vice versa */ +CVAPI(int) cvRodrigues2( const CvMat* src, CvMat* dst, + CvMat* jacobian CV_DEFAULT(0) ); + +/* Finds perspective transformation between the object plane and image (view) plane */ +CVAPI(int) cvFindHomography( const CvMat* src_points, + const CvMat* dst_points, + CvMat* homography, + int method CV_DEFAULT(0), + double ransacReprojThreshold CV_DEFAULT(3), + CvMat* mask CV_DEFAULT(0)); + +/* Computes RQ decomposition for 3x3 matrices */ +CVAPI(void) cvRQDecomp3x3( const CvMat *matrixM, CvMat *matrixR, CvMat *matrixQ, + CvMat *matrixQx CV_DEFAULT(NULL), + CvMat *matrixQy CV_DEFAULT(NULL), + CvMat *matrixQz CV_DEFAULT(NULL), + CvPoint3D64f *eulerAngles CV_DEFAULT(NULL)); + +/* Computes projection matrix decomposition */ +CVAPI(void) cvDecomposeProjectionMatrix( const CvMat *projMatr, CvMat *calibMatr, + CvMat *rotMatr, CvMat *posVect, + CvMat *rotMatrX CV_DEFAULT(NULL), + CvMat *rotMatrY CV_DEFAULT(NULL), + CvMat *rotMatrZ CV_DEFAULT(NULL), + CvPoint3D64f *eulerAngles CV_DEFAULT(NULL)); + +/* Computes d(AB)/dA and d(AB)/dB */ +CVAPI(void) cvCalcMatMulDeriv( const CvMat* A, const CvMat* B, CvMat* dABdA, CvMat* dABdB ); + +/* Computes r3 = rodrigues(rodrigues(r2)*rodrigues(r1)), + t3 = rodrigues(r2)*t1 + t2 and the respective derivatives */ +CVAPI(void) cvComposeRT( const CvMat* _rvec1, const CvMat* _tvec1, + const CvMat* _rvec2, const CvMat* _tvec2, + CvMat* _rvec3, CvMat* _tvec3, + CvMat* dr3dr1 CV_DEFAULT(0), CvMat* dr3dt1 CV_DEFAULT(0), + CvMat* dr3dr2 CV_DEFAULT(0), CvMat* dr3dt2 CV_DEFAULT(0), + CvMat* dt3dr1 CV_DEFAULT(0), CvMat* dt3dt1 CV_DEFAULT(0), + CvMat* dt3dr2 CV_DEFAULT(0), CvMat* dt3dt2 CV_DEFAULT(0) ); + +/* Projects object points to the view plane using + the specified extrinsic and intrinsic camera parameters */ +CVAPI(void) cvProjectPoints2( const CvMat* object_points, const CvMat* rotation_vector, + const CvMat* translation_vector, const CvMat* camera_matrix, + const CvMat* distortion_coeffs, CvMat* image_points, + CvMat* dpdrot CV_DEFAULT(NULL), CvMat* dpdt CV_DEFAULT(NULL), + CvMat* dpdf 
CV_DEFAULT(NULL), CvMat* dpdc CV_DEFAULT(NULL), + CvMat* dpddist CV_DEFAULT(NULL), + double aspect_ratio CV_DEFAULT(0)); + +/* Finds extrinsic camera parameters from + a few known corresponding point pairs and intrinsic parameters */ +CVAPI(void) cvFindExtrinsicCameraParams2( const CvMat* object_points, + const CvMat* image_points, + const CvMat* camera_matrix, + const CvMat* distortion_coeffs, + CvMat* rotation_vector, + CvMat* translation_vector, + int use_extrinsic_guess CV_DEFAULT(0) ); + +/* Computes initial estimate of the intrinsic camera parameters + in case of planar calibration target (e.g. chessboard) */ +CVAPI(void) cvInitIntrinsicParams2D( const CvMat* object_points, + const CvMat* image_points, + const CvMat* npoints, CvSize image_size, + CvMat* camera_matrix, + double aspect_ratio CV_DEFAULT(1.) ); + +#define CV_CALIB_CB_ADAPTIVE_THRESH 1 +#define CV_CALIB_CB_NORMALIZE_IMAGE 2 +#define CV_CALIB_CB_FILTER_QUADS 4 +#define CV_CALIB_CB_FAST_CHECK 8 + +// Performs a fast check if a chessboard is in the input image. This is a workaround to +// a problem of cvFindChessboardCorners being slow on images with no chessboard +// - src: input image +// - size: chessboard size +// Returns 1 if a chessboard can be in this image and findChessboardCorners should be called, +// 0 if there is no chessboard, -1 in case of error +CVAPI(int) cvCheckChessboard(IplImage* src, CvSize size); + + /* Detects corners on a chessboard calibration pattern */ +CVAPI(int) cvFindChessboardCorners( const void* image, CvSize pattern_size, + CvPoint2D32f* corners, + int* corner_count CV_DEFAULT(NULL), + int flags CV_DEFAULT(CV_CALIB_CB_ADAPTIVE_THRESH+CV_CALIB_CB_NORMALIZE_IMAGE) ); + +/* Draws individual chessboard corners or the whole chessboard detected */ +CVAPI(void) cvDrawChessboardCorners( CvArr* image, CvSize pattern_size, + CvPoint2D32f* corners, + int count, int pattern_was_found ); + +#define CV_CALIB_USE_INTRINSIC_GUESS 1 +#define CV_CALIB_FIX_ASPECT_RATIO 2 +#define CV_CALIB_FIX_PRINCIPAL_POINT 4 +#define CV_CALIB_ZERO_TANGENT_DIST 8 +#define CV_CALIB_FIX_FOCAL_LENGTH 16 +#define CV_CALIB_FIX_K1 32 +#define CV_CALIB_FIX_K2 64 +#define CV_CALIB_FIX_K3 128 +#define CV_CALIB_FIX_K4 2048 +#define CV_CALIB_FIX_K5 4096 +#define CV_CALIB_FIX_K6 8192 +#define CV_CALIB_RATIONAL_MODEL 16384 + +/* Finds intrinsic and extrinsic camera parameters + from a few views of known calibration pattern */ +CVAPI(double) cvCalibrateCamera2( const CvMat* object_points, + const CvMat* image_points, + const CvMat* point_counts, + CvSize image_size, + CvMat* camera_matrix, + CvMat* distortion_coeffs, + CvMat* rotation_vectors CV_DEFAULT(NULL), + CvMat* translation_vectors CV_DEFAULT(NULL), + int flags CV_DEFAULT(0), + CvTermCriteria term_crit CV_DEFAULT(cvTermCriteria( + CV_TERMCRIT_ITER+CV_TERMCRIT_EPS,30,DBL_EPSILON)) ); + +/* Computes various useful characteristics of the camera from the data computed by + cvCalibrateCamera2 */ +CVAPI(void) cvCalibrationMatrixValues( const CvMat *camera_matrix, + CvSize image_size, + double aperture_width CV_DEFAULT(0), + double aperture_height CV_DEFAULT(0), + double *fovx CV_DEFAULT(NULL), + double *fovy CV_DEFAULT(NULL), + double *focal_length CV_DEFAULT(NULL), + CvPoint2D64f *principal_point CV_DEFAULT(NULL), + double *pixel_aspect_ratio CV_DEFAULT(NULL)); + +#define CV_CALIB_FIX_INTRINSIC 256 +#define CV_CALIB_SAME_FOCAL_LENGTH 512 + +/* Computes the transformation from one camera coordinate system to another one + from a few correspondent views of the same calibration target. 
Optionally, calibrates + both cameras */ +CVAPI(double) cvStereoCalibrate( const CvMat* object_points, const CvMat* image_points1, + const CvMat* image_points2, const CvMat* npoints, + CvMat* camera_matrix1, CvMat* dist_coeffs1, + CvMat* camera_matrix2, CvMat* dist_coeffs2, + CvSize image_size, CvMat* R, CvMat* T, + CvMat* E CV_DEFAULT(0), CvMat* F CV_DEFAULT(0), + CvTermCriteria term_crit CV_DEFAULT(cvTermCriteria( + CV_TERMCRIT_ITER+CV_TERMCRIT_EPS,30,1e-6)), + int flags CV_DEFAULT(CV_CALIB_FIX_INTRINSIC)); + +#define CV_CALIB_ZERO_DISPARITY 1024 + +/* Computes 3D rotations (+ optional shift) for each camera coordinate system to make both + views parallel (=> to make all the epipolar lines horizontal or vertical) */ +CVAPI(void) cvStereoRectify( const CvMat* camera_matrix1, const CvMat* camera_matrix2, + const CvMat* dist_coeffs1, const CvMat* dist_coeffs2, + CvSize image_size, const CvMat* R, const CvMat* T, + CvMat* R1, CvMat* R2, CvMat* P1, CvMat* P2, + CvMat* Q CV_DEFAULT(0), + int flags CV_DEFAULT(CV_CALIB_ZERO_DISPARITY), + double alpha CV_DEFAULT(-1), + CvSize new_image_size CV_DEFAULT(cvSize(0,0)), + CvRect* valid_pix_ROI1 CV_DEFAULT(0), + CvRect* valid_pix_ROI2 CV_DEFAULT(0)); + +/* Computes rectification transformations for uncalibrated pair of images using a set + of point correspondences */ +CVAPI(int) cvStereoRectifyUncalibrated( const CvMat* points1, const CvMat* points2, + const CvMat* F, CvSize img_size, + CvMat* H1, CvMat* H2, + double threshold CV_DEFAULT(5)); + + + +/* stereo correspondence parameters and functions */ + +#define CV_STEREO_BM_NORMALIZED_RESPONSE 0 +#define CV_STEREO_BM_XSOBEL 1 + +/* Block matching algorithm structure */ +typedef struct CvStereoBMState +{ + // pre-filtering (normalization of input images) + int preFilterType; // =CV_STEREO_BM_NORMALIZED_RESPONSE now + int preFilterSize; // averaging window size: ~5x5..21x21 + int preFilterCap; // the output of pre-filtering is clipped by [-preFilterCap,preFilterCap] + + // correspondence using Sum of Absolute Difference (SAD) + int SADWindowSize; // ~5x5..21x21 + int minDisparity; // minimum disparity (can be negative) + int numberOfDisparities; // maximum disparity - minimum disparity (> 0) + + // post-filtering + int textureThreshold; // the disparity is only computed for pixels + // with textured enough neighborhood + int uniquenessRatio; // accept the computed disparity d* only if + // SAD(d) >= SAD(d*)*(1 + uniquenessRatio/100.) + // for any d != d*+/-1 within the search range. 
+ int speckleWindowSize; // disparity variation window + int speckleRange; // acceptable range of variation in window + + int trySmallerWindows; // if 1, the results may be more accurate, + // at the expense of slower processing + CvRect roi1, roi2; + int disp12MaxDiff; + + // temporary buffers + CvMat* preFilteredImg0; + CvMat* preFilteredImg1; + CvMat* slidingSumBuf; + CvMat* cost; + CvMat* disp; +} CvStereoBMState; + +#define CV_STEREO_BM_BASIC 0 +#define CV_STEREO_BM_FISH_EYE 1 +#define CV_STEREO_BM_NARROW 2 + +CVAPI(CvStereoBMState*) cvCreateStereoBMState(int preset CV_DEFAULT(CV_STEREO_BM_BASIC), + int numberOfDisparities CV_DEFAULT(0)); + +CVAPI(void) cvReleaseStereoBMState( CvStereoBMState** state ); + +CVAPI(void) cvFindStereoCorrespondenceBM( const CvArr* left, const CvArr* right, + CvArr* disparity, CvStereoBMState* state ); + +CVAPI(CvRect) cvGetValidDisparityROI( CvRect roi1, CvRect roi2, int minDisparity, + int numberOfDisparities, int SADWindowSize ); + +CVAPI(void) cvValidateDisparity( CvArr* disparity, const CvArr* cost, + int minDisparity, int numberOfDisparities, + int disp12MaxDiff CV_DEFAULT(1) ); + +/* Reprojects the computed disparity image to the 3D space using the specified 4x4 matrix */ +CVAPI(void) cvReprojectImageTo3D( const CvArr* disparityImage, + CvArr* _3dImage, const CvMat* Q, + int handleMissingValues CV_DEFAULT(0) ); + + +#ifdef __cplusplus +} + +////////////////////////////////////////////////////////////////////////////////////////// +class CV_EXPORTS CvLevMarq +{ +public: + CvLevMarq(); + CvLevMarq( int nparams, int nerrs, CvTermCriteria criteria= + cvTermCriteria(CV_TERMCRIT_EPS+CV_TERMCRIT_ITER,30,DBL_EPSILON), + bool completeSymmFlag=false ); + ~CvLevMarq(); + void init( int nparams, int nerrs, CvTermCriteria criteria= + cvTermCriteria(CV_TERMCRIT_EPS+CV_TERMCRIT_ITER,30,DBL_EPSILON), + bool completeSymmFlag=false ); + bool update( const CvMat*& param, CvMat*& J, CvMat*& err ); + bool updateAlt( const CvMat*& param, CvMat*& JtJ, CvMat*& JtErr, double*& errNorm ); + + void clear(); + void step(); + enum { DONE=0, STARTED=1, CALC_J=2, CHECK_ERR=3 }; + + cv::Ptr<CvMat> mask; + cv::Ptr<CvMat> prevParam; + cv::Ptr<CvMat> param; + cv::Ptr<CvMat> J; + cv::Ptr<CvMat> err; + cv::Ptr<CvMat> JtJ; + cv::Ptr<CvMat> JtJN; + cv::Ptr<CvMat> JtErr; + cv::Ptr<CvMat> JtJV; + cv::Ptr<CvMat> JtJW; + double prevErrNorm, errNorm; + int lambdaLg10; + CvTermCriteria criteria; + int state; + int iters; + bool completeSymmFlag; +}; + +namespace cv +{ +//! converts rotation vector to rotation matrix or vice versa using Rodrigues transformation +CV_EXPORTS_W void Rodrigues(InputArray src, OutputArray dst, OutputArray jacobian=noArray()); + +//! type of the robust estimation algorithm +enum +{ + LMEDS=CV_LMEDS, //!< least-median algorithm + RANSAC=CV_RANSAC //!< RANSAC algorithm +}; + +//! computes the best-fit perspective transformation mapping srcPoints to dstPoints. +CV_EXPORTS_W Mat findHomography( InputArray srcPoints, InputArray dstPoints, + int method=0, double ransacReprojThreshold=3, + OutputArray mask=noArray()); + +//! variant of findHomography for backward compatibility +CV_EXPORTS Mat findHomography( InputArray srcPoints, InputArray dstPoints, + OutputArray mask, int method=0, double ransacReprojThreshold=3); + +//! Computes RQ decomposition of 3x3 matrix +CV_EXPORTS_W Vec3d RQDecomp3x3( InputArray src, OutputArray mtxR, OutputArray mtxQ, + OutputArray Qx=noArray(), + OutputArray Qy=noArray(), + OutputArray Qz=noArray()); + +//! 
Decomposes the projection matrix into camera matrix and the rotation martix and the translation vector +CV_EXPORTS_W void decomposeProjectionMatrix( InputArray projMatrix, OutputArray cameraMatrix, + OutputArray rotMatrix, OutputArray transVect, + OutputArray rotMatrixX=noArray(), + OutputArray rotMatrixY=noArray(), + OutputArray rotMatrixZ=noArray(), + OutputArray eulerAngles=noArray() ); + +//! computes derivatives of the matrix product w.r.t each of the multiplied matrix coefficients +CV_EXPORTS_W void matMulDeriv( InputArray A, InputArray B, + OutputArray dABdA, + OutputArray dABdB ); + +//! composes 2 [R|t] transformations together. Also computes the derivatives of the result w.r.t the arguments +CV_EXPORTS_W void composeRT( InputArray rvec1, InputArray tvec1, + InputArray rvec2, InputArray tvec2, + OutputArray rvec3, OutputArray tvec3, + OutputArray dr3dr1=noArray(), OutputArray dr3dt1=noArray(), + OutputArray dr3dr2=noArray(), OutputArray dr3dt2=noArray(), + OutputArray dt3dr1=noArray(), OutputArray dt3dt1=noArray(), + OutputArray dt3dr2=noArray(), OutputArray dt3dt2=noArray() ); + +//! projects points from the model coordinate space to the image coordinates. Also computes derivatives of the image coordinates w.r.t the intrinsic and extrinsic camera parameters +CV_EXPORTS_W void projectPoints( InputArray objectPoints, + InputArray rvec, InputArray tvec, + InputArray cameraMatrix, InputArray distCoeffs, + OutputArray imagePoints, + OutputArray jacobian=noArray(), + double aspectRatio=0 ); + +//! computes the camera pose from a few 3D points and the corresponding projections. The outliers are not handled. +enum +{ + ITERATIVE=CV_ITERATIVE, + EPNP=CV_EPNP, + P3P=CV_P3P +}; +CV_EXPORTS_W bool solvePnP( InputArray objectPoints, InputArray imagePoints, + InputArray cameraMatrix, InputArray distCoeffs, + OutputArray rvec, OutputArray tvec, + bool useExtrinsicGuess=false, int flags=ITERATIVE); + +//! computes the camera pose from a few 3D points and the corresponding projections. The outliers are possible. +CV_EXPORTS_W void solvePnPRansac( InputArray objectPoints, + InputArray imagePoints, + InputArray cameraMatrix, + InputArray distCoeffs, + OutputArray rvec, + OutputArray tvec, + bool useExtrinsicGuess = false, + int iterationsCount = 100, + float reprojectionError = 8.0, + int minInliersCount = 100, + OutputArray inliers = noArray(), + int flags = ITERATIVE); + +//! initializes camera matrix from a few 3D points and the corresponding projections. +CV_EXPORTS_W Mat initCameraMatrix2D( InputArrayOfArrays objectPoints, + InputArrayOfArrays imagePoints, + Size imageSize, double aspectRatio=1. ); + +enum { CALIB_CB_ADAPTIVE_THRESH = 1, CALIB_CB_NORMALIZE_IMAGE = 2, + CALIB_CB_FILTER_QUADS = 4, CALIB_CB_FAST_CHECK = 8 }; + +//! finds checkerboard pattern of the specified size in the image +CV_EXPORTS_W bool findChessboardCorners( InputArray image, Size patternSize, + OutputArray corners, + int flags=CALIB_CB_ADAPTIVE_THRESH+CALIB_CB_NORMALIZE_IMAGE ); + +//! finds subpixel-accurate positions of the chessboard corners +CV_EXPORTS bool find4QuadCornerSubpix(InputArray img, InputOutputArray corners, Size region_size); + +//! draws the checkerboard pattern (found or partly found) in the image +CV_EXPORTS_W void drawChessboardCorners( InputOutputArray image, Size patternSize, + InputArray corners, bool patternWasFound ); + +enum { CALIB_CB_SYMMETRIC_GRID = 1, CALIB_CB_ASYMMETRIC_GRID = 2, + CALIB_CB_CLUSTERING = 4 }; + +//! 
finds circles' grid pattern of the specified size in the image +CV_EXPORTS_W bool findCirclesGrid( InputArray image, Size patternSize, + OutputArray centers, int flags=CALIB_CB_SYMMETRIC_GRID, + const Ptr<FeatureDetector> &blobDetector = new SimpleBlobDetector()); + +//! the deprecated function. Use findCirclesGrid() instead of it. +CV_EXPORTS_W bool findCirclesGridDefault( InputArray image, Size patternSize, + OutputArray centers, int flags=CALIB_CB_SYMMETRIC_GRID ); +enum +{ + CALIB_USE_INTRINSIC_GUESS = CV_CALIB_USE_INTRINSIC_GUESS, + CALIB_FIX_ASPECT_RATIO = CV_CALIB_FIX_ASPECT_RATIO, + CALIB_FIX_PRINCIPAL_POINT = CV_CALIB_FIX_PRINCIPAL_POINT, + CALIB_ZERO_TANGENT_DIST = CV_CALIB_ZERO_TANGENT_DIST, + CALIB_FIX_FOCAL_LENGTH = CV_CALIB_FIX_FOCAL_LENGTH, + CALIB_FIX_K1 = CV_CALIB_FIX_K1, + CALIB_FIX_K2 = CV_CALIB_FIX_K2, + CALIB_FIX_K3 = CV_CALIB_FIX_K3, + CALIB_FIX_K4 = CV_CALIB_FIX_K4, + CALIB_FIX_K5 = CV_CALIB_FIX_K5, + CALIB_FIX_K6 = CV_CALIB_FIX_K6, + CALIB_RATIONAL_MODEL = CV_CALIB_RATIONAL_MODEL, + // only for stereo + CALIB_FIX_INTRINSIC = CV_CALIB_FIX_INTRINSIC, + CALIB_SAME_FOCAL_LENGTH = CV_CALIB_SAME_FOCAL_LENGTH, + // for stereo rectification + CALIB_ZERO_DISPARITY = CV_CALIB_ZERO_DISPARITY +}; + +//! finds intrinsic and extrinsic camera parameters from several fews of a known calibration pattern. +CV_EXPORTS_W double calibrateCamera( InputArrayOfArrays objectPoints, + InputArrayOfArrays imagePoints, + Size imageSize, + CV_OUT InputOutputArray cameraMatrix, + CV_OUT InputOutputArray distCoeffs, + OutputArrayOfArrays rvecs, OutputArrayOfArrays tvecs, + int flags=0, TermCriteria criteria = TermCriteria( + TermCriteria::COUNT+TermCriteria::EPS, 30, DBL_EPSILON) ); + +//! computes several useful camera characteristics from the camera matrix, camera frame resolution and the physical sensor size. +CV_EXPORTS_W void calibrationMatrixValues( InputArray cameraMatrix, + Size imageSize, + double apertureWidth, + double apertureHeight, + CV_OUT double& fovx, + CV_OUT double& fovy, + CV_OUT double& focalLength, + CV_OUT Point2d& principalPoint, + CV_OUT double& aspectRatio ); + +//! finds intrinsic and extrinsic parameters of a stereo camera +CV_EXPORTS_W double stereoCalibrate( InputArrayOfArrays objectPoints, + InputArrayOfArrays imagePoints1, + InputArrayOfArrays imagePoints2, + CV_OUT InputOutputArray cameraMatrix1, + CV_OUT InputOutputArray distCoeffs1, + CV_OUT InputOutputArray cameraMatrix2, + CV_OUT InputOutputArray distCoeffs2, + Size imageSize, OutputArray R, + OutputArray T, OutputArray E, OutputArray F, + TermCriteria criteria = TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, 30, 1e-6), + int flags=CALIB_FIX_INTRINSIC ); + + +//! computes the rectification transformation for a stereo camera from its intrinsic and extrinsic parameters +CV_EXPORTS_W void stereoRectify( InputArray cameraMatrix1, InputArray distCoeffs1, + InputArray cameraMatrix2, InputArray distCoeffs2, + Size imageSize, InputArray R, InputArray T, + OutputArray R1, OutputArray R2, + OutputArray P1, OutputArray P2, + OutputArray Q, int flags=CALIB_ZERO_DISPARITY, + double alpha=-1, Size newImageSize=Size(), + CV_OUT Rect* validPixROI1=0, CV_OUT Rect* validPixROI2=0 ); + +//! computes the rectification transformation for an uncalibrated stereo camera (zero distortion is assumed) +CV_EXPORTS_W bool stereoRectifyUncalibrated( InputArray points1, InputArray points2, + InputArray F, Size imgSize, + OutputArray H1, OutputArray H2, + double threshold=5 ); + +//! 
computes the rectification transformations for 3-head camera, where all the heads are on the same line. +CV_EXPORTS_W float rectify3Collinear( InputArray cameraMatrix1, InputArray distCoeffs1, + InputArray cameraMatrix2, InputArray distCoeffs2, + InputArray cameraMatrix3, InputArray distCoeffs3, + InputArrayOfArrays imgpt1, InputArrayOfArrays imgpt3, + Size imageSize, InputArray R12, InputArray T12, + InputArray R13, InputArray T13, + OutputArray R1, OutputArray R2, OutputArray R3, + OutputArray P1, OutputArray P2, OutputArray P3, + OutputArray Q, double alpha, Size newImgSize, + CV_OUT Rect* roi1, CV_OUT Rect* roi2, int flags ); + +//! returns the optimal new camera matrix +CV_EXPORTS_W Mat getOptimalNewCameraMatrix( InputArray cameraMatrix, InputArray distCoeffs, + Size imageSize, double alpha, Size newImgSize=Size(), + CV_OUT Rect* validPixROI=0, bool centerPrincipalPoint=false); + +//! converts point coordinates from normal pixel coordinates to homogeneous coordinates ((x,y)->(x,y,1)) +CV_EXPORTS_W void convertPointsToHomogeneous( InputArray src, OutputArray dst ); + +//! converts point coordinates from homogeneous to normal pixel coordinates ((x,y,z)->(x/z, y/z)) +CV_EXPORTS_W void convertPointsFromHomogeneous( InputArray src, OutputArray dst ); + +//! for backward compatibility +CV_EXPORTS void convertPointsHomogeneous( InputArray src, OutputArray dst ); + +//! the algorithm for finding fundamental matrix +enum +{ + FM_7POINT = CV_FM_7POINT, //!< 7-point algorithm + FM_8POINT = CV_FM_8POINT, //!< 8-point algorithm + FM_LMEDS = CV_FM_LMEDS, //!< least-median algorithm + FM_RANSAC = CV_FM_RANSAC //!< RANSAC algorithm +}; + +//! finds fundamental matrix from a set of corresponding 2D points +CV_EXPORTS_W Mat findFundamentalMat( InputArray points1, InputArray points2, + int method=FM_RANSAC, + double param1=3., double param2=0.99, + OutputArray mask=noArray()); + +//! variant of findFundamentalMat for backward compatibility +CV_EXPORTS Mat findFundamentalMat( InputArray points1, InputArray points2, + OutputArray mask, int method=FM_RANSAC, + double param1=3., double param2=0.99); + +//! finds coordinates of epipolar lines corresponding the specified points +CV_EXPORTS_W void computeCorrespondEpilines( InputArray points, + int whichImage, InputArray F, + OutputArray lines ); + +CV_EXPORTS_W void triangulatePoints( InputArray projMatr1, InputArray projMatr2, + InputArray projPoints1, InputArray projPoints2, + OutputArray points4D ); + +CV_EXPORTS_W void correctMatches( InputArray F, InputArray points1, InputArray points2, + OutputArray newPoints1, OutputArray newPoints2 ); + +template<> CV_EXPORTS void Ptr::delete_obj(); + +/*! + Block Matching Stereo Correspondence Algorithm + + The class implements BM stereo correspondence algorithm by K. Konolige. +*/ +class CV_EXPORTS_W StereoBM +{ +public: + enum { PREFILTER_NORMALIZED_RESPONSE = 0, PREFILTER_XSOBEL = 1, + BASIC_PRESET=0, FISH_EYE_PRESET=1, NARROW_PRESET=2 }; + + //! the default constructor + CV_WRAP StereoBM(); + //! the full constructor taking the camera-specific preset, number of disparities and the SAD window size + CV_WRAP StereoBM(int preset, int ndisparities=0, int SADWindowSize=21); + //! the method that reinitializes the state. The previous content is destroyed + void init(int preset, int ndisparities=0, int SADWindowSize=21); + //! the stereo correspondence operator. 
Finds the disparity for the specified rectified stereo pair + CV_WRAP_AS(compute) void operator()( InputArray left, InputArray right, + OutputArray disparity, int disptype=CV_16S ); + + //! pointer to the underlying CvStereoBMState + Ptr state; +}; + + +/*! + Semi-Global Block Matching Stereo Correspondence Algorithm + + The class implements the original SGBM stereo correspondence algorithm by H. Hirschmuller and some its modification. + */ +class CV_EXPORTS_W StereoSGBM +{ +public: + enum { DISP_SHIFT=4, DISP_SCALE = (1<(X,Y,Z) using the matrix Q returned by cv::stereoRectify +CV_EXPORTS_W void reprojectImageTo3D( InputArray disparity, + OutputArray _3dImage, InputArray Q, + bool handleMissingValues=false, + int ddepth=-1 ); + +CV_EXPORTS_W int estimateAffine3D(InputArray src, InputArray dst, + OutputArray out, OutputArray inliers, + double ransacThreshold=3, double confidence=0.99); + +namespace fisheye +{ + enum{ + CALIB_USE_INTRINSIC_GUESS = 1, + CALIB_RECOMPUTE_EXTRINSIC = 2, + CALIB_CHECK_COND = 4, + CALIB_FIX_SKEW = 8, + CALIB_FIX_K1 = 16, + CALIB_FIX_K2 = 32, + CALIB_FIX_K3 = 64, + CALIB_FIX_K4 = 128, + CALIB_FIX_INTRINSIC = 256 + }; + + //! projects 3D points using fisheye model + CV_EXPORTS void projectPoints(InputArray objectPoints, OutputArray imagePoints, const Affine3d& affine, + InputArray K, InputArray D, double alpha = 0, OutputArray jacobian = noArray()); + + //! projects points using fisheye model + CV_EXPORTS void projectPoints(InputArray objectPoints, OutputArray imagePoints, InputArray rvec, InputArray tvec, + InputArray K, InputArray D, double alpha = 0, OutputArray jacobian = noArray()); + + //! distorts 2D points using fisheye model + CV_EXPORTS void distortPoints(InputArray undistorted, OutputArray distorted, InputArray K, InputArray D, double alpha = 0); + + //! undistorts 2D points using fisheye model + CV_EXPORTS void undistortPoints(InputArray distorted, OutputArray undistorted, + InputArray K, InputArray D, InputArray R = noArray(), InputArray P = noArray()); + + //! computing undistortion and rectification maps for image transform by cv::remap() + //! If D is empty zero distortion is used, if R or P is empty identity matrixes are used + CV_EXPORTS void initUndistortRectifyMap(InputArray K, InputArray D, InputArray R, InputArray P, + const cv::Size& size, int m1type, OutputArray map1, OutputArray map2); + + //! undistorts image, optionally changes resolution and camera matrix. If Knew zero identity matrix is used + CV_EXPORTS void undistortImage(InputArray distorted, OutputArray undistorted, + InputArray K, InputArray D, InputArray Knew = cv::noArray(), const Size& new_size = Size()); + + //! estimates new camera matrix for undistortion or rectification + CV_EXPORTS void estimateNewCameraMatrixForUndistortRectify(InputArray K, InputArray D, const Size &image_size, InputArray R, + OutputArray P, double balance = 0.0, const Size& new_size = Size(), double fov_scale = 1.0); + + //! performs camera calibaration + CV_EXPORTS double calibrate(InputArrayOfArrays objectPoints, InputArrayOfArrays imagePoints, const Size& image_size, + InputOutputArray K, InputOutputArray D, OutputArrayOfArrays rvecs, OutputArrayOfArrays tvecs, int flags = 0, + TermCriteria criteria = TermCriteria(TermCriteria::COUNT + TermCriteria::EPS, 100, DBL_EPSILON)); + + //! 
stereo rectification estimation + CV_EXPORTS void stereoRectify(InputArray K1, InputArray D1, InputArray K2, InputArray D2, const Size &imageSize, InputArray R, InputArray tvec, + OutputArray R1, OutputArray R2, OutputArray P1, OutputArray P2, OutputArray Q, int flags, const Size &newImageSize = Size(), + double balance = 0.0, double fov_scale = 1.0); + + //! performs stereo calibaration + CV_EXPORTS double stereoCalibrate(InputArrayOfArrays objectPoints, InputArrayOfArrays imagePoints1, InputArrayOfArrays imagePoints2, + InputOutputArray K1, InputOutputArray D1, InputOutputArray K2, InputOutputArray D2, Size imageSize, + OutputArray R, OutputArray T, int flags = CALIB_FIX_INTRINSIC, + TermCriteria criteria = TermCriteria(TermCriteria::COUNT + TermCriteria::EPS, 100, DBL_EPSILON)); + +} + +} + +#endif +#endif diff --git a/src/c/hardware/rasberrypi/libraries/opencv/opencv2/contrib/contrib.hpp b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/contrib/contrib.hpp new file mode 100644 index 0000000..d587942 --- /dev/null +++ b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/contrib/contrib.hpp @@ -0,0 +1,998 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. 
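For orientation, the calib3d interface vendored above is the entry point the RPi image-processing code calls into; the sketch below walks the usual chessboard flow (findChessboardCorners -> calibrateCamera -> solvePnP). The board geometry, square size and helper name are illustrative assumptions, not something defined in this patch.

#include <vector>
#include "opencv2/core/core.hpp"
#include "opencv2/calib3d/calib3d.hpp"

// Illustrative helper: 'views' are grayscale captures of a 9x6 chessboard (assumed).
static void calibrateAndEstimatePose(const std::vector<cv::Mat>& views)
{
    const cv::Size boardSize(9, 6);
    const float squareSize = 0.025f;                     // assumed 25 mm squares

    std::vector<cv::Point3f> corners3d;                  // board corners in the board frame
    for (int i = 0; i < boardSize.height; i++)
        for (int j = 0; j < boardSize.width; j++)
            corners3d.push_back(cv::Point3f(j * squareSize, i * squareSize, 0.f));

    std::vector<std::vector<cv::Point3f> > objectPoints;
    std::vector<std::vector<cv::Point2f> > imagePoints;
    for (size_t k = 0; k < views.size(); k++)
    {
        std::vector<cv::Point2f> corners;
        if (cv::findChessboardCorners(views[k], boardSize, corners))
        {
            imagePoints.push_back(corners);
            objectPoints.push_back(corners3d);
        }
    }
    if (imagePoints.empty())
        return;

    cv::Mat cameraMatrix, distCoeffs;
    std::vector<cv::Mat> rvecs, tvecs;
    double rms = cv::calibrateCamera(objectPoints, imagePoints, views[0].size(),
                                     cameraMatrix, distCoeffs, rvecs, tvecs);

    cv::Mat rvec, tvec;                                  // pose of the board in view 0
    cv::solvePnP(objectPoints[0], imagePoints[0], cameraMatrix, distCoeffs, rvec, tvec);
    (void)rms;
}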
+// +//M*/ + +#ifndef __OPENCV_CONTRIB_HPP__ +#define __OPENCV_CONTRIB_HPP__ + +#include "opencv2/core/core.hpp" +#include "opencv2/imgproc/imgproc.hpp" +#include "opencv2/features2d/features2d.hpp" +#include "opencv2/objdetect/objdetect.hpp" + +#ifdef __cplusplus + +/****************************************************************************************\ +* Adaptive Skin Detector * +\****************************************************************************************/ + +class CV_EXPORTS CvAdaptiveSkinDetector +{ +private: + enum { + GSD_HUE_LT = 3, + GSD_HUE_UT = 33, + GSD_INTENSITY_LT = 15, + GSD_INTENSITY_UT = 250 + }; + + class CV_EXPORTS Histogram + { + private: + enum { + HistogramSize = (GSD_HUE_UT - GSD_HUE_LT + 1) + }; + + protected: + int findCoverageIndex(double surfaceToCover, int defaultValue = 0); + + public: + CvHistogram *fHistogram; + Histogram(); + virtual ~Histogram(); + + void findCurveThresholds(int &x1, int &x2, double percent = 0.05); + void mergeWith(Histogram *source, double weight); + }; + + int nStartCounter, nFrameCount, nSkinHueLowerBound, nSkinHueUpperBound, nMorphingMethod, nSamplingDivider; + double fHistogramMergeFactor, fHuePercentCovered; + Histogram histogramHueMotion, skinHueHistogram; + IplImage *imgHueFrame, *imgSaturationFrame, *imgLastGrayFrame, *imgMotionFrame, *imgFilteredFrame; + IplImage *imgShrinked, *imgTemp, *imgGrayFrame, *imgHSVFrame; + +protected: + void initData(IplImage *src, int widthDivider, int heightDivider); + void adaptiveFilter(); + +public: + + enum { + MORPHING_METHOD_NONE = 0, + MORPHING_METHOD_ERODE = 1, + MORPHING_METHOD_ERODE_ERODE = 2, + MORPHING_METHOD_ERODE_DILATE = 3 + }; + + CvAdaptiveSkinDetector(int samplingDivider = 1, int morphingMethod = MORPHING_METHOD_NONE); + virtual ~CvAdaptiveSkinDetector(); + + virtual void process(IplImage *inputBGRImage, IplImage *outputHueMask); +}; + + +/****************************************************************************************\ + * Fuzzy MeanShift Tracker * + \****************************************************************************************/ + +class CV_EXPORTS CvFuzzyPoint { +public: + double x, y, value; + + CvFuzzyPoint(double _x, double _y); +}; + +class CV_EXPORTS CvFuzzyCurve { +private: + std::vector points; + double value, centre; + + bool between(double x, double x1, double x2); + +public: + CvFuzzyCurve(); + ~CvFuzzyCurve(); + + void setCentre(double _centre); + double getCentre(); + void clear(); + void addPoint(double x, double y); + double calcValue(double param); + double getValue(); + void setValue(double _value); +}; + +class CV_EXPORTS CvFuzzyFunction { +public: + std::vector curves; + + CvFuzzyFunction(); + ~CvFuzzyFunction(); + void addCurve(CvFuzzyCurve *curve, double value = 0); + void resetValues(); + double calcValue(); + CvFuzzyCurve *newCurve(); +}; + +class CV_EXPORTS CvFuzzyRule { +private: + CvFuzzyCurve *fuzzyInput1, *fuzzyInput2; + CvFuzzyCurve *fuzzyOutput; +public: + CvFuzzyRule(); + ~CvFuzzyRule(); + void setRule(CvFuzzyCurve *c1, CvFuzzyCurve *c2, CvFuzzyCurve *o1); + double calcValue(double param1, double param2); + CvFuzzyCurve *getOutputCurve(); +}; + +class CV_EXPORTS CvFuzzyController { +private: + std::vector rules; +public: + CvFuzzyController(); + ~CvFuzzyController(); + void addRule(CvFuzzyCurve *c1, CvFuzzyCurve *c2, CvFuzzyCurve *o1); + double calcOutput(double param1, double param2); +}; + +class CV_EXPORTS CvFuzzyMeanShiftTracker +{ +private: + class FuzzyResizer + { + private: + CvFuzzyFunction iInput, iOutput; + 
CvFuzzyController fuzzyController; + public: + FuzzyResizer(); + int calcOutput(double edgeDensity, double density); + }; + + class SearchWindow + { + public: + FuzzyResizer *fuzzyResizer; + int x, y; + int width, height, maxWidth, maxHeight, ellipseHeight, ellipseWidth; + int ldx, ldy, ldw, ldh, numShifts, numIters; + int xGc, yGc; + long m00, m01, m10, m11, m02, m20; + double ellipseAngle; + double density; + unsigned int depthLow, depthHigh; + int verticalEdgeLeft, verticalEdgeRight, horizontalEdgeTop, horizontalEdgeBottom; + + SearchWindow(); + ~SearchWindow(); + void setSize(int _x, int _y, int _width, int _height); + void initDepthValues(IplImage *maskImage, IplImage *depthMap); + bool shift(); + void extractInfo(IplImage *maskImage, IplImage *depthMap, bool initDepth); + void getResizeAttribsEdgeDensityLinear(int &resizeDx, int &resizeDy, int &resizeDw, int &resizeDh); + void getResizeAttribsInnerDensity(int &resizeDx, int &resizeDy, int &resizeDw, int &resizeDh); + void getResizeAttribsEdgeDensityFuzzy(int &resizeDx, int &resizeDy, int &resizeDw, int &resizeDh); + bool meanShift(IplImage *maskImage, IplImage *depthMap, int maxIteration, bool initDepth); + }; + +public: + enum TrackingState + { + tsNone = 0, + tsSearching = 1, + tsTracking = 2, + tsSetWindow = 3, + tsDisabled = 10 + }; + + enum ResizeMethod { + rmEdgeDensityLinear = 0, + rmEdgeDensityFuzzy = 1, + rmInnerDensity = 2 + }; + + enum { + MinKernelMass = 1000 + }; + + SearchWindow kernel; + int searchMode; + +private: + enum + { + MaxMeanShiftIteration = 5, + MaxSetSizeIteration = 5 + }; + + void findOptimumSearchWindow(SearchWindow &searchWindow, IplImage *maskImage, IplImage *depthMap, int maxIteration, int resizeMethod, bool initDepth); + +public: + CvFuzzyMeanShiftTracker(); + ~CvFuzzyMeanShiftTracker(); + + void track(IplImage *maskImage, IplImage *depthMap, int resizeMethod, bool resetSearch, int minKernelMass = MinKernelMass); +}; + + +namespace cv +{ + + class CV_EXPORTS Octree + { + public: + struct Node + { + Node() {} + int begin, end; + float x_min, x_max, y_min, y_max, z_min, z_max; + int maxLevels; + bool isLeaf; + int children[8]; + }; + + Octree(); + Octree( const vector& points, int maxLevels = 10, int minPoints = 20 ); + virtual ~Octree(); + + virtual void buildTree( const vector& points, int maxLevels = 10, int minPoints = 20 ); + virtual void getPointsWithinSphere( const Point3f& center, float radius, + vector& points ) const; + const vector& getNodes() const { return nodes; } + private: + int minPoints; + vector points; + vector nodes; + + virtual void buildNext(size_t node_ind); + }; + + + class CV_EXPORTS Mesh3D + { + public: + struct EmptyMeshException {}; + + Mesh3D(); + Mesh3D(const vector& vtx); + ~Mesh3D(); + + void buildOctree(); + void clearOctree(); + float estimateResolution(float tryRatio = 0.1f); + void computeNormals(float normalRadius, int minNeighbors = 20); + void computeNormals(const vector& subset, float normalRadius, int minNeighbors = 20); + + void writeAsVrml(const String& file, const vector& colors = vector()) const; + + vector vtx; + vector normals; + float resolution; + Octree octree; + + const static Point3f allzero; + }; + + class CV_EXPORTS SpinImageModel + { + public: + + /* model parameters, leave unset for default or auto estimate */ + float normalRadius; + int minNeighbors; + + float binSize; + int imageWidth; + + float lambda; + float gamma; + + float T_GeometriccConsistency; + float T_GroupingCorespondances; + + /* public interface */ + SpinImageModel(); + explicit 
SpinImageModel(const Mesh3D& mesh); + ~SpinImageModel(); + + void setLogger(std::ostream* log); + void selectRandomSubset(float ratio); + void setSubset(const vector& subset); + void compute(); + + void match(const SpinImageModel& scene, vector< vector >& result); + + Mat packRandomScaledSpins(bool separateScale = false, size_t xCount = 10, size_t yCount = 10) const; + + size_t getSpinCount() const { return spinImages.rows; } + Mat getSpinImage(size_t index) const { return spinImages.row((int)index); } + const Point3f& getSpinVertex(size_t index) const { return mesh.vtx[subset[index]]; } + const Point3f& getSpinNormal(size_t index) const { return mesh.normals[subset[index]]; } + + const Mesh3D& getMesh() const { return mesh; } + Mesh3D& getMesh() { return mesh; } + + /* static utility functions */ + static bool spinCorrelation(const Mat& spin1, const Mat& spin2, float lambda, float& result); + + static Point2f calcSpinMapCoo(const Point3f& point, const Point3f& vertex, const Point3f& normal); + + static float geometricConsistency(const Point3f& pointScene1, const Point3f& normalScene1, + const Point3f& pointModel1, const Point3f& normalModel1, + const Point3f& pointScene2, const Point3f& normalScene2, + const Point3f& pointModel2, const Point3f& normalModel2); + + static float groupingCreteria(const Point3f& pointScene1, const Point3f& normalScene1, + const Point3f& pointModel1, const Point3f& normalModel1, + const Point3f& pointScene2, const Point3f& normalScene2, + const Point3f& pointModel2, const Point3f& normalModel2, + float gamma); + protected: + void defaultParams(); + + void matchSpinToModel(const Mat& spin, vector& indeces, + vector& corrCoeffs, bool useExtremeOutliers = true) const; + + void repackSpinImages(const vector& mask, Mat& spinImages, bool reAlloc = true) const; + + vector subset; + Mesh3D mesh; + Mat spinImages; + std::ostream* out; + }; + + class CV_EXPORTS TickMeter + { + public: + TickMeter(); + void start(); + void stop(); + + int64 getTimeTicks() const; + double getTimeMicro() const; + double getTimeMilli() const; + double getTimeSec() const; + int64 getCounter() const; + + void reset(); + private: + int64 counter; + int64 sumTime; + int64 startTime; + }; + + CV_EXPORTS std::ostream& operator<<(std::ostream& out, const TickMeter& tm); + + class CV_EXPORTS SelfSimDescriptor + { + public: + SelfSimDescriptor(); + SelfSimDescriptor(int _ssize, int _lsize, + int _startDistanceBucket=DEFAULT_START_DISTANCE_BUCKET, + int _numberOfDistanceBuckets=DEFAULT_NUM_DISTANCE_BUCKETS, + int _nangles=DEFAULT_NUM_ANGLES); + SelfSimDescriptor(const SelfSimDescriptor& ss); + virtual ~SelfSimDescriptor(); + SelfSimDescriptor& operator = (const SelfSimDescriptor& ss); + + size_t getDescriptorSize() const; + Size getGridSize( Size imgsize, Size winStride ) const; + + virtual void compute(const Mat& img, vector& descriptors, Size winStride=Size(), + const vector& locations=vector()) const; + virtual void computeLogPolarMapping(Mat& mappingMask) const; + virtual void SSD(const Mat& img, Point pt, Mat& ssd) const; + + int smallSize; + int largeSize; + int startDistanceBucket; + int numberOfDistanceBuckets; + int numberOfAngles; + + enum { DEFAULT_SMALL_SIZE = 5, DEFAULT_LARGE_SIZE = 41, + DEFAULT_NUM_ANGLES = 20, DEFAULT_START_DISTANCE_BUCKET = 3, + DEFAULT_NUM_DISTANCE_BUCKETS = 7 }; + }; + + + typedef bool (*BundleAdjustCallback)(int iteration, double norm_error, void* user_data); + + class CV_EXPORTS LevMarqSparse { + public: + LevMarqSparse(); + LevMarqSparse(int npoints, // number of 
points + int ncameras, // number of cameras + int nPointParams, // number of params per one point (3 in case of 3D points) + int nCameraParams, // number of parameters per one camera + int nErrParams, // number of parameters in measurement vector + // for 1 point at one camera (2 in case of 2D projections) + Mat& visibility, // visibility matrix. rows correspond to points, columns correspond to cameras + // 1 - point is visible for the camera, 0 - invisible + Mat& P0, // starting vector of parameters, first cameras then points + Mat& X, // measurements, in order of visibility. non visible cases are skipped + TermCriteria criteria, // termination criteria + + // callback for estimation of Jacobian matrices + void (CV_CDECL * fjac)(int i, int j, Mat& point_params, + Mat& cam_params, Mat& A, Mat& B, void* data), + // callback for estimation of backprojection errors + void (CV_CDECL * func)(int i, int j, Mat& point_params, + Mat& cam_params, Mat& estim, void* data), + void* data, // user-specific data passed to the callbacks + BundleAdjustCallback cb, void* user_data + ); + + virtual ~LevMarqSparse(); + + virtual void run( int npoints, // number of points + int ncameras, // number of cameras + int nPointParams, // number of params per one point (3 in case of 3D points) + int nCameraParams, // number of parameters per one camera + int nErrParams, // number of parameters in measurement vector + // for 1 point at one camera (2 in case of 2D projections) + Mat& visibility, // visibility matrix. rows correspond to points, columns correspond to cameras + // 1 - point is visible for the camera, 0 - invisible + Mat& P0, // starting vector of parameters, first cameras then points + Mat& X, // measurements, in order of visibility. non visible cases are skipped + TermCriteria criteria, // termination criteria + + // callback for estimation of Jacobian matrices + void (CV_CDECL * fjac)(int i, int j, Mat& point_params, + Mat& cam_params, Mat& A, Mat& B, void* data), + // callback for estimation of backprojection errors + void (CV_CDECL * func)(int i, int j, Mat& point_params, + Mat& cam_params, Mat& estim, void* data), + void* data // user-specific data passed to the callbacks + ); + + virtual void clear(); + + // useful function to do simple bundle adjustment tasks + static void bundleAdjust(vector& points, // positions of points in global coordinate system (input and output) + const vector >& imagePoints, // projections of 3d points for every camera + const vector >& visibility, // visibility of 3d points for every camera + vector& cameraMatrix, // intrinsic matrices of all cameras (input and output) + vector& R, // rotation matrices of all cameras (input and output) + vector& T, // translation vector of all cameras (input and output) + vector& distCoeffs, // distortion coefficients of all cameras (input and output) + const TermCriteria& criteria= + TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, 30, DBL_EPSILON), + BundleAdjustCallback cb = 0, void* user_data = 0); + + public: + virtual void optimize(CvMat &_vis); //main function that runs minimization + + //iteratively asks for measurement for visible camera-point pairs + void ask_for_proj(CvMat &_vis,bool once=false); + //iteratively asks for Jacobians for every camera_point pair + void ask_for_projac(CvMat &_vis); + + CvMat* err; //error X-hX + double prevErrNorm, errNorm; + double lambda; + CvTermCriteria criteria; + int iters; + + CvMat** U; //size of array is equal to number of cameras + CvMat** V; //size of array is equal to number of points 
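Among the smaller contrib utilities declared earlier in this header, cv::TickMeter is the obvious tool for timing pipeline stages on the Pi; a minimal sketch, with a Gaussian blur standing in for the real workload:

#include <iostream>
#include "opencv2/core/core.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/contrib/contrib.hpp"

int main()
{
    cv::Mat src(480, 640, CV_8UC1, cv::Scalar(128));     // dummy frame
    cv::Mat dst;

    cv::TickMeter tm;
    tm.start();
    cv::GaussianBlur(src, dst, cv::Size(5, 5), 1.5);     // stand-in workload
    tm.stop();

    std::cout << "blur took " << tm.getTimeMilli() << " ms" << std::endl;
    return 0;
}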
+ CvMat** inv_V_star; //inverse of V* + + CvMat** A; + CvMat** B; + CvMat** W; + + CvMat* X; //measurement + CvMat* hX; //current measurement extimation given new parameter vector + + CvMat* prevP; //current already accepted parameter. + CvMat* P; // parameters used to evaluate function with new params + // this parameters may be rejected + + CvMat* deltaP; //computed increase of parameters (result of normal system solution ) + + CvMat** ea; // sum_i AijT * e_ij , used as right part of normal equation + // length of array is j = number of cameras + CvMat** eb; // sum_j BijT * e_ij , used as right part of normal equation + // length of array is i = number of points + + CvMat** Yj; //length of array is i = num_points + + CvMat* S; //big matrix of block Sjk , each block has size num_cam_params x num_cam_params + + CvMat* JtJ_diag; //diagonal of JtJ, used to backup diagonal elements before augmentation + + CvMat* Vis_index; // matrix which element is index of measurement for point i and camera j + + int num_cams; + int num_points; + int num_err_param; + int num_cam_param; + int num_point_param; + + //target function and jacobian pointers, which needs to be initialized + void (*fjac)(int i, int j, Mat& point_params, Mat& cam_params, Mat& A, Mat& B, void* data); + void (*func)(int i, int j, Mat& point_params, Mat& cam_params, Mat& estim, void* data); + + void* data; + + BundleAdjustCallback cb; + void* user_data; + }; + + CV_EXPORTS_W int chamerMatching( Mat& img, Mat& templ, + CV_OUT vector >& results, CV_OUT vector& cost, + double templScale=1, int maxMatches = 20, + double minMatchDistance = 1.0, int padX = 3, + int padY = 3, int scales = 5, double minScale = 0.6, double maxScale = 1.6, + double orientationWeight = 0.5, double truncate = 20); + + + class CV_EXPORTS_W StereoVar + { + public: + // Flags + enum {USE_INITIAL_DISPARITY = 1, USE_EQUALIZE_HIST = 2, USE_SMART_ID = 4, USE_AUTO_PARAMS = 8, USE_MEDIAN_FILTERING = 16}; + enum {CYCLE_O, CYCLE_V}; + enum {PENALIZATION_TICHONOV, PENALIZATION_CHARBONNIER, PENALIZATION_PERONA_MALIK}; + + //! the default constructor + CV_WRAP StereoVar(); + + //! the full constructor taking all the necessary algorithm parameters + CV_WRAP StereoVar(int levels, double pyrScale, int nIt, int minDisp, int maxDisp, int poly_n, double poly_sigma, float fi, float lambda, int penalization, int cycle, int flags); + + //! the destructor + virtual ~StereoVar(); + + //! 
the stereo correspondence operator that computes disparity map for the specified rectified stereo pair + CV_WRAP_AS(compute) virtual void operator()(const Mat& left, const Mat& right, CV_OUT Mat& disp); + + CV_PROP_RW int levels; + CV_PROP_RW double pyrScale; + CV_PROP_RW int nIt; + CV_PROP_RW int minDisp; + CV_PROP_RW int maxDisp; + CV_PROP_RW int poly_n; + CV_PROP_RW double poly_sigma; + CV_PROP_RW float fi; + CV_PROP_RW float lambda; + CV_PROP_RW int penalization; + CV_PROP_RW int cycle; + CV_PROP_RW int flags; + + private: + void autoParams(); + void FMG(Mat &I1, Mat &I2, Mat &I2x, Mat &u, int level); + void VCycle_MyFAS(Mat &I1_h, Mat &I2_h, Mat &I2x_h, Mat &u_h, int level); + void VariationalSolver(Mat &I1_h, Mat &I2_h, Mat &I2x_h, Mat &u_h, int level); + }; + + CV_EXPORTS void polyfit(const Mat& srcx, const Mat& srcy, Mat& dst, int order); + + class CV_EXPORTS Directory + { + public: + static std::vector GetListFiles ( const std::string& path, const std::string & exten = "*", bool addPath = true ); + static std::vector GetListFilesR ( const std::string& path, const std::string & exten = "*", bool addPath = true ); + static std::vector GetListFolders( const std::string& path, const std::string & exten = "*", bool addPath = true ); + }; + + /* + * Generation of a set of different colors by the following way: + * 1) generate more then need colors (in "factor" times) in RGB, + * 2) convert them to Lab, + * 3) choose the needed count of colors from the set that are more different from + * each other, + * 4) convert the colors back to RGB + */ + CV_EXPORTS void generateColors( std::vector& colors, size_t count, size_t factor=100 ); + + + /* + * Estimate the rigid body motion from frame0 to frame1. The method is based on the paper + * "Real-Time Visual Odometry from Dense RGB-D Images", F. Steinbucker, J. Strum, D. Cremers, ICCV, 2011. + */ + enum { ROTATION = 1, + TRANSLATION = 2, + RIGID_BODY_MOTION = 4 + }; + CV_EXPORTS bool RGBDOdometry( Mat& Rt, const Mat& initRt, + const Mat& image0, const Mat& depth0, const Mat& mask0, + const Mat& image1, const Mat& depth1, const Mat& mask1, + const Mat& cameraMatrix, float minDepth=0.f, float maxDepth=4.f, float maxDepthDiff=0.07f, + const std::vector& iterCounts=std::vector(), + const std::vector& minGradientMagnitudes=std::vector(), + int transformType=RIGID_BODY_MOTION ); + + /** + *Bilinear interpolation technique. + * + *The value of a desired cortical pixel is obtained through a bilinear interpolation of the values + *of the four nearest neighbouring Cartesian pixels to the center of the RF. + *The same principle is applied to the inverse transformation. + * + *More details can be found in http://dx.doi.org/10.1007/978-3-642-23968-7_5 + */ + class CV_EXPORTS LogPolar_Interp + { + public: + + LogPolar_Interp() {} + + /** + *Constructor + *\param w the width of the input image + *\param h the height of the input image + *\param center the transformation center: where the output precision is maximal + *\param R the number of rings of the cortical image (default value 70 pixel) + *\param ro0 the radius of the blind spot (default value 3 pixel) + *\param interp interpolation algorithm + *\param full \a 1 (default value) means that the retinal image (the inverse transform) is computed within the circumscribing circle. + * \a 0 means that the retinal image is computed within the inscribed circle. + *\param S the number of sectors of the cortical image (default value 70 pixel). 
+ * Its value is usually internally computed to obtain a pixel aspect ratio equals to 1. + *\param sp \a 1 (default value) means that the parameter \a S is internally computed. + * \a 0 means that the parameter \a S is provided by the user. + */ + LogPolar_Interp(int w, int h, Point2i center, int R=70, double ro0=3.0, + int interp=INTER_LINEAR, int full=1, int S=117, int sp=1); + /** + *Transformation from Cartesian image to cortical (log-polar) image. + *\param source the Cartesian image + *\return the transformed image (cortical image) + */ + const Mat to_cortical(const Mat &source); + /** + *Transformation from cortical image to retinal (inverse log-polar) image. + *\param source the cortical image + *\return the transformed image (retinal image) + */ + const Mat to_cartesian(const Mat &source); + /** + *Destructor + */ + ~LogPolar_Interp(); + + protected: + + Mat Rsri; + Mat Csri; + + int S, R, M, N; + int top, bottom,left,right; + double ro0, romax, a, q; + int interp; + + Mat ETAyx; + Mat CSIyx; + + void create_map(int M, int N, int R, int S, double ro0); + }; + + /** + *Overlapping circular receptive fields technique + * + *The Cartesian plane is divided in two regions: the fovea and the periphery. + *The fovea (oversampling) is handled by using the bilinear interpolation technique described above, whereas in + *the periphery we use the overlapping Gaussian circular RFs. + * + *More details can be found in http://dx.doi.org/10.1007/978-3-642-23968-7_5 + */ + class CV_EXPORTS LogPolar_Overlapping + { + public: + LogPolar_Overlapping() {} + + /** + *Constructor + *\param w the width of the input image + *\param h the height of the input image + *\param center the transformation center: where the output precision is maximal + *\param R the number of rings of the cortical image (default value 70 pixel) + *\param ro0 the radius of the blind spot (default value 3 pixel) + *\param full \a 1 (default value) means that the retinal image (the inverse transform) is computed within the circumscribing circle. + * \a 0 means that the retinal image is computed within the inscribed circle. + *\param S the number of sectors of the cortical image (default value 70 pixel). + * Its value is usually internally computed to obtain a pixel aspect ratio equals to 1. + *\param sp \a 1 (default value) means that the parameter \a S is internally computed. + * \a 0 means that the parameter \a S is provided by the user. + */ + LogPolar_Overlapping(int w, int h, Point2i center, int R=70, + double ro0=3.0, int full=1, int S=117, int sp=1); + /** + *Transformation from Cartesian image to cortical (log-polar) image. + *\param source the Cartesian image + *\return the transformed image (cortical image) + */ + const Mat to_cortical(const Mat &source); + /** + *Transformation from cortical image to retinal (inverse log-polar) image. 
+ *\param source the cortical image + *\return the transformed image (retinal image) + */ + const Mat to_cartesian(const Mat &source); + /** + *Destructor + */ + ~LogPolar_Overlapping(); + + protected: + + Mat Rsri; + Mat Csri; + vector Rsr; + vector Csr; + vector Wsr; + + int S, R, M, N, ind1; + int top, bottom,left,right; + double ro0, romax, a, q; + + struct kernel + { + kernel() { w = 0; } + vector weights; + int w; + }; + + Mat ETAyx; + Mat CSIyx; + vector w_ker_2D; + + void create_map(int M, int N, int R, int S, double ro0); + }; + + /** + * Adjacent receptive fields technique + * + *All the Cartesian pixels, whose coordinates in the cortical domain share the same integer part, are assigned to the same RF. + *The precision of the boundaries of the RF can be improved by breaking each pixel into subpixels and assigning each of them to the correct RF. + *This technique is implemented from: Traver, V., Pla, F.: Log-polar mapping template design: From task-level requirements + *to geometry parameters. Image Vision Comput. 26(10) (2008) 1354-1370 + * + *More details can be found in http://dx.doi.org/10.1007/978-3-642-23968-7_5 + */ + class CV_EXPORTS LogPolar_Adjacent + { + public: + LogPolar_Adjacent() {} + + /** + *Constructor + *\param w the width of the input image + *\param h the height of the input image + *\param center the transformation center: where the output precision is maximal + *\param R the number of rings of the cortical image (default value 70 pixel) + *\param ro0 the radius of the blind spot (default value 3 pixel) + *\param smin the size of the subpixel (default value 0.25 pixel) + *\param full \a 1 (default value) means that the retinal image (the inverse transform) is computed within the circumscribing circle. + * \a 0 means that the retinal image is computed within the inscribed circle. + *\param S the number of sectors of the cortical image (default value 70 pixel). + * Its value is usually internally computed to obtain a pixel aspect ratio equals to 1. + *\param sp \a 1 (default value) means that the parameter \a S is internally computed. + * \a 0 means that the parameter \a S is provided by the user. + */ + LogPolar_Adjacent(int w, int h, Point2i center, int R=70, double ro0=3.0, double smin=0.25, int full=1, int S=117, int sp=1); + /** + *Transformation from Cartesian image to cortical (log-polar) image. + *\param source the Cartesian image + *\return the transformed image (cortical image) + */ + const Mat to_cortical(const Mat &source); + /** + *Transformation from cortical image to retinal (inverse log-polar) image. + *\param source the cortical image + *\return the transformed image (retinal image) + */ + const Mat to_cartesian(const Mat &source); + /** + *Destructor + */ + ~LogPolar_Adjacent(); + + protected: + struct pixel + { + pixel() { u = v = 0; a = 0.; } + int u; + int v; + double a; + }; + int S, R, M, N; + int top, bottom,left,right; + double ro0, romax, a, q; + vector > L; + vector A; + + void subdivide_recursively(double x, double y, int i, int j, double length, double smin); + bool get_uv(double x, double y, int&u, int&v); + void create_map(int M, int N, int R, int S, double ro0, double smin); + }; + + CV_EXPORTS Mat subspaceProject(InputArray W, InputArray mean, InputArray src); + CV_EXPORTS Mat subspaceReconstruct(InputArray W, InputArray mean, InputArray src); + + class CV_EXPORTS LDA + { + public: + // Initializes a LDA with num_components (default 0). 
+ LDA(int num_components = 0) : + _num_components(num_components) {}; + + // Initializes and performs a Discriminant Analysis with Fisher's + // Optimization Criterion on given data in src and corresponding labels + // in labels. If 0 (or less) number of components are given, they are + // automatically determined for given data in computation. + LDA(const Mat& src, vector labels, + int num_components = 0) : + _num_components(num_components) + { + this->compute(src, labels); //! compute eigenvectors and eigenvalues + } + + // Initializes and performs a Discriminant Analysis with Fisher's + // Optimization Criterion on given data in src and corresponding labels + // in labels. If 0 (or less) number of components are given, they are + // automatically determined for given data in computation. + LDA(InputArrayOfArrays src, InputArray labels, + int num_components = 0) : + _num_components(num_components) + { + this->compute(src, labels); //! compute eigenvectors and eigenvalues + } + + // Serializes this object to a given filename. + void save(const string& filename) const; + + // Deserializes this object from a given filename. + void load(const string& filename); + + // Serializes this object to a given cv::FileStorage. + void save(FileStorage& fs) const; + + // Deserializes this object from a given cv::FileStorage. + void load(const FileStorage& node); + + // Destructor. + ~LDA() {} + + /** Compute the discriminants for data in src (row aligned) and labels. + */ + void compute(InputArrayOfArrays src, InputArray labels); + + /** Projects samples into the LDA subspace. + src may be one or more row aligned samples. + */ + Mat project(InputArray src); + + /** Reconstructs projections from the LDA subspace. + src may be one or more row aligned projections. + */ + Mat reconstruct(InputArray src); + + // Returns the eigenvectors of this LDA. + Mat eigenvectors() const { return _eigenvectors; }; + + // Returns the eigenvalues of this LDA. + Mat eigenvalues() const { return _eigenvalues; } + + protected: + bool _dataAsRow; // unused, but needed for ABI compatibility. + int _num_components; + Mat _eigenvectors; + Mat _eigenvalues; + + void lda(InputArrayOfArrays src, InputArray labels); + }; + + class CV_EXPORTS_W FaceRecognizer : public Algorithm + { + public: + //! virtual destructor + virtual ~FaceRecognizer() {} + + // Trains a FaceRecognizer. + CV_WRAP virtual void train(InputArrayOfArrays src, InputArray labels) = 0; + + // Updates a FaceRecognizer. + CV_WRAP void update(InputArrayOfArrays src, InputArray labels); + + // Gets a prediction from a FaceRecognizer. + virtual int predict(InputArray src) const = 0; + + // Predicts the label and confidence for a given sample. + CV_WRAP virtual void predict(InputArray src, CV_OUT int &label, CV_OUT double &confidence) const = 0; + + // Serializes this object to a given filename. + CV_WRAP virtual void save(const string& filename) const; + + // Deserializes this object from a given filename. + CV_WRAP virtual void load(const string& filename); + + // Serializes this object to a given cv::FileStorage. + virtual void save(FileStorage& fs) const = 0; + + // Deserializes this object from a given cv::FileStorage. + virtual void load(const FileStorage& fs) = 0; + + // Sets additional information as pairs label - info. 
+ void setLabelsInfo(const std::map& labelsInfo); + + // Gets string information by label + string getLabelInfo(const int &label); + + // Gets labels by string + vector getLabelsByString(const string& str); + }; + + CV_EXPORTS_W Ptr createEigenFaceRecognizer(int num_components = 0, double threshold = DBL_MAX); + CV_EXPORTS_W Ptr createFisherFaceRecognizer(int num_components = 0, double threshold = DBL_MAX); + CV_EXPORTS_W Ptr createLBPHFaceRecognizer(int radius=1, int neighbors=8, + int grid_x=8, int grid_y=8, double threshold = DBL_MAX); + + enum + { + COLORMAP_AUTUMN = 0, + COLORMAP_BONE = 1, + COLORMAP_JET = 2, + COLORMAP_WINTER = 3, + COLORMAP_RAINBOW = 4, + COLORMAP_OCEAN = 5, + COLORMAP_SUMMER = 6, + COLORMAP_SPRING = 7, + COLORMAP_COOL = 8, + COLORMAP_HSV = 9, + COLORMAP_PINK = 10, + COLORMAP_HOT = 11 + }; + + CV_EXPORTS_W void applyColorMap(InputArray src, OutputArray dst, int colormap); + + CV_EXPORTS bool initModule_contrib(); +} + +#include "opencv2/contrib/retina.hpp" + +#include "opencv2/contrib/openfabmap.hpp" + +#endif + +#endif diff --git a/src/c/hardware/rasberrypi/libraries/opencv/opencv2/contrib/detection_based_tracker.hpp b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/contrib/detection_based_tracker.hpp new file mode 100644 index 0000000..56aa1cc --- /dev/null +++ b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/contrib/detection_based_tracker.hpp @@ -0,0 +1,106 @@ +#pragma once + +#if defined(__linux__) || defined(LINUX) || defined(__APPLE__) || defined(ANDROID) + +#include +#include + +#include + +class DetectionBasedTracker +{ + public: + struct Parameters + { + int minObjectSize; + int maxObjectSize; + double scaleFactor; + int maxTrackLifetime; + int minNeighbors; + int minDetectionPeriod; //the minimal time between run of the big object detector (on the whole frame) in ms (1000 mean 1 sec), default=0 + + Parameters(); + }; + + DetectionBasedTracker(const std::string& cascadeFilename, const Parameters& params); + virtual ~DetectionBasedTracker(); + + virtual bool run(); + virtual void stop(); + virtual void resetTracking(); + + virtual void process(const cv::Mat& imageGray); + + bool setParameters(const Parameters& params); + const Parameters& getParameters(); + + + typedef std::pair Object; + virtual void getObjects(std::vector& result) const; + virtual void getObjects(std::vector& result) const; + + protected: + class SeparateDetectionWork; + cv::Ptr separateDetectionWork; + friend void* workcycleObjectDetectorFunction(void* p); + + + struct InnerParameters + { + int numLastPositionsToTrack; + int numStepsToWaitBeforeFirstShow; + int numStepsToTrackWithoutDetectingIfObjectHasNotBeenShown; + int numStepsToShowWithoutDetecting; + + float coeffTrackingWindowSize; + float coeffObjectSizeToTrack; + float coeffObjectSpeedUsingInPrediction; + + InnerParameters(); + }; + Parameters parameters; + InnerParameters innerParameters; + + struct TrackedObject + { + typedef std::vector PositionsVector; + + PositionsVector lastPositions; + + int numDetectedFrames; + int numFramesNotDetected; + int id; + + TrackedObject(const cv::Rect& rect):numDetectedFrames(1), numFramesNotDetected(0) + { + lastPositions.push_back(rect); + id=getNextId(); + }; + + static int getNextId() + { + static int _id=0; + return _id++; + } + }; + + int numTrackedSteps; + std::vector trackedObjects; + + std::vector weightsPositionsSmoothing; + std::vector weightsSizesSmoothing; + + cv::CascadeClassifier cascadeForTracking; + + + void updateTrackedObjects(const std::vector& detectedObjects); + 
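The FaceRecognizer factory functions above (Eigen, Fisher, LBPH) are the simplest route to face recognition with these headers; a minimal LBPH sketch, assuming the aligned face crops and integer labels are produced elsewhere in the pipeline:

#include <vector>
#include "opencv2/core/core.hpp"
#include "opencv2/contrib/contrib.hpp"

// faces: equally sized grayscale crops; labels: one id per crop (both assumed given).
static int recognizeFace(const std::vector<cv::Mat>& faces,
                         const std::vector<int>& labels,
                         const cv::Mat& probe)
{
    cv::Ptr<cv::FaceRecognizer> model = cv::createLBPHFaceRecognizer();
    model->train(faces, labels);

    int predictedLabel = -1;
    double confidence = 0.0;
    model->predict(probe, predictedLabel, confidence);   // confidence: lower is closer
    return predictedLabel;
}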
cv::Rect calcTrackedObjectPositionToShow(int i) const; + void detectInRegion(const cv::Mat& img, const cv::Rect& r, std::vector& detectedObjectsInRegions); +}; + +namespace cv +{ + using ::DetectionBasedTracker; +} //end of cv namespace + +#endif diff --git a/src/c/hardware/rasberrypi/libraries/opencv/opencv2/contrib/hybridtracker.hpp b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/contrib/hybridtracker.hpp new file mode 100644 index 0000000..3a1f722 --- /dev/null +++ b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/contrib/hybridtracker.hpp @@ -0,0 +1,220 @@ +//*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2008-2011, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of Intel Corporation may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef __OPENCV_HYBRIDTRACKER_H_ +#define __OPENCV_HYBRIDTRACKER_H_ + +#include "opencv2/core/core.hpp" +#include "opencv2/core/operations.hpp" +#include "opencv2/imgproc/imgproc.hpp" +#include "opencv2/features2d/features2d.hpp" +#include "opencv2/video/tracking.hpp" +#include "opencv2/ml/ml.hpp" + +#ifdef __cplusplus + +namespace cv +{ + +// Motion model for tracking algorithm. Currently supports objects that do not move much. +// To add Kalman filter +struct CV_EXPORTS CvMotionModel +{ + enum {LOW_PASS_FILTER = 0, KALMAN_FILTER = 1, EM = 2}; + + CvMotionModel() + { + } + + float low_pass_gain; // low pass gain +}; + +// Mean Shift Tracker parameters for specifying use of HSV channel and CamShift parameters. 
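DetectionBasedTracker, declared just above, splits work between an occasional full-frame cascade detection and cheap per-frame tracking, which fits the Pi's CPU budget; a rough usage sketch, where the capture source and cascade path are placeholders:

#include <string>
#include <vector>
#include "opencv2/core/core.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/contrib/detection_based_tracker.hpp"

static void trackObjects(cv::VideoCapture& cap, const std::string& cascadePath)
{
    DetectionBasedTracker::Parameters params;            // header defaults
    DetectionBasedTracker tracker(cascadePath, params);
    tracker.run();                                       // spawn the background detector

    cv::Mat frame, gray;
    std::vector<cv::Rect> objects;
    while (cap.read(frame))
    {
        cv::cvtColor(frame, gray, CV_BGR2GRAY);
        tracker.process(gray);                           // fast per-frame update
        tracker.getObjects(objects);                     // currently tracked rectangles
    }
    tracker.stop();
}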
+struct CV_EXPORTS CvMeanShiftTrackerParams +{ + enum { H = 0, HS = 1, HSV = 2 }; + CvMeanShiftTrackerParams(int tracking_type = CvMeanShiftTrackerParams::HS, + CvTermCriteria term_crit = CvTermCriteria()); + + int tracking_type; + vector h_range; + vector s_range; + vector v_range; + CvTermCriteria term_crit; +}; + +// Feature tracking parameters +struct CV_EXPORTS CvFeatureTrackerParams +{ + enum { SIFT = 0, SURF = 1, OPTICAL_FLOW = 2 }; + CvFeatureTrackerParams(int featureType = 0, int windowSize = 0) + { + feature_type = featureType; + window_size = windowSize; + } + + int feature_type; // Feature type to use + int window_size; // Window size in pixels around which to search for new window +}; + +// Hybrid Tracking parameters for specifying weights of individual trackers and motion model. +struct CV_EXPORTS CvHybridTrackerParams +{ + CvHybridTrackerParams(float ft_tracker_weight = 0.5, float ms_tracker_weight = 0.5, + CvFeatureTrackerParams ft_params = CvFeatureTrackerParams(), + CvMeanShiftTrackerParams ms_params = CvMeanShiftTrackerParams(), + CvMotionModel model = CvMotionModel()); + + float ft_tracker_weight; + float ms_tracker_weight; + CvFeatureTrackerParams ft_params; + CvMeanShiftTrackerParams ms_params; + int motion_model; + float low_pass_gain; +}; + +// Performs Camshift using parameters from MeanShiftTrackerParams +class CV_EXPORTS CvMeanShiftTracker +{ +private: + Mat hsv, hue; + Mat backproj; + Mat mask, maskroi; + MatND hist; + Rect prev_trackwindow; + RotatedRect prev_trackbox; + Point2f prev_center; + +public: + CvMeanShiftTrackerParams params; + + CvMeanShiftTracker(); + explicit CvMeanShiftTracker(CvMeanShiftTrackerParams _params); + ~CvMeanShiftTracker(); + void newTrackingWindow(Mat image, Rect selection); + RotatedRect updateTrackingWindow(Mat image); + Mat getHistogramProjection(int type); + void setTrackingWindow(Rect _window); + Rect getTrackingWindow(); + RotatedRect getTrackingEllipse(); + Point2f getTrackingCenter(); +}; + +// Performs SIFT/SURF feature tracking using parameters from FeatureTrackerParams +class CV_EXPORTS CvFeatureTracker +{ +private: + Ptr dd; + Ptr matcher; + vector matches; + + Mat prev_image; + Mat prev_image_bw; + Rect prev_trackwindow; + Point2d prev_center; + + int ittr; + vector features[2]; + +public: + Mat disp_matches; + CvFeatureTrackerParams params; + + CvFeatureTracker(); + explicit CvFeatureTracker(CvFeatureTrackerParams params); + ~CvFeatureTracker(); + void newTrackingWindow(Mat image, Rect selection); + Rect updateTrackingWindow(Mat image); + Rect updateTrackingWindowWithSIFT(Mat image); + Rect updateTrackingWindowWithFlow(Mat image); + void setTrackingWindow(Rect _window); + Rect getTrackingWindow(); + Point2f getTrackingCenter(); +}; + +// Performs Hybrid Tracking and combines individual trackers using EM or filters +class CV_EXPORTS CvHybridTracker +{ +private: + CvMeanShiftTracker* mstracker; + CvFeatureTracker* fttracker; + + CvMat* samples; + CvMat* labels; + + Rect prev_window; + Point2f prev_center; + Mat prev_proj; + RotatedRect trackbox; + + int ittr; + Point2f curr_center; + + inline float getL2Norm(Point2f p1, Point2f p2); + Mat getDistanceProjection(Mat image, Point2f center); + Mat getGaussianProjection(Mat image, int ksize, double sigma, Point2f center); + void updateTrackerWithEM(Mat image); + void updateTrackerWithLowPassFilter(Mat image); + +public: + CvHybridTrackerParams params; + CvHybridTracker(); + explicit CvHybridTracker(CvHybridTrackerParams params); + ~CvHybridTracker(); + + void newTracker(Mat 
image, Rect selection); + void updateTracker(Mat image); + Rect getTrackingWindow(); +}; + +typedef CvMotionModel MotionModel; +typedef CvMeanShiftTrackerParams MeanShiftTrackerParams; +typedef CvFeatureTrackerParams FeatureTrackerParams; +typedef CvHybridTrackerParams HybridTrackerParams; +typedef CvMeanShiftTracker MeanShiftTracker; +typedef CvFeatureTracker FeatureTracker; +typedef CvHybridTracker HybridTracker; +} + +#endif + +#endif diff --git a/src/c/hardware/rasberrypi/libraries/opencv/opencv2/contrib/openfabmap.hpp b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/contrib/openfabmap.hpp new file mode 100644 index 0000000..6b2834e --- /dev/null +++ b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/contrib/openfabmap.hpp @@ -0,0 +1,405 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// This file originates from the openFABMAP project: +// [http://code.google.com/p/openfabmap/] +// +// For published work which uses all or part of OpenFABMAP, please cite: +// [http://ieeexplore.ieee.org/xpl/articleDetails.jsp?arnumber=6224843] +// +// Original Algorithm by Mark Cummins and Paul Newman: +// [http://ijr.sagepub.com/content/27/6/647.short] +// [http://ieeexplore.ieee.org/xpl/articleDetails.jsp?arnumber=5613942] +// [http://ijr.sagepub.com/content/30/9/1100.abstract] +// +// License Agreement +// +// Copyright (C) 2012 Arren Glover [aj.glover@qut.edu.au] and +// Will Maddern [w.maddern@qut.edu.au], all rights reserved. +// +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. 
+// +//M*/ + +#ifndef __OPENCV_OPENFABMAP_H_ +#define __OPENCV_OPENFABMAP_H_ + +#include "opencv2/core/core.hpp" +#include "opencv2/features2d/features2d.hpp" + +#include +#include +#include +#include +#include + +namespace cv { + +namespace of2 { + +using std::list; +using std::map; +using std::multiset; + +/* + Return data format of a FABMAP compare call +*/ +struct CV_EXPORTS IMatch { + + IMatch() : + queryIdx(-1), imgIdx(-1), likelihood(-DBL_MAX), match(-DBL_MAX) { + } + IMatch(int _queryIdx, int _imgIdx, double _likelihood, double _match) : + queryIdx(_queryIdx), imgIdx(_imgIdx), likelihood(_likelihood), match( + _match) { + } + + int queryIdx; //query index + int imgIdx; //test index + + double likelihood; //raw loglikelihood + double match; //normalised probability + + bool operator<(const IMatch& m) const { + return match < m.match; + } + +}; + +/* + Base FabMap class. Each FabMap method inherits from this class. +*/ +class CV_EXPORTS FabMap { +public: + + //FabMap options + enum { + MEAN_FIELD = 1, + SAMPLED = 2, + NAIVE_BAYES = 4, + CHOW_LIU = 8, + MOTION_MODEL = 16 + }; + + FabMap(const Mat& clTree, double PzGe, double PzGNe, int flags, + int numSamples = 0); + virtual ~FabMap(); + + //methods to add training data for sampling method + virtual void addTraining(const Mat& queryImgDescriptor); + virtual void addTraining(const vector& queryImgDescriptors); + + //methods to add to the test data + virtual void add(const Mat& queryImgDescriptor); + virtual void add(const vector& queryImgDescriptors); + + //accessors + const vector& getTrainingImgDescriptors() const; + const vector& getTestImgDescriptors() const; + + //Main FabMap image comparison + void compare(const Mat& queryImgDescriptor, + vector& matches, bool addQuery = false, + const Mat& mask = Mat()); + void compare(const Mat& queryImgDescriptor, + const Mat& testImgDescriptors, vector& matches, + const Mat& mask = Mat()); + void compare(const Mat& queryImgDescriptor, + const vector& testImgDescriptors, + vector& matches, const Mat& mask = Mat()); + void compare(const vector& queryImgDescriptors, vector< + IMatch>& matches, bool addQuery = false, const Mat& mask = + Mat()); + void compare(const vector& queryImgDescriptors, + const vector& testImgDescriptors, + vector& matches, const Mat& mask = Mat()); + +protected: + + void compareImgDescriptor(const Mat& queryImgDescriptor, + int queryIndex, const vector& testImgDescriptors, + vector& matches); + + void addImgDescriptor(const Mat& queryImgDescriptor); + + //the getLikelihoods method is overwritten for each different FabMap + //method. 
+ virtual void getLikelihoods(const Mat& queryImgDescriptor, + const vector& testImgDescriptors, + vector& matches); + virtual double getNewPlaceLikelihood(const Mat& queryImgDescriptor); + + //turn likelihoods into probabilities (also add in motion model if used) + void normaliseDistribution(vector& matches); + + //Chow-Liu Tree + int pq(int q); + double Pzq(int q, bool zq); + double PzqGzpq(int q, bool zq, bool zpq); + + //FAB-MAP Core + double PzqGeq(bool zq, bool eq); + double PeqGL(int q, bool Lzq, bool eq); + double PzqGL(int q, bool zq, bool zpq, bool Lzq); + double PzqGzpqL(int q, bool zq, bool zpq, bool Lzq); + double (FabMap::*PzGL)(int q, bool zq, bool zpq, bool Lzq); + + //data + Mat clTree; + vector trainingImgDescriptors; + vector testImgDescriptors; + vector priorMatches; + + //parameters + double PzGe; + double PzGNe; + double Pnew; + + double mBias; + double sFactor; + + int flags; + int numSamples; + +}; + +/* + The original FAB-MAP algorithm, developed based on: + http://ijr.sagepub.com/content/27/6/647.short +*/ +class CV_EXPORTS FabMap1: public FabMap { +public: + FabMap1(const Mat& clTree, double PzGe, double PzGNe, int flags, + int numSamples = 0); + virtual ~FabMap1(); +protected: + + //FabMap1 implementation of likelihood comparison + void getLikelihoods(const Mat& queryImgDescriptor, const vector< + Mat>& testImgDescriptors, vector& matches); +}; + +/* + A computationally faster version of the original FAB-MAP algorithm. A look- + up-table is used to precompute many of the reoccuring calculations +*/ +class CV_EXPORTS FabMapLUT: public FabMap { +public: + FabMapLUT(const Mat& clTree, double PzGe, double PzGNe, + int flags, int numSamples = 0, int precision = 6); + virtual ~FabMapLUT(); +protected: + + //FabMap look-up-table implementation of the likelihood comparison + void getLikelihoods(const Mat& queryImgDescriptor, const vector< + Mat>& testImgDescriptors, vector& matches); + + //precomputed data + int (*table)[8]; + + //data precision + int precision; +}; + +/* + The Accelerated FAB-MAP algorithm, developed based on: + http://ieeexplore.ieee.org/xpl/articleDetails.jsp?arnumber=5613942 +*/ +class CV_EXPORTS FabMapFBO: public FabMap { +public: + FabMapFBO(const Mat& clTree, double PzGe, double PzGNe, int flags, + int numSamples = 0, double rejectionThreshold = 1e-8, double PsGd = + 1e-8, int bisectionStart = 512, int bisectionIts = 9); + virtual ~FabMapFBO(); + +protected: + + //FabMap Fast Bail-out implementation of the likelihood comparison + void getLikelihoods(const Mat& queryImgDescriptor, const vector< + Mat>& testImgDescriptors, vector& matches); + + //stucture used to determine word comparison order + struct WordStats { + WordStats() : + q(0), info(0), V(0), M(0) { + } + + WordStats(int _q, double _info) : + q(_q), info(_info), V(0), M(0) { + } + + int q; + double info; + mutable double V; + mutable double M; + + bool operator<(const WordStats& w) const { + return info < w.info; + } + + }; + + //private fast bail-out necessary functions + void setWordStatistics(const Mat& queryImgDescriptor, multiset& wordData); + double limitbisection(double v, double m); + double bennettInequality(double v, double m, double delta); + static bool compInfo(const WordStats& first, const WordStats& second); + + //parameters + double PsGd; + double rejectionThreshold; + int bisectionStart; + int bisectionIts; +}; + +/* + The FAB-MAP2.0 algorithm, developed based on: + http://ijr.sagepub.com/content/30/9/1100.abstract +*/ +class CV_EXPORTS FabMap2: public FabMap { +public: 
+ + FabMap2(const Mat& clTree, double PzGe, double PzGNe, int flags); + virtual ~FabMap2(); + + //FabMap2 builds the inverted index and requires an additional training/test + //add function + void addTraining(const Mat& queryImgDescriptors) { + FabMap::addTraining(queryImgDescriptors); + } + void addTraining(const vector& queryImgDescriptors); + + void add(const Mat& queryImgDescriptors) { + FabMap::add(queryImgDescriptors); + } + void add(const vector& queryImgDescriptors); + +protected: + + //FabMap2 implementation of the likelihood comparison + void getLikelihoods(const Mat& queryImgDescriptor, const vector< + Mat>& testImgDescriptors, vector& matches); + double getNewPlaceLikelihood(const Mat& queryImgDescriptor); + + //the likelihood function using the inverted index + void getIndexLikelihoods(const Mat& queryImgDescriptor, vector< + double>& defaults, map >& invertedMap, + vector& matches); + void addToIndex(const Mat& queryImgDescriptor, + vector& defaults, + map >& invertedMap); + + //data + vector d1, d2, d3, d4; + vector > children; + + // TODO: inverted map a vector? + + vector trainingDefaults; + map > trainingInvertedMap; + + vector testDefaults; + map > testInvertedMap; + +}; +/* + A Chow-Liu tree is required by FAB-MAP. The Chow-Liu tree provides an + estimate of the full distribution of visual words using a minimum spanning + tree. The tree is generated through training data. +*/ +class CV_EXPORTS ChowLiuTree { +public: + ChowLiuTree(); + virtual ~ChowLiuTree(); + + //add data to the chow-liu tree before calling make + void add(const Mat& imgDescriptor); + void add(const vector& imgDescriptors); + + const vector& getImgDescriptors() const; + + Mat make(double infoThreshold = 0.0); + +private: + vector imgDescriptors; + Mat mergedImgDescriptors; + + typedef struct info { + float score; + short word1; + short word2; + } info; + + //probabilities extracted from mergedImgDescriptors + double P(int a, bool za); + double JP(int a, bool za, int b, bool zb); //a & b + double CP(int a, bool za, int b, bool zb); // a | b + + //calculating mutual information of all edges + void createBaseEdges(list& edges, double infoThreshold); + double calcMutInfo(int word1, int word2); + static bool sortInfoScores(const info& first, const info& second); + + //selecting minimum spanning egdges with maximum information + bool reduceEdgesToMinSpan(list& edges); + + //building the tree sctructure + Mat buildTree(int root_word, list &edges); + void recAddToTree(Mat &cltree, int q, int pq, + list &remaining_edges); + vector extractChildren(list &remaining_edges, int q); + +}; + +/* + A custom vocabulary training method based on: + http://www.springerlink.com/content/d1h6j8x552532003/ +*/ +class CV_EXPORTS BOWMSCTrainer: public BOWTrainer { +public: + BOWMSCTrainer(double clusterSize = 0.4); + virtual ~BOWMSCTrainer(); + + // Returns trained vocabulary (i.e. cluster centers). + virtual Mat cluster() const; + virtual Mat cluster(const Mat& descriptors) const; + +protected: + + double clusterSize; + +}; + +} + +} + +#endif /* OPENFABMAP_H_ */ diff --git a/src/c/hardware/rasberrypi/libraries/opencv/opencv2/contrib/retina.hpp b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/contrib/retina.hpp new file mode 100644 index 0000000..f261bb4 --- /dev/null +++ b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/contrib/retina.hpp @@ -0,0 +1,354 @@ +/*#****************************************************************************** + ** IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. 
+ ** + ** By downloading, copying, installing or using the software you agree to this license. + ** If you do not agree to this license, do not download, install, + ** copy or use the software. + ** + ** + ** HVStools : interfaces allowing OpenCV users to integrate Human Vision System models. Presented models originate from Jeanny Herault's original research and have been reused and adapted by the author&collaborators for computed vision applications since his thesis with Alice Caplier at Gipsa-Lab. + ** Use: extract still images & image sequences features, from contours details to motion spatio-temporal features, etc. for high level visual scene analysis. Also contribute to image enhancement/compression such as tone mapping. + ** + ** Maintainers : Listic lab (code author current affiliation & applications) and Gipsa Lab (original research origins & applications) + ** + ** Creation - enhancement process 2007-2011 + ** Author: Alexandre Benoit (benoit.alexandre.vision@gmail.com), LISTIC lab, Annecy le vieux, France + ** + ** Theses algorithm have been developped by Alexandre BENOIT since his thesis with Alice Caplier at Gipsa-Lab (www.gipsa-lab.inpg.fr) and the research he pursues at LISTIC Lab (www.listic.univ-savoie.fr). + ** Refer to the following research paper for more information: + ** Benoit A., Caplier A., Durette B., Herault, J., "USING HUMAN VISUAL SYSTEM MODELING FOR BIO-INSPIRED LOW LEVEL IMAGE PROCESSING", Elsevier, Computer Vision and Image Understanding 114 (2010), pp. 758-773, DOI: http://dx.doi.org/10.1016/j.cviu.2010.01.011 + ** This work have been carried out thanks to Jeanny Herault who's research and great discussions are the basis of all this work, please take a look at his book: + ** Vision: Images, Signals and Neural Networks: Models of Neural Processing in Visual Perception (Progress in Neural Processing),By: Jeanny Herault, ISBN: 9814273686. WAPI (Tower ID): 113266891. + ** + ** The retina filter includes the research contributions of phd/research collegues from which code has been redrawn by the author : + ** _take a look at the retinacolor.hpp module to discover Brice Chaix de Lavarene color mosaicing/demosaicing and the reference paper: + ** ====> B. Chaix de Lavarene, D. Alleysson, B. Durette, J. Herault (2007). "Efficient demosaicing through recursive filtering", IEEE International Conference on Image Processing ICIP 2007 + ** _take a look at imagelogpolprojection.hpp to discover retina spatial log sampling which originates from Barthelemy Durette phd with Jeanny Herault. A Retina / V1 cortex projection is also proposed and originates from Jeanny's discussions. + ** ====> more informations in the above cited Jeanny Heraults's book. + ** + ** License Agreement + ** For Open Source Computer Vision Library + ** + ** Copyright (C) 2000-2008, Intel Corporation, all rights reserved. + ** Copyright (C) 2008-2011, Willow Garage Inc., all rights reserved. + ** + ** For Human Visual System tools (hvstools) + ** Copyright (C) 2007-2011, LISTIC Lab, Annecy le Vieux and GIPSA Lab, Grenoble, France, all rights reserved. + ** + ** Third party copyrights are property of their respective owners. + ** + ** Redistribution and use in source and binary forms, with or without modification, + ** are permitted provided that the following conditions are met: + ** + ** * Redistributions of source code must retain the above copyright notice, + ** this list of conditions and the following disclaimer. 
+ ** + ** * Redistributions in binary form must reproduce the above copyright notice, + ** this list of conditions and the following disclaimer in the documentation + ** and/or other materials provided with the distribution. + ** + ** * The name of the copyright holders may not be used to endorse or promote products + ** derived from this software without specific prior written permission. + ** + ** This software is provided by the copyright holders and contributors "as is" and + ** any express or implied warranties, including, but not limited to, the implied + ** warranties of merchantability and fitness for a particular purpose are disclaimed. + ** In no event shall the Intel Corporation or contributors be liable for any direct, + ** indirect, incidental, special, exemplary, or consequential damages + ** (including, but not limited to, procurement of substitute goods or services; + ** loss of use, data, or profits; or business interruption) however caused + ** and on any theory of liability, whether in contract, strict liability, + ** or tort (including negligence or otherwise) arising in any way out of + ** the use of this software, even if advised of the possibility of such damage. + *******************************************************************************/ + +#ifndef __OPENCV_CONTRIB_RETINA_HPP__ +#define __OPENCV_CONTRIB_RETINA_HPP__ + +/* + * Retina.hpp + * + * Created on: Jul 19, 2011 + * Author: Alexandre Benoit + */ + +#include "opencv2/core/core.hpp" // for all OpenCV core functionalities access, including cv::Exception support +#include + +namespace cv +{ + +enum RETINA_COLORSAMPLINGMETHOD +{ + RETINA_COLOR_RANDOM, //!< each pixel position is either R, G or B in a random choice + RETINA_COLOR_DIAGONAL,//!< color sampling is RGBRGBRGB..., line 2 BRGBRGBRG..., line 3, GBRGBRGBR... + RETINA_COLOR_BAYER//!< standard bayer sampling +}; + +class RetinaFilter; + +/** + * a wrapper class which allows the Gipsa/Listic Labs model to be used. + * This retina model allows spatio-temporal image processing (applied on still images, video sequences). + * As a summary, these are the retina model properties: + * => It applies a spectral whithening (mid-frequency details enhancement) + * => high frequency spatio-temporal noise reduction + * => low frequency luminance to be reduced (luminance range compression) + * => local logarithmic luminance compression allows details to be enhanced in low light conditions + * + * USE : this model can be used basically for spatio-temporal video effects but also for : + * _using the getParvo method output matrix : texture analysiswith enhanced signal to noise ratio and enhanced details robust against input images luminance ranges + * _using the getMagno method output matrix : motion analysis also with the previously cited properties + * + * for more information, reer to the following papers : + * Benoit A., Caplier A., Durette B., Herault, J., "USING HUMAN VISUAL SYSTEM MODELING FOR BIO-INSPIRED LOW LEVEL IMAGE PROCESSING", Elsevier, Computer Vision and Image Understanding 114 (2010), pp. 758-773, DOI: http://dx.doi.org/10.1016/j.cviu.2010.01.011 + * Vision: Images, Signals and Neural Networks: Models of Neural Processing in Visual Perception (Progress in Neural Processing),By: Jeanny Herault, ISBN: 9814273686. WAPI (Tower ID): 113266891. 
+ * + * The retina filter includes the research contributions of phd/research collegues from which code has been redrawn by the author : + * _take a look at the retinacolor.hpp module to discover Brice Chaix de Lavarene color mosaicing/demosaicing and the reference paper: + * ====> B. Chaix de Lavarene, D. Alleysson, B. Durette, J. Herault (2007). "Efficient demosaicing through recursive filtering", IEEE International Conference on Image Processing ICIP 2007 + * _take a look at imagelogpolprojection.hpp to discover retina spatial log sampling which originates from Barthelemy Durette phd with Jeanny Herault. A Retina / V1 cortex projection is also proposed and originates from Jeanny's discussions. + * ====> more informations in the above cited Jeanny Heraults's book. + */ +class CV_EXPORTS Retina { + +public: + + // parameters structure for better clarity, check explenations on the comments of methods : setupOPLandIPLParvoChannel and setupIPLMagnoChannel + struct RetinaParameters{ + struct OPLandIplParvoParameters{ // Outer Plexiform Layer (OPL) and Inner Plexiform Layer Parvocellular (IplParvo) parameters + OPLandIplParvoParameters():colorMode(true), + normaliseOutput(true), + photoreceptorsLocalAdaptationSensitivity(0.7f), + photoreceptorsTemporalConstant(0.5f), + photoreceptorsSpatialConstant(0.53f), + horizontalCellsGain(0.0f), + hcellsTemporalConstant(1.f), + hcellsSpatialConstant(7.f), + ganglionCellsSensitivity(0.7f){};// default setup + bool colorMode, normaliseOutput; + float photoreceptorsLocalAdaptationSensitivity, photoreceptorsTemporalConstant, photoreceptorsSpatialConstant, horizontalCellsGain, hcellsTemporalConstant, hcellsSpatialConstant, ganglionCellsSensitivity; + }; + struct IplMagnoParameters{ // Inner Plexiform Layer Magnocellular channel (IplMagno) + IplMagnoParameters(): + normaliseOutput(true), + parasolCells_beta(0.f), + parasolCells_tau(0.f), + parasolCells_k(7.f), + amacrinCellsTemporalCutFrequency(1.2f), + V0CompressionParameter(0.95f), + localAdaptintegration_tau(0.f), + localAdaptintegration_k(7.f){};// default setup + bool normaliseOutput; + float parasolCells_beta, parasolCells_tau, parasolCells_k, amacrinCellsTemporalCutFrequency, V0CompressionParameter, localAdaptintegration_tau, localAdaptintegration_k; + }; + struct OPLandIplParvoParameters OPLandIplParvo; + struct IplMagnoParameters IplMagno; + }; + + /** + * Main constructor with most commun use setup : create an instance of color ready retina model + * @param inputSize : the input frame size + */ + Retina(Size inputSize); + + /** + * Complete Retina filter constructor which allows all basic structural parameters definition + * @param inputSize : the input frame size + * @param colorMode : the chosen processing mode : with or without color processing + * @param colorSamplingMethod: specifies which kind of color sampling will be used + * @param useRetinaLogSampling: activate retina log sampling, if true, the 2 following parameters can be used + * @param reductionFactor: only usefull if param useRetinaLogSampling=true, specifies the reduction factor of the output frame (as the center (fovea) is high resolution and corners can be underscaled, then a reduction of the output is allowed without precision leak + * @param samplingStrenght: only usefull if param useRetinaLogSampling=true, specifies the strenght of the log scale that is applied + */ + Retina(Size inputSize, const bool colorMode, RETINA_COLORSAMPLINGMETHOD colorSamplingMethod=RETINA_COLOR_BAYER, const bool useRetinaLogSampling=false, const double 
reductionFactor=1.0, const double samplingStrenght=10.0); + + virtual ~Retina(); + + /** + * retreive retina input buffer size + */ + Size inputSize(); + + /** + * retreive retina output buffer size + */ + Size outputSize(); + + /** + * try to open an XML retina parameters file to adjust current retina instance setup + * => if the xml file does not exist, then default setup is applied + * => warning, Exceptions are thrown if read XML file is not valid + * @param retinaParameterFile : the parameters filename + * @param applyDefaultSetupOnFailure : set to true if an error must be thrown on error + */ + void setup(std::string retinaParameterFile="", const bool applyDefaultSetupOnFailure=true); + + + /** + * try to open an XML retina parameters file to adjust current retina instance setup + * => if the xml file does not exist, then default setup is applied + * => warning, Exceptions are thrown if read XML file is not valid + * @param fs : the open Filestorage which contains retina parameters + * @param applyDefaultSetupOnFailure : set to true if an error must be thrown on error + */ + void setup(cv::FileStorage &fs, const bool applyDefaultSetupOnFailure=true); + + /** + * try to open an XML retina parameters file to adjust current retina instance setup + * => if the xml file does not exist, then default setup is applied + * => warning, Exceptions are thrown if read XML file is not valid + * @param newParameters : a parameters structures updated with the new target configuration + */ + void setup(RetinaParameters newParameters); + + /** + * @return the current parameters setup + */ + Retina::RetinaParameters getParameters(); + + /** + * parameters setup display method + * @return a string which contains formatted parameters information + */ + const std::string printSetup(); + + /** + * write xml/yml formated parameters information + * @param fs : the filename of the xml file that will be open and writen with formatted parameters information + */ + virtual void write( std::string fs ) const; + + + /** + * write xml/yml formated parameters information + * @param fs : a cv::Filestorage object ready to be filled + */ + virtual void write( FileStorage& fs ) const; + + /** + * setup the OPL and IPL parvo channels (see biologocal model) + * OPL is referred as Outer Plexiform Layer of the retina, it allows the spatio-temporal filtering which withens the spectrum and reduces spatio-temporal noise while attenuating global luminance (low frequency energy) + * IPL parvo is the OPL next processing stage, it refers to Inner Plexiform layer of the retina, it allows high contours sensitivity in foveal vision. + * for more informations, please have a look at the paper Benoit A., Caplier A., Durette B., Herault, J., "USING HUMAN VISUAL SYSTEM MODELING FOR BIO-INSPIRED LOW LEVEL IMAGE PROCESSING", Elsevier, Computer Vision and Image Understanding 114 (2010), pp. 
758-773, DOI: http://dx.doi.org/10.1016/j.cviu.2010.01.011
+ * @param colorMode : specifies if (true) color is processed or not (false); if not, gray-level images are processed
+ * @param normaliseOutput : specifies if (true) output is rescaled between 0 and 255 or not (false)
+ * @param photoreceptorsLocalAdaptationSensitivity: the photoreceptors sensitivity range is 0-1 (more log compression effect when value increases)
+ * @param photoreceptorsTemporalConstant: the time constant of the first order low pass filter of the photoreceptors, use it to cut high temporal frequencies (noise or fast motion), unit is frames, typical value is 1 frame
+ * @param photoreceptorsSpatialConstant: the spatial constant of the first order low pass filter of the photoreceptors, use it to cut high spatial frequencies (noise or thick contours), unit is pixels, typical value is 1 pixel
+ * @param horizontalCellsGain: gain of the horizontal cells network, if 0, then the mean value of the output is zero, if the parameter is near 1, then the luminance is not filtered and is still reachable at the output, typical value is 0
+ * @param HcellsTemporalConstant: the time constant of the first order low pass filter of the horizontal cells, use it to cut low temporal frequencies (local luminance variations), unit is frames, typical value is 1 frame, as for the photoreceptors
+ * @param HcellsSpatialConstant: the spatial constant of the first order low pass filter of the horizontal cells, use it to cut low spatial frequencies (local luminance), unit is pixels, typical value is 5 pixels, this value is also used for local contrast computing when computing the local contrast adaptation at the ganglion cells level (Inner Plexiform Layer parvocellular channel model)
+ * @param ganglionCellsSensitivity: the compression strength of the ganglion cells local adaptation output, set a value between 160 and 250 for best results, a high value increases low-value sensitivity more... and the output saturates faster, recommended value: 230
+ */
+ void setupOPLandIPLParvoChannel(const bool colorMode=true, const bool normaliseOutput = true, const float photoreceptorsLocalAdaptationSensitivity=0.7f, const float photoreceptorsTemporalConstant=0.5f, const float photoreceptorsSpatialConstant=0.53f, const float horizontalCellsGain=0, const float HcellsTemporalConstant=1, const float HcellsSpatialConstant=7, const float ganglionCellsSensitivity=0.7f);
+ 
+ /**
+ * set parameter values for the Inner Plexiform Layer (IPL) magnocellular channel
+ * this channel processes signals output by the OPL processing stage in peripheral vision; it allows motion information enhancement. It is decorrelated from the details channel. See the reference paper for more details.
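+ *
+ * A minimal usage sketch (for illustration only: frame is assumed to be a valid gray or BGR
+ * cv::Mat, and the arguments shown are simply the default values documented below):
+ *
+ * cv::Retina retina(frame.size());
+ * retina.setupIPLMagnoChannel(true, 0.f, 0.f, 7.f, 1.2f, 0.95f, 0.f, 7.f);
+ * retina.run(frame);
+ * cv::Mat motionOutput;
+ * retina.getMagno(motionOutput); // rescaled 8-bit motion (peripheral) channel output
+ *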
+ * @param normaliseOutput : specifies if (true) output is rescaled between 0 and 255 or not (false)
+ * @param parasolCells_beta: the low pass filter gain used for local contrast adaptation at the IPL level of the retina (for ganglion cells local adaptation), typical value is 0
+ * @param parasolCells_tau: the low pass filter time constant used for local contrast adaptation at the IPL level of the retina (for ganglion cells local adaptation), unit is frame, typical value is 0 (immediate response)
+ * @param parasolCells_k: the low pass filter spatial constant used for local contrast adaptation at the IPL level of the retina (for ganglion cells local adaptation), unit is pixels, typical value is 5
+ * @param amacrinCellsTemporalCutFrequency: the time constant of the first order high pass filter of the magnocellular way (motion information channel), unit is frames, typical value is 5
+ * @param V0CompressionParameter: the compression strength of the ganglion cells local adaptation output, set a value between 160 and 250 for best results, a high value increases low-value sensitivity more... and the output saturates faster, recommended value: 200
+ * @param localAdaptintegration_tau: specifies the temporal constant of the low pass filter involved in the computation of the local "motion mean" for the local adaptation computation
+ * @param localAdaptintegration_k: specifies the spatial constant of the low pass filter involved in the computation of the local "motion mean" for the local adaptation computation
+ */
+ void setupIPLMagnoChannel(const bool normaliseOutput = true, const float parasolCells_beta=0, const float parasolCells_tau=0, const float parasolCells_k=7, const float amacrinCellsTemporalCutFrequency=1.2f, const float V0CompressionParameter=0.95f, const float localAdaptintegration_tau=0, const float localAdaptintegration_k=7);
+ 
+ /**
+ * method which allows the retina to be applied to an input image; after run, the encapsulated retina module is ready to deliver its outputs using the dedicated accessors, see the getParvo and getMagno methods
+ * @param inputImage : the input cv::Mat image to be processed, can be gray level or BGR coded in any format (from 8bit to 16bits)
+ */
+ void run(const Mat &inputImage);
+ 
+ /**
+ * accessor of the details channel of the retina (models foveal vision)
+ * @param retinaOutput_parvo : the output buffer (reallocated if necessary), this output is rescaled for standard 8bits image processing use in OpenCV
+ */
+ void getParvo(Mat &retinaOutput_parvo);
+ 
+ /**
+ * accessor of the details channel of the retina (models foveal vision)
+ * @param retinaOutput_parvo : the output buffer (reallocated if necessary), this output is the original retina filter model output, without any quantification or rescaling
+ */
+ void getParvo(std::valarray<float> &retinaOutput_parvo);
+ 
+ /**
+ * accessor of the motion channel of the retina (models peripheral vision)
+ * @param retinaOutput_magno : the output buffer (reallocated if necessary), this output is rescaled for standard 8bits image processing use in OpenCV
+ */
+ void getMagno(Mat &retinaOutput_magno);
+ 
+ /**
+ * accessor of the motion channel of the retina (models peripheral vision)
+ * @param retinaOutput_magno : the output buffer (reallocated if necessary), this output is the original retina filter model output, without any quantification or rescaling
+ */
+ void getMagno(std::valarray<float> &retinaOutput_magno);
+ 
+ // original API level data accessors : get buffer addresses...
+ const std::valarray<float> & getMagno() const;
+ const std::valarray<float> & getParvo() const;
+ 
+ /**
+ * activate color saturation as the final step of the color demultiplexing process
+ * -> this saturation is a sigmoid function applied to each channel of the demultiplexed image.
+ * @param saturateColors: boolean that activates color saturation (if true) or deactivates it (if false)
+ * @param colorSaturationValue: the saturation factor
+ */
+ void setColorSaturation(const bool saturateColors=true, const float colorSaturationValue=4.0);
+ 
+ /**
+ * clear all retina buffers (equivalent to opening the eyes after a long period of eye close ;o)
+ */
+ void clearBuffers();
+ 
+ /**
+ * Activate/deactivate the Magnocellular pathway processing (motion information extraction), by default, it is activated
+ * @param activate: true if Magnocellular output should be activated, false if not
+ */
+ void activateMovingContoursProcessing(const bool activate);
+ 
+ /**
+ * Activate/deactivate the Parvocellular pathway processing (contours information extraction), by default, it is activated
+ * @param activate: true if Parvocellular (contours information extraction) output should be activated, false if not
+ */
+ void activateContoursProcessing(const bool activate);
+ 
+protected:
+ // Parameters setup members
+ RetinaParameters _retinaParameters; // structure of parameters
+ 
+ // Retina model related modules
+ std::valarray<float> _inputBuffer; //!< buffer used to convert input cv::Mat to internal retina buffers format (valarrays)
+ 
+ // pointer to retina model
+ RetinaFilter* _retinaFilter; //!< the pointer to the retina module, allocated with instance construction
+ 
+ /**
+ * exports a valarray buffer coming from HVStools objects to a cv::Mat in CV_8UC1 (gray level picture) or CV_8UC3 (color) format
+ * @param grayMatrixToConvert the valarray to export to OpenCV
+ * @param nbRows : the number of rows of the flattened valarray matrix
+ * @param nbColumns : the number of columns of the flattened valarray matrix
+ * @param colorMode : a flag which indicates whether the matrix is color (true) or gray-level (false)
+ * @param outBuffer : the output matrix which is reallocated to satisfy Retina output buffer dimensions
+ */
+ void _convertValarrayBuffer2cvMat(const std::valarray<float> &grayMatrixToConvert, const unsigned int nbRows, const unsigned int nbColumns, const bool colorMode, Mat &outBuffer);
+ 
+ /**
+ *
+ * @param inputMatToConvert : the OpenCV cv::Mat that has to be converted to a gray or RGB valarray buffer that will be processed by the retina model
+ * @param outputValarrayMatrix : the output valarray
+ * @return the input image color mode (color=true, gray levels=false)
+ */
+ bool _convertCvMat2ValarrayBuffer(const cv::Mat inputMatToConvert, std::valarray<float> &outputValarrayMatrix);
+ 
+ //!
private method called by constructors, gathers their parameters and use them in a unified way + void _init(const Size inputSize, const bool colorMode, RETINA_COLORSAMPLINGMETHOD colorSamplingMethod=RETINA_COLOR_BAYER, const bool useRetinaLogSampling=false, const double reductionFactor=1.0, const double samplingStrenght=10.0); + + +}; + +} +#endif /* __OPENCV_CONTRIB_RETINA_HPP__ */ diff --git a/src/c/hardware/rasberrypi/libraries/opencv/opencv2/core.hpp b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/core.hpp new file mode 100644 index 0000000..12773f8 --- /dev/null +++ b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/core.hpp @@ -0,0 +1,43 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#include "opencv2/core/core.hpp" diff --git a/src/c/hardware/rasberrypi/libraries/opencv/opencv2/core/affine.hpp b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/core/affine.hpp new file mode 100644 index 0000000..1b560c8 --- /dev/null +++ b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/core/affine.hpp @@ -0,0 +1,513 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. 
+// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Copyright (C) 2013, OpenCV Foundation, all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef __OPENCV_CORE_AFFINE3_HPP__ +#define __OPENCV_CORE_AFFINE3_HPP__ + +#ifdef __cplusplus + +#include + +/*! @file */ + +namespace cv +{ + template + class Affine3 + { + public: + typedef T float_type; + typedef Matx Mat3; + typedef Matx Mat4; + typedef Vec Vec3; + + Affine3(); + + //Augmented affine matrix + Affine3(const Mat4& affine); + + //Rotation matrix + Affine3(const Mat3& R, const Vec3& t = Vec3::all(0)); + + //Rodrigues vector + Affine3(const Vec3& rvec, const Vec3& t = Vec3::all(0)); + + //Combines all contructors above. Supports 4x4, 4x3, 3x3, 1x3, 3x1 sizes of data matrix + explicit Affine3(const Mat& data, const Vec3& t = Vec3::all(0)); + + //From 16th element array + explicit Affine3(const float_type* vals); + + static Affine3 Identity(); + + //Rotation matrix + void rotation(const Mat3& R); + + //Rodrigues vector + void rotation(const Vec3& rvec); + + //Combines rotation methods above. 
Suports 3x3, 1x3, 3x1 sizes of data matrix; + void rotation(const Mat& data); + + void linear(const Mat3& L); + void translation(const Vec3& t); + + Mat3 rotation() const; + Mat3 linear() const; + Vec3 translation() const; + + //Rodrigues vector + Vec3 rvec() const; + + Affine3 inv(int method = cv::DECOMP_SVD) const; + + // a.rotate(R) is equivalent to Affine(R, 0) * a; + Affine3 rotate(const Mat3& R) const; + + // a.rotate(R) is equivalent to Affine(rvec, 0) * a; + Affine3 rotate(const Vec3& rvec) const; + + // a.translate(t) is equivalent to Affine(E, t) * a; + Affine3 translate(const Vec3& t) const; + + // a.concatenate(affine) is equivalent to affine * a; + Affine3 concatenate(const Affine3& affine) const; + + template operator Affine3() const; + + template Affine3 cast() const; + + Mat4 matrix; + +#if defined EIGEN_WORLD_VERSION && defined EIGEN_GEOMETRY_MODULE_H + Affine3(const Eigen::Transform& affine); + Affine3(const Eigen::Transform& affine); + operator Eigen::Transform() const; + operator Eigen::Transform() const; +#endif + }; + + template static + Affine3 operator*(const Affine3& affine1, const Affine3& affine2); + + template static + V operator*(const Affine3& affine, const V& vector); + + typedef Affine3 Affine3f; + typedef Affine3 Affine3d; + + static Vec3f operator*(const Affine3f& affine, const Vec3f& vector); + static Vec3d operator*(const Affine3d& affine, const Vec3d& vector); + + template class DataType< Affine3<_Tp> > + { + public: + typedef Affine3<_Tp> value_type; + typedef Affine3::work_type> work_type; + typedef _Tp channel_type; + + enum { generic_type = 0, + depth = DataType::depth, + channels = 16, + fmt = DataType::fmt + ((channels - 1) << 8), + type = CV_MAKETYPE(depth, channels) + }; + + typedef Vec vec_type; + }; +} + + +/////////////////////////////////////////////////////////////////////////////////// +/// Implementaiton + +template inline +cv::Affine3::Affine3() + : matrix(Mat4::eye()) +{} + +template inline +cv::Affine3::Affine3(const Mat4& affine) + : matrix(affine) +{} + +template inline +cv::Affine3::Affine3(const Mat3& R, const Vec3& t) +{ + rotation(R); + translation(t); + matrix.val[12] = matrix.val[13] = matrix.val[14] = 0; + matrix.val[15] = 1; +} + +template inline +cv::Affine3::Affine3(const Vec3& _rvec, const Vec3& t) +{ + rotation(_rvec); + translation(t); + matrix.val[12] = matrix.val[13] = matrix.val[14] = 0; + matrix.val[15] = 1; +} + +template inline +cv::Affine3::Affine3(const cv::Mat& data, const Vec3& t) +{ + CV_Assert(data.type() == cv::DataType::type); + + if (data.cols == 4 && data.rows == 4) + { + data.copyTo(matrix); + return; + } + else if (data.cols == 4 && data.rows == 3) + { + rotation(data(Rect(0, 0, 3, 3))); + translation(data(Rect(3, 0, 1, 3))); + return; + } + + rotation(data); + translation(t); + matrix.val[12] = matrix.val[13] = matrix.val[14] = 0; + matrix.val[15] = 1; +} + +template inline +cv::Affine3::Affine3(const float_type* vals) : matrix(vals) +{} + +template inline +cv::Affine3 cv::Affine3::Identity() +{ + return Affine3(cv::Affine3::Mat4::eye()); +} + +template inline +void cv::Affine3::rotation(const Mat3& R) +{ + linear(R); +} + +template inline +void cv::Affine3::rotation(const Vec3& _rvec) +{ + double rx = _rvec[0], ry = _rvec[1], rz = _rvec[2]; + double theta = std::sqrt(rx*rx + ry*ry + rz*rz); + + if (theta < DBL_EPSILON) + rotation(Mat3::eye()); + else + { + const double I[] = { 1, 0, 0, 0, 1, 0, 0, 0, 1 }; + + double c = std::cos(theta); + double s = std::sin(theta); + double c1 = 1. 
- c; + double itheta = (theta != 0) ? 1./theta : 0.; + + rx *= itheta; ry *= itheta; rz *= itheta; + + double rrt[] = { rx*rx, rx*ry, rx*rz, rx*ry, ry*ry, ry*rz, rx*rz, ry*rz, rz*rz }; + double _r_x_[] = { 0, -rz, ry, rz, 0, -rx, -ry, rx, 0 }; + Mat3 R; + + // R = cos(theta)*I + (1 - cos(theta))*r*rT + sin(theta)*[r_x] + // where [r_x] is [0 -rz ry; rz 0 -rx; -ry rx 0] + for(int k = 0; k < 9; ++k) + R.val[k] = static_cast(c*I[k] + c1*rrt[k] + s*_r_x_[k]); + + rotation(R); + } +} + +//Combines rotation methods above. Suports 3x3, 1x3, 3x1 sizes of data matrix; +template inline +void cv::Affine3::rotation(const cv::Mat& data) +{ + CV_Assert(data.type() == cv::DataType::type); + + if (data.cols == 3 && data.rows == 3) + { + Mat3 R; + data.copyTo(R); + rotation(R); + } + else if ((data.cols == 3 && data.rows == 1) || (data.cols == 1 && data.rows == 3)) + { + Vec3 _rvec; + data.reshape(1, 3).copyTo(_rvec); + rotation(_rvec); + } + else + CV_Assert(!"Input marix can be 3x3, 1x3 or 3x1"); +} + +template inline +void cv::Affine3::linear(const Mat3& L) +{ + matrix.val[0] = L.val[0]; matrix.val[1] = L.val[1]; matrix.val[ 2] = L.val[2]; + matrix.val[4] = L.val[3]; matrix.val[5] = L.val[4]; matrix.val[ 6] = L.val[5]; + matrix.val[8] = L.val[6]; matrix.val[9] = L.val[7]; matrix.val[10] = L.val[8]; +} + +template inline +void cv::Affine3::translation(const Vec3& t) +{ + matrix.val[3] = t[0]; matrix.val[7] = t[1]; matrix.val[11] = t[2]; +} + +template inline +typename cv::Affine3::Mat3 cv::Affine3::rotation() const +{ + return linear(); +} + +template inline +typename cv::Affine3::Mat3 cv::Affine3::linear() const +{ + typename cv::Affine3::Mat3 R; + R.val[0] = matrix.val[0]; R.val[1] = matrix.val[1]; R.val[2] = matrix.val[ 2]; + R.val[3] = matrix.val[4]; R.val[4] = matrix.val[5]; R.val[5] = matrix.val[ 6]; + R.val[6] = matrix.val[8]; R.val[7] = matrix.val[9]; R.val[8] = matrix.val[10]; + return R; +} + +template inline +typename cv::Affine3::Vec3 cv::Affine3::translation() const +{ + return Vec3(matrix.val[3], matrix.val[7], matrix.val[11]); +} + +template inline +typename cv::Affine3::Vec3 cv::Affine3::rvec() const +{ + cv::Vec3d w; + cv::Matx33d u, vt, R = rotation(); + cv::SVD::compute(R, w, u, vt, cv::SVD::FULL_UV + cv::SVD::MODIFY_A); + R = u * vt; + + double rx = R.val[7] - R.val[5]; + double ry = R.val[2] - R.val[6]; + double rz = R.val[3] - R.val[1]; + + double s = std::sqrt((rx*rx + ry*ry + rz*rz)*0.25); + double c = (R.val[0] + R.val[4] + R.val[8] - 1) * 0.5; + c = c > 1.0 ? 1.0 : c < -1.0 ? -1.0 : c; + double theta = acos(c); + + if( s < 1e-5 ) + { + if( c > 0 ) + rx = ry = rz = 0; + else + { + double t; + t = (R.val[0] + 1) * 0.5; + rx = std::sqrt(std::max(t, 0.0)); + t = (R.val[4] + 1) * 0.5; + ry = std::sqrt(std::max(t, 0.0)) * (R.val[1] < 0 ? -1.0 : 1.0); + t = (R.val[8] + 1) * 0.5; + rz = std::sqrt(std::max(t, 0.0)) * (R.val[2] < 0 ? 
-1.0 : 1.0); + + if( fabs(rx) < fabs(ry) && fabs(rx) < fabs(rz) && (R.val[5] > 0) != (ry*rz > 0) ) + rz = -rz; + theta /= std::sqrt(rx*rx + ry*ry + rz*rz); + rx *= theta; + ry *= theta; + rz *= theta; + } + } + else + { + double vth = 1/(2*s); + vth *= theta; + rx *= vth; ry *= vth; rz *= vth; + } + + return cv::Vec3d(rx, ry, rz); +} + +template inline +cv::Affine3 cv::Affine3::inv(int method) const +{ + return matrix.inv(method); +} + +template inline +cv::Affine3 cv::Affine3::rotate(const Mat3& R) const +{ + Mat3 Lc = linear(); + Vec3 tc = translation(); + Mat4 result; + result.val[12] = result.val[13] = result.val[14] = 0; + result.val[15] = 1; + + for(int j = 0; j < 3; ++j) + { + for(int i = 0; i < 3; ++i) + { + float_type value = 0; + for(int k = 0; k < 3; ++k) + value += R(j, k) * Lc(k, i); + result(j, i) = value; + } + + result(j, 3) = R.row(j).dot(tc.t()); + } + return result; +} + +template inline +cv::Affine3 cv::Affine3::rotate(const Vec3& _rvec) const +{ + return rotate(Affine3f(_rvec).rotation()); +} + +template inline +cv::Affine3 cv::Affine3::translate(const Vec3& t) const +{ + Mat4 m = matrix; + m.val[ 3] += t[0]; + m.val[ 7] += t[1]; + m.val[11] += t[2]; + return m; +} + +template inline +cv::Affine3 cv::Affine3::concatenate(const Affine3& affine) const +{ + return (*this).rotate(affine.rotation()).translate(affine.translation()); +} + +template template inline +cv::Affine3::operator Affine3() const +{ + return Affine3(matrix); +} + +template template inline +cv::Affine3 cv::Affine3::cast() const +{ + return Affine3(matrix); +} + +/** @cond IGNORED */ +template inline +cv::Affine3 cv::operator*(const cv::Affine3& affine1, const cv::Affine3& affine2) +{ + return affine2.concatenate(affine1); +} + +template inline +V cv::operator*(const cv::Affine3& affine, const V& v) +{ + const typename Affine3::Mat4& m = affine.matrix; + + V r; + r.x = m.val[0] * v.x + m.val[1] * v.y + m.val[ 2] * v.z + m.val[ 3]; + r.y = m.val[4] * v.x + m.val[5] * v.y + m.val[ 6] * v.z + m.val[ 7]; + r.z = m.val[8] * v.x + m.val[9] * v.y + m.val[10] * v.z + m.val[11]; + return r; +} +/** @endcond */ + +static inline +cv::Vec3f cv::operator*(const cv::Affine3f& affine, const cv::Vec3f& v) +{ + const cv::Matx44f& m = affine.matrix; + cv::Vec3f r; + r.val[0] = m.val[0] * v[0] + m.val[1] * v[1] + m.val[ 2] * v[2] + m.val[ 3]; + r.val[1] = m.val[4] * v[0] + m.val[5] * v[1] + m.val[ 6] * v[2] + m.val[ 7]; + r.val[2] = m.val[8] * v[0] + m.val[9] * v[1] + m.val[10] * v[2] + m.val[11]; + return r; +} + +static inline +cv::Vec3d cv::operator*(const cv::Affine3d& affine, const cv::Vec3d& v) +{ + const cv::Matx44d& m = affine.matrix; + cv::Vec3d r; + r.val[0] = m.val[0] * v[0] + m.val[1] * v[1] + m.val[ 2] * v[2] + m.val[ 3]; + r.val[1] = m.val[4] * v[0] + m.val[5] * v[1] + m.val[ 6] * v[2] + m.val[ 7]; + r.val[2] = m.val[8] * v[0] + m.val[9] * v[1] + m.val[10] * v[2] + m.val[11]; + return r; +} + + + +#if defined EIGEN_WORLD_VERSION && defined EIGEN_GEOMETRY_MODULE_H + +template inline +cv::Affine3::Affine3(const Eigen::Transform& affine) +{ + cv::Mat(4, 4, cv::DataType::type, affine.matrix().data()).copyTo(matrix); +} + +template inline +cv::Affine3::Affine3(const Eigen::Transform& affine) +{ + Eigen::Transform a = affine; + cv::Mat(4, 4, cv::DataType::type, a.matrix().data()).copyTo(matrix); +} + +template inline +cv::Affine3::operator Eigen::Transform() const +{ + Eigen::Transform r; + cv::Mat hdr(4, 4, cv::DataType::type, r.matrix().data()); + cv::Mat(matrix, false).copyTo(hdr); + return r; +} + +template 
inline +cv::Affine3::operator Eigen::Transform() const +{ + return this->operator Eigen::Transform(); +} + +#endif /* defined EIGEN_WORLD_VERSION && defined EIGEN_GEOMETRY_MODULE_H */ + + +#endif /* __cplusplus */ + +#endif /* __OPENCV_CORE_AFFINE3_HPP__ */ diff --git a/src/c/hardware/rasberrypi/libraries/opencv/opencv2/core/core.hpp b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/core/core.hpp new file mode 100644 index 0000000..591d50a --- /dev/null +++ b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/core/core.hpp @@ -0,0 +1,4924 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef __OPENCV_CORE_HPP__ +#define __OPENCV_CORE_HPP__ + +#include "opencv2/core/types_c.h" +#include "opencv2/core/version.hpp" + +#ifdef __cplusplus + +#ifndef SKIP_INCLUDES +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#endif // SKIP_INCLUDES + +/*! 
\namespace cv + Namespace where all the C++ OpenCV functionality resides +*/ +namespace cv { + +#undef abs +#undef min +#undef max +#undef Complex + +using std::vector; +using std::string; +using std::ptrdiff_t; + +template class Size_; +template class Point_; +template class Rect_; +template class Vec; +template class Matx; + +typedef std::string String; + +class Mat; +class SparseMat; +typedef Mat MatND; + +namespace ogl { + class Buffer; + class Texture2D; + class Arrays; +} + +// < Deprecated +class GlBuffer; +class GlTexture; +class GlArrays; +class GlCamera; +// > + +namespace gpu { + class GpuMat; +} + +class CV_EXPORTS MatExpr; +class CV_EXPORTS MatOp_Base; +class CV_EXPORTS MatArg; +class CV_EXPORTS MatConstIterator; + +template class Mat_; +template class MatIterator_; +template class MatConstIterator_; +template class MatCommaInitializer_; + +#if !defined(ANDROID) || (defined(_GLIBCXX_USE_WCHAR_T) && _GLIBCXX_USE_WCHAR_T) +typedef std::basic_string WString; + +CV_EXPORTS string fromUtf16(const WString& str); +CV_EXPORTS WString toUtf16(const string& str); +#endif + +CV_EXPORTS string format( const char* fmt, ... ); +CV_EXPORTS string tempfile( const char* suffix CV_DEFAULT(0)); + +// matrix decomposition types +enum { DECOMP_LU=0, DECOMP_SVD=1, DECOMP_EIG=2, DECOMP_CHOLESKY=3, DECOMP_QR=4, DECOMP_NORMAL=16 }; +enum { NORM_INF=1, NORM_L1=2, NORM_L2=4, NORM_L2SQR=5, NORM_HAMMING=6, NORM_HAMMING2=7, NORM_TYPE_MASK=7, NORM_RELATIVE=8, NORM_MINMAX=32 }; +enum { CMP_EQ=0, CMP_GT=1, CMP_GE=2, CMP_LT=3, CMP_LE=4, CMP_NE=5 }; +enum { GEMM_1_T=1, GEMM_2_T=2, GEMM_3_T=4 }; +enum { DFT_INVERSE=1, DFT_SCALE=2, DFT_ROWS=4, DFT_COMPLEX_OUTPUT=16, DFT_REAL_OUTPUT=32, + DCT_INVERSE = DFT_INVERSE, DCT_ROWS=DFT_ROWS }; + + +/*! + The standard OpenCV exception class. + Instances of the class are thrown by various functions and methods in the case of critical errors. + */ +class CV_EXPORTS Exception : public std::exception +{ +public: + /*! + Default constructor + */ + Exception(); + /*! + Full constructor. Normally the constuctor is not called explicitly. + Instead, the macros CV_Error(), CV_Error_() and CV_Assert() are used. + */ + Exception(int _code, const string& _err, const string& _func, const string& _file, int _line); + virtual ~Exception() throw(); + + /*! + \return the error description and the context as a text string. + */ + virtual const char *what() const throw(); + void formatMessage(); + + string msg; ///< the formatted error message + + int code; ///< error code @see CVStatus + string err; ///< error description + string func; ///< function name. Available only when the compiler supports getting it + string file; ///< source file name where the error has occured + int line; ///< line number in the source file where the error has occured +}; + + +//! Signals an error and raises the exception. + +/*! + By default the function prints information about the error to stderr, + then it either stops if setBreakOnError() had been called before or raises the exception. + It is possible to alternate error processing by using redirectError(). + + \param exc the exception raisen. + */ +CV_EXPORTS void error( const Exception& exc ); + +//! Sets/resets the break-on-error mode. + +/*! + When the break-on-error mode is set, the default error handler + issues a hardware exception, which can make debugging more convenient. 
+ + \return the previous state + */ +CV_EXPORTS bool setBreakOnError(bool flag); + +typedef int (CV_CDECL *ErrorCallback)( int status, const char* func_name, + const char* err_msg, const char* file_name, + int line, void* userdata ); + +//! Sets the new error handler and the optional user data. + +/*! + The function sets the new error handler, called from cv::error(). + + \param errCallback the new error handler. If NULL, the default error handler is used. + \param userdata the optional user data pointer, passed to the callback. + \param prevUserdata the optional output parameter where the previous user data pointer is stored + + \return the previous error handler +*/ +CV_EXPORTS ErrorCallback redirectError( ErrorCallback errCallback, + void* userdata=0, void** prevUserdata=0); + + +#if defined __GNUC__ +#define CV_Func __func__ +#elif defined _MSC_VER +#define CV_Func __FUNCTION__ +#else +#define CV_Func "" +#endif + +#define CV_Error( code, msg ) cv::error( cv::Exception(code, msg, CV_Func, __FILE__, __LINE__) ) +#define CV_Error_( code, args ) cv::error( cv::Exception(code, cv::format args, CV_Func, __FILE__, __LINE__) ) +#define CV_Assert( expr ) if(!!(expr)) ; else cv::error( cv::Exception(CV_StsAssert, #expr, CV_Func, __FILE__, __LINE__) ) + +#ifdef _DEBUG +#define CV_DbgAssert(expr) CV_Assert(expr) +#else +#define CV_DbgAssert(expr) +#endif + +CV_EXPORTS void glob(String pattern, std::vector& result, bool recursive = false); + +CV_EXPORTS_W void setNumThreads(int nthreads); +CV_EXPORTS_W int getNumThreads(); +CV_EXPORTS_W int getThreadNum(); + +CV_EXPORTS_W const string& getBuildInformation(); + +//! Returns the number of ticks. + +/*! + The function returns the number of ticks since the certain event (e.g. when the machine was turned on). + It can be used to initialize cv::RNG or to measure a function execution time by reading the tick count + before and after the function call. The granularity of ticks depends on the hardware and OS used. Use + cv::getTickFrequency() to convert ticks to seconds. +*/ +CV_EXPORTS_W int64 getTickCount(); + +/*! + Returns the number of ticks per seconds. + + The function returns the number of ticks (as returned by cv::getTickCount()) per second. + The following code computes the execution time in milliseconds: + + \code + double exec_time = (double)getTickCount(); + // do something ... + exec_time = ((double)getTickCount() - exec_time)*1000./getTickFrequency(); + \endcode +*/ +CV_EXPORTS_W double getTickFrequency(); + +/*! + Returns the number of CPU ticks. + + On platforms where the feature is available, the function returns the number of CPU ticks + since the certain event (normally, the system power-on moment). Using this function + one can accurately measure the execution time of very small code fragments, + for which cv::getTickCount() granularity is not enough. +*/ +CV_EXPORTS_W int64 getCPUTickCount(); + +/*! + Returns SSE etc. support status + + The function returns true if certain hardware features are available. + Currently, the following features are recognized: + - CV_CPU_MMX - MMX + - CV_CPU_SSE - SSE + - CV_CPU_SSE2 - SSE 2 + - CV_CPU_SSE3 - SSE 3 + - CV_CPU_SSSE3 - SSSE 3 + - CV_CPU_SSE4_1 - SSE 4.1 + - CV_CPU_SSE4_2 - SSE 4.2 + - CV_CPU_POPCNT - POPCOUNT + - CV_CPU_AVX - AVX + - CV_CPU_AVX2 - AVX2 + + \note {Note that the function output is not static. 
Once you called cv::useOptimized(false), + most of the hardware acceleration is disabled and thus the function will returns false, + until you call cv::useOptimized(true)} +*/ +CV_EXPORTS_W bool checkHardwareSupport(int feature); + +//! returns the number of CPUs (including hyper-threading) +CV_EXPORTS_W int getNumberOfCPUs(); + +/*! + Allocates memory buffer + + This is specialized OpenCV memory allocation function that returns properly aligned memory buffers. + The usage is identical to malloc(). The allocated buffers must be freed with cv::fastFree(). + If there is not enough memory, the function calls cv::error(), which raises an exception. + + \param bufSize buffer size in bytes + \return the allocated memory buffer. +*/ +CV_EXPORTS void* fastMalloc(size_t bufSize); + +/*! + Frees the memory allocated with cv::fastMalloc + + This is the corresponding deallocation function for cv::fastMalloc(). + When ptr==NULL, the function has no effect. +*/ +CV_EXPORTS void fastFree(void* ptr); + +template static inline _Tp* allocate(size_t n) +{ + return new _Tp[n]; +} + +template static inline void deallocate(_Tp* ptr, size_t) +{ + delete[] ptr; +} + +/*! + Aligns pointer by the certain number of bytes + + This small inline function aligns the pointer by the certian number of bytes by shifting + it forward by 0 or a positive offset. +*/ +template static inline _Tp* alignPtr(_Tp* ptr, int n=(int)sizeof(_Tp)) +{ + return (_Tp*)(((size_t)ptr + n-1) & -n); +} + +/*! + Aligns buffer size by the certain number of bytes + + This small inline function aligns a buffer size by the certian number of bytes by enlarging it. +*/ +static inline size_t alignSize(size_t sz, int n) +{ + assert((n & (n - 1)) == 0); // n is a power of 2 + return (sz + n-1) & -n; +} + +/*! + Turns on/off available optimization + + The function turns on or off the optimized code in OpenCV. Some optimization can not be enabled + or disabled, but, for example, most of SSE code in OpenCV can be temporarily turned on or off this way. + + \note{Since optimization may imply using special data structures, it may be unsafe + to call this function anywhere in the code. Instead, call it somewhere at the top level.} +*/ +CV_EXPORTS_W void setUseOptimized(bool onoff); + +/*! + Returns the current optimization status + + The function returns the current optimization status, which is controlled by cv::setUseOptimized(). +*/ +CV_EXPORTS_W bool useOptimized(); + +/*! 
+ The STL-compilant memory Allocator based on cv::fastMalloc() and cv::fastFree() +*/ +template class Allocator +{ +public: + typedef _Tp value_type; + typedef value_type* pointer; + typedef const value_type* const_pointer; + typedef value_type& reference; + typedef const value_type& const_reference; + typedef size_t size_type; + typedef ptrdiff_t difference_type; + template class rebind { typedef Allocator other; }; + + explicit Allocator() {} + ~Allocator() {} + explicit Allocator(Allocator const&) {} + template + explicit Allocator(Allocator const&) {} + + // address + pointer address(reference r) { return &r; } + const_pointer address(const_reference r) { return &r; } + + pointer allocate(size_type count, const void* =0) + { return reinterpret_cast(fastMalloc(count * sizeof (_Tp))); } + + void deallocate(pointer p, size_type) {fastFree(p); } + + size_type max_size() const + { return max(static_cast<_Tp>(-1)/sizeof(_Tp), 1); } + + void construct(pointer p, const _Tp& v) { new(static_cast(p)) _Tp(v); } + void destroy(pointer p) { p->~_Tp(); } +}; + +/////////////////////// Vec (used as element of multi-channel images ///////////////////// + +/*! + A helper class for cv::DataType + + The class is specialized for each fundamental numerical data type supported by OpenCV. + It provides DataDepth::value constant. +*/ +template class DataDepth {}; + +template<> class DataDepth { public: enum { value = CV_8U, fmt=(int)'u' }; }; +template<> class DataDepth { public: enum { value = CV_8U, fmt=(int)'u' }; }; +template<> class DataDepth { public: enum { value = CV_8S, fmt=(int)'c' }; }; +template<> class DataDepth { public: enum { value = CV_8S, fmt=(int)'c' }; }; +template<> class DataDepth { public: enum { value = CV_16U, fmt=(int)'w' }; }; +template<> class DataDepth { public: enum { value = CV_16S, fmt=(int)'s' }; }; +template<> class DataDepth { public: enum { value = CV_32S, fmt=(int)'i' }; }; +// this is temporary solution to support 32-bit unsigned integers +template<> class DataDepth { public: enum { value = CV_32S, fmt=(int)'i' }; }; +template<> class DataDepth { public: enum { value = CV_32F, fmt=(int)'f' }; }; +template<> class DataDepth { public: enum { value = CV_64F, fmt=(int)'d' }; }; +template class DataDepth<_Tp*> { public: enum { value = CV_USRTYPE1, fmt=(int)'r' }; }; + + +////////////////////////////// Small Matrix /////////////////////////// + +/*! + A short numerical vector. + + This template class represents short numerical vectors (of 1, 2, 3, 4 ... elements) + on which you can perform basic arithmetical operations, access individual elements using [] operator etc. + The vectors are allocated on stack, as opposite to std::valarray, std::vector, cv::Mat etc., + which elements are dynamically allocated in the heap. + + The template takes 2 parameters: + -# _Tp element type + -# cn the number of elements + + In addition to the universal notation like Vec, you can use shorter aliases + for the most popular specialized variants of Vec, e.g. Vec3f ~ Vec. + */ + +struct CV_EXPORTS Matx_AddOp {}; +struct CV_EXPORTS Matx_SubOp {}; +struct CV_EXPORTS Matx_ScaleOp {}; +struct CV_EXPORTS Matx_MulOp {}; +struct CV_EXPORTS Matx_MatMulOp {}; +struct CV_EXPORTS Matx_TOp {}; + +template class Matx +{ +public: + typedef _Tp value_type; + typedef Matx<_Tp, (m < n ? m : n), 1> diag_type; + typedef Matx<_Tp, m, n> mat_type; + enum { depth = DataDepth<_Tp>::value, rows = m, cols = n, channels = rows*cols, + type = CV_MAKETYPE(depth, channels) }; + + //! 
default constructor + Matx(); + + Matx(_Tp v0); //!< 1x1 matrix + Matx(_Tp v0, _Tp v1); //!< 1x2 or 2x1 matrix + Matx(_Tp v0, _Tp v1, _Tp v2); //!< 1x3 or 3x1 matrix + Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3); //!< 1x4, 2x2 or 4x1 matrix + Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3, _Tp v4); //!< 1x5 or 5x1 matrix + Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3, _Tp v4, _Tp v5); //!< 1x6, 2x3, 3x2 or 6x1 matrix + Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3, _Tp v4, _Tp v5, _Tp v6); //!< 1x7 or 7x1 matrix + Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3, _Tp v4, _Tp v5, _Tp v6, _Tp v7); //!< 1x8, 2x4, 4x2 or 8x1 matrix + Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3, _Tp v4, _Tp v5, _Tp v6, _Tp v7, _Tp v8); //!< 1x9, 3x3 or 9x1 matrix + Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3, _Tp v4, _Tp v5, _Tp v6, _Tp v7, _Tp v8, _Tp v9); //!< 1x10, 2x5 or 5x2 or 10x1 matrix + Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3, + _Tp v4, _Tp v5, _Tp v6, _Tp v7, + _Tp v8, _Tp v9, _Tp v10, _Tp v11); //!< 1x12, 2x6, 3x4, 4x3, 6x2 or 12x1 matrix + Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3, + _Tp v4, _Tp v5, _Tp v6, _Tp v7, + _Tp v8, _Tp v9, _Tp v10, _Tp v11, + _Tp v12, _Tp v13, _Tp v14, _Tp v15); //!< 1x16, 4x4 or 16x1 matrix + explicit Matx(const _Tp* vals); //!< initialize from a plain array + + static Matx all(_Tp alpha); + static Matx zeros(); + static Matx ones(); + static Matx eye(); + static Matx diag(const diag_type& d); + static Matx randu(_Tp a, _Tp b); + static Matx randn(_Tp a, _Tp b); + + //! dot product computed with the default precision + _Tp dot(const Matx<_Tp, m, n>& v) const; + + //! dot product computed in double-precision arithmetics + double ddot(const Matx<_Tp, m, n>& v) const; + + //! conversion to another data type + template operator Matx() const; + + //! change the matrix shape + template Matx<_Tp, m1, n1> reshape() const; + + //! extract part of the matrix + template Matx<_Tp, m1, n1> get_minor(int i, int j) const; + + //! extract the matrix row + Matx<_Tp, 1, n> row(int i) const; + + //! extract the matrix column + Matx<_Tp, m, 1> col(int i) const; + + //! extract the matrix diagonal + diag_type diag() const; + + //! transpose the matrix + Matx<_Tp, n, m> t() const; + + //! invert matrix the matrix + Matx<_Tp, n, m> inv(int method=DECOMP_LU) const; + + //! solve linear system + template Matx<_Tp, n, l> solve(const Matx<_Tp, m, l>& rhs, int flags=DECOMP_LU) const; + Vec<_Tp, n> solve(const Vec<_Tp, m>& rhs, int method) const; + + //! multiply two matrices element-wise + Matx<_Tp, m, n> mul(const Matx<_Tp, m, n>& a) const; + + //! element access + const _Tp& operator ()(int i, int j) const; + _Tp& operator ()(int i, int j); + + //! 
1D element access + const _Tp& operator ()(int i) const; + _Tp& operator ()(int i); + + Matx(const Matx<_Tp, m, n>& a, const Matx<_Tp, m, n>& b, Matx_AddOp); + Matx(const Matx<_Tp, m, n>& a, const Matx<_Tp, m, n>& b, Matx_SubOp); + template Matx(const Matx<_Tp, m, n>& a, _T2 alpha, Matx_ScaleOp); + Matx(const Matx<_Tp, m, n>& a, const Matx<_Tp, m, n>& b, Matx_MulOp); + template Matx(const Matx<_Tp, m, l>& a, const Matx<_Tp, l, n>& b, Matx_MatMulOp); + Matx(const Matx<_Tp, n, m>& a, Matx_TOp); + + _Tp val[m*n]; //< matrix elements +}; + + +typedef Matx Matx12f; +typedef Matx Matx12d; +typedef Matx Matx13f; +typedef Matx Matx13d; +typedef Matx Matx14f; +typedef Matx Matx14d; +typedef Matx Matx16f; +typedef Matx Matx16d; + +typedef Matx Matx21f; +typedef Matx Matx21d; +typedef Matx Matx31f; +typedef Matx Matx31d; +typedef Matx Matx41f; +typedef Matx Matx41d; +typedef Matx Matx61f; +typedef Matx Matx61d; + +typedef Matx Matx22f; +typedef Matx Matx22d; +typedef Matx Matx23f; +typedef Matx Matx23d; +typedef Matx Matx32f; +typedef Matx Matx32d; + +typedef Matx Matx33f; +typedef Matx Matx33d; + +typedef Matx Matx34f; +typedef Matx Matx34d; +typedef Matx Matx43f; +typedef Matx Matx43d; + +typedef Matx Matx44f; +typedef Matx Matx44d; +typedef Matx Matx66f; +typedef Matx Matx66d; + + +/*! + A short numerical vector. + + This template class represents short numerical vectors (of 1, 2, 3, 4 ... elements) + on which you can perform basic arithmetical operations, access individual elements using [] operator etc. + The vectors are allocated on stack, as opposite to std::valarray, std::vector, cv::Mat etc., + which elements are dynamically allocated in the heap. + + The template takes 2 parameters: + -# _Tp element type + -# cn the number of elements + + In addition to the universal notation like Vec, you can use shorter aliases + for the most popular specialized variants of Vec, e.g. Vec3f ~ Vec. +*/ +template class Vec : public Matx<_Tp, cn, 1> +{ +public: + typedef _Tp value_type; + enum { depth = DataDepth<_Tp>::value, channels = cn, type = CV_MAKETYPE(depth, channels) }; + + //! default constructor + Vec(); + + Vec(_Tp v0); //!< 1-element vector constructor + Vec(_Tp v0, _Tp v1); //!< 2-element vector constructor + Vec(_Tp v0, _Tp v1, _Tp v2); //!< 3-element vector constructor + Vec(_Tp v0, _Tp v1, _Tp v2, _Tp v3); //!< 4-element vector constructor + Vec(_Tp v0, _Tp v1, _Tp v2, _Tp v3, _Tp v4); //!< 5-element vector constructor + Vec(_Tp v0, _Tp v1, _Tp v2, _Tp v3, _Tp v4, _Tp v5); //!< 6-element vector constructor + Vec(_Tp v0, _Tp v1, _Tp v2, _Tp v3, _Tp v4, _Tp v5, _Tp v6); //!< 7-element vector constructor + Vec(_Tp v0, _Tp v1, _Tp v2, _Tp v3, _Tp v4, _Tp v5, _Tp v6, _Tp v7); //!< 8-element vector constructor + Vec(_Tp v0, _Tp v1, _Tp v2, _Tp v3, _Tp v4, _Tp v5, _Tp v6, _Tp v7, _Tp v8); //!< 9-element vector constructor + Vec(_Tp v0, _Tp v1, _Tp v2, _Tp v3, _Tp v4, _Tp v5, _Tp v6, _Tp v7, _Tp v8, _Tp v9); //!< 10-element vector constructor + explicit Vec(const _Tp* values); + + Vec(const Vec<_Tp, cn>& v); + + static Vec all(_Tp alpha); + + //! per-element multiplication + Vec mul(const Vec<_Tp, cn>& v) const; + + //! conjugation (makes sense for complex numbers and quaternions) + Vec conj() const; + + /*! + cross product of the two 3D vectors. + + For other dimensionalities the exception is raised + */ + Vec cross(const Vec& v) const; + //! conversion to another data type + template operator Vec() const; + //! conversion to 4-element CvScalar. + operator CvScalar() const; + + /*! 
element access */ + const _Tp& operator [](int i) const; + _Tp& operator[](int i); + const _Tp& operator ()(int i) const; + _Tp& operator ()(int i); + + Vec(const Matx<_Tp, cn, 1>& a, const Matx<_Tp, cn, 1>& b, Matx_AddOp); + Vec(const Matx<_Tp, cn, 1>& a, const Matx<_Tp, cn, 1>& b, Matx_SubOp); + template Vec(const Matx<_Tp, cn, 1>& a, _T2 alpha, Matx_ScaleOp); +}; + + +/* \typedef + + Shorter aliases for the most popular specializations of Vec +*/ +typedef Vec Vec2b; +typedef Vec Vec3b; +typedef Vec Vec4b; + +typedef Vec Vec2s; +typedef Vec Vec3s; +typedef Vec Vec4s; + +typedef Vec Vec2w; +typedef Vec Vec3w; +typedef Vec Vec4w; + +typedef Vec Vec2i; +typedef Vec Vec3i; +typedef Vec Vec4i; +typedef Vec Vec6i; +typedef Vec Vec8i; + +typedef Vec Vec2f; +typedef Vec Vec3f; +typedef Vec Vec4f; +typedef Vec Vec6f; + +typedef Vec Vec2d; +typedef Vec Vec3d; +typedef Vec Vec4d; +typedef Vec Vec6d; + + +//////////////////////////////// Complex ////////////////////////////// + +/*! + A complex number class. + + The template class is similar and compatible with std::complex, however it provides slightly + more convenient access to the real and imaginary parts using through the simple field access, as opposite + to std::complex::real() and std::complex::imag(). +*/ +template class Complex +{ +public: + + //! constructors + Complex(); + Complex( _Tp _re, _Tp _im=0 ); + Complex( const std::complex<_Tp>& c ); + + //! conversion to another data type + template operator Complex() const; + //! conjugation + Complex conj() const; + //! conversion to std::complex + operator std::complex<_Tp>() const; + + _Tp re, im; //< the real and the imaginary parts +}; + + +typedef Complex Complexf; +typedef Complex Complexd; + + +//////////////////////////////// Point_ //////////////////////////////// + +/*! + template 2D point class. + + The class defines a point in 2D space. Data type of the point coordinates is specified + as a template parameter. There are a few shorter aliases available for user convenience. + See cv::Point, cv::Point2i, cv::Point2f and cv::Point2d. +*/ +template class Point_ +{ +public: + typedef _Tp value_type; + + // various constructors + Point_(); + Point_(_Tp _x, _Tp _y); + Point_(const Point_& pt); + Point_(const CvPoint& pt); + Point_(const CvPoint2D32f& pt); + Point_(const Size_<_Tp>& sz); + Point_(const Vec<_Tp, 2>& v); + + Point_& operator = (const Point_& pt); + //! conversion to another data type + template operator Point_<_Tp2>() const; + + //! conversion to the old-style C structures + operator CvPoint() const; + operator CvPoint2D32f() const; + operator Vec<_Tp, 2>() const; + + //! dot product + _Tp dot(const Point_& pt) const; + //! dot product computed in double-precision arithmetics + double ddot(const Point_& pt) const; + //! cross-product + double cross(const Point_& pt) const; + //! checks whether the point is inside the specified rectangle + bool inside(const Rect_<_Tp>& r) const; + + _Tp x, y; //< the point coordinates +}; + +/*! + template 3D point class. + + The class defines a point in 3D space. Data type of the point coordinates is specified + as a template parameter. + + \see cv::Point3i, cv::Point3f and cv::Point3d +*/ +template class Point3_ +{ +public: + typedef _Tp value_type; + + // various constructors + Point3_(); + Point3_(_Tp _x, _Tp _y, _Tp _z); + Point3_(const Point3_& pt); + explicit Point3_(const Point_<_Tp>& pt); + Point3_(const CvPoint3D32f& pt); + Point3_(const Vec<_Tp, 3>& v); + + Point3_& operator = (const Point3_& pt); + //! 
conversion to another data type + template operator Point3_<_Tp2>() const; + //! conversion to the old-style CvPoint... + operator CvPoint3D32f() const; + //! conversion to cv::Vec<> + operator Vec<_Tp, 3>() const; + + //! dot product + _Tp dot(const Point3_& pt) const; + //! dot product computed in double-precision arithmetics + double ddot(const Point3_& pt) const; + //! cross product of the 2 3D points + Point3_ cross(const Point3_& pt) const; + + _Tp x, y, z; //< the point coordinates +}; + +//////////////////////////////// Size_ //////////////////////////////// + +/*! + The 2D size class + + The class represents the size of a 2D rectangle, image size, matrix size etc. + Normally, cv::Size ~ cv::Size_ is used. +*/ +template class Size_ +{ +public: + typedef _Tp value_type; + + //! various constructors + Size_(); + Size_(_Tp _width, _Tp _height); + Size_(const Size_& sz); + Size_(const CvSize& sz); + Size_(const CvSize2D32f& sz); + Size_(const Point_<_Tp>& pt); + + Size_& operator = (const Size_& sz); + //! the area (width*height) + _Tp area() const; + + //! conversion of another data type. + template operator Size_<_Tp2>() const; + + //! conversion to the old-style OpenCV types + operator CvSize() const; + operator CvSize2D32f() const; + + _Tp width, height; // the width and the height +}; + +//////////////////////////////// Rect_ //////////////////////////////// + +/*! + The 2D up-right rectangle class + + The class represents a 2D rectangle with coordinates of the specified data type. + Normally, cv::Rect ~ cv::Rect_ is used. +*/ +template class Rect_ +{ +public: + typedef _Tp value_type; + + //! various constructors + Rect_(); + Rect_(_Tp _x, _Tp _y, _Tp _width, _Tp _height); + Rect_(const Rect_& r); + Rect_(const CvRect& r); + Rect_(const Point_<_Tp>& org, const Size_<_Tp>& sz); + Rect_(const Point_<_Tp>& pt1, const Point_<_Tp>& pt2); + + Rect_& operator = ( const Rect_& r ); + //! the top-left corner + Point_<_Tp> tl() const; + //! the bottom-right corner + Point_<_Tp> br() const; + + //! size (width, height) of the rectangle + Size_<_Tp> size() const; + //! area (width*height) of the rectangle + _Tp area() const; + + //! conversion to another data type + template operator Rect_<_Tp2>() const; + //! conversion to the old-style CvRect + operator CvRect() const; + + //! checks whether the rectangle contains the point + bool contains(const Point_<_Tp>& pt) const; + + _Tp x, y, width, height; //< the top-left corner, as well as width and height of the rectangle +}; + + +typedef Point_ Point2i; +typedef Point2i Point; +typedef Size_ Size2i; +typedef Size_ Size2d; +typedef Size2i Size; +typedef Rect_ Rect; +typedef Point_ Point2f; +typedef Point_ Point2d; +typedef Size_ Size2f; +typedef Point3_ Point3i; +typedef Point3_ Point3f; +typedef Point3_ Point3d; + + +/*! + The rotated 2D rectangle. + + The class represents rotated (i.e. not up-right) rectangles on a plane. + Each rectangle is described by the center point (mass center), length of each side + (represented by cv::Size2f structure) and the rotation angle in degrees. +*/ +class CV_EXPORTS RotatedRect +{ +public: + //! various constructors + RotatedRect(); + RotatedRect(const Point2f& center, const Size2f& size, float angle); + RotatedRect(const CvBox2D& box); + + //! returns 4 vertices of the rectangle + void points(Point2f pts[]) const; + //! returns the minimal up-right rectangle containing the rotated rectangle + Rect boundingRect() const; + //! 
conversion to the old-style CvBox2D structure + operator CvBox2D() const; + + Point2f center; //< the rectangle mass center + Size2f size; //< width and height of the rectangle + float angle; //< the rotation angle. When the angle is 0, 90, 180, 270 etc., the rectangle becomes an up-right rectangle. +}; + +//////////////////////////////// Scalar_ /////////////////////////////// + +/*! + The template scalar class. + + This is partially specialized cv::Vec class with the number of elements = 4, i.e. a short vector of four elements. + Normally, cv::Scalar ~ cv::Scalar_ is used. +*/ +template class Scalar_ : public Vec<_Tp, 4> +{ +public: + //! various constructors + Scalar_(); + Scalar_(_Tp v0, _Tp v1, _Tp v2=0, _Tp v3=0); + Scalar_(const CvScalar& s); + Scalar_(_Tp v0); + + //! returns a scalar with all elements set to v0 + static Scalar_<_Tp> all(_Tp v0); + //! conversion to the old-style CvScalar + operator CvScalar() const; + + //! conversion to another data type + template operator Scalar_() const; + + //! per-element product + Scalar_<_Tp> mul(const Scalar_<_Tp>& t, double scale=1 ) const; + + // returns (v0, -v1, -v2, -v3) + Scalar_<_Tp> conj() const; + + // returns true iff v1 == v2 == v3 == 0 + bool isReal() const; +}; + +typedef Scalar_ Scalar; + +CV_EXPORTS void scalarToRawData(const Scalar& s, void* buf, int type, int unroll_to=0); + +//////////////////////////////// Range ///////////////////////////////// + +/*! + The 2D range class + + This is the class used to specify a continuous subsequence, i.e. part of a contour, or a column span in a matrix. +*/ +class CV_EXPORTS Range +{ +public: + Range(); + Range(int _start, int _end); + Range(const CvSlice& slice); + int size() const; + bool empty() const; + static Range all(); + operator CvSlice() const; + + int start, end; +}; + +/////////////////////////////// DataType //////////////////////////////// + +/*! + Informative template class for OpenCV "scalars". + + The class is specialized for each primitive numerical type supported by OpenCV (such as unsigned char or float), + as well as for more complex types, like cv::Complex<>, std::complex<>, cv::Vec<> etc. + The common property of all such types (called "scalars", do not confuse it with cv::Scalar_) + is that each of them is basically a tuple of numbers of the same type. Each "scalar" can be represented + by the depth id (CV_8U ... CV_64F) and the number of channels. + OpenCV matrices, 2D or nD, dense or sparse, can store "scalars", + as long as the number of channels does not exceed CV_CN_MAX. 
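+
+  As an illustration only (this example is not part of the original header text), the DataType
+  traits documented here can be used to derive the matrix type constant from a C++ element type
+  at compile time:
+
+  \code
+  // creates a 30x40 matrix of 3-channel floats; DataType<Vec3f>::type evaluates to CV_32FC3
+  Mat A(30, 40, DataType<Vec3f>::type);
+  CV_Assert(A.type() == CV_32FC3 && A.channels() == 3);
+  \endcode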
+*/ +template class DataType +{ +public: + typedef _Tp value_type; + typedef value_type work_type; + typedef value_type channel_type; + typedef value_type vec_type; + enum { generic_type = 1, depth = -1, channels = 1, fmt=0, + type = CV_MAKETYPE(depth, channels) }; +}; + +template<> class DataType +{ +public: + typedef bool value_type; + typedef int work_type; + typedef value_type channel_type; + typedef value_type vec_type; + enum { generic_type = 0, depth = DataDepth::value, channels = 1, + fmt=DataDepth::fmt, + type = CV_MAKETYPE(depth, channels) }; +}; + +template<> class DataType +{ +public: + typedef uchar value_type; + typedef int work_type; + typedef value_type channel_type; + typedef value_type vec_type; + enum { generic_type = 0, depth = DataDepth::value, channels = 1, + fmt=DataDepth::fmt, + type = CV_MAKETYPE(depth, channels) }; +}; + +template<> class DataType +{ +public: + typedef schar value_type; + typedef int work_type; + typedef value_type channel_type; + typedef value_type vec_type; + enum { generic_type = 0, depth = DataDepth::value, channels = 1, + fmt=DataDepth::fmt, + type = CV_MAKETYPE(depth, channels) }; +}; + +template<> class DataType +{ +public: + typedef schar value_type; + typedef int work_type; + typedef value_type channel_type; + typedef value_type vec_type; + enum { generic_type = 0, depth = DataDepth::value, channels = 1, + fmt=DataDepth::fmt, + type = CV_MAKETYPE(depth, channels) }; +}; + +template<> class DataType +{ +public: + typedef ushort value_type; + typedef int work_type; + typedef value_type channel_type; + typedef value_type vec_type; + enum { generic_type = 0, depth = DataDepth::value, channels = 1, + fmt=DataDepth::fmt, + type = CV_MAKETYPE(depth, channels) }; +}; + +template<> class DataType +{ +public: + typedef short value_type; + typedef int work_type; + typedef value_type channel_type; + typedef value_type vec_type; + enum { generic_type = 0, depth = DataDepth::value, channels = 1, + fmt=DataDepth::fmt, + type = CV_MAKETYPE(depth, channels) }; +}; + +template<> class DataType +{ +public: + typedef int value_type; + typedef value_type work_type; + typedef value_type channel_type; + typedef value_type vec_type; + enum { generic_type = 0, depth = DataDepth::value, channels = 1, + fmt=DataDepth::fmt, + type = CV_MAKETYPE(depth, channels) }; +}; + +template<> class DataType +{ +public: + typedef float value_type; + typedef value_type work_type; + typedef value_type channel_type; + typedef value_type vec_type; + enum { generic_type = 0, depth = DataDepth::value, channels = 1, + fmt=DataDepth::fmt, + type = CV_MAKETYPE(depth, channels) }; +}; + +template<> class DataType +{ +public: + typedef double value_type; + typedef value_type work_type; + typedef value_type channel_type; + typedef value_type vec_type; + enum { generic_type = 0, depth = DataDepth::value, channels = 1, + fmt=DataDepth::fmt, + type = CV_MAKETYPE(depth, channels) }; +}; + +template class DataType > +{ +public: + typedef Matx<_Tp, m, n> value_type; + typedef Matx::work_type, m, n> work_type; + typedef _Tp channel_type; + typedef value_type vec_type; + enum { generic_type = 0, depth = DataDepth::value, channels = m*n, + fmt = ((channels-1)<<8) + DataDepth::fmt, + type = CV_MAKETYPE(depth, channels) }; +}; + +template class DataType > +{ +public: + typedef Vec<_Tp, cn> value_type; + typedef Vec::work_type, cn> work_type; + typedef _Tp channel_type; + typedef value_type vec_type; + enum { generic_type = 0, depth = DataDepth::value, channels = cn, + fmt = ((channels-1)<<8) + 
DataDepth::fmt, + type = CV_MAKETYPE(depth, channels) }; +}; + +template class DataType > +{ +public: + typedef std::complex<_Tp> value_type; + typedef value_type work_type; + typedef _Tp channel_type; + enum { generic_type = 0, depth = DataDepth::value, channels = 2, + fmt = ((channels-1)<<8) + DataDepth::fmt, + type = CV_MAKETYPE(depth, channels) }; + typedef Vec vec_type; +}; + +template class DataType > +{ +public: + typedef Complex<_Tp> value_type; + typedef value_type work_type; + typedef _Tp channel_type; + enum { generic_type = 0, depth = DataDepth::value, channels = 2, + fmt = ((channels-1)<<8) + DataDepth::fmt, + type = CV_MAKETYPE(depth, channels) }; + typedef Vec vec_type; +}; + +template class DataType > +{ +public: + typedef Point_<_Tp> value_type; + typedef Point_::work_type> work_type; + typedef _Tp channel_type; + enum { generic_type = 0, depth = DataDepth::value, channels = 2, + fmt = ((channels-1)<<8) + DataDepth::fmt, + type = CV_MAKETYPE(depth, channels) }; + typedef Vec vec_type; +}; + +template class DataType > +{ +public: + typedef Point3_<_Tp> value_type; + typedef Point3_::work_type> work_type; + typedef _Tp channel_type; + enum { generic_type = 0, depth = DataDepth::value, channels = 3, + fmt = ((channels-1)<<8) + DataDepth::fmt, + type = CV_MAKETYPE(depth, channels) }; + typedef Vec vec_type; +}; + +template class DataType > +{ +public: + typedef Size_<_Tp> value_type; + typedef Size_::work_type> work_type; + typedef _Tp channel_type; + enum { generic_type = 0, depth = DataDepth::value, channels = 2, + fmt = ((channels-1)<<8) + DataDepth::fmt, + type = CV_MAKETYPE(depth, channels) }; + typedef Vec vec_type; +}; + +template class DataType > +{ +public: + typedef Rect_<_Tp> value_type; + typedef Rect_::work_type> work_type; + typedef _Tp channel_type; + enum { generic_type = 0, depth = DataDepth::value, channels = 4, + fmt = ((channels-1)<<8) + DataDepth::fmt, + type = CV_MAKETYPE(depth, channels) }; + typedef Vec vec_type; +}; + +template class DataType > +{ +public: + typedef Scalar_<_Tp> value_type; + typedef Scalar_::work_type> work_type; + typedef _Tp channel_type; + enum { generic_type = 0, depth = DataDepth::value, channels = 4, + fmt = ((channels-1)<<8) + DataDepth::fmt, + type = CV_MAKETYPE(depth, channels) }; + typedef Vec vec_type; +}; + +template<> class DataType +{ +public: + typedef Range value_type; + typedef value_type work_type; + typedef int channel_type; + enum { generic_type = 0, depth = DataDepth::value, channels = 2, + fmt = ((channels-1)<<8) + DataDepth::fmt, + type = CV_MAKETYPE(depth, channels) }; + typedef Vec vec_type; +}; + +//////////////////// generic_type ref-counting pointer class for C/C++ objects //////////////////////// + +/*! + Smart pointer to dynamically allocated objects. + + This is template pointer-wrapping class that stores the associated reference counter along with the + object pointer. The class is similar to std::smart_ptr<> from the recent addons to the C++ standard, + but is shorter to write :) and self-contained (i.e. does add any dependency on the compiler or an external library). + + Basically, you can use "Ptr ptr" (or faster "const Ptr& ptr" for read-only access) + everywhere instead of "MyObjectType* ptr", where MyObjectType is some C structure or a C++ class. 
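+
+  For illustration only (this paragraph is not part of the original documentation), a minimal
+  usage sketch could look as follows; MyObjectType stands for any user class or structure, as above:
+
+  \code
+  Ptr<MyObjectType> p(new MyObjectType); // take ownership; the reference counter is allocated and set to 1
+  Ptr<MyObjectType> q = p;               // O(1) copy; addref() just increments the reference counter
+  // p and q can be used like plain MyObjectType* pointers (operator -> and the cast operators);
+  // the object is deleted automatically when the last Ptr referencing it is destroyed
+  \endcode
+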
+ To make it all work, you need to specialize Ptr<>::delete_obj(), like: + + \code + template<> void Ptr::delete_obj() { call_destructor_func(obj); } + \endcode + + \note{if MyObjectType is a C++ class with a destructor, you do not need to specialize delete_obj(), + since the default implementation calls "delete obj;"} + + \note{Another good property of the class is that the operations on the reference counter are atomic, + i.e. it is safe to use the class in multi-threaded applications} +*/ +template class Ptr +{ +public: + //! empty constructor + Ptr(); + //! take ownership of the pointer. The associated reference counter is allocated and set to 1 + Ptr(_Tp* _obj); + //! calls release() + ~Ptr(); + //! copy constructor. Copies the members and calls addref() + Ptr(const Ptr& ptr); + template Ptr(const Ptr<_Tp2>& ptr); + //! copy operator. Calls ptr.addref() and release() before copying the members + Ptr& operator = (const Ptr& ptr); + //! increments the reference counter + void addref(); + //! decrements the reference counter. If it reaches 0, delete_obj() is called + void release(); + //! deletes the object. Override if needed + void delete_obj(); + //! returns true iff obj==NULL + bool empty() const; + + //! cast pointer to another type + template Ptr<_Tp2> ptr(); + template const Ptr<_Tp2> ptr() const; + + //! helper operators making "Ptr ptr" use very similar to "T* ptr". + _Tp* operator -> (); + const _Tp* operator -> () const; + + operator _Tp* (); + operator const _Tp*() const; + + _Tp* obj; //< the object pointer. + int* refcount; //< the associated reference counter +}; + +template +Ptr makePtr(); + +template +Ptr makePtr(const A1& a1); + +template +Ptr makePtr(const A1& a1, const A2& a2); + +template +Ptr makePtr(const A1& a1, const A2& a2, const A3& a3); + +template +Ptr makePtr(const A1& a1, const A2& a2, const A3& a3, const A4& a4); + +template +Ptr makePtr(const A1& a1, const A2& a2, const A3& a3, const A4& a4, const A5& a5); + +template +Ptr makePtr(const A1& a1, const A2& a2, const A3& a3, const A4& a4, const A5& a5, const A6& a6); + +template +Ptr makePtr(const A1& a1, const A2& a2, const A3& a3, const A4& a4, const A5& a5, const A6& a6, const A7& a7); + +template +Ptr makePtr(const A1& a1, const A2& a2, const A3& a3, const A4& a4, const A5& a5, const A6& a6, const A7& a7, const A8& a8); + +template +Ptr makePtr(const A1& a1, const A2& a2, const A3& a3, const A4& a4, const A5& a5, const A6& a6, const A7& a7, const A8& a8, const A9& a9); + +template +Ptr makePtr(const A1& a1, const A2& a2, const A3& a3, const A4& a4, const A5& a5, const A6& a6, const A7& a7, const A8& a8, const A9& a9, const A10& a10); + +//////////////////////// Input/Output Array Arguments ///////////////////////////////// + +/*! 
+ Proxy datatype for passing Mat's and vector<>'s as input parameters + */ +class CV_EXPORTS _InputArray +{ +public: + enum { + KIND_SHIFT = 16, + FIXED_TYPE = 0x8000 << KIND_SHIFT, + FIXED_SIZE = 0x4000 << KIND_SHIFT, + KIND_MASK = ~(FIXED_TYPE|FIXED_SIZE) - (1 << KIND_SHIFT) + 1, + + NONE = 0 << KIND_SHIFT, + MAT = 1 << KIND_SHIFT, + MATX = 2 << KIND_SHIFT, + STD_VECTOR = 3 << KIND_SHIFT, + STD_VECTOR_VECTOR = 4 << KIND_SHIFT, + STD_VECTOR_MAT = 5 << KIND_SHIFT, + EXPR = 6 << KIND_SHIFT, + OPENGL_BUFFER = 7 << KIND_SHIFT, + OPENGL_TEXTURE = 8 << KIND_SHIFT, + GPU_MAT = 9 << KIND_SHIFT, + OCL_MAT =10 << KIND_SHIFT + }; + _InputArray(); + + _InputArray(const Mat& m); + _InputArray(const MatExpr& expr); + template _InputArray(const _Tp* vec, int n); + template _InputArray(const vector<_Tp>& vec); + template _InputArray(const vector >& vec); + _InputArray(const vector& vec); + template _InputArray(const vector >& vec); + template _InputArray(const Mat_<_Tp>& m); + template _InputArray(const Matx<_Tp, m, n>& matx); + _InputArray(const Scalar& s); + _InputArray(const double& val); + // < Deprecated + _InputArray(const GlBuffer& buf); + _InputArray(const GlTexture& tex); + // > + _InputArray(const gpu::GpuMat& d_mat); + _InputArray(const ogl::Buffer& buf); + _InputArray(const ogl::Texture2D& tex); + + virtual Mat getMat(int i=-1) const; + virtual void getMatVector(vector& mv) const; + // < Deprecated + virtual GlBuffer getGlBuffer() const; + virtual GlTexture getGlTexture() const; + // > + virtual gpu::GpuMat getGpuMat() const; + /*virtual*/ ogl::Buffer getOGlBuffer() const; + /*virtual*/ ogl::Texture2D getOGlTexture2D() const; + + virtual int kind() const; + virtual Size size(int i=-1) const; + virtual size_t total(int i=-1) const; + virtual int type(int i=-1) const; + virtual int depth(int i=-1) const; + virtual int channels(int i=-1) const; + virtual bool empty() const; + +#ifdef OPENCV_CAN_BREAK_BINARY_COMPATIBILITY + virtual ~_InputArray(); +#endif + + int flags; + void* obj; + Size sz; +}; + + +enum +{ + DEPTH_MASK_8U = 1 << CV_8U, + DEPTH_MASK_8S = 1 << CV_8S, + DEPTH_MASK_16U = 1 << CV_16U, + DEPTH_MASK_16S = 1 << CV_16S, + DEPTH_MASK_32S = 1 << CV_32S, + DEPTH_MASK_32F = 1 << CV_32F, + DEPTH_MASK_64F = 1 << CV_64F, + DEPTH_MASK_ALL = (DEPTH_MASK_64F<<1)-1, + DEPTH_MASK_ALL_BUT_8S = DEPTH_MASK_ALL & ~DEPTH_MASK_8S, + DEPTH_MASK_FLT = DEPTH_MASK_32F + DEPTH_MASK_64F +}; + + +/*! 
+ Proxy datatype for passing Mat's and vector<>'s as input parameters + */ +class CV_EXPORTS _OutputArray : public _InputArray +{ +public: + _OutputArray(); + + _OutputArray(Mat& m); + template _OutputArray(vector<_Tp>& vec); + template _OutputArray(vector >& vec); + _OutputArray(vector& vec); + template _OutputArray(vector >& vec); + template _OutputArray(Mat_<_Tp>& m); + template _OutputArray(Matx<_Tp, m, n>& matx); + template _OutputArray(_Tp* vec, int n); + _OutputArray(gpu::GpuMat& d_mat); + _OutputArray(ogl::Buffer& buf); + _OutputArray(ogl::Texture2D& tex); + + _OutputArray(const Mat& m); + template _OutputArray(const vector<_Tp>& vec); + template _OutputArray(const vector >& vec); + _OutputArray(const vector& vec); + template _OutputArray(const vector >& vec); + template _OutputArray(const Mat_<_Tp>& m); + template _OutputArray(const Matx<_Tp, m, n>& matx); + template _OutputArray(const _Tp* vec, int n); + _OutputArray(const gpu::GpuMat& d_mat); + _OutputArray(const ogl::Buffer& buf); + _OutputArray(const ogl::Texture2D& tex); + + virtual bool fixedSize() const; + virtual bool fixedType() const; + virtual bool needed() const; + virtual Mat& getMatRef(int i=-1) const; + /*virtual*/ gpu::GpuMat& getGpuMatRef() const; + /*virtual*/ ogl::Buffer& getOGlBufferRef() const; + /*virtual*/ ogl::Texture2D& getOGlTexture2DRef() const; + virtual void create(Size sz, int type, int i=-1, bool allowTransposed=false, int fixedDepthMask=0) const; + virtual void create(int rows, int cols, int type, int i=-1, bool allowTransposed=false, int fixedDepthMask=0) const; + virtual void create(int dims, const int* size, int type, int i=-1, bool allowTransposed=false, int fixedDepthMask=0) const; + virtual void release() const; + virtual void clear() const; + +#ifdef OPENCV_CAN_BREAK_BINARY_COMPATIBILITY + virtual ~_OutputArray(); +#endif +}; + +typedef const _InputArray& InputArray; +typedef InputArray InputArrayOfArrays; +typedef const _OutputArray& OutputArray; +typedef OutputArray OutputArrayOfArrays; +typedef OutputArray InputOutputArray; +typedef OutputArray InputOutputArrayOfArrays; + +CV_EXPORTS OutputArray noArray(); + +/////////////////////////////////////// Mat /////////////////////////////////////////// + +enum { MAGIC_MASK=0xFFFF0000, TYPE_MASK=0x00000FFF, DEPTH_MASK=7 }; + +static inline size_t getElemSize(int type) { return CV_ELEM_SIZE(type); } + +/*! + Custom array allocator + +*/ +class CV_EXPORTS MatAllocator +{ +public: + MatAllocator() {} + virtual ~MatAllocator() {} + virtual void allocate(int dims, const int* sizes, int type, int*& refcount, + uchar*& datastart, uchar*& data, size_t* step) = 0; + virtual void deallocate(int* refcount, uchar* datastart, uchar* data) = 0; +}; + +/*! + The n-dimensional matrix class. + + The class represents an n-dimensional dense numerical array that can act as + a matrix, image, optical flow map, 3-focal tensor etc. + It is very similar to CvMat and CvMatND types from earlier versions of OpenCV, + and similarly to those types, the matrix can be multi-channel. It also fully supports ROI mechanism. + + There are many different ways to create cv::Mat object. Here are the some popular ones: +
    +
  • using cv::Mat::create(nrows, ncols, type) method or + the similar constructor cv::Mat::Mat(nrows, ncols, type[, fill_value]) constructor. + A new matrix of the specified size and specifed type will be allocated. + "type" has the same meaning as in cvCreateMat function, + e.g. CV_8UC1 means 8-bit single-channel matrix, CV_32FC2 means 2-channel (i.e. complex) + floating-point matrix etc: + + \code + // make 7x7 complex matrix filled with 1+3j. + cv::Mat M(7,7,CV_32FC2,Scalar(1,3)); + // and now turn M to 100x60 15-channel 8-bit matrix. + // The old content will be deallocated + M.create(100,60,CV_8UC(15)); + \endcode + + As noted in the introduction of this chapter, Mat::create() + will only allocate a new matrix when the current matrix dimensionality + or type are different from the specified. + +
• by using a copy constructor or assignment operator, where on the right side it can
+   be a matrix or an expression; see below. Again, as noted in the introduction,
+   matrix assignment is an O(1) operation because it only copies the header
+   and increases the reference counter. The cv::Mat::clone() method can be used to get a full
+   (a.k.a. deep) copy of the matrix when you need it (see the sketch after this list).
+
  • by constructing a header for a part of another matrix. It can be a single row, single column, + several rows, several columns, rectangular region in the matrix (called a minor in algebra) or + a diagonal. Such operations are also O(1), because the new header will reference the same data. + You can actually modify a part of the matrix using this feature, e.g. + + \code + // add 5-th row, multiplied by 3 to the 3rd row + M.row(3) = M.row(3) + M.row(5)*3; + + // now copy 7-th column to the 1-st column + // M.col(1) = M.col(7); // this will not work + Mat M1 = M.col(1); + M.col(7).copyTo(M1); + + // create new 320x240 image + cv::Mat img(Size(320,240),CV_8UC3); + // select a roi + cv::Mat roi(img, Rect(10,10,100,100)); + // fill the ROI with (0,255,0) (which is green in RGB space); + // the original 320x240 image will be modified + roi = Scalar(0,255,0); + \endcode + + Thanks to the additional cv::Mat::datastart and cv::Mat::dataend members, it is possible to + compute the relative sub-matrix position in the main "container" matrix using cv::Mat::locateROI(): + + \code + Mat A = Mat::eye(10, 10, CV_32S); + // extracts A columns, 1 (inclusive) to 3 (exclusive). + Mat B = A(Range::all(), Range(1, 3)); + // extracts B rows, 5 (inclusive) to 9 (exclusive). + // that is, C ~ A(Range(5, 9), Range(1, 3)) + Mat C = B(Range(5, 9), Range::all()); + Size size; Point ofs; + C.locateROI(size, ofs); + // size will be (width=10,height=10) and the ofs will be (x=1, y=5) + \endcode + + As in the case of whole matrices, if you need a deep copy, use cv::Mat::clone() method + of the extracted sub-matrices. + +
  • by making a header for user-allocated-data. It can be useful for +
      +
    1. processing "foreign" data using OpenCV (e.g. when you implement + a DirectShow filter or a processing module for gstreamer etc.), e.g. + + \code + void process_video_frame(const unsigned char* pixels, + int width, int height, int step) + { + cv::Mat img(height, width, CV_8UC3, pixels, step); + cv::GaussianBlur(img, img, cv::Size(7,7), 1.5, 1.5); + } + \endcode + +
    2. for quick initialization of small matrices and/or super-fast element access + + \code + double m[3][3] = {{a, b, c}, {d, e, f}, {g, h, i}}; + cv::Mat M = cv::Mat(3, 3, CV_64F, m).inv(); + \endcode +
+
+   Partial yet very common instances of this "user-allocated data" case are conversions
+   from CvMat and IplImage to cv::Mat. For this purpose there are special constructors
+   taking pointers to CvMat or IplImage and the optional
+   flag indicating whether to copy the data or not.
+
+   Backward conversion from cv::Mat to CvMat or IplImage is provided via the cast operators
+   cv::Mat::operator CvMat() and cv::Mat::operator IplImage().
+   The operators do not copy the data.
+
+
+   \code
+   IplImage* img = cvLoadImage("greatwave.jpg", 1);
+   Mat mtx(img); // convert IplImage* -> cv::Mat
+   CvMat oldmat = mtx; // convert cv::Mat -> CvMat
+   CV_Assert(oldmat.cols == img->width && oldmat.rows == img->height &&
+       oldmat.data.ptr == (uchar*)img->imageData && oldmat.step == img->widthStep);
+   \endcode
+
• by using MATLAB-style matrix initializers, cv::Mat::zeros(), cv::Mat::ones(), cv::Mat::eye(), e.g.:
+
+   \code
+   // create a double-precision identity matrix and add it to M.
+   M += Mat::eye(M.rows, M.cols, CV_64F);
+   \endcode
+
• by using a comma-separated initializer:
+
+   \code
+   // create a 3x3 double-precision identity matrix
+   Mat M = (Mat_<double>(3,3) << 1, 0, 0, 0, 1, 0, 0, 0, 1);
+   \endcode
+
+   here we first call the constructor of the cv::Mat_ class (that we describe further) with the proper matrix,
+   and then we just put the "<<" operator followed by comma-separated values that can be constants,
+   variables, expressions etc. Also, note the extra parentheses that are needed to avoid compiler errors.
+
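+
+   The sketch below is not part of the original documentation; it merely illustrates the difference,
+   referenced in the list above, between the O(1) header copy made by the assignment operator and the
+   deep copy made by cv::Mat::clone():
+
+   \code
+   Mat A = Mat::zeros(3, 3, CV_8UC1);
+   Mat B = A;          // B shares the data of A; only the header is copied
+   Mat C = A.clone();  // C owns a separate, deep copy of the data
+   A.at<uchar>(0,0) = 255;
+   // now B.at<uchar>(0,0) == 255 as well, while C.at<uchar>(0,0) is still 0
+   \endcode
+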
+ + Once matrix is created, it will be automatically managed by using reference-counting mechanism + (unless the matrix header is built on top of user-allocated data, + in which case you should handle the data by yourself). + The matrix data will be deallocated when no one points to it; + if you want to release the data pointed by a matrix header before the matrix destructor is called, + use cv::Mat::release(). + + The next important thing to learn about the matrix class is element access. Here is how the matrix is stored. + The elements are stored in row-major order (row by row). The cv::Mat::data member points to the first element of the first row, + cv::Mat::rows contains the number of matrix rows and cv::Mat::cols - the number of matrix columns. There is yet another member, + cv::Mat::step that is used to actually compute address of a matrix element. cv::Mat::step is needed because the matrix can be + a part of another matrix or because there can some padding space in the end of each row for a proper alignment. + + Given these parameters, address of the matrix element M_{ij} is computed as following: + + addr(M_{ij})=M.data + M.step*i + j*M.elemSize() + + if you know the matrix element type, e.g. it is float, then you can use cv::Mat::at() method: + + addr(M_{ij})=&M.at(i,j) + + (where & is used to convert the reference returned by cv::Mat::at() to a pointer). + if you need to process a whole row of matrix, the most efficient way is to get + the pointer to the row first, and then just use plain C operator []: + + \code + // compute sum of positive matrix elements + // (assuming that M is double-precision matrix) + double sum=0; + for(int i = 0; i < M.rows; i++) + { + const double* Mi = M.ptr(i); + for(int j = 0; j < M.cols; j++) + sum += std::max(Mi[j], 0.); + } + \endcode + + Some operations, like the above one, do not actually depend on the matrix shape, + they just process elements of a matrix one by one (or elements from multiple matrices + that are sitting in the same place, e.g. matrix addition). Such operations are called + element-wise and it makes sense to check whether all the input/output matrices are continuous, + i.e. have no gaps in the end of each row, and if yes, process them as a single long row: + + \code + // compute sum of positive matrix elements, optimized variant + double sum=0; + int cols = M.cols, rows = M.rows; + if(M.isContinuous()) + { + cols *= rows; + rows = 1; + } + for(int i = 0; i < rows; i++) + { + const double* Mi = M.ptr(i); + for(int j = 0; j < cols; j++) + sum += std::max(Mi[j], 0.); + } + \endcode + in the case of continuous matrix the outer loop body will be executed just once, + so the overhead will be smaller, which will be especially noticeable in the case of small matrices. + + Finally, there are STL-style iterators that are smart enough to skip gaps between successive rows: + \code + // compute sum of positive matrix elements, iterator-based variant + double sum=0; + MatConstIterator_ it = M.begin(), it_end = M.end(); + for(; it != it_end; ++it) + sum += std::max(*it, 0.); + \endcode + + The matrix iterators are random-access iterators, so they can be passed + to any STL algorithm, including std::sort(). +*/ +class CV_EXPORTS Mat +{ +public: + //! default constructor + Mat(); + //! constructs 2D matrix of the specified size and type + // (_type is CV_8UC1, CV_64FC3, CV_32SC(12) etc.) + Mat(int rows, int cols, int type); + Mat(Size size, int type); + //! constucts 2D matrix and fills it with the specified value _s. 
+ Mat(int rows, int cols, int type, const Scalar& s); + Mat(Size size, int type, const Scalar& s); + + //! constructs n-dimensional matrix + Mat(int ndims, const int* sizes, int type); + Mat(int ndims, const int* sizes, int type, const Scalar& s); + + //! copy constructor + Mat(const Mat& m); + //! constructor for matrix headers pointing to user-allocated data + Mat(int rows, int cols, int type, void* data, size_t step=AUTO_STEP); + Mat(Size size, int type, void* data, size_t step=AUTO_STEP); + Mat(int ndims, const int* sizes, int type, void* data, const size_t* steps=0); + + //! creates a matrix header for a part of the bigger matrix + Mat(const Mat& m, const Range& rowRange, const Range& colRange=Range::all()); + Mat(const Mat& m, const Rect& roi); + Mat(const Mat& m, const Range* ranges); + //! converts old-style CvMat to the new matrix; the data is not copied by default + Mat(const CvMat* m, bool copyData=false); + //! converts old-style CvMatND to the new matrix; the data is not copied by default + Mat(const CvMatND* m, bool copyData=false); + //! converts old-style IplImage to the new matrix; the data is not copied by default + Mat(const IplImage* img, bool copyData=false); + //! builds matrix from std::vector with or without copying the data + template explicit Mat(const vector<_Tp>& vec, bool copyData=false); + //! builds matrix from cv::Vec; the data is copied by default + template explicit Mat(const Vec<_Tp, n>& vec, bool copyData=true); + //! builds matrix from cv::Matx; the data is copied by default + template explicit Mat(const Matx<_Tp, m, n>& mtx, bool copyData=true); + //! builds matrix from a 2D point + template explicit Mat(const Point_<_Tp>& pt, bool copyData=true); + //! builds matrix from a 3D point + template explicit Mat(const Point3_<_Tp>& pt, bool copyData=true); + //! builds matrix from comma initializer + template explicit Mat(const MatCommaInitializer_<_Tp>& commaInitializer); + + //! download data from GpuMat + explicit Mat(const gpu::GpuMat& m); + + //! destructor - calls release() + ~Mat(); + //! assignment operators + Mat& operator = (const Mat& m); + Mat& operator = (const MatExpr& expr); + + //! returns a new matrix header for the specified row + Mat row(int y) const; + //! returns a new matrix header for the specified column + Mat col(int x) const; + //! ... for the specified row span + Mat rowRange(int startrow, int endrow) const; + Mat rowRange(const Range& r) const; + //! ... for the specified column span + Mat colRange(int startcol, int endcol) const; + Mat colRange(const Range& r) const; + //! ... for the specified diagonal + // (d=0 - the main diagonal, + // >0 - a diagonal from the lower half, + // <0 - a diagonal from the upper half) + Mat diag(int d=0) const; + //! constructs a square diagonal matrix which main diagonal is vector "d" + static Mat diag(const Mat& d); + + //! returns deep copy of the matrix, i.e. the data is copied + Mat clone() const; + //! copies the matrix content to "m". + // It calls m.create(this->size(), this->type()). + void copyTo( OutputArray m ) const; + //! copies those matrix elements to "m" that are marked with non-zero mask elements. + void copyTo( OutputArray m, InputArray mask ) const; + //! converts matrix to another datatype with optional scalng. See cvConvertScale. + void convertTo( OutputArray m, int rtype, double alpha=1, double beta=0 ) const; + + void assignTo( Mat& m, int type=-1 ) const; + + //! sets every matrix element to s + Mat& operator = (const Scalar& s); + //! 
sets some of the matrix elements to s, according to the mask + Mat& setTo(InputArray value, InputArray mask=noArray()); + //! creates alternative matrix header for the same data, with different + // number of channels and/or different number of rows. see cvReshape. + Mat reshape(int cn, int rows=0) const; + Mat reshape(int cn, int newndims, const int* newsz) const; + + //! matrix transposition by means of matrix expressions + MatExpr t() const; + //! matrix inversion by means of matrix expressions + MatExpr inv(int method=DECOMP_LU) const; + //! per-element matrix multiplication by means of matrix expressions + MatExpr mul(InputArray m, double scale=1) const; + + //! computes cross-product of 2 3D vectors + Mat cross(InputArray m) const; + //! computes dot-product + double dot(InputArray m) const; + + //! Matlab-style matrix initialization + static MatExpr zeros(int rows, int cols, int type); + static MatExpr zeros(Size size, int type); + static MatExpr zeros(int ndims, const int* sz, int type); + static MatExpr ones(int rows, int cols, int type); + static MatExpr ones(Size size, int type); + static MatExpr ones(int ndims, const int* sz, int type); + static MatExpr eye(int rows, int cols, int type); + static MatExpr eye(Size size, int type); + + //! allocates new matrix data unless the matrix already has specified size and type. + // previous data is unreferenced if needed. + void create(int rows, int cols, int type); + void create(Size size, int type); + void create(int ndims, const int* sizes, int type); + + //! increases the reference counter; use with care to avoid memleaks + void addref(); + //! decreases reference counter; + // deallocates the data when reference counter reaches 0. + void release(); + + //! deallocates the matrix data + void deallocate(); + //! internal use function; properly re-allocates _size, _step arrays + void copySize(const Mat& m); + + //! reserves enough space to fit sz hyper-planes + void reserve(size_t sz); + //! resizes matrix to the specified number of hyper-planes + void resize(size_t sz); + //! resizes matrix to the specified number of hyper-planes; initializes the newly added elements + void resize(size_t sz, const Scalar& s); + //! internal function + void push_back_(const void* elem); + //! adds element to the end of 1d matrix (or possibly multiple elements when _Tp=Mat) + template void push_back(const _Tp& elem); + template void push_back(const Mat_<_Tp>& elem); + void push_back(const Mat& m); + //! removes several hyper-planes from bottom of the matrix + void pop_back(size_t nelems=1); + + //! locates matrix header within a parent matrix. See below + void locateROI( Size& wholeSize, Point& ofs ) const; + //! moves/resizes the current matrix ROI inside the parent matrix. + Mat& adjustROI( int dtop, int dbottom, int dleft, int dright ); + //! extracts a rectangular sub-matrix + // (this is a generalized form of row, rowRange etc.) + Mat operator()( Range rowRange, Range colRange ) const; + Mat operator()( const Rect& roi ) const; + Mat operator()( const Range* ranges ) const; + + //! converts header to CvMat; no data is copied + operator CvMat() const; + //! converts header to CvMatND; no data is copied + operator CvMatND() const; + //! converts header to IplImage; no data is copied + operator IplImage() const; + + template operator vector<_Tp>() const; + template operator Vec<_Tp, n>() const; + template operator Matx<_Tp, m, n>() const; + + //! returns true iff the matrix data is continuous + // (i.e. when there are no gaps between successive rows). 
+ // similar to CV_IS_MAT_CONT(cvmat->type) + bool isContinuous() const; + + //! returns true if the matrix is a submatrix of another matrix + bool isSubmatrix() const; + + //! returns element size in bytes, + // similar to CV_ELEM_SIZE(cvmat->type) + size_t elemSize() const; + //! returns the size of element channel in bytes. + size_t elemSize1() const; + //! returns element type, similar to CV_MAT_TYPE(cvmat->type) + int type() const; + //! returns element type, similar to CV_MAT_DEPTH(cvmat->type) + int depth() const; + //! returns element type, similar to CV_MAT_CN(cvmat->type) + int channels() const; + //! returns step/elemSize1() + size_t step1(int i=0) const; + //! returns true if matrix data is NULL + bool empty() const; + //! returns the total number of matrix elements + size_t total() const; + + //! returns N if the matrix is 1-channel (N x ptdim) or ptdim-channel (1 x N) or (N x 1); negative number otherwise + int checkVector(int elemChannels, int depth=-1, bool requireContinuous=true) const; + + //! returns pointer to i0-th submatrix along the dimension #0 + uchar* ptr(int i0=0); + const uchar* ptr(int i0=0) const; + + //! returns pointer to (i0,i1) submatrix along the dimensions #0 and #1 + uchar* ptr(int i0, int i1); + const uchar* ptr(int i0, int i1) const; + + //! returns pointer to (i0,i1,i3) submatrix along the dimensions #0, #1, #2 + uchar* ptr(int i0, int i1, int i2); + const uchar* ptr(int i0, int i1, int i2) const; + + //! returns pointer to the matrix element + uchar* ptr(const int* idx); + //! returns read-only pointer to the matrix element + const uchar* ptr(const int* idx) const; + + template uchar* ptr(const Vec& idx); + template const uchar* ptr(const Vec& idx) const; + + //! template version of the above method + template _Tp* ptr(int i0=0); + template const _Tp* ptr(int i0=0) const; + + template _Tp* ptr(int i0, int i1); + template const _Tp* ptr(int i0, int i1) const; + + template _Tp* ptr(int i0, int i1, int i2); + template const _Tp* ptr(int i0, int i1, int i2) const; + + template _Tp* ptr(const int* idx); + template const _Tp* ptr(const int* idx) const; + + template _Tp* ptr(const Vec& idx); + template const _Tp* ptr(const Vec& idx) const; + + //! the same as above, with the pointer dereferencing + template _Tp& at(int i0=0); + template const _Tp& at(int i0=0) const; + + template _Tp& at(int i0, int i1); + template const _Tp& at(int i0, int i1) const; + + template _Tp& at(int i0, int i1, int i2); + template const _Tp& at(int i0, int i1, int i2) const; + + template _Tp& at(const int* idx); + template const _Tp& at(const int* idx) const; + + template _Tp& at(const Vec& idx); + template const _Tp& at(const Vec& idx) const; + + //! special versions for 2D arrays (especially convenient for referencing image pixels) + template _Tp& at(Point pt); + template const _Tp& at(Point pt) const; + + //! template methods for iteration over matrix elements. + // the iterators take care of skipping gaps in the end of rows (if any) + template MatIterator_<_Tp> begin(); + template MatIterator_<_Tp> end(); + template MatConstIterator_<_Tp> begin() const; + template MatConstIterator_<_Tp> end() const; + + enum { MAGIC_VAL=0x42FF0000, AUTO_STEP=0, CONTINUOUS_FLAG=CV_MAT_CONT_FLAG, SUBMATRIX_FLAG=CV_SUBMAT_FLAG }; + + /*! includes several bit-fields: + - the magic signature + - continuity flag + - depth + - number of channels + */ + int flags; + //! the matrix dimensionality, >= 2 + int dims; + //! 
the number of rows and columns or (-1, -1) when the matrix has more than 2 dimensions + int rows, cols; + //! pointer to the data + uchar* data; + + //! pointer to the reference counter; + // when matrix points to user-allocated data, the pointer is NULL + int* refcount; + + //! helper fields used in locateROI and adjustROI + uchar* datastart; + uchar* dataend; + uchar* datalimit; + + //! custom allocator + MatAllocator* allocator; + + struct CV_EXPORTS MSize + { + MSize(int* _p); + Size operator()() const; + const int& operator[](int i) const; + int& operator[](int i); + operator const int*() const; + bool operator == (const MSize& sz) const; + bool operator != (const MSize& sz) const; + + int* p; + }; + + struct CV_EXPORTS MStep + { + MStep(); + MStep(size_t s); + const size_t& operator[](int i) const; + size_t& operator[](int i); + operator size_t() const; + MStep& operator = (size_t s); + + size_t* p; + size_t buf[2]; + protected: + MStep& operator = (const MStep&); + }; + + MSize size; + MStep step; + +protected: + void initEmpty(); +}; + + +/*! + Random Number Generator + + The class implements RNG using Multiply-with-Carry algorithm +*/ +class CV_EXPORTS RNG +{ +public: + enum { UNIFORM=0, NORMAL=1 }; + + RNG(); + RNG(uint64 state); + //! updates the state and returns the next 32-bit unsigned integer random number + unsigned next(); + + operator uchar(); + operator schar(); + operator ushort(); + operator short(); + operator unsigned(); + //! returns a random integer sampled uniformly from [0, N). + unsigned operator ()(unsigned N); + unsigned operator ()(); + operator int(); + operator float(); + operator double(); + //! returns uniformly distributed integer random number from [a,b) range + int uniform(int a, int b); + //! returns uniformly distributed floating-point random number from [a,b) range + float uniform(float a, float b); + //! returns uniformly distributed double-precision floating-point random number from [a,b) range + double uniform(double a, double b); + void fill( InputOutputArray mat, int distType, InputArray a, InputArray b, bool saturateRange=false ); + //! returns Gaussian random variate with mean zero. + double gaussian(double sigma); + + uint64 state; +}; + +/*! + Random Number Generator - MT + + The class implements RNG using the Mersenne Twister algorithm +*/ +class CV_EXPORTS RNG_MT19937 +{ +public: + RNG_MT19937(); + RNG_MT19937(unsigned s); + void seed(unsigned s); + + unsigned next(); + + operator int(); + operator unsigned(); + operator float(); + operator double(); + + unsigned operator ()(unsigned N); + unsigned operator ()(); + + //! returns uniformly distributed integer random number from [a,b) range + int uniform(int a, int b); + //! returns uniformly distributed floating-point random number from [a,b) range + float uniform(float a, float b); + //! returns uniformly distributed double-precision floating-point random number from [a,b) range + double uniform(double a, double b); + +private: + enum PeriodParameters {N = 624, M = 397}; + unsigned state[N]; + int mti; +}; + +/*! + Termination criteria in iterative algorithms + */ +class CV_EXPORTS TermCriteria +{ +public: + enum + { + COUNT=1, //!< the maximum number of iterations or elements to compute + MAX_ITER=COUNT, //!< ditto + EPS=2 //!< the desired accuracy or change in parameters at which the iterative algorithm stops + }; + + //! default constructor + TermCriteria(); + //! full constructor + TermCriteria(int type, int maxCount, double epsilon); + //! 
conversion from CvTermCriteria + TermCriteria(const CvTermCriteria& criteria); + //! conversion to CvTermCriteria + operator CvTermCriteria() const; + + int type; //!< the type of termination criteria: COUNT, EPS or COUNT + EPS + int maxCount; // the maximum number of iterations/elements + double epsilon; // the desired accuracy +}; + + +typedef void (*BinaryFunc)(const uchar* src1, size_t step1, + const uchar* src2, size_t step2, + uchar* dst, size_t step, Size sz, + void*); + +CV_EXPORTS BinaryFunc getConvertFunc(int sdepth, int ddepth); +CV_EXPORTS BinaryFunc getConvertScaleFunc(int sdepth, int ddepth); +CV_EXPORTS BinaryFunc getCopyMaskFunc(size_t esz); + +//! swaps two matrices +CV_EXPORTS void swap(Mat& a, Mat& b); + +//! converts array (CvMat or IplImage) to cv::Mat +CV_EXPORTS Mat cvarrToMat(const CvArr* arr, bool copyData=false, + bool allowND=true, int coiMode=0); +//! extracts Channel of Interest from CvMat or IplImage and makes cv::Mat out of it. +CV_EXPORTS void extractImageCOI(const CvArr* arr, OutputArray coiimg, int coi=-1); +//! inserts single-channel cv::Mat into a multi-channel CvMat or IplImage +CV_EXPORTS void insertImageCOI(InputArray coiimg, CvArr* arr, int coi=-1); + +//! adds one matrix to another (dst = src1 + src2) +CV_EXPORTS_W void add(InputArray src1, InputArray src2, OutputArray dst, + InputArray mask=noArray(), int dtype=-1); +//! subtracts one matrix from another (dst = src1 - src2) +CV_EXPORTS_W void subtract(InputArray src1, InputArray src2, OutputArray dst, + InputArray mask=noArray(), int dtype=-1); + +//! computes element-wise weighted product of the two arrays (dst = scale*src1*src2) +CV_EXPORTS_W void multiply(InputArray src1, InputArray src2, + OutputArray dst, double scale=1, int dtype=-1); + +//! computes element-wise weighted quotient of the two arrays (dst = scale*src1/src2) +CV_EXPORTS_W void divide(InputArray src1, InputArray src2, OutputArray dst, + double scale=1, int dtype=-1); + +//! computes element-wise weighted reciprocal of an array (dst = scale/src2) +CV_EXPORTS_W void divide(double scale, InputArray src2, + OutputArray dst, int dtype=-1); + +//! adds scaled array to another one (dst = alpha*src1 + src2) +CV_EXPORTS_W void scaleAdd(InputArray src1, double alpha, InputArray src2, OutputArray dst); + +//! computes weighted sum of two arrays (dst = alpha*src1 + beta*src2 + gamma) +CV_EXPORTS_W void addWeighted(InputArray src1, double alpha, InputArray src2, + double beta, double gamma, OutputArray dst, int dtype=-1); + +//! scales array elements, computes absolute values and converts the results to 8-bit unsigned integers: dst(i)=saturate_castabs(src(i)*alpha+beta) +CV_EXPORTS_W void convertScaleAbs(InputArray src, OutputArray dst, + double alpha=1, double beta=0); +//! transforms array of numbers using a lookup table: dst(i)=lut(src(i)) +CV_EXPORTS_W void LUT(InputArray src, InputArray lut, OutputArray dst, + int interpolation=0); + +//! computes sum of array elements +CV_EXPORTS_AS(sumElems) Scalar sum(InputArray src); +//! computes the number of nonzero array elements +CV_EXPORTS_W int countNonZero( InputArray src ); +//! returns the list of locations of non-zero pixels +CV_EXPORTS_W void findNonZero( InputArray src, OutputArray idx ); + +//! computes mean value of selected array elements +CV_EXPORTS_W Scalar mean(InputArray src, InputArray mask=noArray()); +//! 
computes mean value and standard deviation of all or selected array elements +CV_EXPORTS_W void meanStdDev(InputArray src, OutputArray mean, OutputArray stddev, + InputArray mask=noArray()); +//! computes norm of the selected array part +CV_EXPORTS_W double norm(InputArray src1, int normType=NORM_L2, InputArray mask=noArray()); +//! computes norm of selected part of the difference between two arrays +CV_EXPORTS_W double norm(InputArray src1, InputArray src2, + int normType=NORM_L2, InputArray mask=noArray()); + +//! naive nearest neighbor finder +CV_EXPORTS_W void batchDistance(InputArray src1, InputArray src2, + OutputArray dist, int dtype, OutputArray nidx, + int normType=NORM_L2, int K=0, + InputArray mask=noArray(), int update=0, + bool crosscheck=false); + +//! scales and shifts array elements so that either the specified norm (alpha) or the minimum (alpha) and maximum (beta) array values get the specified values +CV_EXPORTS_W void normalize( InputArray src, OutputArray dst, double alpha=1, double beta=0, + int norm_type=NORM_L2, int dtype=-1, InputArray mask=noArray()); + +//! finds global minimum and maximum array elements and returns their values and their locations +CV_EXPORTS_W void minMaxLoc(InputArray src, CV_OUT double* minVal, + CV_OUT double* maxVal=0, CV_OUT Point* minLoc=0, + CV_OUT Point* maxLoc=0, InputArray mask=noArray()); +CV_EXPORTS void minMaxIdx(InputArray src, double* minVal, double* maxVal, + int* minIdx=0, int* maxIdx=0, InputArray mask=noArray()); + +//! transforms 2D matrix to 1D row or column vector by taking sum, minimum, maximum or mean value over all the rows +CV_EXPORTS_W void reduce(InputArray src, OutputArray dst, int dim, int rtype, int dtype=-1); + +//! makes multi-channel array out of several single-channel arrays +CV_EXPORTS void merge(const Mat* mv, size_t count, OutputArray dst); +CV_EXPORTS void merge(const vector& mv, OutputArray dst ); + +//! makes multi-channel array out of several single-channel arrays +CV_EXPORTS_W void merge(InputArrayOfArrays mv, OutputArray dst); + +//! copies each plane of a multi-channel array to a dedicated array +CV_EXPORTS void split(const Mat& src, Mat* mvbegin); +CV_EXPORTS void split(const Mat& m, vector& mv ); + +//! copies each plane of a multi-channel array to a dedicated array +CV_EXPORTS_W void split(InputArray m, OutputArrayOfArrays mv); + +//! copies selected channels from the input arrays to the selected channels of the output arrays +CV_EXPORTS void mixChannels(const Mat* src, size_t nsrcs, Mat* dst, size_t ndsts, + const int* fromTo, size_t npairs); +CV_EXPORTS void mixChannels(const vector& src, vector& dst, + const int* fromTo, size_t npairs); +CV_EXPORTS_W void mixChannels(InputArrayOfArrays src, InputArrayOfArrays dst, + const vector& fromTo); + +//! extracts a single channel from src (coi is 0-based index) +CV_EXPORTS_W void extractChannel(InputArray src, OutputArray dst, int coi); + +//! inserts a single channel to dst (coi is 0-based index) +CV_EXPORTS_W void insertChannel(InputArray src, InputOutputArray dst, int coi); + +//! reverses the order of the rows, columns or both in a matrix +CV_EXPORTS_W void flip(InputArray src, OutputArray dst, int flipCode); + +//! 
replicates the input matrix the specified number of times in the horizontal and/or vertical direction +CV_EXPORTS_W void repeat(InputArray src, int ny, int nx, OutputArray dst); +CV_EXPORTS Mat repeat(const Mat& src, int ny, int nx); + +CV_EXPORTS void hconcat(const Mat* src, size_t nsrc, OutputArray dst); +CV_EXPORTS void hconcat(InputArray src1, InputArray src2, OutputArray dst); +CV_EXPORTS_W void hconcat(InputArrayOfArrays src, OutputArray dst); + +CV_EXPORTS void vconcat(const Mat* src, size_t nsrc, OutputArray dst); +CV_EXPORTS void vconcat(InputArray src1, InputArray src2, OutputArray dst); +CV_EXPORTS_W void vconcat(InputArrayOfArrays src, OutputArray dst); + +//! computes bitwise conjunction of the two arrays (dst = src1 & src2) +CV_EXPORTS_W void bitwise_and(InputArray src1, InputArray src2, + OutputArray dst, InputArray mask=noArray()); +//! computes bitwise disjunction of the two arrays (dst = src1 | src2) +CV_EXPORTS_W void bitwise_or(InputArray src1, InputArray src2, + OutputArray dst, InputArray mask=noArray()); +//! computes bitwise exclusive-or of the two arrays (dst = src1 ^ src2) +CV_EXPORTS_W void bitwise_xor(InputArray src1, InputArray src2, + OutputArray dst, InputArray mask=noArray()); +//! inverts each bit of array (dst = ~src) +CV_EXPORTS_W void bitwise_not(InputArray src, OutputArray dst, + InputArray mask=noArray()); +//! computes element-wise absolute difference of two arrays (dst = abs(src1 - src2)) +CV_EXPORTS_W void absdiff(InputArray src1, InputArray src2, OutputArray dst); +//! set mask elements for those array elements which are within the element-specific bounding box (dst = lowerb <= src && src < upperb) +CV_EXPORTS_W void inRange(InputArray src, InputArray lowerb, + InputArray upperb, OutputArray dst); +//! compares elements of two arrays (dst = src1 \ src2) +CV_EXPORTS_W void compare(InputArray src1, InputArray src2, OutputArray dst, int cmpop); +//! computes per-element minimum of two arrays (dst = min(src1, src2)) +CV_EXPORTS_W void min(InputArray src1, InputArray src2, OutputArray dst); +//! computes per-element maximum of two arrays (dst = max(src1, src2)) +CV_EXPORTS_W void max(InputArray src1, InputArray src2, OutputArray dst); + +//! computes per-element minimum of two arrays (dst = min(src1, src2)) +CV_EXPORTS void min(const Mat& src1, const Mat& src2, Mat& dst); +//! computes per-element minimum of array and scalar (dst = min(src1, src2)) +CV_EXPORTS void min(const Mat& src1, double src2, Mat& dst); +//! computes per-element maximum of two arrays (dst = max(src1, src2)) +CV_EXPORTS void max(const Mat& src1, const Mat& src2, Mat& dst); +//! computes per-element maximum of array and scalar (dst = max(src1, src2)) +CV_EXPORTS void max(const Mat& src1, double src2, Mat& dst); + +//! computes square root of each matrix element (dst = src**0.5) +CV_EXPORTS_W void sqrt(InputArray src, OutputArray dst); +//! raises the input matrix elements to the specified power (b = a**power) +CV_EXPORTS_W void pow(InputArray src, double power, OutputArray dst); +//! computes exponent of each matrix element (dst = e**src) +CV_EXPORTS_W void exp(InputArray src, OutputArray dst); +//! computes natural logarithm of absolute value of each matrix element: dst = log(abs(src)) +CV_EXPORTS_W void log(InputArray src, OutputArray dst); +//! computes cube root of the argument +CV_EXPORTS_W float cubeRoot(float val); +//! 
computes the angle in degrees (0..360) of the vector (x,y) +CV_EXPORTS_W float fastAtan2(float y, float x); + +CV_EXPORTS void exp(const float* src, float* dst, int n); +CV_EXPORTS void log(const float* src, float* dst, int n); +CV_EXPORTS void fastAtan2(const float* y, const float* x, float* dst, int n, bool angleInDegrees); +CV_EXPORTS void magnitude(const float* x, const float* y, float* dst, int n); + +//! converts polar coordinates to Cartesian +CV_EXPORTS_W void polarToCart(InputArray magnitude, InputArray angle, + OutputArray x, OutputArray y, bool angleInDegrees=false); +//! converts Cartesian coordinates to polar +CV_EXPORTS_W void cartToPolar(InputArray x, InputArray y, + OutputArray magnitude, OutputArray angle, + bool angleInDegrees=false); +//! computes angle (angle(i)) of each (x(i), y(i)) vector +CV_EXPORTS_W void phase(InputArray x, InputArray y, OutputArray angle, + bool angleInDegrees=false); +//! computes magnitude (magnitude(i)) of each (x(i), y(i)) vector +CV_EXPORTS_W void magnitude(InputArray x, InputArray y, OutputArray magnitude); +//! checks that each matrix element is within the specified range. +CV_EXPORTS_W bool checkRange(InputArray a, bool quiet=true, CV_OUT Point* pos=0, + double minVal=-DBL_MAX, double maxVal=DBL_MAX); +//! converts NaN's to the given number +CV_EXPORTS_W void patchNaNs(InputOutputArray a, double val=0); + +//! implements generalized matrix product algorithm GEMM from BLAS +CV_EXPORTS_W void gemm(InputArray src1, InputArray src2, double alpha, + InputArray src3, double beta, OutputArray dst, int flags=0); +//! multiplies matrix by its transposition from the left or from the right +CV_EXPORTS_W void mulTransposed( InputArray src, OutputArray dst, bool aTa, + InputArray delta=noArray(), + double scale=1, int dtype=-1 ); +//! transposes the matrix +CV_EXPORTS_W void transpose(InputArray src, OutputArray dst); +//! performs affine transformation of each element of multi-channel input matrix +CV_EXPORTS_W void transform(InputArray src, OutputArray dst, InputArray m ); +//! performs perspective transformation of each element of multi-channel input matrix +CV_EXPORTS_W void perspectiveTransform(InputArray src, OutputArray dst, InputArray m ); + +//! extends the symmetrical matrix from the lower half or from the upper half +CV_EXPORTS_W void completeSymm(InputOutputArray mtx, bool lowerToUpper=false); +//! initializes scaled identity matrix +CV_EXPORTS_W void setIdentity(InputOutputArray mtx, const Scalar& s=Scalar(1)); +//! computes determinant of a square matrix +CV_EXPORTS_W double determinant(InputArray mtx); +//! computes trace of a matrix +CV_EXPORTS_W Scalar trace(InputArray mtx); +//! computes inverse or pseudo-inverse matrix +CV_EXPORTS_W double invert(InputArray src, OutputArray dst, int flags=DECOMP_LU); +//! solves linear system or a least-square problem +CV_EXPORTS_W bool solve(InputArray src1, InputArray src2, + OutputArray dst, int flags=DECOMP_LU); + +enum +{ + SORT_EVERY_ROW=0, + SORT_EVERY_COLUMN=1, + SORT_ASCENDING=0, + SORT_DESCENDING=16 +}; + +//! sorts independently each matrix row or each matrix column +CV_EXPORTS_W void sort(InputArray src, OutputArray dst, int flags); +//! sorts independently each matrix row or each matrix column +CV_EXPORTS_W void sortIdx(InputArray src, OutputArray dst, int flags); +//! finds real roots of a cubic polynomial +CV_EXPORTS_W int solveCubic(InputArray coeffs, OutputArray roots); +//! 
finds real and complex roots of a polynomial +CV_EXPORTS_W double solvePoly(InputArray coeffs, OutputArray roots, int maxIters=300); +//! finds eigenvalues of a symmetric matrix +CV_EXPORTS bool eigen(InputArray src, OutputArray eigenvalues, int lowindex=-1, + int highindex=-1); +//! finds eigenvalues and eigenvectors of a symmetric matrix +CV_EXPORTS bool eigen(InputArray src, OutputArray eigenvalues, + OutputArray eigenvectors, + int lowindex=-1, int highindex=-1); +CV_EXPORTS_W bool eigen(InputArray src, bool computeEigenvectors, + OutputArray eigenvalues, OutputArray eigenvectors); + +enum +{ + COVAR_SCRAMBLED=0, + COVAR_NORMAL=1, + COVAR_USE_AVG=2, + COVAR_SCALE=4, + COVAR_ROWS=8, + COVAR_COLS=16 +}; + +//! computes covariation matrix of a set of samples +CV_EXPORTS void calcCovarMatrix( const Mat* samples, int nsamples, Mat& covar, Mat& mean, + int flags, int ctype=CV_64F); +//! computes covariation matrix of a set of samples +CV_EXPORTS_W void calcCovarMatrix( InputArray samples, OutputArray covar, + OutputArray mean, int flags, int ctype=CV_64F); + +/*! + Principal Component Analysis + + The class PCA is used to compute the special basis for a set of vectors. + The basis will consist of eigenvectors of the covariance matrix computed + from the input set of vectors. After PCA is performed, vectors can be transformed from + the original high-dimensional space to the subspace formed by a few most + prominent eigenvectors (called the principal components), + corresponding to the largest eigenvalues of the covariation matrix. + Thus the dimensionality of the vector and the correlation between the coordinates is reduced. + + The following sample is the function that takes two matrices. The first one stores the set + of vectors (a row per vector) that is used to compute PCA, the second one stores another + "test" set of vectors (a row per vector) that are first compressed with PCA, + then reconstructed back and then the reconstruction error norm is computed and printed for each vector. + + \code + using namespace cv; + + PCA compressPCA(const Mat& pcaset, int maxComponents, + const Mat& testset, Mat& compressed) + { + PCA pca(pcaset, // pass the data + Mat(), // we do not have a pre-computed mean vector, + // so let the PCA engine to compute it + CV_PCA_DATA_AS_ROW, // indicate that the vectors + // are stored as matrix rows + // (use CV_PCA_DATA_AS_COL if the vectors are + // the matrix columns) + maxComponents // specify, how many principal components to retain + ); + // if there is no test data, just return the computed basis, ready-to-use + if( !testset.data ) + return pca; + CV_Assert( testset.cols == pcaset.cols ); + + compressed.create(testset.rows, maxComponents, testset.type()); + + Mat reconstructed; + for( int i = 0; i < testset.rows; i++ ) + { + Mat vec = testset.row(i), coeffs = compressed.row(i), reconstructed; + // compress the vector, the result will be stored + // in the i-th row of the output matrix + pca.project(vec, coeffs); + // and then reconstruct it + pca.backProject(coeffs, reconstructed); + // and measure the error + printf("%d. diff = %g\n", i, norm(vec, reconstructed, NORM_L2)); + } + return pca; + } + \endcode +*/ +class CV_EXPORTS PCA +{ +public: + //! default constructor + PCA(); + //! the constructor that performs PCA + PCA(InputArray data, InputArray mean, int flags, int maxComponents=0); + PCA(InputArray data, InputArray mean, int flags, double retainedVariance); + //! operator that performs PCA. 
The previously stored data, if any, is released + PCA& operator()(InputArray data, InputArray mean, int flags, int maxComponents=0); + PCA& computeVar(InputArray data, InputArray mean, int flags, double retainedVariance); + //! projects vector from the original space to the principal components subspace + Mat project(InputArray vec) const; + //! projects vector from the original space to the principal components subspace + void project(InputArray vec, OutputArray result) const; + //! reconstructs the original vector from the projection + Mat backProject(InputArray vec) const; + //! reconstructs the original vector from the projection + void backProject(InputArray vec, OutputArray result) const; + + Mat eigenvectors; //!< eigenvectors of the covariation matrix + Mat eigenvalues; //!< eigenvalues of the covariation matrix + Mat mean; //!< mean value subtracted before the projection and added after the back projection +}; + +CV_EXPORTS_W void PCACompute(InputArray data, CV_OUT InputOutputArray mean, + OutputArray eigenvectors, int maxComponents=0); + +CV_EXPORTS_W void PCAComputeVar(InputArray data, CV_OUT InputOutputArray mean, + OutputArray eigenvectors, double retainedVariance); + +CV_EXPORTS_W void PCAProject(InputArray data, InputArray mean, + InputArray eigenvectors, OutputArray result); + +CV_EXPORTS_W void PCABackProject(InputArray data, InputArray mean, + InputArray eigenvectors, OutputArray result); + + +/*! + Singular Value Decomposition class + + The class is used to compute Singular Value Decomposition of a floating-point matrix and then + use it to solve least-square problems, under-determined linear systems, invert matrices, + compute condition numbers etc. + + For a bit faster operation you can pass flags=SVD::MODIFY_A|... to modify the decomposed matrix + when it is not necessarily to preserve it. If you want to compute condition number of a matrix + or absolute value of its determinant - you do not need SVD::u or SVD::vt, + so you can pass flags=SVD::NO_UV|... . Another flag SVD::FULL_UV indicates that the full-size SVD::u and SVD::vt + must be computed, which is not necessary most of the time. +*/ +class CV_EXPORTS SVD +{ +public: + enum { MODIFY_A=1, NO_UV=2, FULL_UV=4 }; + //! the default constructor + SVD(); + //! the constructor that performs SVD + SVD( InputArray src, int flags=0 ); + //! the operator that performs SVD. The previously allocated SVD::u, SVD::w are SVD::vt are released. + SVD& operator ()( InputArray src, int flags=0 ); + + //! decomposes matrix and stores the results to user-provided matrices + static void compute( InputArray src, OutputArray w, + OutputArray u, OutputArray vt, int flags=0 ); + //! computes singular values of a matrix + static void compute( InputArray src, OutputArray w, int flags=0 ); + //! performs back substitution + static void backSubst( InputArray w, InputArray u, + InputArray vt, InputArray rhs, + OutputArray dst ); + + template static void compute( const Matx<_Tp, m, n>& a, + Matx<_Tp, nm, 1>& w, Matx<_Tp, m, nm>& u, Matx<_Tp, n, nm>& vt ); + template static void compute( const Matx<_Tp, m, n>& a, + Matx<_Tp, nm, 1>& w ); + template static void backSubst( const Matx<_Tp, nm, 1>& w, + const Matx<_Tp, m, nm>& u, const Matx<_Tp, n, nm>& vt, const Matx<_Tp, m, nb>& rhs, Matx<_Tp, n, nb>& dst ); + + //! finds dst = arg min_{|dst|=1} |m*dst| + static void solveZ( InputArray src, OutputArray dst ); + //! 
performs back substitution, so that dst is the solution or pseudo-solution of m*dst = rhs, where m is the decomposed matrix + void backSubst( InputArray rhs, OutputArray dst ) const; + + Mat u, w, vt; +}; + +//! computes SVD of src +CV_EXPORTS_W void SVDecomp( InputArray src, CV_OUT OutputArray w, + CV_OUT OutputArray u, CV_OUT OutputArray vt, int flags=0 ); + +//! performs back substitution for the previously computed SVD +CV_EXPORTS_W void SVBackSubst( InputArray w, InputArray u, InputArray vt, + InputArray rhs, CV_OUT OutputArray dst ); + +//! computes Mahalanobis distance between two vectors: sqrt((v1-v2)'*icovar*(v1-v2)), where icovar is the inverse covariation matrix +CV_EXPORTS_W double Mahalanobis(InputArray v1, InputArray v2, InputArray icovar); +//! a synonym for Mahalanobis +CV_EXPORTS double Mahalonobis(InputArray v1, InputArray v2, InputArray icovar); + +//! performs forward or inverse 1D or 2D Discrete Fourier Transformation +CV_EXPORTS_W void dft(InputArray src, OutputArray dst, int flags=0, int nonzeroRows=0); +//! performs inverse 1D or 2D Discrete Fourier Transformation +CV_EXPORTS_W void idft(InputArray src, OutputArray dst, int flags=0, int nonzeroRows=0); +//! performs forward or inverse 1D or 2D Discrete Cosine Transformation +CV_EXPORTS_W void dct(InputArray src, OutputArray dst, int flags=0); +//! performs inverse 1D or 2D Discrete Cosine Transformation +CV_EXPORTS_W void idct(InputArray src, OutputArray dst, int flags=0); +//! computes element-wise product of the two Fourier spectrums. The second spectrum can optionally be conjugated before the multiplication +CV_EXPORTS_W void mulSpectrums(InputArray a, InputArray b, OutputArray c, + int flags, bool conjB=false); +//! computes the minimal vector size vecsize1 >= vecsize so that the dft() of the vector of length vecsize1 can be computed efficiently +CV_EXPORTS_W int getOptimalDFTSize(int vecsize); + +/*! + Various k-Means flags +*/ +enum +{ + KMEANS_RANDOM_CENTERS=0, // Chooses random centers for k-Means initialization + KMEANS_PP_CENTERS=2, // Uses k-Means++ algorithm for initialization + KMEANS_USE_INITIAL_LABELS=1 // Uses the user-provided labels for K-Means initialization +}; +//! clusters the input data using k-Means algorithm +CV_EXPORTS_W double kmeans( InputArray data, int K, CV_OUT InputOutputArray bestLabels, + TermCriteria criteria, int attempts, + int flags, OutputArray centers=noArray() ); + +//! returns the thread-local Random number generator +CV_EXPORTS RNG& theRNG(); + +//! sets state of the thread-local Random number generator +CV_EXPORTS_W void setRNGSeed(int seed); + +//! returns the next unifomly-distributed random number of the specified type +template static inline _Tp randu() { return (_Tp)theRNG(); } + +//! fills array with uniformly-distributed random numbers from the range [low, high) +CV_EXPORTS_W void randu(InputOutputArray dst, InputArray low, InputArray high); + +//! fills array with normally-distributed random numbers with the specified mean and the standard deviation +CV_EXPORTS_W void randn(InputOutputArray dst, InputArray mean, InputArray stddev); + +//! shuffles the input array elements +CV_EXPORTS void randShuffle(InputOutputArray dst, double iterFactor=1., RNG* rng=0); +CV_EXPORTS_AS(randShuffle) void randShuffle_(InputOutputArray dst, double iterFactor=1.); + +//! draws the line segment (pt1, pt2) in the image +CV_EXPORTS_W void line(CV_IN_OUT Mat& img, Point pt1, Point pt2, const Scalar& color, + int thickness=1, int lineType=8, int shift=0); + +//! 
draws an arrow from pt1 to pt2 in the image +CV_EXPORTS_W void arrowedLine(CV_IN_OUT Mat& img, Point pt1, Point pt2, const Scalar& color, + int thickness=1, int line_type=8, int shift=0, double tipLength=0.1); + +//! draws the rectangle outline or a solid rectangle with the opposite corners pt1 and pt2 in the image +CV_EXPORTS_W void rectangle(CV_IN_OUT Mat& img, Point pt1, Point pt2, + const Scalar& color, int thickness=1, + int lineType=8, int shift=0); + +//! draws the rectangle outline or a solid rectangle covering rec in the image +CV_EXPORTS void rectangle(CV_IN_OUT Mat& img, Rect rec, + const Scalar& color, int thickness=1, + int lineType=8, int shift=0); + +//! draws the circle outline or a solid circle in the image +CV_EXPORTS_W void circle(CV_IN_OUT Mat& img, Point center, int radius, + const Scalar& color, int thickness=1, + int lineType=8, int shift=0); + +//! draws an elliptic arc, ellipse sector or a rotated ellipse in the image +CV_EXPORTS_W void ellipse(CV_IN_OUT Mat& img, Point center, Size axes, + double angle, double startAngle, double endAngle, + const Scalar& color, int thickness=1, + int lineType=8, int shift=0); + +//! draws a rotated ellipse in the image +CV_EXPORTS_W void ellipse(CV_IN_OUT Mat& img, const RotatedRect& box, const Scalar& color, + int thickness=1, int lineType=8); + +/* ----------------------------------------------------------------------------------------- */ +/* ADDING A SET OF PREDEFINED MARKERS WHICH COULD BE USED TO HIGHLIGHT POSITIONS IN AN IMAGE */ +/* ----------------------------------------------------------------------------------------- */ + +//! Possible set of marker types used for the drawMarker function +enum MarkerTypes +{ + MARKER_CROSS = 0, // A crosshair marker shape + MARKER_TILTED_CROSS = 1, // A 45 degree tilted crosshair marker shape + MARKER_STAR = 2, // A star marker shape, combination of cross and tilted cross + MARKER_DIAMOND = 3, // A diamond marker shape + MARKER_SQUARE = 4, // A square marker shape + MARKER_TRIANGLE_UP = 5, // An upwards pointing triangle marker shape + MARKER_TRIANGLE_DOWN = 6 // A downwards pointing triangle marker shape +}; + +/** @brief Draws a marker on a predefined position in an image. + +The function drawMarker draws a marker on a given position in the image. For the moment several +marker types are supported (`MARKER_CROSS`, `MARKER_TILTED_CROSS`, `MARKER_STAR`, `MARKER_DIAMOND`, `MARKER_SQUARE`, +`MARKER_TRIANGLE_UP` and `MARKER_TRIANGLE_DOWN`). + +@param img Image. +@param position The point where the crosshair is positioned. +@param markerType The specific type of marker you want to use, see +@param color Line color. +@param thickness Line thickness. +@param line_type Type of the line, see cv::LineTypes +@param markerSize The length of the marker axis [default = 20 pixels] + */ +CV_EXPORTS_W void drawMarker(CV_IN_OUT Mat& img, Point position, const Scalar& color, + int markerType = MARKER_CROSS, int markerSize=20, int thickness=1, + int line_type=8); + +/* ----------------------------------------------------------------------------------------- */ +/* END OF MARKER SECTION */ +/* ----------------------------------------------------------------------------------------- */ + +//! draws a filled convex polygon in the image +CV_EXPORTS void fillConvexPoly(Mat& img, const Point* pts, int npts, + const Scalar& color, int lineType=8, + int shift=0); +CV_EXPORTS_W void fillConvexPoly(InputOutputArray img, InputArray points, + const Scalar& color, int lineType=8, + int shift=0); + +//! 
fills an area bounded by one or more polygons +CV_EXPORTS void fillPoly(Mat& img, const Point** pts, + const int* npts, int ncontours, + const Scalar& color, int lineType=8, int shift=0, + Point offset=Point() ); + +CV_EXPORTS_W void fillPoly(InputOutputArray img, InputArrayOfArrays pts, + const Scalar& color, int lineType=8, int shift=0, + Point offset=Point() ); + +//! draws one or more polygonal curves +CV_EXPORTS void polylines(Mat& img, const Point** pts, const int* npts, + int ncontours, bool isClosed, const Scalar& color, + int thickness=1, int lineType=8, int shift=0 ); + +CV_EXPORTS_W void polylines(InputOutputArray img, InputArrayOfArrays pts, + bool isClosed, const Scalar& color, + int thickness=1, int lineType=8, int shift=0 ); + +//! clips the line segment by the rectangle Rect(0, 0, imgSize.width, imgSize.height) +CV_EXPORTS bool clipLine(Size imgSize, CV_IN_OUT Point& pt1, CV_IN_OUT Point& pt2); + +//! clips the line segment by the rectangle imgRect +CV_EXPORTS_W bool clipLine(Rect imgRect, CV_OUT CV_IN_OUT Point& pt1, CV_OUT CV_IN_OUT Point& pt2); + +/*! + Line iterator class + + The class is used to iterate over all the pixels on the raster line + segment connecting two specified points. +*/ +class CV_EXPORTS LineIterator +{ +public: + //! intializes the iterator + LineIterator( const Mat& img, Point pt1, Point pt2, + int connectivity=8, bool leftToRight=false ); + //! returns pointer to the current pixel + uchar* operator *(); + //! prefix increment operator (++it). shifts iterator to the next pixel + LineIterator& operator ++(); + //! postfix increment operator (it++). shifts iterator to the next pixel + LineIterator operator ++(int); + //! returns coordinates of the current pixel + Point pos() const; + + uchar* ptr; + const uchar* ptr0; + int step, elemSize; + int err, count; + int minusDelta, plusDelta; + int minusStep, plusStep; +}; + +//! converts elliptic arc to a polygonal curve +CV_EXPORTS_W void ellipse2Poly( Point center, Size axes, int angle, + int arcStart, int arcEnd, int delta, + CV_OUT vector& pts ); + +enum +{ + FONT_HERSHEY_SIMPLEX = 0, + FONT_HERSHEY_PLAIN = 1, + FONT_HERSHEY_DUPLEX = 2, + FONT_HERSHEY_COMPLEX = 3, + FONT_HERSHEY_TRIPLEX = 4, + FONT_HERSHEY_COMPLEX_SMALL = 5, + FONT_HERSHEY_SCRIPT_SIMPLEX = 6, + FONT_HERSHEY_SCRIPT_COMPLEX = 7, + FONT_ITALIC = 16 +}; + +//! renders text string in the image +CV_EXPORTS_W void putText( Mat& img, const string& text, Point org, + int fontFace, double fontScale, Scalar color, + int thickness=1, int lineType=8, + bool bottomLeftOrigin=false ); + +//! returns bounding box of the text string +CV_EXPORTS_W Size getTextSize(const string& text, int fontFace, + double fontScale, int thickness, + CV_OUT int* baseLine); + +///////////////////////////////// Mat_<_Tp> //////////////////////////////////// + +/*! + Template matrix class derived from Mat + + The class Mat_ is a "thin" template wrapper on top of cv::Mat. It does not have any extra data fields, + nor it or cv::Mat have any virtual methods and thus references or pointers to these two classes + can be safely converted one to another. But do it with care, for example: + + \code + // create 100x100 8-bit matrix + Mat M(100,100,CV_8U); + // this will compile fine. no any data conversion will be done. 
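+    // (note: M allocates 8-bit elements, while the Mat_<float> reference created below
+    //  reinterprets the same buffer as 32-bit floats, so the float write that follows
+    //  runs past the end of the allocated data)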
+ Mat_& M1 = (Mat_&)M; + // the program will likely crash at the statement below + M1(99,99) = 1.f; + \endcode + + While cv::Mat is sufficient in most cases, cv::Mat_ can be more convenient if you use a lot of element + access operations and if you know matrix type at compile time. + Note that cv::Mat::at\<_Tp\>(int y, int x) and cv::Mat_\<_Tp\>::operator ()(int y, int x) do absolutely the + same thing and run at the same speed, but the latter is certainly shorter: + + \code + Mat_ M(20,20); + for(int i = 0; i < M.rows; i++) + for(int j = 0; j < M.cols; j++) + M(i,j) = 1./(i+j+1); + Mat E, V; + eigen(M,E,V); + cout << E.at(0,0)/E.at(M.rows-1,0); + \endcode + + It is easy to use Mat_ for multi-channel images/matrices - just pass cv::Vec as cv::Mat_ template parameter: + + \code + // allocate 320x240 color image and fill it with green (in RGB space) + Mat_ img(240, 320, Vec3b(0,255,0)); + // now draw a diagonal white line + for(int i = 0; i < 100; i++) + img(i,i)=Vec3b(255,255,255); + // and now modify the 2nd (red) channel of each pixel + for(int i = 0; i < img.rows; i++) + for(int j = 0; j < img.cols; j++) + img(i,j)[2] ^= (uchar)(i ^ j); // img(y,x)[c] accesses c-th channel of the pixel (x,y) + \endcode +*/ +template class Mat_ : public Mat +{ +public: + typedef _Tp value_type; + typedef typename DataType<_Tp>::channel_type channel_type; + typedef MatIterator_<_Tp> iterator; + typedef MatConstIterator_<_Tp> const_iterator; + + //! default constructor + Mat_(); + //! equivalent to Mat(_rows, _cols, DataType<_Tp>::type) + Mat_(int _rows, int _cols); + //! constructor that sets each matrix element to specified value + Mat_(int _rows, int _cols, const _Tp& value); + //! equivalent to Mat(_size, DataType<_Tp>::type) + explicit Mat_(Size _size); + //! constructor that sets each matrix element to specified value + Mat_(Size _size, const _Tp& value); + //! n-dim array constructor + Mat_(int _ndims, const int* _sizes); + //! n-dim array constructor that sets each matrix element to specified value + Mat_(int _ndims, const int* _sizes, const _Tp& value); + //! copy/conversion contructor. If m is of different type, it's converted + Mat_(const Mat& m); + //! copy constructor + Mat_(const Mat_& m); + //! constructs a matrix on top of user-allocated data. step is in bytes(!!!), regardless of the type + Mat_(int _rows, int _cols, _Tp* _data, size_t _step=AUTO_STEP); + //! constructs n-dim matrix on top of user-allocated data. steps are in bytes(!!!), regardless of the type + Mat_(int _ndims, const int* _sizes, _Tp* _data, const size_t* _steps=0); + //! selects a submatrix + Mat_(const Mat_& m, const Range& rowRange, const Range& colRange=Range::all()); + //! selects a submatrix + Mat_(const Mat_& m, const Rect& roi); + //! selects a submatrix, n-dim version + Mat_(const Mat_& m, const Range* ranges); + //! from a matrix expression + explicit Mat_(const MatExpr& e); + //! makes a matrix out of Vec, std::vector, Point_ or Point3_. The matrix will have a single column + explicit Mat_(const vector<_Tp>& vec, bool copyData=false); + template explicit Mat_(const Vec::channel_type, n>& vec, bool copyData=true); + template explicit Mat_(const Matx::channel_type, m, n>& mtx, bool copyData=true); + explicit Mat_(const Point_::channel_type>& pt, bool copyData=true); + explicit Mat_(const Point3_::channel_type>& pt, bool copyData=true); + explicit Mat_(const MatCommaInitializer_<_Tp>& commaInitializer); + + Mat_& operator = (const Mat& m); + Mat_& operator = (const Mat_& m); + //! set all the elements to s. 
+ Mat_& operator = (const _Tp& s); + //! assign a matrix expression + Mat_& operator = (const MatExpr& e); + + //! iterators; they are smart enough to skip gaps in the end of rows + iterator begin(); + iterator end(); + const_iterator begin() const; + const_iterator end() const; + + //! equivalent to Mat::create(_rows, _cols, DataType<_Tp>::type) + void create(int _rows, int _cols); + //! equivalent to Mat::create(_size, DataType<_Tp>::type) + void create(Size _size); + //! equivalent to Mat::create(_ndims, _sizes, DatType<_Tp>::type) + void create(int _ndims, const int* _sizes); + //! cross-product + Mat_ cross(const Mat_& m) const; + //! data type conversion + template operator Mat_() const; + //! overridden forms of Mat::row() etc. + Mat_ row(int y) const; + Mat_ col(int x) const; + Mat_ diag(int d=0) const; + Mat_ clone() const; + + //! overridden forms of Mat::elemSize() etc. + size_t elemSize() const; + size_t elemSize1() const; + int type() const; + int depth() const; + int channels() const; + size_t step1(int i=0) const; + //! returns step()/sizeof(_Tp) + size_t stepT(int i=0) const; + + //! overridden forms of Mat::zeros() etc. Data type is omitted, of course + static MatExpr zeros(int rows, int cols); + static MatExpr zeros(Size size); + static MatExpr zeros(int _ndims, const int* _sizes); + static MatExpr ones(int rows, int cols); + static MatExpr ones(Size size); + static MatExpr ones(int _ndims, const int* _sizes); + static MatExpr eye(int rows, int cols); + static MatExpr eye(Size size); + + //! some more overriden methods + Mat_& adjustROI( int dtop, int dbottom, int dleft, int dright ); + Mat_ operator()( const Range& rowRange, const Range& colRange ) const; + Mat_ operator()( const Rect& roi ) const; + Mat_ operator()( const Range* ranges ) const; + + //! more convenient forms of row and element access operators + _Tp* operator [](int y); + const _Tp* operator [](int y) const; + + //! returns reference to the specified element + _Tp& operator ()(const int* idx); + //! returns read-only reference to the specified element + const _Tp& operator ()(const int* idx) const; + + //! returns reference to the specified element + template _Tp& operator ()(const Vec& idx); + //! returns read-only reference to the specified element + template const _Tp& operator ()(const Vec& idx) const; + + //! returns reference to the specified element (1D case) + _Tp& operator ()(int idx0); + //! returns read-only reference to the specified element (1D case) + const _Tp& operator ()(int idx0) const; + //! returns reference to the specified element (2D case) + _Tp& operator ()(int idx0, int idx1); + //! returns read-only reference to the specified element (2D case) + const _Tp& operator ()(int idx0, int idx1) const; + //! returns reference to the specified element (3D case) + _Tp& operator ()(int idx0, int idx1, int idx2); + //! returns read-only reference to the specified element (3D case) + const _Tp& operator ()(int idx0, int idx1, int idx2) const; + + _Tp& operator ()(Point pt); + const _Tp& operator ()(Point pt) const; + + //! conversion to vector. + operator vector<_Tp>() const; + //! conversion to Vec + template operator Vec::channel_type, n>() const; + //! 
conversion to Matx + template operator Matx::channel_type, m, n>() const; +}; + +typedef Mat_ Mat1b; +typedef Mat_ Mat2b; +typedef Mat_ Mat3b; +typedef Mat_ Mat4b; + +typedef Mat_ Mat1s; +typedef Mat_ Mat2s; +typedef Mat_ Mat3s; +typedef Mat_ Mat4s; + +typedef Mat_ Mat1w; +typedef Mat_ Mat2w; +typedef Mat_ Mat3w; +typedef Mat_ Mat4w; + +typedef Mat_ Mat1i; +typedef Mat_ Mat2i; +typedef Mat_ Mat3i; +typedef Mat_ Mat4i; + +typedef Mat_ Mat1f; +typedef Mat_ Mat2f; +typedef Mat_ Mat3f; +typedef Mat_ Mat4f; + +typedef Mat_ Mat1d; +typedef Mat_ Mat2d; +typedef Mat_ Mat3d; +typedef Mat_ Mat4d; + +//////////// Iterators & Comma initializers ////////////////// + +class CV_EXPORTS MatConstIterator +{ +public: + typedef uchar* value_type; + typedef ptrdiff_t difference_type; + typedef const uchar** pointer; + typedef uchar* reference; + typedef std::random_access_iterator_tag iterator_category; + + //! default constructor + MatConstIterator(); + //! constructor that sets the iterator to the beginning of the matrix + MatConstIterator(const Mat* _m); + //! constructor that sets the iterator to the specified element of the matrix + MatConstIterator(const Mat* _m, int _row, int _col=0); + //! constructor that sets the iterator to the specified element of the matrix + MatConstIterator(const Mat* _m, Point _pt); + //! constructor that sets the iterator to the specified element of the matrix + MatConstIterator(const Mat* _m, const int* _idx); + //! copy constructor + MatConstIterator(const MatConstIterator& it); + + //! copy operator + MatConstIterator& operator = (const MatConstIterator& it); + //! returns the current matrix element + uchar* operator *() const; + //! returns the i-th matrix element, relative to the current + uchar* operator [](ptrdiff_t i) const; + + //! shifts the iterator forward by the specified number of elements + MatConstIterator& operator += (ptrdiff_t ofs); + //! shifts the iterator backward by the specified number of elements + MatConstIterator& operator -= (ptrdiff_t ofs); + //! decrements the iterator + MatConstIterator& operator --(); + //! decrements the iterator + MatConstIterator operator --(int); + //! increments the iterator + MatConstIterator& operator ++(); + //! increments the iterator + MatConstIterator operator ++(int); + //! returns the current iterator position + Point pos() const; + //! returns the current iterator position + void pos(int* _idx) const; + ptrdiff_t lpos() const; + void seek(ptrdiff_t ofs, bool relative=false); + void seek(const int* _idx, bool relative=false); + + const Mat* m; + size_t elemSize; + uchar* ptr; + uchar* sliceStart; + uchar* sliceEnd; +}; + +/*! + Matrix read-only iterator + + */ +template +class MatConstIterator_ : public MatConstIterator +{ +public: + typedef _Tp value_type; + typedef ptrdiff_t difference_type; + typedef const _Tp* pointer; + typedef const _Tp& reference; + typedef std::random_access_iterator_tag iterator_category; + + //! default constructor + MatConstIterator_(); + //! constructor that sets the iterator to the beginning of the matrix + MatConstIterator_(const Mat_<_Tp>* _m); + //! constructor that sets the iterator to the specified element of the matrix + MatConstIterator_(const Mat_<_Tp>* _m, int _row, int _col=0); + //! constructor that sets the iterator to the specified element of the matrix + MatConstIterator_(const Mat_<_Tp>* _m, Point _pt); + //! constructor that sets the iterator to the specified element of the matrix + MatConstIterator_(const Mat_<_Tp>* _m, const int* _idx); + //! 
copy constructor + MatConstIterator_(const MatConstIterator_& it); + + //! copy operator + MatConstIterator_& operator = (const MatConstIterator_& it); + //! returns the current matrix element + _Tp operator *() const; + //! returns the i-th matrix element, relative to the current + _Tp operator [](ptrdiff_t i) const; + + //! shifts the iterator forward by the specified number of elements + MatConstIterator_& operator += (ptrdiff_t ofs); + //! shifts the iterator backward by the specified number of elements + MatConstIterator_& operator -= (ptrdiff_t ofs); + //! decrements the iterator + MatConstIterator_& operator --(); + //! decrements the iterator + MatConstIterator_ operator --(int); + //! increments the iterator + MatConstIterator_& operator ++(); + //! increments the iterator + MatConstIterator_ operator ++(int); + //! returns the current iterator position + Point pos() const; +}; + + +/*! + Matrix read-write iterator + +*/ +template +class MatIterator_ : public MatConstIterator_<_Tp> +{ +public: + typedef _Tp* pointer; + typedef _Tp& reference; + typedef std::random_access_iterator_tag iterator_category; + + //! the default constructor + MatIterator_(); + //! constructor that sets the iterator to the beginning of the matrix + MatIterator_(Mat_<_Tp>* _m); + //! constructor that sets the iterator to the specified element of the matrix + MatIterator_(Mat_<_Tp>* _m, int _row, int _col=0); + //! constructor that sets the iterator to the specified element of the matrix + MatIterator_(const Mat_<_Tp>* _m, Point _pt); + //! constructor that sets the iterator to the specified element of the matrix + MatIterator_(const Mat_<_Tp>* _m, const int* _idx); + //! copy constructor + MatIterator_(const MatIterator_& it); + //! copy operator + MatIterator_& operator = (const MatIterator_<_Tp>& it ); + + //! returns the current matrix element + _Tp& operator *() const; + //! returns the i-th matrix element, relative to the current + _Tp& operator [](ptrdiff_t i) const; + + //! shifts the iterator forward by the specified number of elements + MatIterator_& operator += (ptrdiff_t ofs); + //! shifts the iterator backward by the specified number of elements + MatIterator_& operator -= (ptrdiff_t ofs); + //! decrements the iterator + MatIterator_& operator --(); + //! decrements the iterator + MatIterator_ operator --(int); + //! increments the iterator + MatIterator_& operator ++(); + //! increments the iterator + MatIterator_ operator ++(int); +}; + +template class MatOp_Iter_; + +/*! + Comma-separated Matrix Initializer + + The class instances are usually not created explicitly. + Instead, they are created on "matrix << firstValue" operator. + + The sample below initializes 2x2 rotation matrix: + + \code + double angle = 30, a = cos(angle*CV_PI/180), b = sin(angle*CV_PI/180); + Mat R = (Mat_(2,2) << a, -b, b, a); + \endcode +*/ +template class MatCommaInitializer_ +{ +public: + //! the constructor, created by "matrix << firstValue" operator, where matrix is cv::Mat + MatCommaInitializer_(Mat_<_Tp>* _m); + //! the operator that takes the next value and put it to the matrix + template MatCommaInitializer_<_Tp>& operator , (T2 v); + //! 
another form of conversion operator + Mat_<_Tp> operator *() const; + operator Mat_<_Tp>() const; +protected: + MatIterator_<_Tp> it; +}; + + +template class MatxCommaInitializer +{ +public: + MatxCommaInitializer(Matx<_Tp, m, n>* _mtx); + template MatxCommaInitializer<_Tp, m, n>& operator , (T2 val); + Matx<_Tp, m, n> operator *() const; + + Matx<_Tp, m, n>* dst; + int idx; +}; + +template class VecCommaInitializer : public MatxCommaInitializer<_Tp, m, 1> +{ +public: + VecCommaInitializer(Vec<_Tp, m>* _vec); + template VecCommaInitializer<_Tp, m>& operator , (T2 val); + Vec<_Tp, m> operator *() const; +}; + +/*! + Automatically Allocated Buffer Class + + The class is used for temporary buffers in functions and methods. + If a temporary buffer is usually small (a few K's of memory), + but its size depends on the parameters, it makes sense to create a small + fixed-size array on stack and use it if it's large enough. If the required buffer size + is larger than the fixed size, another buffer of sufficient size is allocated dynamically + and released after the processing. Therefore, in typical cases, when the buffer size is small, + there is no overhead associated with malloc()/free(). + At the same time, there is no limit on the size of processed data. + + This is what AutoBuffer does. The template takes 2 parameters - type of the buffer elements and + the number of stack-allocated elements. Here is how the class is used: + + \code + void my_func(const cv::Mat& m) + { + cv::AutoBuffer buf; // create automatic buffer containing 1000 floats + + buf.allocate(m.rows); // if m.rows <= 1000, the pre-allocated buffer is used, + // otherwise the buffer of "m.rows" floats will be allocated + // dynamically and deallocated in cv::AutoBuffer destructor + ... + } + \endcode +*/ +template class AutoBuffer +{ +public: + typedef _Tp value_type; + enum { buffer_padding = (int)((16 + sizeof(_Tp) - 1)/sizeof(_Tp)) }; + + //! the default contructor + AutoBuffer(); + //! constructor taking the real buffer size + AutoBuffer(size_t _size); + //! destructor. calls deallocate() + ~AutoBuffer(); + + //! allocates the new buffer of size _size. if the _size is small enough, stack-allocated buffer is used + void allocate(size_t _size); + //! deallocates the buffer if it was dynamically allocated + void deallocate(); + //! returns pointer to the real buffer, stack-allocated or head-allocated + operator _Tp* (); + //! returns read-only pointer to the real buffer, stack-allocated or head-allocated + operator const _Tp* () const; + +protected: + //! pointer to the real buffer, can point to buf if the buffer is small enough + _Tp* ptr; + //! size of the real buffer + size_t size; + //! pre-allocated buffer + _Tp buf[fixed_size+buffer_padding]; +}; + +/////////////////////////// multi-dimensional dense matrix ////////////////////////// + +/*! + n-Dimensional Dense Matrix Iterator Class. + + The class cv::NAryMatIterator is used for iterating over one or more n-dimensional dense arrays (cv::Mat's). + + The iterator is completely different from cv::Mat_ and cv::SparseMat_ iterators. + It iterates through the slices (or planes), not the elements, where "slice" is a continuous part of the arrays. + + Here is the example on how the iterator can be used to normalize 3D histogram: + + \code + void normalizeColorHist(Mat& hist) + { + #if 1 + // intialize iterator (the style is different from STL). 
+ // after initialization the iterator will contain + // the number of slices or planes + // the iterator will go through + Mat* arrays[] = { &hist, 0 }; + Mat planes[1]; + NAryMatIterator it(arrays, planes); + double s = 0; + // iterate through the matrix. on each iteration + // it.planes[i] (of type Mat) will be set to the current plane of + // i-th n-dim matrix passed to the iterator constructor. + for(int p = 0; p < it.nplanes; p++, ++it) + s += sum(it.planes[0])[0]; + it = NAryMatIterator(hist); + s = 1./s; + for(int p = 0; p < it.nplanes; p++, ++it) + it.planes[0] *= s; + #elif 1 + // this is a shorter implementation of the above + // using built-in operations on Mat + double s = sum(hist)[0]; + hist.convertTo(hist, hist.type(), 1./s, 0); + #else + // and this is even shorter one + // (assuming that the histogram elements are non-negative) + normalize(hist, hist, 1, 0, NORM_L1); + #endif + } + \endcode + + You can iterate through several matrices simultaneously as long as they have the same geometry + (dimensionality and all the dimension sizes are the same), which is useful for binary + and n-ary operations on such matrices. Just pass those matrices to cv::MatNDIterator. + Then, during the iteration it.planes[0], it.planes[1], ... will + be the slices of the corresponding matrices +*/ +class CV_EXPORTS NAryMatIterator +{ +public: + //! the default constructor + NAryMatIterator(); + //! the full constructor taking arbitrary number of n-dim matrices + NAryMatIterator(const Mat** arrays, uchar** ptrs, int narrays=-1); + //! the full constructor taking arbitrary number of n-dim matrices + NAryMatIterator(const Mat** arrays, Mat* planes, int narrays=-1); + //! the separate iterator initialization method + void init(const Mat** arrays, Mat* planes, uchar** ptrs, int narrays=-1); + + //! proceeds to the next plane of every iterated matrix + NAryMatIterator& operator ++(); + //! proceeds to the next plane of every iterated matrix (postfix increment operator) + NAryMatIterator operator ++(int); + + //! the iterated arrays + const Mat** arrays; + //! the current planes + Mat* planes; + //! data pointers + uchar** ptrs; + //! the number of arrays + int narrays; + //! the number of hyper-planes that the iterator steps through + size_t nplanes; + //! the size of each segment (in elements) + size_t size; +protected: + int iterdepth; + size_t idx; +}; + +//typedef NAryMatIterator NAryMatNDIterator; + +typedef void (*ConvertData)(const void* from, void* to, int cn); +typedef void (*ConvertScaleData)(const void* from, void* to, int cn, double alpha, double beta); + +//! returns the function for converting pixels from one data type to another +CV_EXPORTS ConvertData getConvertElem(int fromType, int toType); +//! returns the function for converting pixels from one data type to another with the optional scaling +CV_EXPORTS ConvertScaleData getConvertScaleElem(int fromType, int toType); + + +/////////////////////////// multi-dimensional sparse matrix ////////////////////////// + +class SparseMatIterator; +class SparseMatConstIterator; +template class SparseMatIterator_; +template class SparseMatConstIterator_; + +/*! + Sparse matrix class. + + The class represents multi-dimensional sparse numerical arrays. Such a sparse array can store elements + of any type that cv::Mat is able to store. "Sparse" means that only non-zero elements + are stored (though, as a result of some operations on a sparse matrix, some of its stored elements + can actually become 0. 
It is the user's responsibility to detect such elements and delete them using cv::SparseMat::erase(). + The non-zero elements are stored in a hash table that grows when it is filled enough, + so that the search time remains O(1) on average. Elements can be accessed using the following methods: +
    +
  1. Query operations: cv::SparseMat::ptr() and the higher-level cv::SparseMat::ref(), + cv::SparseMat::value() and cv::SparseMat::find(), for example: + \code + const int dims = 5; + int size[] = {10, 10, 10, 10, 10}; + SparseMat sparse_mat(dims, size, CV_32F); + for(int i = 0; i < 1000; i++) + { + int idx[dims]; + for(int k = 0; k < dims; k++) + idx[k] = rand()%sparse_mat.size(k); + sparse_mat.ref<float>(idx) += 1.f; + } + \endcode + +
  2. Sparse matrix iterators. Like cv::Mat iterators, the sparse matrix iterators are STL-style, + that is, the iteration is done as follows: + \code + // prints elements of a sparse floating-point matrix and the sum of elements. + SparseMatConstIterator_<float> + it = sparse_mat.begin<float>(), + it_end = sparse_mat.end<float>(); + double s = 0; + int dims = sparse_mat.dims(); + for(; it != it_end; ++it) + { + // print element indices and the element value + const SparseMat::Node* n = it.node(); + printf("("); + for(int i = 0; i < dims; i++) + printf("%3d%c", n->idx[i], i < dims-1 ? ',' : ')'); + printf(": %f\n", *it); + s += *it; + } + printf("Element sum is %g\n", s); + \endcode + If you run this loop, you will notice that elements are enumerated + in no particular logical order (lexicographical, etc.); + they come in the same order as they are stored in the hash table, i.e. semi-randomly. + + You may collect pointers to the nodes and sort them to get the proper ordering. + Note, however, that pointers to the nodes may become invalid when you add more + elements to the matrix; this is because of possible buffer reallocation. +
  3. A combination of the above 2 methods when you need to process 2 or more sparse + matrices simultaneously, e.g. this is how you can compute unnormalized + cross-correlation of two floating-point sparse matrices: + \code + double crossCorr(const SparseMat& a, const SparseMat& b) + { + const SparseMat *_a = &a, *_b = &b; + // if b contains fewer elements than a, + // it's faster to iterate through b + if(_a->nzcount() > _b->nzcount()) + std::swap(_a, _b); + SparseMatConstIterator_<float> it = _a->begin<float>(), + it_end = _a->end<float>(); + double ccorr = 0; + for(; it != it_end; ++it) + { + // take the next element from the first matrix + float avalue = *it; + const SparseMat::Node* anode = it.node(); + // and try to find an element with the same index in the second matrix. + // since the hash value depends only on the element index, + // we reuse the hash value stored in the node + float bvalue = _b->value<float>(anode->idx, &anode->hashval); + ccorr += avalue*bvalue; + } + return ccorr; + } + \endcode +
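+
+    In addition, a sparse matrix can be built directly from a dense cv::Mat and expanded back,
+    using the constructor and copyTo() declared below (a minimal sketch; dense is a
+    hypothetical CV_32F matrix):
+    \code
+    SparseMat sparse(dense);   // keeps only the non-zero elements of dense
+    Mat restored;
+    sparse.copyTo(restored);   // reconstructs a dense matrix of the same size and type
+    \endcode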
+*/ +class CV_EXPORTS SparseMat +{ +public: + typedef SparseMatIterator iterator; + typedef SparseMatConstIterator const_iterator; + + //! the sparse matrix header + struct CV_EXPORTS Hdr + { + Hdr(int _dims, const int* _sizes, int _type); + void clear(); + int refcount; + int dims; + int valueOffset; + size_t nodeSize; + size_t nodeCount; + size_t freeList; + vector pool; + vector hashtab; + int size[CV_MAX_DIM]; + }; + + //! sparse matrix node - element of a hash table + struct CV_EXPORTS Node + { + //! hash value + size_t hashval; + //! index of the next node in the same hash table entry + size_t next; + //! index of the matrix element + int idx[CV_MAX_DIM]; + }; + + //! default constructor + SparseMat(); + //! creates matrix of the specified size and type + SparseMat(int dims, const int* _sizes, int _type); + //! copy constructor + SparseMat(const SparseMat& m); + //! converts dense 2d matrix to the sparse form + /*! + \param m the input matrix + */ + explicit SparseMat(const Mat& m); + //! converts old-style sparse matrix to the new-style. All the data is copied + SparseMat(const CvSparseMat* m); + //! the destructor + ~SparseMat(); + + //! assignment operator. This is O(1) operation, i.e. no data is copied + SparseMat& operator = (const SparseMat& m); + //! equivalent to the corresponding constructor + SparseMat& operator = (const Mat& m); + + //! creates full copy of the matrix + SparseMat clone() const; + + //! copies all the data to the destination matrix. All the previous content of m is erased + void copyTo( SparseMat& m ) const; + //! converts sparse matrix to dense matrix. + void copyTo( Mat& m ) const; + //! multiplies all the matrix elements by the specified scale factor alpha and converts the results to the specified data type + void convertTo( SparseMat& m, int rtype, double alpha=1 ) const; + //! converts sparse matrix to dense n-dim matrix with optional type conversion and scaling. + /*! + \param m Destination matrix + \param rtype The output matrix data type. When it is =-1, the output array will have the same data type as (*this) + \param alpha The scale factor + \param beta The optional delta added to the scaled values before the conversion + */ + void convertTo( Mat& m, int rtype, double alpha=1, double beta=0 ) const; + + // not used now + void assignTo( SparseMat& m, int type=-1 ) const; + + //! reallocates sparse matrix. + /*! + If the matrix already had the proper size and type, + it is simply cleared with clear(), otherwise, + the old matrix is released (using release()) and the new one is allocated. + */ + void create(int dims, const int* _sizes, int _type); + //! sets all the sparse matrix elements to 0, which means clearing the hash table. + void clear(); + //! manually increments the reference counter to the header. + void addref(); + // decrements the header reference counter. When the counter reaches 0, the header and all the underlying data are deallocated. + void release(); + + //! converts sparse matrix to the old-style representation; all the elements are copied. + operator CvSparseMat*() const; + //! returns the size of each element in bytes (not including the overhead - the space occupied by SparseMat::Node elements) + size_t elemSize() const; + //! returns elemSize()/channels() + size_t elemSize1() const; + + //! returns type of sparse matrix elements + int type() const; + //! returns the depth of sparse matrix elements + int depth() const; + //! returns the number of channels + int channels() const; + + //! 
returns the array of sizes, or NULL if the matrix is not allocated + const int* size() const; + //! returns the size of i-th matrix dimension (or 0) + int size(int i) const; + //! returns the matrix dimensionality + int dims() const; + //! returns the number of non-zero elements (=the number of hash table nodes) + size_t nzcount() const; + + //! computes the element hash value (1D case) + size_t hash(int i0) const; + //! computes the element hash value (2D case) + size_t hash(int i0, int i1) const; + //! computes the element hash value (3D case) + size_t hash(int i0, int i1, int i2) const; + //! computes the element hash value (nD case) + size_t hash(const int* idx) const; + + //@{ + /*! + specialized variants for 1D, 2D, 3D cases and the generic_type one for n-D case. + + return pointer to the matrix element. +
    +
  • if the element is there (it's non-zero), the pointer to it is returned +
  • if it's not there and createMissing=false, NULL pointer is returned +
  • if it's not there and createMissing=true, then the new element + is created and initialized with 0. Pointer to it is returned +
  • if the optional hashval pointer is not NULL, the element hash value is + not computed, but *hashval is taken instead. +
+ */ + //! returns pointer to the specified element (1D case) + uchar* ptr(int i0, bool createMissing, size_t* hashval=0); + //! returns pointer to the specified element (2D case) + uchar* ptr(int i0, int i1, bool createMissing, size_t* hashval=0); + //! returns pointer to the specified element (3D case) + uchar* ptr(int i0, int i1, int i2, bool createMissing, size_t* hashval=0); + //! returns pointer to the specified element (nD case) + uchar* ptr(const int* idx, bool createMissing, size_t* hashval=0); + //@} + + //@{ + /*! + return read-write reference to the specified sparse matrix element. + + ref<_Tp>(i0,...[,hashval]) is equivalent to *(_Tp*)ptr(i0,...,true[,hashval]). + The methods always return a valid reference. + If the element did not exist, it is created and initialiazed with 0. + */ + //! returns reference to the specified element (1D case) + template _Tp& ref(int i0, size_t* hashval=0); + //! returns reference to the specified element (2D case) + template _Tp& ref(int i0, int i1, size_t* hashval=0); + //! returns reference to the specified element (3D case) + template _Tp& ref(int i0, int i1, int i2, size_t* hashval=0); + //! returns reference to the specified element (nD case) + template _Tp& ref(const int* idx, size_t* hashval=0); + //@} + + //@{ + /*! + return value of the specified sparse matrix element. + + value<_Tp>(i0,...[,hashval]) is equivalent + + \code + { const _Tp* p = find<_Tp>(i0,...[,hashval]); return p ? *p : _Tp(); } + \endcode + + That is, if the element did not exist, the methods return 0. + */ + //! returns value of the specified element (1D case) + template _Tp value(int i0, size_t* hashval=0) const; + //! returns value of the specified element (2D case) + template _Tp value(int i0, int i1, size_t* hashval=0) const; + //! returns value of the specified element (3D case) + template _Tp value(int i0, int i1, int i2, size_t* hashval=0) const; + //! returns value of the specified element (nD case) + template _Tp value(const int* idx, size_t* hashval=0) const; + //@} + + //@{ + /*! + Return pointer to the specified sparse matrix element if it exists + + find<_Tp>(i0,...[,hashval]) is equivalent to (_const Tp*)ptr(i0,...false[,hashval]). + + If the specified element does not exist, the methods return NULL. + */ + //! returns pointer to the specified element (1D case) + template const _Tp* find(int i0, size_t* hashval=0) const; + //! returns pointer to the specified element (2D case) + template const _Tp* find(int i0, int i1, size_t* hashval=0) const; + //! returns pointer to the specified element (3D case) + template const _Tp* find(int i0, int i1, int i2, size_t* hashval=0) const; + //! returns pointer to the specified element (nD case) + template const _Tp* find(const int* idx, size_t* hashval=0) const; + + //! erases the specified element (2D case) + void erase(int i0, int i1, size_t* hashval=0); + //! erases the specified element (3D case) + void erase(int i0, int i1, int i2, size_t* hashval=0); + //! erases the specified element (nD case) + void erase(const int* idx, size_t* hashval=0); + + //@{ + /*! + return the sparse matrix iterator pointing to the first sparse matrix element + */ + //! returns the sparse matrix iterator at the matrix beginning + SparseMatIterator begin(); + //! returns the sparse matrix iterator at the matrix beginning + template SparseMatIterator_<_Tp> begin(); + //! returns the read-only sparse matrix iterator at the matrix beginning + SparseMatConstIterator begin() const; + //! 
returns the read-only sparse matrix iterator at the matrix beginning + template SparseMatConstIterator_<_Tp> begin() const; + //@} + /*! + return the sparse matrix iterator pointing to the element following the last sparse matrix element + */ + //! returns the sparse matrix iterator at the matrix end + SparseMatIterator end(); + //! returns the read-only sparse matrix iterator at the matrix end + SparseMatConstIterator end() const; + //! returns the typed sparse matrix iterator at the matrix end + template SparseMatIterator_<_Tp> end(); + //! returns the typed read-only sparse matrix iterator at the matrix end + template SparseMatConstIterator_<_Tp> end() const; + + //! returns the value stored in the sparse martix node + template _Tp& value(Node* n); + //! returns the value stored in the sparse martix node + template const _Tp& value(const Node* n) const; + + ////////////// some internal-use methods /////////////// + Node* node(size_t nidx); + const Node* node(size_t nidx) const; + + uchar* newNode(const int* idx, size_t hashval); + void removeNode(size_t hidx, size_t nidx, size_t previdx); + void resizeHashTab(size_t newsize); + + enum { MAGIC_VAL=0x42FD0000, MAX_DIM=CV_MAX_DIM, HASH_SCALE=0x5bd1e995, HASH_BIT=0x80000000 }; + + int flags; + Hdr* hdr; +}; + +//! finds global minimum and maximum sparse array elements and returns their values and their locations +CV_EXPORTS void minMaxLoc(const SparseMat& a, double* minVal, + double* maxVal, int* minIdx=0, int* maxIdx=0); +//! computes norm of a sparse matrix +CV_EXPORTS double norm( const SparseMat& src, int normType ); +//! scales and shifts array elements so that either the specified norm (alpha) or the minimum (alpha) and maximum (beta) array values get the specified values +CV_EXPORTS void normalize( const SparseMat& src, SparseMat& dst, double alpha, int normType ); + +/*! + Read-Only Sparse Matrix Iterator. + Here is how to use the iterator to compute the sum of floating-point sparse matrix elements: + + \code + SparseMatConstIterator it = m.begin(), it_end = m.end(); + double s = 0; + CV_Assert( m.type() == CV_32F ); + for( ; it != it_end; ++it ) + s += it.value(); + \endcode +*/ +class CV_EXPORTS SparseMatConstIterator +{ +public: + //! the default constructor + SparseMatConstIterator(); + //! the full constructor setting the iterator to the first sparse matrix element + SparseMatConstIterator(const SparseMat* _m); + //! the copy constructor + SparseMatConstIterator(const SparseMatConstIterator& it); + + //! the assignment operator + SparseMatConstIterator& operator = (const SparseMatConstIterator& it); + + //! template method returning the current matrix element + template const _Tp& value() const; + //! returns the current node of the sparse matrix. it.node->idx is the current element index + const SparseMat::Node* node() const; + + //! moves iterator to the previous element + SparseMatConstIterator& operator --(); + //! moves iterator to the previous element + SparseMatConstIterator operator --(int); + //! moves iterator to the next element + SparseMatConstIterator& operator ++(); + //! moves iterator to the next element + SparseMatConstIterator operator ++(int); + + //! moves iterator to the element after the last element + void seekEnd(); + + const SparseMat* m; + size_t hashidx; + uchar* ptr; +}; + +/*! + Read-write Sparse Matrix Iterator + + The class is similar to cv::SparseMatConstIterator, + but can be used for in-place modification of the matrix elements. 
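+
+    A minimal usage sketch (illustrative only; m is assumed to be a CV_32F sparse matrix):
+
+    \code
+    // halve every stored (non-zero) element of m in place
+    SparseMatIterator it = m.begin(), it_end = m.end();
+    for( ; it != it_end; ++it )
+        it.value<float>() *= 0.5f;
+    \endcode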
+*/ +class CV_EXPORTS SparseMatIterator : public SparseMatConstIterator +{ +public: + //! the default constructor + SparseMatIterator(); + //! the full constructor setting the iterator to the first sparse matrix element + SparseMatIterator(SparseMat* _m); + //! the full constructor setting the iterator to the specified sparse matrix element + SparseMatIterator(SparseMat* _m, const int* idx); + //! the copy constructor + SparseMatIterator(const SparseMatIterator& it); + + //! the assignment operator + SparseMatIterator& operator = (const SparseMatIterator& it); + //! returns read-write reference to the current sparse matrix element + template _Tp& value() const; + //! returns pointer to the current sparse matrix node. it.node->idx is the index of the current element (do not modify it!) + SparseMat::Node* node() const; + + //! moves iterator to the next element + SparseMatIterator& operator ++(); + //! moves iterator to the next element + SparseMatIterator operator ++(int); +}; + +/*! + The Template Sparse Matrix class derived from cv::SparseMat + + The class provides slightly more convenient operations for accessing elements. + + \code + SparseMat m; + ... + SparseMat_ m_ = (SparseMat_&)m; + m_.ref(1)++; // equivalent to m.ref(1)++; + m_.ref(2) += m_(3); // equivalent to m.ref(2) += m.value(3); + \endcode +*/ +template class SparseMat_ : public SparseMat +{ +public: + typedef SparseMatIterator_<_Tp> iterator; + typedef SparseMatConstIterator_<_Tp> const_iterator; + + //! the default constructor + SparseMat_(); + //! the full constructor equivelent to SparseMat(dims, _sizes, DataType<_Tp>::type) + SparseMat_(int dims, const int* _sizes); + //! the copy constructor. If DataType<_Tp>.type != m.type(), the m elements are converted + SparseMat_(const SparseMat& m); + //! the copy constructor. This is O(1) operation - no data is copied + SparseMat_(const SparseMat_& m); + //! converts dense matrix to the sparse form + SparseMat_(const Mat& m); + //! converts the old-style sparse matrix to the C++ class. All the elements are copied + SparseMat_(const CvSparseMat* m); + //! the assignment operator. If DataType<_Tp>.type != m.type(), the m elements are converted + SparseMat_& operator = (const SparseMat& m); + //! the assignment operator. This is O(1) operation - no data is copied + SparseMat_& operator = (const SparseMat_& m); + //! converts dense matrix to the sparse form + SparseMat_& operator = (const Mat& m); + + //! makes full copy of the matrix. All the elements are duplicated + SparseMat_ clone() const; + //! equivalent to cv::SparseMat::create(dims, _sizes, DataType<_Tp>::type) + void create(int dims, const int* _sizes); + //! converts sparse matrix to the old-style CvSparseMat. All the elements are copied + operator CvSparseMat*() const; + + //! returns type of the matrix elements + int type() const; + //! returns depth of the matrix elements + int depth() const; + //! returns the number of channels in each matrix element + int channels() const; + + //! equivalent to SparseMat::ref<_Tp>(i0, hashval) + _Tp& ref(int i0, size_t* hashval=0); + //! equivalent to SparseMat::ref<_Tp>(i0, i1, hashval) + _Tp& ref(int i0, int i1, size_t* hashval=0); + //! equivalent to SparseMat::ref<_Tp>(i0, i1, i2, hashval) + _Tp& ref(int i0, int i1, int i2, size_t* hashval=0); + //! equivalent to SparseMat::ref<_Tp>(idx, hashval) + _Tp& ref(const int* idx, size_t* hashval=0); + + //! equivalent to SparseMat::value<_Tp>(i0, hashval) + _Tp operator()(int i0, size_t* hashval=0) const; + //! 
equivalent to SparseMat::value<_Tp>(i0, i1, hashval) + _Tp operator()(int i0, int i1, size_t* hashval=0) const; + //! equivalent to SparseMat::value<_Tp>(i0, i1, i2, hashval) + _Tp operator()(int i0, int i1, int i2, size_t* hashval=0) const; + //! equivalent to SparseMat::value<_Tp>(idx, hashval) + _Tp operator()(const int* idx, size_t* hashval=0) const; + + //! returns sparse matrix iterator pointing to the first sparse matrix element + SparseMatIterator_<_Tp> begin(); + //! returns read-only sparse matrix iterator pointing to the first sparse matrix element + SparseMatConstIterator_<_Tp> begin() const; + //! returns sparse matrix iterator pointing to the element following the last sparse matrix element + SparseMatIterator_<_Tp> end(); + //! returns read-only sparse matrix iterator pointing to the element following the last sparse matrix element + SparseMatConstIterator_<_Tp> end() const; +}; + + +/*! + Template Read-Only Sparse Matrix Iterator Class. + + This is the derived from SparseMatConstIterator class that + introduces more convenient operator *() for accessing the current element. +*/ +template class SparseMatConstIterator_ : public SparseMatConstIterator +{ +public: + typedef std::forward_iterator_tag iterator_category; + + //! the default constructor + SparseMatConstIterator_(); + //! the full constructor setting the iterator to the first sparse matrix element + SparseMatConstIterator_(const SparseMat_<_Tp>* _m); + SparseMatConstIterator_(const SparseMat* _m); + //! the copy constructor + SparseMatConstIterator_(const SparseMatConstIterator_& it); + + //! the assignment operator + SparseMatConstIterator_& operator = (const SparseMatConstIterator_& it); + //! the element access operator + const _Tp& operator *() const; + + //! moves iterator to the next element + SparseMatConstIterator_& operator ++(); + //! moves iterator to the next element + SparseMatConstIterator_ operator ++(int); +}; + +/*! + Template Read-Write Sparse Matrix Iterator Class. + + This is the derived from cv::SparseMatConstIterator_ class that + introduces more convenient operator *() for accessing the current element. +*/ +template class SparseMatIterator_ : public SparseMatConstIterator_<_Tp> +{ +public: + typedef std::forward_iterator_tag iterator_category; + + //! the default constructor + SparseMatIterator_(); + //! the full constructor setting the iterator to the first sparse matrix element + SparseMatIterator_(SparseMat_<_Tp>* _m); + SparseMatIterator_(SparseMat* _m); + //! the copy constructor + SparseMatIterator_(const SparseMatIterator_& it); + + //! the assignment operator + SparseMatIterator_& operator = (const SparseMatIterator_& it); + //! returns the reference to the current element + _Tp& operator *() const; + + //! moves the iterator to the next element + SparseMatIterator_& operator ++(); + //! moves the iterator to the next element + SparseMatIterator_ operator ++(int); +}; + +//////////////////// Fast Nearest-Neighbor Search Structure //////////////////// + +/*! + Fast Nearest Neighbor Search Class. + + The class implements D. Lowe BBF (Best-Bin-First) algorithm for the last + approximate (or accurate) nearest neighbor search in multi-dimensional spaces. + + First, a set of vectors is passed to KDTree::KDTree() constructor + or KDTree::build() method, where it is reordered. + + Then arbitrary vectors can be passed to KDTree::findNearest() methods, which + find the K nearest neighbors among the vectors from the initial set. 
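+
+ As a minimal sketch (illustrative only; the matrix "points" is assumed to store one
+ D-dimensional CV_32F vector per row), the tree can also be built after construction:
+
+ \code
+ const int N = 1000, D = 16;
+ Mat points(N, D, CV_32F);      // one D-dimensional vector per row
+ // ... fill the rows of points ...
+ KDTree T;
+ T.build(points, false);        // same as passing the points to the constructor
+ \endcode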
+ The user can balance between the speed and accuracy of the search by varying Emax + parameter, which is the number of leaves that the algorithm checks. + The larger parameter values yield more accurate results at the expense of lower processing speed. + + \code + KDTree T(points, false); + const int K = 3, Emax = INT_MAX; + int idx[K]; + float dist[K]; + T.findNearest(query_vec, K, Emax, idx, 0, dist); + CV_Assert(dist[0] <= dist[1] && dist[1] <= dist[2]); + \endcode +*/ +class CV_EXPORTS_W KDTree +{ +public: + /*! + The node of the search tree. + */ + struct Node + { + Node() : idx(-1), left(-1), right(-1), boundary(0.f) {} + Node(int _idx, int _left, int _right, float _boundary) + : idx(_idx), left(_left), right(_right), boundary(_boundary) {} + //! split dimension; >=0 for nodes (dim), < 0 for leaves (index of the point) + int idx; + //! node indices of the left and the right branches + int left, right; + //! go to the left if query_vec[node.idx]<=node.boundary, otherwise go to the right + float boundary; + }; + + //! the default constructor + CV_WRAP KDTree(); + //! the full constructor that builds the search tree + CV_WRAP KDTree(InputArray points, bool copyAndReorderPoints=false); + //! the full constructor that builds the search tree + CV_WRAP KDTree(InputArray points, InputArray _labels, + bool copyAndReorderPoints=false); + //! builds the search tree + CV_WRAP void build(InputArray points, bool copyAndReorderPoints=false); + //! builds the search tree + CV_WRAP void build(InputArray points, InputArray labels, + bool copyAndReorderPoints=false); + //! finds the K nearest neighbors of "vec" while looking at Emax (at most) leaves + CV_WRAP int findNearest(InputArray vec, int K, int Emax, + OutputArray neighborsIdx, + OutputArray neighbors=noArray(), + OutputArray dist=noArray(), + OutputArray labels=noArray()) const; + //! finds all the points from the initial set that belong to the specified box + CV_WRAP void findOrthoRange(InputArray minBounds, + InputArray maxBounds, + OutputArray neighborsIdx, + OutputArray neighbors=noArray(), + OutputArray labels=noArray()) const; + //! returns vectors with the specified indices + CV_WRAP void getPoints(InputArray idx, OutputArray pts, + OutputArray labels=noArray()) const; + //! return a vector with the specified index + const float* getPoint(int ptidx, int* label=0) const; + //! returns the search space dimensionality + CV_WRAP int dims() const; + + vector nodes; //!< all the tree nodes + CV_PROP Mat points; //!< all the points. It can be a reordered copy of the input vector set or the original vector set. + CV_PROP vector labels; //!< the parallel array of labels. + CV_PROP int maxDepth; //!< maximum depth of the search tree. Do not modify it + CV_PROP_RW int normType; //!< type of the distance (cv::NORM_L1 or cv::NORM_L2) used for search. Initially set to cv::NORM_L2, but you can modify it +}; + +//////////////////////////////////////// XML & YAML I/O //////////////////////////////////// + +class CV_EXPORTS FileNode; + +/*! + XML/YAML File Storage Class. + + The class describes an object associated with XML or YAML file. + It can be used to store data to such a file or read and decode the data. + + The storage is organized as a tree of nested sequences (or lists) and mappings. + Sequence is a heterogenious array, which elements are accessed by indices or sequentially using an iterator. + Mapping is analogue of std::map or C structure, which elements are accessed by names. + The most top level structure is a mapping. 
+ Leaves of the file storage tree are integers, floating-point numbers and text strings. + + For example, the following code: + + \code + // open file storage for writing. Type of the file is determined from the extension + FileStorage fs("test.yml", FileStorage::WRITE); + fs << "test_int" << 5 << "test_real" << 3.1 << "test_string" << "ABCDEFGH"; + fs << "test_mat" << Mat::eye(3,3,CV_32F); + + fs << "test_list" << "[" << 0.0000000000001 << 2 << CV_PI << -3435345 << "2-502 2-029 3egegeg" << + "{:" << "month" << 12 << "day" << 31 << "year" << 1969 << "}" << "]"; + fs << "test_map" << "{" << "x" << 1 << "y" << 2 << "width" << 100 << "height" << 200 << "lbp" << "[:"; + + const uchar arr[] = {0, 1, 1, 0, 1, 1, 0, 1}; + fs.writeRaw("u", arr, (int)(sizeof(arr)/sizeof(arr[0]))); + + fs << "]" << "}"; + \endcode + + will produce the following file: + + \verbatim + %YAML:1.0 + test_int: 5 + test_real: 3.1000000000000001e+00 + test_string: ABCDEFGH + test_mat: !!opencv-matrix + rows: 3 + cols: 3 + dt: f + data: [ 1., 0., 0., 0., 1., 0., 0., 0., 1. ] + test_list: + - 1.0000000000000000e-13 + - 2 + - 3.1415926535897931e+00 + - -3435345 + - "2-502 2-029 3egegeg" + - { month:12, day:31, year:1969 } + test_map: + x: 1 + y: 2 + width: 100 + height: 200 + lbp: [ 0, 1, 1, 0, 1, 1, 0, 1 ] + \endverbatim + + and to read the file above, the following code can be used: + + \code + // open file storage for reading. + // Type of the file is determined from the content, not the extension + FileStorage fs("test.yml", FileStorage::READ); + int test_int = (int)fs["test_int"]; + double test_real = (double)fs["test_real"]; + string test_string = (string)fs["test_string"]; + + Mat M; + fs["test_mat"] >> M; + + FileNode tl = fs["test_list"]; + CV_Assert(tl.type() == FileNode::SEQ && tl.size() == 6); + double tl0 = (double)tl[0]; + int tl1 = (int)tl[1]; + double tl2 = (double)tl[2]; + int tl3 = (int)tl[3]; + string tl4 = (string)tl[4]; + CV_Assert(tl[5].type() == FileNode::MAP && tl[5].size() == 3); + + int month = (int)tl[5]["month"]; + int day = (int)tl[5]["day"]; + int year = (int)tl[5]["year"]; + + FileNode tm = fs["test_map"]; + + int x = (int)tm["x"]; + int y = (int)tm["y"]; + int width = (int)tm["width"]; + int height = (int)tm["height"]; + + int lbp_val = 0; + FileNodeIterator it = tm["lbp"].begin(); + + for(int k = 0; k < 8; k++, ++it) + lbp_val |= ((int)*it) << k; + \endcode +*/ +class CV_EXPORTS_W FileStorage +{ +public: + //! file storage mode + enum + { + READ=0, //! read mode + WRITE=1, //! write mode + APPEND=2, //! append mode + MEMORY=4, + FORMAT_MASK=(7<<3), + FORMAT_AUTO=0, + FORMAT_XML=(1<<3), + FORMAT_YAML=(2<<3) + }; + enum + { + UNDEFINED=0, + VALUE_EXPECTED=1, + NAME_EXPECTED=2, + INSIDE_MAP=4 + }; + //! the default constructor + CV_WRAP FileStorage(); + //! the full constructor that opens file storage for reading or writing + CV_WRAP FileStorage(const string& source, int flags, const string& encoding=string()); + //! the constructor that takes pointer to the C FileStorage structure + FileStorage(CvFileStorage* fs); + //! the destructor. calls release() + virtual ~FileStorage(); + + //! opens file storage for reading or writing. The previous storage is closed with release() + CV_WRAP virtual bool open(const string& filename, int flags, const string& encoding=string()); + //! returns true if the object is associated with currently opened file. + CV_WRAP virtual bool isOpened() const; + //! closes the file and releases all the memory buffers + CV_WRAP virtual void release(); + //! 
closes the file, releases all the memory buffers and returns the text string + CV_WRAP string releaseAndGetString(); + + //! returns the first element of the top-level mapping + CV_WRAP FileNode getFirstTopLevelNode() const; + //! returns the top-level mapping. YAML supports multiple streams + CV_WRAP FileNode root(int streamidx=0) const; + //! returns the specified element of the top-level mapping + FileNode operator[](const string& nodename) const; + //! returns the specified element of the top-level mapping + CV_WRAP FileNode operator[](const char* nodename) const; + + //! returns pointer to the underlying C FileStorage structure + CvFileStorage* operator *() { return fs; } + //! returns pointer to the underlying C FileStorage structure + const CvFileStorage* operator *() const { return fs; } + //! writes one or more numbers of the specified format to the currently written structure + void writeRaw( const string& fmt, const uchar* vec, size_t len ); + //! writes the registered C structure (CvMat, CvMatND, CvSeq). See cvWrite() + void writeObj( const string& name, const void* obj ); + + //! returns the normalized object name for the specified file name + static string getDefaultObjectName(const string& filename); + + Ptr fs; //!< the underlying C FileStorage structure + string elname; //!< the currently written element + vector structs; //!< the stack of written structures + int state; //!< the writer state +}; + +class CV_EXPORTS FileNodeIterator; + +/*! + File Storage Node class + + The node is used to store each and every element of the file storage opened for reading - + from the primitive objects, such as numbers and text strings, to the complex nodes: + sequences, mappings and the registered objects. + + Note that file nodes are only used for navigating file storages opened for reading. + When a file storage is opened for writing, no data is stored in memory after it is written. +*/ +class CV_EXPORTS_W_SIMPLE FileNode +{ +public: + //! type of the file storage node + enum + { + NONE=0, //!< empty node + INT=1, //!< an integer + REAL=2, //!< floating-point number + FLOAT=REAL, //!< synonym or REAL + STR=3, //!< text string in UTF-8 encoding + STRING=STR, //!< synonym for STR + REF=4, //!< integer of size size_t. Typically used for storing complex dynamic structures where some elements reference the others + SEQ=5, //!< sequence + MAP=6, //!< mapping + TYPE_MASK=7, + FLOW=8, //!< compact representation of a sequence or mapping. Used only by YAML writer + USER=16, //!< a registered object (e.g. a matrix) + EMPTY=32, //!< empty structure (sequence or mapping) + NAMED=64 //!< the node has a name (i.e. it is element of a mapping) + }; + //! the default constructor + CV_WRAP FileNode(); + //! the full constructor wrapping CvFileNode structure. + FileNode(const CvFileStorage* fs, const CvFileNode* node); + //! the copy constructor + FileNode(const FileNode& node); + //! returns element of a mapping node + FileNode operator[](const string& nodename) const; + //! returns element of a mapping node + CV_WRAP FileNode operator[](const char* nodename) const; + //! returns element of a sequence node + CV_WRAP FileNode operator[](int i) const; + //! returns type of the node + CV_WRAP int type() const; + + //! returns true if the node is empty + CV_WRAP bool empty() const; + //! returns true if the node is a "none" object + CV_WRAP bool isNone() const; + //! returns true if the node is a sequence + CV_WRAP bool isSeq() const; + //! 
returns true if the node is a mapping + CV_WRAP bool isMap() const; + //! returns true if the node is an integer + CV_WRAP bool isInt() const; + //! returns true if the node is a floating-point number + CV_WRAP bool isReal() const; + //! returns true if the node is a text string + CV_WRAP bool isString() const; + //! returns true if the node has a name + CV_WRAP bool isNamed() const; + //! returns the node name or an empty string if the node is nameless + CV_WRAP string name() const; + //! returns the number of elements in the node, if it is a sequence or mapping, or 1 otherwise. + CV_WRAP size_t size() const; + //! returns the node content as an integer. If the node stores floating-point number, it is rounded. + operator int() const; + //! returns the node content as float + operator float() const; + //! returns the node content as double + operator double() const; + //! returns the node content as text string + operator string() const; + + //! returns pointer to the underlying file node + CvFileNode* operator *(); + //! returns pointer to the underlying file node + const CvFileNode* operator* () const; + + //! returns iterator pointing to the first node element + FileNodeIterator begin() const; + //! returns iterator pointing to the element following the last node element + FileNodeIterator end() const; + + //! reads node elements to the buffer with the specified format + void readRaw( const string& fmt, uchar* vec, size_t len ) const; + //! reads the registered object and returns pointer to it + void* readObj() const; + + // do not use wrapper pointer classes for better efficiency + const CvFileStorage* fs; + const CvFileNode* node; +}; + + +/*! + File Node Iterator + + The class is used for iterating sequences (usually) and mappings. + */ +class CV_EXPORTS FileNodeIterator +{ +public: + //! the default constructor + FileNodeIterator(); + //! the full constructor set to the ofs-th element of the node + FileNodeIterator(const CvFileStorage* fs, const CvFileNode* node, size_t ofs=0); + //! the copy constructor + FileNodeIterator(const FileNodeIterator& it); + //! returns the currently observed element + FileNode operator *() const; + //! accesses the currently observed element methods + FileNode operator ->() const; + + //! moves iterator to the next node + FileNodeIterator& operator ++ (); + //! moves iterator to the next node + FileNodeIterator operator ++ (int); + //! moves iterator to the previous node + FileNodeIterator& operator -- (); + //! moves iterator to the previous node + FileNodeIterator operator -- (int); + //! moves iterator forward by the specified offset (possibly negative) + FileNodeIterator& operator += (int ofs); + //! moves iterator backward by the specified offset (possibly negative) + FileNodeIterator& operator -= (int ofs); + + //! reads the next maxCount elements (or less, if the sequence/mapping last element occurs earlier) to the buffer with the specified format + FileNodeIterator& readRaw( const string& fmt, uchar* vec, + size_t maxCount=(size_t)INT_MAX ); + + const CvFileStorage* fs; + const CvFileNode* container; + CvSeqReader reader; + size_t remaining; +}; + +////////////// convenient wrappers for operating old-style dynamic structures ////////////// + +template class SeqIterator; + +typedef Ptr MemStorage; + +/*! + Template Sequence Class derived from CvSeq + + The class provides more convenient access to sequence elements, + STL-style operations and iterators. + + \note The class is targeted for simple data types, + i.e. 
no constructors or destructors + are called for the sequence elements. +*/ +template class Seq +{ +public: + typedef SeqIterator<_Tp> iterator; + typedef SeqIterator<_Tp> const_iterator; + + //! the default constructor + Seq(); + //! the constructor for wrapping CvSeq structure. The real element type in CvSeq should match _Tp. + Seq(const CvSeq* seq); + //! creates the empty sequence that resides in the specified storage + Seq(MemStorage& storage, int headerSize = sizeof(CvSeq)); + //! returns read-write reference to the specified element + _Tp& operator [](int idx); + //! returns read-only reference to the specified element + const _Tp& operator[](int idx) const; + //! returns iterator pointing to the beginning of the sequence + SeqIterator<_Tp> begin() const; + //! returns iterator pointing to the element following the last sequence element + SeqIterator<_Tp> end() const; + //! returns the number of elements in the sequence + size_t size() const; + //! returns the type of sequence elements (CV_8UC1 ... CV_64FC(CV_CN_MAX) ...) + int type() const; + //! returns the depth of sequence elements (CV_8U ... CV_64F) + int depth() const; + //! returns the number of channels in each sequence element + int channels() const; + //! returns the size of each sequence element + size_t elemSize() const; + //! returns index of the specified sequence element + size_t index(const _Tp& elem) const; + //! appends the specified element to the end of the sequence + void push_back(const _Tp& elem); + //! appends the specified element to the front of the sequence + void push_front(const _Tp& elem); + //! appends zero or more elements to the end of the sequence + void push_back(const _Tp* elems, size_t count); + //! appends zero or more elements to the front of the sequence + void push_front(const _Tp* elems, size_t count); + //! inserts the specified element to the specified position + void insert(int idx, const _Tp& elem); + //! inserts zero or more elements to the specified position + void insert(int idx, const _Tp* elems, size_t count); + //! removes element at the specified position + void remove(int idx); + //! removes the specified subsequence + void remove(const Range& r); + + //! returns reference to the first sequence element + _Tp& front(); + //! returns read-only reference to the first sequence element + const _Tp& front() const; + //! returns reference to the last sequence element + _Tp& back(); + //! returns read-only reference to the last sequence element + const _Tp& back() const; + //! returns true iff the sequence contains no elements + bool empty() const; + + //! removes all the elements from the sequence + void clear(); + //! removes the first element from the sequence + void pop_front(); + //! removes the last element from the sequence + void pop_back(); + //! removes zero or more elements from the beginning of the sequence + void pop_front(_Tp* elems, size_t count); + //! removes zero or more elements from the end of the sequence + void pop_back(_Tp* elems, size_t count); + + //! copies the whole sequence or the sequence slice to the specified vector + void copyTo(vector<_Tp>& vec, const Range& range=Range::all()) const; + //! returns the vector containing all the sequence elements + operator vector<_Tp>() const; + + CvSeq* seq; +}; + + +/*! + STL-style Sequence Iterator inherited from the CvSeqReader structure +*/ +template class SeqIterator : public CvSeqReader +{ +public: + //! the default constructor + SeqIterator(); + //! 
the constructor setting the iterator to the beginning or to the end of the sequence + SeqIterator(const Seq<_Tp>& seq, bool seekEnd=false); + //! positions the iterator within the sequence + void seek(size_t pos); + //! reports the current iterator position + size_t tell() const; + //! returns reference to the current sequence element + _Tp& operator *(); + //! returns read-only reference to the current sequence element + const _Tp& operator *() const; + //! moves iterator to the next sequence element + SeqIterator& operator ++(); + //! moves iterator to the next sequence element + SeqIterator operator ++(int) const; + //! moves iterator to the previous sequence element + SeqIterator& operator --(); + //! moves iterator to the previous sequence element + SeqIterator operator --(int) const; + + //! moves iterator forward by the specified offset (possibly negative) + SeqIterator& operator +=(int); + //! moves iterator backward by the specified offset (possibly negative) + SeqIterator& operator -=(int); + + // this is index of the current element module seq->total*2 + // (to distinguish between 0 and seq->total) + int index; +}; + + +class CV_EXPORTS Algorithm; +class CV_EXPORTS AlgorithmInfo; +struct CV_EXPORTS AlgorithmInfoData; + +template struct ParamType {}; + +/*! + Base class for high-level OpenCV algorithms +*/ +class CV_EXPORTS_W Algorithm +{ +public: + Algorithm(); + virtual ~Algorithm(); + string name() const; + + template typename ParamType<_Tp>::member_type get(const string& name) const; + template typename ParamType<_Tp>::member_type get(const char* name) const; + + CV_WRAP int getInt(const string& name) const; + CV_WRAP double getDouble(const string& name) const; + CV_WRAP bool getBool(const string& name) const; + CV_WRAP string getString(const string& name) const; + CV_WRAP Mat getMat(const string& name) const; + CV_WRAP vector getMatVector(const string& name) const; + CV_WRAP Ptr getAlgorithm(const string& name) const; + + void set(const string& name, int value); + void set(const string& name, double value); + void set(const string& name, bool value); + void set(const string& name, const string& value); + void set(const string& name, const Mat& value); + void set(const string& name, const vector& value); + void set(const string& name, const Ptr& value); + template void set(const string& name, const Ptr<_Tp>& value); + + CV_WRAP void setInt(const string& name, int value); + CV_WRAP void setDouble(const string& name, double value); + CV_WRAP void setBool(const string& name, bool value); + CV_WRAP void setString(const string& name, const string& value); + CV_WRAP void setMat(const string& name, const Mat& value); + CV_WRAP void setMatVector(const string& name, const vector& value); + CV_WRAP void setAlgorithm(const string& name, const Ptr& value); + template void setAlgorithm(const string& name, const Ptr<_Tp>& value); + + void set(const char* name, int value); + void set(const char* name, double value); + void set(const char* name, bool value); + void set(const char* name, const string& value); + void set(const char* name, const Mat& value); + void set(const char* name, const vector& value); + void set(const char* name, const Ptr& value); + template void set(const char* name, const Ptr<_Tp>& value); + + void setInt(const char* name, int value); + void setDouble(const char* name, double value); + void setBool(const char* name, bool value); + void setString(const char* name, const string& value); + void setMat(const char* name, const Mat& value); + void setMatVector(const char* 
name, const vector& value); + void setAlgorithm(const char* name, const Ptr& value); + template void setAlgorithm(const char* name, const Ptr<_Tp>& value); + + CV_WRAP string paramHelp(const string& name) const; + int paramType(const char* name) const; + CV_WRAP int paramType(const string& name) const; + CV_WRAP void getParams(CV_OUT vector& names) const; + + + virtual void write(FileStorage& fs) const; + virtual void read(const FileNode& fn); + + typedef Algorithm* (*Constructor)(void); + typedef int (Algorithm::*Getter)() const; + typedef void (Algorithm::*Setter)(int); + + CV_WRAP static void getList(CV_OUT vector& algorithms); + CV_WRAP static Ptr _create(const string& name); + template static Ptr<_Tp> create(const string& name); + + virtual AlgorithmInfo* info() const /* TODO: make it = 0;*/ { return 0; } +}; + + +class CV_EXPORTS AlgorithmInfo +{ +public: + friend class Algorithm; + AlgorithmInfo(const string& name, Algorithm::Constructor create); + ~AlgorithmInfo(); + void get(const Algorithm* algo, const char* name, int argType, void* value) const; + void addParam_(Algorithm& algo, const char* name, int argType, + void* value, bool readOnly, + Algorithm::Getter getter, Algorithm::Setter setter, + const string& help=string()); + string paramHelp(const char* name) const; + int paramType(const char* name) const; + void getParams(vector& names) const; + + void write(const Algorithm* algo, FileStorage& fs) const; + void read(Algorithm* algo, const FileNode& fn) const; + string name() const; + + void addParam(Algorithm& algo, const char* name, + int& value, bool readOnly=false, + int (Algorithm::*getter)()=0, + void (Algorithm::*setter)(int)=0, + const string& help=string()); + void addParam(Algorithm& algo, const char* name, + short& value, bool readOnly=false, + int (Algorithm::*getter)()=0, + void (Algorithm::*setter)(int)=0, + const string& help=string()); + void addParam(Algorithm& algo, const char* name, + bool& value, bool readOnly=false, + int (Algorithm::*getter)()=0, + void (Algorithm::*setter)(int)=0, + const string& help=string()); + void addParam(Algorithm& algo, const char* name, + double& value, bool readOnly=false, + double (Algorithm::*getter)()=0, + void (Algorithm::*setter)(double)=0, + const string& help=string()); + void addParam(Algorithm& algo, const char* name, + string& value, bool readOnly=false, + string (Algorithm::*getter)()=0, + void (Algorithm::*setter)(const string&)=0, + const string& help=string()); + void addParam(Algorithm& algo, const char* name, + Mat& value, bool readOnly=false, + Mat (Algorithm::*getter)()=0, + void (Algorithm::*setter)(const Mat&)=0, + const string& help=string()); + void addParam(Algorithm& algo, const char* name, + vector& value, bool readOnly=false, + vector (Algorithm::*getter)()=0, + void (Algorithm::*setter)(const vector&)=0, + const string& help=string()); + void addParam(Algorithm& algo, const char* name, + Ptr& value, bool readOnly=false, + Ptr (Algorithm::*getter)()=0, + void (Algorithm::*setter)(const Ptr&)=0, + const string& help=string()); + void addParam(Algorithm& algo, const char* name, + float& value, bool readOnly=false, + float (Algorithm::*getter)()=0, + void (Algorithm::*setter)(float)=0, + const string& help=string()); + void addParam(Algorithm& algo, const char* name, + unsigned int& value, bool readOnly=false, + unsigned int (Algorithm::*getter)()=0, + void (Algorithm::*setter)(unsigned int)=0, + const string& help=string()); + void addParam(Algorithm& algo, const char* name, + uint64& value, bool 
readOnly=false, + uint64 (Algorithm::*getter)()=0, + void (Algorithm::*setter)(uint64)=0, + const string& help=string()); + void addParam(Algorithm& algo, const char* name, + uchar& value, bool readOnly=false, + uchar (Algorithm::*getter)()=0, + void (Algorithm::*setter)(uchar)=0, + const string& help=string()); + template void addParam(Algorithm& algo, const char* name, + Ptr<_Tp>& value, bool readOnly=false, + Ptr<_Tp> (Algorithm::*getter)()=0, + void (Algorithm::*setter)(const Ptr<_Tp>&)=0, + const string& help=string()); + template void addParam(Algorithm& algo, const char* name, + Ptr<_Tp>& value, bool readOnly=false, + Ptr<_Tp> (Algorithm::*getter)()=0, + void (Algorithm::*setter)(const Ptr<_Tp>&)=0, + const string& help=string()); +protected: + AlgorithmInfoData* data; + void set(Algorithm* algo, const char* name, int argType, + const void* value, bool force=false) const; +}; + + +struct CV_EXPORTS Param +{ + enum { INT=0, BOOLEAN=1, REAL=2, STRING=3, MAT=4, MAT_VECTOR=5, ALGORITHM=6, FLOAT=7, UNSIGNED_INT=8, UINT64=9, SHORT=10, UCHAR=11 }; + + Param(); + Param(int _type, bool _readonly, int _offset, + Algorithm::Getter _getter=0, + Algorithm::Setter _setter=0, + const string& _help=string()); + int type; + int offset; + bool readonly; + Algorithm::Getter getter; + Algorithm::Setter setter; + string help; +}; + +template<> struct ParamType +{ + typedef bool const_param_type; + typedef bool member_type; + + enum { type = Param::BOOLEAN }; +}; + +template<> struct ParamType +{ + typedef int const_param_type; + typedef int member_type; + + enum { type = Param::INT }; +}; + +template<> struct ParamType +{ + typedef int const_param_type; + typedef int member_type; + + enum { type = Param::SHORT }; +}; + +template<> struct ParamType +{ + typedef double const_param_type; + typedef double member_type; + + enum { type = Param::REAL }; +}; + +template<> struct ParamType +{ + typedef const string& const_param_type; + typedef string member_type; + + enum { type = Param::STRING }; +}; + +template<> struct ParamType +{ + typedef const Mat& const_param_type; + typedef Mat member_type; + + enum { type = Param::MAT }; +}; + +template<> struct ParamType > +{ + typedef const vector& const_param_type; + typedef vector member_type; + + enum { type = Param::MAT_VECTOR }; +}; + +template<> struct ParamType +{ + typedef const Ptr& const_param_type; + typedef Ptr member_type; + + enum { type = Param::ALGORITHM }; +}; + +template<> struct ParamType +{ + typedef float const_param_type; + typedef float member_type; + + enum { type = Param::FLOAT }; +}; + +template<> struct ParamType +{ + typedef unsigned const_param_type; + typedef unsigned member_type; + + enum { type = Param::UNSIGNED_INT }; +}; + +template<> struct ParamType +{ + typedef uint64 const_param_type; + typedef uint64 member_type; + + enum { type = Param::UINT64 }; +}; + +template<> struct ParamType +{ + typedef uchar const_param_type; + typedef uchar member_type; + + enum { type = Param::UCHAR }; +}; + +/*! 
+"\nThe CommandLineParser class is designed for command line arguments parsing\n" + "Keys map: \n" + "Before you start to work with CommandLineParser you have to create a map for keys.\n" + " It will look like this\n" + " const char* keys =\n" + " {\n" + " { s| string| 123asd |string parameter}\n" + " { d| digit | 100 |digit parameter }\n" + " { c|noCamera|false |without camera }\n" + " { 1| |some text|help }\n" + " { 2| |333 |another help }\n" + " };\n" + "Usage syntax: \n" + " \"{\" - start of parameter string.\n" + " \"}\" - end of parameter string\n" + " \"|\" - separator between short name, full name, default value and help\n" + "Supported syntax: \n" + " --key1=arg1 \n" + " -key2=arg2 \n" + "Usage: \n" + " Imagine that the input parameters are next:\n" + " -s=string_value --digit=250 --noCamera lena.jpg 10000\n" + " CommandLineParser parser(argc, argv, keys) - create a parser object\n" + " parser.get(\"s\" or \"string\") will return you first parameter value\n" + " parser.get(\"s\", false or \"string\", false) will return you first parameter value\n" + " without spaces in end and begin\n" + " parser.get(\"d\" or \"digit\") will return you second parameter value.\n" + " It also works with 'unsigned int', 'double', and 'float' types>\n" + " parser.get(\"c\" or \"noCamera\") will return you true .\n" + " If you enter this key in commandline>\n" + " It return you false otherwise.\n" + " parser.get(\"1\") will return you the first argument without parameter (lena.jpg) \n" + " parser.get(\"2\") will return you the second argument without parameter (10000)\n" + " It also works with 'unsigned int', 'double', and 'float' types \n" +*/ +class CV_EXPORTS CommandLineParser +{ + public: + + //! the default constructor + CommandLineParser(int argc, const char* const argv[], const char* key_map); + + //! get parameter, you can choose: delete spaces in end and begin or not + template + _Tp get(const std::string& name, bool space_delete=true) + { + if (!has(name)) + { + return _Tp(); + } + std::string str = getString(name); + return analyzeValue<_Tp>(str, space_delete); + } + + //! 
print short name, full name, current value and help for all params + void printParams(); + + protected: + std::map > data; + std::string getString(const std::string& name); + + bool has(const std::string& keys); + + template + _Tp analyzeValue(const std::string& str, bool space_delete=false); + + template + static _Tp getData(const std::string& str) + { + _Tp res = _Tp(); + std::stringstream s1(str); + s1 >> res; + return res; + } + + template + _Tp fromStringNumber(const std::string& str);//the default conversion function for numbers + + }; + +template<> CV_EXPORTS +bool CommandLineParser::get(const std::string& name, bool space_delete); + +template<> CV_EXPORTS +std::string CommandLineParser::analyzeValue(const std::string& str, bool space_delete); + +template<> CV_EXPORTS +int CommandLineParser::analyzeValue(const std::string& str, bool space_delete); + +template<> CV_EXPORTS +unsigned int CommandLineParser::analyzeValue(const std::string& str, bool space_delete); + +template<> CV_EXPORTS +uint64 CommandLineParser::analyzeValue(const std::string& str, bool space_delete); + +template<> CV_EXPORTS +float CommandLineParser::analyzeValue(const std::string& str, bool space_delete); + +template<> CV_EXPORTS +double CommandLineParser::analyzeValue(const std::string& str, bool space_delete); + + +/////////////////////////////// Parallel Primitives ////////////////////////////////// + +// a base body class +class CV_EXPORTS ParallelLoopBody +{ +public: + virtual ~ParallelLoopBody(); + virtual void operator() (const Range& range) const = 0; +}; + +CV_EXPORTS void parallel_for_(const Range& range, const ParallelLoopBody& body, double nstripes=-1.); + +/////////////////////////// Synchronization Primitives /////////////////////////////// + +class CV_EXPORTS Mutex +{ +public: + Mutex(); + ~Mutex(); + Mutex(const Mutex& m); + Mutex& operator = (const Mutex& m); + + void lock(); + bool trylock(); + void unlock(); + + struct Impl; +protected: + Impl* impl; +}; + +class CV_EXPORTS AutoLock +{ +public: + AutoLock(Mutex& m) : mutex(&m) { mutex->lock(); } + ~AutoLock() { mutex->unlock(); } +protected: + Mutex* mutex; +private: + AutoLock(const AutoLock&); + AutoLock& operator = (const AutoLock&); +}; + +class TLSDataContainer +{ +private: + int key_; +protected: + CV_EXPORTS TLSDataContainer(); + CV_EXPORTS ~TLSDataContainer(); // virtual is not required +public: + virtual void* createDataInstance() const = 0; + virtual void deleteDataInstance(void* data) const = 0; + + CV_EXPORTS void* getData() const; +}; + +template +class TLSData : protected TLSDataContainer +{ +public: + inline TLSData() {} + inline ~TLSData() {} + inline T* get() const { return (T*)getData(); } +private: + virtual void* createDataInstance() const { return new T; } + virtual void deleteDataInstance(void* data) const { delete (T*)data; } +}; + +} + +#endif // __cplusplus + +#include "opencv2/core/operations.hpp" +#include "opencv2/core/mat.hpp" + +#endif /*__OPENCV_CORE_HPP__*/ diff --git a/src/c/hardware/rasberrypi/libraries/opencv/opencv2/core/core_c.h b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/core/core_c.h new file mode 100644 index 0000000..b9f1090 --- /dev/null +++ b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/core/core_c.h @@ -0,0 +1,1886 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. 
+// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + + +#ifndef __OPENCV_CORE_C_H__ +#define __OPENCV_CORE_C_H__ + +#include "opencv2/core/types_c.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/****************************************************************************************\ +* Array allocation, deallocation, initialization and access to elements * +\****************************************************************************************/ + +/* wrapper. + If there is no enough memory, the function + (as well as other OpenCV functions that call cvAlloc) + raises an error. */ +CVAPI(void*) cvAlloc( size_t size ); + +/* wrapper. + Here and further all the memory releasing functions + (that all call cvFree) take double pointer in order to + to clear pointer to the data after releasing it. + Passing pointer to NULL pointer is Ok: nothing happens in this case +*/ +CVAPI(void) cvFree_( void* ptr ); +#define cvFree(ptr) (cvFree_(*(ptr)), *(ptr)=0) + +/* Allocates and initializes IplImage header */ +CVAPI(IplImage*) cvCreateImageHeader( CvSize size, int depth, int channels ); + +/* Inializes IplImage header */ +CVAPI(IplImage*) cvInitImageHeader( IplImage* image, CvSize size, int depth, + int channels, int origin CV_DEFAULT(0), + int align CV_DEFAULT(4)); + +/* Creates IPL image (header and data) */ +CVAPI(IplImage*) cvCreateImage( CvSize size, int depth, int channels ); + +/* Releases (i.e. 
deallocates) IPL image header */ +CVAPI(void) cvReleaseImageHeader( IplImage** image ); + +/* Releases IPL image header and data */ +CVAPI(void) cvReleaseImage( IplImage** image ); + +/* Creates a copy of IPL image (widthStep may differ) */ +CVAPI(IplImage*) cvCloneImage( const IplImage* image ); + +/* Sets a Channel Of Interest (only a few functions support COI) - + use cvCopy to extract the selected channel and/or put it back */ +CVAPI(void) cvSetImageCOI( IplImage* image, int coi ); + +/* Retrieves image Channel Of Interest */ +CVAPI(int) cvGetImageCOI( const IplImage* image ); + +/* Sets image ROI (region of interest) (COI is not changed) */ +CVAPI(void) cvSetImageROI( IplImage* image, CvRect rect ); + +/* Resets image ROI and COI */ +CVAPI(void) cvResetImageROI( IplImage* image ); + +/* Retrieves image ROI */ +CVAPI(CvRect) cvGetImageROI( const IplImage* image ); + +/* Allocates and initializes CvMat header */ +CVAPI(CvMat*) cvCreateMatHeader( int rows, int cols, int type ); + +#define CV_AUTOSTEP 0x7fffffff + +/* Initializes CvMat header */ +CVAPI(CvMat*) cvInitMatHeader( CvMat* mat, int rows, int cols, + int type, void* data CV_DEFAULT(NULL), + int step CV_DEFAULT(CV_AUTOSTEP) ); + +/* Allocates and initializes CvMat header and allocates data */ +CVAPI(CvMat*) cvCreateMat( int rows, int cols, int type ); + +/* Releases CvMat header and deallocates matrix data + (reference counting is used for data) */ +CVAPI(void) cvReleaseMat( CvMat** mat ); + +/* Decrements CvMat data reference counter and deallocates the data if + it reaches 0 */ +CV_INLINE void cvDecRefData( CvArr* arr ) +{ + if( CV_IS_MAT( arr )) + { + CvMat* mat = (CvMat*)arr; + mat->data.ptr = NULL; + if( mat->refcount != NULL && --*mat->refcount == 0 ) + cvFree( &mat->refcount ); + mat->refcount = NULL; + } + else if( CV_IS_MATND( arr )) + { + CvMatND* mat = (CvMatND*)arr; + mat->data.ptr = NULL; + if( mat->refcount != NULL && --*mat->refcount == 0 ) + cvFree( &mat->refcount ); + mat->refcount = NULL; + } +} + +/* Increments CvMat data reference counter */ +CV_INLINE int cvIncRefData( CvArr* arr ) +{ + int refcount = 0; + if( CV_IS_MAT( arr )) + { + CvMat* mat = (CvMat*)arr; + if( mat->refcount != NULL ) + refcount = ++*mat->refcount; + } + else if( CV_IS_MATND( arr )) + { + CvMatND* mat = (CvMatND*)arr; + if( mat->refcount != NULL ) + refcount = ++*mat->refcount; + } + return refcount; +} + + +/* Creates an exact copy of the input matrix (except, may be, step value) */ +CVAPI(CvMat*) cvCloneMat( const CvMat* mat ); + + +/* Makes a new matrix from subrectangle of input array. + No data is copied */ +CVAPI(CvMat*) cvGetSubRect( const CvArr* arr, CvMat* submat, CvRect rect ); +#define cvGetSubArr cvGetSubRect + +/* Selects row span of the input array: arr(start_row:delta_row:end_row,:) + (end_row is not included into the span). */ +CVAPI(CvMat*) cvGetRows( const CvArr* arr, CvMat* submat, + int start_row, int end_row, + int delta_row CV_DEFAULT(1)); + +CV_INLINE CvMat* cvGetRow( const CvArr* arr, CvMat* submat, int row ) +{ + return cvGetRows( arr, submat, row, row + 1, 1 ); +} + + +/* Selects column span of the input array: arr(:,start_col:end_col) + (end_col is not included into the span) */ +CVAPI(CvMat*) cvGetCols( const CvArr* arr, CvMat* submat, + int start_col, int end_col ); + +CV_INLINE CvMat* cvGetCol( const CvArr* arr, CvMat* submat, int col ) +{ + return cvGetCols( arr, submat, col, col + 1 ); +} + +/* Select a diagonal of the input array. 
+   (diag = 0 means the main diagonal, >0 means a diagonal above the main one,
+    <0 - below the main one).
+   The diagonal will be represented as a column (nx1 matrix). */
+CVAPI(CvMat*) cvGetDiag( const CvArr* arr, CvMat* submat,
+                         int diag CV_DEFAULT(0));
+
+/* low-level scalar <-> raw data conversion functions */
+CVAPI(void) cvScalarToRawData( const CvScalar* scalar, void* data, int type,
+                               int extend_to_12 CV_DEFAULT(0) );
+
+CVAPI(void) cvRawDataToScalar( const void* data, int type, CvScalar* scalar );
+
+/* Allocates and initializes CvMatND header */
+CVAPI(CvMatND*) cvCreateMatNDHeader( int dims, const int* sizes, int type );
+
+/* Allocates and initializes CvMatND header and allocates data */
+CVAPI(CvMatND*) cvCreateMatND( int dims, const int* sizes, int type );
+
+/* Initializes preallocated CvMatND header */
+CVAPI(CvMatND*) cvInitMatNDHeader( CvMatND* mat, int dims, const int* sizes,
+                                   int type, void* data CV_DEFAULT(NULL) );
+
+/* Releases CvMatND */
+CV_INLINE void cvReleaseMatND( CvMatND** mat )
+{
+    cvReleaseMat( (CvMat**)mat );
+}
+
+/* Creates a copy of CvMatND (except, may be, steps) */
+CVAPI(CvMatND*) cvCloneMatND( const CvMatND* mat );
+
+/* Allocates and initializes CvSparseMat header and allocates data */
+CVAPI(CvSparseMat*) cvCreateSparseMat( int dims, const int* sizes, int type );
+
+/* Releases CvSparseMat */
+CVAPI(void) cvReleaseSparseMat( CvSparseMat** mat );
+
+/* Creates a copy of CvSparseMat (except, may be, zero items) */
+CVAPI(CvSparseMat*) cvCloneSparseMat( const CvSparseMat* mat );
+
+/* Initializes sparse array iterator
+   (returns the first node or NULL if the array is empty) */
+CVAPI(CvSparseNode*) cvInitSparseMatIterator( const CvSparseMat* mat,
+                                              CvSparseMatIterator* mat_iterator );
+
+// returns next sparse array node (or NULL if there is no more nodes)
+CV_INLINE CvSparseNode* cvGetNextSparseNode( CvSparseMatIterator* mat_iterator )
+{
+    if( mat_iterator->node->next )
+        return mat_iterator->node = mat_iterator->node->next;
+    else
+    {
+        int idx;
+        for( idx = ++mat_iterator->curidx; idx < mat_iterator->mat->hashsize; idx++ )
+        {
+            CvSparseNode* node = (CvSparseNode*)mat_iterator->mat->hashtable[idx];
+            if( node )
+            {
+                mat_iterator->curidx = idx;
+                return mat_iterator->node = node;
+            }
+        }
+        return NULL;
+    }
+}
+
+/**************** matrix iterator: used for n-ary operations on dense arrays *********/
+
+#define CV_MAX_ARR 10
+
+typedef struct CvNArrayIterator
+{
+    int count;              /* number of arrays */
+    int dims;               /* number of dimensions to iterate */
+    CvSize size;            /* maximal common linear size: { width = size, height = 1 } */
+    uchar* ptr[CV_MAX_ARR]; /* pointers to the array slices */
+    int stack[CV_MAX_DIM];  /* for internal use */
+    CvMatND* hdr[CV_MAX_ARR]; /* pointers to the headers of the
+                                 matrices that are processed */
+}
+CvNArrayIterator;
+
+#define CV_NO_DEPTH_CHECK 1
+#define CV_NO_CN_CHECK    2
+#define CV_NO_SIZE_CHECK  4
+
+/* initializes iterator that traverses through several arrays simultaneously
+   (the function together with cvNextNArraySlice is used for
+    N-ary element-wise operations) */
+CVAPI(int) cvInitNArrayIterator( int count, CvArr** arrs,
+                                 const CvArr* mask, CvMatND* stubs,
+                                 CvNArrayIterator* array_iterator,
+                                 int flags CV_DEFAULT(0) );
+
+/* returns zero value if iteration is finished, non-zero (slice length) otherwise */
+CVAPI(int) cvNextNArraySlice( CvNArrayIterator* array_iterator );
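+
+/* A minimal usage sketch of the N-ary iterator (illustrative only; "srcA" and "dstB"
+   are placeholder dense single-channel CV_32F arrays of identical size):
+
+       CvArr* arrs[] = { srcA, dstB };
+       CvMatND stubs[2];
+       CvNArrayIterator it;
+       cvInitNArrayIterator( 2, arrs, 0, stubs, &it, 0 );
+       do
+       {
+           int i;
+           for( i = 0; i < it.size.width; i++ )
+               ((float*)it.ptr[1])[i] = ((const float*)it.ptr[0])[i]*2.f;  // dstB = 2*srcA
+       }
+       while( cvNextNArraySlice( &it ));
+*/
+
+
+/* Returns type of array elements:
+   CV_8UC1 ... CV_64FC4 ...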
*/ +CVAPI(int) cvGetElemType( const CvArr* arr ); + +/* Retrieves number of an array dimensions and + optionally sizes of the dimensions */ +CVAPI(int) cvGetDims( const CvArr* arr, int* sizes CV_DEFAULT(NULL) ); + + +/* Retrieves size of a particular array dimension. + For 2d arrays cvGetDimSize(arr,0) returns number of rows (image height) + and cvGetDimSize(arr,1) returns number of columns (image width) */ +CVAPI(int) cvGetDimSize( const CvArr* arr, int index ); + + +/* ptr = &arr(idx0,idx1,...). All indexes are zero-based, + the major dimensions go first (e.g. (y,x) for 2D, (z,y,x) for 3D */ +CVAPI(uchar*) cvPtr1D( const CvArr* arr, int idx0, int* type CV_DEFAULT(NULL)); +CVAPI(uchar*) cvPtr2D( const CvArr* arr, int idx0, int idx1, int* type CV_DEFAULT(NULL) ); +CVAPI(uchar*) cvPtr3D( const CvArr* arr, int idx0, int idx1, int idx2, + int* type CV_DEFAULT(NULL)); + +/* For CvMat or IplImage number of indices should be 2 + (row index (y) goes first, column index (x) goes next). + For CvMatND or CvSparseMat number of infices should match number of and + indices order should match the array dimension order. */ +CVAPI(uchar*) cvPtrND( const CvArr* arr, const int* idx, int* type CV_DEFAULT(NULL), + int create_node CV_DEFAULT(1), + unsigned* precalc_hashval CV_DEFAULT(NULL)); + +/* value = arr(idx0,idx1,...) */ +CVAPI(CvScalar) cvGet1D( const CvArr* arr, int idx0 ); +CVAPI(CvScalar) cvGet2D( const CvArr* arr, int idx0, int idx1 ); +CVAPI(CvScalar) cvGet3D( const CvArr* arr, int idx0, int idx1, int idx2 ); +CVAPI(CvScalar) cvGetND( const CvArr* arr, const int* idx ); + +/* for 1-channel arrays */ +CVAPI(double) cvGetReal1D( const CvArr* arr, int idx0 ); +CVAPI(double) cvGetReal2D( const CvArr* arr, int idx0, int idx1 ); +CVAPI(double) cvGetReal3D( const CvArr* arr, int idx0, int idx1, int idx2 ); +CVAPI(double) cvGetRealND( const CvArr* arr, const int* idx ); + +/* arr(idx0,idx1,...) = value */ +CVAPI(void) cvSet1D( CvArr* arr, int idx0, CvScalar value ); +CVAPI(void) cvSet2D( CvArr* arr, int idx0, int idx1, CvScalar value ); +CVAPI(void) cvSet3D( CvArr* arr, int idx0, int idx1, int idx2, CvScalar value ); +CVAPI(void) cvSetND( CvArr* arr, const int* idx, CvScalar value ); + +/* for 1-channel arrays */ +CVAPI(void) cvSetReal1D( CvArr* arr, int idx0, double value ); +CVAPI(void) cvSetReal2D( CvArr* arr, int idx0, int idx1, double value ); +CVAPI(void) cvSetReal3D( CvArr* arr, int idx0, + int idx1, int idx2, double value ); +CVAPI(void) cvSetRealND( CvArr* arr, const int* idx, double value ); + +/* clears element of ND dense array, + in case of sparse arrays it deletes the specified node */ +CVAPI(void) cvClearND( CvArr* arr, const int* idx ); + +/* Converts CvArr (IplImage or CvMat,...) to CvMat. + If the last parameter is non-zero, function can + convert multi(>2)-dimensional array to CvMat as long as + the last array's dimension is continous. The resultant + matrix will be have appropriate (a huge) number of rows */ +CVAPI(CvMat*) cvGetMat( const CvArr* arr, CvMat* header, + int* coi CV_DEFAULT(NULL), + int allowND CV_DEFAULT(0)); + +/* Converts CvArr (IplImage or CvMat) to IplImage */ +CVAPI(IplImage*) cvGetImage( const CvArr* arr, IplImage* image_header ); + + +/* Changes a shape of multi-dimensional array. + new_cn == 0 means that number of channels remains unchanged. 
+ new_dims == 0 means that number and sizes of dimensions remain the same + (unless they need to be changed to set the new number of channels) + if new_dims == 1, there is no need to specify new dimension sizes + The resultant configuration should be achievable w/o data copying. + If the resultant array is sparse, CvSparseMat header should be passed + to the function else if the result is 1 or 2 dimensional, + CvMat header should be passed to the function + else CvMatND header should be passed */ +CVAPI(CvArr*) cvReshapeMatND( const CvArr* arr, + int sizeof_header, CvArr* header, + int new_cn, int new_dims, int* new_sizes ); + +#define cvReshapeND( arr, header, new_cn, new_dims, new_sizes ) \ + cvReshapeMatND( (arr), sizeof(*(header)), (header), \ + (new_cn), (new_dims), (new_sizes)) + +CVAPI(CvMat*) cvReshape( const CvArr* arr, CvMat* header, + int new_cn, int new_rows CV_DEFAULT(0) ); + +/* Repeats source 2d array several times in both horizontal and + vertical direction to fill destination array */ +CVAPI(void) cvRepeat( const CvArr* src, CvArr* dst ); + +/* Allocates array data */ +CVAPI(void) cvCreateData( CvArr* arr ); + +/* Releases array data */ +CVAPI(void) cvReleaseData( CvArr* arr ); + +/* Attaches user data to the array header. The step is reffered to + the pre-last dimension. That is, all the planes of the array + must be joint (w/o gaps) */ +CVAPI(void) cvSetData( CvArr* arr, void* data, int step ); + +/* Retrieves raw data of CvMat, IplImage or CvMatND. + In the latter case the function raises an error if + the array can not be represented as a matrix */ +CVAPI(void) cvGetRawData( const CvArr* arr, uchar** data, + int* step CV_DEFAULT(NULL), + CvSize* roi_size CV_DEFAULT(NULL)); + +/* Returns width and height of array in elements */ +CVAPI(CvSize) cvGetSize( const CvArr* arr ); + +/* Copies source array to destination array */ +CVAPI(void) cvCopy( const CvArr* src, CvArr* dst, + const CvArr* mask CV_DEFAULT(NULL) ); + +/* Sets all or "masked" elements of input array + to the same value*/ +CVAPI(void) cvSet( CvArr* arr, CvScalar value, + const CvArr* mask CV_DEFAULT(NULL) ); + +/* Clears all the array elements (sets them to 0) */ +CVAPI(void) cvSetZero( CvArr* arr ); +#define cvZero cvSetZero + + +/* Splits a multi-channel array into the set of single-channel arrays or + extracts particular [color] plane */ +CVAPI(void) cvSplit( const CvArr* src, CvArr* dst0, CvArr* dst1, + CvArr* dst2, CvArr* dst3 ); + +/* Merges a set of single-channel arrays into the single multi-channel array + or inserts one particular [color] plane to the array */ +CVAPI(void) cvMerge( const CvArr* src0, const CvArr* src1, + const CvArr* src2, const CvArr* src3, + CvArr* dst ); + +/* Copies several channels from input arrays to + certain channels of output arrays */ +CVAPI(void) cvMixChannels( const CvArr** src, int src_count, + CvArr** dst, int dst_count, + const int* from_to, int pair_count ); + +/* Performs linear transformation on every source array element: + dst(x,y,c) = scale*src(x,y,c)+shift. 
+ Arbitrary combination of input and output array depths are allowed + (number of channels must be the same), thus the function can be used + for type conversion */ +CVAPI(void) cvConvertScale( const CvArr* src, CvArr* dst, + double scale CV_DEFAULT(1), + double shift CV_DEFAULT(0) ); +#define cvCvtScale cvConvertScale +#define cvScale cvConvertScale +#define cvConvert( src, dst ) cvConvertScale( (src), (dst), 1, 0 ) + + +/* Performs linear transformation on every source array element, + stores absolute value of the result: + dst(x,y,c) = abs(scale*src(x,y,c)+shift). + destination array must have 8u type. + In other cases one may use cvConvertScale + cvAbsDiffS */ +CVAPI(void) cvConvertScaleAbs( const CvArr* src, CvArr* dst, + double scale CV_DEFAULT(1), + double shift CV_DEFAULT(0) ); +#define cvCvtScaleAbs cvConvertScaleAbs + + +/* checks termination criteria validity and + sets eps to default_eps (if it is not set), + max_iter to default_max_iters (if it is not set) +*/ +CVAPI(CvTermCriteria) cvCheckTermCriteria( CvTermCriteria criteria, + double default_eps, + int default_max_iters ); + +/****************************************************************************************\ +* Arithmetic, logic and comparison operations * +\****************************************************************************************/ + +/* dst(mask) = src1(mask) + src2(mask) */ +CVAPI(void) cvAdd( const CvArr* src1, const CvArr* src2, CvArr* dst, + const CvArr* mask CV_DEFAULT(NULL)); + +/* dst(mask) = src(mask) + value */ +CVAPI(void) cvAddS( const CvArr* src, CvScalar value, CvArr* dst, + const CvArr* mask CV_DEFAULT(NULL)); + +/* dst(mask) = src1(mask) - src2(mask) */ +CVAPI(void) cvSub( const CvArr* src1, const CvArr* src2, CvArr* dst, + const CvArr* mask CV_DEFAULT(NULL)); + +/* dst(mask) = src(mask) - value = src(mask) + (-value) */ +CV_INLINE void cvSubS( const CvArr* src, CvScalar value, CvArr* dst, + const CvArr* mask CV_DEFAULT(NULL)) +{ + cvAddS( src, cvScalar( -value.val[0], -value.val[1], -value.val[2], -value.val[3]), + dst, mask ); +} + +/* dst(mask) = value - src(mask) */ +CVAPI(void) cvSubRS( const CvArr* src, CvScalar value, CvArr* dst, + const CvArr* mask CV_DEFAULT(NULL)); + +/* dst(idx) = src1(idx) * src2(idx) * scale + (scaled element-wise multiplication of 2 arrays) */ +CVAPI(void) cvMul( const CvArr* src1, const CvArr* src2, + CvArr* dst, double scale CV_DEFAULT(1) ); + +/* element-wise division/inversion with scaling: + dst(idx) = src1(idx) * scale / src2(idx) + or dst(idx) = scale / src2(idx) if src1 == 0 */ +CVAPI(void) cvDiv( const CvArr* src1, const CvArr* src2, + CvArr* dst, double scale CV_DEFAULT(1)); + +/* dst = src1 * scale + src2 */ +CVAPI(void) cvScaleAdd( const CvArr* src1, CvScalar scale, + const CvArr* src2, CvArr* dst ); +#define cvAXPY( A, real_scalar, B, C ) cvScaleAdd(A, cvRealScalar(real_scalar), B, C) + +/* dst = src1 * alpha + src2 * beta + gamma */ +CVAPI(void) cvAddWeighted( const CvArr* src1, double alpha, + const CvArr* src2, double beta, + double gamma, CvArr* dst ); + +/* result = sum_i(src1(i) * src2(i)) (results for all channels are accumulated together) */ +CVAPI(double) cvDotProduct( const CvArr* src1, const CvArr* src2 ); + +/* dst(idx) = src1(idx) & src2(idx) */ +CVAPI(void) cvAnd( const CvArr* src1, const CvArr* src2, + CvArr* dst, const CvArr* mask CV_DEFAULT(NULL)); + +/* dst(idx) = src(idx) & value */ +CVAPI(void) cvAndS( const CvArr* src, CvScalar value, + CvArr* dst, const CvArr* mask CV_DEFAULT(NULL)); + +/* dst(idx) = src1(idx) | src2(idx) 
*/ +CVAPI(void) cvOr( const CvArr* src1, const CvArr* src2, + CvArr* dst, const CvArr* mask CV_DEFAULT(NULL)); + +/* dst(idx) = src(idx) | value */ +CVAPI(void) cvOrS( const CvArr* src, CvScalar value, + CvArr* dst, const CvArr* mask CV_DEFAULT(NULL)); + +/* dst(idx) = src1(idx) ^ src2(idx) */ +CVAPI(void) cvXor( const CvArr* src1, const CvArr* src2, + CvArr* dst, const CvArr* mask CV_DEFAULT(NULL)); + +/* dst(idx) = src(idx) ^ value */ +CVAPI(void) cvXorS( const CvArr* src, CvScalar value, + CvArr* dst, const CvArr* mask CV_DEFAULT(NULL)); + +/* dst(idx) = ~src(idx) */ +CVAPI(void) cvNot( const CvArr* src, CvArr* dst ); + +/* dst(idx) = lower(idx) <= src(idx) < upper(idx) */ +CVAPI(void) cvInRange( const CvArr* src, const CvArr* lower, + const CvArr* upper, CvArr* dst ); + +/* dst(idx) = lower <= src(idx) < upper */ +CVAPI(void) cvInRangeS( const CvArr* src, CvScalar lower, + CvScalar upper, CvArr* dst ); + +#define CV_CMP_EQ 0 +#define CV_CMP_GT 1 +#define CV_CMP_GE 2 +#define CV_CMP_LT 3 +#define CV_CMP_LE 4 +#define CV_CMP_NE 5 + +/* The comparison operation support single-channel arrays only. + Destination image should be 8uC1 or 8sC1 */ + +/* dst(idx) = src1(idx) _cmp_op_ src2(idx) */ +CVAPI(void) cvCmp( const CvArr* src1, const CvArr* src2, CvArr* dst, int cmp_op ); + +/* dst(idx) = src1(idx) _cmp_op_ value */ +CVAPI(void) cvCmpS( const CvArr* src, double value, CvArr* dst, int cmp_op ); + +/* dst(idx) = min(src1(idx),src2(idx)) */ +CVAPI(void) cvMin( const CvArr* src1, const CvArr* src2, CvArr* dst ); + +/* dst(idx) = max(src1(idx),src2(idx)) */ +CVAPI(void) cvMax( const CvArr* src1, const CvArr* src2, CvArr* dst ); + +/* dst(idx) = min(src(idx),value) */ +CVAPI(void) cvMinS( const CvArr* src, double value, CvArr* dst ); + +/* dst(idx) = max(src(idx),value) */ +CVAPI(void) cvMaxS( const CvArr* src, double value, CvArr* dst ); + +/* dst(x,y,c) = abs(src1(x,y,c) - src2(x,y,c)) */ +CVAPI(void) cvAbsDiff( const CvArr* src1, const CvArr* src2, CvArr* dst ); + +/* dst(x,y,c) = abs(src(x,y,c) - value(c)) */ +CVAPI(void) cvAbsDiffS( const CvArr* src, CvArr* dst, CvScalar value ); +#define cvAbs( src, dst ) cvAbsDiffS( (src), (dst), cvScalarAll(0)) + +/****************************************************************************************\ +* Math operations * +\****************************************************************************************/ + +/* Does cartesian->polar coordinates conversion. + Either of output components (magnitude or angle) is optional */ +CVAPI(void) cvCartToPolar( const CvArr* x, const CvArr* y, + CvArr* magnitude, CvArr* angle CV_DEFAULT(NULL), + int angle_in_degrees CV_DEFAULT(0)); + +/* Does polar->cartesian coordinates conversion. + Either of output components (magnitude or angle) is optional. + If magnitude is missing it is assumed to be all 1's */ +CVAPI(void) cvPolarToCart( const CvArr* magnitude, const CvArr* angle, + CvArr* x, CvArr* y, + int angle_in_degrees CV_DEFAULT(0)); + +/* Does powering: dst(idx) = src(idx)^power */ +CVAPI(void) cvPow( const CvArr* src, CvArr* dst, double power ); + +/* Does exponention: dst(idx) = exp(src(idx)). + Overflow is not handled yet. Underflow is handled. + Maximal relative error is ~7e-6 for single-precision input */ +CVAPI(void) cvExp( const CvArr* src, CvArr* dst ); + +/* Calculates natural logarithms: dst(idx) = log(abs(src(idx))). 
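
/* A small sketch of the exp/log pair, assuming a 32-bit float matrix filled
   with a positive constant purely for illustration. */
CvMat* m = cvCreateMat( 3, 3, CV_32FC1 );
cvSet( m, cvRealScalar(2.0), NULL );
cvLog( m, m );    /* m(i,j) = log(2.0) */
cvExp( m, m );    /* back to ~2.0, up to rounding */
cvReleaseMat( &m );
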
+ Logarithm of 0 gives large negative number(~-700) + Maximal relative error is ~3e-7 for single-precision output +*/ +CVAPI(void) cvLog( const CvArr* src, CvArr* dst ); + +/* Fast arctangent calculation */ +CVAPI(float) cvFastArctan( float y, float x ); + +/* Fast cubic root calculation */ +CVAPI(float) cvCbrt( float value ); + +/* Checks array values for NaNs, Infs or simply for too large numbers + (if CV_CHECK_RANGE is set). If CV_CHECK_QUIET is set, + no runtime errors is raised (function returns zero value in case of "bad" values). + Otherwise cvError is called */ +#define CV_CHECK_RANGE 1 +#define CV_CHECK_QUIET 2 +CVAPI(int) cvCheckArr( const CvArr* arr, int flags CV_DEFAULT(0), + double min_val CV_DEFAULT(0), double max_val CV_DEFAULT(0)); +#define cvCheckArray cvCheckArr + +#define CV_RAND_UNI 0 +#define CV_RAND_NORMAL 1 +CVAPI(void) cvRandArr( CvRNG* rng, CvArr* arr, int dist_type, + CvScalar param1, CvScalar param2 ); + +CVAPI(void) cvRandShuffle( CvArr* mat, CvRNG* rng, + double iter_factor CV_DEFAULT(1.)); + +#define CV_SORT_EVERY_ROW 0 +#define CV_SORT_EVERY_COLUMN 1 +#define CV_SORT_ASCENDING 0 +#define CV_SORT_DESCENDING 16 + +CVAPI(void) cvSort( const CvArr* src, CvArr* dst CV_DEFAULT(NULL), + CvArr* idxmat CV_DEFAULT(NULL), + int flags CV_DEFAULT(0)); + +/* Finds real roots of a cubic equation */ +CVAPI(int) cvSolveCubic( const CvMat* coeffs, CvMat* roots ); + +/* Finds all real and complex roots of a polynomial equation */ +CVAPI(void) cvSolvePoly(const CvMat* coeffs, CvMat *roots2, + int maxiter CV_DEFAULT(20), int fig CV_DEFAULT(100)); + +/****************************************************************************************\ +* Matrix operations * +\****************************************************************************************/ + +/* Calculates cross product of two 3d vectors */ +CVAPI(void) cvCrossProduct( const CvArr* src1, const CvArr* src2, CvArr* dst ); + +/* Matrix transform: dst = A*B + C, C is optional */ +#define cvMatMulAdd( src1, src2, src3, dst ) cvGEMM( (src1), (src2), 1., (src3), 1., (dst), 0 ) +#define cvMatMul( src1, src2, dst ) cvMatMulAdd( (src1), (src2), NULL, (dst)) + +#define CV_GEMM_A_T 1 +#define CV_GEMM_B_T 2 +#define CV_GEMM_C_T 4 +/* Extended matrix transform: + dst = alpha*op(A)*op(B) + beta*op(C), where op(X) is X or X^T */ +CVAPI(void) cvGEMM( const CvArr* src1, const CvArr* src2, double alpha, + const CvArr* src3, double beta, CvArr* dst, + int tABC CV_DEFAULT(0)); +#define cvMatMulAddEx cvGEMM + +/* Transforms each element of source array and stores + resultant vectors in destination array */ +CVAPI(void) cvTransform( const CvArr* src, CvArr* dst, + const CvMat* transmat, + const CvMat* shiftvec CV_DEFAULT(NULL)); +#define cvMatMulAddS cvTransform + +/* Does perspective transform on every element of input array */ +CVAPI(void) cvPerspectiveTransform( const CvArr* src, CvArr* dst, + const CvMat* mat ); + +/* Calculates (A-delta)*(A-delta)^T (order=0) or (A-delta)^T*(A-delta) (order=1) */ +CVAPI(void) cvMulTransposed( const CvArr* src, CvArr* dst, int order, + const CvArr* delta CV_DEFAULT(NULL), + double scale CV_DEFAULT(1.) ); + +/* Tranposes matrix. 
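
/* An illustrative sketch of the matrix-multiplication helpers above; the
   matrix sizes and the identity initialization are assumptions made only to
   keep the example self-contained. */
CvMat* A = cvCreateMat( 2, 3, CV_32FC1 );
CvMat* B = cvCreateMat( 3, 2, CV_32FC1 );
CvMat* C = cvCreateMat( 2, 2, CV_32FC1 );
cvSetIdentity( A, cvRealScalar(1) );
cvSetIdentity( B, cvRealScalar(1) );
cvMatMul( A, B, C );    /* expands to cvGEMM( A, B, 1., NULL, 1., C, 0 ) */
cvReleaseMat( &A );
cvReleaseMat( &B );
cvReleaseMat( &C );
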
Square matrices can be transposed in-place */ +CVAPI(void) cvTranspose( const CvArr* src, CvArr* dst ); +#define cvT cvTranspose + +/* Completes the symmetric matrix from the lower (LtoR=0) or from the upper (LtoR!=0) part */ +CVAPI(void) cvCompleteSymm( CvMat* matrix, int LtoR CV_DEFAULT(0) ); + +/* Mirror array data around horizontal (flip=0), + vertical (flip=1) or both(flip=-1) axises: + cvFlip(src) flips images vertically and sequences horizontally (inplace) */ +CVAPI(void) cvFlip( const CvArr* src, CvArr* dst CV_DEFAULT(NULL), + int flip_mode CV_DEFAULT(0)); +#define cvMirror cvFlip + + +#define CV_SVD_MODIFY_A 1 +#define CV_SVD_U_T 2 +#define CV_SVD_V_T 4 + +/* Performs Singular Value Decomposition of a matrix */ +CVAPI(void) cvSVD( CvArr* A, CvArr* W, CvArr* U CV_DEFAULT(NULL), + CvArr* V CV_DEFAULT(NULL), int flags CV_DEFAULT(0)); + +/* Performs Singular Value Back Substitution (solves A*X = B): + flags must be the same as in cvSVD */ +CVAPI(void) cvSVBkSb( const CvArr* W, const CvArr* U, + const CvArr* V, const CvArr* B, + CvArr* X, int flags ); + +#define CV_LU 0 +#define CV_SVD 1 +#define CV_SVD_SYM 2 +#define CV_CHOLESKY 3 +#define CV_QR 4 +#define CV_NORMAL 16 + +/* Inverts matrix */ +CVAPI(double) cvInvert( const CvArr* src, CvArr* dst, + int method CV_DEFAULT(CV_LU)); +#define cvInv cvInvert + +/* Solves linear system (src1)*(dst) = (src2) + (returns 0 if src1 is a singular and CV_LU method is used) */ +CVAPI(int) cvSolve( const CvArr* src1, const CvArr* src2, CvArr* dst, + int method CV_DEFAULT(CV_LU)); + +/* Calculates determinant of input matrix */ +CVAPI(double) cvDet( const CvArr* mat ); + +/* Calculates trace of the matrix (sum of elements on the main diagonal) */ +CVAPI(CvScalar) cvTrace( const CvArr* mat ); + +/* Finds eigen values and vectors of a symmetric matrix */ +CVAPI(void) cvEigenVV( CvArr* mat, CvArr* evects, CvArr* evals, + double eps CV_DEFAULT(0), + int lowindex CV_DEFAULT(-1), + int highindex CV_DEFAULT(-1)); + +///* Finds selected eigen values and vectors of a symmetric matrix */ +//CVAPI(void) cvSelectedEigenVV( CvArr* mat, CvArr* evects, CvArr* evals, +// int lowindex, int highindex ); + +/* Makes an identity matrix (mat_ij = i == j) */ +CVAPI(void) cvSetIdentity( CvArr* mat, CvScalar value CV_DEFAULT(cvRealScalar(1)) ); + +/* Fills matrix with given range of numbers */ +CVAPI(CvArr*) cvRange( CvArr* mat, double start, double end ); + +/* Calculates covariation matrix for a set of vectors */ +/* transpose([v1-avg, v2-avg,...]) * [v1-avg,v2-avg,...] */ +#define CV_COVAR_SCRAMBLED 0 + +/* [v1-avg, v2-avg,...] * transpose([v1-avg,v2-avg,...]) */ +#define CV_COVAR_NORMAL 1 + +/* do not calc average (i.e. 
mean vector) - use the input vector instead + (useful for calculating covariance matrix by parts) */ +#define CV_COVAR_USE_AVG 2 + +/* scale the covariance matrix coefficients by number of the vectors */ +#define CV_COVAR_SCALE 4 + +/* all the input vectors are stored in a single matrix, as its rows */ +#define CV_COVAR_ROWS 8 + +/* all the input vectors are stored in a single matrix, as its columns */ +#define CV_COVAR_COLS 16 + +CVAPI(void) cvCalcCovarMatrix( const CvArr** vects, int count, + CvArr* cov_mat, CvArr* avg, int flags ); + +#define CV_PCA_DATA_AS_ROW 0 +#define CV_PCA_DATA_AS_COL 1 +#define CV_PCA_USE_AVG 2 +CVAPI(void) cvCalcPCA( const CvArr* data, CvArr* mean, + CvArr* eigenvals, CvArr* eigenvects, int flags ); + +CVAPI(void) cvProjectPCA( const CvArr* data, const CvArr* mean, + const CvArr* eigenvects, CvArr* result ); + +CVAPI(void) cvBackProjectPCA( const CvArr* proj, const CvArr* mean, + const CvArr* eigenvects, CvArr* result ); + +/* Calculates Mahalanobis(weighted) distance */ +CVAPI(double) cvMahalanobis( const CvArr* vec1, const CvArr* vec2, const CvArr* mat ); +#define cvMahalonobis cvMahalanobis + +/****************************************************************************************\ +* Array Statistics * +\****************************************************************************************/ + +/* Finds sum of array elements */ +CVAPI(CvScalar) cvSum( const CvArr* arr ); + +/* Calculates number of non-zero pixels */ +CVAPI(int) cvCountNonZero( const CvArr* arr ); + +/* Calculates mean value of array elements */ +CVAPI(CvScalar) cvAvg( const CvArr* arr, const CvArr* mask CV_DEFAULT(NULL) ); + +/* Calculates mean and standard deviation of pixel values */ +CVAPI(void) cvAvgSdv( const CvArr* arr, CvScalar* mean, CvScalar* std_dev, + const CvArr* mask CV_DEFAULT(NULL) ); + +/* Finds global minimum, maximum and their positions */ +CVAPI(void) cvMinMaxLoc( const CvArr* arr, double* min_val, double* max_val, + CvPoint* min_loc CV_DEFAULT(NULL), + CvPoint* max_loc CV_DEFAULT(NULL), + const CvArr* mask CV_DEFAULT(NULL) ); + +/* types of array norm */ +#define CV_C 1 +#define CV_L1 2 +#define CV_L2 4 +#define CV_NORM_MASK 7 +#define CV_RELATIVE 8 +#define CV_DIFF 16 +#define CV_MINMAX 32 + +#define CV_DIFF_C (CV_DIFF | CV_C) +#define CV_DIFF_L1 (CV_DIFF | CV_L1) +#define CV_DIFF_L2 (CV_DIFF | CV_L2) +#define CV_RELATIVE_C (CV_RELATIVE | CV_C) +#define CV_RELATIVE_L1 (CV_RELATIVE | CV_L1) +#define CV_RELATIVE_L2 (CV_RELATIVE | CV_L2) + +/* Finds norm, difference norm or relative difference norm for an array (or two arrays) */ +CVAPI(double) cvNorm( const CvArr* arr1, const CvArr* arr2 CV_DEFAULT(NULL), + int norm_type CV_DEFAULT(CV_L2), + const CvArr* mask CV_DEFAULT(NULL) ); + +CVAPI(void) cvNormalize( const CvArr* src, CvArr* dst, + double a CV_DEFAULT(1.), double b CV_DEFAULT(0.), + int norm_type CV_DEFAULT(CV_L2), + const CvArr* mask CV_DEFAULT(NULL) ); + + +#define CV_REDUCE_SUM 0 +#define CV_REDUCE_AVG 1 +#define CV_REDUCE_MAX 2 +#define CV_REDUCE_MIN 3 + +CVAPI(void) cvReduce( const CvArr* src, CvArr* dst, int dim CV_DEFAULT(-1), + int op CV_DEFAULT(CV_REDUCE_SUM) ); + +/****************************************************************************************\ +* Discrete Linear Transforms and Related Functions * +\****************************************************************************************/ + +#define CV_DXT_FORWARD 0 +#define CV_DXT_INVERSE 1 +#define CV_DXT_SCALE 2 /* divide result by size of array */ +#define CV_DXT_INV_SCALE (CV_DXT_INVERSE + 
CV_DXT_SCALE) +#define CV_DXT_INVERSE_SCALE CV_DXT_INV_SCALE +#define CV_DXT_ROWS 4 /* transform each row individually */ +#define CV_DXT_MUL_CONJ 8 /* conjugate the second argument of cvMulSpectrums */ + +/* Discrete Fourier Transform: + complex->complex, + real->ccs (forward), + ccs->real (inverse) */ +CVAPI(void) cvDFT( const CvArr* src, CvArr* dst, int flags, + int nonzero_rows CV_DEFAULT(0) ); +#define cvFFT cvDFT + +/* Multiply results of DFTs: DFT(X)*DFT(Y) or DFT(X)*conj(DFT(Y)) */ +CVAPI(void) cvMulSpectrums( const CvArr* src1, const CvArr* src2, + CvArr* dst, int flags ); + +/* Finds optimal DFT vector size >= size0 */ +CVAPI(int) cvGetOptimalDFTSize( int size0 ); + +/* Discrete Cosine Transform */ +CVAPI(void) cvDCT( const CvArr* src, CvArr* dst, int flags ); + +/****************************************************************************************\ +* Dynamic data structures * +\****************************************************************************************/ + +/* Calculates length of sequence slice (with support of negative indices). */ +CVAPI(int) cvSliceLength( CvSlice slice, const CvSeq* seq ); + + +/* Creates new memory storage. + block_size == 0 means that default, + somewhat optimal size, is used (currently, it is 64K) */ +CVAPI(CvMemStorage*) cvCreateMemStorage( int block_size CV_DEFAULT(0)); + + +/* Creates a memory storage that will borrow memory blocks from parent storage */ +CVAPI(CvMemStorage*) cvCreateChildMemStorage( CvMemStorage* parent ); + + +/* Releases memory storage. All the children of a parent must be released before + the parent. A child storage returns all the blocks to parent when it is released */ +CVAPI(void) cvReleaseMemStorage( CvMemStorage** storage ); + + +/* Clears memory storage. This is the only way(!!!) (besides cvRestoreMemStoragePos) + to reuse memory allocated for the storage - cvClearSeq,cvClearSet ... + do not free any memory. + A child storage returns all the blocks to the parent when it is cleared */ +CVAPI(void) cvClearMemStorage( CvMemStorage* storage ); + +/* Remember a storage "free memory" position */ +CVAPI(void) cvSaveMemStoragePos( const CvMemStorage* storage, CvMemStoragePos* pos ); + +/* Restore a storage "free memory" position */ +CVAPI(void) cvRestoreMemStoragePos( CvMemStorage* storage, CvMemStoragePos* pos ); + +/* Allocates continuous buffer of the specified size in the storage */ +CVAPI(void*) cvMemStorageAlloc( CvMemStorage* storage, size_t size ); + +/* Allocates string in memory storage */ +CVAPI(CvString) cvMemStorageAllocString( CvMemStorage* storage, const char* ptr, + int len CV_DEFAULT(-1) ); + +/* Creates new empty sequence that will reside in the specified storage */ +CVAPI(CvSeq*) cvCreateSeq( int seq_flags, size_t header_size, + size_t elem_size, CvMemStorage* storage ); + +/* Changes default size (granularity) of sequence blocks. + The default size is ~1Kbyte */ +CVAPI(void) cvSetSeqBlockSize( CvSeq* seq, int delta_elems ); + + +/* Adds new element to the end of sequence. Returns pointer to the element */ +CVAPI(schar*) cvSeqPush( CvSeq* seq, const void* element CV_DEFAULT(NULL)); + + +/* Adds new element to the beginning of sequence. 
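
/* A short sketch of the storage/sequence workflow above, assuming a sequence
   of CvPoint elements; the pushed coordinates are arbitrary. */
CvMemStorage* storage = cvCreateMemStorage( 0 );
CvSeq* seq = cvCreateSeq( CV_32SC2, sizeof(CvSeq), sizeof(CvPoint), storage );
CvPoint pt = cvPoint( 5, 7 );
cvSeqPush( seq, &pt );                               /* append to the end */
CvPoint* first = (CvPoint*)cvGetSeqElem( seq, 0 );   /* read it back      */
cvReleaseMemStorage( &storage );                     /* frees the sequence too */
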
Returns pointer to it */ +CVAPI(schar*) cvSeqPushFront( CvSeq* seq, const void* element CV_DEFAULT(NULL)); + + +/* Removes the last element from sequence and optionally saves it */ +CVAPI(void) cvSeqPop( CvSeq* seq, void* element CV_DEFAULT(NULL)); + + +/* Removes the first element from sequence and optioanally saves it */ +CVAPI(void) cvSeqPopFront( CvSeq* seq, void* element CV_DEFAULT(NULL)); + + +#define CV_FRONT 1 +#define CV_BACK 0 +/* Adds several new elements to the end of sequence */ +CVAPI(void) cvSeqPushMulti( CvSeq* seq, const void* elements, + int count, int in_front CV_DEFAULT(0) ); + +/* Removes several elements from the end of sequence and optionally saves them */ +CVAPI(void) cvSeqPopMulti( CvSeq* seq, void* elements, + int count, int in_front CV_DEFAULT(0) ); + +/* Inserts a new element in the middle of sequence. + cvSeqInsert(seq,0,elem) == cvSeqPushFront(seq,elem) */ +CVAPI(schar*) cvSeqInsert( CvSeq* seq, int before_index, + const void* element CV_DEFAULT(NULL)); + +/* Removes specified sequence element */ +CVAPI(void) cvSeqRemove( CvSeq* seq, int index ); + + +/* Removes all the elements from the sequence. The freed memory + can be reused later only by the same sequence unless cvClearMemStorage + or cvRestoreMemStoragePos is called */ +CVAPI(void) cvClearSeq( CvSeq* seq ); + + +/* Retrieves pointer to specified sequence element. + Negative indices are supported and mean counting from the end + (e.g -1 means the last sequence element) */ +CVAPI(schar*) cvGetSeqElem( const CvSeq* seq, int index ); + +/* Calculates index of the specified sequence element. + Returns -1 if element does not belong to the sequence */ +CVAPI(int) cvSeqElemIdx( const CvSeq* seq, const void* element, + CvSeqBlock** block CV_DEFAULT(NULL) ); + +/* Initializes sequence writer. The new elements will be added to the end of sequence */ +CVAPI(void) cvStartAppendToSeq( CvSeq* seq, CvSeqWriter* writer ); + + +/* Combination of cvCreateSeq and cvStartAppendToSeq */ +CVAPI(void) cvStartWriteSeq( int seq_flags, int header_size, + int elem_size, CvMemStorage* storage, + CvSeqWriter* writer ); + +/* Closes sequence writer, updates sequence header and returns pointer + to the resultant sequence + (which may be useful if the sequence was created using cvStartWriteSeq)) +*/ +CVAPI(CvSeq*) cvEndWriteSeq( CvSeqWriter* writer ); + + +/* Updates sequence header. May be useful to get access to some of previously + written elements via cvGetSeqElem or sequence reader */ +CVAPI(void) cvFlushSeqWriter( CvSeqWriter* writer ); + + +/* Initializes sequence reader. + The sequence can be read in forward or backward direction */ +CVAPI(void) cvStartReadSeq( const CvSeq* seq, CvSeqReader* reader, + int reverse CV_DEFAULT(0) ); + + +/* Returns current sequence reader position (currently observed sequence element) */ +CVAPI(int) cvGetSeqReaderPos( CvSeqReader* reader ); + + +/* Changes sequence reader position. It may seek to an absolute or + to relative to the current position */ +CVAPI(void) cvSetSeqReaderPos( CvSeqReader* reader, int index, + int is_relative CV_DEFAULT(0)); + +/* Copies sequence content to a continuous piece of memory */ +CVAPI(void*) cvCvtSeqToArray( const CvSeq* seq, void* elements, + CvSlice slice CV_DEFAULT(CV_WHOLE_SEQ) ); + +/* Creates sequence header for array. 
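
/* A sketch of the writer interface above, building a 10-element integer
   sequence; CV_WRITE_SEQ_ELEM is the sequence-writer macro from the core C API,
   and the storage is created here just to keep the example self-contained. */
CvMemStorage* store = cvCreateMemStorage( 0 );
CvSeqWriter writer;
CvSeq* numbers;
int i;
cvStartWriteSeq( CV_32SC1, sizeof(CvSeq), sizeof(int), store, &writer );
for( i = 0; i < 10; i++ )
    CV_WRITE_SEQ_ELEM( i, writer );
numbers = cvEndWriteSeq( &writer );
cvReleaseMemStorage( &store );
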
+ After that all the operations on sequences that do not alter the content + can be applied to the resultant sequence */ +CVAPI(CvSeq*) cvMakeSeqHeaderForArray( int seq_type, int header_size, + int elem_size, void* elements, int total, + CvSeq* seq, CvSeqBlock* block ); + +/* Extracts sequence slice (with or without copying sequence elements) */ +CVAPI(CvSeq*) cvSeqSlice( const CvSeq* seq, CvSlice slice, + CvMemStorage* storage CV_DEFAULT(NULL), + int copy_data CV_DEFAULT(0)); + +CV_INLINE CvSeq* cvCloneSeq( const CvSeq* seq, CvMemStorage* storage CV_DEFAULT(NULL)) +{ + return cvSeqSlice( seq, CV_WHOLE_SEQ, storage, 1 ); +} + +/* Removes sequence slice */ +CVAPI(void) cvSeqRemoveSlice( CvSeq* seq, CvSlice slice ); + +/* Inserts a sequence or array into another sequence */ +CVAPI(void) cvSeqInsertSlice( CvSeq* seq, int before_index, const CvArr* from_arr ); + +/* a < b ? -1 : a > b ? 1 : 0 */ +typedef int (CV_CDECL* CvCmpFunc)(const void* a, const void* b, void* userdata ); + +/* Sorts sequence in-place given element comparison function */ +CVAPI(void) cvSeqSort( CvSeq* seq, CvCmpFunc func, void* userdata CV_DEFAULT(NULL) ); + +/* Finds element in a [sorted] sequence */ +CVAPI(schar*) cvSeqSearch( CvSeq* seq, const void* elem, CvCmpFunc func, + int is_sorted, int* elem_idx, + void* userdata CV_DEFAULT(NULL) ); + +/* Reverses order of sequence elements in-place */ +CVAPI(void) cvSeqInvert( CvSeq* seq ); + +/* Splits sequence into one or more equivalence classes using the specified criteria */ +CVAPI(int) cvSeqPartition( const CvSeq* seq, CvMemStorage* storage, + CvSeq** labels, CvCmpFunc is_equal, void* userdata ); + +/************ Internal sequence functions ************/ +CVAPI(void) cvChangeSeqBlock( void* reader, int direction ); +CVAPI(void) cvCreateSeqBlock( CvSeqWriter* writer ); + + +/* Creates a new set */ +CVAPI(CvSet*) cvCreateSet( int set_flags, int header_size, + int elem_size, CvMemStorage* storage ); + +/* Adds new element to the set and returns pointer to it */ +CVAPI(int) cvSetAdd( CvSet* set_header, CvSetElem* elem CV_DEFAULT(NULL), + CvSetElem** inserted_elem CV_DEFAULT(NULL) ); + +/* Fast variant of cvSetAdd */ +CV_INLINE CvSetElem* cvSetNew( CvSet* set_header ) +{ + CvSetElem* elem = set_header->free_elems; + if( elem ) + { + set_header->free_elems = elem->next_free; + elem->flags = elem->flags & CV_SET_ELEM_IDX_MASK; + set_header->active_count++; + } + else + cvSetAdd( set_header, NULL, &elem ); + return elem; +} + +/* Removes set element given its pointer */ +CV_INLINE void cvSetRemoveByPtr( CvSet* set_header, void* elem ) +{ + CvSetElem* _elem = (CvSetElem*)elem; + assert( _elem->flags >= 0 /*&& (elem->flags & CV_SET_ELEM_IDX_MASK) < set_header->total*/ ); + _elem->next_free = set_header->free_elems; + _elem->flags = (_elem->flags & CV_SET_ELEM_IDX_MASK) | CV_SET_ELEM_FREE_FLAG; + set_header->free_elems = _elem; + set_header->active_count--; +} + +/* Removes element from the set by its index */ +CVAPI(void) cvSetRemove( CvSet* set_header, int index ); + +/* Returns a set element by index. If the element doesn't belong to the set, + NULL is returned */ +CV_INLINE CvSetElem* cvGetSetElem( const CvSet* set_header, int idx ) +{ + CvSetElem* elem = (CvSetElem*)(void *)cvGetSeqElem( (CvSeq*)set_header, idx ); + return elem && CV_IS_SET_ELEM( elem ) ? 
elem : 0; +} + +/* Removes all the elements from the set */ +CVAPI(void) cvClearSet( CvSet* set_header ); + +/* Creates new graph */ +CVAPI(CvGraph*) cvCreateGraph( int graph_flags, int header_size, + int vtx_size, int edge_size, + CvMemStorage* storage ); + +/* Adds new vertex to the graph */ +CVAPI(int) cvGraphAddVtx( CvGraph* graph, const CvGraphVtx* vtx CV_DEFAULT(NULL), + CvGraphVtx** inserted_vtx CV_DEFAULT(NULL) ); + + +/* Removes vertex from the graph together with all incident edges */ +CVAPI(int) cvGraphRemoveVtx( CvGraph* graph, int index ); +CVAPI(int) cvGraphRemoveVtxByPtr( CvGraph* graph, CvGraphVtx* vtx ); + + +/* Link two vertices specifed by indices or pointers if they + are not connected or return pointer to already existing edge + connecting the vertices. + Functions return 1 if a new edge was created, 0 otherwise */ +CVAPI(int) cvGraphAddEdge( CvGraph* graph, + int start_idx, int end_idx, + const CvGraphEdge* edge CV_DEFAULT(NULL), + CvGraphEdge** inserted_edge CV_DEFAULT(NULL) ); + +CVAPI(int) cvGraphAddEdgeByPtr( CvGraph* graph, + CvGraphVtx* start_vtx, CvGraphVtx* end_vtx, + const CvGraphEdge* edge CV_DEFAULT(NULL), + CvGraphEdge** inserted_edge CV_DEFAULT(NULL) ); + +/* Remove edge connecting two vertices */ +CVAPI(void) cvGraphRemoveEdge( CvGraph* graph, int start_idx, int end_idx ); +CVAPI(void) cvGraphRemoveEdgeByPtr( CvGraph* graph, CvGraphVtx* start_vtx, + CvGraphVtx* end_vtx ); + +/* Find edge connecting two vertices */ +CVAPI(CvGraphEdge*) cvFindGraphEdge( const CvGraph* graph, int start_idx, int end_idx ); +CVAPI(CvGraphEdge*) cvFindGraphEdgeByPtr( const CvGraph* graph, + const CvGraphVtx* start_vtx, + const CvGraphVtx* end_vtx ); +#define cvGraphFindEdge cvFindGraphEdge +#define cvGraphFindEdgeByPtr cvFindGraphEdgeByPtr + +/* Remove all vertices and edges from the graph */ +CVAPI(void) cvClearGraph( CvGraph* graph ); + + +/* Count number of edges incident to the vertex */ +CVAPI(int) cvGraphVtxDegree( const CvGraph* graph, int vtx_idx ); +CVAPI(int) cvGraphVtxDegreeByPtr( const CvGraph* graph, const CvGraphVtx* vtx ); + + +/* Retrieves graph vertex by given index */ +#define cvGetGraphVtx( graph, idx ) (CvGraphVtx*)cvGetSetElem((CvSet*)(graph), (idx)) + +/* Retrieves index of a graph vertex given its pointer */ +#define cvGraphVtxIdx( graph, vtx ) ((vtx)->flags & CV_SET_ELEM_IDX_MASK) + +/* Retrieves index of a graph edge given its pointer */ +#define cvGraphEdgeIdx( graph, edge ) ((edge)->flags & CV_SET_ELEM_IDX_MASK) + +#define cvGraphGetVtxCount( graph ) ((graph)->active_count) +#define cvGraphGetEdgeCount( graph ) ((graph)->edges->active_count) + +#define CV_GRAPH_VERTEX 1 +#define CV_GRAPH_TREE_EDGE 2 +#define CV_GRAPH_BACK_EDGE 4 +#define CV_GRAPH_FORWARD_EDGE 8 +#define CV_GRAPH_CROSS_EDGE 16 +#define CV_GRAPH_ANY_EDGE 30 +#define CV_GRAPH_NEW_TREE 32 +#define CV_GRAPH_BACKTRACKING 64 +#define CV_GRAPH_OVER -1 + +#define CV_GRAPH_ALL_ITEMS -1 + +/* flags for graph vertices and edges */ +#define CV_GRAPH_ITEM_VISITED_FLAG (1 << 30) +#define CV_IS_GRAPH_VERTEX_VISITED(vtx) \ + (((CvGraphVtx*)(vtx))->flags & CV_GRAPH_ITEM_VISITED_FLAG) +#define CV_IS_GRAPH_EDGE_VISITED(edge) \ + (((CvGraphEdge*)(edge))->flags & CV_GRAPH_ITEM_VISITED_FLAG) +#define CV_GRAPH_SEARCH_TREE_NODE_FLAG (1 << 29) +#define CV_GRAPH_FORWARD_EDGE_FLAG (1 << 28) + +typedef struct CvGraphScanner +{ + CvGraphVtx* vtx; /* current graph vertex (or current edge origin) */ + CvGraphVtx* dst; /* current graph edge destination vertex */ + CvGraphEdge* edge; /* current edge */ + + CvGraph* 
graph; /* the graph */ + CvSeq* stack; /* the graph vertex stack */ + int index; /* the lower bound of certainly visited vertices */ + int mask; /* event mask */ +} +CvGraphScanner; + +/* Creates new graph scanner. */ +CVAPI(CvGraphScanner*) cvCreateGraphScanner( CvGraph* graph, + CvGraphVtx* vtx CV_DEFAULT(NULL), + int mask CV_DEFAULT(CV_GRAPH_ALL_ITEMS)); + +/* Releases graph scanner. */ +CVAPI(void) cvReleaseGraphScanner( CvGraphScanner** scanner ); + +/* Get next graph element */ +CVAPI(int) cvNextGraphItem( CvGraphScanner* scanner ); + +/* Creates a copy of graph */ +CVAPI(CvGraph*) cvCloneGraph( const CvGraph* graph, CvMemStorage* storage ); + +/****************************************************************************************\ +* Drawing * +\****************************************************************************************/ + +/****************************************************************************************\ +* Drawing functions work with images/matrices of arbitrary type. * +* For color images the channel order is BGR[A] * +* Antialiasing is supported only for 8-bit image now. * +* All the functions include parameter color that means rgb value (that may be * +* constructed with CV_RGB macro) for color images and brightness * +* for grayscale images. * +* If a drawn figure is partially or completely outside of the image, it is clipped.* +\****************************************************************************************/ + +#define CV_RGB( r, g, b ) cvScalar( (b), (g), (r), 0 ) +#define CV_FILLED -1 + +#define CV_AA 16 + +/* Draws 4-connected, 8-connected or antialiased line segment connecting two points */ +CVAPI(void) cvLine( CvArr* img, CvPoint pt1, CvPoint pt2, + CvScalar color, int thickness CV_DEFAULT(1), + int line_type CV_DEFAULT(8), int shift CV_DEFAULT(0) ); + +/* Draws a rectangle given two opposite corners of the rectangle (pt1 & pt2), + if thickness<0 (e.g. thickness == CV_FILLED), the filled box is drawn */ +CVAPI(void) cvRectangle( CvArr* img, CvPoint pt1, CvPoint pt2, + CvScalar color, int thickness CV_DEFAULT(1), + int line_type CV_DEFAULT(8), + int shift CV_DEFAULT(0)); + +/* Draws a rectangle specified by a CvRect structure */ +CVAPI(void) cvRectangleR( CvArr* img, CvRect r, + CvScalar color, int thickness CV_DEFAULT(1), + int line_type CV_DEFAULT(8), + int shift CV_DEFAULT(0)); + + +/* Draws a circle with specified center and radius. + Thickness works in the same way as with cvRectangle */ +CVAPI(void) cvCircle( CvArr* img, CvPoint center, int radius, + CvScalar color, int thickness CV_DEFAULT(1), + int line_type CV_DEFAULT(8), int shift CV_DEFAULT(0)); + +/* Draws ellipse outline, filled ellipse, elliptic arc or filled elliptic sector, + depending on , and parameters. The resultant figure + is rotated by . All the angles are in degrees */ +CVAPI(void) cvEllipse( CvArr* img, CvPoint center, CvSize axes, + double angle, double start_angle, double end_angle, + CvScalar color, int thickness CV_DEFAULT(1), + int line_type CV_DEFAULT(8), int shift CV_DEFAULT(0)); + +CV_INLINE void cvEllipseBox( CvArr* img, CvBox2D box, CvScalar color, + int thickness CV_DEFAULT(1), + int line_type CV_DEFAULT(8), int shift CV_DEFAULT(0) ) +{ + CvSize axes; + axes.width = cvRound(box.size.width*0.5); + axes.height = cvRound(box.size.height*0.5); + + cvEllipse( img, cvPointFrom32f( box.center ), axes, box.angle, + 0, 360, color, thickness, line_type, shift ); +} + +/* Fills convex or monotonous polygon. 
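
/* A small drawing sketch using the primitives above; the canvas size, shapes
   and colours are arbitrary illustrative choices. */
IplImage* canvas = cvCreateImage( cvSize(320, 240), IPL_DEPTH_8U, 3 );
cvZero( canvas );
cvLine( canvas, cvPoint(10, 10), cvPoint(300, 200), CV_RGB(255, 0, 0), 2, CV_AA, 0 );
cvRectangle( canvas, cvPoint(20, 20), cvPoint(100, 80), CV_RGB(0, 255, 0), CV_FILLED, 8, 0 );
cvCircle( canvas, cvPoint(160, 120), 40, CV_RGB(0, 0, 255), 1, 8, 0 );
cvReleaseImage( &canvas );
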
*/ +CVAPI(void) cvFillConvexPoly( CvArr* img, const CvPoint* pts, int npts, CvScalar color, + int line_type CV_DEFAULT(8), int shift CV_DEFAULT(0)); + +/* Fills an area bounded by one or more arbitrary polygons */ +CVAPI(void) cvFillPoly( CvArr* img, CvPoint** pts, const int* npts, + int contours, CvScalar color, + int line_type CV_DEFAULT(8), int shift CV_DEFAULT(0) ); + +/* Draws one or more polygonal curves */ +CVAPI(void) cvPolyLine( CvArr* img, CvPoint** pts, const int* npts, int contours, + int is_closed, CvScalar color, int thickness CV_DEFAULT(1), + int line_type CV_DEFAULT(8), int shift CV_DEFAULT(0) ); + +#define cvDrawRect cvRectangle +#define cvDrawLine cvLine +#define cvDrawCircle cvCircle +#define cvDrawEllipse cvEllipse +#define cvDrawPolyLine cvPolyLine + +/* Clips the line segment connecting *pt1 and *pt2 + by the rectangular window + (0<=xptr will point + to pt1 (or pt2, see left_to_right description) location in the image. + Returns the number of pixels on the line between the ending points. */ +CVAPI(int) cvInitLineIterator( const CvArr* image, CvPoint pt1, CvPoint pt2, + CvLineIterator* line_iterator, + int connectivity CV_DEFAULT(8), + int left_to_right CV_DEFAULT(0)); + +/* Moves iterator to the next line point */ +#define CV_NEXT_LINE_POINT( line_iterator ) \ +{ \ + int _line_iterator_mask = (line_iterator).err < 0 ? -1 : 0; \ + (line_iterator).err += (line_iterator).minus_delta + \ + ((line_iterator).plus_delta & _line_iterator_mask); \ + (line_iterator).ptr += (line_iterator).minus_step + \ + ((line_iterator).plus_step & _line_iterator_mask); \ +} + + +/* basic font types */ +#define CV_FONT_HERSHEY_SIMPLEX 0 +#define CV_FONT_HERSHEY_PLAIN 1 +#define CV_FONT_HERSHEY_DUPLEX 2 +#define CV_FONT_HERSHEY_COMPLEX 3 +#define CV_FONT_HERSHEY_TRIPLEX 4 +#define CV_FONT_HERSHEY_COMPLEX_SMALL 5 +#define CV_FONT_HERSHEY_SCRIPT_SIMPLEX 6 +#define CV_FONT_HERSHEY_SCRIPT_COMPLEX 7 + +/* font flags */ +#define CV_FONT_ITALIC 16 + +#define CV_FONT_VECTOR0 CV_FONT_HERSHEY_SIMPLEX + + +/* Font structure */ +typedef struct CvFont +{ + const char* nameFont; //Qt:nameFont + CvScalar color; //Qt:ColorFont -> cvScalar(blue_component, green_component, red\_component[, alpha_component]) + int font_face; //Qt: bool italic /* =CV_FONT_* */ + const int* ascii; /* font data and metrics */ + const int* greek; + const int* cyrillic; + float hscale, vscale; + float shear; /* slope coefficient: 0 - normal, >0 - italic */ + int thickness; //Qt: weight /* letters thickness */ + float dx; /* horizontal interval between letters */ + int line_type; //Qt: PointSize +} +CvFont; + +/* Initializes font structure used further in cvPutText */ +CVAPI(void) cvInitFont( CvFont* font, int font_face, + double hscale, double vscale, + double shear CV_DEFAULT(0), + int thickness CV_DEFAULT(1), + int line_type CV_DEFAULT(8)); + +CV_INLINE CvFont cvFont( double scale, int thickness CV_DEFAULT(1) ) +{ + CvFont font; + cvInitFont( &font, CV_FONT_HERSHEY_PLAIN, scale, scale, 0, thickness, CV_AA ); + return font; +} + +/* Renders text stroke with specified font and color at specified location. 
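
/* A sketch of text rendering with the font structure above; the image, text
   and placement are illustrative assumptions. */
IplImage* label_img = cvCreateImage( cvSize(320, 240), IPL_DEPTH_8U, 3 );
CvFont font;
cvInitFont( &font, CV_FONT_HERSHEY_SIMPLEX, 1.0, 1.0, 0, 2, CV_AA );
cvPutText( label_img, "RPi", cvPoint(30, 200), &font, CV_RGB(255, 255, 255) );
cvReleaseImage( &label_img );
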
+ CvFont should be initialized with cvInitFont */ +CVAPI(void) cvPutText( CvArr* img, const char* text, CvPoint org, + const CvFont* font, CvScalar color ); + +/* Calculates bounding box of text stroke (useful for alignment) */ +CVAPI(void) cvGetTextSize( const char* text_string, const CvFont* font, + CvSize* text_size, int* baseline ); + + + +/* Unpacks color value, if arrtype is CV_8UC?, is treated as + packed color value, otherwise the first channels (depending on arrtype) + of destination scalar are set to the same value = */ +CVAPI(CvScalar) cvColorToScalar( double packed_color, int arrtype ); + +/* Returns the polygon points which make up the given ellipse. The ellipse is define by + the box of size 'axes' rotated 'angle' around the 'center'. A partial sweep + of the ellipse arc can be done by spcifying arc_start and arc_end to be something + other than 0 and 360, respectively. The input array 'pts' must be large enough to + hold the result. The total number of points stored into 'pts' is returned by this + function. */ +CVAPI(int) cvEllipse2Poly( CvPoint center, CvSize axes, + int angle, int arc_start, int arc_end, CvPoint * pts, int delta ); + +/* Draws contour outlines or filled interiors on the image */ +CVAPI(void) cvDrawContours( CvArr *img, CvSeq* contour, + CvScalar external_color, CvScalar hole_color, + int max_level, int thickness CV_DEFAULT(1), + int line_type CV_DEFAULT(8), + CvPoint offset CV_DEFAULT(cvPoint(0,0))); + +/* Does look-up transformation. Elements of the source array + (that should be 8uC1 or 8sC1) are used as indexes in lutarr 256-element table */ +CVAPI(void) cvLUT( const CvArr* src, CvArr* dst, const CvArr* lut ); + + +/******************* Iteration through the sequence tree *****************/ +typedef struct CvTreeNodeIterator +{ + const void* node; + int level; + int max_level; +} +CvTreeNodeIterator; + +CVAPI(void) cvInitTreeNodeIterator( CvTreeNodeIterator* tree_iterator, + const void* first, int max_level ); +CVAPI(void*) cvNextTreeNode( CvTreeNodeIterator* tree_iterator ); +CVAPI(void*) cvPrevTreeNode( CvTreeNodeIterator* tree_iterator ); + +/* Inserts sequence into tree with specified "parent" sequence. + If parent is equal to frame (e.g. the most external contour), + then added contour will have null pointer to parent. */ +CVAPI(void) cvInsertNodeIntoTree( void* node, void* parent, void* frame ); + +/* Removes contour from tree (together with the contour children). */ +CVAPI(void) cvRemoveNodeFromTree( void* node, void* frame ); + +/* Gathers pointers to all the sequences, + accessible from the , to the single sequence */ +CVAPI(CvSeq*) cvTreeToNodeSeq( const void* first, int header_size, + CvMemStorage* storage ); + +/* The function implements the K-means algorithm for clustering an array of sample + vectors in a specified number of classes */ +#define CV_KMEANS_USE_INITIAL_LABELS 1 +CVAPI(int) cvKMeans2( const CvArr* samples, int cluster_count, CvArr* labels, + CvTermCriteria termcrit, int attempts CV_DEFAULT(1), + CvRNG* rng CV_DEFAULT(0), int flags CV_DEFAULT(0), + CvArr* _centers CV_DEFAULT(0), double* compactness CV_DEFAULT(0) ); + +/****************************************************************************************\ +* System functions * +\****************************************************************************************/ + +/* Add the function pointers table with associated information to the IPP primitives list */ +CVAPI(int) cvRegisterModule( const CvModuleInfo* module_info ); + +/* Loads optimized functions from IPP, MKL etc. 
or switches back to pure C code */ +CVAPI(int) cvUseOptimized( int on_off ); + +/* Retrieves information about the registered modules and loaded optimized plugins */ +CVAPI(void) cvGetModuleInfo( const char* module_name, + const char** version, + const char** loaded_addon_plugins ); + +typedef void* (CV_CDECL *CvAllocFunc)(size_t size, void* userdata); +typedef int (CV_CDECL *CvFreeFunc)(void* pptr, void* userdata); + +/* Set user-defined memory managment functions (substitutors for malloc and free) that + will be called by cvAlloc, cvFree and higher-level functions (e.g. cvCreateImage) */ +CVAPI(void) cvSetMemoryManager( CvAllocFunc alloc_func CV_DEFAULT(NULL), + CvFreeFunc free_func CV_DEFAULT(NULL), + void* userdata CV_DEFAULT(NULL)); + + +typedef IplImage* (CV_STDCALL* Cv_iplCreateImageHeader) + (int,int,int,char*,char*,int,int,int,int,int, + IplROI*,IplImage*,void*,IplTileInfo*); +typedef void (CV_STDCALL* Cv_iplAllocateImageData)(IplImage*,int,int); +typedef void (CV_STDCALL* Cv_iplDeallocate)(IplImage*,int); +typedef IplROI* (CV_STDCALL* Cv_iplCreateROI)(int,int,int,int,int); +typedef IplImage* (CV_STDCALL* Cv_iplCloneImage)(const IplImage*); + +/* Makes OpenCV use IPL functions for IplImage allocation/deallocation */ +CVAPI(void) cvSetIPLAllocators( Cv_iplCreateImageHeader create_header, + Cv_iplAllocateImageData allocate_data, + Cv_iplDeallocate deallocate, + Cv_iplCreateROI create_roi, + Cv_iplCloneImage clone_image ); + +#define CV_TURN_ON_IPL_COMPATIBILITY() \ + cvSetIPLAllocators( iplCreateImageHeader, iplAllocateImage, \ + iplDeallocate, iplCreateROI, iplCloneImage ) + +/****************************************************************************************\ +* Data Persistence * +\****************************************************************************************/ + +/********************************** High-level functions ********************************/ + +/* opens existing or creates new file storage */ +CVAPI(CvFileStorage*) cvOpenFileStorage( const char* filename, CvMemStorage* memstorage, + int flags, const char* encoding CV_DEFAULT(NULL) ); + +/* closes file storage and deallocates buffers */ +CVAPI(void) cvReleaseFileStorage( CvFileStorage** fs ); + +/* returns attribute value or 0 (NULL) if there is no such attribute */ +CVAPI(const char*) cvAttrValue( const CvAttrList* attr, const char* attr_name ); + +/* starts writing compound structure (map or sequence) */ +CVAPI(void) cvStartWriteStruct( CvFileStorage* fs, const char* name, + int struct_flags, const char* type_name CV_DEFAULT(NULL), + CvAttrList attributes CV_DEFAULT(cvAttrList())); + +/* finishes writing compound structure */ +CVAPI(void) cvEndWriteStruct( CvFileStorage* fs ); + +/* writes an integer */ +CVAPI(void) cvWriteInt( CvFileStorage* fs, const char* name, int value ); + +/* writes a floating-point number */ +CVAPI(void) cvWriteReal( CvFileStorage* fs, const char* name, double value ); + +/* writes a string */ +CVAPI(void) cvWriteString( CvFileStorage* fs, const char* name, + const char* str, int quote CV_DEFAULT(0) ); + +/* writes a comment */ +CVAPI(void) cvWriteComment( CvFileStorage* fs, const char* comment, + int eol_comment ); + +/* writes instance of a standard type (matrix, image, sequence, graph etc.) 
+ or user-defined type */ +CVAPI(void) cvWrite( CvFileStorage* fs, const char* name, const void* ptr, + CvAttrList attributes CV_DEFAULT(cvAttrList())); + +/* starts the next stream */ +CVAPI(void) cvStartNextStream( CvFileStorage* fs ); + +/* helper function: writes multiple integer or floating-point numbers */ +CVAPI(void) cvWriteRawData( CvFileStorage* fs, const void* src, + int len, const char* dt ); + +/* returns the hash entry corresponding to the specified literal key string or 0 + if there is no such a key in the storage */ +CVAPI(CvStringHashNode*) cvGetHashedKey( CvFileStorage* fs, const char* name, + int len CV_DEFAULT(-1), + int create_missing CV_DEFAULT(0)); + +/* returns file node with the specified key within the specified map + (collection of named nodes) */ +CVAPI(CvFileNode*) cvGetRootFileNode( const CvFileStorage* fs, + int stream_index CV_DEFAULT(0) ); + +/* returns file node with the specified key within the specified map + (collection of named nodes) */ +CVAPI(CvFileNode*) cvGetFileNode( CvFileStorage* fs, CvFileNode* map, + const CvStringHashNode* key, + int create_missing CV_DEFAULT(0) ); + +/* this is a slower version of cvGetFileNode that takes the key as a literal string */ +CVAPI(CvFileNode*) cvGetFileNodeByName( const CvFileStorage* fs, + const CvFileNode* map, + const char* name ); + +CV_INLINE int cvReadInt( const CvFileNode* node, int default_value CV_DEFAULT(0) ) +{ + return !node ? default_value : + CV_NODE_IS_INT(node->tag) ? node->data.i : + CV_NODE_IS_REAL(node->tag) ? cvRound(node->data.f) : 0x7fffffff; +} + + +CV_INLINE int cvReadIntByName( const CvFileStorage* fs, const CvFileNode* map, + const char* name, int default_value CV_DEFAULT(0) ) +{ + return cvReadInt( cvGetFileNodeByName( fs, map, name ), default_value ); +} + + +CV_INLINE double cvReadReal( const CvFileNode* node, double default_value CV_DEFAULT(0.) ) +{ + return !node ? default_value : + CV_NODE_IS_INT(node->tag) ? (double)node->data.i : + CV_NODE_IS_REAL(node->tag) ? node->data.f : 1e300; +} + + +CV_INLINE double cvReadRealByName( const CvFileStorage* fs, const CvFileNode* map, + const char* name, double default_value CV_DEFAULT(0.) ) +{ + return cvReadReal( cvGetFileNodeByName( fs, map, name ), default_value ); +} + + +CV_INLINE const char* cvReadString( const CvFileNode* node, + const char* default_value CV_DEFAULT(NULL) ) +{ + return !node ? default_value : CV_NODE_IS_STRING(node->tag) ? 
node->data.str.ptr : 0; +} + + +CV_INLINE const char* cvReadStringByName( const CvFileStorage* fs, const CvFileNode* map, + const char* name, const char* default_value CV_DEFAULT(NULL) ) +{ + return cvReadString( cvGetFileNodeByName( fs, map, name ), default_value ); +} + + +/* decodes standard or user-defined object and returns it */ +CVAPI(void*) cvRead( CvFileStorage* fs, CvFileNode* node, + CvAttrList* attributes CV_DEFAULT(NULL)); + +/* decodes standard or user-defined object and returns it */ +CV_INLINE void* cvReadByName( CvFileStorage* fs, const CvFileNode* map, + const char* name, CvAttrList* attributes CV_DEFAULT(NULL) ) +{ + return cvRead( fs, cvGetFileNodeByName( fs, map, name ), attributes ); +} + + +/* starts reading data from sequence or scalar numeric node */ +CVAPI(void) cvStartReadRawData( const CvFileStorage* fs, const CvFileNode* src, + CvSeqReader* reader ); + +/* reads multiple numbers and stores them to array */ +CVAPI(void) cvReadRawDataSlice( const CvFileStorage* fs, CvSeqReader* reader, + int count, void* dst, const char* dt ); + +/* combination of two previous functions for easier reading of whole sequences */ +CVAPI(void) cvReadRawData( const CvFileStorage* fs, const CvFileNode* src, + void* dst, const char* dt ); + +/* writes a copy of file node to file storage */ +CVAPI(void) cvWriteFileNode( CvFileStorage* fs, const char* new_node_name, + const CvFileNode* node, int embed ); + +/* returns name of file node */ +CVAPI(const char*) cvGetFileNodeName( const CvFileNode* node ); + +/*********************************** Adding own types ***********************************/ + +CVAPI(void) cvRegisterType( const CvTypeInfo* info ); +CVAPI(void) cvUnregisterType( const char* type_name ); +CVAPI(CvTypeInfo*) cvFirstType(void); +CVAPI(CvTypeInfo*) cvFindType( const char* type_name ); +CVAPI(CvTypeInfo*) cvTypeOf( const void* struct_ptr ); + +/* universal functions */ +CVAPI(void) cvRelease( void** struct_ptr ); +CVAPI(void*) cvClone( const void* struct_ptr ); + +/* simple API for reading/writing data */ +CVAPI(void) cvSave( const char* filename, const void* struct_ptr, + const char* name CV_DEFAULT(NULL), + const char* comment CV_DEFAULT(NULL), + CvAttrList attributes CV_DEFAULT(cvAttrList())); +CVAPI(void*) cvLoad( const char* filename, + CvMemStorage* memstorage CV_DEFAULT(NULL), + const char* name CV_DEFAULT(NULL), + const char** real_name CV_DEFAULT(NULL) ); + +/*********************************** Measuring Execution Time ***************************/ + +/* helper functions for RNG initialization and accurate time measurement: + uses internal clock counter on x86 */ +CVAPI(int64) cvGetTickCount( void ); +CVAPI(double) cvGetTickFrequency( void ); + +/*********************************** CPU capabilities ***********************************/ + +#define CV_CPU_NONE 0 +#define CV_CPU_MMX 1 +#define CV_CPU_SSE 2 +#define CV_CPU_SSE2 3 +#define CV_CPU_SSE3 4 +#define CV_CPU_SSSE3 5 +#define CV_CPU_SSE4_1 6 +#define CV_CPU_SSE4_2 7 +#define CV_CPU_POPCNT 8 +#define CV_CPU_AVX 10 +#define CV_CPU_AVX2 11 +#define CV_HARDWARE_MAX_FEATURE 255 + +CVAPI(int) cvCheckHardwareSupport(int feature); + +/*********************************** Multi-Threading ************************************/ + +/* retrieve/set the number of threads used in OpenMP implementations */ +CVAPI(int) cvGetNumThreads( void ); +CVAPI(void) cvSetNumThreads( int threads CV_DEFAULT(0) ); +/* get index of the thread being executed */ +CVAPI(int) cvGetThreadNum( void ); + + +/********************************** Error 
Handling **************************************/ + +/* Get current OpenCV error status */ +CVAPI(int) cvGetErrStatus( void ); + +/* Sets error status silently */ +CVAPI(void) cvSetErrStatus( int status ); + +#define CV_ErrModeLeaf 0 /* Print error and exit program */ +#define CV_ErrModeParent 1 /* Print error and continue */ +#define CV_ErrModeSilent 2 /* Don't print and continue */ + +/* Retrives current error processing mode */ +CVAPI(int) cvGetErrMode( void ); + +/* Sets error processing mode, returns previously used mode */ +CVAPI(int) cvSetErrMode( int mode ); + +/* Sets error status and performs some additonal actions (displaying message box, + writing message to stderr, terminating application etc.) + depending on the current error mode */ +CVAPI(void) cvError( int status, const char* func_name, + const char* err_msg, const char* file_name, int line ); + +/* Retrieves textual description of the error given its code */ +CVAPI(const char*) cvErrorStr( int status ); + +/* Retrieves detailed information about the last error occured */ +CVAPI(int) cvGetErrInfo( const char** errcode_desc, const char** description, + const char** filename, int* line ); + +/* Maps IPP error codes to the counterparts from OpenCV */ +CVAPI(int) cvErrorFromIppStatus( int ipp_status ); + +typedef int (CV_CDECL *CvErrorCallback)( int status, const char* func_name, + const char* err_msg, const char* file_name, int line, void* userdata ); + +/* Assigns a new error-handling function */ +CVAPI(CvErrorCallback) cvRedirectError( CvErrorCallback error_handler, + void* userdata CV_DEFAULT(NULL), + void** prev_userdata CV_DEFAULT(NULL) ); + +/* + Output to: + cvNulDevReport - nothing + cvStdErrReport - console(fprintf(stderr,...)) + cvGuiBoxReport - MessageBox(WIN32) + */ +CVAPI(int) cvNulDevReport( int status, const char* func_name, const char* err_msg, + const char* file_name, int line, void* userdata ); + +CVAPI(int) cvStdErrReport( int status, const char* func_name, const char* err_msg, + const char* file_name, int line, void* userdata ); + +CVAPI(int) cvGuiBoxReport( int status, const char* func_name, const char* err_msg, + const char* file_name, int line, void* userdata ); + +#define OPENCV_ERROR(status,func,context) \ +cvError((status),(func),(context),__FILE__,__LINE__) + +#define OPENCV_ERRCHK(func,context) \ +{if (cvGetErrStatus() >= 0) \ +{OPENCV_ERROR(CV_StsBackTrace,(func),(context));}} + +#define OPENCV_ASSERT(expr,func,context) \ +{if (! (expr)) \ +{OPENCV_ERROR(CV_StsInternal,(func),(context));}} + +#define OPENCV_RSTERR() (cvSetErrStatus(CV_StsOk)) + +#define OPENCV_CALL( Func ) \ +{ \ +Func; \ +} + + +/* CV_FUNCNAME macro defines icvFuncName constant which is used by CV_ERROR macro */ +#ifdef CV_NO_FUNC_NAMES +#define CV_FUNCNAME( Name ) +#define cvFuncName "" +#else +#define CV_FUNCNAME( Name ) \ +static char cvFuncName[] = Name +#endif + + +/* + CV_ERROR macro unconditionally raises error with passed code and message. + After raising error, control will be transferred to the exit label. + */ +#define CV_ERROR( Code, Msg ) \ +{ \ + cvError( (Code), cvFuncName, Msg, __FILE__, __LINE__ ); \ + __CV_EXIT__; \ +} + +/* Simplified form of CV_ERROR */ +#define CV_ERROR_FROM_CODE( code ) \ + CV_ERROR( code, "" ) + +/* + CV_CHECK macro checks error status after CV (or IPL) + function call. If error detected, control will be transferred to the exit + label. + */ +#define CV_CHECK() \ +{ \ + if( cvGetErrStatus() < 0 ) \ + CV_ERROR( CV_StsBackTrace, "Inner function failed." 
); \ +} + + +/* + CV_CALL macro calls CV (or IPL) function, checks error status and + signals a error if the function failed. Useful in "parent node" + error procesing mode + */ +#define CV_CALL( Func ) \ +{ \ + Func; \ + CV_CHECK(); \ +} + + +/* Runtime assertion macro */ +#define CV_ASSERT( Condition ) \ +{ \ + if( !(Condition) ) \ + CV_ERROR( CV_StsInternal, "Assertion: " #Condition " failed" ); \ +} + +#define __CV_BEGIN__ { +#define __CV_END__ goto exit; exit: ; } +#define __CV_EXIT__ goto exit + +#ifdef __cplusplus +} + +// classes for automatic module/RTTI data registration/unregistration +struct CV_EXPORTS CvModule +{ + CvModule( CvModuleInfo* _info ); + ~CvModule(); + CvModuleInfo* info; + + static CvModuleInfo* first; + static CvModuleInfo* last; +}; + +struct CV_EXPORTS CvType +{ + CvType( const char* type_name, + CvIsInstanceFunc is_instance, CvReleaseFunc release=0, + CvReadFunc read=0, CvWriteFunc write=0, CvCloneFunc clone=0 ); + ~CvType(); + CvTypeInfo* info; + + static CvTypeInfo* first; + static CvTypeInfo* last; +}; + +#endif + +#endif diff --git a/src/c/hardware/rasberrypi/libraries/opencv/opencv2/core/cuda_devptrs.hpp b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/core/cuda_devptrs.hpp new file mode 100644 index 0000000..1534045 --- /dev/null +++ b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/core/cuda_devptrs.hpp @@ -0,0 +1,199 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. 
+// +//M*/ + +#ifndef __OPENCV_CORE_DEVPTRS_HPP__ +#define __OPENCV_CORE_DEVPTRS_HPP__ + +#ifdef __cplusplus + +#ifdef __CUDACC__ + #define __CV_GPU_HOST_DEVICE__ __host__ __device__ __forceinline__ +#else + #define __CV_GPU_HOST_DEVICE__ +#endif + +namespace cv +{ + namespace gpu + { + // Simple lightweight structures that encapsulates information about an image on device. + // It is intended to pass to nvcc-compiled code. GpuMat depends on headers that nvcc can't compile + + template struct StaticAssert; + template <> struct StaticAssert {static __CV_GPU_HOST_DEVICE__ void check(){}}; + + template struct DevPtr + { + typedef T elem_type; + typedef int index_type; + + enum { elem_size = sizeof(elem_type) }; + + T* data; + + __CV_GPU_HOST_DEVICE__ DevPtr() : data(0) {} + __CV_GPU_HOST_DEVICE__ DevPtr(T* data_) : data(data_) {} + + __CV_GPU_HOST_DEVICE__ size_t elemSize() const { return elem_size; } + __CV_GPU_HOST_DEVICE__ operator T*() { return data; } + __CV_GPU_HOST_DEVICE__ operator const T*() const { return data; } + }; + + template struct PtrSz : public DevPtr + { + __CV_GPU_HOST_DEVICE__ PtrSz() : size(0) {} + __CV_GPU_HOST_DEVICE__ PtrSz(T* data_, size_t size_) : DevPtr(data_), size(size_) {} + + size_t size; + }; + + template struct PtrStep : public DevPtr + { + __CV_GPU_HOST_DEVICE__ PtrStep() : step(0) {} + __CV_GPU_HOST_DEVICE__ PtrStep(T* data_, size_t step_) : DevPtr(data_), step(step_) {} + + /** \brief stride between two consecutive rows in bytes. Step is stored always and everywhere in bytes!!! */ + size_t step; + + __CV_GPU_HOST_DEVICE__ T* ptr(int y = 0) { return ( T*)( ( char*)DevPtr::data + y * step); } + __CV_GPU_HOST_DEVICE__ const T* ptr(int y = 0) const { return (const T*)( (const char*)DevPtr::data + y * step); } + + __CV_GPU_HOST_DEVICE__ T& operator ()(int y, int x) { return ptr(y)[x]; } + __CV_GPU_HOST_DEVICE__ const T& operator ()(int y, int x) const { return ptr(y)[x]; } + }; + + template struct PtrStepSz : public PtrStep + { + __CV_GPU_HOST_DEVICE__ PtrStepSz() : cols(0), rows(0) {} + __CV_GPU_HOST_DEVICE__ PtrStepSz(int rows_, int cols_, T* data_, size_t step_) + : PtrStep(data_, step_), cols(cols_), rows(rows_) {} + + template + explicit PtrStepSz(const PtrStepSz& d) : PtrStep((T*)d.data, d.step), cols(d.cols), rows(d.rows){} + + int cols; + int rows; + }; + + typedef PtrStepSz PtrStepSzb; + typedef PtrStepSz PtrStepSzf; + typedef PtrStepSz PtrStepSzi; + + typedef PtrStep PtrStepb; + typedef PtrStep PtrStepf; + typedef PtrStep PtrStepi; + + +#if defined __GNUC__ + #define __CV_GPU_DEPR_BEFORE__ + #define __CV_GPU_DEPR_AFTER__ __attribute__ ((deprecated)) +#elif defined(__MSVC__) //|| defined(__CUDACC__) + #pragma deprecated(DevMem2D_) + #define __CV_GPU_DEPR_BEFORE__ __declspec(deprecated) + #define __CV_GPU_DEPR_AFTER__ +#else + #define __CV_GPU_DEPR_BEFORE__ + #define __CV_GPU_DEPR_AFTER__ +#endif + + template struct __CV_GPU_DEPR_BEFORE__ DevMem2D_ : public PtrStepSz + { + DevMem2D_() {} + DevMem2D_(int rows_, int cols_, T* data_, size_t step_) : PtrStepSz(rows_, cols_, data_, step_) {} + + template + explicit __CV_GPU_DEPR_BEFORE__ DevMem2D_(const DevMem2D_& d) : PtrStepSz(d.rows, d.cols, (T*)d.data, d.step) {} + } __CV_GPU_DEPR_AFTER__ ; + + typedef DevMem2D_ DevMem2Db; + typedef DevMem2Db DevMem2D; + typedef DevMem2D_ DevMem2Df; + typedef DevMem2D_ DevMem2Di; + + template struct PtrElemStep_ : public PtrStep + { + PtrElemStep_(const DevMem2D_& mem) : PtrStep(mem.data, mem.step) + { + StaticAssert<256 % sizeof(T) == 0>::check(); + + PtrStep::step /= 
PtrStep::elem_size; + } + __CV_GPU_HOST_DEVICE__ T* ptr(int y = 0) { return PtrStep::data + y * PtrStep::step; } + __CV_GPU_HOST_DEVICE__ const T* ptr(int y = 0) const { return PtrStep::data + y * PtrStep::step; } + + __CV_GPU_HOST_DEVICE__ T& operator ()(int y, int x) { return ptr(y)[x]; } + __CV_GPU_HOST_DEVICE__ const T& operator ()(int y, int x) const { return ptr(y)[x]; } + }; + + template struct PtrStep_ : public PtrStep + { + PtrStep_() {} + PtrStep_(const DevMem2D_& mem) : PtrStep(mem.data, mem.step) {} + }; + + typedef PtrElemStep_ PtrElemStep; + typedef PtrElemStep_ PtrElemStepf; + typedef PtrElemStep_ PtrElemStepi; + +//#undef __CV_GPU_DEPR_BEFORE__ +//#undef __CV_GPU_DEPR_AFTER__ + + namespace device + { + using cv::gpu::PtrSz; + using cv::gpu::PtrStep; + using cv::gpu::PtrStepSz; + + using cv::gpu::PtrStepSzb; + using cv::gpu::PtrStepSzf; + using cv::gpu::PtrStepSzi; + + using cv::gpu::PtrStepb; + using cv::gpu::PtrStepf; + using cv::gpu::PtrStepi; + } + } +} + +#endif // __cplusplus + +#endif /* __OPENCV_CORE_DEVPTRS_HPP__ */ diff --git a/src/c/hardware/rasberrypi/libraries/opencv/opencv2/core/devmem2d.hpp b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/core/devmem2d.hpp new file mode 100644 index 0000000..18dfcd8 --- /dev/null +++ b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/core/devmem2d.hpp @@ -0,0 +1,43 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. 
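Editor's note: the DevPtr / PtrStep / PtrStepSz wrappers defined in cuda_devptrs.hpp above are plain POD views over strided memory; outside nvcc the __CV_GPU_HOST_DEVICE__ qualifier expands to nothing, so they can be exercised host-side. A small sketch, assuming the header is on the include path (the buffer here is ordinary host memory, purely for illustration):

// Host-side illustration of the PtrStepSz view (step is always in bytes).
#include <cstdio>
#include "opencv2/core/cuda_devptrs.hpp"

int main()
{
    float buf[2][3] = { { 1.f, 2.f, 3.f }, { 4.f, 5.f, 6.f } };

    // rows, cols, data pointer, row step in bytes
    cv::gpu::PtrStepSzf img( 2, 3, &buf[0][0], 3 * sizeof(float) );

    // ptr(y) applies the byte step; operator()(y, x) indexes one element
    std::printf( "rows=%d cols=%d img(1,2)=%g\n", img.rows, img.cols, img(1, 2) );
    return 0;
}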
+// +//M*/ + +#include "opencv2/core/cuda_devptrs.hpp" diff --git a/src/c/hardware/rasberrypi/libraries/opencv/opencv2/core/eigen.hpp b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/core/eigen.hpp new file mode 100644 index 0000000..a7b237f --- /dev/null +++ b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/core/eigen.hpp @@ -0,0 +1,280 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. 
+// +//M*/ + +#ifndef __OPENCV_CORE_EIGEN_HPP__ +#define __OPENCV_CORE_EIGEN_HPP__ + +#ifdef __cplusplus + +#include "opencv2/core/core_c.h" +#include "opencv2/core/core.hpp" + +#if defined _MSC_VER && _MSC_VER >= 1200 +#pragma warning( disable: 4714 ) //__forceinline is not inlined +#pragma warning( disable: 4127 ) //conditional expression is constant +#pragma warning( disable: 4244 ) //conversion from '__int64' to 'int', possible loss of data +#endif + +namespace cv +{ + +template +void eigen2cv( const Eigen::Matrix<_Tp, _rows, _cols, _options, _maxRows, _maxCols>& src, Mat& dst ) +{ + if( !(src.Flags & Eigen::RowMajorBit) ) + { + Mat _src(src.cols(), src.rows(), DataType<_Tp>::type, + (void*)src.data(), src.stride()*sizeof(_Tp)); + transpose(_src, dst); + } + else + { + Mat _src(src.rows(), src.cols(), DataType<_Tp>::type, + (void*)src.data(), src.stride()*sizeof(_Tp)); + _src.copyTo(dst); + } +} + +template +void cv2eigen( const Mat& src, + Eigen::Matrix<_Tp, _rows, _cols, _options, _maxRows, _maxCols>& dst ) +{ + CV_DbgAssert(src.rows == _rows && src.cols == _cols); + if( !(dst.Flags & Eigen::RowMajorBit) ) + { + Mat _dst(src.cols, src.rows, DataType<_Tp>::type, + dst.data(), (size_t)(dst.stride()*sizeof(_Tp))); + if( src.type() == _dst.type() ) + transpose(src, _dst); + else if( src.cols == src.rows ) + { + src.convertTo(_dst, _dst.type()); + transpose(_dst, _dst); + } + else + Mat(src.t()).convertTo(_dst, _dst.type()); + CV_DbgAssert(_dst.data == (uchar*)dst.data()); + } + else + { + Mat _dst(src.rows, src.cols, DataType<_Tp>::type, + dst.data(), (size_t)(dst.stride()*sizeof(_Tp))); + src.convertTo(_dst, _dst.type()); + CV_DbgAssert(_dst.data == (uchar*)dst.data()); + } +} + +// Matx case +template +void cv2eigen( const Matx<_Tp, _rows, _cols>& src, + Eigen::Matrix<_Tp, _rows, _cols, _options, _maxRows, _maxCols>& dst ) +{ + if( !(dst.Flags & Eigen::RowMajorBit) ) + { + Mat _dst(_cols, _rows, DataType<_Tp>::type, + dst.data(), (size_t)(dst.stride()*sizeof(_Tp))); + transpose(src, _dst); + CV_DbgAssert(_dst.data == (uchar*)dst.data()); + } + else + { + Mat _dst(_rows, _cols, DataType<_Tp>::type, + dst.data(), (size_t)(dst.stride()*sizeof(_Tp))); + Mat(src).copyTo(_dst); + CV_DbgAssert(_dst.data == (uchar*)dst.data()); + } +} + +template +void cv2eigen( const Mat& src, + Eigen::Matrix<_Tp, Eigen::Dynamic, Eigen::Dynamic>& dst ) +{ + dst.resize(src.rows, src.cols); + if( !(dst.Flags & Eigen::RowMajorBit) ) + { + Mat _dst(src.cols, src.rows, DataType<_Tp>::type, + dst.data(), (size_t)(dst.stride()*sizeof(_Tp))); + if( src.type() == _dst.type() ) + transpose(src, _dst); + else if( src.cols == src.rows ) + { + src.convertTo(_dst, _dst.type()); + transpose(_dst, _dst); + } + else + Mat(src.t()).convertTo(_dst, _dst.type()); + CV_DbgAssert(_dst.data == (uchar*)dst.data()); + } + else + { + Mat _dst(src.rows, src.cols, DataType<_Tp>::type, + dst.data(), (size_t)(dst.stride()*sizeof(_Tp))); + src.convertTo(_dst, _dst.type()); + CV_DbgAssert(_dst.data == (uchar*)dst.data()); + } +} + +// Matx case +template +void cv2eigen( const Matx<_Tp, _rows, _cols>& src, + Eigen::Matrix<_Tp, Eigen::Dynamic, Eigen::Dynamic>& dst ) +{ + dst.resize(_rows, _cols); + if( !(dst.Flags & Eigen::RowMajorBit) ) + { + Mat _dst(_cols, _rows, DataType<_Tp>::type, + dst.data(), (size_t)(dst.stride()*sizeof(_Tp))); + transpose(src, _dst); + CV_DbgAssert(_dst.data == (uchar*)dst.data()); + } + else + { + Mat _dst(_rows, _cols, DataType<_Tp>::type, + dst.data(), (size_t)(dst.stride()*sizeof(_Tp))); + Mat(src).copyTo(_dst); 
+ CV_DbgAssert(_dst.data == (uchar*)dst.data()); + } +} + +template +void cv2eigen( const Mat& src, + Eigen::Matrix<_Tp, Eigen::Dynamic, 1>& dst ) +{ + CV_Assert(src.cols == 1); + dst.resize(src.rows); + + if( !(dst.Flags & Eigen::RowMajorBit) ) + { + Mat _dst(src.cols, src.rows, DataType<_Tp>::type, + dst.data(), (size_t)(dst.stride()*sizeof(_Tp))); + if( src.type() == _dst.type() ) + transpose(src, _dst); + else + Mat(src.t()).convertTo(_dst, _dst.type()); + CV_DbgAssert(_dst.data == (uchar*)dst.data()); + } + else + { + Mat _dst(src.rows, src.cols, DataType<_Tp>::type, + dst.data(), (size_t)(dst.stride()*sizeof(_Tp))); + src.convertTo(_dst, _dst.type()); + CV_DbgAssert(_dst.data == (uchar*)dst.data()); + } +} + +// Matx case +template +void cv2eigen( const Matx<_Tp, _rows, 1>& src, + Eigen::Matrix<_Tp, Eigen::Dynamic, 1>& dst ) +{ + dst.resize(_rows); + + if( !(dst.Flags & Eigen::RowMajorBit) ) + { + Mat _dst(1, _rows, DataType<_Tp>::type, + dst.data(), (size_t)(dst.stride()*sizeof(_Tp))); + transpose(src, _dst); + CV_DbgAssert(_dst.data == (uchar*)dst.data()); + } + else + { + Mat _dst(_rows, 1, DataType<_Tp>::type, + dst.data(), (size_t)(dst.stride()*sizeof(_Tp))); + src.copyTo(_dst); + CV_DbgAssert(_dst.data == (uchar*)dst.data()); + } +} + + +template +void cv2eigen( const Mat& src, + Eigen::Matrix<_Tp, 1, Eigen::Dynamic>& dst ) +{ + CV_Assert(src.rows == 1); + dst.resize(src.cols); + if( !(dst.Flags & Eigen::RowMajorBit) ) + { + Mat _dst(src.cols, src.rows, DataType<_Tp>::type, + dst.data(), (size_t)(dst.stride()*sizeof(_Tp))); + if( src.type() == _dst.type() ) + transpose(src, _dst); + else + Mat(src.t()).convertTo(_dst, _dst.type()); + CV_DbgAssert(_dst.data == (uchar*)dst.data()); + } + else + { + Mat _dst(src.rows, src.cols, DataType<_Tp>::type, + dst.data(), (size_t)(dst.stride()*sizeof(_Tp))); + src.convertTo(_dst, _dst.type()); + CV_DbgAssert(_dst.data == (uchar*)dst.data()); + } +} + +//Matx +template +void cv2eigen( const Matx<_Tp, 1, _cols>& src, + Eigen::Matrix<_Tp, 1, Eigen::Dynamic>& dst ) +{ + dst.resize(_cols); + if( !(dst.Flags & Eigen::RowMajorBit) ) + { + Mat _dst(_cols, 1, DataType<_Tp>::type, + dst.data(), (size_t)(dst.stride()*sizeof(_Tp))); + transpose(src, _dst); + CV_DbgAssert(_dst.data == (uchar*)dst.data()); + } + else + { + Mat _dst(1, _cols, DataType<_Tp>::type, + dst.data(), (size_t)(dst.stride()*sizeof(_Tp))); + Mat(src).copyTo(_dst); + CV_DbgAssert(_dst.data == (uchar*)dst.data()); + } +} + + +} + +#endif + +#endif diff --git a/src/c/hardware/rasberrypi/libraries/opencv/opencv2/core/gpumat.hpp b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/core/gpumat.hpp new file mode 100644 index 0000000..68647d9 --- /dev/null +++ b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/core/gpumat.hpp @@ -0,0 +1,564 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. 
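Editor's note: the eigen2cv / cv2eigen templates above copy data between Eigen matrices and cv::Mat, transposing when the Eigen storage order is column-major. A round-trip sketch, assuming Eigen 3 and the OpenCV core library are available to include and link against:

#include <iostream>
#include <Eigen/Core>                       // must be included before eigen.hpp
#include "opencv2/core/eigen.hpp"

int main()
{
    Eigen::Matrix3d e = Eigen::Matrix3d::Identity();

    cv::Mat m;
    cv::eigen2cv( e, m );                   // Eigen -> cv::Mat (CV_64F, copies data)

    Eigen::MatrixXd back;
    cv::cv2eigen( m, back );                // cv::Mat -> dynamically sized Eigen matrix

    std::cout << back << std::endl;
    return 0;
}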
+// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef __OPENCV_GPUMAT_HPP__ +#define __OPENCV_GPUMAT_HPP__ + +#ifdef __cplusplus + +#include "opencv2/core/core.hpp" +#include "opencv2/core/cuda_devptrs.hpp" + +namespace cv { namespace gpu +{ + //////////////////////////////// Initialization & Info //////////////////////// + + //! This is the only function that do not throw exceptions if the library is compiled without Cuda. + CV_EXPORTS int getCudaEnabledDeviceCount(); + + //! Functions below throw cv::Expception if the library is compiled without Cuda. + + CV_EXPORTS void setDevice(int device); + CV_EXPORTS int getDevice(); + + //! Explicitly destroys and cleans up all resources associated with the current device in the current process. + //! Any subsequent API call to this device will reinitialize the device. 
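Editor's note: as the comments above state, getCudaEnabledDeviceCount() is the one call that stays safe when the library was built without CUDA; the other device functions throw in that case. A small device-query sketch, assuming the OpenCV 2.4 core library is linked:

#include <iostream>
#include "opencv2/core/gpumat.hpp"

int main()
{
    int n = cv::gpu::getCudaEnabledDeviceCount();   // 0 if built without CUDA
    std::cout << "CUDA-capable devices: " << n << std::endl;

    if( n > 0 )
    {
        cv::gpu::setDevice( 0 );                    // would throw without CUDA support
        std::cout << "current device: " << cv::gpu::getDevice() << std::endl;
    }
    return 0;
}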
+ CV_EXPORTS void resetDevice(); + + enum FeatureSet + { + FEATURE_SET_COMPUTE_10 = 10, + FEATURE_SET_COMPUTE_11 = 11, + FEATURE_SET_COMPUTE_12 = 12, + FEATURE_SET_COMPUTE_13 = 13, + FEATURE_SET_COMPUTE_20 = 20, + FEATURE_SET_COMPUTE_21 = 21, + FEATURE_SET_COMPUTE_30 = 30, + FEATURE_SET_COMPUTE_35 = 35, + + GLOBAL_ATOMICS = FEATURE_SET_COMPUTE_11, + SHARED_ATOMICS = FEATURE_SET_COMPUTE_12, + NATIVE_DOUBLE = FEATURE_SET_COMPUTE_13, + WARP_SHUFFLE_FUNCTIONS = FEATURE_SET_COMPUTE_30, + DYNAMIC_PARALLELISM = FEATURE_SET_COMPUTE_35 + }; + + // Checks whether current device supports the given feature + CV_EXPORTS bool deviceSupports(FeatureSet feature_set); + + // Gives information about what GPU archs this OpenCV GPU module was + // compiled for + class CV_EXPORTS TargetArchs + { + public: + static bool builtWith(FeatureSet feature_set); + static bool has(int major, int minor); + static bool hasPtx(int major, int minor); + static bool hasBin(int major, int minor); + static bool hasEqualOrLessPtx(int major, int minor); + static bool hasEqualOrGreater(int major, int minor); + static bool hasEqualOrGreaterPtx(int major, int minor); + static bool hasEqualOrGreaterBin(int major, int minor); + private: + TargetArchs(); + }; + + // Gives information about the given GPU + class CV_EXPORTS DeviceInfo + { + public: + // Creates DeviceInfo object for the current GPU + DeviceInfo() : device_id_(getDevice()) { query(); } + + // Creates DeviceInfo object for the given GPU + DeviceInfo(int device_id) : device_id_(device_id) { query(); } + + std::string name() const { return name_; } + + // Return compute capability versions + int majorVersion() const { return majorVersion_; } + int minorVersion() const { return minorVersion_; } + + int multiProcessorCount() const { return multi_processor_count_; } + + size_t sharedMemPerBlock() const; + + void queryMemory(size_t& totalMemory, size_t& freeMemory) const; + size_t freeMemory() const; + size_t totalMemory() const; + + // Checks whether device supports the given feature + bool supports(FeatureSet feature_set) const; + + // Checks whether the GPU module can be run on the given device + bool isCompatible() const; + + int deviceID() const { return device_id_; } + + private: + void query(); + + int device_id_; + + std::string name_; + int multi_processor_count_; + int majorVersion_; + int minorVersion_; + }; + + CV_EXPORTS void printCudaDeviceInfo(int device); + CV_EXPORTS void printShortCudaDeviceInfo(int device); + + //////////////////////////////// GpuMat /////////////////////////////// + + //! Smart pointer for GPU memory with reference counting. Its interface is mostly similar with cv::Mat. + class CV_EXPORTS GpuMat + { + public: + //! default constructor + GpuMat(); + + //! constructs GpuMatrix of the specified size and type (_type is CV_8UC1, CV_64FC3, CV_32SC(12) etc.) + GpuMat(int rows, int cols, int type); + GpuMat(Size size, int type); + + //! constucts GpuMatrix and fills it with the specified value _s. + GpuMat(int rows, int cols, int type, Scalar s); + GpuMat(Size size, int type, Scalar s); + + //! copy constructor + GpuMat(const GpuMat& m); + + //! constructor for GpuMatrix headers pointing to user-allocated data + GpuMat(int rows, int cols, int type, void* data, size_t step = Mat::AUTO_STEP); + GpuMat(Size size, int type, void* data, size_t step = Mat::AUTO_STEP); + + //! creates a matrix header for a part of the bigger matrix + GpuMat(const GpuMat& m, Range rowRange, Range colRange); + GpuMat(const GpuMat& m, Rect roi); + + //! builds GpuMat from Mat. 
Perfom blocking upload to device. + explicit GpuMat(const Mat& m); + + //! destructor - calls release() + ~GpuMat(); + + //! assignment operators + GpuMat& operator = (const GpuMat& m); + + //! pefroms blocking upload data to GpuMat. + void upload(const Mat& m); + + //! downloads data from device to host memory. Blocking calls. + void download(Mat& m) const; + + //! returns a new GpuMatrix header for the specified row + GpuMat row(int y) const; + //! returns a new GpuMatrix header for the specified column + GpuMat col(int x) const; + //! ... for the specified row span + GpuMat rowRange(int startrow, int endrow) const; + GpuMat rowRange(Range r) const; + //! ... for the specified column span + GpuMat colRange(int startcol, int endcol) const; + GpuMat colRange(Range r) const; + + //! returns deep copy of the GpuMatrix, i.e. the data is copied + GpuMat clone() const; + //! copies the GpuMatrix content to "m". + // It calls m.create(this->size(), this->type()). + void copyTo(GpuMat& m) const; + //! copies those GpuMatrix elements to "m" that are marked with non-zero mask elements. + void copyTo(GpuMat& m, const GpuMat& mask) const; + //! converts GpuMatrix to another datatype with optional scalng. See cvConvertScale. + void convertTo(GpuMat& m, int rtype, double alpha = 1, double beta = 0) const; + + void assignTo(GpuMat& m, int type=-1) const; + + //! sets every GpuMatrix element to s + GpuMat& operator = (Scalar s); + //! sets some of the GpuMatrix elements to s, according to the mask + GpuMat& setTo(Scalar s, const GpuMat& mask = GpuMat()); + //! creates alternative GpuMatrix header for the same data, with different + // number of channels and/or different number of rows. see cvReshape. + GpuMat reshape(int cn, int rows = 0) const; + + //! allocates new GpuMatrix data unless the GpuMatrix already has specified size and type. + // previous data is unreferenced if needed. + void create(int rows, int cols, int type); + void create(Size size, int type); + //! decreases reference counter; + // deallocate the data when reference counter reaches 0. + void release(); + + //! swaps with other smart pointer + void swap(GpuMat& mat); + + //! locates GpuMatrix header within a parent GpuMatrix. See below + void locateROI(Size& wholeSize, Point& ofs) const; + //! moves/resizes the current GpuMatrix ROI inside the parent GpuMatrix. + GpuMat& adjustROI(int dtop, int dbottom, int dleft, int dright); + //! extracts a rectangular sub-GpuMatrix + // (this is a generalized form of row, rowRange etc.) + GpuMat operator()(Range rowRange, Range colRange) const; + GpuMat operator()(Rect roi) const; + + //! returns true iff the GpuMatrix data is continuous + // (i.e. when there are no gaps between successive rows). + // similar to CV_IS_GpuMat_CONT(cvGpuMat->type) + bool isContinuous() const; + //! returns element size in bytes, + // similar to CV_ELEM_SIZE(cvMat->type) + size_t elemSize() const; + //! returns the size of element channel in bytes. + size_t elemSize1() const; + //! returns element type, similar to CV_MAT_TYPE(cvMat->type) + int type() const; + //! returns element type, similar to CV_MAT_DEPTH(cvMat->type) + int depth() const; + //! returns element type, similar to CV_MAT_CN(cvMat->type) + int channels() const; + //! returns step/elemSize1() + size_t step1() const; + //! returns GpuMatrix size: + // width == number of columns, height == number of rows + Size size() const; + //! returns true if GpuMatrix data is NULL + bool empty() const; + + //! 
returns pointer to y-th row + uchar* ptr(int y = 0); + const uchar* ptr(int y = 0) const; + + //! template version of the above method + template _Tp* ptr(int y = 0); + template const _Tp* ptr(int y = 0) const; + + template operator PtrStepSz<_Tp>() const; + template operator PtrStep<_Tp>() const; + + // Deprecated function + __CV_GPU_DEPR_BEFORE__ template operator DevMem2D_<_Tp>() const __CV_GPU_DEPR_AFTER__; + __CV_GPU_DEPR_BEFORE__ template operator PtrStep_<_Tp>() const __CV_GPU_DEPR_AFTER__; + #undef __CV_GPU_DEPR_BEFORE__ + #undef __CV_GPU_DEPR_AFTER__ + + /*! includes several bit-fields: + - the magic signature + - continuity flag + - depth + - number of channels + */ + int flags; + + //! the number of rows and columns + int rows, cols; + + //! a distance between successive rows in bytes; includes the gap if any + size_t step; + + //! pointer to the data + uchar* data; + + //! pointer to the reference counter; + // when GpuMatrix points to user-allocated data, the pointer is NULL + int* refcount; + + //! helper fields used in locateROI and adjustROI + uchar* datastart; + uchar* dataend; + }; + + //! Creates continuous GPU matrix + CV_EXPORTS void createContinuous(int rows, int cols, int type, GpuMat& m); + CV_EXPORTS GpuMat createContinuous(int rows, int cols, int type); + CV_EXPORTS void createContinuous(Size size, int type, GpuMat& m); + CV_EXPORTS GpuMat createContinuous(Size size, int type); + + //! Ensures that size of the given matrix is not less than (rows, cols) size + //! and matrix type is match specified one too + CV_EXPORTS void ensureSizeIsEnough(int rows, int cols, int type, GpuMat& m); + CV_EXPORTS void ensureSizeIsEnough(Size size, int type, GpuMat& m); + + CV_EXPORTS GpuMat allocMatFromBuf(int rows, int cols, int type, GpuMat &mat); + + //////////////////////////////////////////////////////////////////////// + // Error handling + + CV_EXPORTS void error(const char* error_string, const char* file, const int line, const char* func = ""); + + //////////////////////////////////////////////////////////////////////// + //////////////////////////////////////////////////////////////////////// + //////////////////////////////////////////////////////////////////////// + + inline GpuMat::GpuMat() + : flags(0), rows(0), cols(0), step(0), data(0), refcount(0), datastart(0), dataend(0) + { + } + + inline GpuMat::GpuMat(int rows_, int cols_, int type_) + : flags(0), rows(0), cols(0), step(0), data(0), refcount(0), datastart(0), dataend(0) + { + if (rows_ > 0 && cols_ > 0) + create(rows_, cols_, type_); + } + + inline GpuMat::GpuMat(Size size_, int type_) + : flags(0), rows(0), cols(0), step(0), data(0), refcount(0), datastart(0), dataend(0) + { + if (size_.height > 0 && size_.width > 0) + create(size_.height, size_.width, type_); + } + + inline GpuMat::GpuMat(int rows_, int cols_, int type_, Scalar s_) + : flags(0), rows(0), cols(0), step(0), data(0), refcount(0), datastart(0), dataend(0) + { + if (rows_ > 0 && cols_ > 0) + { + create(rows_, cols_, type_); + setTo(s_); + } + } + + inline GpuMat::GpuMat(Size size_, int type_, Scalar s_) + : flags(0), rows(0), cols(0), step(0), data(0), refcount(0), datastart(0), dataend(0) + { + if (size_.height > 0 && size_.width > 0) + { + create(size_.height, size_.width, type_); + setTo(s_); + } + } + + inline GpuMat::~GpuMat() + { + release(); + } + + inline GpuMat GpuMat::clone() const + { + GpuMat m; + copyTo(m); + return m; + } + + inline void GpuMat::assignTo(GpuMat& m, int _type) const + { + if (_type < 0) + m = *this; + else + 
convertTo(m, _type); + } + + inline size_t GpuMat::step1() const + { + return step / elemSize1(); + } + + inline bool GpuMat::empty() const + { + return data == 0; + } + + template inline _Tp* GpuMat::ptr(int y) + { + return (_Tp*)ptr(y); + } + + template inline const _Tp* GpuMat::ptr(int y) const + { + return (const _Tp*)ptr(y); + } + + inline void swap(GpuMat& a, GpuMat& b) + { + a.swap(b); + } + + inline GpuMat GpuMat::row(int y) const + { + return GpuMat(*this, Range(y, y+1), Range::all()); + } + + inline GpuMat GpuMat::col(int x) const + { + return GpuMat(*this, Range::all(), Range(x, x+1)); + } + + inline GpuMat GpuMat::rowRange(int startrow, int endrow) const + { + return GpuMat(*this, Range(startrow, endrow), Range::all()); + } + + inline GpuMat GpuMat::rowRange(Range r) const + { + return GpuMat(*this, r, Range::all()); + } + + inline GpuMat GpuMat::colRange(int startcol, int endcol) const + { + return GpuMat(*this, Range::all(), Range(startcol, endcol)); + } + + inline GpuMat GpuMat::colRange(Range r) const + { + return GpuMat(*this, Range::all(), r); + } + + inline void GpuMat::create(Size size_, int type_) + { + create(size_.height, size_.width, type_); + } + + inline GpuMat GpuMat::operator()(Range _rowRange, Range _colRange) const + { + return GpuMat(*this, _rowRange, _colRange); + } + + inline GpuMat GpuMat::operator()(Rect roi) const + { + return GpuMat(*this, roi); + } + + inline bool GpuMat::isContinuous() const + { + return (flags & Mat::CONTINUOUS_FLAG) != 0; + } + + inline size_t GpuMat::elemSize() const + { + return CV_ELEM_SIZE(flags); + } + + inline size_t GpuMat::elemSize1() const + { + return CV_ELEM_SIZE1(flags); + } + + inline int GpuMat::type() const + { + return CV_MAT_TYPE(flags); + } + + inline int GpuMat::depth() const + { + return CV_MAT_DEPTH(flags); + } + + inline int GpuMat::channels() const + { + return CV_MAT_CN(flags); + } + + inline Size GpuMat::size() const + { + return Size(cols, rows); + } + + inline uchar* GpuMat::ptr(int y) + { + CV_DbgAssert((unsigned)y < (unsigned)rows); + return data + step * y; + } + + inline const uchar* GpuMat::ptr(int y) const + { + CV_DbgAssert((unsigned)y < (unsigned)rows); + return data + step * y; + } + + inline GpuMat& GpuMat::operator = (Scalar s) + { + setTo(s); + return *this; + } + + /** @cond IGNORED */ + template inline GpuMat::operator PtrStepSz() const + { + return PtrStepSz(rows, cols, (T*)data, step); + } + + template inline GpuMat::operator PtrStep() const + { + return PtrStep((T*)data, step); + } + + template inline GpuMat::operator DevMem2D_() const + { + return DevMem2D_(rows, cols, (T*)data, step); + } + + template inline GpuMat::operator PtrStep_() const + { + return PtrStep_(static_cast< DevMem2D_ >(*this)); + } + /** @endcond */ + + inline GpuMat createContinuous(int rows, int cols, int type) + { + GpuMat m; + createContinuous(rows, cols, type, m); + return m; + } + + inline void createContinuous(Size size, int type, GpuMat& m) + { + createContinuous(size.height, size.width, type, m); + } + + inline GpuMat createContinuous(Size size, int type) + { + GpuMat m; + createContinuous(size, type, m); + return m; + } + + inline void ensureSizeIsEnough(Size size, int type, GpuMat& m) + { + ensureSizeIsEnough(size.height, size.width, type, m); + } +}} + +#endif // __cplusplus + +#endif // __OPENCV_GPUMAT_HPP__ diff --git a/src/c/hardware/rasberrypi/libraries/opencv/opencv2/core/internal.hpp b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/core/internal.hpp new file mode 100644 index 0000000..c2c8961 --- 
/dev/null +++ b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/core/internal.hpp @@ -0,0 +1,795 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +/* The header is for internal use and it is likely to change. + It contains some macro definitions that are used in cxcore, cv, cvaux + and, probably, other libraries. If you need some of this functionality, + the safe way is to copy it into your code and rename the macros. 
+*/ +#ifndef __OPENCV_CORE_INTERNAL_HPP__ +#define __OPENCV_CORE_INTERNAL_HPP__ + +#include + +#include "opencv2/core/core.hpp" +#include "opencv2/core/types_c.h" + +#if defined WIN32 || defined _WIN32 +# ifndef WIN32 +# define WIN32 +# endif +# ifndef _WIN32 +# define _WIN32 +# endif +#endif + +#if !defined WIN32 && !defined WINCE +# include +#endif + +#ifdef __BORLANDC__ +# ifndef WIN32 +# define WIN32 +# endif +# ifndef _WIN32 +# define _WIN32 +# endif +# define CV_DLL +# undef _CV_ALWAYS_PROFILE_ +# define _CV_ALWAYS_NO_PROFILE_ +#endif + +#ifndef FALSE +# define FALSE 0 +#endif +#ifndef TRUE +# define TRUE 1 +#endif + +#define __BEGIN__ __CV_BEGIN__ +#define __END__ __CV_END__ +#define EXIT __CV_EXIT__ + +#ifdef HAVE_IPP +# include "ipp.h" + +CV_INLINE IppiSize ippiSize(int width, int height) +{ + IppiSize size = { width, height }; + return size; +} + +CV_INLINE IppiSize ippiSize(const cv::Size & _size) +{ + IppiSize size = { _size.width, _size.height }; + return size; +} + +#endif + +#ifndef IPPI_CALL +# define IPPI_CALL(func) CV_Assert((func) >= 0) +#endif + +#if defined __SSE2__ || defined _M_X64 || (defined _M_IX86_FP && _M_IX86_FP >= 2) +# include "emmintrin.h" +# define CV_SSE 1 +# define CV_SSE2 1 +# if defined __SSE3__ || (defined _MSC_VER && _MSC_VER >= 1500) +# include "pmmintrin.h" +# define CV_SSE3 1 +# endif +# if defined __SSSE3__ || (defined _MSC_VER && _MSC_VER >= 1500) +# include "tmmintrin.h" +# define CV_SSSE3 1 +# endif +# if defined __SSE4_1__ || (defined _MSC_VER && _MSC_VER >= 1500) +# include +# define CV_SSE4_1 1 +# endif +# if defined __SSE4_2__ || (defined _MSC_VER && _MSC_VER >= 1500) +# include +# define CV_SSE4_2 1 +# endif +# if defined __AVX__ || (defined _MSC_FULL_VER && _MSC_FULL_VER >= 160040219) +// MS Visual Studio 2010 (2012?) 
has no macro pre-defined to identify the use of /arch:AVX +// See: http://connect.microsoft.com/VisualStudio/feedback/details/605858/arch-avx-should-define-a-predefined-macro-in-x64-and-set-a-unique-value-for-m-ix86-fp-in-win32 +# include +# define CV_AVX 1 +# if defined(_XCR_XFEATURE_ENABLED_MASK) +# define __xgetbv() _xgetbv(_XCR_XFEATURE_ENABLED_MASK) +# else +# define __xgetbv() 0 +# endif +# endif +# if defined __AVX2__ +# include +# define CV_AVX2 1 +# endif +#endif + + +#if (defined WIN32 || defined _WIN32) && defined(_M_ARM) +# include +# include "arm_neon.h" +# define CV_NEON 1 +# define CPU_HAS_NEON_FEATURE (true) +#elif defined(__ARM_NEON__) || defined(__ARM_NEON) +# include +# define CV_NEON 1 +# define CPU_HAS_NEON_FEATURE (true) +#endif + +#ifndef CV_SSE +# define CV_SSE 0 +#endif +#ifndef CV_SSE2 +# define CV_SSE2 0 +#endif +#ifndef CV_SSE3 +# define CV_SSE3 0 +#endif +#ifndef CV_SSSE3 +# define CV_SSSE3 0 +#endif +#ifndef CV_SSE4_1 +# define CV_SSE4_1 0 +#endif +#ifndef CV_SSE4_2 +# define CV_SSE4_2 0 +#endif +#ifndef CV_AVX +# define CV_AVX 0 +#endif +#ifndef CV_AVX2 +# define CV_AVX2 0 +#endif +#ifndef CV_NEON +# define CV_NEON 0 +#endif + +#ifdef HAVE_TBB +# include "tbb/tbb_stddef.h" +# if TBB_VERSION_MAJOR*100 + TBB_VERSION_MINOR >= 202 +# include "tbb/tbb.h" +# include "tbb/task.h" +# undef min +# undef max +# else +# undef HAVE_TBB +# endif +#endif + +#ifdef HAVE_EIGEN +# if defined __GNUC__ && defined __APPLE__ +# pragma GCC diagnostic ignored "-Wshadow" +# endif +# include +# include "opencv2/core/eigen.hpp" +#endif + +#ifdef __cplusplus + +namespace cv +{ +#ifdef HAVE_TBB + + typedef tbb::blocked_range BlockedRange; + + template static inline + void parallel_for( const BlockedRange& range, const Body& body ) + { + tbb::parallel_for(range, body); + } + + template static inline + void parallel_do( Iterator first, Iterator last, const Body& body ) + { + tbb::parallel_do(first, last, body); + } + + typedef tbb::split Split; + + template static inline + void parallel_reduce( const BlockedRange& range, Body& body ) + { + tbb::parallel_reduce(range, body); + } + + typedef tbb::concurrent_vector ConcurrentRectVector; + typedef tbb::concurrent_vector ConcurrentDoubleVector; +#else + class BlockedRange + { + public: + BlockedRange() : _begin(0), _end(0), _grainsize(0) {} + BlockedRange(int b, int e, int g=1) : _begin(b), _end(e), _grainsize(g) {} + int begin() const { return _begin; } + int end() const { return _end; } + int grainsize() const { return _grainsize; } + + protected: + int _begin, _end, _grainsize; + }; + + template static inline + void parallel_for( const BlockedRange& range, const Body& body ) + { + body(range); + } + typedef std::vector ConcurrentRectVector; + typedef std::vector ConcurrentDoubleVector; + + template static inline + void parallel_do( Iterator first, Iterator last, const Body& body ) + { + for( ; first != last; ++first ) + body(*first); + } + + class Split {}; + + template static inline + void parallel_reduce( const BlockedRange& range, Body& body ) + { + body(range); + } +#endif + + // Returns a static string if there is a parallel framework, + // NULL otherwise. 
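Editor's note: the BlockedRange / parallel_for declarations above fall back to a serial loop when OpenCV is built without TBB and dispatch to tbb::parallel_for otherwise. A sketch with a hypothetical body functor (SquareBody, a name invented for this example), assuming internal.hpp is included directly:

#include <iostream>
#include <vector>
#include "opencv2/core/internal.hpp"

struct SquareBody
{
    std::vector<int>* out;
    void operator()( const cv::BlockedRange& r ) const
    {
        for( int i = r.begin(); i < r.end(); ++i )
            (*out)[i] = i * i;
    }
};

int main()
{
    std::vector<int> out( 8 );
    SquareBody body = { &out };
    cv::parallel_for( cv::BlockedRange( 0, (int)out.size() ), body );
    std::cout << out.back() << std::endl;   // prints 49
    return 0;
}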
+ CV_EXPORTS const char* currentParallelFramework(); +} //namespace cv + +#define CV_INIT_ALGORITHM(classname, algname, memberinit) \ + static ::cv::Algorithm* create##classname() \ + { \ + return new classname; \ + } \ + \ + static ::cv::AlgorithmInfo& classname##_info() \ + { \ + static ::cv::AlgorithmInfo classname##_info_var(algname, create##classname); \ + return classname##_info_var; \ + } \ + \ + static ::cv::AlgorithmInfo& classname##_info_auto = classname##_info(); \ + \ + ::cv::AlgorithmInfo* classname::info() const \ + { \ + static volatile bool initialized = false; \ + \ + if( !initialized ) \ + { \ + initialized = true; \ + classname obj; \ + memberinit; \ + } \ + return &classname##_info(); \ + } + +#endif //__cplusplus + +/* maximal size of vector to run matrix operations on it inline (i.e. w/o ipp calls) */ +#define CV_MAX_INLINE_MAT_OP_SIZE 10 + +/* maximal linear size of matrix to allocate it on stack. */ +#define CV_MAX_LOCAL_MAT_SIZE 32 + +/* maximal size of local memory storage */ +#define CV_MAX_LOCAL_SIZE \ + (CV_MAX_LOCAL_MAT_SIZE*CV_MAX_LOCAL_MAT_SIZE*(int)sizeof(double)) + +/* default image row align (in bytes) */ +#define CV_DEFAULT_IMAGE_ROW_ALIGN 4 + +/* matrices are continuous by default */ +#define CV_DEFAULT_MAT_ROW_ALIGN 1 + +/* maximum size of dynamic memory buffer. + cvAlloc reports an error if a larger block is requested. */ +#define CV_MAX_ALLOC_SIZE (((size_t)1 << (sizeof(size_t)*8-2))) + +/* the alignment of all the allocated buffers */ +#define CV_MALLOC_ALIGN 16 + +/* default alignment for dynamic data strucutures, resided in storages. */ +#define CV_STRUCT_ALIGN ((int)sizeof(double)) + +/* default storage block size */ +#define CV_STORAGE_BLOCK_SIZE ((1<<16) - 128) + +/* default memory block for sparse array elements */ +#define CV_SPARSE_MAT_BLOCK (1<<12) + +/* initial hash table size */ +#define CV_SPARSE_HASH_SIZE0 (1<<10) + +/* maximal average node_count/hash_size ratio beyond which hash table is resized */ +#define CV_SPARSE_HASH_RATIO 3 + +/* max length of strings */ +#define CV_MAX_STRLEN 1024 + +#if 0 /*def CV_CHECK_FOR_NANS*/ +# define CV_CHECK_NANS( arr ) cvCheckArray((arr)) +#else +# define CV_CHECK_NANS( arr ) +#endif + +/****************************************************************************************\ +* Common declarations * +\****************************************************************************************/ + +#ifdef __GNUC__ +# define CV_DECL_ALIGNED(x) __attribute__ ((aligned (x))) +#elif defined _MSC_VER +# define CV_DECL_ALIGNED(x) __declspec(align(x)) +#else +# define CV_DECL_ALIGNED(x) +#endif + +#ifndef CV_IMPL +# define CV_IMPL CV_EXTERN_C +#endif + +#define CV_DBG_BREAK() { volatile int* crashMe = 0; *crashMe = 0; } + +/* default step, set in case of continuous data + to work around checks for valid step in some ipp functions */ +#define CV_STUB_STEP (1 << 30) + +#define CV_SIZEOF_FLOAT ((int)sizeof(float)) +#define CV_SIZEOF_SHORT ((int)sizeof(short)) + +#define CV_ORIGIN_TL 0 +#define CV_ORIGIN_BL 1 + +/* IEEE754 constants and macros */ +#define CV_POS_INF 0x7f800000 +#define CV_NEG_INF 0x807fffff /* CV_TOGGLE_FLT(0xff800000) */ +#define CV_1F 0x3f800000 +#define CV_TOGGLE_FLT(x) ((x)^((int)(x) < 0 ? 0x7fffffff : 0)) +#define CV_TOGGLE_DBL(x) \ + ((x)^((int64)(x) < 0 ? 
CV_BIG_INT(0x7fffffffffffffff) : 0)) + +#define CV_NOP(a) (a) +#define CV_ADD(a, b) ((a) + (b)) +#define CV_SUB(a, b) ((a) - (b)) +#define CV_MUL(a, b) ((a) * (b)) +#define CV_AND(a, b) ((a) & (b)) +#define CV_OR(a, b) ((a) | (b)) +#define CV_XOR(a, b) ((a) ^ (b)) +#define CV_ANDN(a, b) (~(a) & (b)) +#define CV_ORN(a, b) (~(a) | (b)) +#define CV_SQR(a) ((a) * (a)) + +#define CV_LT(a, b) ((a) < (b)) +#define CV_LE(a, b) ((a) <= (b)) +#define CV_EQ(a, b) ((a) == (b)) +#define CV_NE(a, b) ((a) != (b)) +#define CV_GT(a, b) ((a) > (b)) +#define CV_GE(a, b) ((a) >= (b)) + +#define CV_NONZERO(a) ((a) != 0) +#define CV_NONZERO_FLT(a) (((a)+(a)) != 0) + +/* general-purpose saturation macros */ +#define CV_CAST_8U(t) (uchar)(!((t) & ~255) ? (t) : (t) > 0 ? 255 : 0) +#define CV_CAST_8S(t) (schar)(!(((t)+128) & ~255) ? (t) : (t) > 0 ? 127 : -128) +#define CV_CAST_16U(t) (ushort)(!((t) & ~65535) ? (t) : (t) > 0 ? 65535 : 0) +#define CV_CAST_16S(t) (short)(!(((t)+32768) & ~65535) ? (t) : (t) > 0 ? 32767 : -32768) +#define CV_CAST_32S(t) (int)(t) +#define CV_CAST_64S(t) (int64)(t) +#define CV_CAST_32F(t) (float)(t) +#define CV_CAST_64F(t) (double)(t) + +#define CV_PASTE2(a,b) a##b +#define CV_PASTE(a,b) CV_PASTE2(a,b) + +#define CV_EMPTY +#define CV_MAKE_STR(a) #a + +#define CV_ZERO_OBJ(x) memset((x), 0, sizeof(*(x))) + +#define CV_DIM(static_array) ((int)(sizeof(static_array)/sizeof((static_array)[0]))) + +#define cvUnsupportedFormat "Unsupported format" + +CV_INLINE void* cvAlignPtr( const void* ptr, int align CV_DEFAULT(32) ) +{ + assert( (align & (align-1)) == 0 ); + return (void*)( ((size_t)ptr + align - 1) & ~(size_t)(align-1) ); +} + +CV_INLINE int cvAlign( int size, int align ) +{ + assert( (align & (align-1)) == 0 && size < INT_MAX ); + return (size + align - 1) & -align; +} + +CV_INLINE CvSize cvGetMatSize( const CvMat* mat ) +{ + CvSize size; + size.width = mat->cols; + size.height = mat->rows; + return size; +} + +#define CV_DESCALE(x,n) (((x) + (1 << ((n)-1))) >> (n)) +#define CV_FLT_TO_FIX(x,n) cvRound((x)*(1<<(n))) + +/****************************************************************************************\ + + Generic implementation of QuickSort algorithm. + ---------------------------------------------- + Using this macro user can declare customized sort function that can be much faster + than built-in qsort function because of lower overhead on elements + comparison and exchange. The macro takes less_than (or LT) argument - a macro or function + that takes 2 arguments returns non-zero if the first argument should be before the second + one in the sorted sequence and zero otherwise. + + Example: + + Suppose that the task is to sort points by ascending of y coordinates and if + y's are equal x's should ascend. + + The code is: + ------------------------------------------------------------------------------ + #define cmp_pts( pt1, pt2 ) \ + ((pt1).y < (pt2).y || ((pt1).y < (pt2).y && (pt1).x < (pt2).x)) + + [static] CV_IMPLEMENT_QSORT( icvSortPoints, CvPoint, cmp_pts ) + ------------------------------------------------------------------------------ + + After that the function "void icvSortPoints( CvPoint* array, size_t total, int aux );" + is available to user. + + aux is an additional parameter, which can be used when comparing elements. + The current implementation was derived from *BSD system qsort(): + + * Copyright (c) 1992, 1993 + * The Regents of the University of California. All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + +\****************************************************************************************/ + +#define CV_IMPLEMENT_QSORT_EX( func_name, T, LT, user_data_type ) \ +void func_name( T *array, size_t total, user_data_type aux ) \ +{ \ + int isort_thresh = 7; \ + T t; \ + int sp = 0; \ + \ + struct \ + { \ + T *lb; \ + T *ub; \ + } \ + stack[48]; \ + \ + aux = aux; \ + \ + if( total <= 1 ) \ + return; \ + \ + stack[0].lb = array; \ + stack[0].ub = array + (total - 1); \ + \ + while( sp >= 0 ) \ + { \ + T* left = stack[sp].lb; \ + T* right = stack[sp--].ub; \ + \ + for(;;) \ + { \ + int i, n = (int)(right - left) + 1, m; \ + T* ptr; \ + T* ptr2; \ + \ + if( n <= isort_thresh ) \ + { \ + insert_sort: \ + for( ptr = left + 1; ptr <= right; ptr++ ) \ + { \ + for( ptr2 = ptr; ptr2 > left && LT(ptr2[0],ptr2[-1]); ptr2--) \ + CV_SWAP( ptr2[0], ptr2[-1], t ); \ + } \ + break; \ + } \ + else \ + { \ + T* left0; \ + T* left1; \ + T* right0; \ + T* right1; \ + T* pivot; \ + T* a; \ + T* b; \ + T* c; \ + int swap_cnt = 0; \ + \ + left0 = left; \ + right0 = right; \ + pivot = left + (n/2); \ + \ + if( n > 40 ) \ + { \ + int d = n / 8; \ + a = left, b = left + d, c = left + 2*d; \ + left = LT(*a, *b) ? (LT(*b, *c) ? b : (LT(*a, *c) ? c : a)) \ + : (LT(*c, *b) ? b : (LT(*a, *c) ? a : c)); \ + \ + a = pivot - d, b = pivot, c = pivot + d; \ + pivot = LT(*a, *b) ? (LT(*b, *c) ? b : (LT(*a, *c) ? c : a)) \ + : (LT(*c, *b) ? b : (LT(*a, *c) ? a : c)); \ + \ + a = right - 2*d, b = right - d, c = right; \ + right = LT(*a, *b) ? (LT(*b, *c) ? b : (LT(*a, *c) ? c : a)) \ + : (LT(*c, *b) ? b : (LT(*a, *c) ? a : c)); \ + } \ + \ + a = left, b = pivot, c = right; \ + pivot = LT(*a, *b) ? (LT(*b, *c) ? b : (LT(*a, *c) ? c : a)) \ + : (LT(*c, *b) ? b : (LT(*a, *c) ? 
a : c)); \ + if( pivot != left0 ) \ + { \ + CV_SWAP( *pivot, *left0, t ); \ + pivot = left0; \ + } \ + left = left1 = left0 + 1; \ + right = right1 = right0; \ + \ + for(;;) \ + { \ + while( left <= right && !LT(*pivot, *left) ) \ + { \ + if( !LT(*left, *pivot) ) \ + { \ + if( left > left1 ) \ + CV_SWAP( *left1, *left, t ); \ + swap_cnt = 1; \ + left1++; \ + } \ + left++; \ + } \ + \ + while( left <= right && !LT(*right, *pivot) ) \ + { \ + if( !LT(*pivot, *right) ) \ + { \ + if( right < right1 ) \ + CV_SWAP( *right1, *right, t ); \ + swap_cnt = 1; \ + right1--; \ + } \ + right--; \ + } \ + \ + if( left > right ) \ + break; \ + CV_SWAP( *left, *right, t ); \ + swap_cnt = 1; \ + left++; \ + right--; \ + } \ + \ + if( swap_cnt == 0 ) \ + { \ + left = left0, right = right0; \ + goto insert_sort; \ + } \ + \ + n = MIN( (int)(left1 - left0), (int)(left - left1) ); \ + for( i = 0; i < n; i++ ) \ + CV_SWAP( left0[i], left[i-n], t ); \ + \ + n = MIN( (int)(right0 - right1), (int)(right1 - right) ); \ + for( i = 0; i < n; i++ ) \ + CV_SWAP( left[i], right0[i-n+1], t ); \ + n = (int)(left - left1); \ + m = (int)(right1 - right); \ + if( n > 1 ) \ + { \ + if( m > 1 ) \ + { \ + if( n > m ) \ + { \ + stack[++sp].lb = left0; \ + stack[sp].ub = left0 + n - 1; \ + left = right0 - m + 1, right = right0; \ + } \ + else \ + { \ + stack[++sp].lb = right0 - m + 1; \ + stack[sp].ub = right0; \ + left = left0, right = left0 + n - 1; \ + } \ + } \ + else \ + left = left0, right = left0 + n - 1; \ + } \ + else if( m > 1 ) \ + left = right0 - m + 1, right = right0; \ + else \ + break; \ + } \ + } \ + } \ +} + +#define CV_IMPLEMENT_QSORT( func_name, T, cmp ) \ + CV_IMPLEMENT_QSORT_EX( func_name, T, cmp, int ) + +/****************************************************************************************\ +* Structures and macros for integration with IPP * +\****************************************************************************************/ + +/* IPP-compatible return codes */ +typedef enum CvStatus +{ + CV_BADMEMBLOCK_ERR = -113, + CV_INPLACE_NOT_SUPPORTED_ERR= -112, + CV_UNMATCHED_ROI_ERR = -111, + CV_NOTFOUND_ERR = -110, + CV_BADCONVERGENCE_ERR = -109, + + CV_BADDEPTH_ERR = -107, + CV_BADROI_ERR = -106, + CV_BADHEADER_ERR = -105, + CV_UNMATCHED_FORMATS_ERR = -104, + CV_UNSUPPORTED_COI_ERR = -103, + CV_UNSUPPORTED_CHANNELS_ERR = -102, + CV_UNSUPPORTED_DEPTH_ERR = -101, + CV_UNSUPPORTED_FORMAT_ERR = -100, + + CV_BADARG_ERR = -49, //ipp comp + CV_NOTDEFINED_ERR = -48, //ipp comp + + CV_BADCHANNELS_ERR = -47, //ipp comp + CV_BADRANGE_ERR = -44, //ipp comp + CV_BADSTEP_ERR = -29, //ipp comp + + CV_BADFLAG_ERR = -12, + CV_DIV_BY_ZERO_ERR = -11, //ipp comp + CV_BADCOEF_ERR = -10, + + CV_BADFACTOR_ERR = -7, + CV_BADPOINT_ERR = -6, + CV_BADSCALE_ERR = -4, + CV_OUTOFMEM_ERR = -3, + CV_NULLPTR_ERR = -2, + CV_BADSIZE_ERR = -1, + CV_NO_ERR = 0, + CV_OK = CV_NO_ERR +} +CvStatus; + +#define CV_NOTHROW throw() + +typedef struct CvFuncTable +{ + void* fn_2d[CV_DEPTH_MAX]; +} +CvFuncTable; + +typedef struct CvBigFuncTable +{ + void* fn_2d[CV_DEPTH_MAX*4]; +} CvBigFuncTable; + +#define CV_INIT_FUNC_TAB( tab, FUNCNAME, FLAG ) \ + (tab).fn_2d[CV_8U] = (void*)FUNCNAME##_8u##FLAG; \ + (tab).fn_2d[CV_8S] = 0; \ + (tab).fn_2d[CV_16U] = (void*)FUNCNAME##_16u##FLAG; \ + (tab).fn_2d[CV_16S] = (void*)FUNCNAME##_16s##FLAG; \ + (tab).fn_2d[CV_32S] = (void*)FUNCNAME##_32s##FLAG; \ + (tab).fn_2d[CV_32F] = (void*)FUNCNAME##_32f##FLAG; \ + (tab).fn_2d[CV_64F] = (void*)FUNCNAME##_64f##FLAG + +#ifdef __cplusplus + +// < Deprecated + +class 
CV_EXPORTS CvOpenGlFuncTab +{ +public: + virtual ~CvOpenGlFuncTab(); + + virtual void genBuffers(int n, unsigned int* buffers) const = 0; + virtual void deleteBuffers(int n, const unsigned int* buffers) const = 0; + + virtual void bufferData(unsigned int target, ptrdiff_t size, const void* data, unsigned int usage) const = 0; + virtual void bufferSubData(unsigned int target, ptrdiff_t offset, ptrdiff_t size, const void* data) const = 0; + + virtual void bindBuffer(unsigned int target, unsigned int buffer) const = 0; + + virtual void* mapBuffer(unsigned int target, unsigned int access) const = 0; + virtual void unmapBuffer(unsigned int target) const = 0; + + virtual void generateBitmapFont(const std::string& family, int height, int weight, bool italic, bool underline, int start, int count, int base) const = 0; + + virtual bool isGlContextInitialized() const = 0; +}; + +CV_EXPORTS void icvSetOpenGlFuncTab(const CvOpenGlFuncTab* tab); + +CV_EXPORTS bool icvCheckGlError(const char* file, const int line, const char* func = ""); + +// > + +namespace cv { namespace ogl { +CV_EXPORTS bool checkError(const char* file, const int line, const char* func = ""); +}} + +#define CV_CheckGlError() CV_DbgAssert( (cv::ogl::checkError(__FILE__, __LINE__, CV_Func)) ) + +#endif //__cplusplus + +#endif // __OPENCV_CORE_INTERNAL_HPP__ diff --git a/src/c/hardware/rasberrypi/libraries/opencv/opencv2/core/mat.hpp b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/core/mat.hpp new file mode 100644 index 0000000..631c698 --- /dev/null +++ b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/core/mat.hpp @@ -0,0 +1,2625 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. 
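Editor's note: internal.hpp's CV_IMPLEMENT_QSORT macro (documented above together with its *BSD-derived implementation) expands into a complete standalone sort function. A sketch using the built-in CV_LT comparator; icvSortInts is a hypothetical function name chosen for this example:

#include <cstdio>
#include "opencv2/core/internal.hpp"

// Expands to: static void icvSortInts( int* array, size_t total, int aux ) { ... }
static CV_IMPLEMENT_QSORT( icvSortInts, int, CV_LT )

int main()
{
    int v[] = { 3, 1, 4, 1, 5, 9, 2, 6 };
    icvSortInts( v, sizeof(v) / sizeof(v[0]), 0 );   // aux is unused by CV_LT
    for( size_t i = 0; i < sizeof(v) / sizeof(v[0]); ++i )
        std::printf( "%d ", v[i] );
    std::printf( "\n" );
    return 0;
}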
+// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef __OPENCV_CORE_MATRIX_OPERATIONS_HPP__ +#define __OPENCV_CORE_MATRIX_OPERATIONS_HPP__ + +#ifndef SKIP_INCLUDES +#include +#include +#endif // SKIP_INCLUDES + +#ifdef __cplusplus + +namespace cv +{ + +//////////////////////////////// Mat //////////////////////////////// + +inline void Mat::initEmpty() +{ + flags = MAGIC_VAL; + dims = rows = cols = 0; + data = datastart = dataend = datalimit = 0; + refcount = 0; + allocator = 0; +} + +inline Mat::Mat() : size(&rows) +{ + initEmpty(); +} + +inline Mat::Mat(int _rows, int _cols, int _type) : size(&rows) +{ + initEmpty(); + create(_rows, _cols, _type); +} + +inline Mat::Mat(int _rows, int _cols, int _type, const Scalar& _s) : size(&rows) +{ + initEmpty(); + create(_rows, _cols, _type); + *this = _s; +} + +inline Mat::Mat(Size _sz, int _type) : size(&rows) +{ + initEmpty(); + create( _sz.height, _sz.width, _type ); +} + +inline Mat::Mat(Size _sz, int _type, const Scalar& _s) : size(&rows) +{ + initEmpty(); + create(_sz.height, _sz.width, _type); + *this = _s; +} + +inline Mat::Mat(int _dims, const int* _sz, int _type) : size(&rows) +{ + initEmpty(); + create(_dims, _sz, _type); +} + +inline Mat::Mat(int _dims, const int* _sz, int _type, const Scalar& _s) : size(&rows) +{ + initEmpty(); + create(_dims, _sz, _type); + *this = _s; +} + +inline Mat::Mat(const Mat& m) + : flags(m.flags), dims(m.dims), rows(m.rows), cols(m.cols), data(m.data), + refcount(m.refcount), datastart(m.datastart), dataend(m.dataend), + datalimit(m.datalimit), allocator(m.allocator), size(&rows) +{ + if( refcount ) + CV_XADD(refcount, 1); + if( m.dims <= 2 ) + { + step[0] = m.step[0]; step[1] = m.step[1]; + } + else + { + dims = 0; + copySize(m); + } +} + +inline Mat::Mat(int _rows, int _cols, int _type, void* _data, size_t _step) + : flags(MAGIC_VAL + (_type & TYPE_MASK)), dims(2), rows(_rows), cols(_cols), + data((uchar*)_data), refcount(0), datastart((uchar*)_data), dataend(0), + datalimit(0), allocator(0), size(&rows) +{ + size_t esz = CV_ELEM_SIZE(_type), minstep = cols*esz; + if( _step == AUTO_STEP ) + { + _step = minstep; + flags |= CONTINUOUS_FLAG; + } + else + { + if( rows == 1 ) _step = minstep; + CV_DbgAssert( _step >= minstep ); + flags |= _step == minstep ? CONTINUOUS_FLAG : 0; + } + step[0] = _step; step[1] = esz; + datalimit = datastart + _step*rows; + dataend = datalimit - _step + minstep; +} + +inline Mat::Mat(Size _sz, int _type, void* _data, size_t _step) + : flags(MAGIC_VAL + (_type & TYPE_MASK)), dims(2), rows(_sz.height), cols(_sz.width), + data((uchar*)_data), refcount(0), datastart((uchar*)_data), dataend(0), + datalimit(0), allocator(0), size(&rows) +{ + size_t esz = CV_ELEM_SIZE(_type), minstep = cols*esz; + if( _step == AUTO_STEP ) + { + _step = minstep; + flags |= CONTINUOUS_FLAG; + } + else + { + if( rows == 1 ) _step = minstep; + CV_DbgAssert( _step >= minstep ); + flags |= _step == minstep ? 
CONTINUOUS_FLAG : 0; + } + step[0] = _step; step[1] = esz; + datalimit = datastart + _step*rows; + dataend = datalimit - _step + minstep; +} + + +template inline Mat::Mat(const vector<_Tp>& vec, bool copyData) + : flags(MAGIC_VAL | DataType<_Tp>::type | CV_MAT_CONT_FLAG), + dims(2), rows((int)vec.size()), cols(1), data(0), refcount(0), + datastart(0), dataend(0), allocator(0), size(&rows) +{ + if(vec.empty()) + return; + if( !copyData ) + { + step[0] = step[1] = sizeof(_Tp); + data = datastart = (uchar*)&vec[0]; + datalimit = dataend = datastart + rows*step[0]; + } + else + Mat((int)vec.size(), 1, DataType<_Tp>::type, (uchar*)&vec[0]).copyTo(*this); +} + + +template inline Mat::Mat(const Vec<_Tp, n>& vec, bool copyData) + : flags(MAGIC_VAL | DataType<_Tp>::type | CV_MAT_CONT_FLAG), + dims(2), rows(n), cols(1), data(0), refcount(0), + datastart(0), dataend(0), allocator(0), size(&rows) +{ + if( !copyData ) + { + step[0] = step[1] = sizeof(_Tp); + data = datastart = (uchar*)vec.val; + datalimit = dataend = datastart + rows*step[0]; + } + else + Mat(n, 1, DataType<_Tp>::type, (void*)vec.val).copyTo(*this); +} + + +template inline Mat::Mat(const Matx<_Tp,m,n>& M, bool copyData) + : flags(MAGIC_VAL | DataType<_Tp>::type | CV_MAT_CONT_FLAG), + dims(2), rows(m), cols(n), data(0), refcount(0), + datastart(0), dataend(0), allocator(0), size(&rows) +{ + if( !copyData ) + { + step[0] = cols*sizeof(_Tp); + step[1] = sizeof(_Tp); + data = datastart = (uchar*)M.val; + datalimit = dataend = datastart + rows*step[0]; + } + else + Mat(m, n, DataType<_Tp>::type, (uchar*)M.val).copyTo(*this); +} + + +template inline Mat::Mat(const Point_<_Tp>& pt, bool copyData) + : flags(MAGIC_VAL | DataType<_Tp>::type | CV_MAT_CONT_FLAG), + dims(2), rows(2), cols(1), data(0), refcount(0), + datastart(0), dataend(0), allocator(0), size(&rows) +{ + if( !copyData ) + { + step[0] = step[1] = sizeof(_Tp); + data = datastart = (uchar*)&pt.x; + datalimit = dataend = datastart + rows*step[0]; + } + else + { + create(2, 1, DataType<_Tp>::type); + ((_Tp*)data)[0] = pt.x; + ((_Tp*)data)[1] = pt.y; + } +} + + +template inline Mat::Mat(const Point3_<_Tp>& pt, bool copyData) + : flags(MAGIC_VAL | DataType<_Tp>::type | CV_MAT_CONT_FLAG), + dims(2), rows(3), cols(1), data(0), refcount(0), + datastart(0), dataend(0), allocator(0), size(&rows) +{ + if( !copyData ) + { + step[0] = step[1] = sizeof(_Tp); + data = datastart = (uchar*)&pt.x; + datalimit = dataend = datastart + rows*step[0]; + } + else + { + create(3, 1, DataType<_Tp>::type); + ((_Tp*)data)[0] = pt.x; + ((_Tp*)data)[1] = pt.y; + ((_Tp*)data)[2] = pt.z; + } +} + + +template inline Mat::Mat(const MatCommaInitializer_<_Tp>& commaInitializer) + : flags(MAGIC_VAL | DataType<_Tp>::type | CV_MAT_CONT_FLAG), + dims(0), rows(0), cols(0), data(0), refcount(0), + datastart(0), dataend(0), allocator(0), size(&rows) +{ + *this = *commaInitializer; +} + +inline Mat::~Mat() +{ + release(); + if( step.p != step.buf ) + fastFree(step.p); +} + +inline Mat& Mat::operator = (const Mat& m) +{ + if( this != &m ) + { + if( m.refcount ) + CV_XADD(m.refcount, 1); + release(); + flags = m.flags; + if( dims <= 2 && m.dims <= 2 ) + { + dims = m.dims; + rows = m.rows; + cols = m.cols; + step[0] = m.step[0]; + step[1] = m.step[1]; + } + else + copySize(m); + data = m.data; + datastart = m.datastart; + dataend = m.dataend; + datalimit = m.datalimit; + refcount = m.refcount; + allocator = m.allocator; + } + return *this; +} + +inline Mat Mat::row(int y) const { return Mat(*this, Range(y, y+1), Range::all()); } 
+inline Mat Mat::col(int x) const { return Mat(*this, Range::all(), Range(x, x+1)); } +inline Mat Mat::rowRange(int startrow, int endrow) const + { return Mat(*this, Range(startrow, endrow), Range::all()); } +inline Mat Mat::rowRange(const Range& r) const + { return Mat(*this, r, Range::all()); } +inline Mat Mat::colRange(int startcol, int endcol) const + { return Mat(*this, Range::all(), Range(startcol, endcol)); } +inline Mat Mat::colRange(const Range& r) const + { return Mat(*this, Range::all(), r); } + +inline Mat Mat::diag(const Mat& d) +{ + CV_Assert( d.cols == 1 || d.rows == 1 ); + int len = d.rows + d.cols - 1; + Mat m(len, len, d.type(), Scalar(0)), md = m.diag(); + if( d.cols == 1 ) + d.copyTo(md); + else + transpose(d, md); + return m; +} + +inline Mat Mat::clone() const +{ + Mat m; + copyTo(m); + return m; +} + +inline void Mat::assignTo( Mat& m, int _type ) const +{ + if( _type < 0 ) + m = *this; + else + convertTo(m, _type); +} + +inline void Mat::create(int _rows, int _cols, int _type) +{ + _type &= TYPE_MASK; + if( dims <= 2 && rows == _rows && cols == _cols && type() == _type && data ) + return; + int sz[] = {_rows, _cols}; + create(2, sz, _type); +} + +inline void Mat::create(Size _sz, int _type) +{ + create(_sz.height, _sz.width, _type); +} + +inline void Mat::addref() +{ if( refcount ) CV_XADD(refcount, 1); } + +inline void Mat::release() +{ + if( refcount && CV_XADD(refcount, -1) == 1 ) + deallocate(); + data = datastart = dataend = datalimit = 0; + for(int i = 0; i < dims; i++) + size.p[i] = 0; + refcount = 0; +} + +inline Mat Mat::operator()( Range _rowRange, Range _colRange ) const +{ + return Mat(*this, _rowRange, _colRange); +} + +inline Mat Mat::operator()( const Rect& roi ) const +{ return Mat(*this, roi); } + +inline Mat Mat::operator()(const Range* ranges) const +{ + return Mat(*this, ranges); +} + +inline Mat::operator CvMat() const +{ + CV_DbgAssert(dims <= 2); + CvMat m = cvMat(rows, dims == 1 ? 1 : cols, type(), data); + m.step = (int)step[0]; + m.type = (m.type & ~CONTINUOUS_FLAG) | (flags & CONTINUOUS_FLAG); + return m; +} + +inline bool Mat::isContinuous() const { return (flags & CONTINUOUS_FLAG) != 0; } +inline bool Mat::isSubmatrix() const { return (flags & SUBMATRIX_FLAG) != 0; } +inline size_t Mat::elemSize() const { return dims > 0 ? 
step.p[dims-1] : 0; } +inline size_t Mat::elemSize1() const { return CV_ELEM_SIZE1(flags); } +inline int Mat::type() const { return CV_MAT_TYPE(flags); } +inline int Mat::depth() const { return CV_MAT_DEPTH(flags); } +inline int Mat::channels() const { return CV_MAT_CN(flags); } +inline size_t Mat::step1(int i) const { return step.p[i]/elemSize1(); } +inline bool Mat::empty() const { return data == 0 || total() == 0; } +inline size_t Mat::total() const +{ + if( dims <= 2 ) + return (size_t)rows*cols; + size_t p = 1; + for( int i = 0; i < dims; i++ ) + p *= size[i]; + return p; +} + +inline uchar* Mat::ptr(int y) +{ + CV_DbgAssert( y == 0 || (data && dims >= 1 && (unsigned)y < (unsigned)size.p[0]) ); + return data + step.p[0]*y; +} + +inline const uchar* Mat::ptr(int y) const +{ + CV_DbgAssert( y == 0 || (data && dims >= 1 && (unsigned)y < (unsigned)size.p[0]) ); + return data + step.p[0]*y; +} + +template inline _Tp* Mat::ptr(int y) +{ + CV_DbgAssert( y == 0 || (data && dims >= 1 && (unsigned)y < (unsigned)size.p[0]) ); + return (_Tp*)(data + step.p[0]*y); +} + +template inline const _Tp* Mat::ptr(int y) const +{ + CV_DbgAssert( y == 0 || (data && dims >= 1 && (unsigned)y < (unsigned)size.p[0]) ); + return (const _Tp*)(data + step.p[0]*y); +} + + +inline uchar* Mat::ptr(int i0, int i1) +{ + CV_DbgAssert( dims >= 2 && data && + (unsigned)i0 < (unsigned)size.p[0] && + (unsigned)i1 < (unsigned)size.p[1] ); + return data + i0*step.p[0] + i1*step.p[1]; +} + +inline const uchar* Mat::ptr(int i0, int i1) const +{ + CV_DbgAssert( dims >= 2 && data && + (unsigned)i0 < (unsigned)size.p[0] && + (unsigned)i1 < (unsigned)size.p[1] ); + return data + i0*step.p[0] + i1*step.p[1]; +} + +template inline _Tp* Mat::ptr(int i0, int i1) +{ + CV_DbgAssert( dims >= 2 && data && + (unsigned)i0 < (unsigned)size.p[0] && + (unsigned)i1 < (unsigned)size.p[1] ); + return (_Tp*)(data + i0*step.p[0] + i1*step.p[1]); +} + +template inline const _Tp* Mat::ptr(int i0, int i1) const +{ + CV_DbgAssert( dims >= 2 && data && + (unsigned)i0 < (unsigned)size.p[0] && + (unsigned)i1 < (unsigned)size.p[1] ); + return (const _Tp*)(data + i0*step.p[0] + i1*step.p[1]); +} + +inline uchar* Mat::ptr(int i0, int i1, int i2) +{ + CV_DbgAssert( dims >= 3 && data && + (unsigned)i0 < (unsigned)size.p[0] && + (unsigned)i1 < (unsigned)size.p[1] && + (unsigned)i2 < (unsigned)size.p[2] ); + return data + i0*step.p[0] + i1*step.p[1] + i2*step.p[2]; +} + +inline const uchar* Mat::ptr(int i0, int i1, int i2) const +{ + CV_DbgAssert( dims >= 3 && data && + (unsigned)i0 < (unsigned)size.p[0] && + (unsigned)i1 < (unsigned)size.p[1] && + (unsigned)i2 < (unsigned)size.p[2] ); + return data + i0*step.p[0] + i1*step.p[1] + i2*step.p[2]; +} + +template inline _Tp* Mat::ptr(int i0, int i1, int i2) +{ + CV_DbgAssert( dims >= 3 && data && + (unsigned)i0 < (unsigned)size.p[0] && + (unsigned)i1 < (unsigned)size.p[1] && + (unsigned)i2 < (unsigned)size.p[2] ); + return (_Tp*)(data + i0*step.p[0] + i1*step.p[1] + i2*step.p[2]); +} + +template inline const _Tp* Mat::ptr(int i0, int i1, int i2) const +{ + CV_DbgAssert( dims >= 3 && data && + (unsigned)i0 < (unsigned)size.p[0] && + (unsigned)i1 < (unsigned)size.p[1] && + (unsigned)i2 < (unsigned)size.p[2] ); + return (const _Tp*)(data + i0*step.p[0] + i1*step.p[1] + i2*step.p[2]); +} + +inline uchar* Mat::ptr(const int* idx) +{ + int i, d = dims; + uchar* p = data; + CV_DbgAssert( d >= 1 && p ); + for( i = 0; i < d; i++ ) + { + CV_DbgAssert( (unsigned)idx[i] < (unsigned)size.p[i] ); + p += idx[i]*step.p[i]; + } + 
return p; +} + +inline const uchar* Mat::ptr(const int* idx) const +{ + int i, d = dims; + uchar* p = data; + CV_DbgAssert( d >= 1 && p ); + for( i = 0; i < d; i++ ) + { + CV_DbgAssert( (unsigned)idx[i] < (unsigned)size.p[i] ); + p += idx[i]*step.p[i]; + } + return p; +} + +template inline _Tp& Mat::at(int i0, int i1) +{ + CV_DbgAssert( dims <= 2 && data && (unsigned)i0 < (unsigned)size.p[0] && + (unsigned)(i1*DataType<_Tp>::channels) < (unsigned)(size.p[1]*channels()) && + CV_ELEM_SIZE1(DataType<_Tp>::depth) == elemSize1()); + return ((_Tp*)(data + step.p[0]*i0))[i1]; +} + +template inline const _Tp& Mat::at(int i0, int i1) const +{ + CV_DbgAssert( dims <= 2 && data && (unsigned)i0 < (unsigned)size.p[0] && + (unsigned)(i1*DataType<_Tp>::channels) < (unsigned)(size.p[1]*channels()) && + CV_ELEM_SIZE1(DataType<_Tp>::depth) == elemSize1()); + return ((const _Tp*)(data + step.p[0]*i0))[i1]; +} + +template inline _Tp& Mat::at(Point pt) +{ + CV_DbgAssert( dims <= 2 && data && (unsigned)pt.y < (unsigned)size.p[0] && + (unsigned)(pt.x*DataType<_Tp>::channels) < (unsigned)(size.p[1]*channels()) && + CV_ELEM_SIZE1(DataType<_Tp>::depth) == elemSize1()); + return ((_Tp*)(data + step.p[0]*pt.y))[pt.x]; +} + +template inline const _Tp& Mat::at(Point pt) const +{ + CV_DbgAssert( dims <= 2 && data && (unsigned)pt.y < (unsigned)size.p[0] && + (unsigned)(pt.x*DataType<_Tp>::channels) < (unsigned)(size.p[1]*channels()) && + CV_ELEM_SIZE1(DataType<_Tp>::depth) == elemSize1()); + return ((const _Tp*)(data + step.p[0]*pt.y))[pt.x]; +} + +template inline _Tp& Mat::at(int i0) +{ + CV_DbgAssert( dims <= 2 && data && + (unsigned)i0 < (unsigned)(size.p[0]*size.p[1]) && + elemSize() == CV_ELEM_SIZE(DataType<_Tp>::type) ); + if( isContinuous() || size.p[0] == 1 ) + return ((_Tp*)data)[i0]; + if( size.p[1] == 1 ) + return *(_Tp*)(data + step.p[0]*i0); + int i = i0/cols, j = i0 - i*cols; + return ((_Tp*)(data + step.p[0]*i))[j]; +} + +template inline const _Tp& Mat::at(int i0) const +{ + CV_DbgAssert( dims <= 2 && data && + (unsigned)i0 < (unsigned)(size.p[0]*size.p[1]) && + elemSize() == CV_ELEM_SIZE(DataType<_Tp>::type) ); + if( isContinuous() || size.p[0] == 1 ) + return ((const _Tp*)data)[i0]; + if( size.p[1] == 1 ) + return *(const _Tp*)(data + step.p[0]*i0); + int i = i0/cols, j = i0 - i*cols; + return ((const _Tp*)(data + step.p[0]*i))[j]; +} + +template inline _Tp& Mat::at(int i0, int i1, int i2) +{ + CV_DbgAssert( elemSize() == CV_ELEM_SIZE(DataType<_Tp>::type) ); + return *(_Tp*)ptr(i0, i1, i2); +} +template inline const _Tp& Mat::at(int i0, int i1, int i2) const +{ + CV_DbgAssert( elemSize() == CV_ELEM_SIZE(DataType<_Tp>::type) ); + return *(const _Tp*)ptr(i0, i1, i2); +} +template inline _Tp& Mat::at(const int* idx) +{ + CV_DbgAssert( elemSize() == CV_ELEM_SIZE(DataType<_Tp>::type) ); + return *(_Tp*)ptr(idx); +} +template inline const _Tp& Mat::at(const int* idx) const +{ + CV_DbgAssert( elemSize() == CV_ELEM_SIZE(DataType<_Tp>::type) ); + return *(const _Tp*)ptr(idx); +} +template _Tp& Mat::at(const Vec& idx) +{ + CV_DbgAssert( elemSize() == CV_ELEM_SIZE(DataType<_Tp>::type) ); + return *(_Tp*)ptr(idx.val); +} +template inline const _Tp& Mat::at(const Vec& idx) const +{ + CV_DbgAssert( elemSize() == CV_ELEM_SIZE(DataType<_Tp>::type) ); + return *(const _Tp*)ptr(idx.val); +} + + +template inline MatConstIterator_<_Tp> Mat::begin() const +{ + CV_DbgAssert( elemSize() == sizeof(_Tp) ); + return MatConstIterator_<_Tp>((const Mat_<_Tp>*)this); +} + +template inline MatConstIterator_<_Tp> Mat::end() const +{ + 
CV_DbgAssert( elemSize() == sizeof(_Tp) ); + MatConstIterator_<_Tp> it((const Mat_<_Tp>*)this); + it += total(); + return it; +} + +template inline MatIterator_<_Tp> Mat::begin() +{ + CV_DbgAssert( elemSize() == sizeof(_Tp) ); + return MatIterator_<_Tp>((Mat_<_Tp>*)this); +} + +template inline MatIterator_<_Tp> Mat::end() +{ + CV_DbgAssert( elemSize() == sizeof(_Tp) ); + MatIterator_<_Tp> it((Mat_<_Tp>*)this); + it += total(); + return it; +} + +template inline Mat::operator vector<_Tp>() const +{ + vector<_Tp> v; + copyTo(v); + return v; +} + +template inline Mat::operator Vec<_Tp, n>() const +{ + CV_Assert( data && dims <= 2 && (rows == 1 || cols == 1) && + rows + cols - 1 == n && channels() == 1 ); + + if( isContinuous() && type() == DataType<_Tp>::type ) + return Vec<_Tp, n>((_Tp*)data); + Vec<_Tp, n> v; Mat tmp(rows, cols, DataType<_Tp>::type, v.val); + convertTo(tmp, tmp.type()); + return v; +} + +template inline Mat::operator Matx<_Tp, m, n>() const +{ + CV_Assert( data && dims <= 2 && rows == m && cols == n && channels() == 1 ); + + if( isContinuous() && type() == DataType<_Tp>::type ) + return Matx<_Tp, m, n>((_Tp*)data); + Matx<_Tp, m, n> mtx; Mat tmp(rows, cols, DataType<_Tp>::type, mtx.val); + convertTo(tmp, tmp.type()); + return mtx; +} + + +template inline void Mat::push_back(const _Tp& elem) +{ + if( !data ) + { + CV_Assert((type()==0) || (DataType<_Tp>::type == type())); + + *this = Mat(1, 1, DataType<_Tp>::type, (void*)&elem).clone(); + return; + } + CV_Assert(DataType<_Tp>::type == type() && cols == 1 + /* && dims == 2 (cols == 1 implies dims == 2) */); + uchar* tmp = dataend + step[0]; + if( !isSubmatrix() && isContinuous() && tmp <= datalimit ) + { + *(_Tp*)(data + (size.p[0]++)*step.p[0]) = elem; + dataend = tmp; + } + else + push_back_(&elem); +} + +template inline void Mat::push_back(const Mat_<_Tp>& m) +{ + push_back((const Mat&)m); +} + +inline Mat::MSize::MSize(int* _p) : p(_p) {} +inline Size Mat::MSize::operator()() const +{ + CV_DbgAssert(p[-1] <= 2); + return Size(p[1], p[0]); +} +inline const int& Mat::MSize::operator[](int i) const { return p[i]; } +inline int& Mat::MSize::operator[](int i) { return p[i]; } +inline Mat::MSize::operator const int*() const { return p; } + +inline bool Mat::MSize::operator == (const MSize& sz) const +{ + int d = p[-1], dsz = sz.p[-1]; + if( d != dsz ) + return false; + if( d == 2 ) + return p[0] == sz.p[0] && p[1] == sz.p[1]; + + for( int i = 0; i < d; i++ ) + if( p[i] != sz.p[i] ) + return false; + return true; +} + +inline bool Mat::MSize::operator != (const MSize& sz) const +{ + return !(*this == sz); +} + +inline Mat::MStep::MStep() { p = buf; p[0] = p[1] = 0; } +inline Mat::MStep::MStep(size_t s) { p = buf; p[0] = s; p[1] = 0; } +inline const size_t& Mat::MStep::operator[](int i) const { return p[i]; } +inline size_t& Mat::MStep::operator[](int i) { return p[i]; } +inline Mat::MStep::operator size_t() const +{ + CV_DbgAssert( p == buf ); + return buf[0]; +} +inline Mat::MStep& Mat::MStep::operator = (size_t s) +{ + CV_DbgAssert( p == buf ); + buf[0] = s; + return *this; +} + +static inline Mat cvarrToMatND(const CvArr* arr, bool copyData=false, int coiMode=0) +{ + return cvarrToMat(arr, copyData, true, coiMode); +} + +///////////////////////////////////////////// SVD ////////////////////////////////////////////////////// + +inline SVD::SVD() {} +inline SVD::SVD( InputArray m, int flags ) { operator ()(m, flags); } +inline void SVD::solveZ( InputArray m, OutputArray _dst ) +{ + Mat mtx = m.getMat(); + SVD svd(mtx, (mtx.rows 
>= mtx.cols ? 0 : SVD::FULL_UV)); + _dst.create(svd.vt.cols, 1, svd.vt.type()); + Mat dst = _dst.getMat(); + svd.vt.row(svd.vt.rows-1).reshape(1,svd.vt.cols).copyTo(dst); +} + +template inline void + SVD::compute( const Matx<_Tp, m, n>& a, Matx<_Tp, nm, 1>& w, Matx<_Tp, m, nm>& u, Matx<_Tp, n, nm>& vt ) +{ + assert( nm == MIN(m, n)); + Mat _a(a, false), _u(u, false), _w(w, false), _vt(vt, false); + SVD::compute(_a, _w, _u, _vt); + CV_Assert(_w.data == (uchar*)&w.val[0] && _u.data == (uchar*)&u.val[0] && _vt.data == (uchar*)&vt.val[0]); +} + +template inline void +SVD::compute( const Matx<_Tp, m, n>& a, Matx<_Tp, nm, 1>& w ) +{ + assert( nm == MIN(m, n)); + Mat _a(a, false), _w(w, false); + SVD::compute(_a, _w); + CV_Assert(_w.data == (uchar*)&w.val[0]); +} + +template inline void +SVD::backSubst( const Matx<_Tp, nm, 1>& w, const Matx<_Tp, m, nm>& u, + const Matx<_Tp, n, nm>& vt, const Matx<_Tp, m, nb>& rhs, + Matx<_Tp, n, nb>& dst ) +{ + assert( nm == MIN(m, n)); + Mat _u(u, false), _w(w, false), _vt(vt, false), _rhs(rhs, false), _dst(dst, false); + SVD::backSubst(_w, _u, _vt, _rhs, _dst); + CV_Assert(_dst.data == (uchar*)&dst.val[0]); +} + +///////////////////////////////// Mat_<_Tp> //////////////////////////////////// + +template inline Mat_<_Tp>::Mat_() + : Mat() { flags = (flags & ~CV_MAT_TYPE_MASK) | DataType<_Tp>::type; } + +template inline Mat_<_Tp>::Mat_(int _rows, int _cols) + : Mat(_rows, _cols, DataType<_Tp>::type) {} + +template inline Mat_<_Tp>::Mat_(int _rows, int _cols, const _Tp& value) + : Mat(_rows, _cols, DataType<_Tp>::type) { *this = value; } + +template inline Mat_<_Tp>::Mat_(Size _sz) + : Mat(_sz.height, _sz.width, DataType<_Tp>::type) {} + +template inline Mat_<_Tp>::Mat_(Size _sz, const _Tp& value) + : Mat(_sz.height, _sz.width, DataType<_Tp>::type) { *this = value; } + +template inline Mat_<_Tp>::Mat_(int _dims, const int* _sz) + : Mat(_dims, _sz, DataType<_Tp>::type) {} + +template inline Mat_<_Tp>::Mat_(int _dims, const int* _sz, const _Tp& _s) + : Mat(_dims, _sz, DataType<_Tp>::type, Scalar(_s)) {} + +template inline Mat_<_Tp>::Mat_(int _dims, const int* _sz, _Tp* _data, const size_t* _steps) + : Mat(_dims, _sz, DataType<_Tp>::type, _data, _steps) {} + +template inline Mat_<_Tp>::Mat_(const Mat_<_Tp>& m, const Range* ranges) + : Mat(m, ranges) {} + +template inline Mat_<_Tp>::Mat_(const Mat& m) + : Mat() { flags = (flags & ~CV_MAT_TYPE_MASK) | DataType<_Tp>::type; *this = m; } + +template inline Mat_<_Tp>::Mat_(const Mat_& m) + : Mat(m) {} + +template inline Mat_<_Tp>::Mat_(int _rows, int _cols, _Tp* _data, size_t steps) + : Mat(_rows, _cols, DataType<_Tp>::type, _data, steps) {} + +template inline Mat_<_Tp>::Mat_(const Mat_& m, const Range& _rowRange, const Range& _colRange) + : Mat(m, _rowRange, _colRange) {} + +template inline Mat_<_Tp>::Mat_(const Mat_& m, const Rect& roi) + : Mat(m, roi) {} + +template template inline + Mat_<_Tp>::Mat_(const Vec::channel_type, n>& vec, bool copyData) + : Mat(n/DataType<_Tp>::channels, 1, DataType<_Tp>::type, (void*)&vec) +{ + CV_Assert(n%DataType<_Tp>::channels == 0); + if( copyData ) + *this = clone(); +} + +template template inline + Mat_<_Tp>::Mat_(const Matx::channel_type,m,n>& M, bool copyData) + : Mat(m, n/DataType<_Tp>::channels, DataType<_Tp>::type, (void*)&M) +{ + CV_Assert(n % DataType<_Tp>::channels == 0); + if( copyData ) + *this = clone(); +} + +template inline Mat_<_Tp>::Mat_(const Point_::channel_type>& pt, bool copyData) + : Mat(2/DataType<_Tp>::channels, 1, DataType<_Tp>::type, (void*)&pt) +{ + 
CV_Assert(2 % DataType<_Tp>::channels == 0); + if( copyData ) + *this = clone(); +} + +template inline Mat_<_Tp>::Mat_(const Point3_::channel_type>& pt, bool copyData) + : Mat(3/DataType<_Tp>::channels, 1, DataType<_Tp>::type, (void*)&pt) +{ + CV_Assert(3 % DataType<_Tp>::channels == 0); + if( copyData ) + *this = clone(); +} + +template inline Mat_<_Tp>::Mat_(const MatCommaInitializer_<_Tp>& commaInitializer) + : Mat(commaInitializer) {} + +template inline Mat_<_Tp>::Mat_(const vector<_Tp>& vec, bool copyData) + : Mat(vec, copyData) {} + +template inline Mat_<_Tp>& Mat_<_Tp>::operator = (const Mat& m) +{ + if( DataType<_Tp>::type == m.type() ) + { + Mat::operator = (m); + return *this; + } + if( DataType<_Tp>::depth == m.depth() ) + { + return (*this = m.reshape(DataType<_Tp>::channels, m.dims, 0)); + } + CV_DbgAssert(DataType<_Tp>::channels == m.channels()); + m.convertTo(*this, type()); + return *this; +} + +template inline Mat_<_Tp>& Mat_<_Tp>::operator = (const Mat_& m) +{ + Mat::operator=(m); + return *this; +} + +template inline Mat_<_Tp>& Mat_<_Tp>::operator = (const _Tp& s) +{ + typedef typename DataType<_Tp>::vec_type VT; + Mat::operator=(Scalar((const VT&)s)); + return *this; +} + +template inline void Mat_<_Tp>::create(int _rows, int _cols) +{ + Mat::create(_rows, _cols, DataType<_Tp>::type); +} + +template inline void Mat_<_Tp>::create(Size _sz) +{ + Mat::create(_sz, DataType<_Tp>::type); +} + +template inline void Mat_<_Tp>::create(int _dims, const int* _sz) +{ + Mat::create(_dims, _sz, DataType<_Tp>::type); +} + + +template inline Mat_<_Tp> Mat_<_Tp>::cross(const Mat_& m) const +{ return Mat_<_Tp>(Mat::cross(m)); } + +template template inline Mat_<_Tp>::operator Mat_() const +{ return Mat_(*this); } + +template inline Mat_<_Tp> Mat_<_Tp>::row(int y) const +{ return Mat_(*this, Range(y, y+1), Range::all()); } +template inline Mat_<_Tp> Mat_<_Tp>::col(int x) const +{ return Mat_(*this, Range::all(), Range(x, x+1)); } +template inline Mat_<_Tp> Mat_<_Tp>::diag(int d) const +{ return Mat_(Mat::diag(d)); } +template inline Mat_<_Tp> Mat_<_Tp>::clone() const +{ return Mat_(Mat::clone()); } + +template inline size_t Mat_<_Tp>::elemSize() const +{ + CV_DbgAssert( Mat::elemSize() == sizeof(_Tp) ); + return sizeof(_Tp); +} + +template inline size_t Mat_<_Tp>::elemSize1() const +{ + CV_DbgAssert( Mat::elemSize1() == sizeof(_Tp)/DataType<_Tp>::channels ); + return sizeof(_Tp)/DataType<_Tp>::channels; +} +template inline int Mat_<_Tp>::type() const +{ + CV_DbgAssert( Mat::type() == DataType<_Tp>::type ); + return DataType<_Tp>::type; +} +template inline int Mat_<_Tp>::depth() const +{ + CV_DbgAssert( Mat::depth() == DataType<_Tp>::depth ); + return DataType<_Tp>::depth; +} +template inline int Mat_<_Tp>::channels() const +{ + CV_DbgAssert( Mat::channels() == DataType<_Tp>::channels ); + return DataType<_Tp>::channels; +} +template inline size_t Mat_<_Tp>::stepT(int i) const { return step.p[i]/elemSize(); } +template inline size_t Mat_<_Tp>::step1(int i) const { return step.p[i]/elemSize1(); } + +template inline Mat_<_Tp>& Mat_<_Tp>::adjustROI( int dtop, int dbottom, int dleft, int dright ) +{ return (Mat_<_Tp>&)(Mat::adjustROI(dtop, dbottom, dleft, dright)); } + +template inline Mat_<_Tp> Mat_<_Tp>::operator()( const Range& _rowRange, const Range& _colRange ) const +{ return Mat_<_Tp>(*this, _rowRange, _colRange); } + +template inline Mat_<_Tp> Mat_<_Tp>::operator()( const Rect& roi ) const +{ return Mat_<_Tp>(*this, roi); } + +template inline Mat_<_Tp> Mat_<_Tp>::operator()( const 
Range* ranges ) const +{ return Mat_<_Tp>(*this, ranges); } + +template inline _Tp* Mat_<_Tp>::operator [](int y) +{ return (_Tp*)ptr(y); } +template inline const _Tp* Mat_<_Tp>::operator [](int y) const +{ return (const _Tp*)ptr(y); } + +template inline _Tp& Mat_<_Tp>::operator ()(int i0, int i1) +{ + CV_DbgAssert( dims <= 2 && data && + (unsigned)i0 < (unsigned)size.p[0] && + (unsigned)i1 < (unsigned)size.p[1] && + type() == DataType<_Tp>::type ); + return ((_Tp*)(data + step.p[0]*i0))[i1]; +} + +template inline const _Tp& Mat_<_Tp>::operator ()(int i0, int i1) const +{ + CV_DbgAssert( dims <= 2 && data && + (unsigned)i0 < (unsigned)size.p[0] && + (unsigned)i1 < (unsigned)size.p[1] && + type() == DataType<_Tp>::type ); + return ((const _Tp*)(data + step.p[0]*i0))[i1]; +} + +template inline _Tp& Mat_<_Tp>::operator ()(Point pt) +{ + CV_DbgAssert( dims <= 2 && data && + (unsigned)pt.y < (unsigned)size.p[0] && + (unsigned)pt.x < (unsigned)size.p[1] && + type() == DataType<_Tp>::type ); + return ((_Tp*)(data + step.p[0]*pt.y))[pt.x]; +} + +template inline const _Tp& Mat_<_Tp>::operator ()(Point pt) const +{ + CV_DbgAssert( dims <= 2 && data && + (unsigned)pt.y < (unsigned)size.p[0] && + (unsigned)pt.x < (unsigned)size.p[1] && + type() == DataType<_Tp>::type ); + return ((const _Tp*)(data + step.p[0]*pt.y))[pt.x]; +} + +template inline _Tp& Mat_<_Tp>::operator ()(const int* idx) +{ + return Mat::at<_Tp>(idx); +} + +template inline const _Tp& Mat_<_Tp>::operator ()(const int* idx) const +{ + return Mat::at<_Tp>(idx); +} + +template template inline _Tp& Mat_<_Tp>::operator ()(const Vec& idx) +{ + return Mat::at<_Tp>(idx); +} + +template template inline const _Tp& Mat_<_Tp>::operator ()(const Vec& idx) const +{ + return Mat::at<_Tp>(idx); +} + +template inline _Tp& Mat_<_Tp>::operator ()(int i0) +{ + return this->at<_Tp>(i0); +} + +template inline const _Tp& Mat_<_Tp>::operator ()(int i0) const +{ + return this->at<_Tp>(i0); +} + +template inline _Tp& Mat_<_Tp>::operator ()(int i0, int i1, int i2) +{ + return this->at<_Tp>(i0, i1, i2); +} + +template inline const _Tp& Mat_<_Tp>::operator ()(int i0, int i1, int i2) const +{ + return this->at<_Tp>(i0, i1, i2); +} + + +template inline Mat_<_Tp>::operator vector<_Tp>() const +{ + vector<_Tp> v; + copyTo(v); + return v; +} + +template template inline Mat_<_Tp>::operator Vec::channel_type, n>() const +{ + CV_Assert(n % DataType<_Tp>::channels == 0); + return this->Mat::operator Vec::channel_type, n>(); +} + +template template inline Mat_<_Tp>::operator Matx::channel_type, m, n>() const +{ + CV_Assert(n % DataType<_Tp>::channels == 0); + + Matx::channel_type, m, n> res = this->Mat::operator Matx::channel_type, m, n>(); + return res; +} + +template inline void +process( const Mat_& m1, Mat_& m2, Op op ) +{ + int y, x, rows = m1.rows, cols = m1.cols; + + CV_DbgAssert( m1.size() == m2.size() ); + + for( y = 0; y < rows; y++ ) + { + const T1* src = m1[y]; + T2* dst = m2[y]; + + for( x = 0; x < cols; x++ ) + dst[x] = op(src[x]); + } +} + +template inline void +process( const Mat_& m1, const Mat_& m2, Mat_& m3, Op op ) +{ + int y, x, rows = m1.rows, cols = m1.cols; + + CV_DbgAssert( m1.size() == m2.size() ); + + for( y = 0; y < rows; y++ ) + { + const T1* src1 = m1[y]; + const T2* src2 = m2[y]; + T3* dst = m3[y]; + + for( x = 0; x < cols; x++ ) + dst[x] = op( src1[x], src2[x] ); + } +} + + +/////////////////////////////// Input/Output Arrays ///////////////////////////////// + +template inline _InputArray::_InputArray(const vector<_Tp>& vec) + : 
flags(FIXED_TYPE + STD_VECTOR + DataType<_Tp>::type), obj((void*)&vec) {} + +template inline _InputArray::_InputArray(const vector >& vec) + : flags(FIXED_TYPE + STD_VECTOR_VECTOR + DataType<_Tp>::type), obj((void*)&vec) {} + +template inline _InputArray::_InputArray(const vector >& vec) + : flags(FIXED_TYPE + STD_VECTOR_MAT + DataType<_Tp>::type), obj((void*)&vec) {} + +template inline _InputArray::_InputArray(const Matx<_Tp, m, n>& mtx) + : flags(FIXED_TYPE + FIXED_SIZE + MATX + DataType<_Tp>::type), obj((void*)&mtx), sz(n, m) {} + +template inline _InputArray::_InputArray(const _Tp* vec, int n) + : flags(FIXED_TYPE + FIXED_SIZE + MATX + DataType<_Tp>::type), obj((void*)vec), sz(n, 1) {} + +inline _InputArray::_InputArray(const Scalar& s) + : flags(FIXED_TYPE + FIXED_SIZE + MATX + CV_64F), obj((void*)&s), sz(1, 4) {} + +template inline _InputArray::_InputArray(const Mat_<_Tp>& m) + : flags(FIXED_TYPE + MAT + DataType<_Tp>::type), obj((void*)&m) {} + +template inline _OutputArray::_OutputArray(vector<_Tp>& vec) + : _InputArray(vec) {} +template inline _OutputArray::_OutputArray(vector >& vec) + : _InputArray(vec) {} +template inline _OutputArray::_OutputArray(vector >& vec) + : _InputArray(vec) {} +template inline _OutputArray::_OutputArray(Mat_<_Tp>& m) + : _InputArray(m) {} +template inline _OutputArray::_OutputArray(Matx<_Tp, m, n>& mtx) + : _InputArray(mtx) {} +template inline _OutputArray::_OutputArray(_Tp* vec, int n) + : _InputArray(vec, n) {} + +template inline _OutputArray::_OutputArray(const vector<_Tp>& vec) + : _InputArray(vec) {flags |= FIXED_SIZE;} +template inline _OutputArray::_OutputArray(const vector >& vec) + : _InputArray(vec) {flags |= FIXED_SIZE;} +template inline _OutputArray::_OutputArray(const vector >& vec) + : _InputArray(vec) {flags |= FIXED_SIZE;} + +template inline _OutputArray::_OutputArray(const Mat_<_Tp>& m) + : _InputArray(m) {flags |= FIXED_SIZE;} +template inline _OutputArray::_OutputArray(const Matx<_Tp, m, n>& mtx) + : _InputArray(mtx) {} +template inline _OutputArray::_OutputArray(const _Tp* vec, int n) + : _InputArray(vec, n) {} + +//////////////////////////////////// Matrix Expressions ///////////////////////////////////////// + +class CV_EXPORTS MatOp +{ +public: + MatOp() {}; + virtual ~MatOp() {}; + + virtual bool elementWise(const MatExpr& expr) const; + virtual void assign(const MatExpr& expr, Mat& m, int type=-1) const = 0; + virtual void roi(const MatExpr& expr, const Range& rowRange, + const Range& colRange, MatExpr& res) const; + virtual void diag(const MatExpr& expr, int d, MatExpr& res) const; + virtual void augAssignAdd(const MatExpr& expr, Mat& m) const; + virtual void augAssignSubtract(const MatExpr& expr, Mat& m) const; + virtual void augAssignMultiply(const MatExpr& expr, Mat& m) const; + virtual void augAssignDivide(const MatExpr& expr, Mat& m) const; + virtual void augAssignAnd(const MatExpr& expr, Mat& m) const; + virtual void augAssignOr(const MatExpr& expr, Mat& m) const; + virtual void augAssignXor(const MatExpr& expr, Mat& m) const; + + virtual void add(const MatExpr& expr1, const MatExpr& expr2, MatExpr& res) const; + virtual void add(const MatExpr& expr1, const Scalar& s, MatExpr& res) const; + + virtual void subtract(const MatExpr& expr1, const MatExpr& expr2, MatExpr& res) const; + virtual void subtract(const Scalar& s, const MatExpr& expr, MatExpr& res) const; + + virtual void multiply(const MatExpr& expr1, const MatExpr& expr2, MatExpr& res, double scale=1) const; + virtual void multiply(const MatExpr& expr1, double s, 
MatExpr& res) const; + + virtual void divide(const MatExpr& expr1, const MatExpr& expr2, MatExpr& res, double scale=1) const; + virtual void divide(double s, const MatExpr& expr, MatExpr& res) const; + + virtual void abs(const MatExpr& expr, MatExpr& res) const; + + virtual void transpose(const MatExpr& expr, MatExpr& res) const; + virtual void matmul(const MatExpr& expr1, const MatExpr& expr2, MatExpr& res) const; + virtual void invert(const MatExpr& expr, int method, MatExpr& res) const; + + virtual Size size(const MatExpr& expr) const; + virtual int type(const MatExpr& expr) const; +}; + + +class CV_EXPORTS MatExpr +{ +public: + MatExpr() : op(0), flags(0), a(Mat()), b(Mat()), c(Mat()), alpha(0), beta(0), s(Scalar()) {} + MatExpr(const MatOp* _op, int _flags, const Mat& _a=Mat(), const Mat& _b=Mat(), + const Mat& _c=Mat(), double _alpha=1, double _beta=1, const Scalar& _s=Scalar()) + : op(_op), flags(_flags), a(_a), b(_b), c(_c), alpha(_alpha), beta(_beta), s(_s) {} + explicit MatExpr(const Mat& m); + operator Mat() const + { + Mat m; + op->assign(*this, m); + return m; + } + + template operator Mat_<_Tp>() const + { + Mat_<_Tp> m; + op->assign(*this, m, DataType<_Tp>::type); + return m; + } + + MatExpr row(int y) const; + MatExpr col(int x) const; + MatExpr diag(int d=0) const; + MatExpr operator()( const Range& rowRange, const Range& colRange ) const; + MatExpr operator()( const Rect& roi ) const; + + Mat cross(const Mat& m) const; + double dot(const Mat& m) const; + + MatExpr t() const; + MatExpr inv(int method = DECOMP_LU) const; + MatExpr mul(const MatExpr& e, double scale=1) const; + MatExpr mul(const Mat& m, double scale=1) const; + + Size size() const; + int type() const; + + const MatOp* op; + int flags; + + Mat a, b, c; + double alpha, beta; + Scalar s; +}; + + +CV_EXPORTS MatExpr operator + (const Mat& a, const Mat& b); +CV_EXPORTS MatExpr operator + (const Mat& a, const Scalar& s); +CV_EXPORTS MatExpr operator + (const Scalar& s, const Mat& a); +CV_EXPORTS MatExpr operator + (const MatExpr& e, const Mat& m); +CV_EXPORTS MatExpr operator + (const Mat& m, const MatExpr& e); +CV_EXPORTS MatExpr operator + (const MatExpr& e, const Scalar& s); +CV_EXPORTS MatExpr operator + (const Scalar& s, const MatExpr& e); +CV_EXPORTS MatExpr operator + (const MatExpr& e1, const MatExpr& e2); + +CV_EXPORTS MatExpr operator - (const Mat& a, const Mat& b); +CV_EXPORTS MatExpr operator - (const Mat& a, const Scalar& s); +CV_EXPORTS MatExpr operator - (const Scalar& s, const Mat& a); +CV_EXPORTS MatExpr operator - (const MatExpr& e, const Mat& m); +CV_EXPORTS MatExpr operator - (const Mat& m, const MatExpr& e); +CV_EXPORTS MatExpr operator - (const MatExpr& e, const Scalar& s); +CV_EXPORTS MatExpr operator - (const Scalar& s, const MatExpr& e); +CV_EXPORTS MatExpr operator - (const MatExpr& e1, const MatExpr& e2); + +CV_EXPORTS MatExpr operator - (const Mat& m); +CV_EXPORTS MatExpr operator - (const MatExpr& e); + +CV_EXPORTS MatExpr operator * (const Mat& a, const Mat& b); +CV_EXPORTS MatExpr operator * (const Mat& a, double s); +CV_EXPORTS MatExpr operator * (double s, const Mat& a); +CV_EXPORTS MatExpr operator * (const MatExpr& e, const Mat& m); +CV_EXPORTS MatExpr operator * (const Mat& m, const MatExpr& e); +CV_EXPORTS MatExpr operator * (const MatExpr& e, double s); +CV_EXPORTS MatExpr operator * (double s, const MatExpr& e); +CV_EXPORTS MatExpr operator * (const MatExpr& e1, const MatExpr& e2); + +CV_EXPORTS MatExpr operator / (const Mat& a, const Mat& b); +CV_EXPORTS MatExpr operator / 
(const Mat& a, double s); +CV_EXPORTS MatExpr operator / (double s, const Mat& a); +CV_EXPORTS MatExpr operator / (const MatExpr& e, const Mat& m); +CV_EXPORTS MatExpr operator / (const Mat& m, const MatExpr& e); +CV_EXPORTS MatExpr operator / (const MatExpr& e, double s); +CV_EXPORTS MatExpr operator / (double s, const MatExpr& e); +CV_EXPORTS MatExpr operator / (const MatExpr& e1, const MatExpr& e2); + +CV_EXPORTS MatExpr operator < (const Mat& a, const Mat& b); +CV_EXPORTS MatExpr operator < (const Mat& a, double s); +CV_EXPORTS MatExpr operator < (double s, const Mat& a); + +CV_EXPORTS MatExpr operator <= (const Mat& a, const Mat& b); +CV_EXPORTS MatExpr operator <= (const Mat& a, double s); +CV_EXPORTS MatExpr operator <= (double s, const Mat& a); + +CV_EXPORTS MatExpr operator == (const Mat& a, const Mat& b); +CV_EXPORTS MatExpr operator == (const Mat& a, double s); +CV_EXPORTS MatExpr operator == (double s, const Mat& a); + +CV_EXPORTS MatExpr operator != (const Mat& a, const Mat& b); +CV_EXPORTS MatExpr operator != (const Mat& a, double s); +CV_EXPORTS MatExpr operator != (double s, const Mat& a); + +CV_EXPORTS MatExpr operator >= (const Mat& a, const Mat& b); +CV_EXPORTS MatExpr operator >= (const Mat& a, double s); +CV_EXPORTS MatExpr operator >= (double s, const Mat& a); + +CV_EXPORTS MatExpr operator > (const Mat& a, const Mat& b); +CV_EXPORTS MatExpr operator > (const Mat& a, double s); +CV_EXPORTS MatExpr operator > (double s, const Mat& a); + +CV_EXPORTS MatExpr min(const Mat& a, const Mat& b); +CV_EXPORTS MatExpr min(const Mat& a, double s); +CV_EXPORTS MatExpr min(double s, const Mat& a); + +CV_EXPORTS MatExpr max(const Mat& a, const Mat& b); +CV_EXPORTS MatExpr max(const Mat& a, double s); +CV_EXPORTS MatExpr max(double s, const Mat& a); + +template static inline MatExpr min(const Mat_<_Tp>& a, const Mat_<_Tp>& b) +{ + return cv::min((const Mat&)a, (const Mat&)b); +} + +template static inline MatExpr min(const Mat_<_Tp>& a, double s) +{ + return cv::min((const Mat&)a, s); +} + +template static inline MatExpr min(double s, const Mat_<_Tp>& a) +{ + return cv::min((const Mat&)a, s); +} + +template static inline MatExpr max(const Mat_<_Tp>& a, const Mat_<_Tp>& b) +{ + return cv::max((const Mat&)a, (const Mat&)b); +} + +template static inline MatExpr max(const Mat_<_Tp>& a, double s) +{ + return cv::max((const Mat&)a, s); +} + +template static inline MatExpr max(double s, const Mat_<_Tp>& a) +{ + return cv::max((const Mat&)a, s); +} + +template static inline void min(const Mat_<_Tp>& a, const Mat_<_Tp>& b, Mat_<_Tp>& c) +{ + cv::min((const Mat&)a, (const Mat&)b, (Mat&)c); +} + +template static inline void min(const Mat_<_Tp>& a, double s, Mat_<_Tp>& c) +{ + cv::min((const Mat&)a, s, (Mat&)c); +} + +template static inline void min(double s, const Mat_<_Tp>& a, Mat_<_Tp>& c) +{ + cv::min((const Mat&)a, s, (Mat&)c); +} + +template static inline void max(const Mat_<_Tp>& a, const Mat_<_Tp>& b, Mat_<_Tp>& c) +{ + cv::max((const Mat&)a, (const Mat&)b, (Mat&)c); +} + +template static inline void max(const Mat_<_Tp>& a, double s, Mat_<_Tp>& c) +{ + cv::max((const Mat&)a, s, (Mat&)c); +} + +template static inline void max(double s, const Mat_<_Tp>& a, Mat_<_Tp>& c) +{ + cv::max((const Mat&)a, s, (Mat&)c); +} + + +CV_EXPORTS MatExpr operator & (const Mat& a, const Mat& b); +CV_EXPORTS MatExpr operator & (const Mat& a, const Scalar& s); +CV_EXPORTS MatExpr operator & (const Scalar& s, const Mat& a); + +CV_EXPORTS MatExpr operator | (const Mat& a, const Mat& b); +CV_EXPORTS MatExpr 
operator | (const Mat& a, const Scalar& s); +CV_EXPORTS MatExpr operator | (const Scalar& s, const Mat& a); + +CV_EXPORTS MatExpr operator ^ (const Mat& a, const Mat& b); +CV_EXPORTS MatExpr operator ^ (const Mat& a, const Scalar& s); +CV_EXPORTS MatExpr operator ^ (const Scalar& s, const Mat& a); + +CV_EXPORTS MatExpr operator ~(const Mat& m); + +CV_EXPORTS MatExpr abs(const Mat& m); +CV_EXPORTS MatExpr abs(const MatExpr& e); + +template static inline MatExpr abs(const Mat_<_Tp>& m) +{ + return cv::abs((const Mat&)m); +} + +////////////////////////////// Augmenting algebraic operations ////////////////////////////////// + +inline Mat& Mat::operator = (const MatExpr& e) +{ + e.op->assign(e, *this); + return *this; +} + +template inline Mat_<_Tp>::Mat_(const MatExpr& e) +{ + e.op->assign(e, *this, DataType<_Tp>::type); +} + +template Mat_<_Tp>& Mat_<_Tp>::operator = (const MatExpr& e) +{ + e.op->assign(e, *this, DataType<_Tp>::type); + return *this; +} + +static inline Mat& operator += (const Mat& a, const Mat& b) +{ + add(a, b, (Mat&)a); + return (Mat&)a; +} + +static inline Mat& operator += (const Mat& a, const Scalar& s) +{ + add(a, s, (Mat&)a); + return (Mat&)a; +} + +template static inline +Mat_<_Tp>& operator += (const Mat_<_Tp>& a, const Mat_<_Tp>& b) +{ + add(a, b, (Mat&)a); + return (Mat_<_Tp>&)a; +} + +template static inline +Mat_<_Tp>& operator += (const Mat_<_Tp>& a, const Scalar& s) +{ + add(a, s, (Mat&)a); + return (Mat_<_Tp>&)a; +} + +static inline Mat& operator += (const Mat& a, const MatExpr& b) +{ + b.op->augAssignAdd(b, (Mat&)a); + return (Mat&)a; +} + +template static inline +Mat_<_Tp>& operator += (const Mat_<_Tp>& a, const MatExpr& b) +{ + b.op->augAssignAdd(b, (Mat&)a); + return (Mat_<_Tp>&)a; +} + +static inline Mat& operator -= (const Mat& a, const Mat& b) +{ + subtract(a, b, (Mat&)a); + return (Mat&)a; +} + +static inline Mat& operator -= (const Mat& a, const Scalar& s) +{ + subtract(a, s, (Mat&)a); + return (Mat&)a; +} + +template static inline +Mat_<_Tp>& operator -= (const Mat_<_Tp>& a, const Mat_<_Tp>& b) +{ + subtract(a, b, (Mat&)a); + return (Mat_<_Tp>&)a; +} + +template static inline +Mat_<_Tp>& operator -= (const Mat_<_Tp>& a, const Scalar& s) +{ + subtract(a, s, (Mat&)a); + return (Mat_<_Tp>&)a; +} + +static inline Mat& operator -= (const Mat& a, const MatExpr& b) +{ + b.op->augAssignSubtract(b, (Mat&)a); + return (Mat&)a; +} + +template static inline +Mat_<_Tp>& operator -= (const Mat_<_Tp>& a, const MatExpr& b) +{ + b.op->augAssignSubtract(b, (Mat&)a); + return (Mat_<_Tp>&)a; +} + +static inline Mat& operator *= (const Mat& a, const Mat& b) +{ + gemm(a, b, 1, Mat(), 0, (Mat&)a, 0); + return (Mat&)a; +} + +static inline Mat& operator *= (const Mat& a, double s) +{ + a.convertTo((Mat&)a, -1, s); + return (Mat&)a; +} + +template static inline +Mat_<_Tp>& operator *= (const Mat_<_Tp>& a, const Mat_<_Tp>& b) +{ + gemm(a, b, 1, Mat(), 0, (Mat&)a, 0); + return (Mat_<_Tp>&)a; +} + +template static inline +Mat_<_Tp>& operator *= (const Mat_<_Tp>& a, double s) +{ + a.convertTo((Mat&)a, -1, s); + return (Mat_<_Tp>&)a; +} + +static inline Mat& operator *= (const Mat& a, const MatExpr& b) +{ + b.op->augAssignMultiply(b, (Mat&)a); + return (Mat&)a; +} + +template static inline +Mat_<_Tp>& operator *= (const Mat_<_Tp>& a, const MatExpr& b) +{ + b.op->augAssignMultiply(b, (Mat&)a); + return (Mat_<_Tp>&)a; +} + +static inline Mat& operator /= (const Mat& a, const Mat& b) +{ + divide(a, b, (Mat&)a); + return (Mat&)a; +} + +static inline Mat& operator /= (const Mat& a, 
double s) +{ + a.convertTo((Mat&)a, -1, 1./s); + return (Mat&)a; +} + +template static inline +Mat_<_Tp>& operator /= (const Mat_<_Tp>& a, const Mat_<_Tp>& b) +{ + divide(a, b, (Mat&)a); + return (Mat_<_Tp>&)a; +} + +template static inline +Mat_<_Tp>& operator /= (const Mat_<_Tp>& a, double s) +{ + a.convertTo((Mat&)a, -1, 1./s); + return (Mat_<_Tp>&)a; +} + +static inline Mat& operator /= (const Mat& a, const MatExpr& b) +{ + b.op->augAssignDivide(b, (Mat&)a); + return (Mat&)a; +} + +template static inline +Mat_<_Tp>& operator /= (const Mat_<_Tp>& a, const MatExpr& b) +{ + b.op->augAssignDivide(b, (Mat&)a); + return (Mat_<_Tp>&)a; +} + +////////////////////////////// Logical operations /////////////////////////////// + +static inline Mat& operator &= (const Mat& a, const Mat& b) +{ + bitwise_and(a, b, (Mat&)a); + return (Mat&)a; +} + +static inline Mat& operator &= (const Mat& a, const Scalar& s) +{ + bitwise_and(a, s, (Mat&)a); + return (Mat&)a; +} + +template static inline Mat_<_Tp>& +operator &= (const Mat_<_Tp>& a, const Mat_<_Tp>& b) +{ + bitwise_and(a, b, (Mat&)a); + return (Mat_<_Tp>&)a; +} + +template static inline Mat_<_Tp>& +operator &= (const Mat_<_Tp>& a, const Scalar& s) +{ + bitwise_and(a, s, (Mat&)a); + return (Mat_<_Tp>&)a; +} + +static inline Mat& operator |= (const Mat& a, const Mat& b) +{ + bitwise_or(a, b, (Mat&)a); + return (Mat&)a; +} + +static inline Mat& operator |= (const Mat& a, const Scalar& s) +{ + bitwise_or(a, s, (Mat&)a); + return (Mat&)a; +} + +template static inline Mat_<_Tp>& +operator |= (const Mat_<_Tp>& a, const Mat_<_Tp>& b) +{ + bitwise_or(a, b, (Mat&)a); + return (Mat_<_Tp>&)a; +} + +template static inline Mat_<_Tp>& +operator |= (const Mat_<_Tp>& a, const Scalar& s) +{ + bitwise_or(a, s, (Mat&)a); + return (Mat_<_Tp>&)a; +} + +static inline Mat& operator ^= (const Mat& a, const Mat& b) +{ + bitwise_xor(a, b, (Mat&)a); + return (Mat&)a; +} + +static inline Mat& operator ^= (const Mat& a, const Scalar& s) +{ + bitwise_xor(a, s, (Mat&)a); + return (Mat&)a; +} + +template static inline Mat_<_Tp>& +operator ^= (const Mat_<_Tp>& a, const Mat_<_Tp>& b) +{ + bitwise_xor(a, b, (Mat&)a); + return (Mat_<_Tp>&)a; +} + +template static inline Mat_<_Tp>& +operator ^= (const Mat_<_Tp>& a, const Scalar& s) +{ + bitwise_xor(a, s, (Mat&)a); + return (Mat_<_Tp>&)a; +} + +/////////////////////////////// Miscellaneous operations ////////////////////////////// + +template void split(const Mat& src, vector >& mv) +{ split(src, (vector&)mv ); } + +////////////////////////////////////////////////////////////// + +template inline MatExpr Mat_<_Tp>::zeros(int rows, int cols) +{ + return Mat::zeros(rows, cols, DataType<_Tp>::type); +} + +template inline MatExpr Mat_<_Tp>::zeros(Size sz) +{ + return Mat::zeros(sz, DataType<_Tp>::type); +} + +template inline MatExpr Mat_<_Tp>::ones(int rows, int cols) +{ + return Mat::ones(rows, cols, DataType<_Tp>::type); +} + +template inline MatExpr Mat_<_Tp>::ones(Size sz) +{ + return Mat::ones(sz, DataType<_Tp>::type); +} + +template inline MatExpr Mat_<_Tp>::eye(int rows, int cols) +{ + return Mat::eye(rows, cols, DataType<_Tp>::type); +} + +template inline MatExpr Mat_<_Tp>::eye(Size sz) +{ + return Mat::eye(sz, DataType<_Tp>::type); +} + +//////////////////////////////// Iterators & Comma initializers ////////////////////////////////// + +inline MatConstIterator::MatConstIterator() + : m(0), elemSize(0), ptr(0), sliceStart(0), sliceEnd(0) {} + +inline MatConstIterator::MatConstIterator(const Mat* _m) + : m(_m), elemSize(_m->elemSize()), 
ptr(0), sliceStart(0), sliceEnd(0) +{ + if( m && m->isContinuous() ) + { + sliceStart = m->data; + sliceEnd = sliceStart + m->total()*elemSize; + } + seek((const int*)0); +} + +inline MatConstIterator::MatConstIterator(const Mat* _m, int _row, int _col) + : m(_m), elemSize(_m->elemSize()), ptr(0), sliceStart(0), sliceEnd(0) +{ + CV_Assert(m && m->dims <= 2); + if( m->isContinuous() ) + { + sliceStart = m->data; + sliceEnd = sliceStart + m->total()*elemSize; + } + int idx[]={_row, _col}; + seek(idx); +} + +inline MatConstIterator::MatConstIterator(const Mat* _m, Point _pt) + : m(_m), elemSize(_m->elemSize()), ptr(0), sliceStart(0), sliceEnd(0) +{ + CV_Assert(m && m->dims <= 2); + if( m->isContinuous() ) + { + sliceStart = m->data; + sliceEnd = sliceStart + m->total()*elemSize; + } + int idx[]={_pt.y, _pt.x}; + seek(idx); +} + +inline MatConstIterator::MatConstIterator(const MatConstIterator& it) + : m(it.m), elemSize(it.elemSize), ptr(it.ptr), sliceStart(it.sliceStart), sliceEnd(it.sliceEnd) +{} + +inline MatConstIterator& MatConstIterator::operator = (const MatConstIterator& it ) +{ + m = it.m; elemSize = it.elemSize; ptr = it.ptr; + sliceStart = it.sliceStart; sliceEnd = it.sliceEnd; + return *this; +} + +inline uchar* MatConstIterator::operator *() const { return ptr; } + +inline MatConstIterator& MatConstIterator::operator += (ptrdiff_t ofs) +{ + if( !m || ofs == 0 ) + return *this; + ptrdiff_t ofsb = ofs*elemSize; + ptr += ofsb; + if( ptr < sliceStart || sliceEnd <= ptr ) + { + ptr -= ofsb; + seek(ofs, true); + } + return *this; +} + +inline MatConstIterator& MatConstIterator::operator -= (ptrdiff_t ofs) +{ return (*this += -ofs); } + +inline MatConstIterator& MatConstIterator::operator --() +{ + if( m && (ptr -= elemSize) < sliceStart ) + { + ptr += elemSize; + seek(-1, true); + } + return *this; +} + +inline MatConstIterator MatConstIterator::operator --(int) +{ + MatConstIterator b = *this; + *this += -1; + return b; +} + +inline MatConstIterator& MatConstIterator::operator ++() +{ + if( m && (ptr += elemSize) >= sliceEnd ) + { + ptr -= elemSize; + seek(1, true); + } + return *this; +} + +inline MatConstIterator MatConstIterator::operator ++(int) +{ + MatConstIterator b = *this; + *this += 1; + return b; +} + +template inline MatConstIterator_<_Tp>::MatConstIterator_() {} + +template inline MatConstIterator_<_Tp>::MatConstIterator_(const Mat_<_Tp>* _m) + : MatConstIterator(_m) {} + +template inline MatConstIterator_<_Tp>:: + MatConstIterator_(const Mat_<_Tp>* _m, int _row, int _col) + : MatConstIterator(_m, _row, _col) {} + +template inline MatConstIterator_<_Tp>:: + MatConstIterator_(const Mat_<_Tp>* _m, Point _pt) + : MatConstIterator(_m, _pt) {} + +template inline MatConstIterator_<_Tp>:: + MatConstIterator_(const MatConstIterator_& it) + : MatConstIterator(it) {} + +template inline MatConstIterator_<_Tp>& + MatConstIterator_<_Tp>::operator = (const MatConstIterator_& it ) +{ + MatConstIterator::operator = (it); + return *this; +} + +template inline _Tp MatConstIterator_<_Tp>::operator *() const { return *(_Tp*)(this->ptr); } + +template inline MatConstIterator_<_Tp>& MatConstIterator_<_Tp>::operator += (ptrdiff_t ofs) +{ + MatConstIterator::operator += (ofs); + return *this; +} + +template inline MatConstIterator_<_Tp>& MatConstIterator_<_Tp>::operator -= (ptrdiff_t ofs) +{ return (*this += -ofs); } + +template inline MatConstIterator_<_Tp>& MatConstIterator_<_Tp>::operator --() +{ + MatConstIterator::operator --(); + return *this; +} + +template inline MatConstIterator_<_Tp> 
MatConstIterator_<_Tp>::operator --(int) +{ + MatConstIterator_ b = *this; + MatConstIterator::operator --(); + return b; +} + +template inline MatConstIterator_<_Tp>& MatConstIterator_<_Tp>::operator ++() +{ + MatConstIterator::operator ++(); + return *this; +} + +template inline MatConstIterator_<_Tp> MatConstIterator_<_Tp>::operator ++(int) +{ + MatConstIterator_ b = *this; + MatConstIterator::operator ++(); + return b; +} + +template inline MatIterator_<_Tp>::MatIterator_() : MatConstIterator_<_Tp>() {} + +template inline MatIterator_<_Tp>::MatIterator_(Mat_<_Tp>* _m) + : MatConstIterator_<_Tp>(_m) {} + +template inline MatIterator_<_Tp>::MatIterator_(Mat_<_Tp>* _m, int _row, int _col) + : MatConstIterator_<_Tp>(_m, _row, _col) {} + +template inline MatIterator_<_Tp>::MatIterator_(const Mat_<_Tp>* _m, Point _pt) + : MatConstIterator_<_Tp>(_m, _pt) {} + +template inline MatIterator_<_Tp>::MatIterator_(const Mat_<_Tp>* _m, const int* _idx) + : MatConstIterator_<_Tp>(_m, _idx) {} + +template inline MatIterator_<_Tp>::MatIterator_(const MatIterator_& it) + : MatConstIterator_<_Tp>(it) {} + +template inline MatIterator_<_Tp>& MatIterator_<_Tp>::operator = (const MatIterator_<_Tp>& it ) +{ + MatConstIterator::operator = (it); + return *this; +} + +template inline _Tp& MatIterator_<_Tp>::operator *() const { return *(_Tp*)(this->ptr); } + +template inline MatIterator_<_Tp>& MatIterator_<_Tp>::operator += (ptrdiff_t ofs) +{ + MatConstIterator::operator += (ofs); + return *this; +} + +template inline MatIterator_<_Tp>& MatIterator_<_Tp>::operator -= (ptrdiff_t ofs) +{ + MatConstIterator::operator += (-ofs); + return *this; +} + +template inline MatIterator_<_Tp>& MatIterator_<_Tp>::operator --() +{ + MatConstIterator::operator --(); + return *this; +} + +template inline MatIterator_<_Tp> MatIterator_<_Tp>::operator --(int) +{ + MatIterator_ b = *this; + MatConstIterator::operator --(); + return b; +} + +template inline MatIterator_<_Tp>& MatIterator_<_Tp>::operator ++() +{ + MatConstIterator::operator ++(); + return *this; +} + +template inline MatIterator_<_Tp> MatIterator_<_Tp>::operator ++(int) +{ + MatIterator_ b = *this; + MatConstIterator::operator ++(); + return b; +} + +template inline Point MatConstIterator_<_Tp>::pos() const +{ + if( !m ) + return Point(); + CV_DbgAssert( m->dims <= 2 ); + if( m->isContinuous() ) + { + ptrdiff_t ofs = (const _Tp*)ptr - (const _Tp*)m->data; + int y = (int)(ofs / m->cols), x = (int)(ofs - (ptrdiff_t)y*m->cols); + return Point(x, y); + } + else + { + ptrdiff_t ofs = (uchar*)ptr - m->data; + int y = (int)(ofs / m->step), x = (int)((ofs - y*m->step)/sizeof(_Tp)); + return Point(x, y); + } +} + +static inline bool +operator == (const MatConstIterator& a, const MatConstIterator& b) +{ return a.m == b.m && a.ptr == b.ptr; } + +template static inline bool +operator != (const MatConstIterator& a, const MatConstIterator& b) +{ return !(a == b); } + +template static inline bool +operator == (const MatConstIterator_<_Tp>& a, const MatConstIterator_<_Tp>& b) +{ return a.m == b.m && a.ptr == b.ptr; } + +template static inline bool +operator != (const MatConstIterator_<_Tp>& a, const MatConstIterator_<_Tp>& b) +{ return a.m != b.m || a.ptr != b.ptr; } + +template static inline bool +operator == (const MatIterator_<_Tp>& a, const MatIterator_<_Tp>& b) +{ return a.m == b.m && a.ptr == b.ptr; } + +template static inline bool +operator != (const MatIterator_<_Tp>& a, const MatIterator_<_Tp>& b) +{ return a.m != b.m || a.ptr != b.ptr; } + +static inline bool +operator < 
(const MatConstIterator& a, const MatConstIterator& b) +{ return a.ptr < b.ptr; } + +static inline bool +operator > (const MatConstIterator& a, const MatConstIterator& b) +{ return a.ptr > b.ptr; } + +static inline bool +operator <= (const MatConstIterator& a, const MatConstIterator& b) +{ return a.ptr <= b.ptr; } + +static inline bool +operator >= (const MatConstIterator& a, const MatConstIterator& b) +{ return a.ptr >= b.ptr; } + +CV_EXPORTS ptrdiff_t operator - (const MatConstIterator& b, const MatConstIterator& a); + +static inline MatConstIterator operator + (const MatConstIterator& a, ptrdiff_t ofs) +{ MatConstIterator b = a; return b += ofs; } + +static inline MatConstIterator operator + (ptrdiff_t ofs, const MatConstIterator& a) +{ MatConstIterator b = a; return b += ofs; } + +static inline MatConstIterator operator - (const MatConstIterator& a, ptrdiff_t ofs) +{ MatConstIterator b = a; return b += -ofs; } + +template static inline MatConstIterator_<_Tp> +operator + (const MatConstIterator_<_Tp>& a, ptrdiff_t ofs) +{ MatConstIterator t = (const MatConstIterator&)a + ofs; return (MatConstIterator_<_Tp>&)t; } + +template static inline MatConstIterator_<_Tp> +operator + (ptrdiff_t ofs, const MatConstIterator_<_Tp>& a) +{ MatConstIterator t = (const MatConstIterator&)a + ofs; return (MatConstIterator_<_Tp>&)t; } + +template static inline MatConstIterator_<_Tp> +operator - (const MatConstIterator_<_Tp>& a, ptrdiff_t ofs) +{ MatConstIterator t = (const MatConstIterator&)a - ofs; return (MatConstIterator_<_Tp>&)t; } + +inline uchar* MatConstIterator::operator [](ptrdiff_t i) const +{ return *(*this + i); } + +template inline _Tp MatConstIterator_<_Tp>::operator [](ptrdiff_t i) const +{ return *(_Tp*)MatConstIterator::operator [](i); } + +template static inline MatIterator_<_Tp> +operator + (const MatIterator_<_Tp>& a, ptrdiff_t ofs) +{ MatConstIterator t = (const MatConstIterator&)a + ofs; return (MatIterator_<_Tp>&)t; } + +template static inline MatIterator_<_Tp> +operator + (ptrdiff_t ofs, const MatIterator_<_Tp>& a) +{ MatConstIterator t = (const MatConstIterator&)a + ofs; return (MatIterator_<_Tp>&)t; } + +template static inline MatIterator_<_Tp> +operator - (const MatIterator_<_Tp>& a, ptrdiff_t ofs) +{ MatConstIterator t = (const MatConstIterator&)a - ofs; return (MatIterator_<_Tp>&)t; } + +template inline _Tp& MatIterator_<_Tp>::operator [](ptrdiff_t i) const +{ return *(*this + i); } + +template inline MatConstIterator_<_Tp> Mat_<_Tp>::begin() const +{ return Mat::begin<_Tp>(); } + +template inline MatConstIterator_<_Tp> Mat_<_Tp>::end() const +{ return Mat::end<_Tp>(); } + +template inline MatIterator_<_Tp> Mat_<_Tp>::begin() +{ return Mat::begin<_Tp>(); } + +template inline MatIterator_<_Tp> Mat_<_Tp>::end() +{ return Mat::end<_Tp>(); } + +template inline MatCommaInitializer_<_Tp>::MatCommaInitializer_(Mat_<_Tp>* _m) : it(_m) {} + +template template inline MatCommaInitializer_<_Tp>& +MatCommaInitializer_<_Tp>::operator , (T2 v) +{ + CV_DbgAssert( this->it < ((const Mat_<_Tp>*)this->it.m)->end() ); + *this->it = _Tp(v); ++this->it; + return *this; +} + +template inline Mat_<_Tp> MatCommaInitializer_<_Tp>::operator *() const +{ + CV_DbgAssert( this->it == ((const Mat_<_Tp>*)this->it.m)->end() ); + return Mat_<_Tp>(*this->it.m); +} + +template inline MatCommaInitializer_<_Tp>::operator Mat_<_Tp>() const +{ + CV_DbgAssert( this->it == ((const Mat_<_Tp>*)this->it.m)->end() ); + return Mat_<_Tp>(*this->it.m); +} + +template static inline MatCommaInitializer_<_Tp> +operator << (const 
Mat_<_Tp>& m, T2 val) +{ + MatCommaInitializer_<_Tp> commaInitializer((Mat_<_Tp>*)&m); + return (commaInitializer, val); +} + +//////////////////////////////// SparseMat //////////////////////////////// + +inline SparseMat::SparseMat() +: flags(MAGIC_VAL), hdr(0) +{ +} + +inline SparseMat::SparseMat(int _dims, const int* _sizes, int _type) +: flags(MAGIC_VAL), hdr(0) +{ + create(_dims, _sizes, _type); +} + +inline SparseMat::SparseMat(const SparseMat& m) +: flags(m.flags), hdr(m.hdr) +{ + addref(); +} + +inline SparseMat::~SparseMat() +{ + release(); +} + +inline SparseMat& SparseMat::operator = (const SparseMat& m) +{ + if( this != &m ) + { + if( m.hdr ) + CV_XADD(&m.hdr->refcount, 1); + release(); + flags = m.flags; + hdr = m.hdr; + } + return *this; +} + +inline SparseMat& SparseMat::operator = (const Mat& m) +{ return (*this = SparseMat(m)); } + +inline SparseMat SparseMat::clone() const +{ + SparseMat temp; + this->copyTo(temp); + return temp; +} + + +inline void SparseMat::assignTo( SparseMat& m, int _type ) const +{ + if( _type < 0 ) + m = *this; + else + convertTo(m, _type); +} + +inline void SparseMat::addref() +{ if( hdr ) CV_XADD(&hdr->refcount, 1); } + +inline void SparseMat::release() +{ + if( hdr && CV_XADD(&hdr->refcount, -1) == 1 ) + delete hdr; + hdr = 0; +} + +inline size_t SparseMat::elemSize() const +{ return CV_ELEM_SIZE(flags); } + +inline size_t SparseMat::elemSize1() const +{ return CV_ELEM_SIZE1(flags); } + +inline int SparseMat::type() const +{ return CV_MAT_TYPE(flags); } + +inline int SparseMat::depth() const +{ return CV_MAT_DEPTH(flags); } + +inline int SparseMat::channels() const +{ return CV_MAT_CN(flags); } + +inline const int* SparseMat::size() const +{ + return hdr ? hdr->size : 0; +} + +inline int SparseMat::size(int i) const +{ + if( hdr ) + { + CV_DbgAssert((unsigned)i < (unsigned)hdr->dims); + return hdr->size[i]; + } + return 0; +} + +inline int SparseMat::dims() const +{ + return hdr ? hdr->dims : 0; +} + +inline size_t SparseMat::nzcount() const +{ + return hdr ? hdr->nodeCount : 0; +} + +inline size_t SparseMat::hash(int i0) const +{ + return (size_t)i0; +} + +inline size_t SparseMat::hash(int i0, int i1) const +{ + return (size_t)(unsigned)i0*HASH_SCALE + (unsigned)i1; +} + +inline size_t SparseMat::hash(int i0, int i1, int i2) const +{ + return ((size_t)(unsigned)i0*HASH_SCALE + (unsigned)i1)*HASH_SCALE + (unsigned)i2; +} + +inline size_t SparseMat::hash(const int* idx) const +{ + size_t h = (unsigned)idx[0]; + if( !hdr ) + return 0; + int i, d = hdr->dims; + for( i = 1; i < d; i++ ) + h = h*HASH_SCALE + (unsigned)idx[i]; + return h; +} + +template inline _Tp& SparseMat::ref(int i0, size_t* hashval) +{ return *(_Tp*)((SparseMat*)this)->ptr(i0, true, hashval); } + +template inline _Tp& SparseMat::ref(int i0, int i1, size_t* hashval) +{ return *(_Tp*)((SparseMat*)this)->ptr(i0, i1, true, hashval); } + +template inline _Tp& SparseMat::ref(int i0, int i1, int i2, size_t* hashval) +{ return *(_Tp*)((SparseMat*)this)->ptr(i0, i1, i2, true, hashval); } + +template inline _Tp& SparseMat::ref(const int* idx, size_t* hashval) +{ return *(_Tp*)((SparseMat*)this)->ptr(idx, true, hashval); } + +template inline _Tp SparseMat::value(int i0, size_t* hashval) const +{ + const _Tp* p = (const _Tp*)((SparseMat*)this)->ptr(i0, false, hashval); + return p ? *p : _Tp(); +} + +template inline _Tp SparseMat::value(int i0, int i1, size_t* hashval) const +{ + const _Tp* p = (const _Tp*)((SparseMat*)this)->ptr(i0, i1, false, hashval); + return p ? 
*p : _Tp(); +} + +template inline _Tp SparseMat::value(int i0, int i1, int i2, size_t* hashval) const +{ + const _Tp* p = (const _Tp*)((SparseMat*)this)->ptr(i0, i1, i2, false, hashval); + return p ? *p : _Tp(); +} + +template inline _Tp SparseMat::value(const int* idx, size_t* hashval) const +{ + const _Tp* p = (const _Tp*)((SparseMat*)this)->ptr(idx, false, hashval); + return p ? *p : _Tp(); +} + +template inline const _Tp* SparseMat::find(int i0, size_t* hashval) const +{ return (const _Tp*)((SparseMat*)this)->ptr(i0, false, hashval); } + +template inline const _Tp* SparseMat::find(int i0, int i1, size_t* hashval) const +{ return (const _Tp*)((SparseMat*)this)->ptr(i0, i1, false, hashval); } + +template inline const _Tp* SparseMat::find(int i0, int i1, int i2, size_t* hashval) const +{ return (const _Tp*)((SparseMat*)this)->ptr(i0, i1, i2, false, hashval); } + +template inline const _Tp* SparseMat::find(const int* idx, size_t* hashval) const +{ return (const _Tp*)((SparseMat*)this)->ptr(idx, false, hashval); } + +template inline _Tp& SparseMat::value(Node* n) +{ return *(_Tp*)((uchar*)n + hdr->valueOffset); } + +template inline const _Tp& SparseMat::value(const Node* n) const +{ return *(const _Tp*)((const uchar*)n + hdr->valueOffset); } + +inline SparseMat::Node* SparseMat::node(size_t nidx) +{ return (Node*)(void*)&hdr->pool[nidx]; } + +inline const SparseMat::Node* SparseMat::node(size_t nidx) const +{ return (const Node*)(void*)&hdr->pool[nidx]; } + +inline SparseMatIterator SparseMat::begin() +{ return SparseMatIterator(this); } + +inline SparseMatConstIterator SparseMat::begin() const +{ return SparseMatConstIterator(this); } + +inline SparseMatIterator SparseMat::end() +{ SparseMatIterator it(this); it.seekEnd(); return it; } + +inline SparseMatConstIterator SparseMat::end() const +{ SparseMatConstIterator it(this); it.seekEnd(); return it; } + +template inline SparseMatIterator_<_Tp> SparseMat::begin() +{ return SparseMatIterator_<_Tp>(this); } + +template inline SparseMatConstIterator_<_Tp> SparseMat::begin() const +{ return SparseMatConstIterator_<_Tp>(this); } + +template inline SparseMatIterator_<_Tp> SparseMat::end() +{ SparseMatIterator_<_Tp> it(this); it.seekEnd(); return it; } + +template inline SparseMatConstIterator_<_Tp> SparseMat::end() const +{ SparseMatConstIterator_<_Tp> it(this); it.seekEnd(); return it; } + + +inline SparseMatConstIterator::SparseMatConstIterator() +: m(0), hashidx(0), ptr(0) +{ +} + +inline SparseMatConstIterator::SparseMatConstIterator(const SparseMatConstIterator& it) +: m(it.m), hashidx(it.hashidx), ptr(it.ptr) +{ +} + +static inline bool operator == (const SparseMatConstIterator& it1, const SparseMatConstIterator& it2) +{ return it1.m == it2.m && it1.ptr == it2.ptr; } + +static inline bool operator != (const SparseMatConstIterator& it1, const SparseMatConstIterator& it2) +{ return !(it1 == it2); } + + +inline SparseMatConstIterator& SparseMatConstIterator::operator = (const SparseMatConstIterator& it) +{ + if( this != &it ) + { + m = it.m; + hashidx = it.hashidx; + ptr = it.ptr; + } + return *this; +} + +template inline const _Tp& SparseMatConstIterator::value() const +{ return *(_Tp*)ptr; } + +inline const SparseMat::Node* SparseMatConstIterator::node() const +{ + return ptr && m && m->hdr ? 
+ (const SparseMat::Node*)(void*)(ptr - m->hdr->valueOffset) : 0; +} + +inline SparseMatConstIterator SparseMatConstIterator::operator ++(int) +{ + SparseMatConstIterator it = *this; + ++*this; + return it; +} + + +inline void SparseMatConstIterator::seekEnd() +{ + if( m && m->hdr ) + { + hashidx = m->hdr->hashtab.size(); + ptr = 0; + } +} + +inline SparseMatIterator::SparseMatIterator() +{} + +inline SparseMatIterator::SparseMatIterator(SparseMat* _m) +: SparseMatConstIterator(_m) +{} + +inline SparseMatIterator::SparseMatIterator(const SparseMatIterator& it) +: SparseMatConstIterator(it) +{ +} + +inline SparseMatIterator& SparseMatIterator::operator = (const SparseMatIterator& it) +{ + (SparseMatConstIterator&)*this = it; + return *this; +} + +template inline _Tp& SparseMatIterator::value() const +{ return *(_Tp*)ptr; } + +inline SparseMat::Node* SparseMatIterator::node() const +{ + return (SparseMat::Node*)SparseMatConstIterator::node(); +} + +inline SparseMatIterator& SparseMatIterator::operator ++() +{ + SparseMatConstIterator::operator ++(); + return *this; +} + +inline SparseMatIterator SparseMatIterator::operator ++(int) +{ + SparseMatIterator it = *this; + ++*this; + return it; +} + + +template inline SparseMat_<_Tp>::SparseMat_() +{ flags = MAGIC_VAL | DataType<_Tp>::type; } + +template inline SparseMat_<_Tp>::SparseMat_(int _dims, const int* _sizes) +: SparseMat(_dims, _sizes, DataType<_Tp>::type) +{} + +template inline SparseMat_<_Tp>::SparseMat_(const SparseMat& m) +{ + if( m.type() == DataType<_Tp>::type ) + *this = (const SparseMat_<_Tp>&)m; + else + m.convertTo(*this, DataType<_Tp>::type); +} + +template inline SparseMat_<_Tp>::SparseMat_(const SparseMat_<_Tp>& m) +{ + this->flags = m.flags; + this->hdr = m.hdr; + if( this->hdr ) + CV_XADD(&this->hdr->refcount, 1); +} + +template inline SparseMat_<_Tp>::SparseMat_(const Mat& m) +{ + SparseMat sm(m); + *this = sm; +} + +template inline SparseMat_<_Tp>::SparseMat_(const CvSparseMat* m) +{ + SparseMat sm(m); + *this = sm; +} + +template inline SparseMat_<_Tp>& +SparseMat_<_Tp>::operator = (const SparseMat_<_Tp>& m) +{ + if( this != &m ) + { + if( m.hdr ) CV_XADD(&m.hdr->refcount, 1); + release(); + flags = m.flags; + hdr = m.hdr; + } + return *this; +} + +template inline SparseMat_<_Tp>& +SparseMat_<_Tp>::operator = (const SparseMat& m) +{ + if( m.type() == DataType<_Tp>::type ) + return (*this = (const SparseMat_<_Tp>&)m); + m.convertTo(*this, DataType<_Tp>::type); + return *this; +} + +template inline SparseMat_<_Tp>& +SparseMat_<_Tp>::operator = (const Mat& m) +{ return (*this = SparseMat(m)); } + +template inline SparseMat_<_Tp> +SparseMat_<_Tp>::clone() const +{ + SparseMat_<_Tp> m; + this->copyTo(m); + return m; +} + +template inline void +SparseMat_<_Tp>::create(int _dims, const int* _sizes) +{ + SparseMat::create(_dims, _sizes, DataType<_Tp>::type); +} + +template inline +SparseMat_<_Tp>::operator CvSparseMat*() const +{ + return SparseMat::operator CvSparseMat*(); +} + +template inline int SparseMat_<_Tp>::type() const +{ return DataType<_Tp>::type; } + +template inline int SparseMat_<_Tp>::depth() const +{ return DataType<_Tp>::depth; } + +template inline int SparseMat_<_Tp>::channels() const +{ return DataType<_Tp>::channels; } + +template inline _Tp& +SparseMat_<_Tp>::ref(int i0, size_t* hashval) +{ return SparseMat::ref<_Tp>(i0, hashval); } + +template inline _Tp +SparseMat_<_Tp>::operator()(int i0, size_t* hashval) const +{ return SparseMat::value<_Tp>(i0, hashval); } + +template inline _Tp& 
+SparseMat_<_Tp>::ref(int i0, int i1, size_t* hashval) +{ return SparseMat::ref<_Tp>(i0, i1, hashval); } + +template inline _Tp +SparseMat_<_Tp>::operator()(int i0, int i1, size_t* hashval) const +{ return SparseMat::value<_Tp>(i0, i1, hashval); } + +template inline _Tp& +SparseMat_<_Tp>::ref(int i0, int i1, int i2, size_t* hashval) +{ return SparseMat::ref<_Tp>(i0, i1, i2, hashval); } + +template inline _Tp +SparseMat_<_Tp>::operator()(int i0, int i1, int i2, size_t* hashval) const +{ return SparseMat::value<_Tp>(i0, i1, i2, hashval); } + +template inline _Tp& +SparseMat_<_Tp>::ref(const int* idx, size_t* hashval) +{ return SparseMat::ref<_Tp>(idx, hashval); } + +template inline _Tp +SparseMat_<_Tp>::operator()(const int* idx, size_t* hashval) const +{ return SparseMat::value<_Tp>(idx, hashval); } + +template inline SparseMatIterator_<_Tp> SparseMat_<_Tp>::begin() +{ return SparseMatIterator_<_Tp>(this); } + +template inline SparseMatConstIterator_<_Tp> SparseMat_<_Tp>::begin() const +{ return SparseMatConstIterator_<_Tp>(this); } + +template inline SparseMatIterator_<_Tp> SparseMat_<_Tp>::end() +{ SparseMatIterator_<_Tp> it(this); it.seekEnd(); return it; } + +template inline SparseMatConstIterator_<_Tp> SparseMat_<_Tp>::end() const +{ SparseMatConstIterator_<_Tp> it(this); it.seekEnd(); return it; } + +template inline +SparseMatConstIterator_<_Tp>::SparseMatConstIterator_() +{} + +template inline +SparseMatConstIterator_<_Tp>::SparseMatConstIterator_(const SparseMat_<_Tp>* _m) +: SparseMatConstIterator(_m) +{} + +template inline +SparseMatConstIterator_<_Tp>::SparseMatConstIterator_(const SparseMat* _m) +: SparseMatConstIterator(_m) +{ + CV_Assert( _m->type() == DataType<_Tp>::type ); +} + +template inline +SparseMatConstIterator_<_Tp>::SparseMatConstIterator_(const SparseMatConstIterator_<_Tp>& it) +: SparseMatConstIterator(it) +{} + +template inline SparseMatConstIterator_<_Tp>& +SparseMatConstIterator_<_Tp>::operator = (const SparseMatConstIterator_<_Tp>& it) +{ return reinterpret_cast&> + (*reinterpret_cast(this) = + reinterpret_cast(it)); } + +template inline const _Tp& +SparseMatConstIterator_<_Tp>::operator *() const +{ return *(const _Tp*)this->ptr; } + +template inline SparseMatConstIterator_<_Tp>& +SparseMatConstIterator_<_Tp>::operator ++() +{ + SparseMatConstIterator::operator ++(); + return *this; +} + +template inline SparseMatConstIterator_<_Tp> +SparseMatConstIterator_<_Tp>::operator ++(int) +{ + SparseMatConstIterator_<_Tp> it = *this; + SparseMatConstIterator::operator ++(); + return it; +} + +template inline +SparseMatIterator_<_Tp>::SparseMatIterator_() +{} + +template inline +SparseMatIterator_<_Tp>::SparseMatIterator_(SparseMat_<_Tp>* _m) +: SparseMatConstIterator_<_Tp>(_m) +{} + +template inline +SparseMatIterator_<_Tp>::SparseMatIterator_(SparseMat* _m) +: SparseMatConstIterator_<_Tp>(_m) +{} + +template inline +SparseMatIterator_<_Tp>::SparseMatIterator_(const SparseMatIterator_<_Tp>& it) +: SparseMatConstIterator_<_Tp>(it) +{} + +template inline SparseMatIterator_<_Tp>& +SparseMatIterator_<_Tp>::operator = (const SparseMatIterator_<_Tp>& it) +{ return reinterpret_cast&> + (*reinterpret_cast(this) = + reinterpret_cast(it)); } + +template inline _Tp& +SparseMatIterator_<_Tp>::operator *() const +{ return *(_Tp*)this->ptr; } + +template inline SparseMatIterator_<_Tp>& +SparseMatIterator_<_Tp>::operator ++() +{ + SparseMatConstIterator::operator ++(); + return *this; +} + +template inline SparseMatIterator_<_Tp> +SparseMatIterator_<_Tp>::operator ++(int) +{ + 
SparseMatIterator_<_Tp> it = *this; + SparseMatConstIterator::operator ++(); + return it; +} + +} + +#endif +#endif diff --git a/src/c/hardware/rasberrypi/libraries/opencv/opencv2/core/opengl_interop.hpp b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/core/opengl_interop.hpp new file mode 100644 index 0000000..7ecaa8e --- /dev/null +++ b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/core/opengl_interop.hpp @@ -0,0 +1,284 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef __OPENCV_OPENGL_INTEROP_HPP__ +#define __OPENCV_OPENGL_INTEROP_HPP__ + +#ifdef __cplusplus + +#include "opencv2/core/core.hpp" +#include "opencv2/core/opengl_interop_deprecated.hpp" + +namespace cv { namespace ogl { + +/////////////////// OpenGL Objects /////////////////// + +//! Smart pointer for OpenGL buffer memory with reference counting. +class CV_EXPORTS Buffer +{ +public: + enum Target + { + ARRAY_BUFFER = 0x8892, //!< The buffer will be used as a source for vertex data + ELEMENT_ARRAY_BUFFER = 0x8893, //!< The buffer will be used for indices (in glDrawElements, for example) + PIXEL_PACK_BUFFER = 0x88EB, //!< The buffer will be used for reading from OpenGL textures + PIXEL_UNPACK_BUFFER = 0x88EC //!< The buffer will be used for writing to OpenGL textures + }; + + enum Access + { + READ_ONLY = 0x88B8, + WRITE_ONLY = 0x88B9, + READ_WRITE = 0x88BA + }; + + //! create empty buffer + Buffer(); + + //! 
create buffer from existed buffer id + Buffer(int arows, int acols, int atype, unsigned int abufId, bool autoRelease = false); + Buffer(Size asize, int atype, unsigned int abufId, bool autoRelease = false); + + //! create buffer + Buffer(int arows, int acols, int atype, Target target = ARRAY_BUFFER, bool autoRelease = false); + Buffer(Size asize, int atype, Target target = ARRAY_BUFFER, bool autoRelease = false); + + //! copy from host/device memory + explicit Buffer(InputArray arr, Target target = ARRAY_BUFFER, bool autoRelease = false); + + //! create buffer + void create(int arows, int acols, int atype, Target target = ARRAY_BUFFER, bool autoRelease = false); + void create(Size asize, int atype, Target target = ARRAY_BUFFER, bool autoRelease = false) { create(asize.height, asize.width, atype, target, autoRelease); } + + //! release memory and delete buffer object + void release(); + + //! set auto release mode (if true, release will be called in object's destructor) + void setAutoRelease(bool flag); + + //! copy from host/device memory + void copyFrom(InputArray arr, Target target = ARRAY_BUFFER, bool autoRelease = false); + + //! copy to host/device memory + void copyTo(OutputArray arr, Target target = ARRAY_BUFFER, bool autoRelease = false) const; + + //! create copy of current buffer + Buffer clone(Target target = ARRAY_BUFFER, bool autoRelease = false) const; + + //! bind buffer for specified target + void bind(Target target) const; + + //! unbind any buffers from specified target + static void unbind(Target target); + + //! map to host memory + Mat mapHost(Access access); + void unmapHost(); + + //! map to device memory + gpu::GpuMat mapDevice(); + void unmapDevice(); + + int rows() const { return rows_; } + int cols() const { return cols_; } + Size size() const { return Size(cols_, rows_); } + bool empty() const { return rows_ == 0 || cols_ == 0; } + + int type() const { return type_; } + int depth() const { return CV_MAT_DEPTH(type_); } + int channels() const { return CV_MAT_CN(type_); } + int elemSize() const { return CV_ELEM_SIZE(type_); } + int elemSize1() const { return CV_ELEM_SIZE1(type_); } + + unsigned int bufId() const; + + class Impl; + +private: + Ptr impl_; + int rows_; + int cols_; + int type_; +}; + +//! Smart pointer for OpenGL 2D texture memory with reference counting. +class CV_EXPORTS Texture2D +{ +public: + enum Format + { + NONE = 0, + DEPTH_COMPONENT = 0x1902, //!< Depth + RGB = 0x1907, //!< Red, Green, Blue + RGBA = 0x1908 //!< Red, Green, Blue, Alpha + }; + + //! create empty texture + Texture2D(); + + //! create texture from existed texture id + Texture2D(int arows, int acols, Format aformat, unsigned int atexId, bool autoRelease = false); + Texture2D(Size asize, Format aformat, unsigned int atexId, bool autoRelease = false); + + //! create texture + Texture2D(int arows, int acols, Format aformat, bool autoRelease = false); + Texture2D(Size asize, Format aformat, bool autoRelease = false); + + //! copy from host/device memory + explicit Texture2D(InputArray arr, bool autoRelease = false); + + //! create texture + void create(int arows, int acols, Format aformat, bool autoRelease = false); + void create(Size asize, Format aformat, bool autoRelease = false) { create(asize.height, asize.width, aformat, autoRelease); } + + //! release memory and delete texture object + void release(); + + //! set auto release mode (if true, release will be called in object's destructor) + void setAutoRelease(bool flag); + + //! 
copy from host/device memory + void copyFrom(InputArray arr, bool autoRelease = false); + + //! copy to host/device memory + void copyTo(OutputArray arr, int ddepth = CV_32F, bool autoRelease = false) const; + + //! bind texture to current active texture unit for GL_TEXTURE_2D target + void bind() const; + + int rows() const { return rows_; } + int cols() const { return cols_; } + Size size() const { return Size(cols_, rows_); } + bool empty() const { return rows_ == 0 || cols_ == 0; } + + Format format() const { return format_; } + + unsigned int texId() const; + + class Impl; + +private: + Ptr impl_; + int rows_; + int cols_; + Format format_; +}; + +//! OpenGL Arrays +class CV_EXPORTS Arrays +{ +public: + Arrays(); + + void setVertexArray(InputArray vertex); + void resetVertexArray(); + + void setColorArray(InputArray color); + void resetColorArray(); + + void setNormalArray(InputArray normal); + void resetNormalArray(); + + void setTexCoordArray(InputArray texCoord); + void resetTexCoordArray(); + + void release(); + + void setAutoRelease(bool flag); + + void bind() const; + + int size() const { return size_; } + bool empty() const { return size_ == 0; } + +private: + int size_; + Buffer vertex_; + Buffer color_; + Buffer normal_; + Buffer texCoord_; +}; + +/////////////////// Render Functions /////////////////// + +//! render texture rectangle in window +CV_EXPORTS void render(const Texture2D& tex, + Rect_ wndRect = Rect_(0.0, 0.0, 1.0, 1.0), + Rect_ texRect = Rect_(0.0, 0.0, 1.0, 1.0)); + +//! render mode +enum { + POINTS = 0x0000, + LINES = 0x0001, + LINE_LOOP = 0x0002, + LINE_STRIP = 0x0003, + TRIANGLES = 0x0004, + TRIANGLE_STRIP = 0x0005, + TRIANGLE_FAN = 0x0006, + QUADS = 0x0007, + QUAD_STRIP = 0x0008, + POLYGON = 0x0009 +}; + +//! render OpenGL arrays +CV_EXPORTS void render(const Arrays& arr, int mode = POINTS, Scalar color = Scalar::all(255)); +CV_EXPORTS void render(const Arrays& arr, InputArray indices, int mode = POINTS, Scalar color = Scalar::all(255)); + +}} // namespace cv::gl + +namespace cv { namespace gpu { + +//! set a CUDA device to use OpenGL interoperability +CV_EXPORTS void setGlDevice(int device = 0); + +}} + +namespace cv { + +template <> CV_EXPORTS void Ptr::delete_obj(); +template <> CV_EXPORTS void Ptr::delete_obj(); + +} + +#endif // __cplusplus + +#endif // __OPENCV_OPENGL_INTEROP_HPP__ diff --git a/src/c/hardware/rasberrypi/libraries/opencv/opencv2/core/opengl_interop_deprecated.hpp b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/core/opengl_interop_deprecated.hpp new file mode 100644 index 0000000..04e3fc0 --- /dev/null +++ b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/core/opengl_interop_deprecated.hpp @@ -0,0 +1,300 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. 
+// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef __OPENCV_OPENGL_INTEROP_DEPRECATED_HPP__ +#define __OPENCV_OPENGL_INTEROP_DEPRECATED_HPP__ + +#ifdef __cplusplus + +#include "opencv2/core/core.hpp" + +namespace cv +{ +//! Smart pointer for OpenGL buffer memory with reference counting. +class CV_EXPORTS GlBuffer +{ +public: + enum Usage + { + ARRAY_BUFFER = 0x8892, // buffer will use for OpenGL arrays (vertices, colors, normals, etc) + TEXTURE_BUFFER = 0x88EC // buffer will ise for OpenGL textures + }; + + //! create empty buffer + explicit GlBuffer(Usage usage); + + //! create buffer + GlBuffer(int rows, int cols, int type, Usage usage); + GlBuffer(Size size, int type, Usage usage); + + //! copy from host/device memory + GlBuffer(InputArray mat, Usage usage); + + void create(int rows, int cols, int type, Usage usage); + void create(Size size, int type, Usage usage); + void create(int rows, int cols, int type); + void create(Size size, int type); + + void release(); + + //! copy from host/device memory + void copyFrom(InputArray mat); + + void bind() const; + void unbind() const; + + //! map to host memory + Mat mapHost(); + void unmapHost(); + + //! map to device memory + gpu::GpuMat mapDevice(); + void unmapDevice(); + + inline int rows() const { return rows_; } + inline int cols() const { return cols_; } + inline Size size() const { return Size(cols_, rows_); } + inline bool empty() const { return rows_ == 0 || cols_ == 0; } + + inline int type() const { return type_; } + inline int depth() const { return CV_MAT_DEPTH(type_); } + inline int channels() const { return CV_MAT_CN(type_); } + inline int elemSize() const { return CV_ELEM_SIZE(type_); } + inline int elemSize1() const { return CV_ELEM_SIZE1(type_); } + + inline Usage usage() const { return usage_; } + + class Impl; +private: + int rows_; + int cols_; + int type_; + Usage usage_; + + Ptr impl_; +}; + +template <> CV_EXPORTS void Ptr::delete_obj(); + +//! Smart pointer for OpenGL 2d texture memory with reference counting. +class CV_EXPORTS GlTexture +{ +public: + //! 
create empty texture + GlTexture(); + + //! create texture + GlTexture(int rows, int cols, int type); + GlTexture(Size size, int type); + + //! copy from host/device memory + explicit GlTexture(InputArray mat, bool bgra = true); + + void create(int rows, int cols, int type); + void create(Size size, int type); + void release(); + + //! copy from host/device memory + void copyFrom(InputArray mat, bool bgra = true); + + void bind() const; + void unbind() const; + + inline int rows() const { return rows_; } + inline int cols() const { return cols_; } + inline Size size() const { return Size(cols_, rows_); } + inline bool empty() const { return rows_ == 0 || cols_ == 0; } + + inline int type() const { return type_; } + inline int depth() const { return CV_MAT_DEPTH(type_); } + inline int channels() const { return CV_MAT_CN(type_); } + inline int elemSize() const { return CV_ELEM_SIZE(type_); } + inline int elemSize1() const { return CV_ELEM_SIZE1(type_); } + + class Impl; +private: + int rows_; + int cols_; + int type_; + + Ptr impl_; + GlBuffer buf_; +}; + +template <> CV_EXPORTS void Ptr::delete_obj(); + +//! OpenGL Arrays +class CV_EXPORTS GlArrays +{ +public: + inline GlArrays() + : vertex_(GlBuffer::ARRAY_BUFFER), color_(GlBuffer::ARRAY_BUFFER), normal_(GlBuffer::ARRAY_BUFFER), texCoord_(GlBuffer::ARRAY_BUFFER) + { + } + + void setVertexArray(InputArray vertex); + inline void resetVertexArray() { vertex_.release(); } + + void setColorArray(InputArray color, bool bgra = true); + inline void resetColorArray() { color_.release(); } + + void setNormalArray(InputArray normal); + inline void resetNormalArray() { normal_.release(); } + + void setTexCoordArray(InputArray texCoord); + inline void resetTexCoordArray() { texCoord_.release(); } + + void bind() const; + void unbind() const; + + inline int rows() const { return vertex_.rows(); } + inline int cols() const { return vertex_.cols(); } + inline Size size() const { return vertex_.size(); } + inline bool empty() const { return vertex_.empty(); } + +private: + GlBuffer vertex_; + GlBuffer color_; + GlBuffer normal_; + GlBuffer texCoord_; +}; + +//! OpenGL Font +class CV_EXPORTS GlFont +{ +public: + enum Weight + { + WEIGHT_LIGHT = 300, + WEIGHT_NORMAL = 400, + WEIGHT_SEMIBOLD = 600, + WEIGHT_BOLD = 700, + WEIGHT_BLACK = 900 + }; + + enum Style + { + STYLE_NORMAL = 0, + STYLE_ITALIC = 1, + STYLE_UNDERLINE = 2 + }; + + static Ptr get(const std::string& family, int height = 12, Weight weight = WEIGHT_NORMAL, Style style = STYLE_NORMAL); + + void draw(const char* str, int len) const; + + inline const std::string& family() const { return family_; } + inline int height() const { return height_; } + inline Weight weight() const { return weight_; } + inline Style style() const { return style_; } + +private: + GlFont(const std::string& family, int height, Weight weight, Style style); + + std::string family_; + int height_; + Weight weight_; + Style style_; + + unsigned int base_; + + GlFont(const GlFont&); + GlFont& operator =(const GlFont&); +}; + +//! render functions + +//! render texture rectangle in window +CV_EXPORTS void render(const GlTexture& tex, + Rect_ wndRect = Rect_(0.0, 0.0, 1.0, 1.0), + Rect_ texRect = Rect_(0.0, 0.0, 1.0, 1.0)); + +//! render mode +namespace RenderMode { + enum { + POINTS = 0x0000, + LINES = 0x0001, + LINE_LOOP = 0x0002, + LINE_STRIP = 0x0003, + TRIANGLES = 0x0004, + TRIANGLE_STRIP = 0x0005, + TRIANGLE_FAN = 0x0006, + QUADS = 0x0007, + QUAD_STRIP = 0x0008, + POLYGON = 0x0009 + }; +} + +//! 
render OpenGL arrays +CV_EXPORTS void render(const GlArrays& arr, int mode = RenderMode::POINTS, Scalar color = Scalar::all(255)); + +CV_EXPORTS void render(const std::string& str, const Ptr& font, Scalar color, Point2d pos); + +//! OpenGL camera +class CV_EXPORTS GlCamera +{ +public: + GlCamera(); + + void lookAt(Point3d eye, Point3d center, Point3d up); + void setCameraPos(Point3d pos, double yaw, double pitch, double roll); + + void setScale(Point3d scale); + + void setProjectionMatrix(const Mat& projectionMatrix, bool transpose = true); + void setPerspectiveProjection(double fov, double aspect, double zNear, double zFar); + void setOrthoProjection(double left, double right, double bottom, double top, double zNear, double zFar); + + void setupProjectionMatrix() const; + void setupModelViewMatrix() const; +}; + +inline void GlBuffer::create(Size _size, int _type, Usage _usage) { create(_size.height, _size.width, _type, _usage); } +inline void GlBuffer::create(int _rows, int _cols, int _type) { create(_rows, _cols, _type, usage()); } +inline void GlBuffer::create(Size _size, int _type) { create(_size.height, _size.width, _type, usage()); } +inline void GlTexture::create(Size _size, int _type) { create(_size.height, _size.width, _type); } + +} // namespace cv + +#endif // __cplusplus + +#endif // __OPENCV_OPENGL_INTEROP_DEPRECATED_HPP__ diff --git a/src/c/hardware/rasberrypi/libraries/opencv/opencv2/core/operations.hpp b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/core/operations.hpp new file mode 100644 index 0000000..0ae51c6 --- /dev/null +++ b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/core/operations.hpp @@ -0,0 +1,4123 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. 
+// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef __OPENCV_CORE_OPERATIONS_HPP__ +#define __OPENCV_CORE_OPERATIONS_HPP__ + +#ifndef SKIP_INCLUDES + #include + #include +#endif // SKIP_INCLUDES + + +#ifdef __cplusplus + +/////// exchange-add operation for atomic operations on reference counters /////// +#if defined __INTEL_COMPILER && !(defined WIN32 || defined _WIN32) // atomic increment on the linux version of the Intel(tm) compiler + #define CV_XADD(addr,delta) _InterlockedExchangeAdd(const_cast(reinterpret_cast(addr)), delta) +#elif defined __GNUC__ + + #if defined __clang__ && __clang_major__ >= 3 && !defined __ANDROID__ && !defined __EMSCRIPTEN__ && !defined(__CUDACC__) + #ifdef __ATOMIC_SEQ_CST + #define CV_XADD(addr, delta) __c11_atomic_fetch_add((_Atomic(int)*)(addr), (delta), __ATOMIC_SEQ_CST) + #else + #define CV_XADD(addr, delta) __atomic_fetch_add((_Atomic(int)*)(addr), (delta), 5) + #endif + #elif __GNUC__*10 + __GNUC_MINOR__ >= 42 + + #if !(defined WIN32 || defined _WIN32) && (defined __i486__ || defined __i586__ || \ + defined __i686__ || defined __MMX__ || defined __SSE__ || defined __ppc__) || \ + (defined __GNUC__ && defined _STLPORT_MAJOR) || \ + defined __EMSCRIPTEN__ + + #define CV_XADD __sync_fetch_and_add + #else + #include + #define CV_XADD __gnu_cxx::__exchange_and_add + #endif + + #else + #include + #if __GNUC__*10 + __GNUC_MINOR__ >= 34 + #define CV_XADD __gnu_cxx::__exchange_and_add + #else + #define CV_XADD __exchange_and_add + #endif + #endif + +#elif defined WIN32 || defined _WIN32 || defined WINCE + namespace cv { CV_EXPORTS int _interlockedExchangeAdd(int* addr, int delta); } + #define CV_XADD cv::_interlockedExchangeAdd + +#else + static inline int CV_XADD(int* addr, int delta) + { int tmp = *addr; *addr += delta; return tmp; } +#endif + +#include + +#ifdef _MSC_VER +# pragma warning(push) +# pragma warning(disable:4127) //conditional expression is constant +#endif + +namespace cv +{ + +using std::cos; +using std::sin; +using std::max; +using std::min; +using std::exp; +using std::log; +using std::pow; +using std::sqrt; + + +/////////////// saturate_cast (used in image & signal processing) /////////////////// + +template static inline _Tp saturate_cast(uchar v) { return _Tp(v); } +template static inline _Tp saturate_cast(schar v) { return _Tp(v); } +template static inline _Tp saturate_cast(ushort v) { return _Tp(v); } +template static inline _Tp saturate_cast(short v) { return _Tp(v); } +template static inline _Tp saturate_cast(unsigned v) { return _Tp(v); } +template static inline _Tp saturate_cast(int v) { return _Tp(v); } +template static inline _Tp saturate_cast(float v) { return _Tp(v); } +template static inline _Tp saturate_cast(double v) { return _Tp(v); } + +template<> inline uchar saturate_cast(schar v) +{ return (uchar)std::max((int)v, 0); } +template<> inline uchar saturate_cast(ushort v) +{ return (uchar)std::min((unsigned)v, (unsigned)UCHAR_MAX); } +template<> inline uchar saturate_cast(int v) +{ return (uchar)((unsigned)v <= UCHAR_MAX ? 
v : v > 0 ? UCHAR_MAX : 0); } +template<> inline uchar saturate_cast(short v) +{ return saturate_cast((int)v); } +template<> inline uchar saturate_cast(unsigned v) +{ return (uchar)std::min(v, (unsigned)UCHAR_MAX); } +template<> inline uchar saturate_cast(float v) +{ int iv = cvRound(v); return saturate_cast(iv); } +template<> inline uchar saturate_cast(double v) +{ int iv = cvRound(v); return saturate_cast(iv); } + +template<> inline schar saturate_cast(uchar v) +{ return (schar)std::min((int)v, SCHAR_MAX); } +template<> inline schar saturate_cast(ushort v) +{ return (schar)std::min((unsigned)v, (unsigned)SCHAR_MAX); } +template<> inline schar saturate_cast(int v) +{ + return (schar)((unsigned)(v-SCHAR_MIN) <= (unsigned)UCHAR_MAX ? + v : v > 0 ? SCHAR_MAX : SCHAR_MIN); +} +template<> inline schar saturate_cast(short v) +{ return saturate_cast((int)v); } +template<> inline schar saturate_cast(unsigned v) +{ return (schar)std::min(v, (unsigned)SCHAR_MAX); } + +template<> inline schar saturate_cast(float v) +{ int iv = cvRound(v); return saturate_cast(iv); } +template<> inline schar saturate_cast(double v) +{ int iv = cvRound(v); return saturate_cast(iv); } + +template<> inline ushort saturate_cast(schar v) +{ return (ushort)std::max((int)v, 0); } +template<> inline ushort saturate_cast(short v) +{ return (ushort)std::max((int)v, 0); } +template<> inline ushort saturate_cast(int v) +{ return (ushort)((unsigned)v <= (unsigned)USHRT_MAX ? v : v > 0 ? USHRT_MAX : 0); } +template<> inline ushort saturate_cast(unsigned v) +{ return (ushort)std::min(v, (unsigned)USHRT_MAX); } +template<> inline ushort saturate_cast(float v) +{ int iv = cvRound(v); return saturate_cast(iv); } +template<> inline ushort saturate_cast(double v) +{ int iv = cvRound(v); return saturate_cast(iv); } + +template<> inline short saturate_cast(ushort v) +{ return (short)std::min((int)v, SHRT_MAX); } +template<> inline short saturate_cast(int v) +{ + return (short)((unsigned)(v - SHRT_MIN) <= (unsigned)USHRT_MAX ? + v : v > 0 ? SHRT_MAX : SHRT_MIN); +} +template<> inline short saturate_cast(unsigned v) +{ return (short)std::min(v, (unsigned)SHRT_MAX); } +template<> inline short saturate_cast(float v) +{ int iv = cvRound(v); return saturate_cast(iv); } +template<> inline short saturate_cast(double v) +{ int iv = cvRound(v); return saturate_cast(iv); } + +template<> inline int saturate_cast(float v) { return cvRound(v); } +template<> inline int saturate_cast(double v) { return cvRound(v); } + +// we intentionally do not clip negative numbers, to make -1 become 0xffffffff etc. 
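// ---------------------------------------------------------------------------
// Illustrative aside (not part of the original OpenCV header): a minimal
// sketch of how the saturate_cast<> specializations above behave, assuming
// the opencv2 core headers are on the include path. Out-of-range values are
// clamped to the destination type's range, while conversions to unsigned
// deliberately let negative integers wrap, as the comment above notes.
//
//     #include <opencv2/core/core.hpp>
//     #include <cassert>
//
//     static void saturate_cast_demo()
//     {
//         assert( cv::saturate_cast<uchar>(300)   == 255 );   // clamped to UCHAR_MAX
//         assert( cv::saturate_cast<uchar>(-5)    == 0 );     // clamped to 0
//         assert( cv::saturate_cast<schar>(200)   == 127 );   // clamped to SCHAR_MAX
//         assert( cv::saturate_cast<short>(70000) == 32767 ); // clamped to SHRT_MAX
//         assert( cv::saturate_cast<uchar>(3.7f)  == 4 );     // floats are rounded first
//     }
// ---------------------------------------------------------------------------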
+template<> inline unsigned saturate_cast(float v){ return cvRound(v); } +template<> inline unsigned saturate_cast(double v) { return cvRound(v); } + +inline int fast_abs(uchar v) { return v; } +inline int fast_abs(schar v) { return std::abs((int)v); } +inline int fast_abs(ushort v) { return v; } +inline int fast_abs(short v) { return std::abs((int)v); } +inline int fast_abs(int v) { return std::abs(v); } +inline float fast_abs(float v) { return std::abs(v); } +inline double fast_abs(double v) { return std::abs(v); } + +//////////////////////////////// Matx ///////////////////////////////// + + +template inline Matx<_Tp, m, n>::Matx() +{ + for(int i = 0; i < channels; i++) val[i] = _Tp(0); +} + +template inline Matx<_Tp, m, n>::Matx(_Tp v0) +{ + val[0] = v0; + for(int i = 1; i < channels; i++) val[i] = _Tp(0); +} + +template inline Matx<_Tp, m, n>::Matx(_Tp v0, _Tp v1) +{ + assert(channels >= 2); + val[0] = v0; val[1] = v1; + for(int i = 2; i < channels; i++) val[i] = _Tp(0); +} + +template inline Matx<_Tp, m, n>::Matx(_Tp v0, _Tp v1, _Tp v2) +{ + assert(channels >= 3); + val[0] = v0; val[1] = v1; val[2] = v2; + for(int i = 3; i < channels; i++) val[i] = _Tp(0); +} + +template inline Matx<_Tp, m, n>::Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3) +{ + assert(channels >= 4); + val[0] = v0; val[1] = v1; val[2] = v2; val[3] = v3; + for(int i = 4; i < channels; i++) val[i] = _Tp(0); +} + +template inline Matx<_Tp, m, n>::Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3, _Tp v4) +{ + assert(channels >= 5); + val[0] = v0; val[1] = v1; val[2] = v2; val[3] = v3; val[4] = v4; + for(int i = 5; i < channels; i++) val[i] = _Tp(0); +} + +template inline Matx<_Tp, m, n>::Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3, + _Tp v4, _Tp v5) +{ + assert(channels >= 6); + val[0] = v0; val[1] = v1; val[2] = v2; val[3] = v3; + val[4] = v4; val[5] = v5; + for(int i = 6; i < channels; i++) val[i] = _Tp(0); +} + +template inline Matx<_Tp, m, n>::Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3, + _Tp v4, _Tp v5, _Tp v6) +{ + assert(channels >= 7); + val[0] = v0; val[1] = v1; val[2] = v2; val[3] = v3; + val[4] = v4; val[5] = v5; val[6] = v6; + for(int i = 7; i < channels; i++) val[i] = _Tp(0); +} + +template inline Matx<_Tp, m, n>::Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3, + _Tp v4, _Tp v5, _Tp v6, _Tp v7) +{ + assert(channels >= 8); + val[0] = v0; val[1] = v1; val[2] = v2; val[3] = v3; + val[4] = v4; val[5] = v5; val[6] = v6; val[7] = v7; + for(int i = 8; i < channels; i++) val[i] = _Tp(0); +} + +template inline Matx<_Tp, m, n>::Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3, + _Tp v4, _Tp v5, _Tp v6, _Tp v7, + _Tp v8) +{ + assert(channels >= 9); + val[0] = v0; val[1] = v1; val[2] = v2; val[3] = v3; + val[4] = v4; val[5] = v5; val[6] = v6; val[7] = v7; + val[8] = v8; + for(int i = 9; i < channels; i++) val[i] = _Tp(0); +} + +template inline Matx<_Tp, m, n>::Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3, + _Tp v4, _Tp v5, _Tp v6, _Tp v7, + _Tp v8, _Tp v9) +{ + assert(channels >= 10); + val[0] = v0; val[1] = v1; val[2] = v2; val[3] = v3; + val[4] = v4; val[5] = v5; val[6] = v6; val[7] = v7; + val[8] = v8; val[9] = v9; + for(int i = 10; i < channels; i++) val[i] = _Tp(0); +} + + +template +inline Matx<_Tp,m,n>::Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3, + _Tp v4, _Tp v5, _Tp v6, _Tp v7, + _Tp v8, _Tp v9, _Tp v10, _Tp v11) +{ + assert(channels == 12); + val[0] = v0; val[1] = v1; val[2] = v2; val[3] = v3; + val[4] = v4; val[5] = v5; val[6] = v6; val[7] = v7; + val[8] = v8; val[9] = v9; val[10] = v10; val[11] = v11; +} + +template +inline Matx<_Tp,m,n>::Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3, + _Tp 
v4, _Tp v5, _Tp v6, _Tp v7, + _Tp v8, _Tp v9, _Tp v10, _Tp v11, + _Tp v12, _Tp v13, _Tp v14, _Tp v15) +{ + assert(channels == 16); + val[0] = v0; val[1] = v1; val[2] = v2; val[3] = v3; + val[4] = v4; val[5] = v5; val[6] = v6; val[7] = v7; + val[8] = v8; val[9] = v9; val[10] = v10; val[11] = v11; + val[12] = v12; val[13] = v13; val[14] = v14; val[15] = v15; +} + +template inline Matx<_Tp, m, n>::Matx(const _Tp* values) +{ + for( int i = 0; i < channels; i++ ) val[i] = values[i]; +} + +template inline Matx<_Tp, m, n> Matx<_Tp, m, n>::all(_Tp alpha) +{ + Matx<_Tp, m, n> M; + for( int i = 0; i < m*n; i++ ) M.val[i] = alpha; + return M; +} + +template inline +Matx<_Tp,m,n> Matx<_Tp,m,n>::zeros() +{ + return all(0); +} + +template inline +Matx<_Tp,m,n> Matx<_Tp,m,n>::ones() +{ + return all(1); +} + +template inline +Matx<_Tp,m,n> Matx<_Tp,m,n>::eye() +{ + Matx<_Tp,m,n> M; + for(int i = 0; i < MIN(m,n); i++) + M(i,i) = 1; + return M; +} + +template inline _Tp Matx<_Tp, m, n>::dot(const Matx<_Tp, m, n>& M) const +{ + _Tp s = 0; + for( int i = 0; i < m*n; i++ ) s += val[i]*M.val[i]; + return s; +} + + +template inline double Matx<_Tp, m, n>::ddot(const Matx<_Tp, m, n>& M) const +{ + double s = 0; + for( int i = 0; i < m*n; i++ ) s += (double)val[i]*M.val[i]; + return s; +} + + +/** @cond IGNORED */ +template inline +Matx<_Tp,m,n> Matx<_Tp,m,n>::diag(const typename Matx<_Tp,m,n>::diag_type& d) +{ + Matx<_Tp,m,n> M; + for(int i = 0; i < MIN(m,n); i++) + M(i,i) = d(i, 0); + return M; +} +/** @endcond */ + +template inline +Matx<_Tp,m,n> Matx<_Tp,m,n>::randu(_Tp a, _Tp b) +{ + Matx<_Tp,m,n> M; + Mat matM(M, false); + cv::randu(matM, Scalar(a), Scalar(b)); + return M; +} + +template inline +Matx<_Tp,m,n> Matx<_Tp,m,n>::randn(_Tp a, _Tp b) +{ + Matx<_Tp,m,n> M; + Mat matM(M, false); + cv::randn(matM, Scalar(a), Scalar(b)); + return M; +} + +template template +inline Matx<_Tp, m, n>::operator Matx() const +{ + Matx M; + for( int i = 0; i < m*n; i++ ) M.val[i] = saturate_cast(val[i]); + return M; +} + + +template template inline +Matx<_Tp, m1, n1> Matx<_Tp, m, n>::reshape() const +{ + CV_DbgAssert(m1*n1 == m*n); + return (const Matx<_Tp, m1, n1>&)*this; +} + + +template +template inline +Matx<_Tp, m1, n1> Matx<_Tp, m, n>::get_minor(int i, int j) const +{ + CV_DbgAssert(0 <= i && i+m1 <= m && 0 <= j && j+n1 <= n); + Matx<_Tp, m1, n1> s; + for( int di = 0; di < m1; di++ ) + for( int dj = 0; dj < n1; dj++ ) + s(di, dj) = (*this)(i+di, j+dj); + return s; +} + + +template inline +Matx<_Tp, 1, n> Matx<_Tp, m, n>::row(int i) const +{ + CV_DbgAssert((unsigned)i < (unsigned)m); + return Matx<_Tp, 1, n>(&val[i*n]); +} + + +template inline +Matx<_Tp, m, 1> Matx<_Tp, m, n>::col(int j) const +{ + CV_DbgAssert((unsigned)j < (unsigned)n); + Matx<_Tp, m, 1> v; + for( int i = 0; i < m; i++ ) + v.val[i] = val[i*n + j]; + return v; +} + + +template inline +typename Matx<_Tp, m, n>::diag_type Matx<_Tp, m, n>::diag() const +{ + diag_type d; + for( int i = 0; i < MIN(m, n); i++ ) + d.val[i] = val[i*n + i]; + return d; +} + + +template inline +const _Tp& Matx<_Tp, m, n>::operator ()(int i, int j) const +{ + CV_DbgAssert( (unsigned)i < (unsigned)m && (unsigned)j < (unsigned)n ); + return this->val[i*n + j]; +} + + +template inline +_Tp& Matx<_Tp, m, n>::operator ()(int i, int j) +{ + CV_DbgAssert( (unsigned)i < (unsigned)m && (unsigned)j < (unsigned)n ); + return val[i*n + j]; +} + + +template inline +const _Tp& Matx<_Tp, m, n>::operator ()(int i) const +{ + CV_DbgAssert( (m == 1 || n == 1) && (unsigned)i < (unsigned)(m+n-1) ); + 
return val[i]; +} + + +template inline +_Tp& Matx<_Tp, m, n>::operator ()(int i) +{ + CV_DbgAssert( (m == 1 || n == 1) && (unsigned)i < (unsigned)(m+n-1) ); + return val[i]; +} + + +template static inline +Matx<_Tp1, m, n>& operator += (Matx<_Tp1, m, n>& a, const Matx<_Tp2, m, n>& b) +{ + for( int i = 0; i < m*n; i++ ) + a.val[i] = saturate_cast<_Tp1>(a.val[i] + b.val[i]); + return a; +} + + +template static inline +Matx<_Tp1, m, n>& operator -= (Matx<_Tp1, m, n>& a, const Matx<_Tp2, m, n>& b) +{ + for( int i = 0; i < m*n; i++ ) + a.val[i] = saturate_cast<_Tp1>(a.val[i] - b.val[i]); + return a; +} + + +template inline +Matx<_Tp,m,n>::Matx(const Matx<_Tp, m, n>& a, const Matx<_Tp, m, n>& b, Matx_AddOp) +{ + for( int i = 0; i < m*n; i++ ) + val[i] = saturate_cast<_Tp>(a.val[i] + b.val[i]); +} + + +template inline +Matx<_Tp,m,n>::Matx(const Matx<_Tp, m, n>& a, const Matx<_Tp, m, n>& b, Matx_SubOp) +{ + for( int i = 0; i < m*n; i++ ) + val[i] = saturate_cast<_Tp>(a.val[i] - b.val[i]); +} + + +template template inline +Matx<_Tp,m,n>::Matx(const Matx<_Tp, m, n>& a, _T2 alpha, Matx_ScaleOp) +{ + for( int i = 0; i < m*n; i++ ) + val[i] = saturate_cast<_Tp>(a.val[i] * alpha); +} + + +template inline +Matx<_Tp,m,n>::Matx(const Matx<_Tp, m, n>& a, const Matx<_Tp, m, n>& b, Matx_MulOp) +{ + for( int i = 0; i < m*n; i++ ) + val[i] = saturate_cast<_Tp>(a.val[i] * b.val[i]); +} + + +template template inline +Matx<_Tp,m,n>::Matx(const Matx<_Tp, m, l>& a, const Matx<_Tp, l, n>& b, Matx_MatMulOp) +{ + for( int i = 0; i < m; i++ ) + for( int j = 0; j < n; j++ ) + { + _Tp s = 0; + for( int k = 0; k < l; k++ ) + s += a(i, k) * b(k, j); + val[i*n + j] = s; + } +} + + +template inline +Matx<_Tp,m,n>::Matx(const Matx<_Tp, n, m>& a, Matx_TOp) +{ + for( int i = 0; i < m; i++ ) + for( int j = 0; j < n; j++ ) + val[i*n + j] = a(j, i); +} + + +template static inline +Matx<_Tp, m, n> operator + (const Matx<_Tp, m, n>& a, const Matx<_Tp, m, n>& b) +{ + return Matx<_Tp, m, n>(a, b, Matx_AddOp()); +} + + +template static inline +Matx<_Tp, m, n> operator - (const Matx<_Tp, m, n>& a, const Matx<_Tp, m, n>& b) +{ + return Matx<_Tp, m, n>(a, b, Matx_SubOp()); +} + + +template static inline +Matx<_Tp, m, n>& operator *= (Matx<_Tp, m, n>& a, int alpha) +{ + for( int i = 0; i < m*n; i++ ) + a.val[i] = saturate_cast<_Tp>(a.val[i] * alpha); + return a; +} + +template static inline +Matx<_Tp, m, n>& operator *= (Matx<_Tp, m, n>& a, float alpha) +{ + for( int i = 0; i < m*n; i++ ) + a.val[i] = saturate_cast<_Tp>(a.val[i] * alpha); + return a; +} + +template static inline +Matx<_Tp, m, n>& operator *= (Matx<_Tp, m, n>& a, double alpha) +{ + for( int i = 0; i < m*n; i++ ) + a.val[i] = saturate_cast<_Tp>(a.val[i] * alpha); + return a; +} + +template static inline +Matx<_Tp, m, n> operator * (const Matx<_Tp, m, n>& a, int alpha) +{ + return Matx<_Tp, m, n>(a, alpha, Matx_ScaleOp()); +} + +template static inline +Matx<_Tp, m, n> operator * (const Matx<_Tp, m, n>& a, float alpha) +{ + return Matx<_Tp, m, n>(a, alpha, Matx_ScaleOp()); +} + +template static inline +Matx<_Tp, m, n> operator * (const Matx<_Tp, m, n>& a, double alpha) +{ + return Matx<_Tp, m, n>(a, alpha, Matx_ScaleOp()); +} + +template static inline +Matx<_Tp, m, n> operator * (int alpha, const Matx<_Tp, m, n>& a) +{ + return Matx<_Tp, m, n>(a, alpha, Matx_ScaleOp()); +} + +template static inline +Matx<_Tp, m, n> operator * (float alpha, const Matx<_Tp, m, n>& a) +{ + return Matx<_Tp, m, n>(a, alpha, Matx_ScaleOp()); +} + +template static inline +Matx<_Tp, m, n> operator * 
(double alpha, const Matx<_Tp, m, n>& a) +{ + return Matx<_Tp, m, n>(a, alpha, Matx_ScaleOp()); +} + +template static inline +Matx<_Tp, m, n> operator - (const Matx<_Tp, m, n>& a) +{ + return Matx<_Tp, m, n>(a, -1, Matx_ScaleOp()); +} + + +template static inline +Matx<_Tp, m, n> operator * (const Matx<_Tp, m, l>& a, const Matx<_Tp, l, n>& b) +{ + return Matx<_Tp, m, n>(a, b, Matx_MatMulOp()); +} + + +template static inline +Vec<_Tp, m> operator * (const Matx<_Tp, m, n>& a, const Vec<_Tp, n>& b) +{ + Matx<_Tp, m, 1> c(a, b, Matx_MatMulOp()); + return reinterpret_cast&>(c); +} + + +template static inline +Point_<_Tp> operator * (const Matx<_Tp, 2, 2>& a, const Point_<_Tp>& b) +{ + Matx<_Tp, 2, 1> tmp = a*Vec<_Tp,2>(b.x, b.y); + return Point_<_Tp>(tmp.val[0], tmp.val[1]); +} + + +template static inline +Point3_<_Tp> operator * (const Matx<_Tp, 3, 3>& a, const Point3_<_Tp>& b) +{ + Matx<_Tp, 3, 1> tmp = a*Vec<_Tp,3>(b.x, b.y, b.z); + return Point3_<_Tp>(tmp.val[0], tmp.val[1], tmp.val[2]); +} + + +template static inline +Point3_<_Tp> operator * (const Matx<_Tp, 3, 3>& a, const Point_<_Tp>& b) +{ + Matx<_Tp, 3, 1> tmp = a*Vec<_Tp,3>(b.x, b.y, 1); + return Point3_<_Tp>(tmp.val[0], tmp.val[1], tmp.val[2]); +} + + +template static inline +Matx<_Tp, 4, 1> operator * (const Matx<_Tp, 4, 4>& a, const Point3_<_Tp>& b) +{ + return a*Matx<_Tp, 4, 1>(b.x, b.y, b.z, 1); +} + + +template static inline +Scalar operator * (const Matx<_Tp, 4, 4>& a, const Scalar& b) +{ + Matx c(Matx(a), b, Matx_MatMulOp()); + return static_cast(c); +} + + +static inline +Scalar operator * (const Matx& a, const Scalar& b) +{ + Matx c(a, b, Matx_MatMulOp()); + return static_cast(c); +} + + +template inline +Matx<_Tp, m, n> Matx<_Tp, m, n>::mul(const Matx<_Tp, m, n>& a) const +{ + return Matx<_Tp, m, n>(*this, a, Matx_MulOp()); +} + + +CV_EXPORTS int LU(float* A, size_t astep, int m, float* b, size_t bstep, int n); +CV_EXPORTS int LU(double* A, size_t astep, int m, double* b, size_t bstep, int n); +CV_EXPORTS bool Cholesky(float* A, size_t astep, int m, float* b, size_t bstep, int n); +CV_EXPORTS bool Cholesky(double* A, size_t astep, int m, double* b, size_t bstep, int n); + + +template struct Matx_DetOp +{ + double operator ()(const Matx<_Tp, m, m>& a) const + { + Matx<_Tp, m, m> temp = a; + double p = LU(temp.val, m*sizeof(_Tp), m, 0, 0, 0); + if( p == 0 ) + return p; + for( int i = 0; i < m; i++ ) + p *= temp(i, i); + return 1./p; + } +}; + + +template struct Matx_DetOp<_Tp, 1> +{ + double operator ()(const Matx<_Tp, 1, 1>& a) const + { + return a(0,0); + } +}; + + +template struct Matx_DetOp<_Tp, 2> +{ + double operator ()(const Matx<_Tp, 2, 2>& a) const + { + return a(0,0)*a(1,1) - a(0,1)*a(1,0); + } +}; + + +template struct Matx_DetOp<_Tp, 3> +{ + double operator ()(const Matx<_Tp, 3, 3>& a) const + { + return a(0,0)*(a(1,1)*a(2,2) - a(2,1)*a(1,2)) - + a(0,1)*(a(1,0)*a(2,2) - a(2,0)*a(1,2)) + + a(0,2)*(a(1,0)*a(2,1) - a(2,0)*a(1,1)); + } +}; + +template static inline +double determinant(const Matx<_Tp, m, m>& a) +{ + return Matx_DetOp<_Tp, m>()(a); +} + + +template static inline +double trace(const Matx<_Tp, m, n>& a) +{ + _Tp s = 0; + for( int i = 0; i < std::min(m, n); i++ ) + s += a(i,i); + return s; +} + + +template inline +Matx<_Tp, n, m> Matx<_Tp, m, n>::t() const +{ + return Matx<_Tp, n, m>(*this, Matx_TOp()); +} + + +template struct Matx_FastInvOp +{ + bool operator()(const Matx<_Tp, m, m>& a, Matx<_Tp, m, m>& b, int method) const + { + Matx<_Tp, m, m> temp = a; + + // assume that b is all 0's on input => make it 
a unity matrix + for( int i = 0; i < m; i++ ) + b(i, i) = (_Tp)1; + + if( method == DECOMP_CHOLESKY ) + return Cholesky(temp.val, m*sizeof(_Tp), m, b.val, m*sizeof(_Tp), m); + + return LU(temp.val, m*sizeof(_Tp), m, b.val, m*sizeof(_Tp), m) != 0; + } +}; + + +template struct Matx_FastInvOp<_Tp, 2> +{ + bool operator()(const Matx<_Tp, 2, 2>& a, Matx<_Tp, 2, 2>& b, int) const + { + _Tp d = determinant(a); + if( d == 0 ) + return false; + d = 1/d; + b(1,1) = a(0,0)*d; + b(0,0) = a(1,1)*d; + b(0,1) = -a(0,1)*d; + b(1,0) = -a(1,0)*d; + return true; + } +}; + + +template struct Matx_FastInvOp<_Tp, 3> +{ + bool operator()(const Matx<_Tp, 3, 3>& a, Matx<_Tp, 3, 3>& b, int) const + { + _Tp d = (_Tp)determinant(a); + if( d == 0 ) + return false; + d = 1/d; + b(0,0) = (a(1,1) * a(2,2) - a(1,2) * a(2,1)) * d; + b(0,1) = (a(0,2) * a(2,1) - a(0,1) * a(2,2)) * d; + b(0,2) = (a(0,1) * a(1,2) - a(0,2) * a(1,1)) * d; + + b(1,0) = (a(1,2) * a(2,0) - a(1,0) * a(2,2)) * d; + b(1,1) = (a(0,0) * a(2,2) - a(0,2) * a(2,0)) * d; + b(1,2) = (a(0,2) * a(1,0) - a(0,0) * a(1,2)) * d; + + b(2,0) = (a(1,0) * a(2,1) - a(1,1) * a(2,0)) * d; + b(2,1) = (a(0,1) * a(2,0) - a(0,0) * a(2,1)) * d; + b(2,2) = (a(0,0) * a(1,1) - a(0,1) * a(1,0)) * d; + return true; + } +}; + + +template inline +Matx<_Tp, n, m> Matx<_Tp, m, n>::inv(int method) const +{ + Matx<_Tp, n, m> b; + bool ok; + if( method == DECOMP_LU || method == DECOMP_CHOLESKY ) + ok = Matx_FastInvOp<_Tp, m>()(*this, b, method); + else + { + Mat A(*this, false), B(b, false); + ok = (invert(A, B, method) != 0); + } + return ok ? b : Matx<_Tp, n, m>::zeros(); +} + + +template struct Matx_FastSolveOp +{ + bool operator()(const Matx<_Tp, m, m>& a, const Matx<_Tp, m, n>& b, + Matx<_Tp, m, n>& x, int method) const + { + Matx<_Tp, m, m> temp = a; + x = b; + if( method == DECOMP_CHOLESKY ) + return Cholesky(temp.val, m*sizeof(_Tp), m, x.val, n*sizeof(_Tp), n); + + return LU(temp.val, m*sizeof(_Tp), m, x.val, n*sizeof(_Tp), n) != 0; + } +}; + + +template struct Matx_FastSolveOp<_Tp, 2, 1> +{ + bool operator()(const Matx<_Tp, 2, 2>& a, const Matx<_Tp, 2, 1>& b, + Matx<_Tp, 2, 1>& x, int) const + { + _Tp d = determinant(a); + if( d == 0 ) + return false; + d = 1/d; + x(0) = (b(0)*a(1,1) - b(1)*a(0,1))*d; + x(1) = (b(1)*a(0,0) - b(0)*a(1,0))*d; + return true; + } +}; + + +template struct Matx_FastSolveOp<_Tp, 3, 1> +{ + bool operator()(const Matx<_Tp, 3, 3>& a, const Matx<_Tp, 3, 1>& b, + Matx<_Tp, 3, 1>& x, int) const + { + _Tp d = (_Tp)determinant(a); + if( d == 0 ) + return false; + d = 1/d; + x(0) = d*(b(0)*(a(1,1)*a(2,2) - a(1,2)*a(2,1)) - + a(0,1)*(b(1)*a(2,2) - a(1,2)*b(2)) + + a(0,2)*(b(1)*a(2,1) - a(1,1)*b(2))); + + x(1) = d*(a(0,0)*(b(1)*a(2,2) - a(1,2)*b(2)) - + b(0)*(a(1,0)*a(2,2) - a(1,2)*a(2,0)) + + a(0,2)*(a(1,0)*b(2) - b(1)*a(2,0))); + + x(2) = d*(a(0,0)*(a(1,1)*b(2) - b(1)*a(2,1)) - + a(0,1)*(a(1,0)*b(2) - b(1)*a(2,0)) + + b(0)*(a(1,0)*a(2,1) - a(1,1)*a(2,0))); + return true; + } +}; + + +template template inline +Matx<_Tp, n, l> Matx<_Tp, m, n>::solve(const Matx<_Tp, m, l>& rhs, int method) const +{ + Matx<_Tp, n, l> x; + bool ok; + if( method == DECOMP_LU || method == DECOMP_CHOLESKY ) + ok = Matx_FastSolveOp<_Tp, m, l>()(*this, rhs, x, method); + else + { + Mat A(*this, false), B(rhs, false), X(x, false); + ok = cv::solve(A, B, X, method); + } + + return ok ? 
x : Matx<_Tp, n, l>::zeros(); +} + +template inline +Vec<_Tp, n> Matx<_Tp, m, n>::solve(const Vec<_Tp, m>& rhs, int method) const +{ + Matx<_Tp, n, 1> x = solve(reinterpret_cast&>(rhs), method); + return reinterpret_cast&>(x); +} + +template static inline +_AccTp normL2Sqr(const _Tp* a, int n) +{ + _AccTp s = 0; + int i=0; + #if CV_ENABLE_UNROLLED + for( ; i <= n - 4; i += 4 ) + { + _AccTp v0 = a[i], v1 = a[i+1], v2 = a[i+2], v3 = a[i+3]; + s += v0*v0 + v1*v1 + v2*v2 + v3*v3; + } +#endif + for( ; i < n; i++ ) + { + _AccTp v = a[i]; + s += v*v; + } + return s; +} + + +template static inline +_AccTp normL1(const _Tp* a, int n) +{ + _AccTp s = 0; + int i = 0; +#if CV_ENABLE_UNROLLED + for(; i <= n - 4; i += 4 ) + { + s += (_AccTp)fast_abs(a[i]) + (_AccTp)fast_abs(a[i+1]) + + (_AccTp)fast_abs(a[i+2]) + (_AccTp)fast_abs(a[i+3]); + } +#endif + for( ; i < n; i++ ) + s += fast_abs(a[i]); + return s; +} + + +template static inline +_AccTp normInf(const _Tp* a, int n) +{ + _AccTp s = 0; + for( int i = 0; i < n; i++ ) + s = std::max(s, (_AccTp)fast_abs(a[i])); + return s; +} + + +template static inline +_AccTp normL2Sqr(const _Tp* a, const _Tp* b, int n) +{ + _AccTp s = 0; + int i= 0; +#if CV_ENABLE_UNROLLED + for(; i <= n - 4; i += 4 ) + { + _AccTp v0 = _AccTp(a[i] - b[i]), v1 = _AccTp(a[i+1] - b[i+1]), v2 = _AccTp(a[i+2] - b[i+2]), v3 = _AccTp(a[i+3] - b[i+3]); + s += v0*v0 + v1*v1 + v2*v2 + v3*v3; + } +#endif + for( ; i < n; i++ ) + { + _AccTp v = _AccTp(a[i] - b[i]); + s += v*v; + } + return s; +} + +CV_EXPORTS float normL2Sqr_(const float* a, const float* b, int n); +CV_EXPORTS float normL1_(const float* a, const float* b, int n); +CV_EXPORTS int normL1_(const uchar* a, const uchar* b, int n); +CV_EXPORTS int normHamming(const uchar* a, const uchar* b, int n); +CV_EXPORTS int normHamming(const uchar* a, const uchar* b, int n, int cellSize); + +template<> inline float normL2Sqr(const float* a, const float* b, int n) +{ + if( n >= 8 ) + return normL2Sqr_(a, b, n); + float s = 0; + for( int i = 0; i < n; i++ ) + { + float v = a[i] - b[i]; + s += v*v; + } + return s; +} + + +template static inline +_AccTp normL1(const _Tp* a, const _Tp* b, int n) +{ + _AccTp s = 0; + int i= 0; +#if CV_ENABLE_UNROLLED + for(; i <= n - 4; i += 4 ) + { + _AccTp v0 = _AccTp(a[i] - b[i]), v1 = _AccTp(a[i+1] - b[i+1]), v2 = _AccTp(a[i+2] - b[i+2]), v3 = _AccTp(a[i+3] - b[i+3]); + s += std::abs(v0) + std::abs(v1) + std::abs(v2) + std::abs(v3); + } +#endif + for( ; i < n; i++ ) + { + _AccTp v = _AccTp(a[i] - b[i]); + s += std::abs(v); + } + return s; +} + +template<> inline float normL1(const float* a, const float* b, int n) +{ + if( n >= 8 ) + return normL1_(a, b, n); + float s = 0; + for( int i = 0; i < n; i++ ) + { + float v = a[i] - b[i]; + s += std::abs(v); + } + return s; +} + +template<> inline int normL1(const uchar* a, const uchar* b, int n) +{ + return normL1_(a, b, n); +} + +template static inline +_AccTp normInf(const _Tp* a, const _Tp* b, int n) +{ + _AccTp s = 0; + for( int i = 0; i < n; i++ ) + { + _AccTp v0 = a[i] - b[i]; + s = std::max(s, std::abs(v0)); + } + return s; +} + + +template static inline +double norm(const Matx<_Tp, m, n>& M) +{ + return std::sqrt(normL2Sqr<_Tp, double>(M.val, m*n)); +} + + +template static inline +double norm(const Matx<_Tp, m, n>& M, int normType) +{ + return normType == NORM_INF ? (double)normInf<_Tp, typename DataType<_Tp>::work_type>(M.val, m*n) : + normType == NORM_L1 ? 
(double)normL1<_Tp, typename DataType<_Tp>::work_type>(M.val, m*n) : + std::sqrt((double)normL2Sqr<_Tp, typename DataType<_Tp>::work_type>(M.val, m*n)); +} + + +template static inline +bool operator == (const Matx<_Tp, m, n>& a, const Matx<_Tp, m, n>& b) +{ + for( int i = 0; i < m*n; i++ ) + if( a.val[i] != b.val[i] ) return false; + return true; +} + +template static inline +bool operator != (const Matx<_Tp, m, n>& a, const Matx<_Tp, m, n>& b) +{ + return !(a == b); +} + + +template static inline +MatxCommaInitializer<_Tp, m, n> operator << (const Matx<_Tp, m, n>& mtx, _T2 val) +{ + MatxCommaInitializer<_Tp, m, n> commaInitializer((Matx<_Tp, m, n>*)&mtx); + return (commaInitializer, val); +} + +template inline +MatxCommaInitializer<_Tp, m, n>::MatxCommaInitializer(Matx<_Tp, m, n>* _mtx) + : dst(_mtx), idx(0) +{} + +template template inline +MatxCommaInitializer<_Tp, m, n>& MatxCommaInitializer<_Tp, m, n>::operator , (_T2 value) +{ + CV_DbgAssert( idx < m*n ); + dst->val[idx++] = saturate_cast<_Tp>(value); + return *this; +} + +template inline +Matx<_Tp, m, n> MatxCommaInitializer<_Tp, m, n>::operator *() const +{ + CV_DbgAssert( idx == n*m ); + return *dst; +} + +/////////////////////////// short vector (Vec) ///////////////////////////// + +template inline Vec<_Tp, cn>::Vec() +{} + +template inline Vec<_Tp, cn>::Vec(_Tp v0) + : Matx<_Tp, cn, 1>(v0) +{} + +template inline Vec<_Tp, cn>::Vec(_Tp v0, _Tp v1) + : Matx<_Tp, cn, 1>(v0, v1) +{} + +template inline Vec<_Tp, cn>::Vec(_Tp v0, _Tp v1, _Tp v2) + : Matx<_Tp, cn, 1>(v0, v1, v2) +{} + +template inline Vec<_Tp, cn>::Vec(_Tp v0, _Tp v1, _Tp v2, _Tp v3) + : Matx<_Tp, cn, 1>(v0, v1, v2, v3) +{} + +template inline Vec<_Tp, cn>::Vec(_Tp v0, _Tp v1, _Tp v2, _Tp v3, _Tp v4) + : Matx<_Tp, cn, 1>(v0, v1, v2, v3, v4) +{} + +template inline Vec<_Tp, cn>::Vec(_Tp v0, _Tp v1, _Tp v2, _Tp v3, _Tp v4, _Tp v5) + : Matx<_Tp, cn, 1>(v0, v1, v2, v3, v4, v5) +{} + +template inline Vec<_Tp, cn>::Vec(_Tp v0, _Tp v1, _Tp v2, _Tp v3, + _Tp v4, _Tp v5, _Tp v6) + : Matx<_Tp, cn, 1>(v0, v1, v2, v3, v4, v5, v6) +{} + +template inline Vec<_Tp, cn>::Vec(_Tp v0, _Tp v1, _Tp v2, _Tp v3, + _Tp v4, _Tp v5, _Tp v6, _Tp v7) + : Matx<_Tp, cn, 1>(v0, v1, v2, v3, v4, v5, v6, v7) +{} + +template inline Vec<_Tp, cn>::Vec(_Tp v0, _Tp v1, _Tp v2, _Tp v3, + _Tp v4, _Tp v5, _Tp v6, _Tp v7, + _Tp v8) + : Matx<_Tp, cn, 1>(v0, v1, v2, v3, v4, v5, v6, v7, v8) +{} + +template inline Vec<_Tp, cn>::Vec(_Tp v0, _Tp v1, _Tp v2, _Tp v3, + _Tp v4, _Tp v5, _Tp v6, _Tp v7, + _Tp v8, _Tp v9) + : Matx<_Tp, cn, 1>(v0, v1, v2, v3, v4, v5, v6, v7, v8, v9) +{} + +template inline Vec<_Tp, cn>::Vec(const _Tp* values) + : Matx<_Tp, cn, 1>(values) +{} + + +template inline Vec<_Tp, cn>::Vec(const Vec<_Tp, cn>& m) + : Matx<_Tp, cn, 1>(m.val) +{} + +template inline +Vec<_Tp, cn>::Vec(const Matx<_Tp, cn, 1>& a, const Matx<_Tp, cn, 1>& b, Matx_AddOp op) +: Matx<_Tp, cn, 1>(a, b, op) +{} + +template inline +Vec<_Tp, cn>::Vec(const Matx<_Tp, cn, 1>& a, const Matx<_Tp, cn, 1>& b, Matx_SubOp op) +: Matx<_Tp, cn, 1>(a, b, op) +{} + +template template inline +Vec<_Tp, cn>::Vec(const Matx<_Tp, cn, 1>& a, _T2 alpha, Matx_ScaleOp op) +: Matx<_Tp, cn, 1>(a, alpha, op) +{} + +template inline Vec<_Tp, cn> Vec<_Tp, cn>::all(_Tp alpha) +{ + Vec v; + for( int i = 0; i < cn; i++ ) v.val[i] = alpha; + return v; +} + +template inline Vec<_Tp, cn> Vec<_Tp, cn>::mul(const Vec<_Tp, cn>& v) const +{ + Vec<_Tp, cn> w; + for( int i = 0; i < cn; i++ ) w.val[i] = saturate_cast<_Tp>(this->val[i]*v.val[i]); + return w; +} + +template 
Vec<_Tp, 2> conjugate(const Vec<_Tp, 2>& v) +{ + return Vec<_Tp, 2>(v[0], -v[1]); +} + +template Vec<_Tp, 4> conjugate(const Vec<_Tp, 4>& v) +{ + return Vec<_Tp, 4>(v[0], -v[1], -v[2], -v[3]); +} + +template<> inline Vec Vec::conj() const +{ + return conjugate(*this); +} + +template<> inline Vec Vec::conj() const +{ + return conjugate(*this); +} + +template<> inline Vec Vec::conj() const +{ + return conjugate(*this); +} + +template<> inline Vec Vec::conj() const +{ + return conjugate(*this); +} + +template inline Vec<_Tp, cn> Vec<_Tp, cn>::cross(const Vec<_Tp, cn>&) const +{ + CV_Error(CV_StsError, "for arbitrary-size vector there is no cross-product defined"); + return Vec<_Tp, cn>(); +} + +template template +inline Vec<_Tp, cn>::operator Vec() const +{ + Vec v; + for( int i = 0; i < cn; i++ ) v.val[i] = saturate_cast(this->val[i]); + return v; +} + +template inline Vec<_Tp, cn>::operator CvScalar() const +{ + CvScalar s = {{0,0,0,0}}; + int i; + for( i = 0; i < std::min(cn, 4); i++ ) s.val[i] = this->val[i]; + for( ; i < 4; i++ ) s.val[i] = 0; + return s; +} + +template inline const _Tp& Vec<_Tp, cn>::operator [](int i) const +{ + CV_DbgAssert( (unsigned)i < (unsigned)cn ); + return this->val[i]; +} + +template inline _Tp& Vec<_Tp, cn>::operator [](int i) +{ + CV_DbgAssert( (unsigned)i < (unsigned)cn ); + return this->val[i]; +} + +template inline const _Tp& Vec<_Tp, cn>::operator ()(int i) const +{ + CV_DbgAssert( (unsigned)i < (unsigned)cn ); + return this->val[i]; +} + +template inline _Tp& Vec<_Tp, cn>::operator ()(int i) +{ + CV_DbgAssert( (unsigned)i < (unsigned)cn ); + return this->val[i]; +} + +template static inline Vec<_Tp1, cn>& +operator += (Vec<_Tp1, cn>& a, const Vec<_Tp2, cn>& b) +{ + for( int i = 0; i < cn; i++ ) + a.val[i] = saturate_cast<_Tp1>(a.val[i] + b.val[i]); + return a; +} + +template static inline Vec<_Tp1, cn>& +operator -= (Vec<_Tp1, cn>& a, const Vec<_Tp2, cn>& b) +{ + for( int i = 0; i < cn; i++ ) + a.val[i] = saturate_cast<_Tp1>(a.val[i] - b.val[i]); + return a; +} + +template static inline Vec<_Tp, cn> +operator + (const Vec<_Tp, cn>& a, const Vec<_Tp, cn>& b) +{ + return Vec<_Tp, cn>(a, b, Matx_AddOp()); +} + +template static inline Vec<_Tp, cn> +operator - (const Vec<_Tp, cn>& a, const Vec<_Tp, cn>& b) +{ + return Vec<_Tp, cn>(a, b, Matx_SubOp()); +} + +template static inline +Vec<_Tp, cn>& operator *= (Vec<_Tp, cn>& a, int alpha) +{ + for( int i = 0; i < cn; i++ ) + a[i] = saturate_cast<_Tp>(a[i]*alpha); + return a; +} + +template static inline +Vec<_Tp, cn>& operator *= (Vec<_Tp, cn>& a, float alpha) +{ + for( int i = 0; i < cn; i++ ) + a[i] = saturate_cast<_Tp>(a[i]*alpha); + return a; +} + +template static inline +Vec<_Tp, cn>& operator *= (Vec<_Tp, cn>& a, double alpha) +{ + for( int i = 0; i < cn; i++ ) + a[i] = saturate_cast<_Tp>(a[i]*alpha); + return a; +} + +template static inline +Vec<_Tp, cn>& operator /= (Vec<_Tp, cn>& a, int alpha) +{ + double ialpha = 1./alpha; + for( int i = 0; i < cn; i++ ) + a[i] = saturate_cast<_Tp>(a[i]*ialpha); + return a; +} + +template static inline +Vec<_Tp, cn>& operator /= (Vec<_Tp, cn>& a, float alpha) +{ + float ialpha = 1.f/alpha; + for( int i = 0; i < cn; i++ ) + a[i] = saturate_cast<_Tp>(a[i]*ialpha); + return a; +} + +template static inline +Vec<_Tp, cn>& operator /= (Vec<_Tp, cn>& a, double alpha) +{ + double ialpha = 1./alpha; + for( int i = 0; i < cn; i++ ) + a[i] = saturate_cast<_Tp>(a[i]*ialpha); + return a; +} + +template static inline Vec<_Tp, cn> +operator * (const Vec<_Tp, cn>& a, int alpha) +{ + 
return Vec<_Tp, cn>(a, alpha, Matx_ScaleOp()); +} + +template static inline Vec<_Tp, cn> +operator * (int alpha, const Vec<_Tp, cn>& a) +{ + return Vec<_Tp, cn>(a, alpha, Matx_ScaleOp()); +} + +template static inline Vec<_Tp, cn> +operator * (const Vec<_Tp, cn>& a, float alpha) +{ + return Vec<_Tp, cn>(a, alpha, Matx_ScaleOp()); +} + +template static inline Vec<_Tp, cn> +operator * (float alpha, const Vec<_Tp, cn>& a) +{ + return Vec<_Tp, cn>(a, alpha, Matx_ScaleOp()); +} + +template static inline Vec<_Tp, cn> +operator * (const Vec<_Tp, cn>& a, double alpha) +{ + return Vec<_Tp, cn>(a, alpha, Matx_ScaleOp()); +} + +template static inline Vec<_Tp, cn> +operator * (double alpha, const Vec<_Tp, cn>& a) +{ + return Vec<_Tp, cn>(a, alpha, Matx_ScaleOp()); +} + +template static inline Vec<_Tp, cn> +operator / (const Vec<_Tp, cn>& a, int alpha) +{ + return Vec<_Tp, cn>(a, 1./alpha, Matx_ScaleOp()); +} + +template static inline Vec<_Tp, cn> +operator / (const Vec<_Tp, cn>& a, float alpha) +{ + return Vec<_Tp, cn>(a, 1.f/alpha, Matx_ScaleOp()); +} + +template static inline Vec<_Tp, cn> +operator / (const Vec<_Tp, cn>& a, double alpha) +{ + return Vec<_Tp, cn>(a, 1./alpha, Matx_ScaleOp()); +} + +template static inline Vec<_Tp, cn> +operator - (const Vec<_Tp, cn>& a) +{ + Vec<_Tp,cn> t; + for( int i = 0; i < cn; i++ ) t.val[i] = saturate_cast<_Tp>(-a.val[i]); + return t; +} + +template inline Vec<_Tp, 4> operator * (const Vec<_Tp, 4>& v1, const Vec<_Tp, 4>& v2) +{ + return Vec<_Tp, 4>(saturate_cast<_Tp>(v1[0]*v2[0] - v1[1]*v2[1] - v1[2]*v2[2] - v1[3]*v2[3]), + saturate_cast<_Tp>(v1[0]*v2[1] + v1[1]*v2[0] + v1[2]*v2[3] - v1[3]*v2[2]), + saturate_cast<_Tp>(v1[0]*v2[2] - v1[1]*v2[3] + v1[2]*v2[0] + v1[3]*v2[1]), + saturate_cast<_Tp>(v1[0]*v2[3] + v1[1]*v2[2] - v1[2]*v2[1] + v1[3]*v2[0])); +} + +template inline Vec<_Tp, 4>& operator *= (Vec<_Tp, 4>& v1, const Vec<_Tp, 4>& v2) +{ + v1 = v1 * v2; + return v1; +} + +template<> inline Vec Vec::cross(const Vec& v) const +{ + return Vec(val[1]*v.val[2] - val[2]*v.val[1], + val[2]*v.val[0] - val[0]*v.val[2], + val[0]*v.val[1] - val[1]*v.val[0]); +} + +template<> inline Vec Vec::cross(const Vec& v) const +{ + return Vec(val[1]*v.val[2] - val[2]*v.val[1], + val[2]*v.val[0] - val[0]*v.val[2], + val[0]*v.val[1] - val[1]*v.val[0]); +} + +template inline Vec<_Tp, cn> normalize(const Vec<_Tp, cn>& v) +{ + double nv = norm(v); + return v * (nv ? 
1./nv : 0.); +} + +template static inline +VecCommaInitializer<_Tp, cn> operator << (const Vec<_Tp, cn>& vec, _T2 val) +{ + VecCommaInitializer<_Tp, cn> commaInitializer((Vec<_Tp, cn>*)&vec); + return (commaInitializer, val); +} + +template inline +VecCommaInitializer<_Tp, cn>::VecCommaInitializer(Vec<_Tp, cn>* _vec) + : MatxCommaInitializer<_Tp, cn, 1>(_vec) +{} + +template template inline +VecCommaInitializer<_Tp, cn>& VecCommaInitializer<_Tp, cn>::operator , (_T2 value) +{ + CV_DbgAssert( this->idx < cn ); + this->dst->val[this->idx++] = saturate_cast<_Tp>(value); + return *this; +} + +template inline +Vec<_Tp, cn> VecCommaInitializer<_Tp, cn>::operator *() const +{ + CV_DbgAssert( this->idx == cn ); + return *this->dst; +} + +//////////////////////////////// Complex ////////////////////////////// + +template inline Complex<_Tp>::Complex() : re(0), im(0) {} +template inline Complex<_Tp>::Complex( _Tp _re, _Tp _im ) : re(_re), im(_im) {} +template template inline Complex<_Tp>::operator Complex() const +{ return Complex(saturate_cast(re), saturate_cast(im)); } +template inline Complex<_Tp> Complex<_Tp>::conj() const +{ return Complex<_Tp>(re, -im); } + +template static inline +bool operator == (const Complex<_Tp>& a, const Complex<_Tp>& b) +{ return a.re == b.re && a.im == b.im; } + +template static inline +bool operator != (const Complex<_Tp>& a, const Complex<_Tp>& b) +{ return a.re != b.re || a.im != b.im; } + +template static inline +Complex<_Tp> operator + (const Complex<_Tp>& a, const Complex<_Tp>& b) +{ return Complex<_Tp>( a.re + b.re, a.im + b.im ); } + +template static inline +Complex<_Tp>& operator += (Complex<_Tp>& a, const Complex<_Tp>& b) +{ a.re += b.re; a.im += b.im; return a; } + +template static inline +Complex<_Tp> operator - (const Complex<_Tp>& a, const Complex<_Tp>& b) +{ return Complex<_Tp>( a.re - b.re, a.im - b.im ); } + +template static inline +Complex<_Tp>& operator -= (Complex<_Tp>& a, const Complex<_Tp>& b) +{ a.re -= b.re; a.im -= b.im; return a; } + +template static inline +Complex<_Tp> operator - (const Complex<_Tp>& a) +{ return Complex<_Tp>(-a.re, -a.im); } + +template static inline +Complex<_Tp> operator * (const Complex<_Tp>& a, const Complex<_Tp>& b) +{ return Complex<_Tp>( a.re*b.re - a.im*b.im, a.re*b.im + a.im*b.re ); } + +template static inline +Complex<_Tp> operator * (const Complex<_Tp>& a, _Tp b) +{ return Complex<_Tp>( a.re*b, a.im*b ); } + +template static inline +Complex<_Tp> operator * (_Tp b, const Complex<_Tp>& a) +{ return Complex<_Tp>( a.re*b, a.im*b ); } + +template static inline +Complex<_Tp> operator + (const Complex<_Tp>& a, _Tp b) +{ return Complex<_Tp>( a.re + b, a.im ); } + +template static inline +Complex<_Tp> operator - (const Complex<_Tp>& a, _Tp b) +{ return Complex<_Tp>( a.re - b, a.im ); } + +template static inline +Complex<_Tp> operator + (_Tp b, const Complex<_Tp>& a) +{ return Complex<_Tp>( a.re + b, a.im ); } + +template static inline +Complex<_Tp> operator - (_Tp b, const Complex<_Tp>& a) +{ return Complex<_Tp>( b - a.re, -a.im ); } + +template static inline +Complex<_Tp>& operator += (Complex<_Tp>& a, _Tp b) +{ a.re += b; return a; } + +template static inline +Complex<_Tp>& operator -= (Complex<_Tp>& a, _Tp b) +{ a.re -= b; return a; } + +template static inline +Complex<_Tp>& operator *= (Complex<_Tp>& a, _Tp b) +{ a.re *= b; a.im *= b; return a; } + +template static inline +double abs(const Complex<_Tp>& a) +{ return std::sqrt( (double)a.re*a.re + (double)a.im*a.im); } + +template static inline +Complex<_Tp> operator 
/ (const Complex<_Tp>& a, const Complex<_Tp>& b) +{ + double t = 1./((double)b.re*b.re + (double)b.im*b.im); + return Complex<_Tp>( (_Tp)((a.re*b.re + a.im*b.im)*t), + (_Tp)((-a.re*b.im + a.im*b.re)*t) ); +} + +template static inline +Complex<_Tp>& operator /= (Complex<_Tp>& a, const Complex<_Tp>& b) +{ + return (a = a / b); +} + +template static inline +Complex<_Tp> operator / (const Complex<_Tp>& a, _Tp b) +{ + _Tp t = (_Tp)1/b; + return Complex<_Tp>( a.re*t, a.im*t ); +} + +template static inline +Complex<_Tp> operator / (_Tp b, const Complex<_Tp>& a) +{ + return Complex<_Tp>(b)/a; +} + +template static inline +Complex<_Tp> operator /= (const Complex<_Tp>& a, _Tp b) +{ + _Tp t = (_Tp)1/b; + a.re *= t; a.im *= t; return a; +} + +//////////////////////////////// 2D Point //////////////////////////////// + +template inline Point_<_Tp>::Point_() : x(0), y(0) {} +template inline Point_<_Tp>::Point_(_Tp _x, _Tp _y) : x(_x), y(_y) {} +template inline Point_<_Tp>::Point_(const Point_& pt) : x(pt.x), y(pt.y) {} +template inline Point_<_Tp>::Point_(const CvPoint& pt) : x((_Tp)pt.x), y((_Tp)pt.y) {} +template inline Point_<_Tp>::Point_(const CvPoint2D32f& pt) + : x(saturate_cast<_Tp>(pt.x)), y(saturate_cast<_Tp>(pt.y)) {} +template inline Point_<_Tp>::Point_(const Size_<_Tp>& sz) : x(sz.width), y(sz.height) {} +template inline Point_<_Tp>::Point_(const Vec<_Tp,2>& v) : x(v[0]), y(v[1]) {} +template inline Point_<_Tp>& Point_<_Tp>::operator = (const Point_& pt) +{ x = pt.x; y = pt.y; return *this; } + +template template inline Point_<_Tp>::operator Point_<_Tp2>() const +{ return Point_<_Tp2>(saturate_cast<_Tp2>(x), saturate_cast<_Tp2>(y)); } +template inline Point_<_Tp>::operator CvPoint() const +{ return cvPoint(saturate_cast(x), saturate_cast(y)); } +template inline Point_<_Tp>::operator CvPoint2D32f() const +{ return cvPoint2D32f((float)x, (float)y); } +template inline Point_<_Tp>::operator Vec<_Tp, 2>() const +{ return Vec<_Tp, 2>(x, y); } + +template inline _Tp Point_<_Tp>::dot(const Point_& pt) const +{ return saturate_cast<_Tp>(x*pt.x + y*pt.y); } +template inline double Point_<_Tp>::ddot(const Point_& pt) const +{ return (double)x*pt.x + (double)y*pt.y; } + +template inline double Point_<_Tp>::cross(const Point_& pt) const +{ return (double)x*pt.y - (double)y*pt.x; } + +template static inline Point_<_Tp>& +operator += (Point_<_Tp>& a, const Point_<_Tp>& b) +{ + a.x = saturate_cast<_Tp>(a.x + b.x); + a.y = saturate_cast<_Tp>(a.y + b.y); + return a; +} + +template static inline Point_<_Tp>& +operator -= (Point_<_Tp>& a, const Point_<_Tp>& b) +{ + a.x = saturate_cast<_Tp>(a.x - b.x); + a.y = saturate_cast<_Tp>(a.y - b.y); + return a; +} + +template static inline Point_<_Tp>& +operator *= (Point_<_Tp>& a, int b) +{ + a.x = saturate_cast<_Tp>(a.x*b); + a.y = saturate_cast<_Tp>(a.y*b); + return a; +} + +template static inline Point_<_Tp>& +operator *= (Point_<_Tp>& a, float b) +{ + a.x = saturate_cast<_Tp>(a.x*b); + a.y = saturate_cast<_Tp>(a.y*b); + return a; +} + +template static inline Point_<_Tp>& +operator *= (Point_<_Tp>& a, double b) +{ + a.x = saturate_cast<_Tp>(a.x*b); + a.y = saturate_cast<_Tp>(a.y*b); + return a; +} + +template static inline double norm(const Point_<_Tp>& pt) +{ return std::sqrt((double)pt.x*pt.x + (double)pt.y*pt.y); } + +template static inline bool operator == (const Point_<_Tp>& a, const Point_<_Tp>& b) +{ return a.x == b.x && a.y == b.y; } + +template static inline bool operator != (const Point_<_Tp>& a, const Point_<_Tp>& b) +{ return a.x != b.x || a.y != b.y; } + 
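For reference, a minimal sketch of how the Point_ arithmetic defined above is typically exercised; it assumes the cv::Point2f typedef and the cv namespace that OpenCV 2.4's core headers provide, and is only an illustrative usage example, not part of the header being added:

    #include <opencv2/core/core.hpp>

    int main()
    {
        cv::Point2f p(3.f, 4.f), q(1.f, 2.f);
        float  d  = p.dot(q);         // 3*1 + 4*2 = 11
        double c  = p.cross(q);       // 2D "cross product": 3*2 - 4*1 = 2
        double n  = cv::norm(p);      // Euclidean length: sqrt(9 + 16) = 5
        bool   eq = (p + q == q + p); // element-wise comparison, true
        return (d == 11.f && c == 2. && n == 5. && eq) ? 0 : 1;
    }
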
+template static inline Point_<_Tp> operator + (const Point_<_Tp>& a, const Point_<_Tp>& b) +{ return Point_<_Tp>( saturate_cast<_Tp>(a.x + b.x), saturate_cast<_Tp>(a.y + b.y) ); } + +template static inline Point_<_Tp> operator - (const Point_<_Tp>& a, const Point_<_Tp>& b) +{ return Point_<_Tp>( saturate_cast<_Tp>(a.x - b.x), saturate_cast<_Tp>(a.y - b.y) ); } + +template static inline Point_<_Tp> operator - (const Point_<_Tp>& a) +{ return Point_<_Tp>( saturate_cast<_Tp>(-a.x), saturate_cast<_Tp>(-a.y) ); } + +template static inline Point_<_Tp> operator * (const Point_<_Tp>& a, int b) +{ return Point_<_Tp>( saturate_cast<_Tp>(a.x*b), saturate_cast<_Tp>(a.y*b) ); } + +template static inline Point_<_Tp> operator * (int a, const Point_<_Tp>& b) +{ return Point_<_Tp>( saturate_cast<_Tp>(b.x*a), saturate_cast<_Tp>(b.y*a) ); } + +template static inline Point_<_Tp> operator * (const Point_<_Tp>& a, float b) +{ return Point_<_Tp>( saturate_cast<_Tp>(a.x*b), saturate_cast<_Tp>(a.y*b) ); } + +template static inline Point_<_Tp> operator * (float a, const Point_<_Tp>& b) +{ return Point_<_Tp>( saturate_cast<_Tp>(b.x*a), saturate_cast<_Tp>(b.y*a) ); } + +template static inline Point_<_Tp> operator * (const Point_<_Tp>& a, double b) +{ return Point_<_Tp>( saturate_cast<_Tp>(a.x*b), saturate_cast<_Tp>(a.y*b) ); } + +template static inline Point_<_Tp> operator * (double a, const Point_<_Tp>& b) +{ return Point_<_Tp>( saturate_cast<_Tp>(b.x*a), saturate_cast<_Tp>(b.y*a) ); } + +//////////////////////////////// 3D Point //////////////////////////////// + +template inline Point3_<_Tp>::Point3_() : x(0), y(0), z(0) {} +template inline Point3_<_Tp>::Point3_(_Tp _x, _Tp _y, _Tp _z) : x(_x), y(_y), z(_z) {} +template inline Point3_<_Tp>::Point3_(const Point3_& pt) : x(pt.x), y(pt.y), z(pt.z) {} +template inline Point3_<_Tp>::Point3_(const Point_<_Tp>& pt) : x(pt.x), y(pt.y), z(_Tp()) {} +template inline Point3_<_Tp>::Point3_(const CvPoint3D32f& pt) : + x(saturate_cast<_Tp>(pt.x)), y(saturate_cast<_Tp>(pt.y)), z(saturate_cast<_Tp>(pt.z)) {} +template inline Point3_<_Tp>::Point3_(const Vec<_Tp, 3>& v) : x(v[0]), y(v[1]), z(v[2]) {} + +template template inline Point3_<_Tp>::operator Point3_<_Tp2>() const +{ return Point3_<_Tp2>(saturate_cast<_Tp2>(x), saturate_cast<_Tp2>(y), saturate_cast<_Tp2>(z)); } + +template inline Point3_<_Tp>::operator CvPoint3D32f() const +{ return cvPoint3D32f((float)x, (float)y, (float)z); } + +template inline Point3_<_Tp>::operator Vec<_Tp, 3>() const +{ return Vec<_Tp, 3>(x, y, z); } + +template inline Point3_<_Tp>& Point3_<_Tp>::operator = (const Point3_& pt) +{ x = pt.x; y = pt.y; z = pt.z; return *this; } + +template inline _Tp Point3_<_Tp>::dot(const Point3_& pt) const +{ return saturate_cast<_Tp>(x*pt.x + y*pt.y + z*pt.z); } +template inline double Point3_<_Tp>::ddot(const Point3_& pt) const +{ return (double)x*pt.x + (double)y*pt.y + (double)z*pt.z; } + +template inline Point3_<_Tp> Point3_<_Tp>::cross(const Point3_<_Tp>& pt) const +{ + return Point3_<_Tp>(y*pt.z - z*pt.y, z*pt.x - x*pt.z, x*pt.y - y*pt.x); +} + +template static inline Point3_<_Tp>& +operator += (Point3_<_Tp>& a, const Point3_<_Tp>& b) +{ + a.x = saturate_cast<_Tp>(a.x + b.x); + a.y = saturate_cast<_Tp>(a.y + b.y); + a.z = saturate_cast<_Tp>(a.z + b.z); + return a; +} + +template static inline Point3_<_Tp>& +operator -= (Point3_<_Tp>& a, const Point3_<_Tp>& b) +{ + a.x = saturate_cast<_Tp>(a.x - b.x); + a.y = saturate_cast<_Tp>(a.y - b.y); + a.z = saturate_cast<_Tp>(a.z - b.z); + return a; +} + +template static 
inline Point3_<_Tp>& +operator *= (Point3_<_Tp>& a, int b) +{ + a.x = saturate_cast<_Tp>(a.x*b); + a.y = saturate_cast<_Tp>(a.y*b); + a.z = saturate_cast<_Tp>(a.z*b); + return a; +} + +template static inline Point3_<_Tp>& +operator *= (Point3_<_Tp>& a, float b) +{ + a.x = saturate_cast<_Tp>(a.x*b); + a.y = saturate_cast<_Tp>(a.y*b); + a.z = saturate_cast<_Tp>(a.z*b); + return a; +} + +template static inline Point3_<_Tp>& +operator *= (Point3_<_Tp>& a, double b) +{ + a.x = saturate_cast<_Tp>(a.x*b); + a.y = saturate_cast<_Tp>(a.y*b); + a.z = saturate_cast<_Tp>(a.z*b); + return a; +} + +template static inline double norm(const Point3_<_Tp>& pt) +{ return std::sqrt((double)pt.x*pt.x + (double)pt.y*pt.y + (double)pt.z*pt.z); } + +template static inline bool operator == (const Point3_<_Tp>& a, const Point3_<_Tp>& b) +{ return a.x == b.x && a.y == b.y && a.z == b.z; } + +template static inline bool operator != (const Point3_<_Tp>& a, const Point3_<_Tp>& b) +{ return a.x != b.x || a.y != b.y || a.z != b.z; } + +template static inline Point3_<_Tp> operator + (const Point3_<_Tp>& a, const Point3_<_Tp>& b) +{ return Point3_<_Tp>( saturate_cast<_Tp>(a.x + b.x), + saturate_cast<_Tp>(a.y + b.y), + saturate_cast<_Tp>(a.z + b.z)); } + +template static inline Point3_<_Tp> operator - (const Point3_<_Tp>& a, const Point3_<_Tp>& b) +{ return Point3_<_Tp>( saturate_cast<_Tp>(a.x - b.x), + saturate_cast<_Tp>(a.y - b.y), + saturate_cast<_Tp>(a.z - b.z)); } + +template static inline Point3_<_Tp> operator - (const Point3_<_Tp>& a) +{ return Point3_<_Tp>( saturate_cast<_Tp>(-a.x), + saturate_cast<_Tp>(-a.y), + saturate_cast<_Tp>(-a.z) ); } + +template static inline Point3_<_Tp> operator * (const Point3_<_Tp>& a, int b) +{ return Point3_<_Tp>( saturate_cast<_Tp>(a.x*b), + saturate_cast<_Tp>(a.y*b), + saturate_cast<_Tp>(a.z*b) ); } + +template static inline Point3_<_Tp> operator * (int a, const Point3_<_Tp>& b) +{ return Point3_<_Tp>( saturate_cast<_Tp>(b.x*a), + saturate_cast<_Tp>(b.y*a), + saturate_cast<_Tp>(b.z*a) ); } + +template static inline Point3_<_Tp> operator * (const Point3_<_Tp>& a, float b) +{ return Point3_<_Tp>( saturate_cast<_Tp>(a.x*b), + saturate_cast<_Tp>(a.y*b), + saturate_cast<_Tp>(a.z*b) ); } + +template static inline Point3_<_Tp> operator * (float a, const Point3_<_Tp>& b) +{ return Point3_<_Tp>( saturate_cast<_Tp>(b.x*a), + saturate_cast<_Tp>(b.y*a), + saturate_cast<_Tp>(b.z*a) ); } + +template static inline Point3_<_Tp> operator * (const Point3_<_Tp>& a, double b) +{ return Point3_<_Tp>( saturate_cast<_Tp>(a.x*b), + saturate_cast<_Tp>(a.y*b), + saturate_cast<_Tp>(a.z*b) ); } + +template static inline Point3_<_Tp> operator * (double a, const Point3_<_Tp>& b) +{ return Point3_<_Tp>( saturate_cast<_Tp>(b.x*a), + saturate_cast<_Tp>(b.y*a), + saturate_cast<_Tp>(b.z*a) ); } + +//////////////////////////////// Size //////////////////////////////// + +template inline Size_<_Tp>::Size_() + : width(0), height(0) {} +template inline Size_<_Tp>::Size_(_Tp _width, _Tp _height) + : width(_width), height(_height) {} +template inline Size_<_Tp>::Size_(const Size_& sz) + : width(sz.width), height(sz.height) {} +template inline Size_<_Tp>::Size_(const CvSize& sz) + : width(saturate_cast<_Tp>(sz.width)), height(saturate_cast<_Tp>(sz.height)) {} +template inline Size_<_Tp>::Size_(const CvSize2D32f& sz) + : width(saturate_cast<_Tp>(sz.width)), height(saturate_cast<_Tp>(sz.height)) {} +template inline Size_<_Tp>::Size_(const Point_<_Tp>& pt) : width(pt.x), height(pt.y) {} + +template template inline 
Size_<_Tp>::operator Size_<_Tp2>() const +{ return Size_<_Tp2>(saturate_cast<_Tp2>(width), saturate_cast<_Tp2>(height)); } +template inline Size_<_Tp>::operator CvSize() const +{ return cvSize(saturate_cast(width), saturate_cast(height)); } +template inline Size_<_Tp>::operator CvSize2D32f() const +{ return cvSize2D32f((float)width, (float)height); } + +template inline Size_<_Tp>& Size_<_Tp>::operator = (const Size_<_Tp>& sz) +{ width = sz.width; height = sz.height; return *this; } +template static inline Size_<_Tp> operator * (const Size_<_Tp>& a, _Tp b) +{ return Size_<_Tp>(a.width * b, a.height * b); } +template static inline Size_<_Tp> operator + (const Size_<_Tp>& a, const Size_<_Tp>& b) +{ return Size_<_Tp>(a.width + b.width, a.height + b.height); } +template static inline Size_<_Tp> operator - (const Size_<_Tp>& a, const Size_<_Tp>& b) +{ return Size_<_Tp>(a.width - b.width, a.height - b.height); } +template inline _Tp Size_<_Tp>::area() const { return width*height; } + +template static inline Size_<_Tp>& operator += (Size_<_Tp>& a, const Size_<_Tp>& b) +{ a.width += b.width; a.height += b.height; return a; } +template static inline Size_<_Tp>& operator -= (Size_<_Tp>& a, const Size_<_Tp>& b) +{ a.width -= b.width; a.height -= b.height; return a; } + +template static inline bool operator == (const Size_<_Tp>& a, const Size_<_Tp>& b) +{ return a.width == b.width && a.height == b.height; } +template static inline bool operator != (const Size_<_Tp>& a, const Size_<_Tp>& b) +{ return a.width != b.width || a.height != b.height; } + +//////////////////////////////// Rect //////////////////////////////// + + +template inline Rect_<_Tp>::Rect_() : x(0), y(0), width(0), height(0) {} +template inline Rect_<_Tp>::Rect_(_Tp _x, _Tp _y, _Tp _width, _Tp _height) : x(_x), y(_y), width(_width), height(_height) {} +template inline Rect_<_Tp>::Rect_(const Rect_<_Tp>& r) : x(r.x), y(r.y), width(r.width), height(r.height) {} +template inline Rect_<_Tp>::Rect_(const CvRect& r) : x((_Tp)r.x), y((_Tp)r.y), width((_Tp)r.width), height((_Tp)r.height) {} +template inline Rect_<_Tp>::Rect_(const Point_<_Tp>& org, const Size_<_Tp>& sz) : + x(org.x), y(org.y), width(sz.width), height(sz.height) {} +template inline Rect_<_Tp>::Rect_(const Point_<_Tp>& pt1, const Point_<_Tp>& pt2) +{ + x = std::min(pt1.x, pt2.x); y = std::min(pt1.y, pt2.y); + width = std::max(pt1.x, pt2.x) - x; height = std::max(pt1.y, pt2.y) - y; +} +template inline Rect_<_Tp>& Rect_<_Tp>::operator = ( const Rect_<_Tp>& r ) +{ x = r.x; y = r.y; width = r.width; height = r.height; return *this; } + +template inline Point_<_Tp> Rect_<_Tp>::tl() const { return Point_<_Tp>(x,y); } +template inline Point_<_Tp> Rect_<_Tp>::br() const { return Point_<_Tp>(x+width, y+height); } + +template static inline Rect_<_Tp>& operator += ( Rect_<_Tp>& a, const Point_<_Tp>& b ) +{ a.x += b.x; a.y += b.y; return a; } +template static inline Rect_<_Tp>& operator -= ( Rect_<_Tp>& a, const Point_<_Tp>& b ) +{ a.x -= b.x; a.y -= b.y; return a; } + +template static inline Rect_<_Tp>& operator += ( Rect_<_Tp>& a, const Size_<_Tp>& b ) +{ a.width += b.width; a.height += b.height; return a; } + +template static inline Rect_<_Tp>& operator -= ( Rect_<_Tp>& a, const Size_<_Tp>& b ) +{ a.width -= b.width; a.height -= b.height; return a; } + +template static inline Rect_<_Tp>& operator &= ( Rect_<_Tp>& a, const Rect_<_Tp>& b ) +{ + _Tp x1 = std::max(a.x, b.x), y1 = std::max(a.y, b.y); + a.width = std::min(a.x + a.width, b.x + b.width) - x1; + a.height = std::min(a.y + a.height, 
b.y + b.height) - y1; + a.x = x1; a.y = y1; + if( a.width <= 0 || a.height <= 0 ) + a = Rect(); + return a; +} + +template static inline Rect_<_Tp>& operator |= ( Rect_<_Tp>& a, const Rect_<_Tp>& b ) +{ + _Tp x1 = std::min(a.x, b.x), y1 = std::min(a.y, b.y); + a.width = std::max(a.x + a.width, b.x + b.width) - x1; + a.height = std::max(a.y + a.height, b.y + b.height) - y1; + a.x = x1; a.y = y1; + return a; +} + +template inline Size_<_Tp> Rect_<_Tp>::size() const { return Size_<_Tp>(width, height); } +template inline _Tp Rect_<_Tp>::area() const { return width*height; } + +template template inline Rect_<_Tp>::operator Rect_<_Tp2>() const +{ return Rect_<_Tp2>(saturate_cast<_Tp2>(x), saturate_cast<_Tp2>(y), + saturate_cast<_Tp2>(width), saturate_cast<_Tp2>(height)); } +template inline Rect_<_Tp>::operator CvRect() const +{ return cvRect(saturate_cast(x), saturate_cast(y), + saturate_cast(width), saturate_cast(height)); } + +template inline bool Rect_<_Tp>::contains(const Point_<_Tp>& pt) const +{ return x <= pt.x && pt.x < x + width && y <= pt.y && pt.y < y + height; } + +template static inline bool operator == (const Rect_<_Tp>& a, const Rect_<_Tp>& b) +{ + return a.x == b.x && a.y == b.y && a.width == b.width && a.height == b.height; +} + +template static inline bool operator != (const Rect_<_Tp>& a, const Rect_<_Tp>& b) +{ + return a.x != b.x || a.y != b.y || a.width != b.width || a.height != b.height; +} + +template static inline Rect_<_Tp> operator + (const Rect_<_Tp>& a, const Point_<_Tp>& b) +{ + return Rect_<_Tp>( a.x + b.x, a.y + b.y, a.width, a.height ); +} + +template static inline Rect_<_Tp> operator - (const Rect_<_Tp>& a, const Point_<_Tp>& b) +{ + return Rect_<_Tp>( a.x - b.x, a.y - b.y, a.width, a.height ); +} + +template static inline Rect_<_Tp> operator + (const Rect_<_Tp>& a, const Size_<_Tp>& b) +{ + return Rect_<_Tp>( a.x, a.y, a.width + b.width, a.height + b.height ); +} + +template static inline Rect_<_Tp> operator & (const Rect_<_Tp>& a, const Rect_<_Tp>& b) +{ + Rect_<_Tp> c = a; + return c &= b; +} + +template static inline Rect_<_Tp> operator | (const Rect_<_Tp>& a, const Rect_<_Tp>& b) +{ + Rect_<_Tp> c = a; + return c |= b; +} + +template inline bool Point_<_Tp>::inside( const Rect_<_Tp>& r ) const +{ + return r.contains(*this); +} + +inline RotatedRect::RotatedRect() { angle = 0; } +inline RotatedRect::RotatedRect(const Point2f& _center, const Size2f& _size, float _angle) + : center(_center), size(_size), angle(_angle) {} +inline RotatedRect::RotatedRect(const CvBox2D& box) + : center(box.center), size(box.size), angle(box.angle) {} +inline RotatedRect::operator CvBox2D() const +{ + CvBox2D box; box.center = center; box.size = size; box.angle = angle; + return box; +} + +//////////////////////////////// Scalar_ /////////////////////////////// + +template inline Scalar_<_Tp>::Scalar_() +{ this->val[0] = this->val[1] = this->val[2] = this->val[3] = 0; } + +template inline Scalar_<_Tp>::Scalar_(_Tp v0, _Tp v1, _Tp v2, _Tp v3) +{ this->val[0] = v0; this->val[1] = v1; this->val[2] = v2; this->val[3] = v3; } + +template inline Scalar_<_Tp>::Scalar_(const CvScalar& s) +{ + this->val[0] = saturate_cast<_Tp>(s.val[0]); + this->val[1] = saturate_cast<_Tp>(s.val[1]); + this->val[2] = saturate_cast<_Tp>(s.val[2]); + this->val[3] = saturate_cast<_Tp>(s.val[3]); +} + +template inline Scalar_<_Tp>::Scalar_(_Tp v0) +{ this->val[0] = v0; this->val[1] = this->val[2] = this->val[3] = 0; } + +template inline Scalar_<_Tp> Scalar_<_Tp>::all(_Tp v0) +{ return Scalar_<_Tp>(v0, v0, v0, 
v0); } +template inline Scalar_<_Tp>::operator CvScalar() const +{ return cvScalar(this->val[0], this->val[1], this->val[2], this->val[3]); } + +template template inline Scalar_<_Tp>::operator Scalar_() const +{ + return Scalar_(saturate_cast(this->val[0]), + saturate_cast(this->val[1]), + saturate_cast(this->val[2]), + saturate_cast(this->val[3])); +} + +template static inline Scalar_<_Tp>& operator += (Scalar_<_Tp>& a, const Scalar_<_Tp>& b) +{ + a.val[0] = saturate_cast<_Tp>(a.val[0] + b.val[0]); + a.val[1] = saturate_cast<_Tp>(a.val[1] + b.val[1]); + a.val[2] = saturate_cast<_Tp>(a.val[2] + b.val[2]); + a.val[3] = saturate_cast<_Tp>(a.val[3] + b.val[3]); + return a; +} + +template static inline Scalar_<_Tp>& operator -= (Scalar_<_Tp>& a, const Scalar_<_Tp>& b) +{ + a.val[0] = saturate_cast<_Tp>(a.val[0] - b.val[0]); + a.val[1] = saturate_cast<_Tp>(a.val[1] - b.val[1]); + a.val[2] = saturate_cast<_Tp>(a.val[2] - b.val[2]); + a.val[3] = saturate_cast<_Tp>(a.val[3] - b.val[3]); + return a; +} + +template static inline Scalar_<_Tp>& operator *= ( Scalar_<_Tp>& a, _Tp v ) +{ + a.val[0] = saturate_cast<_Tp>(a.val[0] * v); + a.val[1] = saturate_cast<_Tp>(a.val[1] * v); + a.val[2] = saturate_cast<_Tp>(a.val[2] * v); + a.val[3] = saturate_cast<_Tp>(a.val[3] * v); + return a; +} + +template inline Scalar_<_Tp> Scalar_<_Tp>::mul(const Scalar_<_Tp>& t, double scale ) const +{ + return Scalar_<_Tp>( saturate_cast<_Tp>(this->val[0]*t.val[0]*scale), + saturate_cast<_Tp>(this->val[1]*t.val[1]*scale), + saturate_cast<_Tp>(this->val[2]*t.val[2]*scale), + saturate_cast<_Tp>(this->val[3]*t.val[3]*scale)); +} + +template static inline bool operator == ( const Scalar_<_Tp>& a, const Scalar_<_Tp>& b ) +{ + return a.val[0] == b.val[0] && a.val[1] == b.val[1] && + a.val[2] == b.val[2] && a.val[3] == b.val[3]; +} + +template static inline bool operator != ( const Scalar_<_Tp>& a, const Scalar_<_Tp>& b ) +{ + return a.val[0] != b.val[0] || a.val[1] != b.val[1] || + a.val[2] != b.val[2] || a.val[3] != b.val[3]; +} + +template static inline Scalar_<_Tp> operator + (const Scalar_<_Tp>& a, const Scalar_<_Tp>& b) +{ + return Scalar_<_Tp>(saturate_cast<_Tp>(a.val[0] + b.val[0]), + saturate_cast<_Tp>(a.val[1] + b.val[1]), + saturate_cast<_Tp>(a.val[2] + b.val[2]), + saturate_cast<_Tp>(a.val[3] + b.val[3])); +} + +template static inline Scalar_<_Tp> operator - (const Scalar_<_Tp>& a, const Scalar_<_Tp>& b) +{ + return Scalar_<_Tp>(saturate_cast<_Tp>(a.val[0] - b.val[0]), + saturate_cast<_Tp>(a.val[1] - b.val[1]), + saturate_cast<_Tp>(a.val[2] - b.val[2]), + saturate_cast<_Tp>(a.val[3] - b.val[3])); +} + +template static inline Scalar_<_Tp> operator * (const Scalar_<_Tp>& a, _Tp alpha) +{ + return Scalar_<_Tp>(saturate_cast<_Tp>(a.val[0] * alpha), + saturate_cast<_Tp>(a.val[1] * alpha), + saturate_cast<_Tp>(a.val[2] * alpha), + saturate_cast<_Tp>(a.val[3] * alpha)); +} + +template static inline Scalar_<_Tp> operator * (_Tp alpha, const Scalar_<_Tp>& a) +{ + return a*alpha; +} + +template static inline Scalar_<_Tp> operator - (const Scalar_<_Tp>& a) +{ + return Scalar_<_Tp>(saturate_cast<_Tp>(-a.val[0]), saturate_cast<_Tp>(-a.val[1]), + saturate_cast<_Tp>(-a.val[2]), saturate_cast<_Tp>(-a.val[3])); +} + + +template static inline Scalar_<_Tp> +operator * (const Scalar_<_Tp>& a, const Scalar_<_Tp>& b) +{ + return Scalar_<_Tp>(saturate_cast<_Tp>(a[0]*b[0] - a[1]*b[1] - a[2]*b[2] - a[3]*b[3]), + saturate_cast<_Tp>(a[0]*b[1] + a[1]*b[0] + a[2]*b[3] - a[3]*b[2]), + saturate_cast<_Tp>(a[0]*b[2] - a[1]*b[3] + a[2]*b[0] + 
a[3]*b[1]), + saturate_cast<_Tp>(a[0]*b[3] + a[1]*b[2] - a[2]*b[1] + a[3]*b[0])); +} + +template static inline Scalar_<_Tp>& +operator *= (Scalar_<_Tp>& a, const Scalar_<_Tp>& b) +{ + a = a*b; + return a; +} + +template inline Scalar_<_Tp> Scalar_<_Tp>::conj() const +{ + return Scalar_<_Tp>(saturate_cast<_Tp>(this->val[0]), + saturate_cast<_Tp>(-this->val[1]), + saturate_cast<_Tp>(-this->val[2]), + saturate_cast<_Tp>(-this->val[3])); +} + +template inline bool Scalar_<_Tp>::isReal() const +{ + return this->val[1] == 0 && this->val[2] == 0 && this->val[3] == 0; +} + +template static inline +Scalar_<_Tp> operator / (const Scalar_<_Tp>& a, _Tp alpha) +{ + return Scalar_<_Tp>(saturate_cast<_Tp>(a.val[0] / alpha), + saturate_cast<_Tp>(a.val[1] / alpha), + saturate_cast<_Tp>(a.val[2] / alpha), + saturate_cast<_Tp>(a.val[3] / alpha)); +} + +template static inline +Scalar_ operator / (const Scalar_& a, float alpha) +{ + float s = 1/alpha; + return Scalar_(a.val[0]*s, a.val[1]*s, a.val[2]*s, a.val[3]*s); +} + +template static inline +Scalar_ operator / (const Scalar_& a, double alpha) +{ + double s = 1/alpha; + return Scalar_(a.val[0]*s, a.val[1]*s, a.val[2]*s, a.val[3]*s); +} + +template static inline +Scalar_<_Tp>& operator /= (Scalar_<_Tp>& a, _Tp alpha) +{ + a = a/alpha; + return a; +} + +template static inline +Scalar_<_Tp> operator / (_Tp a, const Scalar_<_Tp>& b) +{ + _Tp s = a/(b[0]*b[0] + b[1]*b[1] + b[2]*b[2] + b[3]*b[3]); + return b.conj()*s; +} + +template static inline +Scalar_<_Tp> operator / (const Scalar_<_Tp>& a, const Scalar_<_Tp>& b) +{ + return a*((_Tp)1/b); +} + +template static inline +Scalar_<_Tp>& operator /= (Scalar_<_Tp>& a, const Scalar_<_Tp>& b) +{ + a = a/b; + return a; +} + +//////////////////////////////// Range ///////////////////////////////// + +inline Range::Range() : start(0), end(0) {} +inline Range::Range(int _start, int _end) : start(_start), end(_end) {} +inline Range::Range(const CvSlice& slice) : start(slice.start_index), end(slice.end_index) +{ + if( start == 0 && end == CV_WHOLE_SEQ_END_INDEX ) + *this = Range::all(); +} + +inline int Range::size() const { return end - start; } +inline bool Range::empty() const { return start == end; } +inline Range Range::all() { return Range(INT_MIN, INT_MAX); } + +static inline bool operator == (const Range& r1, const Range& r2) +{ return r1.start == r2.start && r1.end == r2.end; } + +static inline bool operator != (const Range& r1, const Range& r2) +{ return !(r1 == r2); } + +static inline bool operator !(const Range& r) +{ return r.start == r.end; } + +static inline Range operator & (const Range& r1, const Range& r2) +{ + Range r(std::max(r1.start, r2.start), std::min(r1.end, r2.end)); + r.end = std::max(r.end, r.start); + return r; +} + +static inline Range& operator &= (Range& r1, const Range& r2) +{ + r1 = r1 & r2; + return r1; +} + +static inline Range operator + (const Range& r1, int delta) +{ + return Range(r1.start + delta, r1.end + delta); +} + +static inline Range operator + (int delta, const Range& r1) +{ + return Range(r1.start + delta, r1.end + delta); +} + +static inline Range operator - (const Range& r1, int delta) +{ + return r1 + (-delta); +} + +inline Range::operator CvSlice() const +{ return *this != Range::all() ? cvSlice(start, end) : CV_WHOLE_SEQ; } + + + +//////////////////////////////// Vector //////////////////////////////// + +// template vector class. 
It is similar to STL's vector, +// with a few important differences: +// 1) it can be created on top of user-allocated data w/o copying it +// 2) vector b = a means copying the header, +// not the underlying data (use clone() to make a deep copy) +template class Vector +{ +public: + typedef _Tp value_type; + typedef _Tp* iterator; + typedef const _Tp* const_iterator; + typedef _Tp& reference; + typedef const _Tp& const_reference; + + struct Hdr + { + Hdr() : data(0), datastart(0), refcount(0), size(0), capacity(0) {}; + _Tp* data; + _Tp* datastart; + int* refcount; + size_t size; + size_t capacity; + }; + + Vector() {} + Vector(size_t _size) { resize(_size); } + Vector(size_t _size, const _Tp& val) + { + resize(_size); + for(size_t i = 0; i < _size; i++) + hdr.data[i] = val; + } + Vector(_Tp* _data, size_t _size, bool _copyData=false) + { set(_data, _size, _copyData); } + + template Vector(const Vec<_Tp, n>& vec) + { set((_Tp*)&vec.val[0], n, true); } + + Vector(const std::vector<_Tp>& vec, bool _copyData=false) + { set(!vec.empty() ? (_Tp*)&vec[0] : 0, vec.size(), _copyData); } + + Vector(const Vector& d) { *this = d; } + + Vector(const Vector& d, const Range& r_) + { + Range r = r_ == Range::all() ? Range(0, d.size()) : r_; + /*if( r == Range::all() ) + r = Range(0, d.size());*/ + if( r.size() > 0 && r.start >= 0 && r.end <= d.size() ) + { + if( d.hdr.refcount ) + CV_XADD(d.hdr.refcount, 1); + hdr.refcount = d.hdr.refcount; + hdr.datastart = d.hdr.datastart; + hdr.data = d.hdr.data + r.start; + hdr.capacity = hdr.size = r.size(); + } + } + + Vector<_Tp>& operator = (const Vector& d) + { + if( this != &d ) + { + if( d.hdr.refcount ) + CV_XADD(d.hdr.refcount, 1); + release(); + hdr = d.hdr; + } + return *this; + } + + ~Vector() { release(); } + + Vector<_Tp> clone() const + { return hdr.data ? Vector<_Tp>(hdr.data, hdr.size, true) : Vector<_Tp>(); } + + void copyTo(Vector<_Tp>& vec) const + { + size_t i, sz = size(); + vec.resize(sz); + const _Tp* src = hdr.data; + _Tp* dst = vec.hdr.data; + for( i = 0; i < sz; i++ ) + dst[i] = src[i]; + } + + void copyTo(std::vector<_Tp>& vec) const + { + size_t i, sz = size(); + vec.resize(sz); + const _Tp* src = hdr.data; + _Tp* dst = sz ? 
&vec[0] : 0; + for( i = 0; i < sz; i++ ) + dst[i] = src[i]; + } + + operator CvMat() const + { return cvMat((int)size(), 1, type(), (void*)hdr.data); } + + _Tp& operator [] (size_t i) { CV_DbgAssert( i < size() ); return hdr.data[i]; } + const _Tp& operator [] (size_t i) const { CV_DbgAssert( i < size() ); return hdr.data[i]; } + Vector operator() (const Range& r) const { return Vector(*this, r); } + _Tp& back() { CV_DbgAssert(!empty()); return hdr.data[hdr.size-1]; } + const _Tp& back() const { CV_DbgAssert(!empty()); return hdr.data[hdr.size-1]; } + _Tp& front() { CV_DbgAssert(!empty()); return hdr.data[0]; } + const _Tp& front() const { CV_DbgAssert(!empty()); return hdr.data[0]; } + + _Tp* begin() { return hdr.data; } + _Tp* end() { return hdr.data + hdr.size; } + const _Tp* begin() const { return hdr.data; } + const _Tp* end() const { return hdr.data + hdr.size; } + + void addref() { if( hdr.refcount ) CV_XADD(hdr.refcount, 1); } + void release() + { + if( hdr.refcount && CV_XADD(hdr.refcount, -1) == 1 ) + { + delete[] hdr.datastart; + delete hdr.refcount; + } + hdr = Hdr(); + } + + void set(_Tp* _data, size_t _size, bool _copyData=false) + { + if( !_copyData ) + { + release(); + hdr.data = hdr.datastart = _data; + hdr.size = hdr.capacity = _size; + hdr.refcount = 0; + } + else + { + reserve(_size); + for( size_t i = 0; i < _size; i++ ) + hdr.data[i] = _data[i]; + hdr.size = _size; + } + } + + void reserve(size_t newCapacity) + { + _Tp* newData; + int* newRefcount; + size_t i, oldSize = hdr.size; + if( (!hdr.refcount || *hdr.refcount == 1) && hdr.capacity >= newCapacity ) + return; + newCapacity = std::max(newCapacity, oldSize); + newData = new _Tp[newCapacity]; + newRefcount = new int(1); + for( i = 0; i < oldSize; i++ ) + newData[i] = hdr.data[i]; + release(); + hdr.data = hdr.datastart = newData; + hdr.capacity = newCapacity; + hdr.size = oldSize; + hdr.refcount = newRefcount; + } + + void resize(size_t newSize) + { + size_t i; + newSize = std::max(newSize, (size_t)0); + if( (!hdr.refcount || *hdr.refcount == 1) && hdr.size == newSize ) + return; + if( newSize > hdr.capacity ) + reserve(std::max(newSize, std::max((size_t)4, hdr.capacity*2))); + for( i = hdr.size; i < newSize; i++ ) + hdr.data[i] = _Tp(); + hdr.size = newSize; + } + + Vector<_Tp>& push_back(const _Tp& elem) + { + if( hdr.size == hdr.capacity ) + reserve( std::max((size_t)4, hdr.capacity*2) ); + hdr.data[hdr.size++] = elem; + return *this; + } + + Vector<_Tp>& pop_back() + { + if( hdr.size > 0 ) + --hdr.size; + return *this; + } + + size_t size() const { return hdr.size; } + size_t capacity() const { return hdr.capacity; } + bool empty() const { return hdr.size == 0; } + void clear() { resize(0); } + int type() const { return DataType<_Tp>::type; } + +protected: + Hdr hdr; +}; + + +template inline typename DataType<_Tp>::work_type +dot(const Vector<_Tp>& v1, const Vector<_Tp>& v2) +{ + typedef typename DataType<_Tp>::work_type _Tw; + size_t i = 0, n = v1.size(); + assert(v1.size() == v2.size()); + + _Tw s = 0; + const _Tp *ptr1 = &v1[0], *ptr2 = &v2[0]; + for( ; i < n; i++ ) + s += (_Tw)ptr1[i]*ptr2[i]; + + return s; +} + +// Multiply-with-Carry RNG +inline RNG::RNG() { state = 0xffffffff; } +inline RNG::RNG(uint64 _state) { state = _state ? 
_state : 0xffffffff; } +inline unsigned RNG::next() +{ + state = (uint64)(unsigned)state*CV_RNG_COEFF + (unsigned)(state >> 32); + return (unsigned)state; +} + +inline RNG::operator uchar() { return (uchar)next(); } +inline RNG::operator schar() { return (schar)next(); } +inline RNG::operator ushort() { return (ushort)next(); } +inline RNG::operator short() { return (short)next(); } +inline RNG::operator unsigned() { return next(); } +inline unsigned RNG::operator ()(unsigned N) {return (unsigned)uniform(0,N);} +inline unsigned RNG::operator ()() {return next();} +inline RNG::operator int() { return (int)next(); } +// * (2^32-1)^-1 +inline RNG::operator float() { return next()*2.3283064365386962890625e-10f; } +inline RNG::operator double() +{ + unsigned t = next(); + return (((uint64)t << 32) | next())*5.4210108624275221700372640043497e-20; +} +inline int RNG::uniform(int a, int b) { return a == b ? a : (int)(next()%(b - a) + a); } +inline float RNG::uniform(float a, float b) { return ((float)*this)*(b - a) + a; } +inline double RNG::uniform(double a, double b) { return ((double)*this)*(b - a) + a; } + +inline TermCriteria::TermCriteria() : type(0), maxCount(0), epsilon(0) {} +inline TermCriteria::TermCriteria(int _type, int _maxCount, double _epsilon) + : type(_type), maxCount(_maxCount), epsilon(_epsilon) {} +inline TermCriteria::TermCriteria(const CvTermCriteria& criteria) + : type(criteria.type), maxCount(criteria.max_iter), epsilon(criteria.epsilon) {} +inline TermCriteria::operator CvTermCriteria() const +{ return cvTermCriteria(type, maxCount, epsilon); } + +inline uchar* LineIterator::operator *() { return ptr; } +inline LineIterator& LineIterator::operator ++() +{ + int mask = err < 0 ? -1 : 0; + err += minusDelta + (plusDelta & mask); + ptr += minusStep + (plusStep & mask); + return *this; +} +inline LineIterator LineIterator::operator ++(int) +{ + LineIterator it = *this; + ++(*this); + return it; +} +inline Point LineIterator::pos() const +{ + Point p; + p.y = (int)((ptr - ptr0)/step); + p.x = (int)(((ptr - ptr0) - p.y*step)/elemSize); + return p; +} + +/////////////////////////////// AutoBuffer //////////////////////////////////////// + +template inline AutoBuffer<_Tp, fixed_size>::AutoBuffer() +{ + ptr = buf; + size = fixed_size; +} + +template inline AutoBuffer<_Tp, fixed_size>::AutoBuffer(size_t _size) +{ + ptr = buf; + size = fixed_size; + allocate(_size); +} + +template inline AutoBuffer<_Tp, fixed_size>::~AutoBuffer() +{ deallocate(); } + +template inline void AutoBuffer<_Tp, fixed_size>::allocate(size_t _size) +{ + if(_size <= size) + return; + deallocate(); + if(_size > fixed_size) + { + ptr = cv::allocate<_Tp>(_size); + size = _size; + } +} + +template inline void AutoBuffer<_Tp, fixed_size>::deallocate() +{ + if( ptr != buf ) + { + cv::deallocate<_Tp>(ptr, size); + ptr = buf; + size = fixed_size; + } +} + +template inline AutoBuffer<_Tp, fixed_size>::operator _Tp* () +{ return ptr; } + +template inline AutoBuffer<_Tp, fixed_size>::operator const _Tp* () const +{ return ptr; } + + +/////////////////////////////////// Ptr //////////////////////////////////////// + +template inline Ptr<_Tp>::Ptr() : obj(0), refcount(0) {} +template inline Ptr<_Tp>::Ptr(_Tp* _obj) : obj(_obj) +{ + if(obj) + { + refcount = (int*)fastMalloc(sizeof(*refcount)); + *refcount = 1; + } + else + refcount = 0; +} + +template inline void Ptr<_Tp>::addref() +{ if( refcount ) CV_XADD(refcount, 1); } + +template inline void Ptr<_Tp>::release() +{ + if( refcount && CV_XADD(refcount, -1) == 1 ) + { + 
delete_obj(); + fastFree(refcount); + } + refcount = 0; + obj = 0; +} + +template inline void Ptr<_Tp>::delete_obj() +{ + if( obj ) delete obj; +} + +template inline Ptr<_Tp>::~Ptr() { release(); } + +template inline Ptr<_Tp>::Ptr(const Ptr<_Tp>& _ptr) +{ + obj = _ptr.obj; + refcount = _ptr.refcount; + addref(); +} + +template inline Ptr<_Tp>& Ptr<_Tp>::operator = (const Ptr<_Tp>& _ptr) +{ + if (this != &_ptr) + { + int* _refcount = _ptr.refcount; + if( _refcount ) + CV_XADD(_refcount, 1); + release(); + obj = _ptr.obj; + refcount = _refcount; + } + return *this; +} + +template inline _Tp* Ptr<_Tp>::operator -> () { return obj; } +template inline const _Tp* Ptr<_Tp>::operator -> () const { return obj; } + +template inline Ptr<_Tp>::operator _Tp* () { return obj; } +template inline Ptr<_Tp>::operator const _Tp*() const { return obj; } + +template inline bool Ptr<_Tp>::empty() const { return obj == 0; } + +template template Ptr<_Tp>::Ptr(const Ptr<_Tp2>& p) + : obj(0), refcount(0) +{ + if (p.empty()) + return; + + _Tp* p_casted = dynamic_cast<_Tp*>(p.obj); + if (!p_casted) + return; + + obj = p_casted; + refcount = p.refcount; + addref(); +} + +template template inline Ptr<_Tp2> Ptr<_Tp>::ptr() +{ + Ptr<_Tp2> p; + if( !obj ) + return p; + + _Tp2* obj_casted = dynamic_cast<_Tp2*>(obj); + if (!obj_casted) + return p; + + if( refcount ) + CV_XADD(refcount, 1); + + p.obj = obj_casted; + p.refcount = refcount; + return p; +} + +template template inline const Ptr<_Tp2> Ptr<_Tp>::ptr() const +{ + Ptr<_Tp2> p; + if( !obj ) + return p; + + _Tp2* obj_casted = dynamic_cast<_Tp2*>(obj); + if (!obj_casted) + return p; + + if( refcount ) + CV_XADD(refcount, 1); + + p.obj = obj_casted; + p.refcount = refcount; + return p; +} + +template +Ptr makePtr() +{ + return Ptr(new T()); +} + +template +Ptr makePtr(const A1& a1) +{ + return Ptr(new T(a1)); +} + +template +Ptr makePtr(const A1& a1, const A2& a2) +{ + return Ptr(new T(a1, a2)); +} + +template +Ptr makePtr(const A1& a1, const A2& a2, const A3& a3) +{ + return Ptr(new T(a1, a2, a3)); +} + +template +Ptr makePtr(const A1& a1, const A2& a2, const A3& a3, const A4& a4) +{ + return Ptr(new T(a1, a2, a3, a4)); +} + +template +Ptr makePtr(const A1& a1, const A2& a2, const A3& a3, const A4& a4, const A5& a5) +{ + return Ptr(new T(a1, a2, a3, a4, a5)); +} + +template +Ptr makePtr(const A1& a1, const A2& a2, const A3& a3, const A4& a4, const A5& a5, const A6& a6) +{ + return Ptr(new T(a1, a2, a3, a4, a5, a6)); +} + +template +Ptr makePtr(const A1& a1, const A2& a2, const A3& a3, const A4& a4, const A5& a5, const A6& a6, const A7& a7) +{ + return Ptr(new T(a1, a2, a3, a4, a5, a6, a7)); +} + +template +Ptr makePtr(const A1& a1, const A2& a2, const A3& a3, const A4& a4, const A5& a5, const A6& a6, const A7& a7, const A8& a8) +{ + return Ptr(new T(a1, a2, a3, a4, a5, a6, a7, a8)); +} + +template +Ptr makePtr(const A1& a1, const A2& a2, const A3& a3, const A4& a4, const A5& a5, const A6& a6, const A7& a7, const A8& a8, const A9& a9) +{ + return Ptr(new T(a1, a2, a3, a4, a5, a6, a7, a8, a9)); +} + +template +Ptr makePtr(const A1& a1, const A2& a2, const A3& a3, const A4& a4, const A5& a5, const A6& a6, const A7& a7, const A8& a8, const A9& a9, const A10& a10) +{ + return Ptr(new T(a1, a2, a3, a4, a5, a6, a7, a8, a9, a10)); +} + +//// specializied implementations of Ptr::delete_obj() for classic OpenCV types + +template<> CV_EXPORTS void Ptr::delete_obj(); +template<> CV_EXPORTS void Ptr::delete_obj(); +template<> CV_EXPORTS void Ptr::delete_obj(); +template<> 
CV_EXPORTS void Ptr::delete_obj(); +template<> CV_EXPORTS void Ptr::delete_obj(); +template<> CV_EXPORTS void Ptr::delete_obj(); + +//////////////////////////////////////// XML & YAML I/O //////////////////////////////////// + +CV_EXPORTS_W void write( FileStorage& fs, const string& name, int value ); +CV_EXPORTS_W void write( FileStorage& fs, const string& name, float value ); +CV_EXPORTS_W void write( FileStorage& fs, const string& name, double value ); +CV_EXPORTS_W void write( FileStorage& fs, const string& name, const string& value ); + +template inline void write(FileStorage& fs, const _Tp& value) +{ write(fs, string(), value); } + +CV_EXPORTS void writeScalar( FileStorage& fs, int value ); +CV_EXPORTS void writeScalar( FileStorage& fs, float value ); +CV_EXPORTS void writeScalar( FileStorage& fs, double value ); +CV_EXPORTS void writeScalar( FileStorage& fs, const string& value ); + +template<> inline void write( FileStorage& fs, const int& value ) +{ + writeScalar(fs, value); +} + +template<> inline void write( FileStorage& fs, const float& value ) +{ + writeScalar(fs, value); +} + +template<> inline void write( FileStorage& fs, const double& value ) +{ + writeScalar(fs, value); +} + +template<> inline void write( FileStorage& fs, const string& value ) +{ + writeScalar(fs, value); +} + +template inline void write(FileStorage& fs, const Point_<_Tp>& pt ) +{ + write(fs, pt.x); + write(fs, pt.y); +} + +template inline void write(FileStorage& fs, const Point3_<_Tp>& pt ) +{ + write(fs, pt.x); + write(fs, pt.y); + write(fs, pt.z); +} + +template inline void write(FileStorage& fs, const Size_<_Tp>& sz ) +{ + write(fs, sz.width); + write(fs, sz.height); +} + +template inline void write(FileStorage& fs, const Complex<_Tp>& c ) +{ + write(fs, c.re); + write(fs, c.im); +} + +template inline void write(FileStorage& fs, const Rect_<_Tp>& r ) +{ + write(fs, r.x); + write(fs, r.y); + write(fs, r.width); + write(fs, r.height); +} + +template inline void write(FileStorage& fs, const Vec<_Tp, cn>& v ) +{ + for(int i = 0; i < cn; i++) + write(fs, v.val[i]); +} + +template inline void write(FileStorage& fs, const Scalar_<_Tp>& s ) +{ + write(fs, s.val[0]); + write(fs, s.val[1]); + write(fs, s.val[2]); + write(fs, s.val[3]); +} + +inline void write(FileStorage& fs, const Range& r ) +{ + write(fs, r.start); + write(fs, r.end); +} + +class CV_EXPORTS WriteStructContext +{ +public: + WriteStructContext(FileStorage& _fs, const string& name, + int flags, const string& typeName=string()); + ~WriteStructContext(); + FileStorage* fs; +}; + +template inline void write(FileStorage& fs, const string& name, const Point_<_Tp>& pt ) +{ + WriteStructContext ws(fs, name, CV_NODE_SEQ+CV_NODE_FLOW); + write(fs, pt.x); + write(fs, pt.y); +} + +template inline void write(FileStorage& fs, const string& name, const Point3_<_Tp>& pt ) +{ + WriteStructContext ws(fs, name, CV_NODE_SEQ+CV_NODE_FLOW); + write(fs, pt.x); + write(fs, pt.y); + write(fs, pt.z); +} + +template inline void write(FileStorage& fs, const string& name, const Size_<_Tp>& sz ) +{ + WriteStructContext ws(fs, name, CV_NODE_SEQ+CV_NODE_FLOW); + write(fs, sz.width); + write(fs, sz.height); +} + +template inline void write(FileStorage& fs, const string& name, const Complex<_Tp>& c ) +{ + WriteStructContext ws(fs, name, CV_NODE_SEQ+CV_NODE_FLOW); + write(fs, c.re); + write(fs, c.im); +} + +template inline void write(FileStorage& fs, const string& name, const Rect_<_Tp>& r ) +{ + WriteStructContext ws(fs, name, CV_NODE_SEQ+CV_NODE_FLOW); + write(fs, r.x); + 
write(fs, r.y); + write(fs, r.width); + write(fs, r.height); +} + +template inline void write(FileStorage& fs, const string& name, const Vec<_Tp, cn>& v ) +{ + WriteStructContext ws(fs, name, CV_NODE_SEQ+CV_NODE_FLOW); + for(int i = 0; i < cn; i++) + write(fs, v.val[i]); +} + +template inline void write(FileStorage& fs, const string& name, const Scalar_<_Tp>& s ) +{ + WriteStructContext ws(fs, name, CV_NODE_SEQ+CV_NODE_FLOW); + write(fs, s.val[0]); + write(fs, s.val[1]); + write(fs, s.val[2]); + write(fs, s.val[3]); +} + +inline void write(FileStorage& fs, const string& name, const Range& r ) +{ + WriteStructContext ws(fs, name, CV_NODE_SEQ+CV_NODE_FLOW); + write(fs, r.start); + write(fs, r.end); +} + +template class VecWriterProxy +{ +public: + VecWriterProxy( FileStorage* _fs ) : fs(_fs) {} + void operator()(const vector<_Tp>& vec) const + { + size_t i, count = vec.size(); + for( i = 0; i < count; i++ ) + write( *fs, vec[i] ); + } + FileStorage* fs; +}; + +template class VecWriterProxy<_Tp,1> +{ +public: + VecWriterProxy( FileStorage* _fs ) : fs(_fs) {} + void operator()(const vector<_Tp>& vec) const + { + int _fmt = DataType<_Tp>::fmt; + char fmt[] = { (char)((_fmt>>8)+'1'), (char)_fmt, '\0' }; + fs->writeRaw( string(fmt), !vec.empty() ? (uchar*)&vec[0] : 0, vec.size()*sizeof(_Tp) ); + } + FileStorage* fs; +}; + +template static inline void write( FileStorage& fs, const vector<_Tp>& vec ) +{ + VecWriterProxy<_Tp, DataType<_Tp>::fmt != 0> w(&fs); + w(vec); +} + +template static inline void write( FileStorage& fs, const string& name, + const vector<_Tp>& vec ) +{ + WriteStructContext ws(fs, name, CV_NODE_SEQ+(DataType<_Tp>::fmt != 0 ? CV_NODE_FLOW : 0)); + write(fs, vec); +} + +CV_EXPORTS_W void write( FileStorage& fs, const string& name, const Mat& value ); +CV_EXPORTS void write( FileStorage& fs, const string& name, const SparseMat& value ); + +template static inline FileStorage& operator << (FileStorage& fs, const _Tp& value) +{ + if( !fs.isOpened() ) + return fs; + if( fs.state == FileStorage::NAME_EXPECTED + FileStorage::INSIDE_MAP ) + CV_Error( CV_StsError, "No element name has been given" ); + write( fs, fs.elname, value ); + if( fs.state & FileStorage::INSIDE_MAP ) + fs.state = FileStorage::NAME_EXPECTED + FileStorage::INSIDE_MAP; + return fs; +} + +CV_EXPORTS FileStorage& operator << (FileStorage& fs, const string& str); + +static inline FileStorage& operator << (FileStorage& fs, const char* str) +{ return (fs << string(str)); } + +static inline FileStorage& operator << (FileStorage& fs, char* value) +{ return (fs << string(value)); } + +inline FileNode::FileNode() : fs(0), node(0) {} +inline FileNode::FileNode(const CvFileStorage* _fs, const CvFileNode* _node) + : fs(_fs), node(_node) {} + +inline FileNode::FileNode(const FileNode& _node) : fs(_node.fs), node(_node.node) {} + +inline int FileNode::type() const { return !node ? NONE : (node->tag & TYPE_MASK); } +inline bool FileNode::empty() const { return node == 0; } +inline bool FileNode::isNone() const { return type() == NONE; } +inline bool FileNode::isSeq() const { return type() == SEQ; } +inline bool FileNode::isMap() const { return type() == MAP; } +inline bool FileNode::isInt() const { return type() == INT; } +inline bool FileNode::isReal() const { return type() == REAL; } +inline bool FileNode::isString() const { return type() == STR; } +inline bool FileNode::isNamed() const { return !node ? false : (node->tag & NAMED) != 0; } +inline size_t FileNode::size() const +{ + int t = type(); + return t == MAP ? 
(size_t)((CvSet*)node->data.map)->active_count : + t == SEQ ? (size_t)node->data.seq->total : (size_t)!isNone(); +} + +inline CvFileNode* FileNode::operator *() { return (CvFileNode*)node; } +inline const CvFileNode* FileNode::operator* () const { return node; } + +static inline void read(const FileNode& node, int& value, int default_value) +{ + value = !node.node ? default_value : + CV_NODE_IS_INT(node.node->tag) ? node.node->data.i : + CV_NODE_IS_REAL(node.node->tag) ? cvRound(node.node->data.f) : 0x7fffffff; +} + +static inline void read(const FileNode& node, bool& value, bool default_value) +{ + int temp; read(node, temp, (int)default_value); + value = temp != 0; +} + +static inline void read(const FileNode& node, uchar& value, uchar default_value) +{ + int temp; read(node, temp, (int)default_value); + value = saturate_cast(temp); +} + +static inline void read(const FileNode& node, schar& value, schar default_value) +{ + int temp; read(node, temp, (int)default_value); + value = saturate_cast(temp); +} + +static inline void read(const FileNode& node, ushort& value, ushort default_value) +{ + int temp; read(node, temp, (int)default_value); + value = saturate_cast(temp); +} + +static inline void read(const FileNode& node, short& value, short default_value) +{ + int temp; read(node, temp, (int)default_value); + value = saturate_cast(temp); +} + +static inline void read(const FileNode& node, float& value, float default_value) +{ + value = !node.node ? default_value : + CV_NODE_IS_INT(node.node->tag) ? (float)node.node->data.i : + CV_NODE_IS_REAL(node.node->tag) ? (float)node.node->data.f : 1e30f; +} + +static inline void read(const FileNode& node, double& value, double default_value) +{ + value = !node.node ? default_value : + CV_NODE_IS_INT(node.node->tag) ? (double)node.node->data.i : + CV_NODE_IS_REAL(node.node->tag) ? node.node->data.f : 1e300; +} + +static inline void read(const FileNode& node, string& value, const string& default_value) +{ + value = !node.node ? default_value : CV_NODE_IS_STRING(node.node->tag) ? string(node.node->data.str.ptr) : string(""); +} + +template static inline void read(const FileNode& node, Point_<_Tp>& value, const Point_<_Tp>& default_value) +{ + vector<_Tp> temp; FileNodeIterator it = node.begin(); it >> temp; + value = temp.size() != 2 ? default_value : Point_<_Tp>(saturate_cast<_Tp>(temp[0]), saturate_cast<_Tp>(temp[1])); +} + +template static inline void read(const FileNode& node, Point3_<_Tp>& value, const Point3_<_Tp>& default_value) +{ + vector<_Tp> temp; FileNodeIterator it = node.begin(); it >> temp; + value = temp.size() != 3 ? default_value : Point3_<_Tp>(saturate_cast<_Tp>(temp[0]), saturate_cast<_Tp>(temp[1]), + saturate_cast<_Tp>(temp[2])); +} + +template static inline void read(const FileNode& node, Size_<_Tp>& value, const Size_<_Tp>& default_value) +{ + vector<_Tp> temp; FileNodeIterator it = node.begin(); it >> temp; + value = temp.size() != 2 ? default_value : Size_<_Tp>(saturate_cast<_Tp>(temp[0]), saturate_cast<_Tp>(temp[1])); +} + +template static inline void read(const FileNode& node, Complex<_Tp>& value, const Complex<_Tp>& default_value) +{ + vector<_Tp> temp; FileNodeIterator it = node.begin(); it >> temp; + value = temp.size() != 2 ? default_value : Complex<_Tp>(saturate_cast<_Tp>(temp[0]), saturate_cast<_Tp>(temp[1])); +} + +template static inline void read(const FileNode& node, Rect_<_Tp>& value, const Rect_<_Tp>& default_value) +{ + vector<_Tp> temp; FileNodeIterator it = node.begin(); it >> temp; + value = temp.size() != 4 ? 
default_value : Rect_<_Tp>(saturate_cast<_Tp>(temp[0]), saturate_cast<_Tp>(temp[1]), + saturate_cast<_Tp>(temp[2]), saturate_cast<_Tp>(temp[3])); +} + +template static inline void read(const FileNode& node, Vec<_Tp, cn>& value, const Vec<_Tp, cn>& default_value) +{ + vector<_Tp> temp; FileNodeIterator it = node.begin(); it >> temp; + value = temp.size() != cn ? default_value : Vec<_Tp, cn>(&temp[0]); +} + +template static inline void read(const FileNode& node, Scalar_<_Tp>& value, const Scalar_<_Tp>& default_value) +{ + vector<_Tp> temp; FileNodeIterator it = node.begin(); it >> temp; + value = temp.size() != 4 ? default_value : Scalar_<_Tp>(saturate_cast<_Tp>(temp[0]), saturate_cast<_Tp>(temp[1]), + saturate_cast<_Tp>(temp[2]), saturate_cast<_Tp>(temp[3])); +} + +static inline void read(const FileNode& node, Range& value, const Range& default_value) +{ + Point2i temp(value.start, value.end); const Point2i default_temp = Point2i(default_value.start, default_value.end); + read(node, temp, default_temp); + value.start = temp.x; value.end = temp.y; +} + +CV_EXPORTS_W void read(const FileNode& node, Mat& mat, const Mat& default_mat=Mat() ); +CV_EXPORTS void read(const FileNode& node, SparseMat& mat, const SparseMat& default_mat=SparseMat() ); + +inline FileNode::operator int() const +{ + int value; + read(*this, value, 0); + return value; +} +inline FileNode::operator float() const +{ + float value; + read(*this, value, 0.f); + return value; +} +inline FileNode::operator double() const +{ + double value; + read(*this, value, 0.); + return value; +} +inline FileNode::operator string() const +{ + string value; + read(*this, value, value); + return value; +} + +inline void FileNode::readRaw( const string& fmt, uchar* vec, size_t len ) const +{ + begin().readRaw( fmt, vec, len ); +} + +template class VecReaderProxy +{ +public: + VecReaderProxy( FileNodeIterator* _it ) : it(_it) {} + void operator()(vector<_Tp>& vec, size_t count) const + { + count = std::min(count, it->remaining); + vec.resize(count); + for( size_t i = 0; i < count; i++, ++(*it) ) + read(**it, vec[i], _Tp()); + } + FileNodeIterator* it; +}; + +template class VecReaderProxy<_Tp,1> +{ +public: + VecReaderProxy( FileNodeIterator* _it ) : it(_it) {} + void operator()(vector<_Tp>& vec, size_t count) const + { + size_t remaining = it->remaining, cn = DataType<_Tp>::channels; + int _fmt = DataType<_Tp>::fmt; + char fmt[] = { (char)((_fmt>>8)+'1'), (char)_fmt, '\0' }; + size_t remaining1 = remaining/cn; + count = count < remaining1 ? count : remaining1; + vec.resize(count); + it->readRaw( string(fmt), !vec.empty() ? 
(uchar*)&vec[0] : 0, count*sizeof(_Tp) ); + } + FileNodeIterator* it; +}; + +template static inline void +read( FileNodeIterator& it, vector<_Tp>& vec, size_t maxCount=(size_t)INT_MAX ) +{ + VecReaderProxy<_Tp, DataType<_Tp>::fmt != 0> r(&it); + r(vec, maxCount); +} + +template static inline void +read( const FileNode& node, vector<_Tp>& vec, const vector<_Tp>& default_value=vector<_Tp>() ) +{ + if(!node.node) + vec = default_value; + else + { + FileNodeIterator it = node.begin(); + read( it, vec ); + } +} + +inline FileNodeIterator FileNode::begin() const +{ + return FileNodeIterator(fs, node); +} + +inline FileNodeIterator FileNode::end() const +{ + return FileNodeIterator(fs, node, size()); +} + +inline FileNode FileNodeIterator::operator *() const +{ return FileNode(fs, (const CvFileNode*)(void*)reader.ptr); } + +inline FileNode FileNodeIterator::operator ->() const +{ return FileNode(fs, (const CvFileNode*)(void*)reader.ptr); } + +template static inline FileNodeIterator& operator >> (FileNodeIterator& it, _Tp& value) +{ read( *it, value, _Tp()); return ++it; } + +template static inline +FileNodeIterator& operator >> (FileNodeIterator& it, vector<_Tp>& vec) +{ + VecReaderProxy<_Tp, DataType<_Tp>::fmt != 0> r(&it); + r(vec, (size_t)INT_MAX); + return it; +} + +template static inline void operator >> (const FileNode& n, _Tp& value) +{ read( n, value, _Tp()); } + +template static inline void operator >> (const FileNode& n, vector<_Tp>& vec) +{ FileNodeIterator it = n.begin(); it >> vec; } + +static inline bool operator == (const FileNodeIterator& it1, const FileNodeIterator& it2) +{ + return it1.fs == it2.fs && it1.container == it2.container && + it1.reader.ptr == it2.reader.ptr && it1.remaining == it2.remaining; +} + +static inline bool operator != (const FileNodeIterator& it1, const FileNodeIterator& it2) +{ + return !(it1 == it2); +} + +static inline ptrdiff_t operator - (const FileNodeIterator& it1, const FileNodeIterator& it2) +{ + return it2.remaining - it1.remaining; +} + +static inline bool operator < (const FileNodeIterator& it1, const FileNodeIterator& it2) +{ + return it1.remaining > it2.remaining; +} + +inline FileNode FileStorage::getFirstTopLevelNode() const +{ + FileNode r = root(); + FileNodeIterator it = r.begin(); + return it != r.end() ? *it : FileNode(); +} + +//////////////////////////////////////// Various algorithms //////////////////////////////////// + +template static inline _Tp gcd(_Tp a, _Tp b) +{ + if( a < b ) + std::swap(a, b); + while( b > 0 ) + { + _Tp r = a % b; + a = b; + b = r; + } + return a; +} + +/****************************************************************************************\ + + Generic implementation of QuickSort algorithm + Use it as: vector<_Tp> a; ... sort(a,); + + The current implementation was derived from *BSD system qsort(): + + * Copyright (c) 1992, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. 
All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + +\****************************************************************************************/ + +template void sort( vector<_Tp>& vec, _LT LT=_LT() ) +{ + int isort_thresh = 7; + int sp = 0; + + struct + { + _Tp *lb; + _Tp *ub; + } stack[48]; + + size_t total = vec.size(); + + if( total <= 1 ) + return; + + _Tp* arr = &vec[0]; + stack[0].lb = arr; + stack[0].ub = arr + (total - 1); + + while( sp >= 0 ) + { + _Tp* left = stack[sp].lb; + _Tp* right = stack[sp--].ub; + + for(;;) + { + int i, n = (int)(right - left) + 1, m; + _Tp* ptr; + _Tp* ptr2; + + if( n <= isort_thresh ) + { + insert_sort: + for( ptr = left + 1; ptr <= right; ptr++ ) + { + for( ptr2 = ptr; ptr2 > left && LT(ptr2[0],ptr2[-1]); ptr2--) + std::swap( ptr2[0], ptr2[-1] ); + } + break; + } + else + { + _Tp* left0; + _Tp* left1; + _Tp* right0; + _Tp* right1; + _Tp* pivot; + _Tp* a; + _Tp* b; + _Tp* c; + int swap_cnt = 0; + + left0 = left; + right0 = right; + pivot = left + (n/2); + + if( n > 40 ) + { + int d = n / 8; + a = left, b = left + d, c = left + 2*d; + left = LT(*a, *b) ? (LT(*b, *c) ? b : (LT(*a, *c) ? c : a)) + : (LT(*c, *b) ? b : (LT(*a, *c) ? a : c)); + + a = pivot - d, b = pivot, c = pivot + d; + pivot = LT(*a, *b) ? (LT(*b, *c) ? b : (LT(*a, *c) ? c : a)) + : (LT(*c, *b) ? b : (LT(*a, *c) ? a : c)); + + a = right - 2*d, b = right - d, c = right; + right = LT(*a, *b) ? (LT(*b, *c) ? b : (LT(*a, *c) ? c : a)) + : (LT(*c, *b) ? b : (LT(*a, *c) ? a : c)); + } + + a = left, b = pivot, c = right; + pivot = LT(*a, *b) ? (LT(*b, *c) ? b : (LT(*a, *c) ? c : a)) + : (LT(*c, *b) ? b : (LT(*a, *c) ? 
a : c)); + if( pivot != left0 ) + { + std::swap( *pivot, *left0 ); + pivot = left0; + } + left = left1 = left0 + 1; + right = right1 = right0; + + for(;;) + { + while( left <= right && !LT(*pivot, *left) ) + { + if( !LT(*left, *pivot) ) + { + if( left > left1 ) + std::swap( *left1, *left ); + swap_cnt = 1; + left1++; + } + left++; + } + + while( left <= right && !LT(*right, *pivot) ) + { + if( !LT(*pivot, *right) ) + { + if( right < right1 ) + std::swap( *right1, *right ); + swap_cnt = 1; + right1--; + } + right--; + } + + if( left > right ) + break; + std::swap( *left, *right ); + swap_cnt = 1; + left++; + right--; + } + + if( swap_cnt == 0 ) + { + left = left0, right = right0; + goto insert_sort; + } + + n = std::min( (int)(left1 - left0), (int)(left - left1) ); + for( i = 0; i < n; i++ ) + std::swap( left0[i], left[i-n] ); + + n = std::min( (int)(right0 - right1), (int)(right1 - right) ); + for( i = 0; i < n; i++ ) + std::swap( left[i], right0[i-n+1] ); + n = (int)(left - left1); + m = (int)(right1 - right); + if( n > 1 ) + { + if( m > 1 ) + { + if( n > m ) + { + stack[++sp].lb = left0; + stack[sp].ub = left0 + n - 1; + left = right0 - m + 1, right = right0; + } + else + { + stack[++sp].lb = right0 - m + 1; + stack[sp].ub = right0; + left = left0, right = left0 + n - 1; + } + } + else + left = left0, right = left0 + n - 1; + } + else if( m > 1 ) + left = right0 - m + 1, right = right0; + else + break; + } + } + } +} + +template class LessThan +{ +public: + bool operator()(const _Tp& a, const _Tp& b) const { return a < b; } +}; + +template class GreaterEq +{ +public: + bool operator()(const _Tp& a, const _Tp& b) const { return a >= b; } +}; + +template class LessThanIdx +{ +public: + LessThanIdx( const _Tp* _arr ) : arr(_arr) {} + bool operator()(int a, int b) const { return arr[a] < arr[b]; } + const _Tp* arr; +}; + +template class GreaterEqIdx +{ +public: + GreaterEqIdx( const _Tp* _arr ) : arr(_arr) {} + bool operator()(int a, int b) const { return arr[a] >= arr[b]; } + const _Tp* arr; +}; + + +// This function splits the input sequence or set into one or more equivalence classes and +// returns the vector of labels - 0-based class indexes for each element. +// predicate(a,b) returns true if the two sequence elements certainly belong to the same class. 
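+//
+// A minimal usage sketch (illustrative only; the IsNear predicate below is a made-up
+// example and is not part of this header):
+//
+//   struct IsNear { bool operator()(int a, int b) const { return std::abs(a - b) <= 1; } };
+//   std::vector<int> vals;            // e.g. 1, 2, 10, 11
+//   std::vector<int> labels;
+//   int nclasses = cv::partition(vals, labels, IsNear());  // -> 2 classes: {1,2} and {10,11}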
+// +// The algorithm is described in "Introduction to Algorithms" +// by Cormen, Leiserson and Rivest, the chapter "Data structures for disjoint sets" +template int +partition( const vector<_Tp>& _vec, vector& labels, + _EqPredicate predicate=_EqPredicate()) +{ + int i, j, N = (int)_vec.size(); + const _Tp* vec = &_vec[0]; + + const int PARENT=0; + const int RANK=1; + + vector _nodes(N*2); + int (*nodes)[2] = (int(*)[2])&_nodes[0]; + + // The first O(N) pass: create N single-vertex trees + for(i = 0; i < N; i++) + { + nodes[i][PARENT]=-1; + nodes[i][RANK] = 0; + } + + // The main O(N^2) pass: merge connected components + for( i = 0; i < N; i++ ) + { + int root = i; + + // find root + while( nodes[root][PARENT] >= 0 ) + root = nodes[root][PARENT]; + + for( j = 0; j < N; j++ ) + { + if( i == j || !predicate(vec[i], vec[j])) + continue; + int root2 = j; + + while( nodes[root2][PARENT] >= 0 ) + root2 = nodes[root2][PARENT]; + + if( root2 != root ) + { + // unite both trees + int rank = nodes[root][RANK], rank2 = nodes[root2][RANK]; + if( rank > rank2 ) + nodes[root2][PARENT] = root; + else + { + nodes[root][PARENT] = root2; + nodes[root2][RANK] += rank == rank2; + root = root2; + } + assert( nodes[root][PARENT] < 0 ); + + int k = j, parent; + + // compress the path from node2 to root + while( (parent = nodes[k][PARENT]) >= 0 ) + { + nodes[k][PARENT] = root; + k = parent; + } + + // compress the path from node to root + k = i; + while( (parent = nodes[k][PARENT]) >= 0 ) + { + nodes[k][PARENT] = root; + k = parent; + } + } + } + } + + // Final O(N) pass: enumerate classes + labels.resize(N); + int nclasses = 0; + + for( i = 0; i < N; i++ ) + { + int root = i; + while( nodes[root][PARENT] >= 0 ) + root = nodes[root][PARENT]; + // re-use the rank as the class label + if( nodes[root][RANK] >= 0 ) + nodes[root][RANK] = ~nclasses++; + labels[i] = ~nodes[root][RANK]; + } + + return nclasses; +} + + +////////////////////////////////////////////////////////////////////////////// + +// bridge C++ => C Seq API +CV_EXPORTS schar* seqPush( CvSeq* seq, const void* element=0); +CV_EXPORTS schar* seqPushFront( CvSeq* seq, const void* element=0); +CV_EXPORTS void seqPop( CvSeq* seq, void* element=0); +CV_EXPORTS void seqPopFront( CvSeq* seq, void* element=0); +CV_EXPORTS void seqPopMulti( CvSeq* seq, void* elements, + int count, int in_front=0 ); +CV_EXPORTS void seqRemove( CvSeq* seq, int index ); +CV_EXPORTS void clearSeq( CvSeq* seq ); +CV_EXPORTS schar* getSeqElem( const CvSeq* seq, int index ); +CV_EXPORTS void seqRemoveSlice( CvSeq* seq, CvSlice slice ); +CV_EXPORTS void seqInsertSlice( CvSeq* seq, int before_index, const CvArr* from_arr ); + +template inline Seq<_Tp>::Seq() : seq(0) {} +template inline Seq<_Tp>::Seq( const CvSeq* _seq ) : seq((CvSeq*)_seq) +{ + CV_Assert(!_seq || _seq->elem_size == sizeof(_Tp)); +} + +template inline Seq<_Tp>::Seq( MemStorage& storage, + int headerSize ) +{ + CV_Assert(headerSize >= (int)sizeof(CvSeq)); + seq = cvCreateSeq(DataType<_Tp>::type, headerSize, sizeof(_Tp), storage); +} + +template inline _Tp& Seq<_Tp>::operator [](int idx) +{ return *(_Tp*)getSeqElem(seq, idx); } + +template inline const _Tp& Seq<_Tp>::operator [](int idx) const +{ return *(_Tp*)getSeqElem(seq, idx); } + +template inline SeqIterator<_Tp> Seq<_Tp>::begin() const +{ return SeqIterator<_Tp>(*this); } + +template inline SeqIterator<_Tp> Seq<_Tp>::end() const +{ return SeqIterator<_Tp>(*this, true); } + +template inline size_t Seq<_Tp>::size() const +{ return seq ? 
seq->total : 0; } + +template inline int Seq<_Tp>::type() const +{ return seq ? CV_MAT_TYPE(seq->flags) : 0; } + +template inline int Seq<_Tp>::depth() const +{ return seq ? CV_MAT_DEPTH(seq->flags) : 0; } + +template inline int Seq<_Tp>::channels() const +{ return seq ? CV_MAT_CN(seq->flags) : 0; } + +template inline size_t Seq<_Tp>::elemSize() const +{ return seq ? seq->elem_size : 0; } + +template inline size_t Seq<_Tp>::index(const _Tp& elem) const +{ return cvSeqElemIdx(seq, &elem); } + +template inline void Seq<_Tp>::push_back(const _Tp& elem) +{ cvSeqPush(seq, &elem); } + +template inline void Seq<_Tp>::push_front(const _Tp& elem) +{ cvSeqPushFront(seq, &elem); } + +template inline void Seq<_Tp>::push_back(const _Tp* elem, size_t count) +{ cvSeqPushMulti(seq, elem, (int)count, 0); } + +template inline void Seq<_Tp>::push_front(const _Tp* elem, size_t count) +{ cvSeqPushMulti(seq, elem, (int)count, 1); } + +template inline _Tp& Seq<_Tp>::back() +{ return *(_Tp*)getSeqElem(seq, -1); } + +template inline const _Tp& Seq<_Tp>::back() const +{ return *(const _Tp*)getSeqElem(seq, -1); } + +template inline _Tp& Seq<_Tp>::front() +{ return *(_Tp*)getSeqElem(seq, 0); } + +template inline const _Tp& Seq<_Tp>::front() const +{ return *(const _Tp*)getSeqElem(seq, 0); } + +template inline bool Seq<_Tp>::empty() const +{ return !seq || seq->total == 0; } + +template inline void Seq<_Tp>::clear() +{ if(seq) clearSeq(seq); } + +template inline void Seq<_Tp>::pop_back() +{ seqPop(seq); } + +template inline void Seq<_Tp>::pop_front() +{ seqPopFront(seq); } + +template inline void Seq<_Tp>::pop_back(_Tp* elem, size_t count) +{ seqPopMulti(seq, elem, (int)count, 0); } + +template inline void Seq<_Tp>::pop_front(_Tp* elem, size_t count) +{ seqPopMulti(seq, elem, (int)count, 1); } + +template inline void Seq<_Tp>::insert(int idx, const _Tp& elem) +{ seqInsert(seq, idx, &elem); } + +template inline void Seq<_Tp>::insert(int idx, const _Tp* elems, size_t count) +{ + CvMat m = cvMat(1, count, DataType<_Tp>::type, elems); + seqInsertSlice(seq, idx, &m); +} + +template inline void Seq<_Tp>::remove(int idx) +{ seqRemove(seq, idx); } + +template inline void Seq<_Tp>::remove(const Range& r) +{ seqRemoveSlice(seq, r); } + +template inline void Seq<_Tp>::copyTo(vector<_Tp>& vec, const Range& range) const +{ + size_t len = !seq ? 0 : range == Range::all() ? seq->total : range.end - range.start; + vec.resize(len); + if( seq && len ) + cvCvtSeqToArray(seq, &vec[0], range); +} + +template inline Seq<_Tp>::operator vector<_Tp>() const +{ + vector<_Tp> vec; + copyTo(vec); + return vec; +} + +template inline SeqIterator<_Tp>::SeqIterator() +{ memset(this, 0, sizeof(*this)); } + +template inline SeqIterator<_Tp>::SeqIterator(const Seq<_Tp>& _seq, bool seekEnd) +{ + cvStartReadSeq(_seq.seq, this); + index = seekEnd ? 
_seq.seq->total : 0; +} + +template inline void SeqIterator<_Tp>::seek(size_t pos) +{ + cvSetSeqReaderPos(this, (int)pos, false); + index = pos; +} + +template inline size_t SeqIterator<_Tp>::tell() const +{ return index; } + +template inline _Tp& SeqIterator<_Tp>::operator *() +{ return *(_Tp*)ptr; } + +template inline const _Tp& SeqIterator<_Tp>::operator *() const +{ return *(const _Tp*)ptr; } + +template inline SeqIterator<_Tp>& SeqIterator<_Tp>::operator ++() +{ + CV_NEXT_SEQ_ELEM(sizeof(_Tp), *this); + if( ++index >= seq->total*2 ) + index = 0; + return *this; +} + +template inline SeqIterator<_Tp> SeqIterator<_Tp>::operator ++(int) const +{ + SeqIterator<_Tp> it = *this; + ++*this; + return it; +} + +template inline SeqIterator<_Tp>& SeqIterator<_Tp>::operator --() +{ + CV_PREV_SEQ_ELEM(sizeof(_Tp), *this); + if( --index < 0 ) + index = seq->total*2-1; + return *this; +} + +template inline SeqIterator<_Tp> SeqIterator<_Tp>::operator --(int) const +{ + SeqIterator<_Tp> it = *this; + --*this; + return it; +} + +template inline SeqIterator<_Tp>& SeqIterator<_Tp>::operator +=(int delta) +{ + cvSetSeqReaderPos(this, delta, 1); + index += delta; + int n = seq->total*2; + if( index < 0 ) + index += n; + if( index >= n ) + index -= n; + return *this; +} + +template inline SeqIterator<_Tp>& SeqIterator<_Tp>::operator -=(int delta) +{ + return (*this += -delta); +} + +template inline ptrdiff_t operator - (const SeqIterator<_Tp>& a, + const SeqIterator<_Tp>& b) +{ + ptrdiff_t delta = a.index - b.index, n = a.seq->total; +#if defined(__QNX__) + // No long std::abs(long) in QNX + long absdelta = (delta < 0) ? -delta : delta; + if( absdelta > n ) +#else + if( std::abs(static_cast(delta)) > n ) +#endif + delta += delta < 0 ? n : -n; + + return delta; +} + +template inline bool operator == (const SeqIterator<_Tp>& a, + const SeqIterator<_Tp>& b) +{ + return a.seq == b.seq && a.index == b.index; +} + +template inline bool operator != (const SeqIterator<_Tp>& a, + const SeqIterator<_Tp>& b) +{ + return !(a == b); +} + + +template struct RTTIImpl +{ +public: + static int isInstance(const void* ptr) + { + static _ClsName dummy; + static void* dummyp = &dummy; + union + { + const void* p; + const void** pp; + } a, b; + a.p = dummyp; + b.p = ptr; + return *a.pp == *b.pp; + } + static void release(void** dbptr) + { + if(dbptr && *dbptr) + { + delete (_ClsName*)*dbptr; + *dbptr = 0; + } + } + static void* read(CvFileStorage* fs, CvFileNode* n) + { + FileNode fn(fs, n); + _ClsName* obj = new _ClsName; + if(obj->read(fn)) + return obj; + delete obj; + return 0; + } + + static void write(CvFileStorage* _fs, const char* name, const void* ptr, CvAttrList) + { + if(ptr && _fs) + { + FileStorage fs(_fs); + fs.fs.addref(); + ((const _ClsName*)ptr)->write(fs, string(name)); + } + } + + static void* clone(const void* ptr) + { + if(!ptr) + return 0; + return new _ClsName(*(const _ClsName*)ptr); + } +}; + + +class CV_EXPORTS Formatter +{ +public: + virtual ~Formatter() {} + virtual void write(std::ostream& out, const Mat& m, const int* params=0, int nparams=0) const = 0; + virtual void write(std::ostream& out, const void* data, int nelems, int type, + const int* params=0, int nparams=0) const = 0; + static const Formatter* get(const char* fmt=""); + static const Formatter* setDefault(const Formatter* fmt); +}; + + +struct CV_EXPORTS Formatted +{ + Formatted(const Mat& m, const Formatter* fmt, + const vector& params); + Formatted(const Mat& m, const Formatter* fmt, + const int* params=0); + Mat mtx; + const Formatter* 
fmt; + vector params; +}; + +static inline Formatted format(const Mat& mtx, const char* fmt, + const vector& params=vector()) +{ + return Formatted(mtx, Formatter::get(fmt), params); +} + +template static inline Formatted format(const vector >& vec, + const char* fmt, const vector& params=vector()) +{ + return Formatted(Mat(vec), Formatter::get(fmt), params); +} + +template static inline Formatted format(const vector >& vec, + const char* fmt, const vector& params=vector()) +{ + return Formatted(Mat(vec), Formatter::get(fmt), params); +} + +/** \brief prints Mat to the output stream in Matlab notation + * use like + @verbatim + Mat my_mat = Mat::eye(3,3,CV_32F); + std::cout << my_mat; + @endverbatim + */ +static inline std::ostream& operator << (std::ostream& out, const Mat& mtx) +{ + Formatter::get()->write(out, mtx); + return out; +} + +/** \brief prints Mat to the output stream allows in the specified notation (see format) + * use like + @verbatim + Mat my_mat = Mat::eye(3,3,CV_32F); + std::cout << my_mat; + @endverbatim + */ +static inline std::ostream& operator << (std::ostream& out, const Formatted& fmtd) +{ + fmtd.fmt->write(out, fmtd.mtx); + return out; +} + + +template static inline std::ostream& operator << (std::ostream& out, + const vector >& vec) +{ + Formatter::get()->write(out, Mat(vec)); + return out; +} + + +template static inline std::ostream& operator << (std::ostream& out, + const vector >& vec) +{ + Formatter::get()->write(out, Mat(vec)); + return out; +} + + +/** Writes a Matx to an output stream. + */ +template inline std::ostream& operator<<(std::ostream& out, const Matx<_Tp, m, n>& matx) +{ + out << cv::Mat(matx); + return out; +} + +/** Writes a point to an output stream in Matlab notation + */ +template inline std::ostream& operator<<(std::ostream& out, const Point_<_Tp>& p) +{ + out << "[" << p.x << ", " << p.y << "]"; + return out; +} + +/** Writes a point to an output stream in Matlab notation + */ +template inline std::ostream& operator<<(std::ostream& out, const Point3_<_Tp>& p) +{ + out << "[" << p.x << ", " << p.y << ", " << p.z << "]"; + return out; +} + +/** Writes a Vec to an output stream. Format example : [10, 20, 30] + */ +template inline std::ostream& operator<<(std::ostream& out, const Vec<_Tp, n>& vec) +{ + out << "["; + + if(Vec<_Tp, n>::depth < CV_32F) + { + for (int i = 0; i < n - 1; ++i) { + out << (int)vec[i] << ", "; + } + out << (int)vec[n-1] << "]"; + } + else + { + for (int i = 0; i < n - 1; ++i) { + out << vec[i] << ", "; + } + out << vec[n-1] << "]"; + } + + return out; +} + +/** Writes a Size_ to an output stream. Format example : [640 x 480] + */ +template inline std::ostream& operator<<(std::ostream& out, const Size_<_Tp>& size) +{ + out << "[" << size.width << " x " << size.height << "]"; + return out; +} + +/** Writes a Rect_ to an output stream. Format example : [640 x 480 from (10, 20)] + */ +template inline std::ostream& operator<<(std::ostream& out, const Rect_<_Tp>& rect) +{ + out << "[" << rect.width << " x " << rect.height << " from (" << rect.x << ", " << rect.y << ")]"; + return out; +} + + +template inline Ptr<_Tp> Algorithm::create(const string& name) +{ + return _create(name).ptr<_Tp>(); +} + +template +inline void Algorithm::set(const char* _name, const Ptr<_Tp>& value) +{ + Ptr algo_ptr = value. 
template ptr(); + if (algo_ptr.empty()) { + CV_Error( CV_StsUnsupportedFormat, "unknown/unsupported Ptr type of the second parameter of the method Algorithm::set"); + } + info()->set(this, _name, ParamType::type, &algo_ptr); +} + +template +inline void Algorithm::set(const string& _name, const Ptr<_Tp>& value) +{ + this->set<_Tp>(_name.c_str(), value); +} + +template +inline void Algorithm::setAlgorithm(const char* _name, const Ptr<_Tp>& value) +{ + Ptr algo_ptr = value. template ptr(); + if (algo_ptr.empty()) { + CV_Error( CV_StsUnsupportedFormat, "unknown/unsupported Ptr type of the second parameter of the method Algorithm::set"); + } + info()->set(this, _name, ParamType::type, &algo_ptr); +} + +template +inline void Algorithm::setAlgorithm(const string& _name, const Ptr<_Tp>& value) +{ + this->set<_Tp>(_name.c_str(), value); +} + +template inline typename ParamType<_Tp>::member_type Algorithm::get(const string& _name) const +{ + typename ParamType<_Tp>::member_type value; + info()->get(this, _name.c_str(), ParamType<_Tp>::type, &value); + return value; +} + +template inline typename ParamType<_Tp>::member_type Algorithm::get(const char* _name) const +{ + typename ParamType<_Tp>::member_type value; + info()->get(this, _name, ParamType<_Tp>::type, &value); + return value; +} + +template inline void AlgorithmInfo::addParam(Algorithm& algo, const char* parameter, + Ptr<_Tp>& value, bool readOnly, Ptr<_Tp> (Algorithm::*getter)(), void (Algorithm::*setter)(const Ptr<_Tp>&), + const string& help) +{ + //TODO: static assert: _Tp inherits from _Base + addParam_(algo, parameter, ParamType<_Base>::type, &value, readOnly, + (Algorithm::Getter)getter, (Algorithm::Setter)setter, help); +} + +template inline void AlgorithmInfo::addParam(Algorithm& algo, const char* parameter, + Ptr<_Tp>& value, bool readOnly, Ptr<_Tp> (Algorithm::*getter)(), void (Algorithm::*setter)(const Ptr<_Tp>&), + const string& help) +{ + //TODO: static assert: _Tp inherits from Algorithm + addParam_(algo, parameter, ParamType::type, &value, readOnly, + (Algorithm::Getter)getter, (Algorithm::Setter)setter, help); +} + +} + +#ifdef _MSC_VER +# pragma warning(pop) +#endif + +#endif // __cplusplus +#endif diff --git a/src/c/hardware/rasberrypi/libraries/opencv/opencv2/core/types_c.h b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/core/types_c.h new file mode 100644 index 0000000..c21cd2c --- /dev/null +++ b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/core/types_c.h @@ -0,0 +1,1923 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. 
+// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef __OPENCV_CORE_TYPES_H__ +#define __OPENCV_CORE_TYPES_H__ + +#if !defined _CRT_SECURE_NO_DEPRECATE && defined _MSC_VER +# if _MSC_VER > 1300 +# define _CRT_SECURE_NO_DEPRECATE /* to avoid multiple Visual Studio 2005 warnings */ +# endif +#endif + + +#ifndef SKIP_INCLUDES + +#include +#include +#include +#include + +#if !defined _MSC_VER && !defined __BORLANDC__ +# include +#endif + +#if defined __ICL +# define CV_ICC __ICL +#elif defined __ICC +# define CV_ICC __ICC +#elif defined __ECL +# define CV_ICC __ECL +#elif defined __ECC +# define CV_ICC __ECC +#elif defined __INTEL_COMPILER +# define CV_ICC __INTEL_COMPILER +#endif + +#if defined CV_ICC && !defined CV_ENABLE_UNROLLED +# define CV_ENABLE_UNROLLED 0 +#else +# define CV_ENABLE_UNROLLED 1 +#endif + +#if (defined _M_X64 && defined _MSC_VER && _MSC_VER >= 1400) || (__GNUC__ >= 4 && defined __x86_64__) +# if defined WIN32 +# include +# endif +# if defined __SSE2__ || !defined __GNUC__ +# include +# endif +#endif + +#if defined __BORLANDC__ +# include +#else +# include +#endif + +#ifdef HAVE_IPL +# ifndef __IPL_H__ +# if defined WIN32 || defined _WIN32 +# include +# else +# include +# endif +# endif +#elif defined __IPL_H__ +# define HAVE_IPL +#endif + +#endif // SKIP_INCLUDES + +#if defined WIN32 || defined _WIN32 +# define CV_CDECL __cdecl +# define CV_STDCALL __stdcall +#else +# define CV_CDECL +# define CV_STDCALL +#endif + +#ifndef CV_EXTERN_C +# ifdef __cplusplus +# define CV_EXTERN_C extern "C" +# define CV_DEFAULT(val) = val +# else +# define CV_EXTERN_C +# define CV_DEFAULT(val) +# endif +#endif + +#ifndef CV_EXTERN_C_FUNCPTR +# ifdef __cplusplus +# define CV_EXTERN_C_FUNCPTR(x) extern "C" { typedef x; } +# else +# define CV_EXTERN_C_FUNCPTR(x) typedef x +# endif +#endif + +#ifndef CV_INLINE +# if defined __cplusplus +# define CV_INLINE inline +# elif defined _MSC_VER +# define CV_INLINE __inline +# else +# define CV_INLINE static +# endif +#endif /* CV_INLINE */ + +#if (defined WIN32 || defined _WIN32 || defined WINCE) && defined CVAPI_EXPORTS +# define CV_EXPORTS __declspec(dllexport) +#else +# define CV_EXPORTS +#endif + +#ifndef CVAPI +# define CVAPI(rettype) CV_EXTERN_C CV_EXPORTS rettype CV_CDECL +#endif + +#if defined _MSC_VER || defined __BORLANDC__ + typedef __int64 int64; + typedef unsigned __int64 uint64; +# define CV_BIG_INT(n) 
n##I64 +# define CV_BIG_UINT(n) n##UI64 +#else + typedef int64_t int64; + typedef uint64_t uint64; +# define CV_BIG_INT(n) n##LL +# define CV_BIG_UINT(n) n##ULL +#endif + +#ifndef HAVE_IPL + typedef unsigned char uchar; + typedef unsigned short ushort; +#endif + +typedef signed char schar; + +/* special informative macros for wrapper generators */ +#define CV_CARRAY(counter) +#define CV_CUSTOM_CARRAY(args) +#define CV_EXPORTS_W CV_EXPORTS +#define CV_EXPORTS_W_SIMPLE CV_EXPORTS +#define CV_EXPORTS_AS(synonym) CV_EXPORTS +#define CV_EXPORTS_W_MAP CV_EXPORTS +#define CV_IN_OUT +#define CV_OUT +#define CV_PROP +#define CV_PROP_RW +#define CV_WRAP +#define CV_WRAP_AS(synonym) +#define CV_WRAP_DEFAULT(value) + +/* CvArr* is used to pass arbitrary + * array-like data structures + * into functions where the particular + * array type is recognized at runtime: + */ +typedef void CvArr; + +typedef union Cv32suf +{ + int i; + unsigned u; + float f; +} +Cv32suf; + +typedef union Cv64suf +{ + int64 i; + uint64 u; + double f; +} +Cv64suf; + +typedef int CVStatus; + +enum { + CV_StsOk= 0, /* everithing is ok */ + CV_StsBackTrace= -1, /* pseudo error for back trace */ + CV_StsError= -2, /* unknown /unspecified error */ + CV_StsInternal= -3, /* internal error (bad state) */ + CV_StsNoMem= -4, /* insufficient memory */ + CV_StsBadArg= -5, /* function arg/param is bad */ + CV_StsBadFunc= -6, /* unsupported function */ + CV_StsNoConv= -7, /* iter. didn't converge */ + CV_StsAutoTrace= -8, /* tracing */ + CV_HeaderIsNull= -9, /* image header is NULL */ + CV_BadImageSize= -10, /* image size is invalid */ + CV_BadOffset= -11, /* offset is invalid */ + CV_BadDataPtr= -12, /**/ + CV_BadStep= -13, /**/ + CV_BadModelOrChSeq= -14, /**/ + CV_BadNumChannels= -15, /**/ + CV_BadNumChannel1U= -16, /**/ + CV_BadDepth= -17, /**/ + CV_BadAlphaChannel= -18, /**/ + CV_BadOrder= -19, /**/ + CV_BadOrigin= -20, /**/ + CV_BadAlign= -21, /**/ + CV_BadCallBack= -22, /**/ + CV_BadTileSize= -23, /**/ + CV_BadCOI= -24, /**/ + CV_BadROISize= -25, /**/ + CV_MaskIsTiled= -26, /**/ + CV_StsNullPtr= -27, /* null pointer */ + CV_StsVecLengthErr= -28, /* incorrect vector length */ + CV_StsFilterStructContentErr= -29, /* incorr. filter structure content */ + CV_StsKernelStructContentErr= -30, /* incorr. 
transform kernel content */ + CV_StsFilterOffsetErr= -31, /* incorrect filter offset value */ + CV_StsBadSize= -201, /* the input/output structure size is incorrect */ + CV_StsDivByZero= -202, /* division by zero */ + CV_StsInplaceNotSupported= -203, /* in-place operation is not supported */ + CV_StsObjectNotFound= -204, /* request can't be completed */ + CV_StsUnmatchedFormats= -205, /* formats of input/output arrays differ */ + CV_StsBadFlag= -206, /* flag is wrong or not supported */ + CV_StsBadPoint= -207, /* bad CvPoint */ + CV_StsBadMask= -208, /* bad format of mask (neither 8uC1 nor 8sC1)*/ + CV_StsUnmatchedSizes= -209, /* sizes of input/output structures do not match */ + CV_StsUnsupportedFormat= -210, /* the data format/type is not supported by the function*/ + CV_StsOutOfRange= -211, /* some of parameters are out of range */ + CV_StsParseError= -212, /* invalid syntax/structure of the parsed file */ + CV_StsNotImplemented= -213, /* the requested function/feature is not implemented */ + CV_StsBadMemBlock= -214, /* an allocated block has been corrupted */ + CV_StsAssert= -215, /* assertion failed */ + CV_GpuNotSupported= -216, + CV_GpuApiCallError= -217, + CV_OpenGlNotSupported= -218, + CV_OpenGlApiCallError= -219, + CV_OpenCLDoubleNotSupported= -220, + CV_OpenCLInitError= -221, + CV_OpenCLNoAMDBlasFft= -222 +}; + +/****************************************************************************************\ +* Common macros and inline functions * +\****************************************************************************************/ + +#ifdef HAVE_TEGRA_OPTIMIZATION +# include "tegra_round.hpp" +#endif + +#define CV_PI 3.1415926535897932384626433832795 +#define CV_LOG2 0.69314718055994530941723212145818 + +#define CV_SWAP(a,b,t) ((t) = (a), (a) = (b), (b) = (t)) + +#ifndef MIN +# define MIN(a,b) ((a) > (b) ? (b) : (a)) +#endif + +#ifndef MAX +# define MAX(a,b) ((a) < (b) ? (b) : (a)) +#endif + +/* min & max without jumps */ +#define CV_IMIN(a, b) ((a) ^ (((a)^(b)) & (((a) < (b)) - 1))) + +#define CV_IMAX(a, b) ((a) ^ (((a)^(b)) & (((a) > (b)) - 1))) + +/* absolute value without jumps */ +#ifndef __cplusplus +# define CV_IABS(a) (((a) ^ ((a) < 0 ? -1 : 0)) - ((a) < 0 ? -1 : 0)) +#else +# define CV_IABS(a) abs(a) +#endif +#define CV_CMP(a,b) (((a) > (b)) - ((a) < (b))) +#define CV_SIGN(a) CV_CMP((a),0) + +#if defined __GNUC__ && defined __arm__ && (defined __ARM_PCS_VFP || defined __ARM_VFPV3__) +# define CV_VFP 1 +#else +# define CV_VFP 0 +#endif + + +#if CV_VFP +// 1. general scheme +#define ARM_ROUND(_value, _asm_string) \ + int res; \ + float temp; \ + (void)temp; \ + asm(_asm_string : [res] "=r" (res), [temp] "=w" (temp) : [value] "w" (_value)); \ + return res; +// 2. version for double +#ifdef __clang__ +#define ARM_ROUND_DBL(value) ARM_ROUND(value, "vcvtr.s32.f64 %[temp], %[value] \n vmov %[res], %[temp]") +#else +#define ARM_ROUND_DBL(value) ARM_ROUND(value, "vcvtr.s32.f64 %[temp], %P[value] \n vmov %[res], %[temp]") +#endif +// 3. 
version for float +#define ARM_ROUND_FLT(value) ARM_ROUND(value, "vcvtr.s32.f32 %[temp], %[value]\n vmov %[res], %[temp]") +#endif // CV_VFP + +CV_INLINE int cvRound( double value ) +{ +#if (defined _MSC_VER && defined _M_X64) || (defined __GNUC__ && defined __x86_64__ && defined __SSE2__ && !defined __APPLE__) + __m128d t = _mm_set_sd( value ); + return _mm_cvtsd_si32(t); +#elif defined _MSC_VER && defined _M_IX86 + int t; + __asm + { + fld value; + fistp t; + } + return t; +#elif defined _MSC_VER && defined _M_ARM && defined HAVE_TEGRA_OPTIMIZATION + TEGRA_ROUND(value); +#elif defined CV_ICC || defined __GNUC__ +# ifdef HAVE_TEGRA_OPTIMIZATION + TEGRA_ROUND(value); +# elif CV_VFP + ARM_ROUND_DBL(value) +# else + return (int)lrint(value); +# endif +#else + double intpart, fractpart; + fractpart = modf(value, &intpart); + if ((fabs(fractpart) != 0.5) || ((((int)intpart) % 2) != 0)) + return (int)(value + (value >= 0 ? 0.5 : -0.5)); + else + return (int)intpart; +#endif +} + +#if defined __SSE2__ || (defined _M_IX86_FP && 2 == _M_IX86_FP) +# include "emmintrin.h" +#endif + +CV_INLINE int cvFloor( double value ) +{ +#if defined _MSC_VER && defined _M_X64 || (defined __GNUC__ && defined __SSE2__ && !defined __APPLE__) + __m128d t = _mm_set_sd( value ); + int i = _mm_cvtsd_si32(t); + return i - _mm_movemask_pd(_mm_cmplt_sd(t, _mm_cvtsi32_sd(t,i))); +#elif defined __GNUC__ + int i = (int)value; + return i - (i > value); +#else + int i = cvRound(value); + float diff = (float)(value - i); + return i - (diff < 0); +#endif +} + + +CV_INLINE int cvCeil( double value ) +{ +#if defined _MSC_VER && defined _M_X64 || (defined __GNUC__ && defined __SSE2__&& !defined __APPLE__) + __m128d t = _mm_set_sd( value ); + int i = _mm_cvtsd_si32(t); + return i + _mm_movemask_pd(_mm_cmplt_sd(_mm_cvtsi32_sd(t,i), t)); +#elif defined __GNUC__ + int i = (int)value; + return i + (i < value); +#else + int i = cvRound(value); + float diff = (float)(i - value); + return i + (diff < 0); +#endif +} + +#define cvInvSqrt(value) ((float)(1./sqrt(value))) +#define cvSqrt(value) ((float)sqrt(value)) + +CV_INLINE int cvIsNaN( double value ) +{ + Cv64suf ieee754; + ieee754.f = value; + return ((unsigned)(ieee754.u >> 32) & 0x7fffffff) + + ((unsigned)ieee754.u != 0) > 0x7ff00000; +} + + +CV_INLINE int cvIsInf( double value ) +{ + Cv64suf ieee754; + ieee754.f = value; + return ((unsigned)(ieee754.u >> 32) & 0x7fffffff) == 0x7ff00000 && + (unsigned)ieee754.u == 0; +} + + +/*************** Random number generation *******************/ + +typedef uint64 CvRNG; + +#define CV_RNG_COEFF 4164903690U + +CV_INLINE CvRNG cvRNG( int64 seed CV_DEFAULT(-1)) +{ + CvRNG rng = seed ? (uint64)seed : (uint64)(int64)-1; + return rng; +} + +/* Return random 32-bit unsigned integer: */ +CV_INLINE unsigned cvRandInt( CvRNG* rng ) +{ + uint64 temp = *rng; + temp = (uint64)(unsigned)temp*CV_RNG_COEFF + (temp >> 32); + *rng = temp; + return (unsigned)temp; +} + +/* Returns random floating-point number between 0 and 1: */ +CV_INLINE double cvRandReal( CvRNG* rng ) +{ + return cvRandInt(rng)*2.3283064365386962890625e-10 /* 2^-32 */; +} + +/****************************************************************************************\ +* Image type (IplImage) * +\****************************************************************************************/ + +#ifndef HAVE_IPL + +/* + * The following definitions (until #endif) + * is an extract from IPL headers. + * Copyright (c) 1995 Intel Corporation. 
+ */ +#define IPL_DEPTH_SIGN 0x80000000 + +#define IPL_DEPTH_1U 1 +#define IPL_DEPTH_8U 8 +#define IPL_DEPTH_16U 16 +#define IPL_DEPTH_32F 32 + +#define IPL_DEPTH_8S (IPL_DEPTH_SIGN| 8) +#define IPL_DEPTH_16S (IPL_DEPTH_SIGN|16) +#define IPL_DEPTH_32S (IPL_DEPTH_SIGN|32) + +#define IPL_DATA_ORDER_PIXEL 0 +#define IPL_DATA_ORDER_PLANE 1 + +#define IPL_ORIGIN_TL 0 +#define IPL_ORIGIN_BL 1 + +#define IPL_ALIGN_4BYTES 4 +#define IPL_ALIGN_8BYTES 8 +#define IPL_ALIGN_16BYTES 16 +#define IPL_ALIGN_32BYTES 32 + +#define IPL_ALIGN_DWORD IPL_ALIGN_4BYTES +#define IPL_ALIGN_QWORD IPL_ALIGN_8BYTES + +#define IPL_BORDER_CONSTANT 0 +#define IPL_BORDER_REPLICATE 1 +#define IPL_BORDER_REFLECT 2 +#define IPL_BORDER_WRAP 3 + +typedef struct _IplImage +{ + int nSize; /* sizeof(IplImage) */ + int ID; /* version (=0)*/ + int nChannels; /* Most of OpenCV functions support 1,2,3 or 4 channels */ + int alphaChannel; /* Ignored by OpenCV */ + int depth; /* Pixel depth in bits: IPL_DEPTH_8U, IPL_DEPTH_8S, IPL_DEPTH_16S, + IPL_DEPTH_32S, IPL_DEPTH_32F and IPL_DEPTH_64F are supported. */ + char colorModel[4]; /* Ignored by OpenCV */ + char channelSeq[4]; /* ditto */ + int dataOrder; /* 0 - interleaved color channels, 1 - separate color channels. + cvCreateImage can only create interleaved images */ + int origin; /* 0 - top-left origin, + 1 - bottom-left origin (Windows bitmaps style). */ + int align; /* Alignment of image rows (4 or 8). + OpenCV ignores it and uses widthStep instead. */ + int width; /* Image width in pixels. */ + int height; /* Image height in pixels. */ + struct _IplROI *roi; /* Image ROI. If NULL, the whole image is selected. */ + struct _IplImage *maskROI; /* Must be NULL. */ + void *imageId; /* " " */ + struct _IplTileInfo *tileInfo; /* " " */ + int imageSize; /* Image data size in bytes + (==image->height*image->widthStep + in case of interleaved data)*/ + char *imageData; /* Pointer to aligned image data. */ + int widthStep; /* Size of aligned image row in bytes. */ + int BorderMode[4]; /* Ignored by OpenCV. */ + int BorderConst[4]; /* Ditto. 
*/ + char *imageDataOrigin; /* Pointer to very origin of image data + (not necessarily aligned) - + needed for correct deallocation */ +} +IplImage; + +typedef struct _IplTileInfo IplTileInfo; + +typedef struct _IplROI +{ + int coi; /* 0 - no COI (all channels are selected), 1 - 0th channel is selected ...*/ + int xOffset; + int yOffset; + int width; + int height; +} +IplROI; + +typedef struct _IplConvKernel +{ + int nCols; + int nRows; + int anchorX; + int anchorY; + int *values; + int nShiftR; +} +IplConvKernel; + +typedef struct _IplConvKernelFP +{ + int nCols; + int nRows; + int anchorX; + int anchorY; + float *values; +} +IplConvKernelFP; + +#define IPL_IMAGE_HEADER 1 +#define IPL_IMAGE_DATA 2 +#define IPL_IMAGE_ROI 4 + +#endif/*HAVE_IPL*/ + +/* extra border mode */ +#define IPL_BORDER_REFLECT_101 4 +#define IPL_BORDER_TRANSPARENT 5 + +#define IPL_IMAGE_MAGIC_VAL ((int)sizeof(IplImage)) +#define CV_TYPE_NAME_IMAGE "opencv-image" + +#define CV_IS_IMAGE_HDR(img) \ + ((img) != NULL && ((const IplImage*)(img))->nSize == sizeof(IplImage)) + +#define CV_IS_IMAGE(img) \ + (CV_IS_IMAGE_HDR(img) && ((IplImage*)img)->imageData != NULL) + +/* for storing double-precision + floating point data in IplImage's */ +#define IPL_DEPTH_64F 64 + +/* get reference to pixel at (col,row), + for multi-channel images (col) should be multiplied by number of channels */ +#define CV_IMAGE_ELEM( image, elemtype, row, col ) \ + (((elemtype*)((image)->imageData + (image)->widthStep*(row)))[(col)]) + +/****************************************************************************************\ +* Matrix type (CvMat) * +\****************************************************************************************/ + +#define CV_CN_MAX 512 +#define CV_CN_SHIFT 3 +#define CV_DEPTH_MAX (1 << CV_CN_SHIFT) + +#define CV_8U 0 +#define CV_8S 1 +#define CV_16U 2 +#define CV_16S 3 +#define CV_32S 4 +#define CV_32F 5 +#define CV_64F 6 +#define CV_USRTYPE1 7 + +#define CV_MAT_DEPTH_MASK (CV_DEPTH_MAX - 1) +#define CV_MAT_DEPTH(flags) ((flags) & CV_MAT_DEPTH_MASK) + +#define CV_MAKETYPE(depth,cn) (CV_MAT_DEPTH(depth) + (((cn)-1) << CV_CN_SHIFT)) +#define CV_MAKE_TYPE CV_MAKETYPE + +#define CV_8UC1 CV_MAKETYPE(CV_8U,1) +#define CV_8UC2 CV_MAKETYPE(CV_8U,2) +#define CV_8UC3 CV_MAKETYPE(CV_8U,3) +#define CV_8UC4 CV_MAKETYPE(CV_8U,4) +#define CV_8UC(n) CV_MAKETYPE(CV_8U,(n)) + +#define CV_8SC1 CV_MAKETYPE(CV_8S,1) +#define CV_8SC2 CV_MAKETYPE(CV_8S,2) +#define CV_8SC3 CV_MAKETYPE(CV_8S,3) +#define CV_8SC4 CV_MAKETYPE(CV_8S,4) +#define CV_8SC(n) CV_MAKETYPE(CV_8S,(n)) + +#define CV_16UC1 CV_MAKETYPE(CV_16U,1) +#define CV_16UC2 CV_MAKETYPE(CV_16U,2) +#define CV_16UC3 CV_MAKETYPE(CV_16U,3) +#define CV_16UC4 CV_MAKETYPE(CV_16U,4) +#define CV_16UC(n) CV_MAKETYPE(CV_16U,(n)) + +#define CV_16SC1 CV_MAKETYPE(CV_16S,1) +#define CV_16SC2 CV_MAKETYPE(CV_16S,2) +#define CV_16SC3 CV_MAKETYPE(CV_16S,3) +#define CV_16SC4 CV_MAKETYPE(CV_16S,4) +#define CV_16SC(n) CV_MAKETYPE(CV_16S,(n)) + +#define CV_32SC1 CV_MAKETYPE(CV_32S,1) +#define CV_32SC2 CV_MAKETYPE(CV_32S,2) +#define CV_32SC3 CV_MAKETYPE(CV_32S,3) +#define CV_32SC4 CV_MAKETYPE(CV_32S,4) +#define CV_32SC(n) CV_MAKETYPE(CV_32S,(n)) + +#define CV_32FC1 CV_MAKETYPE(CV_32F,1) +#define CV_32FC2 CV_MAKETYPE(CV_32F,2) +#define CV_32FC3 CV_MAKETYPE(CV_32F,3) +#define CV_32FC4 CV_MAKETYPE(CV_32F,4) +#define CV_32FC(n) CV_MAKETYPE(CV_32F,(n)) + +#define CV_64FC1 CV_MAKETYPE(CV_64F,1) +#define CV_64FC2 CV_MAKETYPE(CV_64F,2) +#define CV_64FC3 CV_MAKETYPE(CV_64F,3) +#define CV_64FC4 CV_MAKETYPE(CV_64F,4) 
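+
+/* Illustrative note (not from the original header): CV_MAKETYPE packs the depth into the low
+   CV_CN_SHIFT bits and (channels-1) above them, so CV_MAKETYPE(CV_8U,3) == 0 + ((3-1) << 3) == 16,
+   which is exactly the value of CV_8UC3, and CV_MAT_DEPTH(CV_8UC3) recovers CV_8U (== 0). */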
+#define CV_64FC(n) CV_MAKETYPE(CV_64F,(n)) + +#define CV_AUTO_STEP 0x7fffffff +#define CV_WHOLE_ARR cvSlice( 0, 0x3fffffff ) + +#define CV_MAT_CN_MASK ((CV_CN_MAX - 1) << CV_CN_SHIFT) +#define CV_MAT_CN(flags) ((((flags) & CV_MAT_CN_MASK) >> CV_CN_SHIFT) + 1) +#define CV_MAT_TYPE_MASK (CV_DEPTH_MAX*CV_CN_MAX - 1) +#define CV_MAT_TYPE(flags) ((flags) & CV_MAT_TYPE_MASK) +#define CV_MAT_CONT_FLAG_SHIFT 14 +#define CV_MAT_CONT_FLAG (1 << CV_MAT_CONT_FLAG_SHIFT) +#define CV_IS_MAT_CONT(flags) ((flags) & CV_MAT_CONT_FLAG) +#define CV_IS_CONT_MAT CV_IS_MAT_CONT +#define CV_SUBMAT_FLAG_SHIFT 15 +#define CV_SUBMAT_FLAG (1 << CV_SUBMAT_FLAG_SHIFT) +#define CV_IS_SUBMAT(flags) ((flags) & CV_MAT_SUBMAT_FLAG) + +#define CV_MAGIC_MASK 0xFFFF0000 +#define CV_MAT_MAGIC_VAL 0x42420000 +#define CV_TYPE_NAME_MAT "opencv-matrix" + +typedef struct CvMat +{ + int type; + int step; + + /* for internal use only */ + int* refcount; + int hdr_refcount; + + union + { + uchar* ptr; + short* s; + int* i; + float* fl; + double* db; + } data; + +#ifdef __cplusplus + union + { + int rows; + int height; + }; + + union + { + int cols; + int width; + }; +#else + int rows; + int cols; +#endif + +} +CvMat; + + +#define CV_IS_MAT_HDR(mat) \ + ((mat) != NULL && \ + (((const CvMat*)(mat))->type & CV_MAGIC_MASK) == CV_MAT_MAGIC_VAL && \ + ((const CvMat*)(mat))->cols > 0 && ((const CvMat*)(mat))->rows > 0) + +#define CV_IS_MAT_HDR_Z(mat) \ + ((mat) != NULL && \ + (((const CvMat*)(mat))->type & CV_MAGIC_MASK) == CV_MAT_MAGIC_VAL && \ + ((const CvMat*)(mat))->cols >= 0 && ((const CvMat*)(mat))->rows >= 0) + +#define CV_IS_MAT(mat) \ + (CV_IS_MAT_HDR(mat) && ((const CvMat*)(mat))->data.ptr != NULL) + +#define CV_IS_MASK_ARR(mat) \ + (((mat)->type & (CV_MAT_TYPE_MASK & ~CV_8SC1)) == 0) + +#define CV_ARE_TYPES_EQ(mat1, mat2) \ + ((((mat1)->type ^ (mat2)->type) & CV_MAT_TYPE_MASK) == 0) + +#define CV_ARE_CNS_EQ(mat1, mat2) \ + ((((mat1)->type ^ (mat2)->type) & CV_MAT_CN_MASK) == 0) + +#define CV_ARE_DEPTHS_EQ(mat1, mat2) \ + ((((mat1)->type ^ (mat2)->type) & CV_MAT_DEPTH_MASK) == 0) + +#define CV_ARE_SIZES_EQ(mat1, mat2) \ + ((mat1)->rows == (mat2)->rows && (mat1)->cols == (mat2)->cols) + +#define CV_IS_MAT_CONST(mat) \ + (((mat)->rows|(mat)->cols) == 1) + +/* Size of each channel item, + 0x124489 = 1000 0100 0100 0010 0010 0001 0001 ~ array of sizeof(arr_type_elem) */ +#define CV_ELEM_SIZE1(type) \ + ((((sizeof(size_t)<<28)|0x8442211) >> CV_MAT_DEPTH(type)*4) & 15) + +/* 0x3a50 = 11 10 10 01 01 00 00 ~ array of log2(sizeof(arr_type_elem)) */ +#define CV_ELEM_SIZE(type) \ + (CV_MAT_CN(type) << ((((sizeof(size_t)/4+1)*16384|0x3a50) >> CV_MAT_DEPTH(type)*2) & 3)) + +#define IPL2CV_DEPTH(depth) \ + ((((CV_8U)+(CV_16U<<4)+(CV_32F<<8)+(CV_64F<<16)+(CV_8S<<20)+ \ + (CV_16S<<24)+(CV_32S<<28)) >> ((((depth) & 0xF0) >> 2) + \ + (((depth) & IPL_DEPTH_SIGN) ? 20 : 0))) & 15) + +/* Inline constructor. No data is allocated internally!!! 
+ * (Use together with cvCreateData, or use cvCreateMat instead to + * get a matrix with allocated data): + */ +CV_INLINE CvMat cvMat( int rows, int cols, int type, void* data CV_DEFAULT(NULL)) +{ + CvMat m; + + assert( (unsigned)CV_MAT_DEPTH(type) <= CV_64F ); + type = CV_MAT_TYPE(type); + m.type = CV_MAT_MAGIC_VAL | CV_MAT_CONT_FLAG | type; + m.cols = cols; + m.rows = rows; + m.step = m.cols*CV_ELEM_SIZE(type); + m.data.ptr = (uchar*)data; + m.refcount = NULL; + m.hdr_refcount = 0; + + return m; +} + + +#define CV_MAT_ELEM_PTR_FAST( mat, row, col, pix_size ) \ + (assert( (unsigned)(row) < (unsigned)(mat).rows && \ + (unsigned)(col) < (unsigned)(mat).cols ), \ + (mat).data.ptr + (size_t)(mat).step*(row) + (pix_size)*(col)) + +#define CV_MAT_ELEM_PTR( mat, row, col ) \ + CV_MAT_ELEM_PTR_FAST( mat, row, col, CV_ELEM_SIZE((mat).type) ) + +#define CV_MAT_ELEM( mat, elemtype, row, col ) \ + (*(elemtype*)CV_MAT_ELEM_PTR_FAST( mat, row, col, sizeof(elemtype))) + + +CV_INLINE double cvmGet( const CvMat* mat, int row, int col ) +{ + int type; + + type = CV_MAT_TYPE(mat->type); + assert( (unsigned)row < (unsigned)mat->rows && + (unsigned)col < (unsigned)mat->cols ); + + if( type == CV_32FC1 ) + return ((float*)(void*)(mat->data.ptr + (size_t)mat->step*row))[col]; + else + { + assert( type == CV_64FC1 ); + return ((double*)(void*)(mat->data.ptr + (size_t)mat->step*row))[col]; + } +} + + +CV_INLINE void cvmSet( CvMat* mat, int row, int col, double value ) +{ + int type; + type = CV_MAT_TYPE(mat->type); + assert( (unsigned)row < (unsigned)mat->rows && + (unsigned)col < (unsigned)mat->cols ); + + if( type == CV_32FC1 ) + ((float*)(void*)(mat->data.ptr + (size_t)mat->step*row))[col] = (float)value; + else + { + assert( type == CV_64FC1 ); + ((double*)(void*)(mat->data.ptr + (size_t)mat->step*row))[col] = value; + } +} + + +CV_INLINE int cvIplDepth( int type ) +{ + int depth = CV_MAT_DEPTH(type); + return CV_ELEM_SIZE1(depth)*8 | (depth == CV_8S || depth == CV_16S || + depth == CV_32S ? 
IPL_DEPTH_SIGN : 0); +} + + +/****************************************************************************************\ +* Multi-dimensional dense array (CvMatND) * +\****************************************************************************************/ + +#define CV_MATND_MAGIC_VAL 0x42430000 +#define CV_TYPE_NAME_MATND "opencv-nd-matrix" + +#define CV_MAX_DIM 32 +#define CV_MAX_DIM_HEAP 1024 + +typedef struct CvMatND +{ + int type; + int dims; + + int* refcount; + int hdr_refcount; + + union + { + uchar* ptr; + float* fl; + double* db; + int* i; + short* s; + } data; + + struct + { + int size; + int step; + } + dim[CV_MAX_DIM]; +} +CvMatND; + +#define CV_IS_MATND_HDR(mat) \ + ((mat) != NULL && (((const CvMatND*)(mat))->type & CV_MAGIC_MASK) == CV_MATND_MAGIC_VAL) + +#define CV_IS_MATND(mat) \ + (CV_IS_MATND_HDR(mat) && ((const CvMatND*)(mat))->data.ptr != NULL) + + +/****************************************************************************************\ +* Multi-dimensional sparse array (CvSparseMat) * +\****************************************************************************************/ + +#define CV_SPARSE_MAT_MAGIC_VAL 0x42440000 +#define CV_TYPE_NAME_SPARSE_MAT "opencv-sparse-matrix" + +struct CvSet; + +typedef struct CvSparseMat +{ + int type; + int dims; + int* refcount; + int hdr_refcount; + + struct CvSet* heap; + void** hashtable; + int hashsize; + int valoffset; + int idxoffset; + int size[CV_MAX_DIM]; +} +CvSparseMat; + +#define CV_IS_SPARSE_MAT_HDR(mat) \ + ((mat) != NULL && \ + (((const CvSparseMat*)(mat))->type & CV_MAGIC_MASK) == CV_SPARSE_MAT_MAGIC_VAL) + +#define CV_IS_SPARSE_MAT(mat) \ + CV_IS_SPARSE_MAT_HDR(mat) + +/**************** iteration through a sparse array *****************/ + +typedef struct CvSparseNode +{ + unsigned hashval; + struct CvSparseNode* next; +} +CvSparseNode; + +typedef struct CvSparseMatIterator +{ + CvSparseMat* mat; + CvSparseNode* node; + int curidx; +} +CvSparseMatIterator; + +#define CV_NODE_VAL(mat,node) ((void*)((uchar*)(node) + (mat)->valoffset)) +#define CV_NODE_IDX(mat,node) ((int*)((uchar*)(node) + (mat)->idxoffset)) + +/****************************************************************************************\ +* Histogram * +\****************************************************************************************/ + +typedef int CvHistType; + +#define CV_HIST_MAGIC_VAL 0x42450000 +#define CV_HIST_UNIFORM_FLAG (1 << 10) + +/* indicates whether bin ranges are set already or not */ +#define CV_HIST_RANGES_FLAG (1 << 11) + +#define CV_HIST_ARRAY 0 +#define CV_HIST_SPARSE 1 +#define CV_HIST_TREE CV_HIST_SPARSE + +/* should be used as a parameter only, + it turns to CV_HIST_UNIFORM_FLAG of hist->type */ +#define CV_HIST_UNIFORM 1 + +typedef struct CvHistogram +{ + int type; + CvArr* bins; + float thresh[CV_MAX_DIM][2]; /* For uniform histograms. */ + float** thresh2; /* For non-uniform histograms. */ + CvMatND mat; /* Embedded matrix header for array histograms. 
*/ +} +CvHistogram; + +#define CV_IS_HIST( hist ) \ + ((hist) != NULL && \ + (((CvHistogram*)(hist))->type & CV_MAGIC_MASK) == CV_HIST_MAGIC_VAL && \ + (hist)->bins != NULL) + +#define CV_IS_UNIFORM_HIST( hist ) \ + (((hist)->type & CV_HIST_UNIFORM_FLAG) != 0) + +#define CV_IS_SPARSE_HIST( hist ) \ + CV_IS_SPARSE_MAT((hist)->bins) + +#define CV_HIST_HAS_RANGES( hist ) \ + (((hist)->type & CV_HIST_RANGES_FLAG) != 0) + +/****************************************************************************************\ +* Other supplementary data type definitions * +\****************************************************************************************/ + +/*************************************** CvRect *****************************************/ + +typedef struct CvRect +{ + int x; + int y; + int width; + int height; +} +CvRect; + +CV_INLINE CvRect cvRect( int x, int y, int width, int height ) +{ + CvRect r; + + r.x = x; + r.y = y; + r.width = width; + r.height = height; + + return r; +} + + +CV_INLINE IplROI cvRectToROI( CvRect rect, int coi ) +{ + IplROI roi; + roi.xOffset = rect.x; + roi.yOffset = rect.y; + roi.width = rect.width; + roi.height = rect.height; + roi.coi = coi; + + return roi; +} + + +CV_INLINE CvRect cvROIToRect( IplROI roi ) +{ + return cvRect( roi.xOffset, roi.yOffset, roi.width, roi.height ); +} + +/*********************************** CvTermCriteria *************************************/ + +#define CV_TERMCRIT_ITER 1 +#define CV_TERMCRIT_NUMBER CV_TERMCRIT_ITER +#define CV_TERMCRIT_EPS 2 + +typedef struct CvTermCriteria +{ + int type; /* may be combination of + CV_TERMCRIT_ITER + CV_TERMCRIT_EPS */ + int max_iter; + double epsilon; +} +CvTermCriteria; + +CV_INLINE CvTermCriteria cvTermCriteria( int type, int max_iter, double epsilon ) +{ + CvTermCriteria t; + + t.type = type; + t.max_iter = max_iter; + t.epsilon = (float)epsilon; + + return t; +} + + +/******************************* CvPoint and variants ***********************************/ + +typedef struct CvPoint +{ + int x; + int y; +} +CvPoint; + + +CV_INLINE CvPoint cvPoint( int x, int y ) +{ + CvPoint p; + + p.x = x; + p.y = y; + + return p; +} + + +typedef struct CvPoint2D32f +{ + float x; + float y; +} +CvPoint2D32f; + + +CV_INLINE CvPoint2D32f cvPoint2D32f( double x, double y ) +{ + CvPoint2D32f p; + + p.x = (float)x; + p.y = (float)y; + + return p; +} + + +CV_INLINE CvPoint2D32f cvPointTo32f( CvPoint point ) +{ + return cvPoint2D32f( (float)point.x, (float)point.y ); +} + + +CV_INLINE CvPoint cvPointFrom32f( CvPoint2D32f point ) +{ + CvPoint ipt; + ipt.x = cvRound(point.x); + ipt.y = cvRound(point.y); + + return ipt; +} + + +typedef struct CvPoint3D32f +{ + float x; + float y; + float z; +} +CvPoint3D32f; + + +CV_INLINE CvPoint3D32f cvPoint3D32f( double x, double y, double z ) +{ + CvPoint3D32f p; + + p.x = (float)x; + p.y = (float)y; + p.z = (float)z; + + return p; +} + + +typedef struct CvPoint2D64f +{ + double x; + double y; +} +CvPoint2D64f; + + +CV_INLINE CvPoint2D64f cvPoint2D64f( double x, double y ) +{ + CvPoint2D64f p; + + p.x = x; + p.y = y; + + return p; +} + + +typedef struct CvPoint3D64f +{ + double x; + double y; + double z; +} +CvPoint3D64f; + + +CV_INLINE CvPoint3D64f cvPoint3D64f( double x, double y, double z ) +{ + CvPoint3D64f p; + + p.x = x; + p.y = y; + p.z = z; + + return p; +} + + +/******************************** CvSize's & CvBox **************************************/ + +typedef struct CvSize +{ + int width; + int height; +} +CvSize; + +CV_INLINE CvSize cvSize( int width, int height ) +{ + 
CvSize s; + + s.width = width; + s.height = height; + + return s; +} + +typedef struct CvSize2D32f +{ + float width; + float height; +} +CvSize2D32f; + + +CV_INLINE CvSize2D32f cvSize2D32f( double width, double height ) +{ + CvSize2D32f s; + + s.width = (float)width; + s.height = (float)height; + + return s; +} + +typedef struct CvBox2D +{ + CvPoint2D32f center; /* Center of the box. */ + CvSize2D32f size; /* Box width and length. */ + float angle; /* Angle between the horizontal axis */ + /* and the first side (i.e. length) in degrees */ +} +CvBox2D; + + +/* Line iterator state: */ +typedef struct CvLineIterator +{ + /* Pointer to the current point: */ + uchar* ptr; + + /* Bresenham algorithm state: */ + int err; + int plus_delta; + int minus_delta; + int plus_step; + int minus_step; +} +CvLineIterator; + + + +/************************************* CvSlice ******************************************/ + +typedef struct CvSlice +{ + int start_index, end_index; +} +CvSlice; + +CV_INLINE CvSlice cvSlice( int start, int end ) +{ + CvSlice slice; + slice.start_index = start; + slice.end_index = end; + + return slice; +} + +#define CV_WHOLE_SEQ_END_INDEX 0x3fffffff +#define CV_WHOLE_SEQ cvSlice(0, CV_WHOLE_SEQ_END_INDEX) + + +/************************************* CvScalar *****************************************/ + +typedef struct CvScalar +{ + double val[4]; +} +CvScalar; + +CV_INLINE CvScalar cvScalar( double val0, double val1 CV_DEFAULT(0), + double val2 CV_DEFAULT(0), double val3 CV_DEFAULT(0)) +{ + CvScalar scalar; + scalar.val[0] = val0; scalar.val[1] = val1; + scalar.val[2] = val2; scalar.val[3] = val3; + return scalar; +} + + +CV_INLINE CvScalar cvRealScalar( double val0 ) +{ + CvScalar scalar; + scalar.val[0] = val0; + scalar.val[1] = scalar.val[2] = scalar.val[3] = 0; + return scalar; +} + +CV_INLINE CvScalar cvScalarAll( double val0123 ) +{ + CvScalar scalar; + scalar.val[0] = val0123; + scalar.val[1] = val0123; + scalar.val[2] = val0123; + scalar.val[3] = val0123; + return scalar; +} + +/****************************************************************************************\ +* Dynamic Data structures * +\****************************************************************************************/ + +/******************************** Memory storage ****************************************/ + +typedef struct CvMemBlock +{ + struct CvMemBlock* prev; + struct CvMemBlock* next; +} +CvMemBlock; + +#define CV_STORAGE_MAGIC_VAL 0x42890000 + +typedef struct CvMemStorage +{ + int signature; + CvMemBlock* bottom; /* First allocated block. */ + CvMemBlock* top; /* Current memory block - top of the stack. */ + struct CvMemStorage* parent; /* We get new blocks from parent as needed. */ + int block_size; /* Block size. */ + int free_space; /* Remaining free space in current block. */ +} +CvMemStorage; + +#define CV_IS_STORAGE(storage) \ + ((storage) != NULL && \ + (((CvMemStorage*)(storage))->signature & CV_MAGIC_MASK) == CV_STORAGE_MAGIC_VAL) + + +typedef struct CvMemStoragePos +{ + CvMemBlock* top; + int free_space; +} +CvMemStoragePos; + + +/*********************************** Sequence *******************************************/ + +typedef struct CvSeqBlock +{ + struct CvSeqBlock* prev; /* Previous sequence block. */ + struct CvSeqBlock* next; /* Next sequence block. */ + int start_index; /* Index of the first element in the block + */ + /* sequence->first->start_index. */ + int count; /* Number of elements in the block. */ + schar* data; /* Pointer to the first element of the block. 
*/ +} +CvSeqBlock; + + +#define CV_TREE_NODE_FIELDS(node_type) \ + int flags; /* Miscellaneous flags. */ \ + int header_size; /* Size of sequence header. */ \ + struct node_type* h_prev; /* Previous sequence. */ \ + struct node_type* h_next; /* Next sequence. */ \ + struct node_type* v_prev; /* 2nd previous sequence. */ \ + struct node_type* v_next /* 2nd next sequence. */ + +/* + Read/Write sequence. + Elements can be dynamically inserted to or deleted from the sequence. +*/ +#define CV_SEQUENCE_FIELDS() \ + CV_TREE_NODE_FIELDS(CvSeq); \ + int total; /* Total number of elements. */ \ + int elem_size; /* Size of sequence element in bytes. */ \ + schar* block_max; /* Maximal bound of the last block. */ \ + schar* ptr; /* Current write pointer. */ \ + int delta_elems; /* Grow seq this many at a time. */ \ + CvMemStorage* storage; /* Where the seq is stored. */ \ + CvSeqBlock* free_blocks; /* Free blocks list. */ \ + CvSeqBlock* first; /* Pointer to the first sequence block. */ + +typedef struct CvSeq +{ + CV_SEQUENCE_FIELDS() +} +CvSeq; + +#define CV_TYPE_NAME_SEQ "opencv-sequence" +#define CV_TYPE_NAME_SEQ_TREE "opencv-sequence-tree" + +/*************************************** Set ********************************************/ +/* + Set. + Order is not preserved. There can be gaps between sequence elements. + After the element has been inserted it stays in the same place all the time. + The MSB(most-significant or sign bit) of the first field (flags) is 0 iff the element exists. +*/ +#define CV_SET_ELEM_FIELDS(elem_type) \ + int flags; \ + struct elem_type* next_free; + +typedef struct CvSetElem +{ + CV_SET_ELEM_FIELDS(CvSetElem) +} +CvSetElem; + +#define CV_SET_FIELDS() \ + CV_SEQUENCE_FIELDS() \ + CvSetElem* free_elems; \ + int active_count; + +typedef struct CvSet +{ + CV_SET_FIELDS() +} +CvSet; + + +#define CV_SET_ELEM_IDX_MASK ((1 << 26) - 1) +#define CV_SET_ELEM_FREE_FLAG (1 << (sizeof(int)*8-1)) + +/* Checks whether the element pointed by ptr belongs to a set or not */ +#define CV_IS_SET_ELEM( ptr ) (((CvSetElem*)(ptr))->flags >= 0) + +/************************************* Graph ********************************************/ + +/* + We represent a graph as a set of vertices. + Vertices contain their adjacency lists (more exactly, pointers to first incoming or + outcoming edge (or 0 if isolated vertex)). Edges are stored in another set. + There is a singly-linked list of incoming/outcoming edges for each vertex. + + Each edge consists of + + o Two pointers to the starting and ending vertices + (vtx[0] and vtx[1] respectively). + + A graph may be oriented or not. In the latter case, edges between + vertex i to vertex j are not distinguished during search operations. + + o Two pointers to next edges for the starting and ending vertices, where + next[0] points to the next edge in the vtx[0] adjacency list and + next[1] points to the next edge in the vtx[1] adjacency list. 
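+
+ For example, the degree of a vertex vtx can be computed by walking its
+ adjacency list with the CV_NEXT_GRAPH_EDGE macro defined further below
+ (a minimal sketch; nothing beyond this header is assumed):
+
+ CvGraphEdge* edge = vtx->first;
+ int degree = 0;
+ for( ; edge != 0; edge = CV_NEXT_GRAPH_EDGE( edge, vtx ))
+ degree++;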
+*/
+#define CV_GRAPH_EDGE_FIELDS() \
+ int flags; \
+ float weight; \
+ struct CvGraphEdge* next[2]; \
+ struct CvGraphVtx* vtx[2];
+
+
+#define CV_GRAPH_VERTEX_FIELDS() \
+ int flags; \
+ struct CvGraphEdge* first;
+
+
+typedef struct CvGraphEdge
+{
+ CV_GRAPH_EDGE_FIELDS()
+}
+CvGraphEdge;
+
+typedef struct CvGraphVtx
+{
+ CV_GRAPH_VERTEX_FIELDS()
+}
+CvGraphVtx;
+
+typedef struct CvGraphVtx2D
+{
+ CV_GRAPH_VERTEX_FIELDS()
+ CvPoint2D32f* ptr;
+}
+CvGraphVtx2D;
+
+/*
+ Graph is "derived" from the set (this is a set of vertices)
+ and includes another set (edges)
+*/
+#define CV_GRAPH_FIELDS() \
+ CV_SET_FIELDS() \
+ CvSet* edges;
+
+typedef struct CvGraph
+{
+ CV_GRAPH_FIELDS()
+}
+CvGraph;
+
+#define CV_TYPE_NAME_GRAPH "opencv-graph"
+
+/*********************************** Chain/Contour **************************************/
+
+typedef struct CvChain
+{
+ CV_SEQUENCE_FIELDS()
+ CvPoint origin;
+}
+CvChain;
+
+#define CV_CONTOUR_FIELDS() \
+ CV_SEQUENCE_FIELDS() \
+ CvRect rect; \
+ int color; \
+ int reserved[3];
+
+typedef struct CvContour
+{
+ CV_CONTOUR_FIELDS()
+}
+CvContour;
+
+typedef CvContour CvPoint2DSeq;
+
+/****************************************************************************************\
+* Sequence types *
+\****************************************************************************************/
+
+#define CV_SEQ_MAGIC_VAL 0x42990000
+
+#define CV_IS_SEQ(seq) \
+ ((seq) != NULL && (((CvSeq*)(seq))->flags & CV_MAGIC_MASK) == CV_SEQ_MAGIC_VAL)
+
+#define CV_SET_MAGIC_VAL 0x42980000
+#define CV_IS_SET(set) \
+ ((set) != NULL && (((CvSeq*)(set))->flags & CV_MAGIC_MASK) == CV_SET_MAGIC_VAL)
+
+#define CV_SEQ_ELTYPE_BITS 12
+#define CV_SEQ_ELTYPE_MASK ((1 << CV_SEQ_ELTYPE_BITS) - 1)
+
+#define CV_SEQ_ELTYPE_POINT CV_32SC2 /* (x,y) */
+#define CV_SEQ_ELTYPE_CODE CV_8UC1 /* freeman code: 0..7 */
+#define CV_SEQ_ELTYPE_GENERIC 0
+#define CV_SEQ_ELTYPE_PTR CV_USRTYPE1
+#define CV_SEQ_ELTYPE_PPOINT CV_SEQ_ELTYPE_PTR /* &(x,y) */
+#define CV_SEQ_ELTYPE_INDEX CV_32SC1 /* #(x,y) */
+#define CV_SEQ_ELTYPE_GRAPH_EDGE 0 /* &next_o, &next_d, &vtx_o, &vtx_d */
+#define CV_SEQ_ELTYPE_GRAPH_VERTEX 0 /* first_edge, &(x,y) */
+#define CV_SEQ_ELTYPE_TRIAN_ATR 0 /* vertex of the binary tree */
+#define CV_SEQ_ELTYPE_CONNECTED_COMP 0 /* connected component */
+#define CV_SEQ_ELTYPE_POINT3D CV_32FC3 /* (x,y,z) */
+
+#define CV_SEQ_KIND_BITS 2
+#define CV_SEQ_KIND_MASK (((1 << CV_SEQ_KIND_BITS) - 1)<<CV_SEQ_ELTYPE_BITS)
+
+/* types of sequences */
+#define CV_SEQ_KIND_GENERIC (0 << CV_SEQ_ELTYPE_BITS)
+#define CV_SEQ_KIND_CURVE (1 << CV_SEQ_ELTYPE_BITS)
+#define CV_SEQ_KIND_BIN_TREE (2 << CV_SEQ_ELTYPE_BITS)
+
+/* types of sparse sequences (sets) */
+#define CV_SEQ_KIND_GRAPH (1 << CV_SEQ_ELTYPE_BITS)
+#define CV_SEQ_KIND_SUBDIV2D (2 << CV_SEQ_ELTYPE_BITS)
+
+#define CV_SEQ_FLAG_SHIFT (CV_SEQ_KIND_BITS + CV_SEQ_ELTYPE_BITS)
+
+/* flags for curves */
+#define CV_SEQ_FLAG_CLOSED (1 << CV_SEQ_FLAG_SHIFT)
+#define CV_SEQ_FLAG_SIMPLE (0 << CV_SEQ_FLAG_SHIFT)
+#define CV_SEQ_FLAG_CONVEX (0 << CV_SEQ_FLAG_SHIFT)
+#define CV_SEQ_FLAG_HOLE (2 << CV_SEQ_FLAG_SHIFT)
+
+/* flags for graphs */
+#define CV_GRAPH_FLAG_ORIENTED (1 << CV_SEQ_FLAG_SHIFT)
+
+#define CV_GRAPH CV_SEQ_KIND_GRAPH
+#define CV_ORIENTED_GRAPH (CV_SEQ_KIND_GRAPH|CV_GRAPH_FLAG_ORIENTED)
+
+/* point sets */
+#define CV_SEQ_POINT_SET (CV_SEQ_KIND_GENERIC| CV_SEQ_ELTYPE_POINT)
+#define CV_SEQ_POINT3D_SET (CV_SEQ_KIND_GENERIC| CV_SEQ_ELTYPE_POINT3D)
+#define CV_SEQ_POLYLINE (CV_SEQ_KIND_CURVE | CV_SEQ_ELTYPE_POINT)
+#define CV_SEQ_POLYGON (CV_SEQ_FLAG_CLOSED | CV_SEQ_POLYLINE )
+#define CV_SEQ_CONTOUR CV_SEQ_POLYGON
+#define CV_SEQ_SIMPLE_POLYGON (CV_SEQ_FLAG_SIMPLE | CV_SEQ_POLYGON )
+
+/* chain-coded curves */
+#define CV_SEQ_CHAIN (CV_SEQ_KIND_CURVE | CV_SEQ_ELTYPE_CODE)
+#define CV_SEQ_CHAIN_CONTOUR (CV_SEQ_FLAG_CLOSED | CV_SEQ_CHAIN)
+
+/* binary tree for the contour */
+#define CV_SEQ_POLYGON_TREE (CV_SEQ_KIND_BIN_TREE | CV_SEQ_ELTYPE_TRIAN_ATR)
+
+/* sequence of the connected components */
+#define CV_SEQ_CONNECTED_COMP (CV_SEQ_KIND_GENERIC | CV_SEQ_ELTYPE_CONNECTED_COMP)
+
+/* sequence of the integer numbers */
+#define CV_SEQ_INDEX (CV_SEQ_KIND_GENERIC | CV_SEQ_ELTYPE_INDEX)
+
+#define CV_SEQ_ELTYPE( seq ) ((seq)->flags & CV_SEQ_ELTYPE_MASK)
+#define CV_SEQ_KIND( seq ) ((seq)->flags & CV_SEQ_KIND_MASK )
+
+/* flag checking */
+#define CV_IS_SEQ_INDEX( seq ) ((CV_SEQ_ELTYPE(seq) == CV_SEQ_ELTYPE_INDEX) && \
+ (CV_SEQ_KIND(seq) == CV_SEQ_KIND_GENERIC))
+
+#define CV_IS_SEQ_CURVE( seq ) (CV_SEQ_KIND(seq) == CV_SEQ_KIND_CURVE)
+#define CV_IS_SEQ_CLOSED( seq ) (((seq)->flags & CV_SEQ_FLAG_CLOSED) != 0)
+#define CV_IS_SEQ_CONVEX( seq ) 0
+#define CV_IS_SEQ_HOLE( seq ) (((seq)->flags & CV_SEQ_FLAG_HOLE) != 0)
+#define CV_IS_SEQ_SIMPLE( seq ) 1
+
+/* type checking macros */
+#define CV_IS_SEQ_POINT_SET( seq ) \
+ ((CV_SEQ_ELTYPE(seq) == CV_32SC2 || CV_SEQ_ELTYPE(seq) == CV_32FC2))
+
+#define CV_IS_SEQ_POINT_SUBSET( seq ) \
+ (CV_IS_SEQ_INDEX( seq ) || CV_SEQ_ELTYPE(seq) == CV_SEQ_ELTYPE_PPOINT)
+
+#define CV_IS_SEQ_POLYLINE( seq ) \
+ (CV_SEQ_KIND(seq) == CV_SEQ_KIND_CURVE && CV_IS_SEQ_POINT_SET(seq))
+
+#define CV_IS_SEQ_POLYGON( seq ) \
+ (CV_IS_SEQ_POLYLINE(seq) && CV_IS_SEQ_CLOSED(seq))
+
+#define CV_IS_SEQ_CHAIN( seq ) \
+ (CV_SEQ_KIND(seq) == CV_SEQ_KIND_CURVE && (seq)->elem_size == 1)
+
+#define
CV_IS_SEQ_CONTOUR( seq ) \ + (CV_IS_SEQ_CLOSED(seq) && (CV_IS_SEQ_POLYLINE(seq) || CV_IS_SEQ_CHAIN(seq))) + +#define CV_IS_SEQ_CHAIN_CONTOUR( seq ) \ + (CV_IS_SEQ_CHAIN( seq ) && CV_IS_SEQ_CLOSED( seq )) + +#define CV_IS_SEQ_POLYGON_TREE( seq ) \ + (CV_SEQ_ELTYPE (seq) == CV_SEQ_ELTYPE_TRIAN_ATR && \ + CV_SEQ_KIND( seq ) == CV_SEQ_KIND_BIN_TREE ) + +#define CV_IS_GRAPH( seq ) \ + (CV_IS_SET(seq) && CV_SEQ_KIND((CvSet*)(seq)) == CV_SEQ_KIND_GRAPH) + +#define CV_IS_GRAPH_ORIENTED( seq ) \ + (((seq)->flags & CV_GRAPH_FLAG_ORIENTED) != 0) + +#define CV_IS_SUBDIV2D( seq ) \ + (CV_IS_SET(seq) && CV_SEQ_KIND((CvSet*)(seq)) == CV_SEQ_KIND_SUBDIV2D) + +/****************************************************************************************/ +/* Sequence writer & reader */ +/****************************************************************************************/ + +#define CV_SEQ_WRITER_FIELDS() \ + int header_size; \ + CvSeq* seq; /* the sequence written */ \ + CvSeqBlock* block; /* current block */ \ + schar* ptr; /* pointer to free space */ \ + schar* block_min; /* pointer to the beginning of block*/\ + schar* block_max; /* pointer to the end of block */ + +typedef struct CvSeqWriter +{ + CV_SEQ_WRITER_FIELDS() +} +CvSeqWriter; + + +#define CV_SEQ_READER_FIELDS() \ + int header_size; \ + CvSeq* seq; /* sequence, beign read */ \ + CvSeqBlock* block; /* current block */ \ + schar* ptr; /* pointer to element be read next */ \ + schar* block_min; /* pointer to the beginning of block */\ + schar* block_max; /* pointer to the end of block */ \ + int delta_index;/* = seq->first->start_index */ \ + schar* prev_elem; /* pointer to previous element */ + + +typedef struct CvSeqReader +{ + CV_SEQ_READER_FIELDS() +} +CvSeqReader; + +/****************************************************************************************/ +/* Operations on sequences */ +/****************************************************************************************/ + +#define CV_SEQ_ELEM( seq, elem_type, index ) \ +/* assert gives some guarantee that parameter is valid */ \ +( assert(sizeof((seq)->first[0]) == sizeof(CvSeqBlock) && \ + (seq)->elem_size == sizeof(elem_type)), \ + (elem_type*)((seq)->first && (unsigned)index < \ + (unsigned)((seq)->first->count) ? 
\ + (seq)->first->data + (index) * sizeof(elem_type) : \ + cvGetSeqElem( (CvSeq*)(seq), (index) ))) +#define CV_GET_SEQ_ELEM( elem_type, seq, index ) CV_SEQ_ELEM( (seq), elem_type, (index) ) + +/* Add element to sequence: */ +#define CV_WRITE_SEQ_ELEM_VAR( elem_ptr, writer ) \ +{ \ + if( (writer).ptr >= (writer).block_max ) \ + { \ + cvCreateSeqBlock( &writer); \ + } \ + memcpy((writer).ptr, elem_ptr, (writer).seq->elem_size);\ + (writer).ptr += (writer).seq->elem_size; \ +} + +#define CV_WRITE_SEQ_ELEM( elem, writer ) \ +{ \ + assert( (writer).seq->elem_size == sizeof(elem)); \ + if( (writer).ptr >= (writer).block_max ) \ + { \ + cvCreateSeqBlock( &writer); \ + } \ + assert( (writer).ptr <= (writer).block_max - sizeof(elem));\ + memcpy((writer).ptr, &(elem), sizeof(elem)); \ + (writer).ptr += sizeof(elem); \ +} + + +/* Move reader position forward: */ +#define CV_NEXT_SEQ_ELEM( elem_size, reader ) \ +{ \ + if( ((reader).ptr += (elem_size)) >= (reader).block_max ) \ + { \ + cvChangeSeqBlock( &(reader), 1 ); \ + } \ +} + + +/* Move reader position backward: */ +#define CV_PREV_SEQ_ELEM( elem_size, reader ) \ +{ \ + if( ((reader).ptr -= (elem_size)) < (reader).block_min ) \ + { \ + cvChangeSeqBlock( &(reader), -1 ); \ + } \ +} + +/* Read element and move read position forward: */ +#define CV_READ_SEQ_ELEM( elem, reader ) \ +{ \ + assert( (reader).seq->elem_size == sizeof(elem)); \ + memcpy( &(elem), (reader).ptr, sizeof((elem))); \ + CV_NEXT_SEQ_ELEM( sizeof(elem), reader ) \ +} + +/* Read element and move read position backward: */ +#define CV_REV_READ_SEQ_ELEM( elem, reader ) \ +{ \ + assert( (reader).seq->elem_size == sizeof(elem)); \ + memcpy(&(elem), (reader).ptr, sizeof((elem))); \ + CV_PREV_SEQ_ELEM( sizeof(elem), reader ) \ +} + + +#define CV_READ_CHAIN_POINT( _pt, reader ) \ +{ \ + (_pt) = (reader).pt; \ + if( (reader).ptr ) \ + { \ + CV_READ_SEQ_ELEM( (reader).code, (reader)); \ + assert( ((reader).code & ~7) == 0 ); \ + (reader).pt.x += (reader).deltas[(int)(reader).code][0]; \ + (reader).pt.y += (reader).deltas[(int)(reader).code][1]; \ + } \ +} + +#define CV_CURRENT_POINT( reader ) (*((CvPoint*)((reader).ptr))) +#define CV_PREV_POINT( reader ) (*((CvPoint*)((reader).prev_elem))) + +#define CV_READ_EDGE( pt1, pt2, reader ) \ +{ \ + assert( sizeof(pt1) == sizeof(CvPoint) && \ + sizeof(pt2) == sizeof(CvPoint) && \ + reader.seq->elem_size == sizeof(CvPoint)); \ + (pt1) = CV_PREV_POINT( reader ); \ + (pt2) = CV_CURRENT_POINT( reader ); \ + (reader).prev_elem = (reader).ptr; \ + CV_NEXT_SEQ_ELEM( sizeof(CvPoint), (reader)); \ +} + +/************ Graph macros ************/ + +/* Return next graph edge for given vertex: */ +#define CV_NEXT_GRAPH_EDGE( edge, vertex ) \ + (assert((edge)->vtx[0] == (vertex) || (edge)->vtx[1] == (vertex)), \ + (edge)->next[(edge)->vtx[1] == (vertex)]) + + + +/****************************************************************************************\ +* Data structures for persistence (a.k.a serialization) functionality * +\****************************************************************************************/ + +/* "black box" file storage */ +typedef struct CvFileStorage CvFileStorage; + +/* Storage flags: */ +#define CV_STORAGE_READ 0 +#define CV_STORAGE_WRITE 1 +#define CV_STORAGE_WRITE_TEXT CV_STORAGE_WRITE +#define CV_STORAGE_WRITE_BINARY CV_STORAGE_WRITE +#define CV_STORAGE_APPEND 2 +#define CV_STORAGE_MEMORY 4 +#define CV_STORAGE_FORMAT_MASK (7<<3) +#define CV_STORAGE_FORMAT_AUTO 0 +#define CV_STORAGE_FORMAT_XML 8 +#define CV_STORAGE_FORMAT_YAML 16 + 
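+
+/* A minimal usage sketch for the storage flags above (this assumes the C
+   persistence API declared in core_c.h: cvOpenFileStorage, cvWriteInt,
+   cvReadIntByName and cvReleaseFileStorage):
+
+       CvFileStorage* fs = cvOpenFileStorage( "params.yml", 0, CV_STORAGE_WRITE );
+       cvWriteInt( fs, "frame_count", 10 );
+       cvReleaseFileStorage( &fs );
+
+       fs = cvOpenFileStorage( "params.yml", 0, CV_STORAGE_READ );
+       int frame_count = cvReadIntByName( fs, 0, "frame_count", 0 );
+       cvReleaseFileStorage( &fs );
+
+   Here the format is deduced from the file extension; CV_STORAGE_FORMAT_XML or
+   CV_STORAGE_FORMAT_YAML can be combined with the flags to force a format, and
+   CV_STORAGE_MEMORY keeps the storage in a memory buffer instead of a file. */
+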
+/* List of attributes: */ +typedef struct CvAttrList +{ + const char** attr; /* NULL-terminated array of (attribute_name,attribute_value) pairs. */ + struct CvAttrList* next; /* Pointer to next chunk of the attributes list. */ +} +CvAttrList; + +CV_INLINE CvAttrList cvAttrList( const char** attr CV_DEFAULT(NULL), + CvAttrList* next CV_DEFAULT(NULL) ) +{ + CvAttrList l; + l.attr = attr; + l.next = next; + + return l; +} + +struct CvTypeInfo; + +#define CV_NODE_NONE 0 +#define CV_NODE_INT 1 +#define CV_NODE_INTEGER CV_NODE_INT +#define CV_NODE_REAL 2 +#define CV_NODE_FLOAT CV_NODE_REAL +#define CV_NODE_STR 3 +#define CV_NODE_STRING CV_NODE_STR +#define CV_NODE_REF 4 /* not used */ +#define CV_NODE_SEQ 5 +#define CV_NODE_MAP 6 +#define CV_NODE_TYPE_MASK 7 + +#define CV_NODE_TYPE(flags) ((flags) & CV_NODE_TYPE_MASK) + +/* file node flags */ +#define CV_NODE_FLOW 8 /* Used only for writing structures in YAML format. */ +#define CV_NODE_USER 16 +#define CV_NODE_EMPTY 32 +#define CV_NODE_NAMED 64 + +#define CV_NODE_IS_INT(flags) (CV_NODE_TYPE(flags) == CV_NODE_INT) +#define CV_NODE_IS_REAL(flags) (CV_NODE_TYPE(flags) == CV_NODE_REAL) +#define CV_NODE_IS_STRING(flags) (CV_NODE_TYPE(flags) == CV_NODE_STRING) +#define CV_NODE_IS_SEQ(flags) (CV_NODE_TYPE(flags) == CV_NODE_SEQ) +#define CV_NODE_IS_MAP(flags) (CV_NODE_TYPE(flags) == CV_NODE_MAP) +#define CV_NODE_IS_COLLECTION(flags) (CV_NODE_TYPE(flags) >= CV_NODE_SEQ) +#define CV_NODE_IS_FLOW(flags) (((flags) & CV_NODE_FLOW) != 0) +#define CV_NODE_IS_EMPTY(flags) (((flags) & CV_NODE_EMPTY) != 0) +#define CV_NODE_IS_USER(flags) (((flags) & CV_NODE_USER) != 0) +#define CV_NODE_HAS_NAME(flags) (((flags) & CV_NODE_NAMED) != 0) + +#define CV_NODE_SEQ_SIMPLE 256 +#define CV_NODE_SEQ_IS_SIMPLE(seq) (((seq)->flags & CV_NODE_SEQ_SIMPLE) != 0) + +typedef struct CvString +{ + int len; + char* ptr; +} +CvString; + +/* All the keys (names) of elements in the readed file storage + are stored in the hash to speed up the lookup operations: */ +typedef struct CvStringHashNode +{ + unsigned hashval; + CvString str; + struct CvStringHashNode* next; +} +CvStringHashNode; + +typedef struct CvGenericHash CvFileNodeHash; + +/* Basic element of the file storage - scalar or collection: */ +typedef struct CvFileNode +{ + int tag; + struct CvTypeInfo* info; /* type information + (only for user-defined object, for others it is 0) */ + union + { + double f; /* scalar floating-point number */ + int i; /* scalar integer number */ + CvString str; /* text string */ + CvSeq* seq; /* sequence (ordered collection of file nodes) */ + CvFileNodeHash* map; /* map (collection of named file nodes) */ + } data; +} +CvFileNode; + +#ifdef __cplusplus +extern "C" { +#endif +typedef int (CV_CDECL *CvIsInstanceFunc)( const void* struct_ptr ); +typedef void (CV_CDECL *CvReleaseFunc)( void** struct_dblptr ); +typedef void* (CV_CDECL *CvReadFunc)( CvFileStorage* storage, CvFileNode* node ); +typedef void (CV_CDECL *CvWriteFunc)( CvFileStorage* storage, const char* name, + const void* struct_ptr, CvAttrList attributes ); +typedef void* (CV_CDECL *CvCloneFunc)( const void* struct_ptr ); +#ifdef __cplusplus +} +#endif + +typedef struct CvTypeInfo +{ + int flags; + int header_size; + struct CvTypeInfo* prev; + struct CvTypeInfo* next; + const char* type_name; + CvIsInstanceFunc is_instance; + CvReleaseFunc release; + CvReadFunc read; + CvWriteFunc write; + CvCloneFunc clone; +} +CvTypeInfo; + + +/**** System data types ******/ + +typedef struct CvPluginFuncInfo +{ + void** func_addr; + void* 
default_func_addr; + const char* func_names; + int search_modules; + int loaded_from; +} +CvPluginFuncInfo; + +typedef struct CvModuleInfo +{ + struct CvModuleInfo* next; + const char* name; + const char* version; + CvPluginFuncInfo* func_tab; +} +CvModuleInfo; + +#endif /*__OPENCV_CORE_TYPES_H__*/ + +/* End of file. */ diff --git a/src/c/hardware/rasberrypi/libraries/opencv/opencv2/core/version.hpp b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/core/version.hpp new file mode 100644 index 0000000..2dbb3c3 --- /dev/null +++ b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/core/version.hpp @@ -0,0 +1,72 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// Intel License Agreement +// For Open Source Computer Vision Library +// +// Copyright( C) 2000, Intel Corporation, all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of Intel Corporation may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +//(including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort(including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +/* + definition of the current version of OpenCV + Usefull to test in user programs +*/ + +#ifndef __OPENCV_VERSION_HPP__ +#define __OPENCV_VERSION_HPP__ + +#define CV_VERSION_EPOCH 2 +#define CV_VERSION_MAJOR 4 +#define CV_VERSION_MINOR 13 +#define CV_VERSION_REVISION 0 + +#define CVAUX_STR_EXP(__A) #__A +#define CVAUX_STR(__A) CVAUX_STR_EXP(__A) + +#define CVAUX_STRW_EXP(__A) L#__A +#define CVAUX_STRW(__A) CVAUX_STRW_EXP(__A) + +#if CV_VERSION_REVISION +# define CV_VERSION CVAUX_STR(CV_VERSION_EPOCH) "." CVAUX_STR(CV_VERSION_MAJOR) "." CVAUX_STR(CV_VERSION_MINOR) "." CVAUX_STR(CV_VERSION_REVISION) +#else +# define CV_VERSION CVAUX_STR(CV_VERSION_EPOCH) "." CVAUX_STR(CV_VERSION_MAJOR) "." 
CVAUX_STR(CV_VERSION_MINOR) +#endif + +/* old style version constants*/ +#define CV_MAJOR_VERSION CV_VERSION_EPOCH +#define CV_MINOR_VERSION CV_VERSION_MAJOR +#define CV_SUBMINOR_VERSION CV_VERSION_MINOR + +#endif diff --git a/src/c/hardware/rasberrypi/libraries/opencv/opencv2/core/wimage.hpp b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/core/wimage.hpp new file mode 100644 index 0000000..c7afa8c --- /dev/null +++ b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/core/wimage.hpp @@ -0,0 +1,621 @@ +/////////////////////////////////////////////////////////////////////////////// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to +// this license. If you do not agree to this license, do not download, +// install, copy or use the software. +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2008, Google, all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of Intel Corporation or contributors may not be used to endorse +// or promote products derived from this software without specific +// prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" +// and any express or implied warranties, including, but not limited to, the +// implied warranties of merchantability and fitness for a particular purpose +// are disclaimed. In no event shall the Intel Corporation or contributors be +// liable for any direct, indirect, incidental, special, exemplary, or +// consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. + + +///////////////////////////////////////////////////////////////////////////////// +// +// Image class which provides a thin layer around an IplImage. The goals +// of the class design are: +// 1. All the data has explicit ownership to avoid memory leaks +// 2. No hidden allocations or copies for performance. +// 3. Easy access to OpenCV methods (which will access IPP if available) +// 4. Can easily treat external data as an image +// 5. Easy to create images which are subsets of other images +// 6. Fast pixel access which can take advantage of number of channels +// if known at compile time. +// +// The WImage class is the image class which provides the data accessors. +// The 'W' comes from the fact that it is also a wrapper around the popular +// but inconvenient IplImage class. A WImage can be constructed either using a +// WImageBuffer class which allocates and frees the data, +// or using a WImageView class which constructs a subimage or a view into +// external data. 
The view class does no memory management. Each class +// actually has two versions, one when the number of channels is known at +// compile time and one when it isn't. Using the one with the number of +// channels specified can provide some compile time optimizations by using the +// fact that the number of channels is a constant. +// +// We use the convention (c,r) to refer to column c and row r with (0,0) being +// the upper left corner. This is similar to standard Euclidean coordinates +// with the first coordinate varying in the horizontal direction and the second +// coordinate varying in the vertical direction. +// Thus (c,r) is usually in the domain [0, width) X [0, height) +// +// Example usage: +// WImageBuffer3_b im(5,7); // Make a 5X7 3 channel image of type uchar +// WImageView3_b sub_im(im, 2,2, 3,3); // 3X3 submatrix +// vector vec(10, 3.0f); +// WImageView1_f user_im(&vec[0], 2, 5); // 2X5 image w/ supplied data +// +// im.SetZero(); // same as cvSetZero(im.Ipl()) +// *im(2, 3) = 15; // Modify the element at column 2, row 3 +// MySetRand(&sub_im); +// +// // Copy the second row into the first. This can be done with no memory +// // allocation and will use SSE if IPP is available. +// int w = im.Width(); +// im.View(0,0, w,1).CopyFrom(im.View(0,1, w,1)); +// +// // Doesn't care about source of data since using WImage +// void MySetRand(WImage_b* im) { // Works with any number of channels +// for (int r = 0; r < im->Height(); ++r) { +// float* row = im->Row(r); +// for (int c = 0; c < im->Width(); ++c) { +// for (int ch = 0; ch < im->Channels(); ++ch, ++row) { +// *row = uchar(rand() & 255); +// } +// } +// } +// } +// +// Functions that are not part of the basic image allocation, viewing, and +// access should come from OpenCV, except some useful functions that are not +// part of OpenCV can be found in wimage_util.h +#ifndef __OPENCV_CORE_WIMAGE_HPP__ +#define __OPENCV_CORE_WIMAGE_HPP__ + +#include "opencv2/core/core_c.h" + +#ifdef __cplusplus + +namespace cv { + +template class WImage; +template class WImageBuffer; +template class WImageView; + +template class WImageC; +template class WImageBufferC; +template class WImageViewC; + +// Commonly used typedefs. +typedef WImage WImage_b; +typedef WImageView WImageView_b; +typedef WImageBuffer WImageBuffer_b; + +typedef WImageC WImage1_b; +typedef WImageViewC WImageView1_b; +typedef WImageBufferC WImageBuffer1_b; + +typedef WImageC WImage3_b; +typedef WImageViewC WImageView3_b; +typedef WImageBufferC WImageBuffer3_b; + +typedef WImage WImage_f; +typedef WImageView WImageView_f; +typedef WImageBuffer WImageBuffer_f; + +typedef WImageC WImage1_f; +typedef WImageViewC WImageView1_f; +typedef WImageBufferC WImageBuffer1_f; + +typedef WImageC WImage3_f; +typedef WImageViewC WImageView3_f; +typedef WImageBufferC WImageBuffer3_f; + +// There isn't a standard for signed and unsigned short so be more +// explicit in the typename for these cases. 
+typedef WImage WImage_16s; +typedef WImageView WImageView_16s; +typedef WImageBuffer WImageBuffer_16s; + +typedef WImageC WImage1_16s; +typedef WImageViewC WImageView1_16s; +typedef WImageBufferC WImageBuffer1_16s; + +typedef WImageC WImage3_16s; +typedef WImageViewC WImageView3_16s; +typedef WImageBufferC WImageBuffer3_16s; + +typedef WImage WImage_16u; +typedef WImageView WImageView_16u; +typedef WImageBuffer WImageBuffer_16u; + +typedef WImageC WImage1_16u; +typedef WImageViewC WImageView1_16u; +typedef WImageBufferC WImageBuffer1_16u; + +typedef WImageC WImage3_16u; +typedef WImageViewC WImageView3_16u; +typedef WImageBufferC WImageBuffer3_16u; + +// +// WImage definitions +// +// This WImage class gives access to the data it refers to. It can be +// constructed either by allocating the data with a WImageBuffer class or +// using the WImageView class to refer to a subimage or outside data. +template +class WImage +{ +public: + typedef T BaseType; + + // WImage is an abstract class with no other virtual methods so make the + // destructor virtual. + virtual ~WImage() = 0; + + // Accessors + IplImage* Ipl() {return image_; } + const IplImage* Ipl() const {return image_; } + T* ImageData() { return reinterpret_cast(image_->imageData); } + const T* ImageData() const { + return reinterpret_cast(image_->imageData); + } + + int Width() const {return image_->width; } + int Height() const {return image_->height; } + + // WidthStep is the number of bytes to go to the pixel with the next y coord + int WidthStep() const {return image_->widthStep; } + + int Channels() const {return image_->nChannels; } + int ChannelSize() const {return sizeof(T); } // number of bytes per channel + + // Number of bytes per pixel + int PixelSize() const {return Channels() * ChannelSize(); } + + // Return depth type (e.g. IPL_DEPTH_8U, IPL_DEPTH_32F) which is the number + // of bits per channel and with the signed bit set. + // This is known at compile time using specializations. + int Depth() const; + + inline const T* Row(int r) const { + return reinterpret_cast(image_->imageData + r*image_->widthStep); + } + + inline T* Row(int r) { + return reinterpret_cast(image_->imageData + r*image_->widthStep); + } + + // Pixel accessors which returns a pointer to the start of the channel + inline T* operator() (int c, int r) { + return reinterpret_cast(image_->imageData + r*image_->widthStep) + + c*Channels(); + } + + inline const T* operator() (int c, int r) const { + return reinterpret_cast(image_->imageData + r*image_->widthStep) + + c*Channels(); + } + + // Copy the contents from another image which is just a convenience to cvCopy + void CopyFrom(const WImage& src) { cvCopy(src.Ipl(), image_); } + + // Set contents to zero which is just a convenient to cvSetZero + void SetZero() { cvSetZero(image_); } + + // Construct a view into a region of this image + WImageView View(int c, int r, int width, int height); + +protected: + // Disallow copy and assignment + WImage(const WImage&); + void operator=(const WImage&); + + explicit WImage(IplImage* img) : image_(img) { + assert(!img || img->depth == Depth()); + } + + void SetIpl(IplImage* image) { + assert(!image || image->depth == Depth()); + image_ = image; + } + + IplImage* image_; +}; + + + +// Image class when both the pixel type and number of channels +// are known at compile time. This wrapper will speed up some of the operations +// like accessing individual pixels using the () operator. 
+template +class WImageC : public WImage +{ +public: + typedef typename WImage::BaseType BaseType; + enum { kChannels = C }; + + explicit WImageC(IplImage* img) : WImage(img) { + assert(!img || img->nChannels == Channels()); + } + + // Construct a view into a region of this image + WImageViewC View(int c, int r, int width, int height); + + // Copy the contents from another image which is just a convenience to cvCopy + void CopyFrom(const WImageC& src) { + cvCopy(src.Ipl(), WImage::image_); + } + + // WImageC is an abstract class with no other virtual methods so make the + // destructor virtual. + virtual ~WImageC() = 0; + + int Channels() const {return C; } + +protected: + // Disallow copy and assignment + WImageC(const WImageC&); + void operator=(const WImageC&); + + void SetIpl(IplImage* image) { + assert(!image || image->depth == WImage::Depth()); + WImage::SetIpl(image); + } +}; + +// +// WImageBuffer definitions +// +// Image class which owns the data, so it can be allocated and is always +// freed. It cannot be copied but can be explicity cloned. +// +template +class WImageBuffer : public WImage +{ +public: + typedef typename WImage::BaseType BaseType; + + // Default constructor which creates an object that can be + WImageBuffer() : WImage(0) {} + + WImageBuffer(int width, int height, int nchannels) : WImage(0) { + Allocate(width, height, nchannels); + } + + // Constructor which takes ownership of a given IplImage so releases + // the image on destruction. + explicit WImageBuffer(IplImage* img) : WImage(img) {} + + // Allocate an image. Does nothing if current size is the same as + // the new size. + void Allocate(int width, int height, int nchannels); + + // Set the data to point to an image, releasing the old data + void SetIpl(IplImage* img) { + ReleaseImage(); + WImage::SetIpl(img); + } + + // Clone an image which reallocates the image if of a different dimension. + void CloneFrom(const WImage& src) { + Allocate(src.Width(), src.Height(), src.Channels()); + CopyFrom(src); + } + + ~WImageBuffer() { + ReleaseImage(); + } + + // Release the image if it isn't null. + void ReleaseImage() { + if (WImage::image_) { + IplImage* image = WImage::image_; + cvReleaseImage(&image); + WImage::SetIpl(0); + } + } + + bool IsNull() const {return WImage::image_ == NULL; } + +private: + // Disallow copy and assignment + WImageBuffer(const WImageBuffer&); + void operator=(const WImageBuffer&); +}; + +// Like a WImageBuffer class but when the number of channels is known +// at compile time. +template +class WImageBufferC : public WImageC +{ +public: + typedef typename WImage::BaseType BaseType; + enum { kChannels = C }; + + // Default constructor which creates an object that can be + WImageBufferC() : WImageC(0) {} + + WImageBufferC(int width, int height) : WImageC(0) { + Allocate(width, height); + } + + // Constructor which takes ownership of a given IplImage so releases + // the image on destruction. + explicit WImageBufferC(IplImage* img) : WImageC(img) {} + + // Allocate an image. Does nothing if current size is the same as + // the new size. + void Allocate(int width, int height); + + // Set the data to point to an image, releasing the old data + void SetIpl(IplImage* img) { + ReleaseImage(); + WImageC::SetIpl(img); + } + + // Clone an image which reallocates the image if of a different dimension. + void CloneFrom(const WImageC& src) { + Allocate(src.Width(), src.Height()); + CopyFrom(src); + } + + ~WImageBufferC() { + ReleaseImage(); + } + + // Release the image if it isn't null. 
+ void ReleaseImage() { + if (WImage::image_) { + IplImage* image = WImage::image_; + cvReleaseImage(&image); + WImageC::SetIpl(0); + } + } + + bool IsNull() const {return WImage::image_ == NULL; } + +private: + // Disallow copy and assignment + WImageBufferC(const WImageBufferC&); + void operator=(const WImageBufferC&); +}; + +// +// WImageView definitions +// +// View into an image class which allows treating a subimage as an image +// or treating external data as an image +// +template +class WImageView : public WImage +{ +public: + typedef typename WImage::BaseType BaseType; + + // Construct a subimage. No checks are done that the subimage lies + // completely inside the original image. + WImageView(WImage* img, int c, int r, int width, int height); + + // Refer to external data. + // If not given width_step assumed to be same as width. + WImageView(T* data, int width, int height, int channels, int width_step = -1); + + // Refer to external data. This does NOT take ownership + // of the supplied IplImage. + WImageView(IplImage* img) : WImage(img) {} + + // Copy constructor + WImageView(const WImage& img) : WImage(0) { + header_ = *(img.Ipl()); + WImage::SetIpl(&header_); + } + + WImageView& operator=(const WImage& img) { + header_ = *(img.Ipl()); + WImage::SetIpl(&header_); + return *this; + } + +protected: + IplImage header_; +}; + + +template +class WImageViewC : public WImageC +{ +public: + typedef typename WImage::BaseType BaseType; + enum { kChannels = C }; + + // Default constructor needed for vectors of views. + WImageViewC(); + + virtual ~WImageViewC() {} + + // Construct a subimage. No checks are done that the subimage lies + // completely inside the original image. + WImageViewC(WImageC* img, + int c, int r, int width, int height); + + // Refer to external data + WImageViewC(T* data, int width, int height, int width_step = -1); + + // Refer to external data. This does NOT take ownership + // of the supplied IplImage. + WImageViewC(IplImage* img) : WImageC(img) {} + + // Copy constructor which does a shallow copy to allow multiple views + // of same data. gcc-4.1.1 gets confused if both versions of + // the constructor and assignment operator are not provided. + WImageViewC(const WImageC& img) : WImageC(0) { + header_ = *(img.Ipl()); + WImageC::SetIpl(&header_); + } + WImageViewC(const WImageViewC& img) : WImageC(0) { + header_ = *(img.Ipl()); + WImageC::SetIpl(&header_); + } + + WImageViewC& operator=(const WImageC& img) { + header_ = *(img.Ipl()); + WImageC::SetIpl(&header_); + return *this; + } + WImageViewC& operator=(const WImageViewC& img) { + header_ = *(img.Ipl()); + WImageC::SetIpl(&header_); + return *this; + } + +protected: + IplImage header_; +}; + + +// Specializations for depth +template<> +inline int WImage::Depth() const {return IPL_DEPTH_8U; } +template<> +inline int WImage::Depth() const {return IPL_DEPTH_8S; } +template<> +inline int WImage::Depth() const {return IPL_DEPTH_16S; } +template<> +inline int WImage::Depth() const {return IPL_DEPTH_16U; } +template<> +inline int WImage::Depth() const {return IPL_DEPTH_32S; } +template<> +inline int WImage::Depth() const {return IPL_DEPTH_32F; } +template<> +inline int WImage::Depth() const {return IPL_DEPTH_64F; } + +// +// Pure virtual destructors still need to be defined. 
+// +template inline WImage::~WImage() {} +template inline WImageC::~WImageC() {} + +// +// Allocate ImageData +// +template +inline void WImageBuffer::Allocate(int width, int height, int nchannels) +{ + if (IsNull() || WImage::Width() != width || + WImage::Height() != height || WImage::Channels() != nchannels) { + ReleaseImage(); + WImage::image_ = cvCreateImage(cvSize(width, height), + WImage::Depth(), nchannels); + } +} + +template +inline void WImageBufferC::Allocate(int width, int height) +{ + if (IsNull() || WImage::Width() != width || WImage::Height() != height) { + ReleaseImage(); + WImageC::SetIpl(cvCreateImage(cvSize(width, height),WImage::Depth(), C)); + } +} + +// +// ImageView methods +// +template +WImageView::WImageView(WImage* img, int c, int r, int width, int height) + : WImage(0) +{ + header_ = *(img->Ipl()); + header_.imageData = reinterpret_cast((*img)(c, r)); + header_.width = width; + header_.height = height; + WImage::SetIpl(&header_); +} + +template +WImageView::WImageView(T* data, int width, int height, int nchannels, int width_step) + : WImage(0) +{ + cvInitImageHeader(&header_, cvSize(width, height), WImage::Depth(), nchannels); + header_.imageData = reinterpret_cast(data); + if (width_step > 0) { + header_.widthStep = width_step; + } + WImage::SetIpl(&header_); +} + +template +WImageViewC::WImageViewC(WImageC* img, int c, int r, int width, int height) + : WImageC(0) +{ + header_ = *(img->Ipl()); + header_.imageData = reinterpret_cast((*img)(c, r)); + header_.width = width; + header_.height = height; + WImageC::SetIpl(&header_); +} + +template +WImageViewC::WImageViewC() : WImageC(0) { + cvInitImageHeader(&header_, cvSize(0, 0), WImage::Depth(), C); + header_.imageData = reinterpret_cast(0); + WImageC::SetIpl(&header_); +} + +template +WImageViewC::WImageViewC(T* data, int width, int height, int width_step) + : WImageC(0) +{ + cvInitImageHeader(&header_, cvSize(width, height), WImage::Depth(), C); + header_.imageData = reinterpret_cast(data); + if (width_step > 0) { + header_.widthStep = width_step; + } + WImageC::SetIpl(&header_); +} + +// Construct a view into a region of an image +template +WImageView WImage::View(int c, int r, int width, int height) { + return WImageView(this, c, r, width, height); +} + +template +WImageViewC WImageC::View(int c, int r, int width, int height) { + return WImageViewC(this, c, r, width, height); +} + +} // end of namespace + +#endif // __cplusplus + +#endif diff --git a/src/c/hardware/rasberrypi/libraries/opencv/opencv2/features2d.hpp b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/features2d.hpp new file mode 100644 index 0000000..3ab3273 --- /dev/null +++ b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/features2d.hpp @@ -0,0 +1,43 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. 
+// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#include "opencv2/features2d/features2d.hpp" diff --git a/src/c/hardware/rasberrypi/libraries/opencv/opencv2/features2d/features2d.hpp b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/features2d/features2d.hpp new file mode 100644 index 0000000..e4e796f --- /dev/null +++ b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/features2d/features2d.hpp @@ -0,0 +1,1616 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. 
+// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef __OPENCV_FEATURES_2D_HPP__ +#define __OPENCV_FEATURES_2D_HPP__ + +#include "opencv2/core/core.hpp" +#include "opencv2/flann/miniflann.hpp" + +#ifdef __cplusplus +#include + +namespace cv +{ + +CV_EXPORTS bool initModule_features2d(); + +/*! + The Keypoint Class + + The class instance stores a keypoint, i.e. a point feature found by one of many available keypoint detectors, such as + Harris corner detector, cv::FAST, cv::StarDetector, cv::SURF, cv::SIFT, cv::LDetector etc. + + The keypoint is characterized by the 2D position, scale + (proportional to the diameter of the neighborhood that needs to be taken into account), + orientation and some other parameters. The keypoint neighborhood is then analyzed by another algorithm that builds a descriptor + (usually represented as a feature vector). The keypoints representing the same object in different images can then be matched using + cv::KDTree or another method. +*/ +class CV_EXPORTS_W_SIMPLE KeyPoint +{ +public: + //! the default constructor + CV_WRAP KeyPoint() : pt(0,0), size(0), angle(-1), response(0), octave(0), class_id(-1) {} + //! the full constructor + KeyPoint(Point2f _pt, float _size, float _angle=-1, + float _response=0, int _octave=0, int _class_id=-1) + : pt(_pt), size(_size), angle(_angle), + response(_response), octave(_octave), class_id(_class_id) {} + //! another form of the full constructor + CV_WRAP KeyPoint(float x, float y, float _size, float _angle=-1, + float _response=0, int _octave=0, int _class_id=-1) + : pt(x, y), size(_size), angle(_angle), + response(_response), octave(_octave), class_id(_class_id) {} + + size_t hash() const; + + //! converts vector of keypoints to vector of points + static void convert(const vector& keypoints, + CV_OUT vector& points2f, + const vector& keypointIndexes=vector()); + //! converts vector of points to the vector of keypoints, where each keypoint is assigned the same size and the same orientation + static void convert(const vector& points2f, + CV_OUT vector& keypoints, + float size=1, float response=1, int octave=0, int class_id=-1); + + //! computes overlap for pair of keypoints; + //! overlap is a ratio between area of keypoint regions intersection and + //! area of keypoint regions union (now keypoint region is circle) + static float overlap(const KeyPoint& kp1, const KeyPoint& kp2); + + CV_PROP_RW Point2f pt; //!< coordinates of the keypoints + CV_PROP_RW float size; //!< diameter of the meaningful keypoint neighborhood + CV_PROP_RW float angle; //!< computed orientation of the keypoint (-1 if not applicable); + //!< it's in [0,360) degrees and measured relative to + //!< image coordinate system, ie in clockwise. + CV_PROP_RW float response; //!< the response by which the most strong keypoints have been selected. 
Can be used for the further sorting or subsampling + CV_PROP_RW int octave; //!< octave (pyramid layer) from which the keypoint has been extracted + CV_PROP_RW int class_id; //!< object class (if the keypoints need to be clustered by an object they belong to) +}; + +//! writes vector of keypoints to the file storage +CV_EXPORTS void write(FileStorage& fs, const string& name, const vector& keypoints); +//! reads vector of keypoints from the specified file storage node +CV_EXPORTS void read(const FileNode& node, CV_OUT vector& keypoints); + +/* + * A class filters a vector of keypoints. + * Because now it is difficult to provide a convenient interface for all usage scenarios of the keypoints filter class, + * it has only several needed by now static methods. + */ +class CV_EXPORTS KeyPointsFilter +{ +public: + KeyPointsFilter(){} + + /* + * Remove keypoints within borderPixels of an image edge. + */ + static void runByImageBorder( vector& keypoints, Size imageSize, int borderSize ); + /* + * Remove keypoints of sizes out of range. + */ + static void runByKeypointSize( vector& keypoints, float minSize, + float maxSize=FLT_MAX ); + /* + * Remove keypoints from some image by mask for pixels of this image. + */ + static void runByPixelsMask( vector& keypoints, const Mat& mask ); + /* + * Remove duplicated keypoints. + */ + static void removeDuplicated( vector& keypoints ); + + /* + * Retain the specified number of the best keypoints (according to the response) + */ + static void retainBest( vector& keypoints, int npoints ); +}; + + +/************************************ Base Classes ************************************/ + +/* + * Abstract base class for 2D image feature detectors. + */ +class CV_EXPORTS_W FeatureDetector : public virtual Algorithm +{ +public: + virtual ~FeatureDetector(); + + /* + * Detect keypoints in an image. + * image The image. + * keypoints The detected keypoints. + * mask Mask specifying where to look for keypoints (optional). Must be a char + * matrix with non-zero values in the region of interest. + */ + CV_WRAP void detect( const Mat& image, CV_OUT vector& keypoints, const Mat& mask=Mat() ) const; + + /* + * Detect keypoints in an image set. + * images Image collection. + * keypoints Collection of keypoints detected in an input images. keypoints[i] is a set of keypoints detected in an images[i]. + * masks Masks for image set. masks[i] is a mask for images[i]. + */ + void detect( const vector& images, vector >& keypoints, const vector& masks=vector() ) const; + + // Return true if detector object is empty + CV_WRAP virtual bool empty() const; + + // Create feature detector by detector name. + CV_WRAP static Ptr create( const string& detectorType ); + +protected: + virtual void detectImpl( const Mat& image, vector& keypoints, const Mat& mask=Mat() ) const = 0; + + /* + * Remove keypoints that are not in the mask. + * Helper function, useful when wrapping a library call for keypoint detection that + * does not support a mask argument. + */ + static void removeInvalidPoints( const Mat& mask, vector& keypoints ); +}; + + +/* + * Abstract base class for computing descriptors for image keypoints. + * + * In this interface we assume a keypoint descriptor can be represented as a + * dense, fixed-dimensional vector of some basic type. Most descriptors used + * in practice follow this pattern, as it makes it very easy to compute + * distances between descriptors. Therefore we represent a collection of + * descriptors as a Mat, where each row is one keypoint descriptor. 
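+ *
+ * For example, for N keypoints a 32-byte binary descriptor such as ORB (declared
+ * below) is returned as an N x 32 matrix of type CV_8U, while a 64-element
+ * floating-point descriptor would be returned as an N x 64 CV_32F matrix.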
+ */ +class CV_EXPORTS_W DescriptorExtractor : public virtual Algorithm +{ +public: + virtual ~DescriptorExtractor(); + + /* + * Compute the descriptors for a set of keypoints in an image. + * image The image. + * keypoints The input keypoints. Keypoints for which a descriptor cannot be computed are removed. + * descriptors Copmputed descriptors. Row i is the descriptor for keypoint i. + */ + CV_WRAP void compute( const Mat& image, CV_OUT CV_IN_OUT vector& keypoints, CV_OUT Mat& descriptors ) const; + + /* + * Compute the descriptors for a keypoints collection detected in image collection. + * images Image collection. + * keypoints Input keypoints collection. keypoints[i] is keypoints detected in images[i]. + * Keypoints for which a descriptor cannot be computed are removed. + * descriptors Descriptor collection. descriptors[i] are descriptors computed for set keypoints[i]. + */ + void compute( const vector& images, vector >& keypoints, vector& descriptors ) const; + + CV_WRAP virtual int descriptorSize() const = 0; + CV_WRAP virtual int descriptorType() const = 0; + + CV_WRAP virtual bool empty() const; + + CV_WRAP static Ptr create( const string& descriptorExtractorType ); + +protected: + virtual void computeImpl( const Mat& image, vector& keypoints, Mat& descriptors ) const = 0; + + /* + * Remove keypoints within borderPixels of an image edge. + */ + static void removeBorderKeypoints( vector& keypoints, + Size imageSize, int borderSize ); +}; + + + +/* + * Abstract base class for simultaneous 2D feature detection descriptor extraction. + */ +class CV_EXPORTS_W Feature2D : public FeatureDetector, public DescriptorExtractor +{ +public: + /* + * Detect keypoints in an image. + * image The image. + * keypoints The detected keypoints. + * mask Mask specifying where to look for keypoints (optional). Must be a char + * matrix with non-zero values in the region of interest. + * useProvidedKeypoints If true, the method will skip the detection phase and will compute + * descriptors for the provided keypoints + */ + CV_WRAP_AS(detectAndCompute) virtual void operator()( InputArray image, InputArray mask, + CV_OUT vector& keypoints, + OutputArray descriptors, + bool useProvidedKeypoints=false ) const = 0; + + CV_WRAP void compute( const Mat& image, CV_OUT CV_IN_OUT std::vector& keypoints, CV_OUT Mat& descriptors ) const; + + // Create feature detector and descriptor extractor by name. + CV_WRAP static Ptr create( const string& name ); +}; + +/*! 
+ BRISK implementation +*/ +class CV_EXPORTS_W BRISK : public Feature2D +{ +public: + CV_WRAP explicit BRISK(int thresh=30, int octaves=3, float patternScale=1.0f); + + virtual ~BRISK(); + + // returns the descriptor size in bytes + int descriptorSize() const; + // returns the descriptor type + int descriptorType() const; + + // Compute the BRISK features on an image + void operator()(InputArray image, InputArray mask, vector& keypoints) const; + + // Compute the BRISK features and descriptors on an image + void operator()( InputArray image, InputArray mask, vector& keypoints, + OutputArray descriptors, bool useProvidedKeypoints=false ) const; + + AlgorithmInfo* info() const; + + // custom setup + CV_WRAP explicit BRISK(std::vector &radiusList, std::vector &numberList, + float dMax=5.85f, float dMin=8.2f, std::vector indexChange=std::vector()); + + // call this to generate the kernel: + // circle of radius r (pixels), with n points; + // short pairings with dMax, long pairings with dMin + CV_WRAP void generateKernel(std::vector &radiusList, + std::vector &numberList, float dMax=5.85f, float dMin=8.2f, + std::vector indexChange=std::vector()); + +protected: + + void computeImpl( const Mat& image, vector& keypoints, Mat& descriptors ) const; + void detectImpl( const Mat& image, vector& keypoints, const Mat& mask=Mat() ) const; + + void computeKeypointsNoOrientation(InputArray image, InputArray mask, vector& keypoints) const; + void computeDescriptorsAndOrOrientation(InputArray image, InputArray mask, vector& keypoints, + OutputArray descriptors, bool doDescriptors, bool doOrientation, + bool useProvidedKeypoints) const; + + // Feature parameters + CV_PROP_RW int threshold; + CV_PROP_RW int octaves; + + // some helper structures for the Brisk pattern representation + struct BriskPatternPoint{ + float x; // x coordinate relative to center + float y; // x coordinate relative to center + float sigma; // Gaussian smoothing sigma + }; + struct BriskShortPair{ + unsigned int i; // index of the first pattern point + unsigned int j; // index of other pattern point + }; + struct BriskLongPair{ + unsigned int i; // index of the first pattern point + unsigned int j; // index of other pattern point + int weighted_dx; // 1024.0/dx + int weighted_dy; // 1024.0/dy + }; + inline int smoothedIntensity(const cv::Mat& image, + const cv::Mat& integral,const float key_x, + const float key_y, const unsigned int scale, + const unsigned int rot, const unsigned int point) const; + // pattern properties + BriskPatternPoint* patternPoints_; //[i][rotation][scale] + unsigned int points_; // total number of collocation points + float* scaleList_; // lists the scaling per scale index [scale] + unsigned int* sizeList_; // lists the total pattern size per scale index [scale] + static const unsigned int scales_; // scales discretization + static const float scalerange_; // span of sizes 40->4 Octaves - else, this needs to be adjusted... + static const unsigned int n_rot_; // discretization of the rotation look-up + + // pairs + int strings_; // number of uchars the descriptor consists of + float dMax_; // short pair maximum distance + float dMin_; // long pair maximum distance + BriskShortPair* shortPairs_; // d<_dMax + BriskLongPair* longPairs_; // d>_dMin + unsigned int noShortPairs_; // number of shortParis + unsigned int noLongPairs_; // number of longParis + + // general + static const float basicSize_; +}; + + +/*! + ORB implementation. 
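+
+ Illustrative usage (editor's sketch, not part of the upstream header; image is a
+ grayscale Mat loaded elsewhere):
+
+   ORB orb(500);                               // keep up to 500 features
+   vector<KeyPoint> keypoints;
+   Mat descriptors;
+   orb(image, Mat(), keypoints, descriptors);  // detect and describe in one call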
+*/ +class CV_EXPORTS_W ORB : public Feature2D +{ +public: + // the size of the signature in bytes + enum { kBytes = 32, HARRIS_SCORE=0, FAST_SCORE=1 }; + + CV_WRAP explicit ORB(int nfeatures = 500, float scaleFactor = 1.2f, int nlevels = 8, int edgeThreshold = 31, + int firstLevel = 0, int WTA_K=2, int scoreType=ORB::HARRIS_SCORE, int patchSize=31 ); + + // returns the descriptor size in bytes + int descriptorSize() const; + // returns the descriptor type + int descriptorType() const; + + // Compute the ORB features and descriptors on an image + void operator()(InputArray image, InputArray mask, vector& keypoints) const; + + // Compute the ORB features and descriptors on an image + void operator()( InputArray image, InputArray mask, vector& keypoints, + OutputArray descriptors, bool useProvidedKeypoints=false ) const; + + AlgorithmInfo* info() const; + +protected: + + void computeImpl( const Mat& image, vector& keypoints, Mat& descriptors ) const; + void detectImpl( const Mat& image, vector& keypoints, const Mat& mask=Mat() ) const; + + CV_PROP_RW int nfeatures; + CV_PROP_RW double scaleFactor; + CV_PROP_RW int nlevels; + CV_PROP_RW int edgeThreshold; + CV_PROP_RW int firstLevel; + CV_PROP_RW int WTA_K; + CV_PROP_RW int scoreType; + CV_PROP_RW int patchSize; +}; + +typedef ORB OrbFeatureDetector; +typedef ORB OrbDescriptorExtractor; + +/*! + FREAK implementation +*/ +class CV_EXPORTS FREAK : public DescriptorExtractor +{ +public: + /** Constructor + * @param orientationNormalized enable orientation normalization + * @param scaleNormalized enable scale normalization + * @param patternScale scaling of the description pattern + * @param nOctaves number of octaves covered by the detected keypoints + * @param selectedPairs (optional) user defined selected pairs + */ + explicit FREAK( bool orientationNormalized = true, + bool scaleNormalized = true, + float patternScale = 22.0f, + int nOctaves = 4, + const vector& selectedPairs = vector()); + FREAK( const FREAK& rhs ); + FREAK& operator=( const FREAK& ); + + virtual ~FREAK(); + + /** returns the descriptor length in bytes */ + virtual int descriptorSize() const; + + /** returns the descriptor type */ + virtual int descriptorType() const; + + /** select the 512 "best description pairs" + * @param images grayscale images set + * @param keypoints set of detected keypoints + * @param corrThresh correlation threshold + * @param verbose print construction information + * @return list of best pair indexes + */ + vector selectPairs( const vector& images, vector >& keypoints, + const double corrThresh = 0.7, bool verbose = true ); + + AlgorithmInfo* info() const; + + enum + { + NB_SCALES = 64, NB_PAIRS = 512, NB_ORIENPAIRS = 45 + }; + +protected: + virtual void computeImpl( const Mat& image, vector& keypoints, Mat& descriptors ) const; + void buildPattern(); + uchar meanIntensity( const Mat& image, const Mat& integral, const float kp_x, const float kp_y, + const unsigned int scale, const unsigned int rot, const unsigned int point ) const; + + bool orientationNormalized; //true if the orientation is normalized, false otherwise + bool scaleNormalized; //true if the scale is normalized, false otherwise + double patternScale; //scaling of the pattern + int nOctaves; //number of octaves + bool extAll; // true if all pairs need to be extracted for pairs selection + + double patternScale0; + int nOctaves0; + vector selectedPairs0; + + struct PatternPoint + { + float x; // x coordinate relative to center + float y; // x coordinate relative to center + float 
sigma; // Gaussian smoothing sigma + }; + + struct DescriptionPair + { + uchar i; // index of the first point + uchar j; // index of the second point + }; + + struct OrientationPair + { + uchar i; // index of the first point + uchar j; // index of the second point + int weight_dx; // dx/(norm_sq))*4096 + int weight_dy; // dy/(norm_sq))*4096 + }; + + vector patternLookup; // look-up table for the pattern points (position+sigma of all points at all scales and orientation) + int patternSizes[NB_SCALES]; // size of the pattern at a specific scale (used to check if a point is within image boundaries) + DescriptionPair descriptionPairs[NB_PAIRS]; + OrientationPair orientationPairs[NB_ORIENPAIRS]; +}; + + +/*! + Maximal Stable Extremal Regions class. + + The class implements MSER algorithm introduced by J. Matas. + Unlike SIFT, SURF and many other detectors in OpenCV, this is salient region detector, + not the salient point detector. + + It returns the regions, each of those is encoded as a contour. +*/ +class CV_EXPORTS_W MSER : public FeatureDetector +{ +public: + //! the full constructor + CV_WRAP explicit MSER( int _delta=5, int _min_area=60, int _max_area=14400, + double _max_variation=0.25, double _min_diversity=.2, + int _max_evolution=200, double _area_threshold=1.01, + double _min_margin=0.003, int _edge_blur_size=5 ); + + //! the operator that extracts the MSERs from the image or the specific part of it + CV_WRAP_AS(detect) void operator()( const Mat& image, CV_OUT vector >& msers, + const Mat& mask=Mat() ) const; + AlgorithmInfo* info() const; + +protected: + void detectImpl( const Mat& image, vector& keypoints, const Mat& mask=Mat() ) const; + + int delta; + int minArea; + int maxArea; + double maxVariation; + double minDiversity; + int maxEvolution; + double areaThreshold; + double minMargin; + int edgeBlurSize; +}; + +typedef MSER MserFeatureDetector; + +/*! + The "Star" Detector. + + The class implements the keypoint detector introduced by K. Konolige. +*/ +class CV_EXPORTS_W StarDetector : public FeatureDetector +{ +public: + //! the full constructor + CV_WRAP StarDetector(int _maxSize=45, int _responseThreshold=30, + int _lineThresholdProjected=10, + int _lineThresholdBinarized=8, + int _suppressNonmaxSize=5); + + //! finds the keypoints in the image + CV_WRAP_AS(detect) void operator()(const Mat& image, + CV_OUT vector& keypoints) const; + + AlgorithmInfo* info() const; + +protected: + void detectImpl( const Mat& image, vector& keypoints, const Mat& mask=Mat() ) const; + + int maxSize; + int responseThreshold; + int lineThresholdProjected; + int lineThresholdBinarized; + int suppressNonmaxSize; +}; + +//! detects corners using FAST algorithm by E. 
Rosten
+CV_EXPORTS void FAST( InputArray image, CV_OUT vector& keypoints,
+ int threshold, bool nonmaxSuppression=true );
+
+CV_EXPORTS void FASTX( InputArray image, CV_OUT vector& keypoints,
+ int threshold, bool nonmaxSuppression, int type );
+
+class CV_EXPORTS_W FastFeatureDetector : public FeatureDetector
+{
+public:
+
+ enum
+ { // Define it in old class to simplify migration to 2.5
+ TYPE_5_8 = 0, TYPE_7_12 = 1, TYPE_9_16 = 2
+ };
+
+ CV_WRAP FastFeatureDetector( int threshold=10, bool nonmaxSuppression=true );
+ AlgorithmInfo* info() const;
+
+protected:
+ virtual void detectImpl( const Mat& image, vector& keypoints, const Mat& mask=Mat() ) const;
+
+ int threshold;
+ bool nonmaxSuppression;
+};
+
+
+class CV_EXPORTS_W GFTTDetector : public FeatureDetector
+{
+public:
+ CV_WRAP GFTTDetector( int maxCorners=1000, double qualityLevel=0.01, double minDistance=1,
+ int blockSize=3, bool useHarrisDetector=false, double k=0.04 );
+ AlgorithmInfo* info() const;
+
+protected:
+ virtual void detectImpl( const Mat& image, vector& keypoints, const Mat& mask=Mat() ) const;
+
+ int nfeatures;
+ double qualityLevel;
+ double minDistance;
+ int blockSize;
+ bool useHarrisDetector;
+ double k;
+};
+
+typedef GFTTDetector GoodFeaturesToTrackDetector;
+typedef StarDetector StarFeatureDetector;
+
+class CV_EXPORTS_W SimpleBlobDetector : public FeatureDetector
+{
+public:
+ struct CV_EXPORTS_W_SIMPLE Params
+ {
+ CV_WRAP Params();
+ CV_PROP_RW float thresholdStep;
+ CV_PROP_RW float minThreshold;
+ CV_PROP_RW float maxThreshold;
+ CV_PROP_RW size_t minRepeatability;
+ CV_PROP_RW float minDistBetweenBlobs;
+
+ CV_PROP_RW bool filterByColor;
+ CV_PROP_RW uchar blobColor;
+
+ CV_PROP_RW bool filterByArea;
+ CV_PROP_RW float minArea, maxArea;
+
+ CV_PROP_RW bool filterByCircularity;
+ CV_PROP_RW float minCircularity, maxCircularity;
+
+ CV_PROP_RW bool filterByInertia;
+ CV_PROP_RW float minInertiaRatio, maxInertiaRatio;
+
+ CV_PROP_RW bool filterByConvexity;
+ CV_PROP_RW float minConvexity, maxConvexity;
+
+ void read( const FileNode& fn );
+ void write( FileStorage& fs ) const;
+ };
+
+ CV_WRAP SimpleBlobDetector(const SimpleBlobDetector::Params &parameters = SimpleBlobDetector::Params());
+
+ virtual void read( const FileNode& fn );
+ virtual void write( FileStorage& fs ) const;
+
+protected:
+ struct CV_EXPORTS Center
+ {
+ Point2d location;
+ double radius;
+ double confidence;
+ };
+
+ virtual void detectImpl( const Mat& image, vector& keypoints, const Mat& mask=Mat() ) const;
+ virtual void findBlobs(const Mat &image, const Mat &binaryImage, vector &centers) const;
+
+ Params params;
+ AlgorithmInfo* info() const;
+};
+
+
+class CV_EXPORTS DenseFeatureDetector : public FeatureDetector
+{
+public:
+ explicit DenseFeatureDetector( float initFeatureScale=1.f, int featureScaleLevels=1,
+ float featureScaleMul=0.1f,
+ int initXyStep=6, int initImgBound=0,
+ bool varyXyStepWithScale=true,
+ bool varyImgBoundWithScale=false );
+ AlgorithmInfo* info() const;
+
+protected:
+ virtual void detectImpl( const Mat& image, vector& keypoints, const Mat& mask=Mat() ) const;
+
+ double initFeatureScale;
+ int featureScaleLevels;
+ double featureScaleMul;
+
+ int initXyStep;
+ int initImgBound;
+
+ bool varyXyStepWithScale;
+ bool varyImgBoundWithScale;
+};
+
+/*
+ * Adapts a detector to partition the source image into a grid and detect
+ * points in each cell.
+ */
+class CV_EXPORTS_W GridAdaptedFeatureDetector : public FeatureDetector
+{
+public:
+ /*
+ * detector Detector that will be adapted.
+ * maxTotalKeypoints Maximum count of keypoints detected on the image. Only the strongest keypoints
+ * will be kept.
+ * gridRows Grid rows count.
+ * gridCols Grid column count.
+ */
+ CV_WRAP GridAdaptedFeatureDetector( const Ptr& detector=0,
+ int maxTotalKeypoints=1000,
+ int gridRows=4, int gridCols=4 );
+
+ // TODO implement read/write
+ virtual bool empty() const;
+
+ AlgorithmInfo* info() const;
+
+protected:
+ virtual void detectImpl( const Mat& image, vector& keypoints, const Mat& mask=Mat() ) const;
+
+ Ptr detector;
+ int maxTotalKeypoints;
+ int gridRows;
+ int gridCols;
+};
+
+/*
+ * Adapts a detector to detect points over multiple levels of a Gaussian
+ * pyramid. Useful for detectors that are not inherently scaled.
+ */
+class CV_EXPORTS_W PyramidAdaptedFeatureDetector : public FeatureDetector
+{
+public:
+ // maxLevel - The 0-based index of the last pyramid layer
+ CV_WRAP PyramidAdaptedFeatureDetector( const Ptr& detector, int maxLevel=2 );
+
+ // TODO implement read/write
+ virtual bool empty() const;
+
+protected:
+ virtual void detectImpl( const Mat& image, vector& keypoints, const Mat& mask=Mat() ) const;
+
+ Ptr detector;
+ int maxLevel;
+};
+
+/** \brief A feature detector parameter adjuster; it is used by the DynamicAdaptedFeatureDetector
+ * and is a wrapper for FeatureDetector that allows it to be adjusted after a detection
+ */
+class CV_EXPORTS AdjusterAdapter: public FeatureDetector
+{
+public:
+ /** pure virtual interface
+ */
+ virtual ~AdjusterAdapter() {}
+ /** too few features were detected, so adjust the detector params accordingly
+ * \param min the minimum number of desired features
+ * \param n_detected the number previously detected
+ */
+ virtual void tooFew(int min, int n_detected) = 0;
+ /** too many features were detected, so adjust the detector params accordingly
+ * \param max the maximum number of desired features
+ * \param n_detected the number previously detected
+ */
+ virtual void tooMany(int max, int n_detected) = 0;
+ /** are params maxed out or still valid?
+ * \return false if the parameters can't be adjusted any more
+ */
+ virtual bool good() const = 0;
+
+ virtual Ptr clone() const = 0;
+
+ static Ptr create( const string& detectorType );
+};
+/** \brief an adaptively adjusting detector that iteratively detects until the desired number
+ * of features is detected.
+ * Beware that this is not thread safe, as the adjustment of parameters breaks the const
+ * of the detection routine...
+ * /TODO Make this const correct and thread safe + * + * sample usage: + //will create a detector that attempts to find 100 - 110 FAST Keypoints, and will at most run + //FAST feature detection 10 times until that number of keypoints are found + Ptr detector(new DynamicAdaptedFeatureDetector(new FastAdjuster(20,true),100, 110, 10)); + + */ +class CV_EXPORTS DynamicAdaptedFeatureDetector: public FeatureDetector +{ +public: + + /** \param adjuster an AdjusterAdapter that will do the detection and parameter adjustment + * \param max_features the maximum desired number of features + * \param max_iters the maximum number of times to try to adjust the feature detector params + * for the FastAdjuster this can be high, but with Star or Surf this can get time consuming + * \param min_features the minimum desired features + */ + DynamicAdaptedFeatureDetector( const Ptr& adjuster, int min_features=400, int max_features=500, int max_iters=5 ); + + virtual bool empty() const; + +protected: + virtual void detectImpl( const Mat& image, vector& keypoints, const Mat& mask=Mat() ) const; + +private: + DynamicAdaptedFeatureDetector& operator=(const DynamicAdaptedFeatureDetector&); + DynamicAdaptedFeatureDetector(const DynamicAdaptedFeatureDetector&); + + int escape_iters_; + int min_features_, max_features_; + const Ptr adjuster_; +}; + +/**\brief an adjust for the FAST detector. This will basically decrement or increment the + * threshold by 1 + */ +class CV_EXPORTS FastAdjuster: public AdjusterAdapter +{ +public: + /**\param init_thresh the initial threshold to start with, default = 20 + * \param nonmax whether to use non max or not for fast feature detection + * \param min_thresh + * \param max_thresh + */ + FastAdjuster(int init_thresh=20, bool nonmax=true, int min_thresh=1, int max_thresh=200); + + virtual void tooFew(int minv, int n_detected); + virtual void tooMany(int maxv, int n_detected); + virtual bool good() const; + + virtual Ptr clone() const; + +protected: + virtual void detectImpl( const Mat& image, vector& keypoints, const Mat& mask=Mat() ) const; + + int thresh_; + bool nonmax_; + int init_thresh_, min_thresh_, max_thresh_; +}; + + +/** An adjuster for StarFeatureDetector, this one adjusts the responseThreshold for now + * TODO find a faster way to converge the parameters for Star - use CvStarDetectorParams + */ +class CV_EXPORTS StarAdjuster: public AdjusterAdapter +{ +public: + StarAdjuster(double initial_thresh=30.0, double min_thresh=2., double max_thresh=200.); + + virtual void tooFew(int minv, int n_detected); + virtual void tooMany(int maxv, int n_detected); + virtual bool good() const; + + virtual Ptr clone() const; + +protected: + virtual void detectImpl( const Mat& image, vector& keypoints, const Mat& mask=Mat() ) const; + + double thresh_, init_thresh_, min_thresh_, max_thresh_; +}; + +class CV_EXPORTS SurfAdjuster: public AdjusterAdapter +{ +public: + SurfAdjuster( double initial_thresh=400.f, double min_thresh=2, double max_thresh=1000 ); + + virtual void tooFew(int minv, int n_detected); + virtual void tooMany(int maxv, int n_detected); + virtual bool good() const; + + virtual Ptr clone() const; + +protected: + virtual void detectImpl( const Mat& image, vector& keypoints, const Mat& mask=Mat() ) const; + + double thresh_, init_thresh_, min_thresh_, max_thresh_; +}; + +CV_EXPORTS Mat windowedMatchingMask( const vector& keypoints1, const vector& keypoints2, + float maxDeltaX, float maxDeltaY ); + + + +/* + * OpponentColorDescriptorExtractor + * + * Adapts a descriptor extractor 
to compute descriptors in Opponent Color Space + * (refer to van de Sande et al., CGIV 2008 "Color Descriptors for Object Category Recognition"). + * Input RGB image is transformed in Opponent Color Space. Then unadapted descriptor extractor + * (set in constructor) computes descriptors on each of the three channel and concatenate + * them into a single color descriptor. + */ +class CV_EXPORTS OpponentColorDescriptorExtractor : public DescriptorExtractor +{ +public: + OpponentColorDescriptorExtractor( const Ptr& descriptorExtractor ); + + virtual void read( const FileNode& ); + virtual void write( FileStorage& ) const; + + virtual int descriptorSize() const; + virtual int descriptorType() const; + + virtual bool empty() const; + +protected: + virtual void computeImpl( const Mat& image, vector& keypoints, Mat& descriptors ) const; + + Ptr descriptorExtractor; +}; + +/* + * BRIEF Descriptor + */ +class CV_EXPORTS BriefDescriptorExtractor : public DescriptorExtractor +{ +public: + static const int PATCH_SIZE = 48; + static const int KERNEL_SIZE = 9; + + // bytes is a length of descriptor in bytes. It can be equal 16, 32 or 64 bytes. + BriefDescriptorExtractor( int bytes = 32 ); + + virtual void read( const FileNode& ); + virtual void write( FileStorage& ) const; + + virtual int descriptorSize() const; + virtual int descriptorType() const; + + /// @todo read and write for brief + + AlgorithmInfo* info() const; + +protected: + virtual void computeImpl(const Mat& image, vector& keypoints, Mat& descriptors) const; + + typedef void(*PixelTestFn)(const Mat&, const vector&, Mat&); + + int bytes_; + PixelTestFn test_fn_; +}; + + +/****************************************************************************************\ +* Distance * +\****************************************************************************************/ + +template +struct CV_EXPORTS Accumulator +{ + typedef T Type; +}; + +template<> struct Accumulator { typedef float Type; }; +template<> struct Accumulator { typedef float Type; }; +template<> struct Accumulator { typedef float Type; }; +template<> struct Accumulator { typedef float Type; }; + +/* + * Squared Euclidean distance functor + */ +template +struct CV_EXPORTS SL2 +{ + enum { normType = NORM_L2SQR }; + typedef T ValueType; + typedef typename Accumulator::Type ResultType; + + ResultType operator()( const T* a, const T* b, int size ) const + { + return normL2Sqr(a, b, size); + } +}; + +/* + * Euclidean distance functor + */ +template +struct CV_EXPORTS L2 +{ + enum { normType = NORM_L2 }; + typedef T ValueType; + typedef typename Accumulator::Type ResultType; + + ResultType operator()( const T* a, const T* b, int size ) const + { + return (ResultType)sqrt((double)normL2Sqr(a, b, size)); + } +}; + +/* + * Manhattan distance (city block distance) functor + */ +template +struct CV_EXPORTS L1 +{ + enum { normType = NORM_L1 }; + typedef T ValueType; + typedef typename Accumulator::Type ResultType; + + ResultType operator()( const T* a, const T* b, int size ) const + { + return normL1(a, b, size); + } +}; + +/* + * Hamming distance functor - counts the bit differences between two strings - useful for the Brief descriptor + * bit count of A exclusive XOR'ed with B + */ +struct CV_EXPORTS Hamming +{ + enum { normType = NORM_HAMMING }; + typedef unsigned char ValueType; + typedef int ResultType; + + /** this will count the bits in a ^ b + */ + ResultType operator()( const unsigned char* a, const unsigned char* b, int size ) const + { + return normHamming(a, b, size); + } +}; + 
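+
+/*
+ * Illustrative use of the distance functors above (editor's sketch, not part of the
+ * upstream header). The functors work on raw descriptor rows; descA/descB and
+ * featA/featB below stand for single-row descriptor matrices computed elsewhere:
+ *
+ *   Hamming hamming;
+ *   int bitDist = hamming(descA.ptr<uchar>(0), descB.ptr<uchar>(0), descA.cols);
+ *
+ *   L2<float> l2;
+ *   float dist = l2(featA.ptr<float>(0), featB.ptr<float>(0), featA.cols);
+ */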
+typedef Hamming HammingLUT; + +template struct HammingMultilevel +{ + enum { normType = NORM_HAMMING + (cellsize>1) }; + typedef unsigned char ValueType; + typedef int ResultType; + + ResultType operator()( const unsigned char* a, const unsigned char* b, int size ) const + { + return normHamming(a, b, size, cellsize); + } +}; + +/****************************************************************************************\ +* DMatch * +\****************************************************************************************/ +/* + * Struct for matching: query descriptor index, train descriptor index, train image index and distance between descriptors. + */ +struct CV_EXPORTS_W_SIMPLE DMatch +{ + CV_WRAP DMatch() : queryIdx(-1), trainIdx(-1), imgIdx(-1), distance(FLT_MAX) {} + CV_WRAP DMatch( int _queryIdx, int _trainIdx, float _distance ) : + queryIdx(_queryIdx), trainIdx(_trainIdx), imgIdx(-1), distance(_distance) {} + CV_WRAP DMatch( int _queryIdx, int _trainIdx, int _imgIdx, float _distance ) : + queryIdx(_queryIdx), trainIdx(_trainIdx), imgIdx(_imgIdx), distance(_distance) {} + + CV_PROP_RW int queryIdx; // query descriptor index + CV_PROP_RW int trainIdx; // train descriptor index + CV_PROP_RW int imgIdx; // train image index + + CV_PROP_RW float distance; + + // less is better + bool operator<( const DMatch &m ) const + { + return distance < m.distance; + } +}; + +/****************************************************************************************\ +* DescriptorMatcher * +\****************************************************************************************/ +/* + * Abstract base class for matching two sets of descriptors. + */ +class CV_EXPORTS_W DescriptorMatcher : public Algorithm +{ +public: + virtual ~DescriptorMatcher(); + + /* + * Add descriptors to train descriptor collection. + * descriptors Descriptors to add. Each descriptors[i] is a descriptors set from one image. + */ + CV_WRAP virtual void add( const vector& descriptors ); + /* + * Get train descriptors collection. + */ + CV_WRAP const vector& getTrainDescriptors() const; + /* + * Clear train descriptors collection. + */ + CV_WRAP virtual void clear(); + + /* + * Return true if there are not train descriptors in collection. + */ + CV_WRAP virtual bool empty() const; + /* + * Return true if the matcher supports mask in match methods. + */ + CV_WRAP virtual bool isMaskSupported() const = 0; + + /* + * Train matcher (e.g. train flann index). + * In all methods to match the method train() is run every time before matching. + * Some descriptor matchers (e.g. BruteForceMatcher) have empty implementation + * of this method, other matchers really train their inner structures + * (e.g. FlannBasedMatcher trains flann::Index). So nonempty implementation + * of train() should check the class object state and do traing/retraining + * only if the state requires that (e.g. FlannBasedMatcher trains flann::Index + * if it has not trained yet or if new descriptors have been added to the train + * collection). + */ + CV_WRAP virtual void train(); + /* + * Group of methods to match descriptors from image pair. + * Method train() is run in this methods. + */ + // Find one best match for each query descriptor (if mask is empty). + CV_WRAP void match( const Mat& queryDescriptors, const Mat& trainDescriptors, + CV_OUT vector& matches, const Mat& mask=Mat() ) const; + // Find k best matches for each query descriptor (in increasing order of distances). + // compactResult is used when mask is not empty. 
If compactResult is false matches + // vector will have the same size as queryDescriptors rows. If compactResult is true + // matches vector will not contain matches for fully masked out query descriptors. + CV_WRAP void knnMatch( const Mat& queryDescriptors, const Mat& trainDescriptors, + CV_OUT vector >& matches, int k, + const Mat& mask=Mat(), bool compactResult=false ) const; + // Find best matches for each query descriptor which have distance less than + // maxDistance (in increasing order of distances). + void radiusMatch( const Mat& queryDescriptors, const Mat& trainDescriptors, + vector >& matches, float maxDistance, + const Mat& mask=Mat(), bool compactResult=false ) const; + /* + * Group of methods to match descriptors from one image to image set. + * See description of similar methods for matching image pair above. + */ + CV_WRAP void match( const Mat& queryDescriptors, CV_OUT vector& matches, + const vector& masks=vector() ); + CV_WRAP void knnMatch( const Mat& queryDescriptors, CV_OUT vector >& matches, int k, + const vector& masks=vector(), bool compactResult=false ); + void radiusMatch( const Mat& queryDescriptors, vector >& matches, float maxDistance, + const vector& masks=vector(), bool compactResult=false ); + + // Reads matcher object from a file node + virtual void read( const FileNode& ); + // Writes matcher object to a file storage + virtual void write( FileStorage& ) const; + + // Clone the matcher. If emptyTrainData is false the method create deep copy of the object, i.e. copies + // both parameters and train data. If emptyTrainData is true the method create object copy with current parameters + // but with empty train data. + virtual Ptr clone( bool emptyTrainData=false ) const = 0; + + CV_WRAP static Ptr create( const string& descriptorMatcherType ); +protected: + /* + * Class to work with descriptors from several images as with one merged matrix. + * It is used e.g. in FlannBasedMatcher. + */ + class CV_EXPORTS DescriptorCollection + { + public: + DescriptorCollection(); + DescriptorCollection( const DescriptorCollection& collection ); + virtual ~DescriptorCollection(); + + // Vector of matrices "descriptors" will be merged to one matrix "mergedDescriptors" here. + void set( const vector& descriptors ); + virtual void clear(); + + const Mat& getDescriptors() const; + const Mat getDescriptor( int imgIdx, int localDescIdx ) const; + const Mat getDescriptor( int globalDescIdx ) const; + void getLocalIdx( int globalDescIdx, int& imgIdx, int& localDescIdx ) const; + + int size() const; + + protected: + Mat mergedDescriptors; + vector startIdxs; + }; + + // In fact the matching is implemented only by the following two methods. These methods suppose + // that the class object has been trained already. Public match methods call these methods + // after calling train(). + virtual void knnMatchImpl( const Mat& queryDescriptors, vector >& matches, int k, + const vector& masks=vector(), bool compactResult=false ) = 0; + virtual void radiusMatchImpl( const Mat& queryDescriptors, vector >& matches, float maxDistance, + const vector& masks=vector(), bool compactResult=false ) = 0; + + static bool isPossibleMatch( const Mat& mask, int queryIdx, int trainIdx ); + static bool isMaskedOut( const vector& masks, int queryIdx ); + + static Mat clone_op( Mat m ) { return m.clone(); } + void checkMasks( const vector& masks, int queryDescriptorsCount ) const; + + // Collection of descriptors from train images. + vector trainDescCollection; +}; + +/* + * Brute-force descriptor matcher. 
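+ *
+ * Illustrative usage (editor's sketch, not part of the upstream header; descriptors1
+ * and descriptors2 are descriptor matrices computed elsewhere):
+ *
+ *   BFMatcher matcher(NORM_HAMMING);           // e.g. for ORB/BRIEF descriptors
+ *   vector<DMatch> matches;
+ *   matcher.match(descriptors1, descriptors2, matches);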
+ * + * For each descriptor in the first set, this matcher finds the closest + * descriptor in the second set by trying each one. + * + * For efficiency, BruteForceMatcher is templated on the distance metric. + * For float descriptors, a common choice would be cv::L2. + */ +class CV_EXPORTS_W BFMatcher : public DescriptorMatcher +{ +public: + CV_WRAP BFMatcher( int normType=NORM_L2, bool crossCheck=false ); + virtual ~BFMatcher() {} + + virtual bool isMaskSupported() const { return true; } + + virtual Ptr clone( bool emptyTrainData=false ) const; + + AlgorithmInfo* info() const; +protected: + virtual void knnMatchImpl( const Mat& queryDescriptors, vector >& matches, int k, + const vector& masks=vector(), bool compactResult=false ); + virtual void radiusMatchImpl( const Mat& queryDescriptors, vector >& matches, float maxDistance, + const vector& masks=vector(), bool compactResult=false ); + + int normType; + bool crossCheck; +}; + + +/* + * Flann based matcher + */ +class CV_EXPORTS_W FlannBasedMatcher : public DescriptorMatcher +{ +public: + CV_WRAP FlannBasedMatcher( const Ptr& indexParams=new flann::KDTreeIndexParams(), + const Ptr& searchParams=new flann::SearchParams() ); + + virtual void add( const vector& descriptors ); + virtual void clear(); + + // Reads matcher object from a file node + virtual void read( const FileNode& ); + // Writes matcher object to a file storage + virtual void write( FileStorage& ) const; + + virtual void train(); + virtual bool isMaskSupported() const; + + virtual Ptr clone( bool emptyTrainData=false ) const; + + AlgorithmInfo* info() const; +protected: + static void convertToDMatches( const DescriptorCollection& descriptors, + const Mat& indices, const Mat& distances, + vector >& matches ); + + virtual void knnMatchImpl( const Mat& queryDescriptors, vector >& matches, int k, + const vector& masks=vector(), bool compactResult=false ); + virtual void radiusMatchImpl( const Mat& queryDescriptors, vector >& matches, float maxDistance, + const vector& masks=vector(), bool compactResult=false ); + + Ptr indexParams; + Ptr searchParams; + Ptr flannIndex; + + DescriptorCollection mergedDescriptors; + int addedDescCount; +}; + +/****************************************************************************************\ +* GenericDescriptorMatcher * +\****************************************************************************************/ +/* + * Abstract interface for a keypoint descriptor and matcher + */ +class GenericDescriptorMatcher; +typedef GenericDescriptorMatcher GenericDescriptorMatch; + +class CV_EXPORTS GenericDescriptorMatcher +{ +public: + GenericDescriptorMatcher(); + virtual ~GenericDescriptorMatcher(); + + /* + * Add train collection: images and keypoints from them. + * images A set of train images. + * ketpoints Keypoint collection that have been detected on train images. + * + * Keypoints for which a descriptor cannot be computed are removed. Such keypoints + * must be filtered in this method befor adding keypoints to train collection "trainPointCollection". + * If inheritor class need perform such prefiltering the method add() must be overloaded. + * In the other class methods programmer has access to the train keypoints by a constant link. + */ + virtual void add( const vector& images, + vector >& keypoints ); + + const vector& getTrainImages() const; + const vector >& getTrainKeypoints() const; + + /* + * Clear images and keypoints storing in train collection. 
+ */ + virtual void clear(); + /* + * Returns true if matcher supports mask to match descriptors. + */ + virtual bool isMaskSupported() = 0; + /* + * Train some inner structures (e.g. flann index or decision trees). + * train() methods is run every time in matching methods. So the method implementation + * should has a check whether these inner structures need be trained/retrained or not. + */ + virtual void train(); + + /* + * Classifies query keypoints. + * queryImage The query image + * queryKeypoints Keypoints from the query image + * trainImage The train image + * trainKeypoints Keypoints from the train image + */ + // Classify keypoints from query image under one train image. + void classify( const Mat& queryImage, vector& queryKeypoints, + const Mat& trainImage, vector& trainKeypoints ) const; + // Classify keypoints from query image under train image collection. + void classify( const Mat& queryImage, vector& queryKeypoints ); + + /* + * Group of methods to match keypoints from image pair. + * Keypoints for which a descriptor cannot be computed are removed. + * train() method is called here. + */ + // Find one best match for each query descriptor (if mask is empty). + void match( const Mat& queryImage, vector& queryKeypoints, + const Mat& trainImage, vector& trainKeypoints, + vector& matches, const Mat& mask=Mat() ) const; + // Find k best matches for each query keypoint (in increasing order of distances). + // compactResult is used when mask is not empty. If compactResult is false matches + // vector will have the same size as queryDescriptors rows. + // If compactResult is true matches vector will not contain matches for fully masked out query descriptors. + void knnMatch( const Mat& queryImage, vector& queryKeypoints, + const Mat& trainImage, vector& trainKeypoints, + vector >& matches, int k, + const Mat& mask=Mat(), bool compactResult=false ) const; + // Find best matches for each query descriptor which have distance less than maxDistance (in increasing order of distances). + void radiusMatch( const Mat& queryImage, vector& queryKeypoints, + const Mat& trainImage, vector& trainKeypoints, + vector >& matches, float maxDistance, + const Mat& mask=Mat(), bool compactResult=false ) const; + /* + * Group of methods to match keypoints from one image to image set. + * See description of similar methods for matching image pair above. + */ + void match( const Mat& queryImage, vector& queryKeypoints, + vector& matches, const vector& masks=vector() ); + void knnMatch( const Mat& queryImage, vector& queryKeypoints, + vector >& matches, int k, + const vector& masks=vector(), bool compactResult=false ); + void radiusMatch( const Mat& queryImage, vector& queryKeypoints, + vector >& matches, float maxDistance, + const vector& masks=vector(), bool compactResult=false ); + + // Reads matcher object from a file node + virtual void read( const FileNode& fn ); + // Writes matcher object to a file storage + virtual void write( FileStorage& fs ) const; + + // Return true if matching object is empty (e.g. feature detector or descriptor matcher are empty) + virtual bool empty() const; + + // Clone the matcher. If emptyTrainData is false the method create deep copy of the object, i.e. copies + // both parameters and train data. If emptyTrainData is true the method create object copy with current parameters + // but with empty train data. 
+ virtual Ptr clone( bool emptyTrainData=false ) const = 0;
+
+ static Ptr create( const string& genericDescritptorMatcherType,
+ const string &paramsFilename=string() );
+
+protected:
+ // In fact the matching is implemented only by the following two methods. These methods suppose
+ // that the class object has been trained already. Public match methods call these methods
+ // after calling train().
+ virtual void knnMatchImpl( const Mat& queryImage, vector& queryKeypoints,
+ vector >& matches, int k,
+ const vector& masks, bool compactResult ) = 0;
+ virtual void radiusMatchImpl( const Mat& queryImage, vector& queryKeypoints,
+ vector >& matches, float maxDistance,
+ const vector& masks, bool compactResult ) = 0;
+ /*
+ * A storage for sets of keypoints together with corresponding images and class IDs
+ */
+ class CV_EXPORTS KeyPointCollection
+ {
+ public:
+ KeyPointCollection();
+ KeyPointCollection( const KeyPointCollection& collection );
+ void add( const vector& images, const vector >& keypoints );
+ void clear();
+
+ // Returns the total number of keypoints in the collection
+ size_t keypointCount() const;
+ size_t imageCount() const;
+
+ const vector >& getKeypoints() const;
+ const vector& getKeypoints( int imgIdx ) const;
+ const KeyPoint& getKeyPoint( int imgIdx, int localPointIdx ) const;
+ const KeyPoint& getKeyPoint( int globalPointIdx ) const;
+ void getLocalIdx( int globalPointIdx, int& imgIdx, int& localPointIdx ) const;
+
+ const vector& getImages() const;
+ const Mat& getImage( int imgIdx ) const;
+
+ protected:
+ int pointCount;
+
+ vector images;
+ vector > keypoints;
+ // global indices of the first points in each image, startIndices.size() = keypoints.size()
+ vector startIndices;
+
+ private:
+ static Mat clone_op( Mat m ) { return m.clone(); }
+ };
+
+ KeyPointCollection trainPointCollection;
+};
+
+
+/****************************************************************************************\
+* VectorDescriptorMatcher *
+\****************************************************************************************/
+
+/*
+ * A class used for matching descriptors that can be described as vectors in a finite-dimensional space
+ */
+class VectorDescriptorMatcher;
+typedef VectorDescriptorMatcher VectorDescriptorMatch;
+
+class CV_EXPORTS VectorDescriptorMatcher : public GenericDescriptorMatcher
+{
+public:
+ VectorDescriptorMatcher( const Ptr& extractor, const Ptr& matcher );
+ virtual ~VectorDescriptorMatcher();
+
+ virtual void add( const vector& imgCollection,
+ vector >& pointCollection );
+
+ virtual void clear();
+
+ virtual void train();
+
+ virtual bool isMaskSupported();
+
+ virtual void read( const FileNode& fn );
+ virtual void write( FileStorage& fs ) const;
+ virtual bool empty() const;
+
+ virtual Ptr clone( bool emptyTrainData=false ) const;
+
+protected:
+ virtual void knnMatchImpl( const Mat& queryImage, vector& queryKeypoints,
+ vector >& matches, int k,
+ const vector& masks, bool compactResult );
+ virtual void radiusMatchImpl( const Mat& queryImage, vector& queryKeypoints,
+ vector >& matches, float maxDistance,
+ const vector& masks, bool compactResult );
+
+ Ptr extractor;
+ Ptr matcher;
+};
+
+/****************************************************************************************\
+* Drawing functions *
+\****************************************************************************************/
+struct CV_EXPORTS DrawMatchesFlags
+{
+ enum{ DEFAULT = 0, // Output image matrix will be created (Mat::create),
+ // i.e.
existing memory of output image may be reused. + // Two source image, matches and single keypoints will be drawn. + // For each keypoint only the center point will be drawn (without + // the circle around keypoint with keypoint size and orientation). + DRAW_OVER_OUTIMG = 1, // Output image matrix will not be created (Mat::create). + // Matches will be drawn on existing content of output image. + NOT_DRAW_SINGLE_POINTS = 2, // Single keypoints will not be drawn. + DRAW_RICH_KEYPOINTS = 4 // For each keypoint the circle around keypoint with keypoint size and + // orientation will be drawn. + }; +}; + +// Draw keypoints. +CV_EXPORTS_W void drawKeypoints( const Mat& image, const vector& keypoints, CV_OUT Mat& outImage, + const Scalar& color=Scalar::all(-1), int flags=DrawMatchesFlags::DEFAULT ); + +// Draws matches of keypints from two images on output image. +CV_EXPORTS void drawMatches( const Mat& img1, const vector& keypoints1, + const Mat& img2, const vector& keypoints2, + const vector& matches1to2, Mat& outImg, + const Scalar& matchColor=Scalar::all(-1), const Scalar& singlePointColor=Scalar::all(-1), + const vector& matchesMask=vector(), int flags=DrawMatchesFlags::DEFAULT ); + +CV_EXPORTS void drawMatches( const Mat& img1, const vector& keypoints1, + const Mat& img2, const vector& keypoints2, + const vector >& matches1to2, Mat& outImg, + const Scalar& matchColor=Scalar::all(-1), const Scalar& singlePointColor=Scalar::all(-1), + const vector >& matchesMask=vector >(), int flags=DrawMatchesFlags::DEFAULT ); + +/****************************************************************************************\ +* Functions to evaluate the feature detectors and [generic] descriptor extractors * +\****************************************************************************************/ + +CV_EXPORTS void evaluateFeatureDetector( const Mat& img1, const Mat& img2, const Mat& H1to2, + vector* keypoints1, vector* keypoints2, + float& repeatability, int& correspCount, + const Ptr& fdetector=Ptr() ); + +CV_EXPORTS void computeRecallPrecisionCurve( const vector >& matches1to2, + const vector >& correctMatches1to2Mask, + vector& recallPrecisionCurve ); + +CV_EXPORTS float getRecall( const vector& recallPrecisionCurve, float l_precision ); +CV_EXPORTS int getNearestPoint( const vector& recallPrecisionCurve, float l_precision ); + +CV_EXPORTS void evaluateGenericDescriptorMatcher( const Mat& img1, const Mat& img2, const Mat& H1to2, + vector& keypoints1, vector& keypoints2, + vector >* matches1to2, vector >* correctMatches1to2Mask, + vector& recallPrecisionCurve, + const Ptr& dmatch=Ptr() ); + + +/****************************************************************************************\ +* Bag of visual words * +\****************************************************************************************/ +/* + * Abstract base class for training of a 'bag of visual words' vocabulary from a set of descriptors + */ +class CV_EXPORTS_W BOWTrainer +{ +public: + BOWTrainer(); + virtual ~BOWTrainer(); + + CV_WRAP void add( const Mat& descriptors ); + CV_WRAP const vector& getDescriptors() const; + CV_WRAP int descripotorsCount() const; + + CV_WRAP virtual void clear(); + + /* + * Train visual words vocabulary, that is cluster training descriptors and + * compute cluster centers. + * Returns cluster centers. + * + * descriptors Training descriptors computed on images keypoints. 
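+ *
+ * Illustrative pipeline (editor's sketch, not part of the upstream header; extractor
+ * and matcher are a DescriptorExtractor and DescriptorMatcher created elsewhere):
+ *
+ *   BOWKMeansTrainer trainer(100);                 // 100 visual words
+ *   trainer.add(trainingDescriptors);              // descriptors from the training set
+ *   Mat vocabulary = trainer.cluster();
+ *
+ *   BOWImgDescriptorExtractor bowExtractor(extractor, matcher);
+ *   bowExtractor.setVocabulary(vocabulary);
+ *   bowExtractor.compute(image, keypoints, bowDescriptor);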
+ */ + CV_WRAP virtual Mat cluster() const = 0; + CV_WRAP virtual Mat cluster( const Mat& descriptors ) const = 0; + +protected: + vector descriptors; + int size; +}; + +/* + * This is BOWTrainer using cv::kmeans to get vocabulary. + */ +class CV_EXPORTS_W BOWKMeansTrainer : public BOWTrainer +{ +public: + CV_WRAP BOWKMeansTrainer( int clusterCount, const TermCriteria& termcrit=TermCriteria(), + int attempts=3, int flags=KMEANS_PP_CENTERS ); + virtual ~BOWKMeansTrainer(); + + // Returns trained vocabulary (i.e. cluster centers). + CV_WRAP virtual Mat cluster() const; + CV_WRAP virtual Mat cluster( const Mat& descriptors ) const; + +protected: + + int clusterCount; + TermCriteria termcrit; + int attempts; + int flags; +}; + +/* + * Class to compute image descriptor using bag of visual words. + */ +class CV_EXPORTS_W BOWImgDescriptorExtractor +{ +public: + CV_WRAP BOWImgDescriptorExtractor( const Ptr& dextractor, + const Ptr& dmatcher ); + virtual ~BOWImgDescriptorExtractor(); + + CV_WRAP void setVocabulary( const Mat& vocabulary ); + CV_WRAP const Mat& getVocabulary() const; + void compute( const Mat& image, vector& keypoints, Mat& imgDescriptor, + vector >* pointIdxsOfClusters=0, Mat* descriptors=0 ); + // compute() is not constant because DescriptorMatcher::match is not constant + + CV_WRAP_AS(compute) void compute2( const Mat& image, vector& keypoints, CV_OUT Mat& imgDescriptor ) + { compute(image,keypoints,imgDescriptor); } + + CV_WRAP int descriptorSize() const; + CV_WRAP int descriptorType() const; + +protected: + Mat vocabulary; + Ptr dextractor; + Ptr dmatcher; +}; + +} /* namespace cv */ + +#endif /* __cplusplus */ + +#endif + +/* End of file. */ diff --git a/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann.hpp b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann.hpp new file mode 100644 index 0000000..ea8fcd7 --- /dev/null +++ b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann.hpp @@ -0,0 +1,43 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. 
+// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#include "opencv2/flann/flann.hpp" diff --git a/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/all_indices.h b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/all_indices.h new file mode 100644 index 0000000..ff53fd8 --- /dev/null +++ b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/all_indices.h @@ -0,0 +1,155 @@ +/*********************************************************************** + * Software License Agreement (BSD License) + * + * Copyright 2008-2009 Marius Muja (mariusm@cs.ubc.ca). All rights reserved. + * Copyright 2008-2009 David G. Lowe (lowe@cs.ubc.ca). All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ *************************************************************************/ + + +#ifndef OPENCV_FLANN_ALL_INDICES_H_ +#define OPENCV_FLANN_ALL_INDICES_H_ + +#include "general.h" + +#include "nn_index.h" +#include "kdtree_index.h" +#include "kdtree_single_index.h" +#include "kmeans_index.h" +#include "composite_index.h" +#include "linear_index.h" +#include "hierarchical_clustering_index.h" +#include "lsh_index.h" +#include "autotuned_index.h" + + +namespace cvflann +{ + +template +struct index_creator +{ + static NNIndex* create(const Matrix& dataset, const IndexParams& params, const Distance& distance) + { + flann_algorithm_t index_type = get_param(params, "algorithm"); + + NNIndex* nnIndex; + switch (index_type) { + case FLANN_INDEX_LINEAR: + nnIndex = new LinearIndex(dataset, params, distance); + break; + case FLANN_INDEX_KDTREE_SINGLE: + nnIndex = new KDTreeSingleIndex(dataset, params, distance); + break; + case FLANN_INDEX_KDTREE: + nnIndex = new KDTreeIndex(dataset, params, distance); + break; + case FLANN_INDEX_KMEANS: + nnIndex = new KMeansIndex(dataset, params, distance); + break; + case FLANN_INDEX_COMPOSITE: + nnIndex = new CompositeIndex(dataset, params, distance); + break; + case FLANN_INDEX_AUTOTUNED: + nnIndex = new AutotunedIndex(dataset, params, distance); + break; + case FLANN_INDEX_HIERARCHICAL: + nnIndex = new HierarchicalClusteringIndex(dataset, params, distance); + break; + case FLANN_INDEX_LSH: + nnIndex = new LshIndex(dataset, params, distance); + break; + default: + throw FLANNException("Unknown index type"); + } + + return nnIndex; + } +}; + +template +struct index_creator +{ + static NNIndex* create(const Matrix& dataset, const IndexParams& params, const Distance& distance) + { + flann_algorithm_t index_type = get_param(params, "algorithm"); + + NNIndex* nnIndex; + switch (index_type) { + case FLANN_INDEX_LINEAR: + nnIndex = new LinearIndex(dataset, params, distance); + break; + case FLANN_INDEX_KMEANS: + nnIndex = new KMeansIndex(dataset, params, distance); + break; + case FLANN_INDEX_HIERARCHICAL: + nnIndex = new HierarchicalClusteringIndex(dataset, params, distance); + break; + case FLANN_INDEX_LSH: + nnIndex = new LshIndex(dataset, params, distance); + break; + default: + throw FLANNException("Unknown index type"); + } + + return nnIndex; + } +}; + +template +struct index_creator +{ + static NNIndex* create(const Matrix& dataset, const IndexParams& params, const Distance& distance) + { + flann_algorithm_t index_type = get_param(params, "algorithm"); + + NNIndex* nnIndex; + switch (index_type) { + case FLANN_INDEX_LINEAR: + nnIndex = new LinearIndex(dataset, params, distance); + break; + case FLANN_INDEX_HIERARCHICAL: + nnIndex = new HierarchicalClusteringIndex(dataset, params, distance); + break; + case FLANN_INDEX_LSH: + nnIndex = new LshIndex(dataset, params, distance); + break; + default: + throw FLANNException("Unknown index type"); + } + + return nnIndex; + } +}; + +template +NNIndex* create_index_by_type(const Matrix& dataset, const IndexParams& params, const Distance& distance) +{ + return index_creator::create(dataset, params,distance); +} + +} + +#endif /* OPENCV_FLANN_ALL_INDICES_H_ */ diff --git a/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/allocator.h b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/allocator.h new file mode 100644 index 0000000..26091d0 --- /dev/null +++ b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/allocator.h @@ -0,0 +1,188 @@ 
+/*********************************************************************** + * Software License Agreement (BSD License) + * + * Copyright 2008-2009 Marius Muja (mariusm@cs.ubc.ca). All rights reserved. + * Copyright 2008-2009 David G. Lowe (lowe@cs.ubc.ca). All rights reserved. + * + * THE BSD LICENSE + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *************************************************************************/ + +#ifndef OPENCV_FLANN_ALLOCATOR_H_ +#define OPENCV_FLANN_ALLOCATOR_H_ + +#include +#include + + +namespace cvflann +{ + +/** + * Allocates (using C's malloc) a generic type T. + * + * Params: + * count = number of instances to allocate. + * Returns: pointer (of type T*) to memory buffer + */ +template +T* allocate(size_t count = 1) +{ + T* mem = (T*) ::malloc(sizeof(T)*count); + return mem; +} + + +/** + * Pooled storage allocator + * + * The following routines allow for the efficient allocation of storage in + * small chunks from a specified pool. Rather than allowing each structure + * to be freed individually, an entire pool of storage is freed at once. + * This method has two advantages over just using malloc() and free(). First, + * it is far more efficient for allocating small objects, as there is + * no overhead for remembering all the information needed to free each + * object or consolidating fragmented memory. Second, the decision about + * how long to keep an object is made at the time of allocation, and there + * is no need to track down all the objects to free them. + * + */ + +const size_t WORDSIZE=16; +const size_t BLOCKSIZE=8192; + +class PooledAllocator +{ + /* We maintain memory alignment to word boundaries by requiring that all + allocations be in multiples of the machine wordsize. */ + /* Size of machine word in bytes. Must be power of 2. */ + /* Minimum number of bytes requested at a time from the system. Must be multiple of WORDSIZE. */ + + + int remaining; /* Number of bytes left in current block of storage. */ + void* base; /* Pointer to base of current block of storage. */ + void* loc; /* Current location in block to next allocate memory. */ + int blocksize; + + +public: + int usedMemory; + int wastedMemory; + + /** + Default constructor. Initializes a new pool. 
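+
+ Illustrative use (editor's sketch, not part of the upstream header; Node stands for
+ any small hypothetical struct):
+
+   PooledAllocator pool;
+   int* counts = pool.allocate<int>(128);   // 128 ints carved from the pool
+   Node* node = pool.allocate<Node>();      // raw storage; construct in place if needed
+   // no per-object free: everything is released when the pool is destroyed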
+ */ + PooledAllocator(int blockSize = BLOCKSIZE) + { + blocksize = blockSize; + remaining = 0; + base = NULL; + + usedMemory = 0; + wastedMemory = 0; + } + + /** + * Destructor. Frees all the memory allocated in this pool. + */ + ~PooledAllocator() + { + void* prev; + + while (base != NULL) { + prev = *((void**) base); /* Get pointer to prev block. */ + ::free(base); + base = prev; + } + } + + /** + * Returns a pointer to a piece of new memory of the given size in bytes + * allocated from the pool. + */ + void* allocateMemory(int size) + { + int blockSize; + + /* Round size up to a multiple of wordsize. The following expression + only works for WORDSIZE that is a power of 2, by masking last bits of + incremented size to zero. + */ + size = (size + (WORDSIZE - 1)) & ~(WORDSIZE - 1); + + /* Check whether a new block must be allocated. Note that the first word + of a block is reserved for a pointer to the previous block. + */ + if (size > remaining) { + + wastedMemory += remaining; + + /* Allocate new storage. */ + blockSize = (size + sizeof(void*) + (WORDSIZE-1) > BLOCKSIZE) ? + size + sizeof(void*) + (WORDSIZE-1) : BLOCKSIZE; + + // use the standard C malloc to allocate memory + void* m = ::malloc(blockSize); + if (!m) { + fprintf(stderr,"Failed to allocate memory.\n"); + return NULL; + } + + /* Fill first word of new block with pointer to previous block. */ + ((void**) m)[0] = base; + base = m; + + int shift = 0; + //int shift = (WORDSIZE - ( (((size_t)m) + sizeof(void*)) & (WORDSIZE-1))) & (WORDSIZE-1); + + remaining = blockSize - sizeof(void*) - shift; + loc = ((char*)m + sizeof(void*) + shift); + } + void* rloc = loc; + loc = (char*)loc + size; + remaining -= size; + + usedMemory += size; + + return rloc; + } + + /** + * Allocates (using this pool) a generic type T. + * + * Params: + * count = number of instances to allocate. + * Returns: pointer (of type T*) to memory buffer + */ + template + T* allocate(size_t count = 1) + { + T* mem = (T*) this->allocateMemory((int)(sizeof(T)*count)); + return mem; + } + +}; + +} + +#endif //OPENCV_FLANN_ALLOCATOR_H_ diff --git a/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/any.h b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/any.h new file mode 100644 index 0000000..7e3fd79 --- /dev/null +++ b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/any.h @@ -0,0 +1,318 @@ +#ifndef OPENCV_FLANN_ANY_H_ +#define OPENCV_FLANN_ANY_H_ +/* + * (C) Copyright Christopher Diggins 2005-2011 + * (C) Copyright Pablo Aguilar 2005 + * (C) Copyright Kevlin Henney 2001 + * + * Distributed under the Boost Software License, Version 1.0. 
(See + * accompanying file LICENSE_1_0.txt or copy at + * http://www.boost.org/LICENSE_1_0.txt + * + * Adapted for FLANN by Marius Muja + */ + +#include "defines.h" +#include +#include +#include + +namespace cvflann +{ + +namespace anyimpl +{ + +struct bad_any_cast +{ +}; + +struct empty_any +{ +}; + +inline std::ostream& operator <<(std::ostream& out, const empty_any&) +{ + out << "[empty_any]"; + return out; +} + +struct base_any_policy +{ + virtual void static_delete(void** x) = 0; + virtual void copy_from_value(void const* src, void** dest) = 0; + virtual void clone(void* const* src, void** dest) = 0; + virtual void move(void* const* src, void** dest) = 0; + virtual void* get_value(void** src) = 0; + virtual ::size_t get_size() = 0; + virtual const std::type_info& type() = 0; + virtual void print(std::ostream& out, void* const* src) = 0; + +#ifdef OPENCV_CAN_BREAK_BINARY_COMPATIBILITY + virtual ~base_any_policy() {} +#endif +}; + +template +struct typed_base_any_policy : base_any_policy +{ + virtual ::size_t get_size() { return sizeof(T); } + virtual const std::type_info& type() { return typeid(T); } + +}; + +template +struct small_any_policy : typed_base_any_policy +{ + virtual void static_delete(void**) { } + virtual void copy_from_value(void const* src, void** dest) + { + new (dest) T(* reinterpret_cast(src)); + } + virtual void clone(void* const* src, void** dest) { *dest = *src; } + virtual void move(void* const* src, void** dest) { *dest = *src; } + virtual void* get_value(void** src) { return reinterpret_cast(src); } + virtual void print(std::ostream& out, void* const* src) { out << *reinterpret_cast(src); } +}; + +template +struct big_any_policy : typed_base_any_policy +{ + virtual void static_delete(void** x) + { + if (* x) delete (* reinterpret_cast(x)); *x = NULL; + } + virtual void copy_from_value(void const* src, void** dest) + { + *dest = new T(*reinterpret_cast(src)); + } + virtual void clone(void* const* src, void** dest) + { + *dest = new T(**reinterpret_cast(src)); + } + virtual void move(void* const* src, void** dest) + { + (*reinterpret_cast(dest))->~T(); + **reinterpret_cast(dest) = **reinterpret_cast(src); + } + virtual void* get_value(void** src) { return *src; } + virtual void print(std::ostream& out, void* const* src) { out << *reinterpret_cast(*src); } +}; + +template<> inline void big_any_policy::print(std::ostream& out, void* const* src) +{ + out << int(*reinterpret_cast(*src)); +} + +template<> inline void big_any_policy::print(std::ostream& out, void* const* src) +{ + out << int(*reinterpret_cast(*src)); +} + +template +struct choose_policy +{ + typedef big_any_policy type; +}; + +template +struct choose_policy +{ + typedef small_any_policy type; +}; + +struct any; + +/// Choosing the policy for an any type is illegal, but should never happen. +/// This is designed to throw a compiler error. +template<> +struct choose_policy +{ + typedef void type; +}; + +/// Specializations for small types. 
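+/// For example, SMALL_POLICY(int) below makes choose_policy<int>::type a
+/// small_any_policy<int>, so the value is stored directly in the any's pointer
+/// slot instead of being heap-allocated.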
+#define SMALL_POLICY(TYPE) \ + template<> \ + struct choose_policy { typedef small_any_policy type; \ + } + +SMALL_POLICY(signed char); +SMALL_POLICY(unsigned char); +SMALL_POLICY(signed short); +SMALL_POLICY(unsigned short); +SMALL_POLICY(signed int); +SMALL_POLICY(unsigned int); +SMALL_POLICY(signed long); +SMALL_POLICY(unsigned long); +SMALL_POLICY(float); +SMALL_POLICY(bool); + +#undef SMALL_POLICY + +template +class SinglePolicy +{ + SinglePolicy(); + SinglePolicy(const SinglePolicy& other); + SinglePolicy& operator=(const SinglePolicy& other); + +public: + static base_any_policy* get_policy(); + +private: + static typename choose_policy::type policy; +}; + +template +typename choose_policy::type SinglePolicy::policy; + +/// This function will return a different policy for each type. +template +inline base_any_policy* SinglePolicy::get_policy() { return &policy; } + +} // namespace anyimpl + +struct any +{ +private: + // fields + anyimpl::base_any_policy* policy; + void* object; + +public: + /// Initializing constructor. + template + any(const T& x) + : policy(anyimpl::SinglePolicy::get_policy()), object(NULL) + { + assign(x); + } + + /// Empty constructor. + any() + : policy(anyimpl::SinglePolicy::get_policy()), object(NULL) + { } + + /// Special initializing constructor for string literals. + any(const char* x) + : policy(anyimpl::SinglePolicy::get_policy()), object(NULL) + { + assign(x); + } + + /// Copy constructor. + any(const any& x) + : policy(anyimpl::SinglePolicy::get_policy()), object(NULL) + { + assign(x); + } + + /// Destructor. + ~any() + { + policy->static_delete(&object); + } + + /// Assignment function from another any. + any& assign(const any& x) + { + reset(); + policy = x.policy; + policy->clone(&x.object, &object); + return *this; + } + + /// Assignment function. + template + any& assign(const T& x) + { + reset(); + policy = anyimpl::SinglePolicy::get_policy(); + policy->copy_from_value(&x, &object); + return *this; + } + + /// Assignment operator. + template + any& operator=(const T& x) + { + return assign(x); + } + + /// Assignment operator, specialed for literal strings. + /// They have types like const char [6] which don't work as expected. + any& operator=(const char* x) + { + return assign(x); + } + + /// Utility functions + any& swap(any& x) + { + std::swap(policy, x.policy); + std::swap(object, x.object); + return *this; + } + + /// Cast operator. You can only cast to the original type. + template + T& cast() + { + if (policy->type() != typeid(T)) throw anyimpl::bad_any_cast(); + T* r = reinterpret_cast(policy->get_value(&object)); + return *r; + } + + /// Cast operator. You can only cast to the original type. + template + const T& cast() const + { + if (policy->type() != typeid(T)) throw anyimpl::bad_any_cast(); + T* r = reinterpret_cast(policy->get_value(const_cast(&object))); + return *r; + } + + /// Returns true if the any contains no value. + bool empty() const + { + return policy->type() == typeid(anyimpl::empty_any); + } + + /// Frees any allocated memory, and sets the value to NULL. + void reset() + { + policy->static_delete(&object); + policy = anyimpl::SinglePolicy::get_policy(); + } + + /// Returns true if the two types are the same. 
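+    /// (i.e. this any and x currently hold values of the same concrete type).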
+ bool compatible(const any& x) const + { + return policy->type() == x.policy->type(); + } + + /// Returns if the type is compatible with the policy + template + bool has_type() + { + return policy->type() == typeid(T); + } + + const std::type_info& type() const + { + return policy->type(); + } + + friend std::ostream& operator <<(std::ostream& out, const any& any_val); +}; + +inline std::ostream& operator <<(std::ostream& out, const any& any_val) +{ + any_val.policy->print(out,&any_val.object); + return out; +} + +} + +#endif // OPENCV_FLANN_ANY_H_ diff --git a/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/autotuned_index.h b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/autotuned_index.h new file mode 100644 index 0000000..454641e --- /dev/null +++ b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/autotuned_index.h @@ -0,0 +1,595 @@ +/*********************************************************************** + * Software License Agreement (BSD License) + * + * Copyright 2008-2009 Marius Muja (mariusm@cs.ubc.ca). All rights reserved. + * Copyright 2008-2009 David G. Lowe (lowe@cs.ubc.ca). All rights reserved. + * + * THE BSD LICENSE + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ *************************************************************************/ +#ifndef OPENCV_FLANN_AUTOTUNED_INDEX_H_ +#define OPENCV_FLANN_AUTOTUNED_INDEX_H_ + +#include "general.h" +#include "nn_index.h" +#include "ground_truth.h" +#include "index_testing.h" +#include "sampling.h" +#include "kdtree_index.h" +#include "kdtree_single_index.h" +#include "kmeans_index.h" +#include "composite_index.h" +#include "linear_index.h" +#include "logger.h" + +namespace cvflann +{ + +template +NNIndex* create_index_by_type(const Matrix& dataset, const IndexParams& params, const Distance& distance); + + +struct AutotunedIndexParams : public IndexParams +{ + AutotunedIndexParams(float target_precision = 0.8, float build_weight = 0.01, float memory_weight = 0, float sample_fraction = 0.1) + { + (*this)["algorithm"] = FLANN_INDEX_AUTOTUNED; + // precision desired (used for autotuning, -1 otherwise) + (*this)["target_precision"] = target_precision; + // build tree time weighting factor + (*this)["build_weight"] = build_weight; + // index memory weighting factor + (*this)["memory_weight"] = memory_weight; + // what fraction of the dataset to use for autotuning + (*this)["sample_fraction"] = sample_fraction; + } +}; + + +template +class AutotunedIndex : public NNIndex +{ +public: + typedef typename Distance::ElementType ElementType; + typedef typename Distance::ResultType DistanceType; + + AutotunedIndex(const Matrix& inputData, const IndexParams& params = AutotunedIndexParams(), Distance d = Distance()) : + dataset_(inputData), distance_(d) + { + target_precision_ = get_param(params, "target_precision",0.8f); + build_weight_ = get_param(params,"build_weight", 0.01f); + memory_weight_ = get_param(params, "memory_weight", 0.0f); + sample_fraction_ = get_param(params,"sample_fraction", 0.1f); + bestIndex_ = NULL; + } + + AutotunedIndex(const AutotunedIndex&); + AutotunedIndex& operator=(const AutotunedIndex&); + + virtual ~AutotunedIndex() + { + if (bestIndex_ != NULL) { + delete bestIndex_; + bestIndex_ = NULL; + } + } + + /** + * Dummy implementation for other algorithms of addable indexes after that. + */ + void addIndex(const Matrix& /*wholeData*/, const Matrix& /*additionalData*/) + { + } + + /** + * Method responsible with building the index. 
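+     * It first estimates the best index type and parameters on a sampled subset
+     * of the data, then builds that index over the full dataset and tunes the
+     * search-time parameters (number of checks) for the requested precision.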
+ */ + virtual void buildIndex() + { + std::ostringstream stream; + bestParams_ = estimateBuildParams(); + print_params(bestParams_, stream); + Logger::info("----------------------------------------------------\n"); + Logger::info("Autotuned parameters:\n"); + Logger::info("%s", stream.str().c_str()); + Logger::info("----------------------------------------------------\n"); + + bestIndex_ = create_index_by_type(dataset_, bestParams_, distance_); + bestIndex_->buildIndex(); + speedup_ = estimateSearchParams(bestSearchParams_); + stream.str(std::string()); + print_params(bestSearchParams_, stream); + Logger::info("----------------------------------------------------\n"); + Logger::info("Search parameters:\n"); + Logger::info("%s", stream.str().c_str()); + Logger::info("----------------------------------------------------\n"); + } + + /** + * Saves the index to a stream + */ + virtual void saveIndex(FILE* stream) + { + save_value(stream, (int)bestIndex_->getType()); + bestIndex_->saveIndex(stream); + save_value(stream, get_param(bestSearchParams_, "checks")); + } + + /** + * Loads the index from a stream + */ + virtual void loadIndex(FILE* stream) + { + int index_type; + + load_value(stream, index_type); + IndexParams params; + params["algorithm"] = (flann_algorithm_t)index_type; + bestIndex_ = create_index_by_type(dataset_, params, distance_); + bestIndex_->loadIndex(stream); + int checks; + load_value(stream, checks); + bestSearchParams_["checks"] = checks; + } + + /** + * Method that searches for nearest-neighbors + */ + virtual void findNeighbors(ResultSet& result, const ElementType* vec, const SearchParams& searchParams) + { + int checks = get_param(searchParams,"checks",FLANN_CHECKS_AUTOTUNED); + if (checks == FLANN_CHECKS_AUTOTUNED) { + bestIndex_->findNeighbors(result, vec, bestSearchParams_); + } + else { + bestIndex_->findNeighbors(result, vec, searchParams); + } + } + + + IndexParams getParameters() const + { + return bestIndex_->getParameters(); + } + + SearchParams getSearchParameters() const + { + return bestSearchParams_; + } + + float getSpeedup() const + { + return speedup_; + } + + + /** + * Number of features in this index. + */ + virtual size_t size() const + { + return bestIndex_->size(); + } + + /** + * The length of each vector in this index. + */ + virtual size_t veclen() const + { + return bestIndex_->veclen(); + } + + /** + * The amount of memory (in bytes) this index uses. 
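+     * (Delegates to the underlying autotuned index.)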
+ */ + virtual int usedMemory() const + { + return bestIndex_->usedMemory(); + } + + /** + * Algorithm name + */ + virtual flann_algorithm_t getType() const + { + return FLANN_INDEX_AUTOTUNED; + } + +private: + + struct CostData + { + float searchTimeCost; + float buildTimeCost; + float memoryCost; + float totalCost; + IndexParams params; + }; + + void evaluate_kmeans(CostData& cost) + { + StartStopTimer t; + int checks; + const int nn = 1; + + Logger::info("KMeansTree using params: max_iterations=%d, branching=%d\n", + get_param(cost.params,"iterations"), + get_param(cost.params,"branching")); + KMeansIndex kmeans(sampledDataset_, cost.params, distance_); + // measure index build time + t.start(); + kmeans.buildIndex(); + t.stop(); + float buildTime = (float)t.value; + + // measure search time + float searchTime = test_index_precision(kmeans, sampledDataset_, testDataset_, gt_matches_, target_precision_, checks, distance_, nn); + + float datasetMemory = float(sampledDataset_.rows * sampledDataset_.cols * sizeof(float)); + cost.memoryCost = (kmeans.usedMemory() + datasetMemory) / datasetMemory; + cost.searchTimeCost = searchTime; + cost.buildTimeCost = buildTime; + Logger::info("KMeansTree buildTime=%g, searchTime=%g, build_weight=%g\n", buildTime, searchTime, build_weight_); + } + + + void evaluate_kdtree(CostData& cost) + { + StartStopTimer t; + int checks; + const int nn = 1; + + Logger::info("KDTree using params: trees=%d\n", get_param(cost.params,"trees")); + KDTreeIndex kdtree(sampledDataset_, cost.params, distance_); + + t.start(); + kdtree.buildIndex(); + t.stop(); + float buildTime = (float)t.value; + + //measure search time + float searchTime = test_index_precision(kdtree, sampledDataset_, testDataset_, gt_matches_, target_precision_, checks, distance_, nn); + + float datasetMemory = float(sampledDataset_.rows * sampledDataset_.cols * sizeof(float)); + cost.memoryCost = (kdtree.usedMemory() + datasetMemory) / datasetMemory; + cost.searchTimeCost = searchTime; + cost.buildTimeCost = buildTime; + Logger::info("KDTree buildTime=%g, searchTime=%g\n", buildTime, searchTime); + } + + + // struct KMeansSimpleDownhillFunctor { + // + // Autotune& autotuner; + // KMeansSimpleDownhillFunctor(Autotune& autotuner_) : autotuner(autotuner_) {}; + // + // float operator()(int* params) { + // + // float maxFloat = numeric_limits::max(); + // + // if (params[0]<2) return maxFloat; + // if (params[1]<0) return maxFloat; + // + // CostData c; + // c.params["algorithm"] = KMEANS; + // c.params["centers-init"] = CENTERS_RANDOM; + // c.params["branching"] = params[0]; + // c.params["max-iterations"] = params[1]; + // + // autotuner.evaluate_kmeans(c); + // + // return c.timeCost; + // + // } + // }; + // + // struct KDTreeSimpleDownhillFunctor { + // + // Autotune& autotuner; + // KDTreeSimpleDownhillFunctor(Autotune& autotuner_) : autotuner(autotuner_) {}; + // + // float operator()(int* params) { + // float maxFloat = numeric_limits::max(); + // + // if (params[0]<1) return maxFloat; + // + // CostData c; + // c.params["algorithm"] = KDTREE; + // c.params["trees"] = params[0]; + // + // autotuner.evaluate_kdtree(c); + // + // return c.timeCost; + // + // } + // }; + + + + void optimizeKMeans(std::vector& costs) + { + Logger::info("KMEANS, Step 1: Exploring parameter space\n"); + + // explore kmeans parameters space using combinations of the parameters below + int maxIterations[] = { 1, 5, 10, 15 }; + int branchingFactors[] = { 16, 32, 64, 128, 256 }; + + int kmeansParamSpaceSize = 
FLANN_ARRAY_LEN(maxIterations) * FLANN_ARRAY_LEN(branchingFactors); + costs.reserve(costs.size() + kmeansParamSpaceSize); + + // evaluate kmeans for all parameter combinations + for (size_t i = 0; i < FLANN_ARRAY_LEN(maxIterations); ++i) { + for (size_t j = 0; j < FLANN_ARRAY_LEN(branchingFactors); ++j) { + CostData cost; + cost.params["algorithm"] = FLANN_INDEX_KMEANS; + cost.params["centers_init"] = FLANN_CENTERS_RANDOM; + cost.params["iterations"] = maxIterations[i]; + cost.params["branching"] = branchingFactors[j]; + + evaluate_kmeans(cost); + costs.push_back(cost); + } + } + + // Logger::info("KMEANS, Step 2: simplex-downhill optimization\n"); + // + // const int n = 2; + // // choose initial simplex points as the best parameters so far + // int kmeansNMPoints[n*(n+1)]; + // float kmeansVals[n+1]; + // for (int i=0;i& costs) + { + Logger::info("KD-TREE, Step 1: Exploring parameter space\n"); + + // explore kd-tree parameters space using the parameters below + int testTrees[] = { 1, 4, 8, 16, 32 }; + + // evaluate kdtree for all parameter combinations + for (size_t i = 0; i < FLANN_ARRAY_LEN(testTrees); ++i) { + CostData cost; + cost.params["algorithm"] = FLANN_INDEX_KDTREE; + cost.params["trees"] = testTrees[i]; + + evaluate_kdtree(cost); + costs.push_back(cost); + } + + // Logger::info("KD-TREE, Step 2: simplex-downhill optimization\n"); + // + // const int n = 1; + // // choose initial simplex points as the best parameters so far + // int kdtreeNMPoints[n*(n+1)]; + // float kdtreeVals[n+1]; + // for (int i=0;i costs; + + int sampleSize = int(sample_fraction_ * dataset_.rows); + int testSampleSize = std::min(sampleSize / 10, 1000); + + Logger::info("Entering autotuning, dataset size: %d, sampleSize: %d, testSampleSize: %d, target precision: %g\n", dataset_.rows, sampleSize, testSampleSize, target_precision_); + + // For a very small dataset, it makes no sense to build any fancy index, just + // use linear search + if (testSampleSize < 10) { + Logger::info("Choosing linear, dataset too small\n"); + return LinearIndexParams(); + } + + // We use a fraction of the original dataset to speedup the autotune algorithm + sampledDataset_ = random_sample(dataset_, sampleSize); + // We use a cross-validation approach, first we sample a testset from the dataset + testDataset_ = random_sample(sampledDataset_, testSampleSize, true); + + // We compute the ground truth using linear search + Logger::info("Computing ground truth... 
\n"); + gt_matches_ = Matrix(new int[testDataset_.rows], testDataset_.rows, 1); + StartStopTimer t; + t.start(); + compute_ground_truth(sampledDataset_, testDataset_, gt_matches_, 0, distance_); + t.stop(); + + CostData linear_cost; + linear_cost.searchTimeCost = (float)t.value; + linear_cost.buildTimeCost = 0; + linear_cost.memoryCost = 0; + linear_cost.params["algorithm"] = FLANN_INDEX_LINEAR; + + costs.push_back(linear_cost); + + // Start parameter autotune process + Logger::info("Autotuning parameters...\n"); + + optimizeKMeans(costs); + optimizeKDTree(costs); + + float bestTimeCost = costs[0].searchTimeCost; + for (size_t i = 0; i < costs.size(); ++i) { + float timeCost = costs[i].buildTimeCost * build_weight_ + costs[i].searchTimeCost; + if (timeCost < bestTimeCost) { + bestTimeCost = timeCost; + } + } + + float bestCost = costs[0].searchTimeCost / bestTimeCost; + IndexParams bestParams = costs[0].params; + if (bestTimeCost > 0) { + for (size_t i = 0; i < costs.size(); ++i) { + float crtCost = (costs[i].buildTimeCost * build_weight_ + costs[i].searchTimeCost) / bestTimeCost + + memory_weight_ * costs[i].memoryCost; + if (crtCost < bestCost) { + bestCost = crtCost; + bestParams = costs[i].params; + } + } + } + + delete[] gt_matches_.data; + delete[] testDataset_.data; + delete[] sampledDataset_.data; + + return bestParams; + } + + + + /** + * Estimates the search time parameters needed to get the desired precision. + * Precondition: the index is built + * Postcondition: the searchParams will have the optimum params set, also the speedup obtained over linear search. + */ + float estimateSearchParams(SearchParams& searchParams) + { + const int nn = 1; + const size_t SAMPLE_COUNT = 1000; + + assert(bestIndex_ != NULL); // must have a valid index + + float speedup = 0; + + int samples = (int)std::min(dataset_.rows / 10, SAMPLE_COUNT); + if (samples > 0) { + Matrix testDataset = random_sample(dataset_, samples); + + Logger::info("Computing ground truth\n"); + + // we need to compute the ground truth first + Matrix gt_matches(new int[testDataset.rows], testDataset.rows, 1); + StartStopTimer t; + t.start(); + compute_ground_truth(dataset_, testDataset, gt_matches, 1, distance_); + t.stop(); + float linear = (float)t.value; + + int checks; + Logger::info("Estimating number of checks\n"); + + float searchTime; + float cb_index; + if (bestIndex_->getType() == FLANN_INDEX_KMEANS) { + Logger::info("KMeans algorithm, estimating cluster border factor\n"); + KMeansIndex* kmeans = (KMeansIndex*)bestIndex_; + float bestSearchTime = -1; + float best_cb_index = -1; + int best_checks = -1; + for (cb_index = 0; cb_index < 1.1f; cb_index += 0.2f) { + kmeans->set_cb_index(cb_index); + searchTime = test_index_precision(*kmeans, dataset_, testDataset, gt_matches, target_precision_, checks, distance_, nn, 1); + if ((searchTime < bestSearchTime) || (bestSearchTime == -1)) { + bestSearchTime = searchTime; + best_cb_index = cb_index; + best_checks = checks; + } + } + searchTime = bestSearchTime; + cb_index = best_cb_index; + checks = best_checks; + + kmeans->set_cb_index(best_cb_index); + Logger::info("Optimum cb_index: %g\n", cb_index); + bestParams_["cb_index"] = cb_index; + } + else { + searchTime = test_index_precision(*bestIndex_, dataset_, testDataset, gt_matches, target_precision_, checks, distance_, nn, 1); + } + + Logger::info("Required number of checks: %d \n", checks); + searchParams["checks"] = checks; + + speedup = linear / searchTime; + + delete[] gt_matches.data; + delete[] testDataset.data; + } + 
+ return speedup; + } + +private: + NNIndex* bestIndex_; + + IndexParams bestParams_; + SearchParams bestSearchParams_; + + Matrix sampledDataset_; + Matrix testDataset_; + Matrix gt_matches_; + + float speedup_; + + /** + * The dataset used by this index + */ + const Matrix dataset_; + + /** + * Index parameters + */ + float target_precision_; + float build_weight_; + float memory_weight_; + float sample_fraction_; + + Distance distance_; + + +}; +} + +#endif /* OPENCV_FLANN_AUTOTUNED_INDEX_H_ */ diff --git a/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/composite_index.h b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/composite_index.h new file mode 100644 index 0000000..02b7bc1 --- /dev/null +++ b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/composite_index.h @@ -0,0 +1,201 @@ +/*********************************************************************** + * Software License Agreement (BSD License) + * + * Copyright 2008-2009 Marius Muja (mariusm@cs.ubc.ca). All rights reserved. + * Copyright 2008-2009 David G. Lowe (lowe@cs.ubc.ca). All rights reserved. + * + * THE BSD LICENSE + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *************************************************************************/ + +#ifndef OPENCV_FLANN_COMPOSITE_INDEX_H_ +#define OPENCV_FLANN_COMPOSITE_INDEX_H_ + +#include "general.h" +#include "nn_index.h" +#include "kdtree_index.h" +#include "kmeans_index.h" + +namespace cvflann +{ + +/** + * Index parameters for the CompositeIndex. + */ +struct CompositeIndexParams : public IndexParams +{ + CompositeIndexParams(int trees = 4, int branching = 32, int iterations = 11, + flann_centers_init_t centers_init = FLANN_CENTERS_RANDOM, float cb_index = 0.2 ) + { + (*this)["algorithm"] = FLANN_INDEX_KMEANS; + // number of randomized trees to use (for kdtree) + (*this)["trees"] = trees; + // branching factor + (*this)["branching"] = branching; + // max iterations to perform in one kmeans clustering (kmeans tree) + (*this)["iterations"] = iterations; + // algorithm used for picking the initial cluster centers for kmeans tree + (*this)["centers_init"] = centers_init; + // cluster boundary index. 
Used when searching the kmeans tree + (*this)["cb_index"] = cb_index; + } +}; + + +/** + * This index builds a kd-tree index and a k-means index and performs nearest + * neighbour search both indexes. This gives a slight boost in search performance + * as some of the neighbours that are missed by one index are found by the other. + */ +template +class CompositeIndex : public NNIndex +{ +public: + typedef typename Distance::ElementType ElementType; + typedef typename Distance::ResultType DistanceType; + + /** + * Index constructor + * @param inputData dataset containing the points to index + * @param params Index parameters + * @param d Distance functor + * @return + */ + CompositeIndex(const Matrix& inputData, const IndexParams& params = CompositeIndexParams(), + Distance d = Distance()) : index_params_(params) + { + kdtree_index_ = new KDTreeIndex(inputData, params, d); + kmeans_index_ = new KMeansIndex(inputData, params, d); + + } + + CompositeIndex(const CompositeIndex&); + CompositeIndex& operator=(const CompositeIndex&); + + virtual ~CompositeIndex() + { + delete kdtree_index_; + delete kmeans_index_; + } + + /** + * @return The index type + */ + flann_algorithm_t getType() const + { + return FLANN_INDEX_COMPOSITE; + } + + /** + * @return Size of the index + */ + size_t size() const + { + return kdtree_index_->size(); + } + + /** + * \returns The dimensionality of the features in this index. + */ + size_t veclen() const + { + return kdtree_index_->veclen(); + } + + /** + * \returns The amount of memory (in bytes) used by the index. + */ + int usedMemory() const + { + return kmeans_index_->usedMemory() + kdtree_index_->usedMemory(); + } + + /** + * Dummy implementation for other algorithms of addable indexes after that. + */ + void addIndex(const Matrix& /*wholeData*/, const Matrix& /*additionalData*/) + { + } + + /** + * \brief Builds the index + */ + void buildIndex() + { + Logger::info("Building kmeans tree...\n"); + kmeans_index_->buildIndex(); + Logger::info("Building kdtree tree...\n"); + kdtree_index_->buildIndex(); + } + + /** + * \brief Saves the index to a stream + * \param stream The stream to save the index to + */ + void saveIndex(FILE* stream) + { + kmeans_index_->saveIndex(stream); + kdtree_index_->saveIndex(stream); + } + + /** + * \brief Loads the index from a stream + * \param stream The stream from which the index is loaded + */ + void loadIndex(FILE* stream) + { + kmeans_index_->loadIndex(stream); + kdtree_index_->loadIndex(stream); + } + + /** + * \returns The index parameters + */ + IndexParams getParameters() const + { + return index_params_; + } + + /** + * \brief Method that searches for nearest-neighbours + */ + void findNeighbors(ResultSet& result, const ElementType* vec, const SearchParams& searchParams) + { + kmeans_index_->findNeighbors(result, vec, searchParams); + kdtree_index_->findNeighbors(result, vec, searchParams); + } + +private: + /** The k-means index */ + KMeansIndex* kmeans_index_; + + /** The kd-tree index */ + KDTreeIndex* kdtree_index_; + + /** The index parameters */ + const IndexParams index_params_; +}; + +} + +#endif //OPENCV_FLANN_COMPOSITE_INDEX_H_ diff --git a/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/config.h b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/config.h new file mode 100644 index 0000000..56832fd --- /dev/null +++ b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/config.h @@ -0,0 +1,38 @@ +/*********************************************************************** + * Software License 
Agreement (BSD License) + * + * Copyright 2008-2011 Marius Muja (mariusm@cs.ubc.ca). All rights reserved. + * Copyright 2008-2011 David G. Lowe (lowe@cs.ubc.ca). All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *************************************************************************/ + + +#ifndef OPENCV_FLANN_CONFIG_H_ +#define OPENCV_FLANN_CONFIG_H_ + +#ifdef FLANN_VERSION_ +#undef FLANN_VERSION_ +#endif +#define FLANN_VERSION_ "1.6.10" + +#endif /* OPENCV_FLANN_CONFIG_H_ */ diff --git a/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/defines.h b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/defines.h new file mode 100644 index 0000000..13833b3 --- /dev/null +++ b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/defines.h @@ -0,0 +1,176 @@ +/*********************************************************************** + * Software License Agreement (BSD License) + * + * Copyright 2008-2011 Marius Muja (mariusm@cs.ubc.ca). All rights reserved. + * Copyright 2008-2011 David G. Lowe (lowe@cs.ubc.ca). All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *************************************************************************/ + + +#ifndef OPENCV_FLANN_DEFINES_H_ +#define OPENCV_FLANN_DEFINES_H_ + +#include "config.h" + +#ifdef FLANN_EXPORT +#undef FLANN_EXPORT +#endif +#ifdef WIN32 +/* win32 dll export/import directives */ + #ifdef FLANN_EXPORTS + #define FLANN_EXPORT __declspec(dllexport) + #elif defined(FLANN_STATIC) + #define FLANN_EXPORT + #else + #define FLANN_EXPORT __declspec(dllimport) + #endif +#else +/* unix needs nothing */ + #define FLANN_EXPORT +#endif + + +#ifdef FLANN_DEPRECATED +#undef FLANN_DEPRECATED +#endif +#ifdef __GNUC__ +#define FLANN_DEPRECATED __attribute__ ((deprecated)) +#elif defined(_MSC_VER) +#define FLANN_DEPRECATED __declspec(deprecated) +#else +#pragma message("WARNING: You need to implement FLANN_DEPRECATED for this compiler") +#define FLANN_DEPRECATED +#endif + + +#undef FLANN_PLATFORM_32_BIT +#undef FLANN_PLATFORM_64_BIT +#if defined __amd64__ || defined __x86_64__ || defined _WIN64 || defined _M_X64 +#define FLANN_PLATFORM_64_BIT +#else +#define FLANN_PLATFORM_32_BIT +#endif + + +#undef FLANN_ARRAY_LEN +#define FLANN_ARRAY_LEN(a) (sizeof(a)/sizeof(a[0])) + +namespace cvflann { + +/* Nearest neighbour index algorithms */ +enum flann_algorithm_t +{ + FLANN_INDEX_LINEAR = 0, + FLANN_INDEX_KDTREE = 1, + FLANN_INDEX_KMEANS = 2, + FLANN_INDEX_COMPOSITE = 3, + FLANN_INDEX_KDTREE_SINGLE = 4, + FLANN_INDEX_HIERARCHICAL = 5, + FLANN_INDEX_LSH = 6, + FLANN_INDEX_SAVED = 254, + FLANN_INDEX_AUTOTUNED = 255, + + // deprecated constants, should use the FLANN_INDEX_* ones instead + LINEAR = 0, + KDTREE = 1, + KMEANS = 2, + COMPOSITE = 3, + KDTREE_SINGLE = 4, + SAVED = 254, + AUTOTUNED = 255 +}; + + + +enum flann_centers_init_t +{ + FLANN_CENTERS_RANDOM = 0, + FLANN_CENTERS_GONZALES = 1, + FLANN_CENTERS_KMEANSPP = 2, + + // deprecated constants, should use the FLANN_CENTERS_* ones instead + CENTERS_RANDOM = 0, + CENTERS_GONZALES = 1, + CENTERS_KMEANSPP = 2 +}; + +enum flann_log_level_t +{ + FLANN_LOG_NONE = 0, + FLANN_LOG_FATAL = 1, + FLANN_LOG_ERROR = 2, + FLANN_LOG_WARN = 3, + FLANN_LOG_INFO = 4 +}; + +enum flann_distance_t +{ + FLANN_DIST_EUCLIDEAN = 1, + FLANN_DIST_L2 = 1, + FLANN_DIST_MANHATTAN = 2, + FLANN_DIST_L1 = 2, + FLANN_DIST_MINKOWSKI = 3, + FLANN_DIST_MAX = 4, + FLANN_DIST_HIST_INTERSECT = 5, + FLANN_DIST_HELLINGER = 6, + FLANN_DIST_CHI_SQUARE = 7, + FLANN_DIST_CS = 7, + FLANN_DIST_KULLBACK_LEIBLER = 8, + FLANN_DIST_KL = 8, + FLANN_DIST_HAMMING = 9, + + // deprecated constants, should use the FLANN_DIST_* ones instead + EUCLIDEAN = 1, + MANHATTAN = 2, + MINKOWSKI = 3, + MAX_DIST = 4, + HIST_INTERSECT = 5, + HELLINGER = 6, + CS = 7, + KL = 8, + KULLBACK_LEIBLER = 8 +}; + +enum flann_datatype_t +{ + FLANN_INT8 = 0, + FLANN_INT16 = 1, + FLANN_INT32 = 2, + FLANN_INT64 = 3, + FLANN_UINT8 = 4, + FLANN_UINT16 = 5, + FLANN_UINT32 = 6, + FLANN_UINT64 = 7, + FLANN_FLOAT32 = 8, + FLANN_FLOAT64 = 9 +}; + +enum +{ + FLANN_CHECKS_UNLIMITED = -1, + FLANN_CHECKS_AUTOTUNED = -2 +}; + +} + +#endif /* 
OPENCV_FLANN_DEFINES_H_ */ diff --git a/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/dist.h b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/dist.h new file mode 100644 index 0000000..5ba3d34 --- /dev/null +++ b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/dist.h @@ -0,0 +1,937 @@ +/*********************************************************************** + * Software License Agreement (BSD License) + * + * Copyright 2008-2009 Marius Muja (mariusm@cs.ubc.ca). All rights reserved. + * Copyright 2008-2009 David G. Lowe (lowe@cs.ubc.ca). All rights reserved. + * + * THE BSD LICENSE + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *************************************************************************/ + +#ifndef OPENCV_FLANN_DIST_H_ +#define OPENCV_FLANN_DIST_H_ + +#include +#include +#include +#ifdef _MSC_VER +typedef unsigned __int32 uint32_t; +typedef unsigned __int64 uint64_t; +#else +#include +#endif + +#include "defines.h" + +#if (defined WIN32 || defined _WIN32) && defined(_M_ARM) +# include +#endif + +#if defined(__ARM_NEON__) || defined(__ARM_NEON) +# include "arm_neon.h" +#endif + +namespace cvflann +{ + +template +inline T abs(T x) { return (x<0) ? -x : x; } + +template<> +inline int abs(int x) { return ::abs(x); } + +template<> +inline float abs(float x) { return fabsf(x); } + +template<> +inline double abs(double x) { return fabs(x); } + +template +struct Accumulator { typedef T Type; }; +template<> +struct Accumulator { typedef float Type; }; +template<> +struct Accumulator { typedef float Type; }; +template<> +struct Accumulator { typedef float Type; }; +template<> +struct Accumulator { typedef float Type; }; +template<> +struct Accumulator { typedef float Type; }; +template<> +struct Accumulator { typedef float Type; }; + +#undef True +#undef False + +class True +{ +}; + +class False +{ +}; + + +/** + * Squared Euclidean distance functor. + * + * This is the simpler, unrolled version. 
This is preferable for + * very low dimensionality data (eg 3D points) + */ +template +struct L2_Simple +{ + typedef True is_kdtree_distance; + typedef True is_vector_space_distance; + + typedef T ElementType; + typedef typename Accumulator::Type ResultType; + + template + ResultType operator()(Iterator1 a, Iterator2 b, size_t size, ResultType /*worst_dist*/ = -1) const + { + ResultType result = ResultType(); + ResultType diff; + for(size_t i = 0; i < size; ++i ) { + diff = *a++ - *b++; + result += diff*diff; + } + return result; + } + + template + inline ResultType accum_dist(const U& a, const V& b, int) const + { + return (a-b)*(a-b); + } +}; + + + +/** + * Squared Euclidean distance functor, optimized version + */ +template +struct L2 +{ + typedef True is_kdtree_distance; + typedef True is_vector_space_distance; + + typedef T ElementType; + typedef typename Accumulator::Type ResultType; + + /** + * Compute the squared Euclidean distance between two vectors. + * + * This is highly optimised, with loop unrolling, as it is one + * of the most expensive inner loops. + * + * The computation of squared root at the end is omitted for + * efficiency. + */ + template + ResultType operator()(Iterator1 a, Iterator2 b, size_t size, ResultType worst_dist = -1) const + { + ResultType result = ResultType(); + ResultType diff0, diff1, diff2, diff3; + Iterator1 last = a + size; + Iterator1 lastgroup = last - 3; + + /* Process 4 items with each loop for efficiency. */ + while (a < lastgroup) { + diff0 = (ResultType)(a[0] - b[0]); + diff1 = (ResultType)(a[1] - b[1]); + diff2 = (ResultType)(a[2] - b[2]); + diff3 = (ResultType)(a[3] - b[3]); + result += diff0 * diff0 + diff1 * diff1 + diff2 * diff2 + diff3 * diff3; + a += 4; + b += 4; + + if ((worst_dist>0)&&(result>worst_dist)) { + return result; + } + } + /* Process last 0-3 pixels. Not needed for standard vector lengths. */ + while (a < last) { + diff0 = (ResultType)(*a++ - *b++); + result += diff0 * diff0; + } + return result; + } + + /** + * Partial euclidean distance, using just one dimension. This is used by the + * kd-tree when computing partial distances while traversing the tree. + * + * Squared root is omitted for efficiency. + */ + template + inline ResultType accum_dist(const U& a, const V& b, int) const + { + return (a-b)*(a-b); + } +}; + + +/* + * Manhattan distance functor, optimized version + */ +template +struct L1 +{ + typedef True is_kdtree_distance; + typedef True is_vector_space_distance; + + typedef T ElementType; + typedef typename Accumulator::Type ResultType; + + /** + * Compute the Manhattan (L_1) distance between two vectors. + * + * This is highly optimised, with loop unrolling, as it is one + * of the most expensive inner loops. + */ + template + ResultType operator()(Iterator1 a, Iterator2 b, size_t size, ResultType worst_dist = -1) const + { + ResultType result = ResultType(); + ResultType diff0, diff1, diff2, diff3; + Iterator1 last = a + size; + Iterator1 lastgroup = last - 3; + + /* Process 4 items with each loop for efficiency. */ + while (a < lastgroup) { + diff0 = (ResultType)abs(a[0] - b[0]); + diff1 = (ResultType)abs(a[1] - b[1]); + diff2 = (ResultType)abs(a[2] - b[2]); + diff3 = (ResultType)abs(a[3] - b[3]); + result += diff0 + diff1 + diff2 + diff3; + a += 4; + b += 4; + + if ((worst_dist>0)&&(result>worst_dist)) { + return result; + } + } + /* Process last 0-3 pixels. Not needed for standard vector lengths. 
*/ + while (a < last) { + diff0 = (ResultType)abs(*a++ - *b++); + result += diff0; + } + return result; + } + + /** + * Partial distance, used by the kd-tree. + */ + template + inline ResultType accum_dist(const U& a, const V& b, int) const + { + return abs(a-b); + } +}; + + + +template +struct MinkowskiDistance +{ + typedef True is_kdtree_distance; + typedef True is_vector_space_distance; + + typedef T ElementType; + typedef typename Accumulator::Type ResultType; + + int order; + + MinkowskiDistance(int order_) : order(order_) {} + + /** + * Compute the Minkowsky (L_p) distance between two vectors. + * + * This is highly optimised, with loop unrolling, as it is one + * of the most expensive inner loops. + * + * The computation of squared root at the end is omitted for + * efficiency. + */ + template + ResultType operator()(Iterator1 a, Iterator2 b, size_t size, ResultType worst_dist = -1) const + { + ResultType result = ResultType(); + ResultType diff0, diff1, diff2, diff3; + Iterator1 last = a + size; + Iterator1 lastgroup = last - 3; + + /* Process 4 items with each loop for efficiency. */ + while (a < lastgroup) { + diff0 = (ResultType)abs(a[0] - b[0]); + diff1 = (ResultType)abs(a[1] - b[1]); + diff2 = (ResultType)abs(a[2] - b[2]); + diff3 = (ResultType)abs(a[3] - b[3]); + result += pow(diff0,order) + pow(diff1,order) + pow(diff2,order) + pow(diff3,order); + a += 4; + b += 4; + + if ((worst_dist>0)&&(result>worst_dist)) { + return result; + } + } + /* Process last 0-3 pixels. Not needed for standard vector lengths. */ + while (a < last) { + diff0 = (ResultType)abs(*a++ - *b++); + result += pow(diff0,order); + } + return result; + } + + /** + * Partial distance, used by the kd-tree. + */ + template + inline ResultType accum_dist(const U& a, const V& b, int) const + { + return pow(static_cast(abs(a-b)),order); + } +}; + + + +template +struct MaxDistance +{ + typedef False is_kdtree_distance; + typedef True is_vector_space_distance; + + typedef T ElementType; + typedef typename Accumulator::Type ResultType; + + /** + * Compute the max distance (L_infinity) between two vectors. + * + * This distance is not a valid kdtree distance, it's not dimensionwise additive. + */ + template + ResultType operator()(Iterator1 a, Iterator2 b, size_t size, ResultType worst_dist = -1) const + { + ResultType result = ResultType(); + ResultType diff0, diff1, diff2, diff3; + Iterator1 last = a + size; + Iterator1 lastgroup = last - 3; + + /* Process 4 items with each loop for efficiency. */ + while (a < lastgroup) { + diff0 = abs(a[0] - b[0]); + diff1 = abs(a[1] - b[1]); + diff2 = abs(a[2] - b[2]); + diff3 = abs(a[3] - b[3]); + if (diff0>result) {result = diff0; } + if (diff1>result) {result = diff1; } + if (diff2>result) {result = diff2; } + if (diff3>result) {result = diff3; } + a += 4; + b += 4; + + if ((worst_dist>0)&&(result>worst_dist)) { + return result; + } + } + /* Process last 0-3 pixels. Not needed for standard vector lengths. */ + while (a < last) { + diff0 = abs(*a++ - *b++); + result = (diff0>result) ? 
diff0 : result; + } + return result; + } + + /* This distance functor is not dimension-wise additive, which + * makes it an invalid kd-tree distance, not implementing the accum_dist method */ + +}; + +//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// + +/** + * Hamming distance functor - counts the bit differences between two strings - useful for the Brief descriptor + * bit count of A exclusive XOR'ed with B + */ +struct HammingLUT +{ + typedef False is_kdtree_distance; + typedef False is_vector_space_distance; + + typedef unsigned char ElementType; + typedef int ResultType; + + /** this will count the bits in a ^ b + */ + ResultType operator()(const unsigned char* a, const unsigned char* b, int size) const + { + static const uchar popCountTable[] = + { + 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4, 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, + 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, + 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, + 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, + 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, + 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, + 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, + 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, 4, 5, 5, 6, 5, 6, 6, 7, 5, 6, 6, 7, 6, 7, 7, 8 + }; + ResultType result = 0; + for (int i = 0; i < size; i++) { + result += popCountTable[a[i] ^ b[i]]; + } + return result; + } +}; + +/** + * Hamming distance functor - counts the bit differences between two strings - useful for the Brief descriptor + * bit count of A exclusive XOR'ed with B + */ +struct HammingLUT2 +{ + typedef False is_kdtree_distance; + typedef False is_vector_space_distance; + + typedef unsigned char ElementType; + typedef int ResultType; + + /** this will count the bits in a ^ b + */ + ResultType operator()(const unsigned char* a, const unsigned char* b, size_t size) const + { + static const uchar popCountTable[] = + { + 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4, 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, + 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, + 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, + 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, + 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, + 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, + 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, + 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, 4, 5, 5, 6, 5, 6, 6, 7, 5, 6, 6, 7, 6, 7, 7, 8 + }; + ResultType result = 0; + for (size_t i = 0; i < size; i++) { + result += popCountTable[a[i] ^ b[i]]; + } + return result; + } +}; + +/** + * Hamming distance functor (pop count between two binary vectors, i.e. 
xor them and count the number of bits set) + * That code was taken from brief.cpp in OpenCV + */ +template +struct Hamming +{ + typedef False is_kdtree_distance; + typedef False is_vector_space_distance; + + + typedef T ElementType; + typedef int ResultType; + + template + ResultType operator()(Iterator1 a, Iterator2 b, size_t size, ResultType /*worst_dist*/ = -1) const + { + ResultType result = 0; +#if defined(__ARM_NEON__) || defined(__ARM_NEON) + { + uint32x4_t bits = vmovq_n_u32(0); + for (size_t i = 0; i < size; i += 16) { + uint8x16_t A_vec = vld1q_u8 (a + i); + uint8x16_t B_vec = vld1q_u8 (b + i); + uint8x16_t AxorB = veorq_u8 (A_vec, B_vec); + uint8x16_t bitsSet = vcntq_u8 (AxorB); + uint16x8_t bitSet8 = vpaddlq_u8 (bitsSet); + uint32x4_t bitSet4 = vpaddlq_u16 (bitSet8); + bits = vaddq_u32(bits, bitSet4); + } + uint64x2_t bitSet2 = vpaddlq_u32 (bits); + result = vgetq_lane_s32 (vreinterpretq_s32_u64(bitSet2),0); + result += vgetq_lane_s32 (vreinterpretq_s32_u64(bitSet2),2); + } +#elif __GNUC__ + { + //for portability just use unsigned long -- and use the __builtin_popcountll (see docs for __builtin_popcountll) + typedef unsigned long long pop_t; + const size_t modulo = size % sizeof(pop_t); + const pop_t* a2 = reinterpret_cast (a); + const pop_t* b2 = reinterpret_cast (b); + const pop_t* a2_end = a2 + (size / sizeof(pop_t)); + + for (; a2 != a2_end; ++a2, ++b2) result += __builtin_popcountll((*a2) ^ (*b2)); + + if (modulo) { + //in the case where size is not dividable by sizeof(size_t) + //need to mask off the bits at the end + pop_t a_final = 0, b_final = 0; + memcpy(&a_final, a2, modulo); + memcpy(&b_final, b2, modulo); + result += __builtin_popcountll(a_final ^ b_final); + } + } +#else // NO NEON and NOT GNUC + typedef unsigned long long pop_t; + HammingLUT lut; + result = lut(reinterpret_cast (a), + reinterpret_cast (b), size * sizeof(pop_t)); +#endif + return result; + } +}; + +template +struct Hamming2 +{ + typedef False is_kdtree_distance; + typedef False is_vector_space_distance; + + typedef T ElementType; + typedef int ResultType; + + /** This is popcount_3() from: + * http://en.wikipedia.org/wiki/Hamming_weight */ + unsigned int popcnt32(uint32_t n) const + { + n -= ((n >> 1) & 0x55555555); + n = (n & 0x33333333) + ((n >> 2) & 0x33333333); + return (((n + (n >> 4))& 0xF0F0F0F)* 0x1010101) >> 24; + } + +#ifdef FLANN_PLATFORM_64_BIT + unsigned int popcnt64(uint64_t n) const + { + n -= ((n >> 1) & 0x5555555555555555); + n = (n & 0x3333333333333333) + ((n >> 2) & 0x3333333333333333); + return (((n + (n >> 4))& 0x0f0f0f0f0f0f0f0f)* 0x0101010101010101) >> 56; + } +#endif + + template + ResultType operator()(Iterator1 a, Iterator2 b, size_t size, ResultType /*worst_dist*/ = -1) const + { +#ifdef FLANN_PLATFORM_64_BIT + const uint64_t* pa = reinterpret_cast(a); + const uint64_t* pb = reinterpret_cast(b); + ResultType result = 0; + size /= (sizeof(uint64_t)/sizeof(unsigned char)); + for(size_t i = 0; i < size; ++i ) { + result += popcnt64(*pa ^ *pb); + ++pa; + ++pb; + } +#else + const uint32_t* pa = reinterpret_cast(a); + const uint32_t* pb = reinterpret_cast(b); + ResultType result = 0; + size /= (sizeof(uint32_t)/sizeof(unsigned char)); + for(size_t i = 0; i < size; ++i ) { + result += popcnt32(*pa ^ *pb); + ++pa; + ++pb; + } +#endif + return result; + } +}; + + + +//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// + +template +struct HistIntersectionDistance +{ + typedef True is_kdtree_distance; + typedef 
True is_vector_space_distance; + + typedef T ElementType; + typedef typename Accumulator::Type ResultType; + + /** + * Compute the histogram intersection distance + */ + template + ResultType operator()(Iterator1 a, Iterator2 b, size_t size, ResultType worst_dist = -1) const + { + ResultType result = ResultType(); + ResultType min0, min1, min2, min3; + Iterator1 last = a + size; + Iterator1 lastgroup = last - 3; + + /* Process 4 items with each loop for efficiency. */ + while (a < lastgroup) { + min0 = (ResultType)(a[0] < b[0] ? a[0] : b[0]); + min1 = (ResultType)(a[1] < b[1] ? a[1] : b[1]); + min2 = (ResultType)(a[2] < b[2] ? a[2] : b[2]); + min3 = (ResultType)(a[3] < b[3] ? a[3] : b[3]); + result += min0 + min1 + min2 + min3; + a += 4; + b += 4; + if ((worst_dist>0)&&(result>worst_dist)) { + return result; + } + } + /* Process last 0-3 pixels. Not needed for standard vector lengths. */ + while (a < last) { + min0 = (ResultType)(*a < *b ? *a : *b); + result += min0; + ++a; + ++b; + } + return result; + } + + /** + * Partial distance, used by the kd-tree. + */ + template + inline ResultType accum_dist(const U& a, const V& b, int) const + { + return a +struct HellingerDistance +{ + typedef True is_kdtree_distance; + typedef True is_vector_space_distance; + + typedef T ElementType; + typedef typename Accumulator::Type ResultType; + + /** + * Compute the histogram intersection distance + */ + template + ResultType operator()(Iterator1 a, Iterator2 b, size_t size, ResultType /*worst_dist*/ = -1) const + { + ResultType result = ResultType(); + ResultType diff0, diff1, diff2, diff3; + Iterator1 last = a + size; + Iterator1 lastgroup = last - 3; + + /* Process 4 items with each loop for efficiency. */ + while (a < lastgroup) { + diff0 = sqrt(static_cast(a[0])) - sqrt(static_cast(b[0])); + diff1 = sqrt(static_cast(a[1])) - sqrt(static_cast(b[1])); + diff2 = sqrt(static_cast(a[2])) - sqrt(static_cast(b[2])); + diff3 = sqrt(static_cast(a[3])) - sqrt(static_cast(b[3])); + result += diff0 * diff0 + diff1 * diff1 + diff2 * diff2 + diff3 * diff3; + a += 4; + b += 4; + } + while (a < last) { + diff0 = sqrt(static_cast(*a++)) - sqrt(static_cast(*b++)); + result += diff0 * diff0; + } + return result; + } + + /** + * Partial distance, used by the kd-tree. + */ + template + inline ResultType accum_dist(const U& a, const V& b, int) const + { + return sqrt(static_cast(a)) - sqrt(static_cast(b)); + } +}; + + +template +struct ChiSquareDistance +{ + typedef True is_kdtree_distance; + typedef True is_vector_space_distance; + + typedef T ElementType; + typedef typename Accumulator::Type ResultType; + + /** + * Compute the chi-square distance + */ + template + ResultType operator()(Iterator1 a, Iterator2 b, size_t size, ResultType worst_dist = -1) const + { + ResultType result = ResultType(); + ResultType sum, diff; + Iterator1 last = a + size; + + while (a < last) { + sum = (ResultType)(*a + *b); + if (sum>0) { + diff = (ResultType)(*a - *b); + result += diff*diff/sum; + } + ++a; + ++b; + + if ((worst_dist>0)&&(result>worst_dist)) { + return result; + } + } + return result; + } + + /** + * Partial distance, used by the kd-tree. 
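+     * For chi-square the per-dimension contribution is (a-b)^2/(a+b) when a+b > 0,
+     * and 0 otherwise.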
+ */ + template + inline ResultType accum_dist(const U& a, const V& b, int) const + { + ResultType result = ResultType(); + ResultType sum, diff; + + sum = (ResultType)(a+b); + if (sum>0) { + diff = (ResultType)(a-b); + result = diff*diff/sum; + } + return result; + } +}; + + +template +struct KL_Divergence +{ + typedef True is_kdtree_distance; + typedef True is_vector_space_distance; + + typedef T ElementType; + typedef typename Accumulator::Type ResultType; + + /** + * Compute the Kullback–Leibler divergence + */ + template + ResultType operator()(Iterator1 a, Iterator2 b, size_t size, ResultType worst_dist = -1) const + { + ResultType result = ResultType(); + Iterator1 last = a + size; + + while (a < last) { + if (* a != 0) { + ResultType ratio = (ResultType)(*a / *b); + if (ratio>0) { + result += *a * log(ratio); + } + } + ++a; + ++b; + + if ((worst_dist>0)&&(result>worst_dist)) { + return result; + } + } + return result; + } + + /** + * Partial distance, used by the kd-tree. + */ + template + inline ResultType accum_dist(const U& a, const V& b, int) const + { + ResultType result = ResultType(); + ResultType ratio = (ResultType)(a / b); + if (ratio>0) { + result = a * log(ratio); + } + return result; + } +}; + + + +/* + * This is a "zero iterator". It basically behaves like a zero filled + * array to all algorithms that use arrays as iterators (STL style). + * It's useful when there's a need to compute the distance between feature + * and origin it and allows for better compiler optimisation than using a + * zero-filled array. + */ +template +struct ZeroIterator +{ + + T operator*() + { + return 0; + } + + T operator[](int) + { + return 0; + } + + const ZeroIterator& operator ++() + { + return *this; + } + + ZeroIterator operator ++(int) + { + return *this; + } + + ZeroIterator& operator+=(int) + { + return *this; + } + +}; + + +/* + * Depending on processed distances, some of them are already squared (e.g. L2) + * and some are not (e.g.Hamming). In KMeans++ for instance we want to be sure + * we are working on ^2 distances, thus following templates to ensure that. 
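+ * For example, the generic squareDistance functor below returns dist*dist, while the
+ * specializations for L2/L2_Simple (whose operator() already accumulates squared
+ * differences) return dist unchanged; ensureSquareDistance<Distance>() dispatches to
+ * the appropriate one.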
+ */ +template +struct squareDistance +{ + typedef typename Distance::ResultType ResultType; + ResultType operator()( ResultType dist ) { return dist*dist; } +}; + + +template +struct squareDistance, ElementType> +{ + typedef typename L2_Simple::ResultType ResultType; + ResultType operator()( ResultType dist ) { return dist; } +}; + +template +struct squareDistance, ElementType> +{ + typedef typename L2::ResultType ResultType; + ResultType operator()( ResultType dist ) { return dist; } +}; + + +template +struct squareDistance, ElementType> +{ + typedef typename MinkowskiDistance::ResultType ResultType; + ResultType operator()( ResultType dist ) { return dist; } +}; + +template +struct squareDistance, ElementType> +{ + typedef typename HellingerDistance::ResultType ResultType; + ResultType operator()( ResultType dist ) { return dist; } +}; + +template +struct squareDistance, ElementType> +{ + typedef typename ChiSquareDistance::ResultType ResultType; + ResultType operator()( ResultType dist ) { return dist; } +}; + + +template +typename Distance::ResultType ensureSquareDistance( typename Distance::ResultType dist ) +{ + typedef typename Distance::ElementType ElementType; + + squareDistance dummy; + return dummy( dist ); +} + + +/* + * ...and a template to ensure the user that he will process the normal distance, + * and not squared distance, without loosing processing time calling sqrt(ensureSquareDistance) + * that will result in doing actually sqrt(dist*dist) for L1 distance for instance. + */ +template +struct simpleDistance +{ + typedef typename Distance::ResultType ResultType; + ResultType operator()( ResultType dist ) { return dist; } +}; + + +template +struct simpleDistance, ElementType> +{ + typedef typename L2_Simple::ResultType ResultType; + ResultType operator()( ResultType dist ) { return sqrt(dist); } +}; + +template +struct simpleDistance, ElementType> +{ + typedef typename L2::ResultType ResultType; + ResultType operator()( ResultType dist ) { return sqrt(dist); } +}; + + +template +struct simpleDistance, ElementType> +{ + typedef typename MinkowskiDistance::ResultType ResultType; + ResultType operator()( ResultType dist ) { return sqrt(dist); } +}; + +template +struct simpleDistance, ElementType> +{ + typedef typename HellingerDistance::ResultType ResultType; + ResultType operator()( ResultType dist ) { return sqrt(dist); } +}; + +template +struct simpleDistance, ElementType> +{ + typedef typename ChiSquareDistance::ResultType ResultType; + ResultType operator()( ResultType dist ) { return sqrt(dist); } +}; + + +template +typename Distance::ResultType ensureSimpleDistance( typename Distance::ResultType dist ) +{ + typedef typename Distance::ElementType ElementType; + + simpleDistance dummy; + return dummy( dist ); +} + +} + +#endif //OPENCV_FLANN_DIST_H_ diff --git a/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/dummy.h b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/dummy.h new file mode 100644 index 0000000..3390981 --- /dev/null +++ b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/dummy.h @@ -0,0 +1,45 @@ +/*********************************************************************** + * Software License Agreement (BSD License) + * + * Copyright 2008-2009 Marius Muja (mariusm@cs.ubc.ca). All rights reserved. + * Copyright 2008-2009 David G. Lowe (lowe@cs.ubc.ca). All rights reserved. 
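The squareDistance/simpleDistance specializations above exist because L2-style functors already accumulate squared values while L1-style ones do not; ensureSquareDistance and ensureSimpleDistance apply the right conversion per distance type. A small sketch (not part of the patch; the vectors are made up):

#include <cstdio>
#include "opencv2/flann/dist.h"

int main()
{
    float a[2] = {0.f, 0.f};
    float b[2] = {3.f, 4.f};

    cvflann::L2<float> l2;
    float squared = l2(a, b, 2);   // L2 accumulates squared differences: 25
    float plain   = cvflann::ensureSimpleDistance< cvflann::L2<float> >(squared);   // sqrt applied: 5

    cvflann::L1<float> l1;
    float manhattan = cvflann::ensureSimpleDistance< cvflann::L1<float> >(l1(a, b, 2));   // left unchanged: 7

    std::printf("%f %f %f\n", (double)squared, (double)plain, (double)manhattan);
    return 0;
}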
+ * + * THE BSD LICENSE + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *************************************************************************/ + +#ifndef OPENCV_FLANN_DUMMY_H_ +#define OPENCV_FLANN_DUMMY_H_ + +namespace cvflann +{ + +#if (defined WIN32 || defined _WIN32 || defined WINCE) && defined CVAPI_EXPORTS +__declspec(dllexport) +#endif +void dummyfunc(); + +} + + +#endif /* OPENCV_FLANN_DUMMY_H_ */ diff --git a/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/dynamic_bitset.h b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/dynamic_bitset.h new file mode 100644 index 0000000..d795b5d --- /dev/null +++ b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/dynamic_bitset.h @@ -0,0 +1,159 @@ +/*********************************************************************** + * Software License Agreement (BSD License) + * + * Copyright 2008-2009 Marius Muja (mariusm@cs.ubc.ca). All rights reserved. + * Copyright 2008-2009 David G. Lowe (lowe@cs.ubc.ca). All rights reserved. + * + * THE BSD LICENSE + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ *************************************************************************/ + +/*********************************************************************** + * Author: Vincent Rabaud + *************************************************************************/ + +#ifndef OPENCV_FLANN_DYNAMIC_BITSET_H_ +#define OPENCV_FLANN_DYNAMIC_BITSET_H_ + +#ifndef FLANN_USE_BOOST +# define FLANN_USE_BOOST 0 +#endif +//#define FLANN_USE_BOOST 1 +#if FLANN_USE_BOOST +#include +typedef boost::dynamic_bitset<> DynamicBitset; +#else + +#include + +#include "dist.h" + +namespace cvflann { + +/** Class re-implementing the boost version of it + * This helps not depending on boost, it also does not do the bound checks + * and has a way to reset a block for speed + */ +class DynamicBitset +{ +public: + /** default constructor + */ + DynamicBitset() + { + } + + /** only constructor we use in our code + * @param sz the size of the bitset (in bits) + */ + DynamicBitset(size_t sz) + { + resize(sz); + reset(); + } + + /** Sets all the bits to 0 + */ + void clear() + { + std::fill(bitset_.begin(), bitset_.end(), 0); + } + + /** @brief checks if the bitset is empty + * @return true if the bitset is empty + */ + bool empty() const + { + return bitset_.empty(); + } + + /** set all the bits to 0 + */ + void reset() + { + std::fill(bitset_.begin(), bitset_.end(), 0); + } + + /** @brief set one bit to 0 + * @param index + */ + void reset(size_t index) + { + bitset_[index / cell_bit_size_] &= ~(size_t(1) << (index % cell_bit_size_)); + } + + /** @brief sets a specific bit to 0, and more bits too + * This function is useful when resetting a given set of bits so that the + * whole bitset ends up being 0: if that's the case, we don't care about setting + * other bits to 0 + * @param index + */ + void reset_block(size_t index) + { + bitset_[index / cell_bit_size_] = 0; + } + + /** resize the bitset so that it contains at least sz bits + * @param sz + */ + void resize(size_t sz) + { + size_ = sz; + bitset_.resize(sz / cell_bit_size_ + 1); + } + + /** set a bit to true + * @param index the index of the bit to set to 1 + */ + void set(size_t index) + { + bitset_[index / cell_bit_size_] |= size_t(1) << (index % cell_bit_size_); + } + + /** gives the number of contained bits + */ + size_t size() const + { + return size_; + } + + /** check if a bit is set + * @param index the index of the bit to check + * @return true if the bit is set + */ + bool test(size_t index) const + { + return (bitset_[index / cell_bit_size_] & (size_t(1) << (index % cell_bit_size_))) != 0; + } + +private: + std::vector bitset_; + size_t size_; + static const unsigned int cell_bit_size_ = CHAR_BIT * sizeof(size_t); +}; + +} // namespace cvflann + +#endif + +#endif // OPENCV_FLANN_DYNAMIC_BITSET_H_ diff --git a/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/flann.hpp b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/flann.hpp new file mode 100644 index 0000000..d053488 --- /dev/null +++ b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/flann.hpp @@ -0,0 +1,427 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. 
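A short usage sketch for the DynamicBitset defined above (not part of the patch; the size and bit indices are arbitrary):

#include <cstdio>
#include "opencv2/flann/dynamic_bitset.h"

int main()
{
    cvflann::DynamicBitset visited(100);          // at least 100 bits, all cleared
    visited.set(42);
    std::printf("bit 42: %d, bit 7: %d\n", (int)visited.test(42), (int)visited.test(7));

    visited.reset(42);                            // clear a single bit
    visited.reset_block(42);                      // clear the whole word holding bit 42
    std::printf("capacity in bits: %lu\n", (unsigned long)visited.size());
    return 0;
}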
+// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef _OPENCV_FLANN_HPP_ +#define _OPENCV_FLANN_HPP_ + +#ifdef __cplusplus + +#include "opencv2/core/types_c.h" +#include "opencv2/core/core.hpp" +#include "opencv2/flann/flann_base.hpp" +#include "opencv2/flann/miniflann.hpp" + +namespace cvflann +{ + CV_EXPORTS flann_distance_t flann_distance_type(); + FLANN_DEPRECATED CV_EXPORTS void set_distance_type(flann_distance_t distance_type, int order); +} + + +namespace cv +{ +namespace flann +{ + +template struct CvType {}; +template <> struct CvType { static int type() { return CV_8U; } }; +template <> struct CvType { static int type() { return CV_8S; } }; +template <> struct CvType { static int type() { return CV_16U; } }; +template <> struct CvType { static int type() { return CV_16S; } }; +template <> struct CvType { static int type() { return CV_32S; } }; +template <> struct CvType { static int type() { return CV_32F; } }; +template <> struct CvType { static int type() { return CV_64F; } }; + + +// bring the flann parameters into this namespace +using ::cvflann::get_param; +using ::cvflann::print_params; + +// bring the flann distances into this namespace +using ::cvflann::L2_Simple; +using ::cvflann::L2; +using ::cvflann::L1; +using ::cvflann::MinkowskiDistance; +using ::cvflann::MaxDistance; +using ::cvflann::HammingLUT; +using ::cvflann::Hamming; +using ::cvflann::Hamming2; +using ::cvflann::HistIntersectionDistance; +using ::cvflann::HellingerDistance; +using ::cvflann::ChiSquareDistance; +using ::cvflann::KL_Divergence; + + + +template +class GenericIndex +{ +public: + typedef typename Distance::ElementType ElementType; + typedef typename Distance::ResultType DistanceType; + + GenericIndex(const Mat& features, const ::cvflann::IndexParams& params, Distance distance = Distance()); + + 
~GenericIndex(); + + void knnSearch(const vector& query, vector& indices, + vector& dists, int knn, const ::cvflann::SearchParams& params); + void knnSearch(const Mat& queries, Mat& indices, Mat& dists, int knn, const ::cvflann::SearchParams& params); + + int radiusSearch(const vector& query, vector& indices, + vector& dists, DistanceType radius, const ::cvflann::SearchParams& params); + int radiusSearch(const Mat& query, Mat& indices, Mat& dists, + DistanceType radius, const ::cvflann::SearchParams& params); + + void save(std::string filename) { nnIndex->save(filename); } + + int veclen() const { return nnIndex->veclen(); } + + int size() const { return nnIndex->size(); } + + ::cvflann::IndexParams getParameters() { return nnIndex->getParameters(); } + + FLANN_DEPRECATED const ::cvflann::IndexParams* getIndexParameters() { return nnIndex->getIndexParameters(); } + +private: + ::cvflann::Index* nnIndex; +}; + + +#define FLANN_DISTANCE_CHECK \ + if ( ::cvflann::flann_distance_type() != cvflann::FLANN_DIST_L2) { \ + printf("[WARNING] You are using cv::flann::Index (or cv::flann::GenericIndex) and have also changed "\ + "the distance using cvflann::set_distance_type. This is no longer working as expected "\ + "(cv::flann::Index always uses L2). You should create the index templated on the distance, "\ + "for example for L1 distance use: GenericIndex< L1 > \n"); \ + } + + +template +GenericIndex::GenericIndex(const Mat& dataset, const ::cvflann::IndexParams& params, Distance distance) +{ + CV_Assert(dataset.type() == CvType::type()); + CV_Assert(dataset.isContinuous()); + ::cvflann::Matrix m_dataset((ElementType*)dataset.ptr(0), dataset.rows, dataset.cols); + + nnIndex = new ::cvflann::Index(m_dataset, params, distance); + + FLANN_DISTANCE_CHECK + + nnIndex->buildIndex(); +} + +template +GenericIndex::~GenericIndex() +{ + delete nnIndex; +} + +template +void GenericIndex::knnSearch(const vector& query, vector& indices, vector& dists, int knn, const ::cvflann::SearchParams& searchParams) +{ + ::cvflann::Matrix m_query((ElementType*)&query[0], 1, query.size()); + ::cvflann::Matrix m_indices(&indices[0], 1, indices.size()); + ::cvflann::Matrix m_dists(&dists[0], 1, dists.size()); + + FLANN_DISTANCE_CHECK + + nnIndex->knnSearch(m_query,m_indices,m_dists,knn,searchParams); +} + + +template +void GenericIndex::knnSearch(const Mat& queries, Mat& indices, Mat& dists, int knn, const ::cvflann::SearchParams& searchParams) +{ + CV_Assert(queries.type() == CvType::type()); + CV_Assert(queries.isContinuous()); + ::cvflann::Matrix m_queries((ElementType*)queries.ptr(0), queries.rows, queries.cols); + + CV_Assert(indices.type() == CV_32S); + CV_Assert(indices.isContinuous()); + ::cvflann::Matrix m_indices((int*)indices.ptr(0), indices.rows, indices.cols); + + CV_Assert(dists.type() == CvType::type()); + CV_Assert(dists.isContinuous()); + ::cvflann::Matrix m_dists((DistanceType*)dists.ptr(0), dists.rows, dists.cols); + + FLANN_DISTANCE_CHECK + + nnIndex->knnSearch(m_queries,m_indices,m_dists,knn, searchParams); +} + +template +int GenericIndex::radiusSearch(const vector& query, vector& indices, vector& dists, DistanceType radius, const ::cvflann::SearchParams& searchParams) +{ + ::cvflann::Matrix m_query((ElementType*)&query[0], 1, query.size()); + ::cvflann::Matrix m_indices(&indices[0], 1, indices.size()); + ::cvflann::Matrix m_dists(&dists[0], 1, dists.size()); + + FLANN_DISTANCE_CHECK + + return nnIndex->radiusSearch(m_query,m_indices,m_dists,radius,searchParams); +} + +template +int 
GenericIndex::radiusSearch(const Mat& query, Mat& indices, Mat& dists, DistanceType radius, const ::cvflann::SearchParams& searchParams) +{ + CV_Assert(query.type() == CvType::type()); + CV_Assert(query.isContinuous()); + ::cvflann::Matrix m_query((ElementType*)query.ptr(0), query.rows, query.cols); + + CV_Assert(indices.type() == CV_32S); + CV_Assert(indices.isContinuous()); + ::cvflann::Matrix m_indices((int*)indices.ptr(0), indices.rows, indices.cols); + + CV_Assert(dists.type() == CvType::type()); + CV_Assert(dists.isContinuous()); + ::cvflann::Matrix m_dists((DistanceType*)dists.ptr(0), dists.rows, dists.cols); + + FLANN_DISTANCE_CHECK + + return nnIndex->radiusSearch(m_query,m_indices,m_dists,radius,searchParams); +} + +/** + * @deprecated Use GenericIndex class instead + */ +template +class +#ifndef _MSC_VER + FLANN_DEPRECATED +#endif + Index_ { +public: + typedef typename L2::ElementType ElementType; + typedef typename L2::ResultType DistanceType; + + Index_(const Mat& features, const ::cvflann::IndexParams& params); + + ~Index_(); + + void knnSearch(const vector& query, vector& indices, vector& dists, int knn, const ::cvflann::SearchParams& params); + void knnSearch(const Mat& queries, Mat& indices, Mat& dists, int knn, const ::cvflann::SearchParams& params); + + int radiusSearch(const vector& query, vector& indices, vector& dists, DistanceType radius, const ::cvflann::SearchParams& params); + int radiusSearch(const Mat& query, Mat& indices, Mat& dists, DistanceType radius, const ::cvflann::SearchParams& params); + + void save(std::string filename) + { + if (nnIndex_L1) nnIndex_L1->save(filename); + if (nnIndex_L2) nnIndex_L2->save(filename); + } + + int veclen() const + { + if (nnIndex_L1) return nnIndex_L1->veclen(); + if (nnIndex_L2) return nnIndex_L2->veclen(); + } + + int size() const + { + if (nnIndex_L1) return nnIndex_L1->size(); + if (nnIndex_L2) return nnIndex_L2->size(); + } + + ::cvflann::IndexParams getParameters() + { + if (nnIndex_L1) return nnIndex_L1->getParameters(); + if (nnIndex_L2) return nnIndex_L2->getParameters(); + + } + + FLANN_DEPRECATED const ::cvflann::IndexParams* getIndexParameters() + { + if (nnIndex_L1) return nnIndex_L1->getIndexParameters(); + if (nnIndex_L2) return nnIndex_L2->getIndexParameters(); + } + +private: + // providing backwards compatibility for L2 and L1 distances (most common) + ::cvflann::Index< L2 >* nnIndex_L2; + ::cvflann::Index< L1 >* nnIndex_L1; +}; + +#ifdef _MSC_VER +template +class FLANN_DEPRECATED Index_; +#endif + +template +Index_::Index_(const Mat& dataset, const ::cvflann::IndexParams& params) +{ + printf("[WARNING] The cv::flann::Index_ class is deperecated, use cv::flann::GenericIndex instead\n"); + + CV_Assert(dataset.type() == CvType::type()); + CV_Assert(dataset.isContinuous()); + ::cvflann::Matrix m_dataset((ElementType*)dataset.ptr(0), dataset.rows, dataset.cols); + + if ( ::cvflann::flann_distance_type() == cvflann::FLANN_DIST_L2 ) { + nnIndex_L1 = NULL; + nnIndex_L2 = new ::cvflann::Index< L2 >(m_dataset, params); + } + else if ( ::cvflann::flann_distance_type() == cvflann::FLANN_DIST_L1 ) { + nnIndex_L1 = new ::cvflann::Index< L1 >(m_dataset, params); + nnIndex_L2 = NULL; + } + else { + printf("[ERROR] cv::flann::Index_ only provides backwards compatibility for the L1 and L2 distances. 
" + "For other distance types you must use cv::flann::GenericIndex\n"); + CV_Assert(0); + } + if (nnIndex_L1) nnIndex_L1->buildIndex(); + if (nnIndex_L2) nnIndex_L2->buildIndex(); +} + +template +Index_::~Index_() +{ + if (nnIndex_L1) delete nnIndex_L1; + if (nnIndex_L2) delete nnIndex_L2; +} + +template +void Index_::knnSearch(const vector& query, vector& indices, vector& dists, int knn, const ::cvflann::SearchParams& searchParams) +{ + ::cvflann::Matrix m_query((ElementType*)&query[0], 1, query.size()); + ::cvflann::Matrix m_indices(&indices[0], 1, indices.size()); + ::cvflann::Matrix m_dists(&dists[0], 1, dists.size()); + + if (nnIndex_L1) nnIndex_L1->knnSearch(m_query,m_indices,m_dists,knn,searchParams); + if (nnIndex_L2) nnIndex_L2->knnSearch(m_query,m_indices,m_dists,knn,searchParams); +} + + +template +void Index_::knnSearch(const Mat& queries, Mat& indices, Mat& dists, int knn, const ::cvflann::SearchParams& searchParams) +{ + CV_Assert(queries.type() == CvType::type()); + CV_Assert(queries.isContinuous()); + ::cvflann::Matrix m_queries((ElementType*)queries.ptr(0), queries.rows, queries.cols); + + CV_Assert(indices.type() == CV_32S); + CV_Assert(indices.isContinuous()); + ::cvflann::Matrix m_indices((int*)indices.ptr(0), indices.rows, indices.cols); + + CV_Assert(dists.type() == CvType::type()); + CV_Assert(dists.isContinuous()); + ::cvflann::Matrix m_dists((DistanceType*)dists.ptr(0), dists.rows, dists.cols); + + if (nnIndex_L1) nnIndex_L1->knnSearch(m_queries,m_indices,m_dists,knn, searchParams); + if (nnIndex_L2) nnIndex_L2->knnSearch(m_queries,m_indices,m_dists,knn, searchParams); +} + +template +int Index_::radiusSearch(const vector& query, vector& indices, vector& dists, DistanceType radius, const ::cvflann::SearchParams& searchParams) +{ + ::cvflann::Matrix m_query((ElementType*)&query[0], 1, query.size()); + ::cvflann::Matrix m_indices(&indices[0], 1, indices.size()); + ::cvflann::Matrix m_dists(&dists[0], 1, dists.size()); + + if (nnIndex_L1) return nnIndex_L1->radiusSearch(m_query,m_indices,m_dists,radius,searchParams); + if (nnIndex_L2) return nnIndex_L2->radiusSearch(m_query,m_indices,m_dists,radius,searchParams); +} + +template +int Index_::radiusSearch(const Mat& query, Mat& indices, Mat& dists, DistanceType radius, const ::cvflann::SearchParams& searchParams) +{ + CV_Assert(query.type() == CvType::type()); + CV_Assert(query.isContinuous()); + ::cvflann::Matrix m_query((ElementType*)query.ptr(0), query.rows, query.cols); + + CV_Assert(indices.type() == CV_32S); + CV_Assert(indices.isContinuous()); + ::cvflann::Matrix m_indices((int*)indices.ptr(0), indices.rows, indices.cols); + + CV_Assert(dists.type() == CvType::type()); + CV_Assert(dists.isContinuous()); + ::cvflann::Matrix m_dists((DistanceType*)dists.ptr(0), dists.rows, dists.cols); + + if (nnIndex_L1) return nnIndex_L1->radiusSearch(m_query,m_indices,m_dists,radius,searchParams); + if (nnIndex_L2) return nnIndex_L2->radiusSearch(m_query,m_indices,m_dists,radius,searchParams); +} + + +template +int hierarchicalClustering(const Mat& features, Mat& centers, const ::cvflann::KMeansIndexParams& params, + Distance d = Distance()) +{ + typedef typename Distance::ElementType ElementType; + typedef typename Distance::ResultType DistanceType; + + CV_Assert(features.type() == CvType::type()); + CV_Assert(features.isContinuous()); + ::cvflann::Matrix m_features((ElementType*)features.ptr(0), features.rows, features.cols); + + CV_Assert(centers.type() == CvType::type()); + CV_Assert(centers.isContinuous()); + 
::cvflann::Matrix m_centers((DistanceType*)centers.ptr(0), centers.rows, centers.cols); + + return ::cvflann::hierarchicalClustering(m_features, m_centers, params, d); +} + + +template +FLANN_DEPRECATED int hierarchicalClustering(const Mat& features, Mat& centers, const ::cvflann::KMeansIndexParams& params) +{ + printf("[WARNING] cv::flann::hierarchicalClustering is deprecated, use " + "cv::flann::hierarchicalClustering instead\n"); + + if ( ::cvflann::flann_distance_type() == cvflann::FLANN_DIST_L2 ) { + return hierarchicalClustering< L2 >(features, centers, params); + } + else if ( ::cvflann::flann_distance_type() == cvflann::FLANN_DIST_L1 ) { + return hierarchicalClustering< L1 >(features, centers, params); + } + else { + printf("[ERROR] cv::flann::hierarchicalClustering only provides backwards " + "compatibility for the L1 and L2 distances. " + "For other distance types you must use cv::flann::hierarchicalClustering\n"); + CV_Assert(0); + } +} + +} } // namespace cv::flann + +#endif // __cplusplus + +#endif diff --git a/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/flann_base.hpp b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/flann_base.hpp new file mode 100644 index 0000000..bb5b120 --- /dev/null +++ b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/flann_base.hpp @@ -0,0 +1,301 @@ +/*********************************************************************** + * Software License Agreement (BSD License) + * + * Copyright 2008-2009 Marius Muja (mariusm@cs.ubc.ca). All rights reserved. + * Copyright 2008-2009 David G. Lowe (lowe@cs.ubc.ca). All rights reserved. + * + * THE BSD LICENSE + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *************************************************************************/ + +#ifndef OPENCV_FLANN_BASE_HPP_ +#define OPENCV_FLANN_BASE_HPP_ + +#include +#include +#include +#include + +#include "general.h" +#include "matrix.h" +#include "params.h" +#include "saving.h" + +#include "all_indices.h" + +namespace cvflann +{ + +/** + * Sets the log level used for all flann functions + * @param level Verbosity level + */ +inline void log_verbosity(int level) +{ + if (level >= 0) { + Logger::setLevel(level); + } +} + +/** + * (Deprecated) Index parameters for creating a saved index. 
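The cv::flann::GenericIndex wrapper declared above is typically built over a continuous cv::Mat of the element type and queried through the Mat overloads of knnSearch. A minimal sketch (not part of the patch; the data is random and the index/search parameters are only illustrative):

#include <cstdio>
#include "opencv2/core/core.hpp"
#include "opencv2/flann/flann.hpp"

int main()
{
    // 1000 random 32-dimensional float descriptors
    cv::Mat features(1000, 32, CV_32F);
    cv::randu(features, cv::Scalar::all(0), cv::Scalar::all(1));

    // index templated on the distance, as the FLANN_DISTANCE_CHECK warning recommends
    cv::flann::GenericIndex< cvflann::L2<float> > index(features, cvflann::KDTreeIndexParams(4));

    cv::Mat query = features.row(0).clone();
    cv::Mat indices(1, 3, CV_32S), dists(1, 3, CV_32F);
    index.knnSearch(query, indices, dists, 3, cvflann::SearchParams(32));

    std::printf("nearest index: %d (squared L2 distance %f)\n",
                indices.at<int>(0, 0), (double)dists.at<float>(0, 0));
    return 0;
}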
+ */ +struct SavedIndexParams : public IndexParams +{ + SavedIndexParams(std::string filename) + { + (* this)["algorithm"] = FLANN_INDEX_SAVED; + (*this)["filename"] = filename; + } +}; + + +template +NNIndex* load_saved_index(const Matrix& dataset, const std::string& filename, Distance distance) +{ + typedef typename Distance::ElementType ElementType; + + FILE* fin = fopen(filename.c_str(), "rb"); + if (fin == NULL) { + return NULL; + } + IndexHeader header = load_header(fin); + if (header.data_type != Datatype::type()) { + throw FLANNException("Datatype of saved index is different than of the one to be created."); + } + if ((size_t(header.rows) != dataset.rows)||(size_t(header.cols) != dataset.cols)) { + throw FLANNException("The index saved belongs to a different dataset"); + } + + IndexParams params; + params["algorithm"] = header.index_type; + NNIndex* nnIndex = create_index_by_type(dataset, params, distance); + nnIndex->loadIndex(fin); + fclose(fin); + + return nnIndex; +} + + +template +class Index : public NNIndex +{ +public: + typedef typename Distance::ElementType ElementType; + typedef typename Distance::ResultType DistanceType; + + Index(const Matrix& features, const IndexParams& params, Distance distance = Distance() ) + : index_params_(params) + { + flann_algorithm_t index_type = get_param(params,"algorithm"); + loaded_ = false; + + if (index_type == FLANN_INDEX_SAVED) { + nnIndex_ = load_saved_index(features, get_param(params,"filename"), distance); + loaded_ = true; + } + else { + nnIndex_ = create_index_by_type(features, params, distance); + } + } + + ~Index() + { + delete nnIndex_; + } + + /** + * implementation for algorithms of addable indexes after that. + */ + void addIndex(const Matrix& wholeData, const Matrix& additionalData) + { + if (!loaded_) { + nnIndex_->addIndex(wholeData, additionalData); + } + } + + /** + * Builds the index. + */ + void buildIndex() + { + if (!loaded_) { + nnIndex_->buildIndex(); + } + } + + void save(std::string filename) + { + FILE* fout = fopen(filename.c_str(), "wb"); + if (fout == NULL) { + throw FLANNException("Cannot open file"); + } + save_header(fout, *nnIndex_); + saveIndex(fout); + fclose(fout); + } + + /** + * \brief Saves the index to a stream + * \param stream The stream to save the index to + */ + virtual void saveIndex(FILE* stream) + { + nnIndex_->saveIndex(stream); + } + + /** + * \brief Loads the index from a stream + * \param stream The stream from which the index is loaded + */ + virtual void loadIndex(FILE* stream) + { + nnIndex_->loadIndex(stream); + } + + /** + * \returns number of features in this index. + */ + size_t veclen() const + { + return nnIndex_->veclen(); + } + + /** + * \returns The dimensionality of the features in this index. + */ + size_t size() const + { + return nnIndex_->size(); + } + + /** + * \returns The index type (kdtree, kmeans,...) + */ + flann_algorithm_t getType() const + { + return nnIndex_->getType(); + } + + /** + * \returns The amount of memory (in bytes) used by the index. 
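SavedIndexParams and load_saved_index above give cvflann::Index its save/reload path. A small sketch of that round trip (not part of the patch; the helper name, file name, and k-means parameters are made up):

#include "opencv2/flann/flann_base.hpp"

void save_and_reload(const cvflann::Matrix<float>& data)
{
    // build a k-means tree index and persist it
    cvflann::Index< cvflann::L2<float> > index(data, cvflann::KMeansIndexParams(32));
    index.buildIndex();
    index.save("index.bin");

    // FLANN_INDEX_SAVED path: the constructor goes through load_saved_index()
    // and buildIndex() becomes a no-op for the already-loaded index
    cvflann::Index< cvflann::L2<float> > reloaded(data, cvflann::SavedIndexParams("index.bin"));
    reloaded.buildIndex();
}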
+ */ + virtual int usedMemory() const + { + return nnIndex_->usedMemory(); + } + + + /** + * \returns The index parameters + */ + IndexParams getParameters() const + { + return nnIndex_->getParameters(); + } + + /** + * \brief Perform k-nearest neighbor search + * \param[in] queries The query points for which to find the nearest neighbors + * \param[out] indices The indices of the nearest neighbors found + * \param[out] dists Distances to the nearest neighbors found + * \param[in] knn Number of nearest neighbors to return + * \param[in] params Search parameters + */ + void knnSearch(const Matrix& queries, Matrix& indices, Matrix& dists, int knn, const SearchParams& params) + { + nnIndex_->knnSearch(queries, indices, dists, knn, params); + } + + /** + * \brief Perform radius search + * \param[in] query The query point + * \param[out] indices The indinces of the neighbors found within the given radius + * \param[out] dists The distances to the nearest neighbors found + * \param[in] radius The radius used for search + * \param[in] params Search parameters + * \returns Number of neighbors found + */ + int radiusSearch(const Matrix& query, Matrix& indices, Matrix& dists, float radius, const SearchParams& params) + { + return nnIndex_->radiusSearch(query, indices, dists, radius, params); + } + + /** + * \brief Method that searches for nearest-neighbours + */ + void findNeighbors(ResultSet& result, const ElementType* vec, const SearchParams& searchParams) + { + nnIndex_->findNeighbors(result, vec, searchParams); + } + + /** + * \brief Returns actual index + */ + FLANN_DEPRECATED NNIndex* getIndex() + { + return nnIndex_; + } + + /** + * \brief Returns index parameters. + * \deprecated use getParameters() instead. + */ + FLANN_DEPRECATED const IndexParams* getIndexParameters() + { + return &index_params_; + } + +private: + /** Pointer to actual index class */ + NNIndex* nnIndex_; + /** Indices if the index was loaded from a file */ + bool loaded_; + /** Parameters passed to the index */ + IndexParams index_params_; +}; + +/** + * Performs a hierarchical clustering of the points passed as argument and then takes a cut in the + * the clustering tree to return a flat clustering. + * @param[in] points Points to be clustered + * @param centers The computed cluster centres. Matrix should be preallocated and centers.rows is the + * number of clusters requested. + * @param params Clustering parameters (The same as for cvflann::KMeansIndex) + * @param d Distance to be used for clustering (eg: cvflann::L2) + * @return number of clusters computed (can be different than clusters.rows and is the highest number + * of the form (branching-1)*K+1 smaller than clusters.rows). + */ +template +int hierarchicalClustering(const Matrix& points, Matrix& centers, + const KMeansIndexParams& params, Distance d = Distance()) +{ + KMeansIndex kmeans(points, params, d); + kmeans.buildIndex(); + + int clusterNum = kmeans.getClusterCenters(centers); + return clusterNum; +} + +} +#endif /* OPENCV_FLANN_BASE_HPP_ */ diff --git a/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/general.h b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/general.h new file mode 100644 index 0000000..87e7e2f --- /dev/null +++ b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/general.h @@ -0,0 +1,52 @@ +/*********************************************************************** + * Software License Agreement (BSD License) + * + * Copyright 2008-2009 Marius Muja (mariusm@cs.ubc.ca). All rights reserved. 
+ * Copyright 2008-2009 David G. Lowe (lowe@cs.ubc.ca). All rights reserved. + * + * THE BSD LICENSE + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *************************************************************************/ + +#ifndef OPENCV_FLANN_GENERAL_H_ +#define OPENCV_FLANN_GENERAL_H_ + +#include "defines.h" +#include +#include + +namespace cvflann +{ + +class FLANNException : public std::runtime_error +{ +public: + FLANNException(const char* message) : std::runtime_error(message) { } + + FLANNException(const std::string& message) : std::runtime_error(message) { } +}; + +} + + +#endif /* OPENCV_FLANN_GENERAL_H_ */ diff --git a/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/ground_truth.h b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/ground_truth.h new file mode 100644 index 0000000..fd8f3ae --- /dev/null +++ b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/ground_truth.h @@ -0,0 +1,94 @@ +/*********************************************************************** + * Software License Agreement (BSD License) + * + * Copyright 2008-2009 Marius Muja (mariusm@cs.ubc.ca). All rights reserved. + * Copyright 2008-2009 David G. Lowe (lowe@cs.ubc.ca). All rights reserved. + * + * THE BSD LICENSE + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *************************************************************************/ + +#ifndef OPENCV_FLANN_GROUND_TRUTH_H_ +#define OPENCV_FLANN_GROUND_TRUTH_H_ + +#include "dist.h" +#include "matrix.h" + + +namespace cvflann +{ + +template +void find_nearest(const Matrix& dataset, typename Distance::ElementType* query, int* matches, int nn, + int skip = 0, Distance distance = Distance()) +{ + typedef typename Distance::ResultType DistanceType; + int n = nn + skip; + + std::vector match(n); + std::vector dists(n); + + dists[0] = distance(dataset[0], query, dataset.cols); + match[0] = 0; + int dcnt = 1; + + for (size_t i=1; i=1 && dists[j] +void compute_ground_truth(const Matrix& dataset, const Matrix& testset, Matrix& matches, + int skip=0, Distance d = Distance()) +{ + for (size_t i=0; i(dataset, testset[i], matches[i], (int)matches.cols, skip, d); + } +} + + +} + +#endif //OPENCV_FLANN_GROUND_TRUTH_H_ diff --git a/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/hdf5.h b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/hdf5.h new file mode 100644 index 0000000..ef3e999 --- /dev/null +++ b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/hdf5.h @@ -0,0 +1,231 @@ +/*********************************************************************** + * Software License Agreement (BSD License) + * + * Copyright 2008-2009 Marius Muja (mariusm@cs.ubc.ca). All rights reserved. + * Copyright 2008-2009 David G. Lowe (lowe@cs.ubc.ca). All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
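compute_ground_truth above fills one row of exact nearest-neighbour indices per test point by linear scan; it is what approximate indexes are later compared against. A minimal sketch (not part of the patch; the helper name and sizes are made up):

#include "opencv2/flann/ground_truth.h"

void make_ground_truth(const cvflann::Matrix<float>& dataset,
                       const cvflann::Matrix<float>& testset)
{
    // one row of exact-neighbour indices per query point, 10 neighbours each
    cvflann::Matrix<int> matches(new int[testset.rows * 10], testset.rows, 10);

    // linear scan with the chosen distance; skip=0 means no leading neighbours are skipped
    cvflann::compute_ground_truth< cvflann::L2<float> >(dataset, testset, matches, 0);

    // ... evaluate an approximate index against `matches` ...
    delete[] matches.data;
}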
+ *************************************************************************/ + + +#ifndef OPENCV_FLANN_HDF5_H_ +#define OPENCV_FLANN_HDF5_H_ + +#include + +#include "matrix.h" + + +namespace cvflann +{ + +namespace +{ + +template +hid_t get_hdf5_type() +{ + throw FLANNException("Unsupported type for IO operations"); +} + +template<> +hid_t get_hdf5_type() { return H5T_NATIVE_CHAR; } +template<> +hid_t get_hdf5_type() { return H5T_NATIVE_UCHAR; } +template<> +hid_t get_hdf5_type() { return H5T_NATIVE_SHORT; } +template<> +hid_t get_hdf5_type() { return H5T_NATIVE_USHORT; } +template<> +hid_t get_hdf5_type() { return H5T_NATIVE_INT; } +template<> +hid_t get_hdf5_type() { return H5T_NATIVE_UINT; } +template<> +hid_t get_hdf5_type() { return H5T_NATIVE_LONG; } +template<> +hid_t get_hdf5_type() { return H5T_NATIVE_ULONG; } +template<> +hid_t get_hdf5_type() { return H5T_NATIVE_FLOAT; } +template<> +hid_t get_hdf5_type() { return H5T_NATIVE_DOUBLE; } +} + + +#define CHECK_ERROR(x,y) if ((x)<0) throw FLANNException((y)); + +template +void save_to_file(const cvflann::Matrix& dataset, const std::string& filename, const std::string& name) +{ + +#if H5Eset_auto_vers == 2 + H5Eset_auto( H5E_DEFAULT, NULL, NULL ); +#else + H5Eset_auto( NULL, NULL ); +#endif + + herr_t status; + hid_t file_id; + file_id = H5Fopen(filename.c_str(), H5F_ACC_RDWR, H5P_DEFAULT); + if (file_id < 0) { + file_id = H5Fcreate(filename.c_str(), H5F_ACC_EXCL, H5P_DEFAULT, H5P_DEFAULT); + } + CHECK_ERROR(file_id,"Error creating hdf5 file."); + + hsize_t dimsf[2]; // dataset dimensions + dimsf[0] = dataset.rows; + dimsf[1] = dataset.cols; + + hid_t space_id = H5Screate_simple(2, dimsf, NULL); + hid_t memspace_id = H5Screate_simple(2, dimsf, NULL); + + hid_t dataset_id; +#if H5Dcreate_vers == 2 + dataset_id = H5Dcreate2(file_id, name.c_str(), get_hdf5_type(), space_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); +#else + dataset_id = H5Dcreate(file_id, name.c_str(), get_hdf5_type(), space_id, H5P_DEFAULT); +#endif + + if (dataset_id<0) { +#if H5Dopen_vers == 2 + dataset_id = H5Dopen2(file_id, name.c_str(), H5P_DEFAULT); +#else + dataset_id = H5Dopen(file_id, name.c_str()); +#endif + } + CHECK_ERROR(dataset_id,"Error creating or opening dataset in file."); + + status = H5Dwrite(dataset_id, get_hdf5_type(), memspace_id, space_id, H5P_DEFAULT, dataset.data ); + CHECK_ERROR(status, "Error writing to dataset"); + + H5Sclose(memspace_id); + H5Sclose(space_id); + H5Dclose(dataset_id); + H5Fclose(file_id); + +} + + +template +void load_from_file(cvflann::Matrix& dataset, const std::string& filename, const std::string& name) +{ + herr_t status; + hid_t file_id = H5Fopen(filename.c_str(), H5F_ACC_RDWR, H5P_DEFAULT); + CHECK_ERROR(file_id,"Error opening hdf5 file."); + + hid_t dataset_id; +#if H5Dopen_vers == 2 + dataset_id = H5Dopen2(file_id, name.c_str(), H5P_DEFAULT); +#else + dataset_id = H5Dopen(file_id, name.c_str()); +#endif + CHECK_ERROR(dataset_id,"Error opening dataset in file."); + + hid_t space_id = H5Dget_space(dataset_id); + + hsize_t dims_out[2]; + H5Sget_simple_extent_dims(space_id, dims_out, NULL); + + dataset = cvflann::Matrix(new T[dims_out[0]*dims_out[1]], dims_out[0], dims_out[1]); + + status = H5Dread(dataset_id, get_hdf5_type(), H5S_ALL, H5S_ALL, H5P_DEFAULT, dataset[0]); + CHECK_ERROR(status, "Error reading dataset"); + + H5Sclose(space_id); + H5Dclose(dataset_id); + H5Fclose(file_id); +} + + +#ifdef HAVE_MPI + +namespace mpi +{ +/** + * Loads a the hyperslice corresponding to this processor from a hdf5 file. 
+ * @param flann_dataset Dataset where the data is loaded + * @param filename HDF5 file name + * @param name Name of dataset inside file + */ +template +void load_from_file(cvflann::Matrix& dataset, const std::string& filename, const std::string& name) +{ + MPI_Comm comm = MPI_COMM_WORLD; + MPI_Info info = MPI_INFO_NULL; + + int mpi_size, mpi_rank; + MPI_Comm_size(comm, &mpi_size); + MPI_Comm_rank(comm, &mpi_rank); + + herr_t status; + + hid_t plist_id = H5Pcreate(H5P_FILE_ACCESS); + H5Pset_fapl_mpio(plist_id, comm, info); + hid_t file_id = H5Fopen(filename.c_str(), H5F_ACC_RDWR, plist_id); + CHECK_ERROR(file_id,"Error opening hdf5 file."); + H5Pclose(plist_id); + hid_t dataset_id; +#if H5Dopen_vers == 2 + dataset_id = H5Dopen2(file_id, name.c_str(), H5P_DEFAULT); +#else + dataset_id = H5Dopen(file_id, name.c_str()); +#endif + CHECK_ERROR(dataset_id,"Error opening dataset in file."); + + hid_t space_id = H5Dget_space(dataset_id); + hsize_t dims[2]; + H5Sget_simple_extent_dims(space_id, dims, NULL); + + hsize_t count[2]; + hsize_t offset[2]; + + hsize_t item_cnt = dims[0]/mpi_size+(dims[0]%mpi_size==0 ? 0 : 1); + hsize_t cnt = (mpi_rank(), memspace_id, space_id, plist_id, dataset.data); + CHECK_ERROR(status, "Error reading dataset"); + + H5Pclose(plist_id); + H5Sclose(space_id); + H5Sclose(memspace_id); + H5Dclose(dataset_id); + H5Fclose(file_id); +} +} +#endif // HAVE_MPI +} // namespace cvflann::mpi + +#endif /* OPENCV_FLANN_HDF5_H_ */ diff --git a/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/heap.h b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/heap.h new file mode 100644 index 0000000..92a6ea6 --- /dev/null +++ b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/heap.h @@ -0,0 +1,165 @@ +/*********************************************************************** + * Software License Agreement (BSD License) + * + * Copyright 2008-2009 Marius Muja (mariusm@cs.ubc.ca). All rights reserved. + * Copyright 2008-2009 David G. Lowe (lowe@cs.ubc.ca). All rights reserved. + * + * THE BSD LICENSE + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
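The save_to_file/load_from_file helpers above round-trip a cvflann::Matrix through an HDF5 dataset. A small sketch (not part of the patch; it assumes libhdf5 is available, and the file and dataset names are made up):

#include <algorithm>
#include "opencv2/flann/hdf5.h"

void roundtrip_hdf5()
{
    // write a small made-up dataset, then read it back
    cvflann::Matrix<float> dataset(new float[100 * 8], 100, 8);
    std::fill(dataset.data, dataset.data + 100 * 8, 0.f);
    cvflann::save_to_file(dataset, "features.h5", "dataset");

    cvflann::Matrix<float> loaded;
    cvflann::load_from_file(loaded, "features.h5", "dataset");   // allocates loaded.data

    delete[] dataset.data;
    delete[] loaded.data;
}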
+ *************************************************************************/ + +#ifndef OPENCV_FLANN_HEAP_H_ +#define OPENCV_FLANN_HEAP_H_ + +#include +#include + +namespace cvflann +{ + +/** + * Priority Queue Implementation + * + * The priority queue is implemented with a heap. A heap is a complete + * (full) binary tree in which each parent is less than both of its + * children, but the order of the children is unspecified. + */ +template +class Heap +{ + + /** + * Storage array for the heap. + * Type T must be comparable. + */ + std::vector heap; + int length; + + /** + * Number of element in the heap + */ + int count; + + + +public: + /** + * Constructor. + * + * Params: + * sz = heap size + */ + + Heap(int sz) + { + length = sz; + heap.reserve(length); + count = 0; + } + + /** + * + * Returns: heap size + */ + int size() + { + return count; + } + + /** + * Tests if the heap is empty + * + * Returns: true is heap empty, false otherwise + */ + bool empty() + { + return size()==0; + } + + /** + * Clears the heap. + */ + void clear() + { + heap.clear(); + count = 0; + } + + struct CompareT + { + bool operator()(const T& t_1, const T& t_2) const + { + return t_2 < t_1; + } + }; + + /** + * Insert a new element in the heap. + * + * We select the next empty leaf node, and then keep moving any larger + * parents down until the right location is found to store this element. + * + * Params: + * value = the new element to be inserted in the heap + */ + void insert(T value) + { + /* If heap is full, then return without adding this element. */ + if (count == length) { + return; + } + + heap.push_back(value); + static CompareT compareT; + std::push_heap(heap.begin(), heap.end(), compareT); + ++count; + } + + + + /** + * Returns the node of minimum value from the heap (top of the heap). + * + * Params: + * value = out parameter used to return the min element + * Returns: false if heap empty + */ + bool popMin(T& value) + { + if (count == 0) { + return false; + } + + value = heap[0]; + static CompareT compareT; + std::pop_heap(heap.begin(), heap.end(), compareT); + heap.pop_back(); + --count; + + return true; /* Return old last node. */ + } +}; + +} + +#endif //OPENCV_FLANN_HEAP_H_ diff --git a/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/hierarchical_clustering_index.h b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/hierarchical_clustering_index.h new file mode 100644 index 0000000..59423ae --- /dev/null +++ b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/hierarchical_clustering_index.h @@ -0,0 +1,776 @@ +/*********************************************************************** + * Software License Agreement (BSD License) + * + * Copyright 2008-2011 Marius Muja (mariusm@cs.ubc.ca). All rights reserved. + * Copyright 2008-2011 David G. Lowe (lowe@cs.ubc.ca). All rights reserved. + * + * THE BSD LICENSE + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. 
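A short usage sketch for the Heap above (not part of the patch; the capacity and values are arbitrary). Inside the indexes it holds BranchStruct entries ordered by distance; plain ints show the same behaviour:

#include <cstdio>
#include "opencv2/flann/heap.h"

int main()
{
    cvflann::Heap<int> heap(8);        // fixed capacity; extra inserts are silently dropped
    heap.insert(5);
    heap.insert(1);
    heap.insert(3);

    int value;
    while (heap.popMin(value))         // pops 1, 3, 5 in increasing order
        std::printf("%d\n", value);
    return 0;
}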
+ * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *************************************************************************/ + +#ifndef OPENCV_FLANN_HIERARCHICAL_CLUSTERING_INDEX_H_ +#define OPENCV_FLANN_HIERARCHICAL_CLUSTERING_INDEX_H_ + +#include +#include +#include +#include +#include +#include + +#include "general.h" +#include "nn_index.h" +#include "dist.h" +#include "matrix.h" +#include "result_set.h" +#include "heap.h" +#include "allocator.h" +#include "random.h" +#include "saving.h" + + +namespace cvflann +{ + +struct HierarchicalClusteringIndexParams : public IndexParams +{ + HierarchicalClusteringIndexParams(int branching = 32, + flann_centers_init_t centers_init = FLANN_CENTERS_RANDOM, + int trees = 4, int leaf_size = 100) + { + (*this)["algorithm"] = FLANN_INDEX_HIERARCHICAL; + // The branching factor used in the hierarchical clustering + (*this)["branching"] = branching; + // Algorithm used for picking the initial cluster centers + (*this)["centers_init"] = centers_init; + // number of parallel trees to build + (*this)["trees"] = trees; + // maximum leaf size + (*this)["leaf_size"] = leaf_size; + } +}; + + +/** + * Hierarchical index + * + * Contains a tree constructed through a hierarchical clustering + * and other information for indexing a set of points for nearest-neighbour matching. + */ +template +class HierarchicalClusteringIndex : public NNIndex +{ +public: + typedef typename Distance::ElementType ElementType; + typedef typename Distance::ResultType DistanceType; + +private: + + + typedef void (HierarchicalClusteringIndex::* centersAlgFunction)(int, int*, int, int*, int&); + + /** + * The function used for choosing the cluster centers. + */ + centersAlgFunction chooseCenters; + + + + /** + * Chooses the initial centers in the k-means clustering in a random manner. + * + * Params: + * k = number of centers + * vecs = the dataset of points + * indices = indices in the dataset + * indices_length = length of indices vector + * + */ + void chooseCentersRandom(int k, int* dsindices, int indices_length, int* centers, int& centers_length) + { + UniqueRandom r(indices_length); + + int index; + for (index=0; index=0 && rnd < n); + + centers[0] = dsindices[rnd]; + + int index; + for (index=1; indexbest_val) { + best_val = dist; + best_index = j; + } + } + if (best_index!=-1) { + centers[index] = dsindices[best_index]; + } + else { + break; + } + } + centers_length = index; + } + + + /** + * Chooses the initial centers in the k-means using the algorithm + * proposed in the KMeans++ paper: + * Arthur, David; Vassilvitskii, Sergei - k-means++: The Advantages of Careful Seeding + * + * Implementation of this function was converted from the one provided in Arthur's code. 
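chooseCentersKMeanspp below implements the seeding step of that paper: each new centre is drawn with probability proportional to the squared distance to the nearest centre already chosen. A self-contained sketch of just that selection step (not part of the patch; the helper name and the distance values are made up):

#include <cstdio>
#include <cstdlib>

// Draw the next centre with probability proportional to the squared distance
// to the closest centre chosen so far (the same roulette-wheel selection that
// chooseCentersKMeanspp() performs with rand_double(currentPot)).
static int pick_next_center(const double* closestDistSq, int n, double currentPot)
{
    double randVal = currentPot * std::rand() / RAND_MAX;
    int index = 0;
    for (; index < n - 1; ++index) {
        if (randVal <= closestDistSq[index]) break;
        randVal -= closestDistSq[index];
    }
    return index;   // points far from every existing centre are favoured
}

int main()
{
    double d2[5] = {0.0, 4.0, 1.0, 9.0, 16.0};   // squared distances to nearest centre
    double pot   = 30.0;                          // their sum (the current potential)
    std::printf("next centre candidate: %d\n", pick_next_center(d2, 5, pot));
    return 0;
}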
+ * + * Params: + * k = number of centers + * vecs = the dataset of points + * indices = indices in the dataset + * Returns: + */ + void chooseCentersKMeanspp(int k, int* dsindices, int indices_length, int* centers, int& centers_length) + { + int n = indices_length; + + double currentPot = 0; + DistanceType* closestDistSq = new DistanceType[n]; + + // Choose one random center and set the closestDistSq values + int index = rand_int(n); + assert(index >=0 && index < n); + centers[0] = dsindices[index]; + + // Computing distance^2 will have the advantage of even higher probability further to pick new centers + // far from previous centers (and this complies to "k-means++: the advantages of careful seeding" article) + for (int i = 0; i < n; i++) { + closestDistSq[i] = distance(dataset[dsindices[i]], dataset[dsindices[index]], dataset.cols); + closestDistSq[i] = ensureSquareDistance( closestDistSq[i] ); + currentPot += closestDistSq[i]; + } + + + const int numLocalTries = 1; + + // Choose each center + int centerCount; + for (centerCount = 1; centerCount < k; centerCount++) { + + // Repeat several trials + double bestNewPot = -1; + int bestNewIndex = 0; + for (int localTrial = 0; localTrial < numLocalTries; localTrial++) { + + // Choose our center - have to be slightly careful to return a valid answer even accounting + // for possible rounding errors + double randVal = rand_double(currentPot); + for (index = 0; index < n-1; index++) { + if (randVal <= closestDistSq[index]) break; + else randVal -= closestDistSq[index]; + } + + // Compute the new potential + double newPot = 0; + for (int i = 0; i < n; i++) { + DistanceType dist = distance(dataset[dsindices[i]], dataset[dsindices[index]], dataset.cols); + newPot += std::min( ensureSquareDistance(dist), closestDistSq[i] ); + } + + // Store the best result + if ((bestNewPot < 0)||(newPot < bestNewPot)) { + bestNewPot = newPot; + bestNewIndex = index; + } + } + + // Add the appropriate center + centers[centerCount] = dsindices[bestNewIndex]; + currentPot = bestNewPot; + for (int i = 0; i < n; i++) { + DistanceType dist = distance(dataset[dsindices[i]], dataset[dsindices[bestNewIndex]], dataset.cols); + closestDistSq[i] = std::min( ensureSquareDistance(dist), closestDistSq[i] ); + } + } + + centers_length = centerCount; + + delete[] closestDistSq; + } + + +public: + + + /** + * Index constructor + * + * Params: + * inputData = dataset with the input features + * params = parameters passed to the hierarchical k-means algorithm + */ + HierarchicalClusteringIndex(const Matrix& inputData, const IndexParams& index_params = HierarchicalClusteringIndexParams(), + Distance d = Distance()) + : dataset(inputData), params(index_params), root(NULL), indices(NULL), distance(d) + { + memoryCounter = 0; + + size_ = dataset.rows; + veclen_ = dataset.cols; + + branching_ = get_param(params,"branching",32); + centers_init_ = get_param(params,"centers_init", FLANN_CENTERS_RANDOM); + trees_ = get_param(params,"trees",4); + leaf_size_ = get_param(params,"leaf_size",100); + + if (centers_init_==FLANN_CENTERS_RANDOM) { + chooseCenters = &HierarchicalClusteringIndex::chooseCentersRandom; + } + else if (centers_init_==FLANN_CENTERS_GONZALES) { + chooseCenters = &HierarchicalClusteringIndex::chooseCentersGonzales; + } + else if (centers_init_==FLANN_CENTERS_KMEANSPP) { + chooseCenters = &HierarchicalClusteringIndex::chooseCentersKMeanspp; + } + else { + throw FLANNException("Unknown algorithm for choosing initial centers."); + } + + trees_ = get_param(params,"trees",4); + 
root = new NodePtr[trees_]; + indices = new int*[trees_]; + + for (int i=0; i& /*wholeData*/, const Matrix& /*additionalData*/) + { + } + + /** + * Builds the index + */ + void buildIndex() + { + if (branching_<2) { + throw FLANNException("Branching factor must be at least 2"); + } + + free_elements(); + + for (int i=0; i(); + computeClustering(root[i], indices[i], (int)size_, branching_,0); + } + } + + + flann_algorithm_t getType() const + { + return FLANN_INDEX_HIERARCHICAL; + } + + + void saveIndex(FILE* stream) + { + save_value(stream, branching_); + save_value(stream, trees_); + save_value(stream, centers_init_); + save_value(stream, leaf_size_); + save_value(stream, memoryCounter); + for (int i=0; i& result, const ElementType* vec, const SearchParams& searchParams) + { + + int maxChecks = get_param(searchParams,"checks",32); + + // Priority queue storing intermediate branches in the best-bin-first search + Heap* heap = new Heap((int)size_); + + std::vector checked(size_,false); + int checks = 0; + for (int i=0; ipopMin(branch) && (checks BranchSt; + + + + void save_tree(FILE* stream, NodePtr node, int num) + { + save_value(stream, *node); + if (node->childs==NULL) { + int indices_offset = (int)(node->indices - indices[num]); + save_value(stream, indices_offset); + } + else { + for(int i=0; ichilds[i], num); + } + } + } + + + void load_tree(FILE* stream, NodePtr& node, int num) + { + node = pool.allocate(); + load_value(stream, *node); + if (node->childs==NULL) { + int indices_offset; + load_value(stream, indices_offset); + node->indices = indices[num] + indices_offset; + } + else { + node->childs = pool.allocate(branching_); + for(int i=0; ichilds[i], num); + } + } + } + + + + + void computeLabels(int* dsindices, int indices_length, int* centers, int centers_length, int* labels, DistanceType& cost) + { + cost = 0; + for (int i=0; inew_dist) { + labels[i] = j; + dist = new_dist; + } + } + cost += dist; + } + } + + /** + * The method responsible with actually doing the recursive hierarchical + * clustering + * + * Params: + * node = the node to cluster + * indices = indices of the points belonging to the current node + * branching = the branching factor to use in the clustering + * + * TODO: for 1-sized clusters don't store a cluster center (it's the same as the single cluster point) + */ + void computeClustering(NodePtr node, int* dsindices, int indices_length, int branching, int level) + { + node->size = indices_length; + node->level = level; + + if (indices_length < leaf_size_) { // leaf node + node->indices = dsindices; + std::sort(node->indices,node->indices+indices_length); + node->childs = NULL; + return; + } + + std::vector centers(branching); + std::vector labels(indices_length); + + int centers_length; + (this->*chooseCenters)(branching, dsindices, indices_length, ¢ers[0], centers_length); + + if (centers_lengthindices = dsindices; + std::sort(node->indices,node->indices+indices_length); + node->childs = NULL; + return; + } + + + // assign points to clusters + DistanceType cost; + computeLabels(dsindices, indices_length, ¢ers[0], centers_length, &labels[0], cost); + + node->childs = pool.allocate(branching); + int start = 0; + int end = start; + for (int i=0; ichilds[i] = pool.allocate(); + node->childs[i]->pivot = centers[i]; + node->childs[i]->indices = NULL; + computeClustering(node->childs[i],dsindices+start, end-start, branching, level+1); + start=end; + } + } + + + + /** + * Performs one descent in the hierarchical k-means tree. 
The branches not + * visited are stored in a priority queue. + * + * Params: + * node = node to explore + * result = container for the k-nearest neighbors found + * vec = query points + * checks = how many points in the dataset have been checked so far + * maxChecks = maximum dataset points to checks + */ + + + void findNN(NodePtr node, ResultSet& result, const ElementType* vec, int& checks, int maxChecks, + Heap* heap, std::vector& checked) + { + if (node->childs==NULL) { + if (checks>=maxChecks) { + if (result.full()) return; + } + for (int i=0; isize; ++i) { + int index = node->indices[i]; + if (!checked[index]) { + DistanceType dist = distance(dataset[index], vec, veclen_); + result.addPoint(dist, index); + checked[index] = true; + ++checks; + } + } + } + else { + DistanceType* domain_distances = new DistanceType[branching_]; + int best_index = 0; + domain_distances[best_index] = distance(vec, dataset[node->childs[best_index]->pivot], veclen_); + for (int i=1; ichilds[i]->pivot], veclen_); + if (domain_distances[i]insert(BranchSt(node->childs[i],domain_distances[i])); + } + } + delete[] domain_distances; + findNN(node->childs[best_index],result,vec, checks, maxChecks, heap, checked); + } + } + +private: + + + /** + * The dataset used by this index + */ + const Matrix dataset; + + /** + * Parameters used by this index + */ + IndexParams params; + + + /** + * Number of features in the dataset. + */ + size_t size_; + + /** + * Length of each feature. + */ + size_t veclen_; + + /** + * The root node in the tree. + */ + NodePtr* root; + + /** + * Array of indices to vectors in the dataset. + */ + int** indices; + + + /** + * The distance + */ + Distance distance; + + /** + * Pooled memory allocator. + * + * Using a pooled memory allocator is more efficient + * than allocating memory directly when there is a large + * number small of memory allocations. + */ + PooledAllocator pool; + + /** + * Memory occupied by the index. + */ + int memoryCounter; + + /** index parameters */ + int branching_; + int trees_; + flann_centers_init_t centers_init_; + int leaf_size_; + + +}; + +} + +#endif /* OPENCV_FLANN_HIERARCHICAL_CLUSTERING_INDEX_H_ */ diff --git a/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/index_testing.h b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/index_testing.h new file mode 100644 index 0000000..d764004 --- /dev/null +++ b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/index_testing.h @@ -0,0 +1,318 @@ +/*********************************************************************** + * Software License Agreement (BSD License) + * + * Copyright 2008-2009 Marius Muja (mariusm@cs.ubc.ca). All rights reserved. + * Copyright 2008-2009 David G. Lowe (lowe@cs.ubc.ca). All rights reserved. + * + * THE BSD LICENSE + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *************************************************************************/ + +#ifndef OPENCV_FLANN_INDEX_TESTING_H_ +#define OPENCV_FLANN_INDEX_TESTING_H_ + +#include +#include +#include + +#include "matrix.h" +#include "nn_index.h" +#include "result_set.h" +#include "logger.h" +#include "timer.h" + + +namespace cvflann +{ + +inline int countCorrectMatches(int* neighbors, int* groundTruth, int n) +{ + int count = 0; + for (int i=0; i +typename Distance::ResultType computeDistanceRaport(const Matrix& inputData, typename Distance::ElementType* target, + int* neighbors, int* groundTruth, int veclen, int n, const Distance& distance) +{ + typedef typename Distance::ResultType DistanceType; + + DistanceType ret = 0; + for (int i=0; i +float search_with_ground_truth(NNIndex& index, const Matrix& inputData, + const Matrix& testData, const Matrix& matches, int nn, int checks, + float& time, typename Distance::ResultType& dist, const Distance& distance, int skipMatches) +{ + typedef typename Distance::ResultType DistanceType; + + if (matches.cols resultSet(nn+skipMatches); + SearchParams searchParams(checks); + + std::vector indices(nn+skipMatches); + std::vector dists(nn+skipMatches); + int* neighbors = &indices[skipMatches]; + + int correct = 0; + DistanceType distR = 0; + StartStopTimer t; + int repeats = 0; + while (t.value<0.2) { + repeats++; + t.start(); + correct = 0; + distR = 0; + for (size_t i = 0; i < testData.rows; i++) { + resultSet.init(&indices[0], &dists[0]); + index.findNeighbors(resultSet, testData[i], searchParams); + + correct += countCorrectMatches(neighbors,matches[i], nn); + distR += computeDistanceRaport(inputData, testData[i], neighbors, matches[i], (int)testData.cols, nn, distance); + } + t.stop(); + } + time = float(t.value/repeats); + + float precicion = (float)correct/(nn*testData.rows); + + dist = distR/(testData.rows*nn); + + Logger::info("%8d %10.4g %10.5g %10.5g %10.5g\n", + checks, precicion, time, 1000.0 * time / testData.rows, dist); + + return precicion; +} + + +template +float test_index_checks(NNIndex& index, const Matrix& inputData, + const Matrix& testData, const Matrix& matches, + int checks, float& precision, const Distance& distance, int nn = 1, int skipMatches = 0) +{ + typedef typename Distance::ResultType DistanceType; + + Logger::info(" Nodes Precision(%) Time(s) Time/vec(ms) Mean dist\n"); + Logger::info("---------------------------------------------------------\n"); + + float time = 0; + DistanceType dist = 0; + precision = search_with_ground_truth(index, inputData, testData, matches, nn, checks, time, dist, distance, skipMatches); + + return time; +} + +template +float test_index_precision(NNIndex& index, const Matrix& inputData, + const Matrix& testData, const Matrix& matches, + float precision, int& checks, const Distance& distance, int nn = 1, int skipMatches = 0) +{ + typedef typename Distance::ResultType DistanceType; + const float SEARCH_EPS = 0.001f; + + Logger::info(" Nodes Precision(%) Time(s) Time/vec(ms) Mean dist\n"); + 
Logger::info("---------------------------------------------------------\n"); + + int c2 = 1; + float p2; + int c1 = 1; + //float p1; + float time; + DistanceType dist; + + p2 = search_with_ground_truth(index, inputData, testData, matches, nn, c2, time, dist, distance, skipMatches); + + if (p2>precision) { + Logger::info("Got as close as I can\n"); + checks = c2; + return time; + } + + while (p2SEARCH_EPS) { + Logger::info("Start linear estimation\n"); + // after we got to values in the vecinity of the desired precision + // use linear approximation get a better estimation + + cx = (c1+c2)/2; + realPrecision = search_with_ground_truth(index, inputData, testData, matches, nn, cx, time, dist, distance, skipMatches); + while (fabs(realPrecision-precision)>SEARCH_EPS) { + + if (realPrecision +void test_index_precisions(NNIndex& index, const Matrix& inputData, + const Matrix& testData, const Matrix& matches, + float* precisions, int precisions_length, const Distance& distance, int nn = 1, int skipMatches = 0, float maxTime = 0) +{ + typedef typename Distance::ResultType DistanceType; + + const float SEARCH_EPS = 0.001; + + // make sure precisions array is sorted + std::sort(precisions, precisions+precisions_length); + + int pindex = 0; + float precision = precisions[pindex]; + + Logger::info(" Nodes Precision(%) Time(s) Time/vec(ms) Mean dist\n"); + Logger::info("---------------------------------------------------------\n"); + + int c2 = 1; + float p2; + + int c1 = 1; + float p1; + + float time; + DistanceType dist; + + p2 = search_with_ground_truth(index, inputData, testData, matches, nn, c2, time, dist, distance, skipMatches); + + // if precision for 1 run down the tree is already + // better then some of the requested precisions, then + // skip those + while (precisions[pindex] 0)&&(time > maxTime)&&(p2SEARCH_EPS) { + Logger::info("Start linear estimation\n"); + // after we got to values in the vecinity of the desired precision + // use linear approximation get a better estimation + + cx = (c1+c2)/2; + realPrecision = search_with_ground_truth(index, inputData, testData, matches, nn, cx, time, dist, distance, skipMatches); + while (fabs(realPrecision-precision)>SEARCH_EPS) { + + if (realPrecision +#include +#include +#include + +#include "general.h" +#include "nn_index.h" +#include "dynamic_bitset.h" +#include "matrix.h" +#include "result_set.h" +#include "heap.h" +#include "allocator.h" +#include "random.h" +#include "saving.h" + + +namespace cvflann +{ + +struct KDTreeIndexParams : public IndexParams +{ + KDTreeIndexParams(int trees = 4) + { + (*this)["algorithm"] = FLANN_INDEX_KDTREE; + (*this)["trees"] = trees; + } +}; + + +/** + * Randomized kd-tree index + * + * Contains the k-d trees and other information for indexing a set of points + * for nearest-neighbor matching. + */ +template +class KDTreeIndex : public NNIndex +{ +public: + typedef typename Distance::ElementType ElementType; + typedef typename Distance::ResultType DistanceType; + + + /** + * KDTree constructor + * + * Params: + * inputData = dataset with the input features + * params = parameters passed to the kdtree algorithm + */ + KDTreeIndex(const Matrix& inputData, const IndexParams& params = KDTreeIndexParams(), + Distance d = Distance() ) : + dataset_(inputData), index_params_(params), distance_(d) + { + size_ = dataset_.rows; + veclen_ = dataset_.cols; + + trees_ = get_param(index_params_,"trees",4); + tree_roots_ = new NodePtr[trees_]; + + // Create a permutable array of indices to the input vectors. 
+ vind_.resize(size_); + for (size_t i = 0; i < size_; ++i) { + vind_[i] = int(i); + } + + mean_ = new DistanceType[veclen_]; + var_ = new DistanceType[veclen_]; + } + + + KDTreeIndex(const KDTreeIndex&); + KDTreeIndex& operator=(const KDTreeIndex&); + + /** + * Standard destructor + */ + ~KDTreeIndex() + { + if (tree_roots_!=NULL) { + delete[] tree_roots_; + } + delete[] mean_; + delete[] var_; + } + + /** + * Dummy implementation for other algorithms of addable indexes after that. + */ + void addIndex(const Matrix& /*wholeData*/, const Matrix& /*additionalData*/) + { + } + + /** + * Builds the index + */ + void buildIndex() + { + /* Construct the randomized trees. */ + for (int i = 0; i < trees_; i++) { + /* Randomize the order of vectors to allow for unbiased sampling. */ + std::random_shuffle(vind_.begin(), vind_.end()); + tree_roots_[i] = divideTree(&vind_[0], int(size_) ); + } + } + + + flann_algorithm_t getType() const + { + return FLANN_INDEX_KDTREE; + } + + + void saveIndex(FILE* stream) + { + save_value(stream, trees_); + for (int i=0; i& result, const ElementType* vec, const SearchParams& searchParams) + { + int maxChecks = get_param(searchParams,"checks", 32); + float epsError = 1+get_param(searchParams,"eps",0.0f); + + if (maxChecks==FLANN_CHECKS_UNLIMITED) { + getExactNeighbors(result, vec, epsError); + } + else { + getNeighbors(result, vec, maxChecks, epsError); + } + } + + IndexParams getParameters() const + { + return index_params_; + } + +private: + + + /*--------------------- Internal Data Structures --------------------------*/ + struct Node + { + /** + * Dimension used for subdivision. + */ + int divfeat; + /** + * The values used for subdivision. + */ + DistanceType divval; + /** + * The child nodes. + */ + Node* child1, * child2; + }; + typedef Node* NodePtr; + typedef BranchStruct BranchSt; + typedef BranchSt* Branch; + + + + void save_tree(FILE* stream, NodePtr tree) + { + save_value(stream, *tree); + if (tree->child1!=NULL) { + save_tree(stream, tree->child1); + } + if (tree->child2!=NULL) { + save_tree(stream, tree->child2); + } + } + + + void load_tree(FILE* stream, NodePtr& tree) + { + tree = pool_.allocate(); + load_value(stream, *tree); + if (tree->child1!=NULL) { + load_tree(stream, tree->child1); + } + if (tree->child2!=NULL) { + load_tree(stream, tree->child2); + } + } + + + /** + * Create a tree node that subdivides the list of vecs from vind[first] + * to vind[last]. The routine is called recursively on each sublist. + * Place a pointer to this new tree node in the location pTree. + * + * Params: pTree = the new node to create + * first = index of the first vector + * last = index of the last vector + */ + NodePtr divideTree(int* ind, int count) + { + NodePtr node = pool_.allocate(); // allocate memory + + /* If too few exemplars remain, then make this a leaf node. */ + if ( count == 1) { + node->child1 = node->child2 = NULL; /* Mark as leaf node. */ + node->divfeat = *ind; /* Store index of this vec. */ + } + else { + int idx; + int cutfeat; + DistanceType cutval; + meanSplit(ind, count, idx, cutfeat, cutval); + + node->divfeat = cutfeat; + node->divval = cutval; + node->child1 = divideTree(ind, idx); + node->child2 = divideTree(ind+idx, count-idx); + } + + return node; + } + + + /** + * Choose which feature to use in order to subdivide this set of vectors. + * Make a random choice among those with the highest variance, and use + * its variance as the threshold value. 
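+ *
+ * Editorial note (illustrative, not part of the upstream sources): the estimate
+ * is deliberately cheap. Only cnt = min(SAMPLE_MEAN + 1, count) points are
+ * sampled, giving roughly
+ *
+ *   mean[k] ~ (1/cnt) * sum_j v_j[k]
+ *   var[k]  ~ sum_j (v_j[k] - mean[k])^2
+ *
+ * and the cut dimension is then drawn at random from among the RAND_DIM
+ * highest-variance dimensions, which is what makes the randomized trees
+ * differ from one another.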
+ */ + void meanSplit(int* ind, int count, int& index, int& cutfeat, DistanceType& cutval) + { + memset(mean_,0,veclen_*sizeof(DistanceType)); + memset(var_,0,veclen_*sizeof(DistanceType)); + + /* Compute mean values. Only the first SAMPLE_MEAN values need to be + sampled to get a good estimate. + */ + int cnt = std::min((int)SAMPLE_MEAN+1, count); + for (int j = 0; j < cnt; ++j) { + ElementType* v = dataset_[ind[j]]; + for (size_t k=0; kcount/2) index = lim1; + else if (lim2 v[topind[num-1]])) { + /* Put this element at end of topind. */ + if (num < RAND_DIM) { + topind[num++] = i; /* Add to list. */ + } + else { + topind[num-1] = i; /* Replace last element. */ + } + /* Bubble end value down to right location by repeated swapping. */ + int j = num - 1; + while (j > 0 && v[topind[j]] > v[topind[j-1]]) { + std::swap(topind[j], topind[j-1]); + --j; + } + } + } + /* Select a random integer in range [0,num-1], and return that index. */ + int rnd = rand_int(num); + return (int)topind[rnd]; + } + + + /** + * Subdivide the list of points by a plane perpendicular on axe corresponding + * to the 'cutfeat' dimension at 'cutval' position. + * + * On return: + * dataset[ind[0..lim1-1]][cutfeat]cutval + */ + void planeSplit(int* ind, int count, int cutfeat, DistanceType cutval, int& lim1, int& lim2) + { + /* Move vector indices for left subtree to front of list. */ + int left = 0; + int right = count-1; + for (;; ) { + while (left<=right && dataset_[ind[left]][cutfeat]=cutval) --right; + if (left>right) break; + std::swap(ind[left], ind[right]); ++left; --right; + } + lim1 = left; + right = count-1; + for (;; ) { + while (left<=right && dataset_[ind[left]][cutfeat]<=cutval) ++left; + while (left<=right && dataset_[ind[right]][cutfeat]>cutval) --right; + if (left>right) break; + std::swap(ind[left], ind[right]); ++left; --right; + } + lim2 = left; + } + + /** + * Performs an exact nearest neighbor search. The exact search performs a full + * traversal of the tree. + */ + void getExactNeighbors(ResultSet& result, const ElementType* vec, float epsError) + { + // checkID -= 1; /* Set a different unique ID for each search. */ + + if (trees_ > 1) { + fprintf(stderr,"It doesn't make any sense to use more than one tree for exact search"); + } + if (trees_>0) { + searchLevelExact(result, vec, tree_roots_[0], 0.0, epsError); + } + assert(result.full()); + } + + /** + * Performs the approximate nearest-neighbor search. The search is approximate + * because the tree traversal is abandoned after a given number of descends in + * the tree. + */ + void getNeighbors(ResultSet& result, const ElementType* vec, int maxCheck, float epsError) + { + int i; + BranchSt branch; + + int checkCount = 0; + Heap* heap = new Heap((int)size_); + DynamicBitset checked(size_); + + /* Search once through each tree down to root. */ + for (i = 0; i < trees_; ++i) { + searchLevel(result, vec, tree_roots_[i], 0, checkCount, maxCheck, epsError, heap, checked); + } + + /* Keep searching other branches from heap until finished. */ + while ( heap->popMin(branch) && (checkCount < maxCheck || !result.full() )) { + searchLevel(result, vec, branch.node, branch.mindist, checkCount, maxCheck, epsError, heap, checked); + } + + delete heap; + + assert(result.full()); + } + + + /** + * Search starting from a given node of the tree. Based on any mismatches at + * higher levels, all exemplars below this level must have a distance of + * at least "mindistsq". 
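+ *
+ * Editorial note (illustrative, not part of the upstream sources): the pruning
+ * rule used below is the usual best-bin-first bound. For the child that is not
+ * descended into, a lower bound on the distance to any point stored under it is
+ *
+ *   new_distsq = mindist + distance_.accum_dist(vec[node->divfeat], node->divval, node->divfeat)
+ *
+ * and that branch is queued on the heap only while
+ * new_distsq * epsError < result_set.worstDist() or the result set is not yet
+ * full, i.e. while it could still contribute a neighbour within the (1+eps)
+ * approximation factor.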
+ */ + void searchLevel(ResultSet& result_set, const ElementType* vec, NodePtr node, DistanceType mindist, int& checkCount, int maxCheck, + float epsError, Heap* heap, DynamicBitset& checked) + { + if (result_set.worstDist()child1 == NULL)&&(node->child2 == NULL)) { + /* Do not check same node more than once when searching multiple trees. + Once a vector is checked, we set its location in vind to the + current checkID. + */ + int index = node->divfeat; + if ( checked.test(index) || ((checkCount>=maxCheck)&& result_set.full()) ) return; + checked.set(index); + checkCount++; + + DistanceType dist = distance_(dataset_[index], vec, veclen_); + result_set.addPoint(dist,index); + + return; + } + + /* Which child branch should be taken first? */ + ElementType val = vec[node->divfeat]; + DistanceType diff = val - node->divval; + NodePtr bestChild = (diff < 0) ? node->child1 : node->child2; + NodePtr otherChild = (diff < 0) ? node->child2 : node->child1; + + /* Create a branch record for the branch not taken. Add distance + of this feature boundary (we don't attempt to correct for any + use of this feature in a parent node, which is unlikely to + happen and would have only a small effect). Don't bother + adding more branches to heap after halfway point, as cost of + adding exceeds their value. + */ + + DistanceType new_distsq = mindist + distance_.accum_dist(val, node->divval, node->divfeat); + // if (2 * checkCount < maxCheck || !result.full()) { + if ((new_distsq*epsError < result_set.worstDist())|| !result_set.full()) { + heap->insert( BranchSt(otherChild, new_distsq) ); + } + + /* Call recursively to search next level down. */ + searchLevel(result_set, vec, bestChild, mindist, checkCount, maxCheck, epsError, heap, checked); + } + + /** + * Performs an exact search in the tree starting from a node. + */ + void searchLevelExact(ResultSet& result_set, const ElementType* vec, const NodePtr node, DistanceType mindist, const float epsError) + { + /* If this is a leaf node, then do check and return. */ + if ((node->child1 == NULL)&&(node->child2 == NULL)) { + int index = node->divfeat; + DistanceType dist = distance_(dataset_[index], vec, veclen_); + result_set.addPoint(dist,index); + return; + } + + /* Which child branch should be taken first? */ + ElementType val = vec[node->divfeat]; + DistanceType diff = val - node->divval; + NodePtr bestChild = (diff < 0) ? node->child1 : node->child2; + NodePtr otherChild = (diff < 0) ? node->child2 : node->child1; + + /* Create a branch record for the branch not taken. Add distance + of this feature boundary (we don't attempt to correct for any + use of this feature in a parent node, which is unlikely to + happen and would have only a small effect). Don't bother + adding more branches to heap after halfway point, as cost of + adding exceeds their value. + */ + + DistanceType new_distsq = mindist + distance_.accum_dist(val, node->divval, node->divfeat); + + /* Call recursively to search next level down. */ + searchLevelExact(result_set, vec, bestChild, mindist, epsError); + + if (new_distsq*epsError<=result_set.worstDist()) { + searchLevelExact(result_set, vec, otherChild, new_distsq, epsError); + } + } + + +private: + + enum + { + /** + * To improve efficiency, only SAMPLE_MEAN random values are used to + * compute the mean and variance at each level when building a tree. + * A value of 100 seems to perform as well as using all values. 
+ */ + SAMPLE_MEAN = 100, + /** + * Top random dimensions to consider + * + * When creating random trees, the dimension on which to subdivide is + * selected at random from among the top RAND_DIM dimensions with the + * highest variance. A value of 5 works well. + */ + RAND_DIM=5 + }; + + + /** + * Number of randomized trees that are used + */ + int trees_; + + /** + * Array of indices to vectors in the dataset. + */ + std::vector vind_; + + /** + * The dataset used by this index + */ + const Matrix dataset_; + + IndexParams index_params_; + + size_t size_; + size_t veclen_; + + + DistanceType* mean_; + DistanceType* var_; + + + /** + * Array of k-d trees used to find neighbours. + */ + NodePtr* tree_roots_; + + /** + * Pooled memory allocator. + * + * Using a pooled memory allocator is more efficient + * than allocating memory directly when there is a large + * number small of memory allocations. + */ + PooledAllocator pool_; + + Distance distance_; + + +}; // class KDTreeForest + +} + +#endif //OPENCV_FLANN_KDTREE_INDEX_H_ diff --git a/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/kdtree_single_index.h b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/kdtree_single_index.h new file mode 100644 index 0000000..252fc4c --- /dev/null +++ b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/kdtree_single_index.h @@ -0,0 +1,641 @@ +/*********************************************************************** + * Software License Agreement (BSD License) + * + * Copyright 2008-2009 Marius Muja (mariusm@cs.ubc.ca). All rights reserved. + * Copyright 2008-2009 David G. Lowe (lowe@cs.ubc.ca). All rights reserved. + * + * THE BSD LICENSE + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ *************************************************************************/ + +#ifndef OPENCV_FLANN_KDTREE_SINGLE_INDEX_H_ +#define OPENCV_FLANN_KDTREE_SINGLE_INDEX_H_ + +#include +#include +#include +#include + +#include "general.h" +#include "nn_index.h" +#include "matrix.h" +#include "result_set.h" +#include "heap.h" +#include "allocator.h" +#include "random.h" +#include "saving.h" + +namespace cvflann +{ + +struct KDTreeSingleIndexParams : public IndexParams +{ + KDTreeSingleIndexParams(int leaf_max_size = 10, bool reorder = true, int dim = -1) + { + (*this)["algorithm"] = FLANN_INDEX_KDTREE_SINGLE; + (*this)["leaf_max_size"] = leaf_max_size; + (*this)["reorder"] = reorder; + (*this)["dim"] = dim; + } +}; + + +/** + * Randomized kd-tree index + * + * Contains the k-d trees and other information for indexing a set of points + * for nearest-neighbor matching. + */ +template +class KDTreeSingleIndex : public NNIndex +{ +public: + typedef typename Distance::ElementType ElementType; + typedef typename Distance::ResultType DistanceType; + + + /** + * KDTree constructor + * + * Params: + * inputData = dataset with the input features + * params = parameters passed to the kdtree algorithm + */ + KDTreeSingleIndex(const Matrix& inputData, const IndexParams& params = KDTreeSingleIndexParams(), + Distance d = Distance() ) : + dataset_(inputData), index_params_(params), distance_(d) + { + size_ = dataset_.rows; + dim_ = dataset_.cols; + int dim_param = get_param(params,"dim",-1); + if (dim_param>0) dim_ = dim_param; + leaf_max_size_ = get_param(params,"leaf_max_size",10); + reorder_ = get_param(params,"reorder",true); + + // Create a permutable array of indices to the input vectors. + vind_.resize(size_); + for (size_t i = 0; i < size_; i++) { + vind_[i] = (int)i; + } + } + + KDTreeSingleIndex(const KDTreeSingleIndex&); + KDTreeSingleIndex& operator=(const KDTreeSingleIndex&); + + /** + * Standard destructor + */ + ~KDTreeSingleIndex() + { + if (reorder_) delete[] data_.data; + } + + /** + * Dummy implementation for other algorithms of addable indexes after that. + */ + void addIndex(const Matrix& /*wholeData*/, const Matrix& /*additionalData*/) + { + } + + /** + * Builds the index + */ + void buildIndex() + { + computeBoundingBox(root_bbox_); + root_node_ = divideTree(0, (int)size_, root_bbox_ ); // construct the tree + + if (reorder_) { + delete[] data_.data; + data_ = cvflann::Matrix(new ElementType[size_*dim_], size_, dim_); + for (size_t i=0; i& queries, Matrix& indices, Matrix& dists, int knn, const SearchParams& params) + { + assert(queries.cols == veclen()); + assert(indices.rows >= queries.rows); + assert(dists.rows >= queries.rows); + assert(int(indices.cols) >= knn); + assert(int(dists.cols) >= knn); + + KNNSimpleResultSet resultSet(knn); + for (size_t i = 0; i < queries.rows; i++) { + resultSet.init(indices[i], dists[i]); + findNeighbors(resultSet, queries[i], params); + } + } + + IndexParams getParameters() const + { + return index_params_; + } + + /** + * Find set of nearest neighbors to vec. Their indices are stored inside + * the result object. 
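+ *
+ * Editorial note: a minimal usage sketch for this index (illustrative only;
+ * points, rows, cols, queries, nq and knn are placeholders, and L2<float>
+ * comes from dist.h):
+ *
+ *   cvflann::Matrix<float> data(points, rows, cols);   // the index keeps a reference, data must outlive it
+ *   cvflann::KDTreeSingleIndex<cvflann::L2<float> > index(data, cvflann::KDTreeSingleIndexParams(10));
+ *   index.buildIndex();
+ *   cvflann::Matrix<int>   indices(new int[nq*knn], nq, knn);
+ *   cvflann::Matrix<float> dists(new float[nq*knn], nq, knn);
+ *   index.knnSearch(queries, indices, dists, knn, cvflann::SearchParams(32));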
+ * + * Params: + * result = the result object in which the indices of the nearest-neighbors are stored + * vec = the vector for which to search the nearest neighbors + * maxCheck = the maximum number of restarts (in a best-bin-first manner) + */ + void findNeighbors(ResultSet& result, const ElementType* vec, const SearchParams& searchParams) + { + float epsError = 1+get_param(searchParams,"eps",0.0f); + + std::vector dists(dim_,0); + DistanceType distsq = computeInitialDistances(vec, dists); + searchLevel(result, vec, root_node_, distsq, dists, epsError); + } + +private: + + + /*--------------------- Internal Data Structures --------------------------*/ + struct Node + { + /** + * Indices of points in leaf node + */ + int left, right; + /** + * Dimension used for subdivision. + */ + int divfeat; + /** + * The values used for subdivision. + */ + DistanceType divlow, divhigh; + /** + * The child nodes. + */ + Node* child1, * child2; + }; + typedef Node* NodePtr; + + + struct Interval + { + DistanceType low, high; + }; + + typedef std::vector BoundingBox; + + typedef BranchStruct BranchSt; + typedef BranchSt* Branch; + + + + + void save_tree(FILE* stream, NodePtr tree) + { + save_value(stream, *tree); + if (tree->child1!=NULL) { + save_tree(stream, tree->child1); + } + if (tree->child2!=NULL) { + save_tree(stream, tree->child2); + } + } + + + void load_tree(FILE* stream, NodePtr& tree) + { + tree = pool_.allocate(); + load_value(stream, *tree); + if (tree->child1!=NULL) { + load_tree(stream, tree->child1); + } + if (tree->child2!=NULL) { + load_tree(stream, tree->child2); + } + } + + + void computeBoundingBox(BoundingBox& bbox) + { + bbox.resize(dim_); + for (size_t i=0; ibbox[i].high) bbox[i].high = (DistanceType)dataset_[k][i]; + } + } + } + + + /** + * Create a tree node that subdivides the list of vecs from vind[first] + * to vind[last]. The routine is called recursively on each sublist. + * Place a pointer to this new tree node in the location pTree. + * + * Params: pTree = the new node to create + * first = index of the first vector + * last = index of the last vector + */ + NodePtr divideTree(int left, int right, BoundingBox& bbox) + { + NodePtr node = pool_.allocate(); // allocate memory + + /* If too few exemplars remain, then make this a leaf node. */ + if ( (right-left) <= leaf_max_size_) { + node->child1 = node->child2 = NULL; /* Mark as leaf node. 
*/ + node->left = left; + node->right = right; + + // compute bounding-box of leaf points + for (size_t i=0; idataset_[vind_[k]][i]) bbox[i].low=(DistanceType)dataset_[vind_[k]][i]; + if (bbox[i].highdivfeat = cutfeat; + + BoundingBox left_bbox(bbox); + left_bbox[cutfeat].high = cutval; + node->child1 = divideTree(left, left+idx, left_bbox); + + BoundingBox right_bbox(bbox); + right_bbox[cutfeat].low = cutval; + node->child2 = divideTree(left+idx, right, right_bbox); + + node->divlow = left_bbox[cutfeat].high; + node->divhigh = right_bbox[cutfeat].low; + + for (size_t i=0; imax_elem) max_elem = val; + } + } + + void middleSplit(int* ind, int count, int& index, int& cutfeat, DistanceType& cutval, const BoundingBox& bbox) + { + // find the largest span from the approximate bounding box + ElementType max_span = bbox[0].high-bbox[0].low; + cutfeat = 0; + cutval = (bbox[0].high+bbox[0].low)/2; + for (size_t i=1; imax_span) { + max_span = span; + cutfeat = i; + cutval = (bbox[i].high+bbox[i].low)/2; + } + } + + // compute exact span on the found dimension + ElementType min_elem, max_elem; + computeMinMax(ind, count, cutfeat, min_elem, max_elem); + cutval = (min_elem+max_elem)/2; + max_span = max_elem - min_elem; + + // check if a dimension of a largest span exists + size_t k = cutfeat; + for (size_t i=0; imax_span) { + computeMinMax(ind, count, i, min_elem, max_elem); + span = max_elem - min_elem; + if (span>max_span) { + max_span = span; + cutfeat = i; + cutval = (min_elem+max_elem)/2; + } + } + } + int lim1, lim2; + planeSplit(ind, count, cutfeat, cutval, lim1, lim2); + + if (lim1>count/2) index = lim1; + else if (lim2max_span) { + max_span = span; + } + } + DistanceType max_spread = -1; + cutfeat = 0; + for (size_t i=0; i(DistanceType)((1-EPS)*max_span)) { + ElementType min_elem, max_elem; + computeMinMax(ind, count, cutfeat, min_elem, max_elem); + DistanceType spread = (DistanceType)(max_elem-min_elem); + if (spread>max_spread) { + cutfeat = (int)i; + max_spread = spread; + } + } + } + // split in the middle + DistanceType split_val = (bbox[cutfeat].low+bbox[cutfeat].high)/2; + ElementType min_elem, max_elem; + computeMinMax(ind, count, cutfeat, min_elem, max_elem); + + if (split_valmax_elem) cutval = (DistanceType)max_elem; + else cutval = split_val; + + int lim1, lim2; + planeSplit(ind, count, cutfeat, cutval, lim1, lim2); + + if (lim1>count/2) index = lim1; + else if (lim2cutval + */ + void planeSplit(int* ind, int count, int cutfeat, DistanceType cutval, int& lim1, int& lim2) + { + /* Move vector indices for left subtree to front of list. */ + int left = 0; + int right = count-1; + for (;; ) { + while (left<=right && dataset_[ind[left]][cutfeat]=cutval) --right; + if (left>right) break; + std::swap(ind[left], ind[right]); ++left; --right; + } + /* If either list is empty, it means that all remaining features + * are identical. Split in the middle to maintain a balanced tree. 
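+ *
+ * Editorial note (illustrative, not part of the upstream sources): when this
+ * routine returns, the indices are partitioned three ways:
+ *
+ *   ind[0 .. lim1-1]    : value <  cutval
+ *   ind[lim1 .. lim2-1] : value == cutval
+ *   ind[lim2 .. count-1]: value >  cutval
+ *
+ * and the callers pick a split position inside [lim1, lim2] as close to
+ * count/2 as possible, which keeps the tree balanced even when many values
+ * are identical.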
+ */ + lim1 = left; + right = count-1; + for (;; ) { + while (left<=right && dataset_[ind[left]][cutfeat]<=cutval) ++left; + while (left<=right && dataset_[ind[right]][cutfeat]>cutval) --right; + if (left>right) break; + std::swap(ind[left], ind[right]); ++left; --right; + } + lim2 = left; + } + + DistanceType computeInitialDistances(const ElementType* vec, std::vector& dists) + { + DistanceType distsq = 0.0; + + for (size_t i = 0; i < dim_; ++i) { + if (vec[i] < root_bbox_[i].low) { + dists[i] = distance_.accum_dist(vec[i], root_bbox_[i].low, (int)i); + distsq += dists[i]; + } + if (vec[i] > root_bbox_[i].high) { + dists[i] = distance_.accum_dist(vec[i], root_bbox_[i].high, (int)i); + distsq += dists[i]; + } + } + + return distsq; + } + + /** + * Performs an exact search in the tree starting from a node. + */ + void searchLevel(ResultSet& result_set, const ElementType* vec, const NodePtr node, DistanceType mindistsq, + std::vector& dists, const float epsError) + { + /* If this is a leaf node, then do check and return. */ + if ((node->child1 == NULL)&&(node->child2 == NULL)) { + DistanceType worst_dist = result_set.worstDist(); + for (int i=node->left; iright; ++i) { + int index = reorder_ ? i : vind_[i]; + DistanceType dist = distance_(vec, data_[index], dim_, worst_dist); + if (distdivfeat; + ElementType val = vec[idx]; + DistanceType diff1 = val - node->divlow; + DistanceType diff2 = val - node->divhigh; + + NodePtr bestChild; + NodePtr otherChild; + DistanceType cut_dist; + if ((diff1+diff2)<0) { + bestChild = node->child1; + otherChild = node->child2; + cut_dist = distance_.accum_dist(val, node->divhigh, idx); + } + else { + bestChild = node->child2; + otherChild = node->child1; + cut_dist = distance_.accum_dist( val, node->divlow, idx); + } + + /* Call recursively to search next level down. */ + searchLevel(result_set, vec, bestChild, mindistsq, dists, epsError); + + DistanceType dst = dists[idx]; + mindistsq = mindistsq + cut_dist - dst; + dists[idx] = cut_dist; + if (mindistsq*epsError<=result_set.worstDist()) { + searchLevel(result_set, vec, otherChild, mindistsq, dists, epsError); + } + dists[idx] = dst; + } + +private: + + /** + * The dataset used by this index + */ + const Matrix dataset_; + + IndexParams index_params_; + + int leaf_max_size_; + bool reorder_; + + + /** + * Array of indices to vectors in the dataset. + */ + std::vector vind_; + + Matrix data_; + + size_t size_; + size_t dim_; + + /** + * Array of k-d trees used to find neighbours. + */ + NodePtr root_node_; + + BoundingBox root_bbox_; + + /** + * Pooled memory allocator. + * + * Using a pooled memory allocator is more efficient + * than allocating memory directly when there is a large + * number small of memory allocations. + */ + PooledAllocator pool_; + + Distance distance_; +}; // class KDTree + +} + +#endif //OPENCV_FLANN_KDTREE_SINGLE_INDEX_H_ diff --git a/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/kmeans_index.h b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/kmeans_index.h new file mode 100644 index 0000000..e119ceb --- /dev/null +++ b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/kmeans_index.h @@ -0,0 +1,1133 @@ +/*********************************************************************** + * Software License Agreement (BSD License) + * + * Copyright 2008-2009 Marius Muja (mariusm@cs.ubc.ca). All rights reserved. + * Copyright 2008-2009 David G. Lowe (lowe@cs.ubc.ca). All rights reserved. 
+ * + * THE BSD LICENSE + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *************************************************************************/ + +#ifndef OPENCV_FLANN_KMEANS_INDEX_H_ +#define OPENCV_FLANN_KMEANS_INDEX_H_ + +#include +#include +#include +#include +#include +#include + +#include "general.h" +#include "nn_index.h" +#include "dist.h" +#include "matrix.h" +#include "result_set.h" +#include "heap.h" +#include "allocator.h" +#include "random.h" +#include "saving.h" +#include "logger.h" + + +namespace cvflann +{ + +struct KMeansIndexParams : public IndexParams +{ + KMeansIndexParams(int branching = 32, int iterations = 11, + flann_centers_init_t centers_init = FLANN_CENTERS_RANDOM, float cb_index = 0.2 ) + { + (*this)["algorithm"] = FLANN_INDEX_KMEANS; + // branching factor + (*this)["branching"] = branching; + // max iterations to perform in one kmeans clustering (kmeans tree) + (*this)["iterations"] = iterations; + // algorithm used for picking the initial cluster centers for kmeans tree + (*this)["centers_init"] = centers_init; + // cluster boundary index. Used when searching the kmeans tree + (*this)["cb_index"] = cb_index; + } +}; + + +/** + * Hierarchical kmeans index + * + * Contains a tree constructed through a hierarchical kmeans clustering + * and other information for indexing a set of points for nearest-neighbour matching. + */ +template +class KMeansIndex : public NNIndex +{ +public: + typedef typename Distance::ElementType ElementType; + typedef typename Distance::ResultType DistanceType; + + + + typedef void (KMeansIndex::* centersAlgFunction)(int, int*, int, int*, int&); + + /** + * The function used for choosing the cluster centers. + */ + centersAlgFunction chooseCenters; + + + + /** + * Chooses the initial centers in the k-means clustering in a random manner. 
+ * + * Params: + * k = number of centers + * vecs = the dataset of points + * indices = indices in the dataset + * indices_length = length of indices vector + * + */ + void chooseCentersRandom(int k, int* indices, int indices_length, int* centers, int& centers_length) + { + UniqueRandom r(indices_length); + + int index; + for (index=0; index=0 && rnd < n); + + centers[0] = indices[rnd]; + + int index; + for (index=1; indexbest_val) { + best_val = dist; + best_index = j; + } + } + if (best_index!=-1) { + centers[index] = indices[best_index]; + } + else { + break; + } + } + centers_length = index; + } + + + /** + * Chooses the initial centers in the k-means using the algorithm + * proposed in the KMeans++ paper: + * Arthur, David; Vassilvitskii, Sergei - k-means++: The Advantages of Careful Seeding + * + * Implementation of this function was converted from the one provided in Arthur's code. + * + * Params: + * k = number of centers + * vecs = the dataset of points + * indices = indices in the dataset + * Returns: + */ + void chooseCentersKMeanspp(int k, int* indices, int indices_length, int* centers, int& centers_length) + { + int n = indices_length; + + double currentPot = 0; + DistanceType* closestDistSq = new DistanceType[n]; + + // Choose one random center and set the closestDistSq values + int index = rand_int(n); + assert(index >=0 && index < n); + centers[0] = indices[index]; + + for (int i = 0; i < n; i++) { + closestDistSq[i] = distance_(dataset_[indices[i]], dataset_[indices[index]], dataset_.cols); + closestDistSq[i] = ensureSquareDistance( closestDistSq[i] ); + currentPot += closestDistSq[i]; + } + + + const int numLocalTries = 1; + + // Choose each center + int centerCount; + for (centerCount = 1; centerCount < k; centerCount++) { + + // Repeat several trials + double bestNewPot = -1; + int bestNewIndex = -1; + for (int localTrial = 0; localTrial < numLocalTries; localTrial++) { + + // Choose our center - have to be slightly careful to return a valid answer even accounting + // for possible rounding errors + double randVal = rand_double(currentPot); + for (index = 0; index < n-1; index++) { + if (randVal <= closestDistSq[index]) break; + else randVal -= closestDistSq[index]; + } + + // Compute the new potential + double newPot = 0; + for (int i = 0; i < n; i++) { + DistanceType dist = distance_(dataset_[indices[i]], dataset_[indices[index]], dataset_.cols); + newPot += std::min( ensureSquareDistance(dist), closestDistSq[i] ); + } + + // Store the best result + if ((bestNewPot < 0)||(newPot < bestNewPot)) { + bestNewPot = newPot; + bestNewIndex = index; + } + } + + // Add the appropriate center + centers[centerCount] = indices[bestNewIndex]; + currentPot = bestNewPot; + for (int i = 0; i < n; i++) { + DistanceType dist = distance_(dataset_[indices[i]], dataset_[indices[bestNewIndex]], dataset_.cols); + closestDistSq[i] = std::min( ensureSquareDistance(dist), closestDistSq[i] ); + } + } + + centers_length = centerCount; + + delete[] closestDistSq; + } + + + +public: + + flann_algorithm_t getType() const + { + return FLANN_INDEX_KMEANS; + } + + /** + * Index constructor + * + * Params: + * inputData = dataset with the input features + * params = parameters passed to the hierarchical k-means algorithm + */ + KMeansIndex(const Matrix& inputData, const IndexParams& params = KMeansIndexParams(), + Distance d = Distance()) + : dataset_(inputData), index_params_(params), root_(NULL), indices_(NULL), distance_(d) + { + memoryCounter_ = 0; + + size_ = dataset_.rows; + veclen_ = 
dataset_.cols; + + branching_ = get_param(params,"branching",32); + iterations_ = get_param(params,"iterations",11); + if (iterations_<0) { + iterations_ = (std::numeric_limits::max)(); + } + centers_init_ = get_param(params,"centers_init",FLANN_CENTERS_RANDOM); + + if (centers_init_==FLANN_CENTERS_RANDOM) { + chooseCenters = &KMeansIndex::chooseCentersRandom; + } + else if (centers_init_==FLANN_CENTERS_GONZALES) { + chooseCenters = &KMeansIndex::chooseCentersGonzales; + } + else if (centers_init_==FLANN_CENTERS_KMEANSPP) { + chooseCenters = &KMeansIndex::chooseCentersKMeanspp; + } + else { + throw FLANNException("Unknown algorithm for choosing initial centers."); + } + cb_index_ = 0.4f; + + } + + + KMeansIndex(const KMeansIndex&); + KMeansIndex& operator=(const KMeansIndex&); + + + /** + * Index destructor. + * + * Release the memory used by the index. + */ + virtual ~KMeansIndex() + { + if (root_ != NULL) { + free_centers(root_); + } + if (indices_!=NULL) { + delete[] indices_; + } + } + + /** + * Returns size of index. + */ + size_t size() const + { + return size_; + } + + /** + * Returns the length of an index feature. + */ + size_t veclen() const + { + return veclen_; + } + + + void set_cb_index( float index) + { + cb_index_ = index; + } + + /** + * Computes the inde memory usage + * Returns: memory used by the index + */ + int usedMemory() const + { + return pool_.usedMemory+pool_.wastedMemory+memoryCounter_; + } + + /** + * Dummy implementation for other algorithms of addable indexes after that. + */ + void addIndex(const Matrix& /*wholeData*/, const Matrix& /*additionalData*/) + { + } + + /** + * Builds the index + */ + void buildIndex() + { + if (branching_<2) { + throw FLANNException("Branching factor must be at least 2"); + } + + indices_ = new int[size_]; + for (size_t i=0; i(); + std::memset(root_, 0, sizeof(KMeansNode)); + + computeNodeStatistics(root_, indices_, (int)size_); + computeClustering(root_, indices_, (int)size_, branching_,0); + } + + + void saveIndex(FILE* stream) + { + save_value(stream, branching_); + save_value(stream, iterations_); + save_value(stream, memoryCounter_); + save_value(stream, cb_index_); + save_value(stream, *indices_, (int)size_); + + save_tree(stream, root_); + } + + + void loadIndex(FILE* stream) + { + load_value(stream, branching_); + load_value(stream, iterations_); + load_value(stream, memoryCounter_); + load_value(stream, cb_index_); + if (indices_!=NULL) { + delete[] indices_; + } + indices_ = new int[size_]; + load_value(stream, *indices_, size_); + + if (root_!=NULL) { + free_centers(root_); + } + load_tree(stream, root_); + + index_params_["algorithm"] = getType(); + index_params_["branching"] = branching_; + index_params_["iterations"] = iterations_; + index_params_["centers_init"] = centers_init_; + index_params_["cb_index"] = cb_index_; + + } + + + /** + * Find set of nearest neighbors to vec. Their indices are stored inside + * the result object. 
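+ *
+ * Editorial note (illustrative, not part of the upstream sources): "checks" is
+ * the main speed/accuracy knob for this index; it bounds how many dataset
+ * points the best-bin-first search may visit before stopping, e.g.
+ *
+ *   SearchParams(64)                        // examine at most ~64 points
+ *   SearchParams(FLANN_CHECKS_UNLIMITED)    // fall back to the exact traversal
+ *
+ * test_index_precision() in index_testing.h (also added by this patch) shows
+ * how a checks value can be calibrated against a target precision.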
+ * + * Params: + * result = the result object in which the indices of the nearest-neighbors are stored + * vec = the vector for which to search the nearest neighbors + * searchParams = parameters that influence the search algorithm (checks, cb_index) + */ + void findNeighbors(ResultSet& result, const ElementType* vec, const SearchParams& searchParams) + { + + int maxChecks = get_param(searchParams,"checks",32); + + if (maxChecks==FLANN_CHECKS_UNLIMITED) { + findExactNN(root_, result, vec); + } + else { + // Priority queue storing intermediate branches in the best-bin-first search + Heap* heap = new Heap((int)size_); + + int checks = 0; + findNN(root_, result, vec, checks, maxChecks, heap); + + BranchSt branch; + while (heap->popMin(branch) && (checks& centers) + { + int numClusters = centers.rows; + if (numClusters<1) { + throw FLANNException("Number of clusters must be at least 1"); + } + + DistanceType variance; + KMeansNodePtr* clusters = new KMeansNodePtr[numClusters]; + + int clusterCount = getMinVarianceClusters(root_, clusters, numClusters, variance); + + Logger::info("Clusters requested: %d, returning %d\n",numClusters, clusterCount); + + for (int i=0; ipivot; + for (size_t j=0; j BranchSt; + + + + + void save_tree(FILE* stream, KMeansNodePtr node) + { + save_value(stream, *node); + save_value(stream, *(node->pivot), (int)veclen_); + if (node->childs==NULL) { + int indices_offset = (int)(node->indices - indices_); + save_value(stream, indices_offset); + } + else { + for(int i=0; ichilds[i]); + } + } + } + + + void load_tree(FILE* stream, KMeansNodePtr& node) + { + node = pool_.allocate(); + load_value(stream, *node); + node->pivot = new DistanceType[veclen_]; + load_value(stream, *(node->pivot), (int)veclen_); + if (node->childs==NULL) { + int indices_offset; + load_value(stream, indices_offset); + node->indices = indices_ + indices_offset; + } + else { + node->childs = pool_.allocate(branching_); + for(int i=0; ichilds[i]); + } + } + } + + + /** + * Helper function + */ + void free_centers(KMeansNodePtr node) + { + delete[] node->pivot; + if (node->childs!=NULL) { + for (int k=0; kchilds[k]); + } + } + } + + /** + * Computes the statistics of a node (mean, radius, variance). 
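+ *
+ * Editorial note (illustrative, not part of the upstream sources): with a
+ * squared distance such as L2 the computation below is the usual identity
+ *
+ *   variance = (1/n) * sum_i ||x_i||^2  -  ||mean||^2
+ *
+ * evaluated with distance_(., ZeroIterator<ElementType>(), veclen_), while the
+ * radius is the largest squared distance from the mean to any point assigned
+ * to the node.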
+ * + * Params: + * node = the node to use + * indices = the indices of the points belonging to the node + */ + void computeNodeStatistics(KMeansNodePtr node, int* indices, int indices_length) + { + + DistanceType radius = 0; + DistanceType variance = 0; + DistanceType* mean = new DistanceType[veclen_]; + memoryCounter_ += int(veclen_*sizeof(DistanceType)); + + memset(mean,0,veclen_*sizeof(DistanceType)); + + for (size_t i=0; i(), veclen_); + } + for (size_t j=0; j(), veclen_); + + DistanceType tmp = 0; + for (int i=0; iradius) { + radius = tmp; + } + } + + node->variance = variance; + node->radius = radius; + node->pivot = mean; + } + + + /** + * The method responsible with actually doing the recursive hierarchical + * clustering + * + * Params: + * node = the node to cluster + * indices = indices of the points belonging to the current node + * branching = the branching factor to use in the clustering + * + * TODO: for 1-sized clusters don't store a cluster center (it's the same as the single cluster point) + */ + void computeClustering(KMeansNodePtr node, int* indices, int indices_length, int branching, int level) + { + node->size = indices_length; + node->level = level; + + if (indices_length < branching) { + node->indices = indices; + std::sort(node->indices,node->indices+indices_length); + node->childs = NULL; + return; + } + + int* centers_idx = new int[branching]; + int centers_length; + (this->*chooseCenters)(branching, indices, indices_length, centers_idx, centers_length); + + if (centers_lengthindices = indices; + std::sort(node->indices,node->indices+indices_length); + node->childs = NULL; + delete [] centers_idx; + return; + } + + + Matrix dcenters(new double[branching*veclen_],branching,veclen_); + for (int i=0; i radiuses(branching); + int* count = new int[branching]; + for (int i=0; inew_sq_dist) { + belongs_to[i] = j; + sq_dist = new_sq_dist; + } + } + if (sq_dist>radiuses[belongs_to[i]]) { + radiuses[belongs_to[i]] = sq_dist; + } + count[belongs_to[i]]++; + } + + bool converged = false; + int iteration = 0; + while (!converged && iterationnew_sq_dist) { + new_centroid = j; + sq_dist = new_sq_dist; + } + } + if (sq_dist>radiuses[new_centroid]) { + radiuses[new_centroid] = sq_dist; + } + if (new_centroid != belongs_to[i]) { + count[belongs_to[i]]--; + count[new_centroid]++; + belongs_to[i] = new_centroid; + + converged = false; + } + } + + for (int i=0; ichilds = pool_.allocate(branching); + int start = 0; + int end = start; + for (int c=0; c(), veclen_); + variance += d; + mean_radius += sqrt(d); + std::swap(indices[i],indices[end]); + std::swap(belongs_to[i],belongs_to[end]); + end++; + } + } + variance /= s; + mean_radius /= s; + variance -= distance_(centers[c], ZeroIterator(), veclen_); + + node->childs[c] = pool_.allocate(); + std::memset(node->childs[c], 0, sizeof(KMeansNode)); + node->childs[c]->radius = radiuses[c]; + node->childs[c]->pivot = centers[c]; + node->childs[c]->variance = variance; + node->childs[c]->mean_radius = mean_radius; + computeClustering(node->childs[c],indices+start, end-start, branching, level+1); + start=end; + } + + delete[] dcenters.data; + delete[] centers; + delete[] count; + delete[] belongs_to; + } + + + + /** + * Performs one descent in the hierarchical k-means tree. The branches not + * visited are stored in a priority queue. 
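+ *
+ * Editorial note (illustrative, not part of the upstream sources): the early
+ * rejection test at the top of this routine is the triangle inequality written
+ * for squared distances. A cluster can safely be skipped when
+ *
+ *   sqrt(bsq) > sqrt(rsq) + sqrt(wsq)
+ *
+ * (query-to-pivot distance exceeds the cluster radius plus the current worst
+ * distance), which, avoiding square roots, is equivalent to
+ * (bsq - rsq - wsq) > 0 together with (bsq - rsq - wsq)^2 > 4*rsq*wsq,
+ * i.e. exactly the "val > 0 && val2 > 0" check below.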
+ * + * Params: + * node = node to explore + * result = container for the k-nearest neighbors found + * vec = query points + * checks = how many points in the dataset have been checked so far + * maxChecks = maximum dataset points to checks + */ + + + void findNN(KMeansNodePtr node, ResultSet& result, const ElementType* vec, int& checks, int maxChecks, + Heap* heap) + { + // Ignore those clusters that are too far away + { + DistanceType bsq = distance_(vec, node->pivot, veclen_); + DistanceType rsq = node->radius; + DistanceType wsq = result.worstDist(); + + DistanceType val = bsq-rsq-wsq; + DistanceType val2 = val*val-4*rsq*wsq; + + //if (val>0) { + if ((val>0)&&(val2>0)) { + return; + } + } + + if (node->childs==NULL) { + if (checks>=maxChecks) { + if (result.full()) return; + } + checks += node->size; + for (int i=0; isize; ++i) { + int index = node->indices[i]; + DistanceType dist = distance_(dataset_[index], vec, veclen_); + result.addPoint(dist, index); + } + } + else { + DistanceType* domain_distances = new DistanceType[branching_]; + int closest_center = exploreNodeBranches(node, vec, domain_distances, heap); + delete[] domain_distances; + findNN(node->childs[closest_center],result,vec, checks, maxChecks, heap); + } + } + + /** + * Helper function that computes the nearest childs of a node to a given query point. + * Params: + * node = the node + * q = the query point + * distances = array with the distances to each child node. + * Returns: + */ + int exploreNodeBranches(KMeansNodePtr node, const ElementType* q, DistanceType* domain_distances, Heap* heap) + { + + int best_index = 0; + domain_distances[best_index] = distance_(q, node->childs[best_index]->pivot, veclen_); + for (int i=1; ichilds[i]->pivot, veclen_); + if (domain_distances[i]childs[best_index]->pivot; + for (int i=0; ichilds[i]->variance; + + // float dist_to_border = getDistanceToBorder(node.childs[i].pivot,best_center,q); + // if (domain_distances[i]insert(BranchSt(node->childs[i],domain_distances[i])); + } + } + + return best_index; + } + + + /** + * Function the performs exact nearest neighbor search by traversing the entire tree. + */ + void findExactNN(KMeansNodePtr node, ResultSet& result, const ElementType* vec) + { + // Ignore those clusters that are too far away + { + DistanceType bsq = distance_(vec, node->pivot, veclen_); + DistanceType rsq = node->radius; + DistanceType wsq = result.worstDist(); + + DistanceType val = bsq-rsq-wsq; + DistanceType val2 = val*val-4*rsq*wsq; + + // if (val>0) { + if ((val>0)&&(val2>0)) { + return; + } + } + + + if (node->childs==NULL) { + for (int i=0; isize; ++i) { + int index = node->indices[i]; + DistanceType dist = distance_(dataset_[index], vec, veclen_); + result.addPoint(dist, index); + } + } + else { + int* sort_indices = new int[branching_]; + + getCenterOrdering(node, vec, sort_indices); + + for (int i=0; ichilds[sort_indices[i]],result,vec); + } + + delete[] sort_indices; + } + } + + + /** + * Helper function. + * + * I computes the order in which to traverse the child nodes of a particular node. 
+ */ + void getCenterOrdering(KMeansNodePtr node, const ElementType* q, int* sort_indices) + { + DistanceType* domain_distances = new DistanceType[branching_]; + for (int i=0; ichilds[i]->pivot, veclen_); + + int j=0; + while (domain_distances[j]j; --k) { + domain_distances[k] = domain_distances[k-1]; + sort_indices[k] = sort_indices[k-1]; + } + domain_distances[j] = dist; + sort_indices[j] = i; + } + delete[] domain_distances; + } + + /** + * Method that computes the squared distance from the query point q + * from inside region with center c to the border between this + * region and the region with center p + */ + DistanceType getDistanceToBorder(DistanceType* p, DistanceType* c, DistanceType* q) + { + DistanceType sum = 0; + DistanceType sum2 = 0; + + for (int i=0; ivariance*root->size; + + while (clusterCount::max)(); + int splitIndex = -1; + + for (int i=0; ichilds != NULL) { + + DistanceType variance = meanVariance - clusters[i]->variance*clusters[i]->size; + + for (int j=0; jchilds[j]->variance*clusters[i]->childs[j]->size; + } + if (variance clusters_length) break; + + meanVariance = minVariance; + + // split node + KMeansNodePtr toSplit = clusters[splitIndex]; + clusters[splitIndex] = toSplit->childs[0]; + for (int i=1; ichilds[i]; + } + } + + varianceValue = meanVariance/root->size; + return clusterCount; + } + +private: + /** The branching factor used in the hierarchical k-means clustering */ + int branching_; + + /** Maximum number of iterations to use when performing k-means clustering */ + int iterations_; + + /** Algorithm for choosing the cluster centers */ + flann_centers_init_t centers_init_; + + /** + * Cluster border index. This is used in the tree search phase when determining + * the closest cluster to explore next. A zero value takes into account only + * the cluster centres, a value greater then zero also take into account the size + * of the cluster. + */ + float cb_index_; + + /** + * The dataset used by this index + */ + const Matrix dataset_; + + /** Index parameters */ + IndexParams index_params_; + + /** + * Number of features in the dataset. + */ + size_t size_; + + /** + * Length of each feature. + */ + size_t veclen_; + + /** + * The root node in the tree. + */ + KMeansNodePtr root_; + + /** + * Array of indices to vectors in the dataset. + */ + int* indices_; + + /** + * The distance + */ + Distance distance_; + + /** + * Pooled memory allocator. + */ + PooledAllocator pool_; + + /** + * Memory occupied by the index. + */ + int memoryCounter_; +}; + +} + +#endif //OPENCV_FLANN_KMEANS_INDEX_H_ diff --git a/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/linear_index.h b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/linear_index.h new file mode 100644 index 0000000..0ea084a --- /dev/null +++ b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/linear_index.h @@ -0,0 +1,139 @@ +/*********************************************************************** + * Software License Agreement (BSD License) + * + * Copyright 2008-2009 Marius Muja (mariusm@cs.ubc.ca). All rights reserved. + * Copyright 2008-2009 David G. Lowe (lowe@cs.ubc.ca). All rights reserved. + * + * THE BSD LICENSE + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *************************************************************************/ + +#ifndef OPENCV_FLANN_LINEAR_INDEX_H_ +#define OPENCV_FLANN_LINEAR_INDEX_H_ + +#include "general.h" +#include "nn_index.h" + +namespace cvflann +{ + +struct LinearIndexParams : public IndexParams +{ + LinearIndexParams() + { + (* this)["algorithm"] = FLANN_INDEX_LINEAR; + } +}; + +template +class LinearIndex : public NNIndex +{ +public: + + typedef typename Distance::ElementType ElementType; + typedef typename Distance::ResultType DistanceType; + + + LinearIndex(const Matrix& inputData, const IndexParams& params = LinearIndexParams(), + Distance d = Distance()) : + dataset_(inputData), index_params_(params), distance_(d) + { + } + + LinearIndex(const LinearIndex&); + LinearIndex& operator=(const LinearIndex&); + + flann_algorithm_t getType() const + { + return FLANN_INDEX_LINEAR; + } + + + size_t size() const + { + return dataset_.rows; + } + + size_t veclen() const + { + return dataset_.cols; + } + + + int usedMemory() const + { + return 0; + } + + /** + * Dummy implementation for other algorithms of addable indexes after that. + */ + void addIndex(const Matrix& /*wholeData*/, const Matrix& /*additionalData*/) + { + } + + void buildIndex() + { + /* nothing to do here for linear search */ + } + + void saveIndex(FILE*) + { + /* nothing to do here for linear search */ + } + + + void loadIndex(FILE*) + { + /* nothing to do here for linear search */ + + index_params_["algorithm"] = getType(); + } + + void findNeighbors(ResultSet& resultSet, const ElementType* vec, const SearchParams& /*searchParams*/) + { + ElementType* data = dataset_.data; + for (size_t i = 0; i < dataset_.rows; ++i, data += dataset_.cols) { + DistanceType dist = distance_(data, vec, dataset_.cols); + resultSet.addPoint(dist, (int)i); + } + } + + IndexParams getParameters() const + { + return index_params_; + } + +private: + /** The dataset */ + const Matrix dataset_; + /** Index parameters */ + IndexParams index_params_; + /** Index distance */ + Distance distance_; + +}; + +} + +#endif // OPENCV_FLANN_LINEAR_INDEX_H_ diff --git a/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/logger.h b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/logger.h new file mode 100644 index 0000000..24f3fb6 --- /dev/null +++ b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/logger.h @@ -0,0 +1,130 @@ +/*********************************************************************** + * Software License Agreement (BSD License) + * + * Copyright 2008-2009 Marius Muja (mariusm@cs.ubc.ca). All rights reserved. 
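LinearIndex::findNeighbors above (in linear_index.h) is a plain brute-force scan: it walks every row of the dataset, computes the distance to the query, and hands each (distance, index) pair to the result set, which keeps the best ones. A standalone sketch of the same idea that returns the k closest rows directly (plain float data and squared L2, not the cvflann result-set types):

#include <vector>
#include <algorithm>
#include <cstddef>
#include <utility>

// Brute-force k-NN over row-major data, mirroring LinearIndex::findNeighbors.
static std::vector<std::pair<float, int> >
linear_knn(const float* data, size_t rows, size_t cols,
           const float* query, size_t k)
{
    std::vector<std::pair<float, int> > best;   // (squared distance, row index)
    for (size_t i = 0; i < rows; ++i) {
        const float* row = data + i * cols;
        float dist = 0;
        for (size_t j = 0; j < cols; ++j) {
            float d = row[j] - query[j];
            dist += d * d;
        }
        best.push_back(std::make_pair(dist, (int)i));
    }
    // Keep only the k closest, sorted by distance.
    if (best.size() > k) {
        std::partial_sort(best.begin(), best.begin() + k, best.end());
        best.resize(k);
    } else {
        std::sort(best.begin(), best.end());
    }
    return best;
}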
+ * Copyright 2008-2009 David G. Lowe (lowe@cs.ubc.ca). All rights reserved. + * + * THE BSD LICENSE + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *************************************************************************/ + +#ifndef OPENCV_FLANN_LOGGER_H +#define OPENCV_FLANN_LOGGER_H + +#include +#include + +#include "defines.h" + + +namespace cvflann +{ + +class Logger +{ + Logger() : stream(stdout), logLevel(FLANN_LOG_WARN) {} + + ~Logger() + { + if ((stream!=NULL)&&(stream!=stdout)) { + fclose(stream); + } + } + + static Logger& instance() + { + static Logger logger; + return logger; + } + + void _setDestination(const char* name) + { + if (name==NULL) { + stream = stdout; + } + else { + stream = fopen(name,"w"); + if (stream == NULL) { + stream = stdout; + } + } + } + + int _log(int level, const char* fmt, va_list arglist) + { + if (level > logLevel ) return -1; + int ret = vfprintf(stream, fmt, arglist); + return ret; + } + +public: + /** + * Sets the logging level. All messages with lower priority will be ignored. + * @param level Logging level + */ + static void setLevel(int level) { instance().logLevel = level; } + + /** + * Sets the logging destination + * @param name Filename or NULL for console + */ + static void setDestination(const char* name) { instance()._setDestination(name); } + + /** + * Print log message + * @param level Log level + * @param fmt Message format + * @return + */ + static int log(int level, const char* fmt, ...) + { + va_list arglist; + va_start(arglist, fmt); + int ret = instance()._log(level,fmt,arglist); + va_end(arglist); + return ret; + } + +#define LOG_METHOD(NAME,LEVEL) \ + static int NAME(const char* fmt, ...) 
\ + { \ + va_list ap; \ + va_start(ap, fmt); \ + int ret = instance()._log(LEVEL, fmt, ap); \ + va_end(ap); \ + return ret; \ + } + + LOG_METHOD(fatal, FLANN_LOG_FATAL) + LOG_METHOD(error, FLANN_LOG_ERROR) + LOG_METHOD(warn, FLANN_LOG_WARN) + LOG_METHOD(info, FLANN_LOG_INFO) + +private: + FILE* stream; + int logLevel; +}; + +} + +#endif //OPENCV_FLANN_LOGGER_H diff --git a/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/lsh_index.h b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/lsh_index.h new file mode 100644 index 0000000..2b89337 --- /dev/null +++ b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/lsh_index.h @@ -0,0 +1,420 @@ +/*********************************************************************** + * Software License Agreement (BSD License) + * + * Copyright 2008-2009 Marius Muja (mariusm@cs.ubc.ca). All rights reserved. + * Copyright 2008-2009 David G. Lowe (lowe@cs.ubc.ca). All rights reserved. + * + * THE BSD LICENSE + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *************************************************************************/ + +/*********************************************************************** + * Author: Vincent Rabaud + *************************************************************************/ + +#ifndef OPENCV_FLANN_LSH_INDEX_H_ +#define OPENCV_FLANN_LSH_INDEX_H_ + +#include +#include +#include +#include +#include + +#include "general.h" +#include "nn_index.h" +#include "matrix.h" +#include "result_set.h" +#include "heap.h" +#include "lsh_table.h" +#include "allocator.h" +#include "random.h" +#include "saving.h" + +namespace cvflann +{ + +struct LshIndexParams : public IndexParams +{ + LshIndexParams(unsigned int table_number = 12, unsigned int key_size = 20, unsigned int multi_probe_level = 2) + { + (* this)["algorithm"] = FLANN_INDEX_LSH; + // The number of hash tables to use + (*this)["table_number"] = table_number; + // The length of the key in the hash tables + (*this)["key_size"] = key_size; + // Number of levels to use in multi-probe (0 for standard LSH) + (*this)["multi_probe_level"] = multi_probe_level; + } +}; + +/** + * Randomized kd-tree index + * + * Contains the k-d trees and other information for indexing a set of points + * for nearest-neighbor matching. 
+ */ +template +class LshIndex : public NNIndex +{ +public: + typedef typename Distance::ElementType ElementType; + typedef typename Distance::ResultType DistanceType; + + /** Constructor + * @param input_data dataset with the input features + * @param params parameters passed to the LSH algorithm + * @param d the distance used + */ + LshIndex(const Matrix& input_data, const IndexParams& params = LshIndexParams(), + Distance d = Distance()) : + dataset_(input_data), index_params_(params), distance_(d) + { + // cv::flann::IndexParams sets integer params as 'int', so it is used with get_param + // in place of 'unsigned int' + table_number_ = (unsigned int)get_param(index_params_,"table_number",12); + key_size_ = (unsigned int)get_param(index_params_,"key_size",20); + multi_probe_level_ = (unsigned int)get_param(index_params_,"multi_probe_level",2); + + feature_size_ = (unsigned)dataset_.cols; + fill_xor_mask(0, key_size_, multi_probe_level_, xor_masks_); + } + + + LshIndex(const LshIndex&); + LshIndex& operator=(const LshIndex&); + + /** + * Implementation for the LSH addable indexes after that. + * @param wholeData whole dataset with the input features + * @param additionalData additional dataset with the input features + */ + void addIndex(const Matrix& wholeData, const Matrix& additionalData) + { + tables_.resize(table_number_); + for (unsigned int i = 0; i < table_number_; ++i) { + lsh::LshTable& table = tables_[i]; + // Add the features to the table with indexed offset + table.add((int)(wholeData.rows - additionalData.rows), additionalData); + } + dataset_ = wholeData; + } + + /** + * Builds the index + */ + void buildIndex() + { + std::vector indices(feature_size_ * CHAR_BIT); + + tables_.resize(table_number_); + for (unsigned int i = 0; i < table_number_; ++i) { + + //re-initialize the random indices table that the LshTable will use to pick its sub-dimensions + if( (indices.size() == feature_size_ * CHAR_BIT) || (indices.size() < key_size_) ) + { + indices.resize( feature_size_ * CHAR_BIT ); + for (size_t j = 0; j < feature_size_ * CHAR_BIT; ++j) + indices[j] = j; + std::random_shuffle(indices.begin(), indices.end()); + } + + lsh::LshTable& table = tables_[i]; + table = lsh::LshTable(feature_size_, key_size_, indices); + + // Add the features to the table with offset 0 + table.add(0, dataset_); + } + } + + flann_algorithm_t getType() const + { + return FLANN_INDEX_LSH; + } + + + void saveIndex(FILE* stream) + { + save_value(stream,table_number_); + save_value(stream,key_size_); + save_value(stream,multi_probe_level_); + save_value(stream, dataset_); + } + + void loadIndex(FILE* stream) + { + load_value(stream, table_number_); + load_value(stream, key_size_); + load_value(stream, multi_probe_level_); + load_value(stream, dataset_); + // Building the index is so fast we can afford not storing it + buildIndex(); + + index_params_["algorithm"] = getType(); + index_params_["table_number"] = table_number_; + index_params_["key_size"] = key_size_; + index_params_["multi_probe_level"] = multi_probe_level_; + } + + /** + * Returns size of index. + */ + size_t size() const + { + return dataset_.rows; + } + + /** + * Returns the length of an index feature. 
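buildIndex above pre-shuffles the pool of all bit positions (feature_size * CHAR_BIT of them) and lets each LshTable consume key_size of them, so different tables tend to hash on different bits; the pool is refilled and reshuffled whenever it cannot supply another full key. A rough standalone sketch of that sampling scheme (using std::mt19937 and std::shuffle instead of std::random_shuffle):

#include <vector>
#include <algorithm>
#include <numeric>
#include <random>
#include <cstddef>

// Draw `tables` sets of `key_size` distinct bit positions out of `total_bits`,
// reshuffling the pool whenever it cannot supply another full key.
static std::vector<std::vector<size_t> >
sample_key_bits(size_t total_bits, size_t key_size, size_t tables, unsigned seed = 1234)
{
    std::mt19937 rng(seed);
    std::vector<size_t> pool;
    std::vector<std::vector<size_t> > keys(tables);

    for (size_t t = 0; t < tables; ++t) {
        if (pool.size() < key_size) {                 // refill and reshuffle
            pool.resize(total_bits);
            std::iota(pool.begin(), pool.end(), (size_t)0);
            std::shuffle(pool.begin(), pool.end(), rng);
        }
        // Consume key_size positions from the front of the pool.
        keys[t].assign(pool.begin(), pool.begin() + key_size);
        pool.erase(pool.begin(), pool.begin() + key_size);
    }
    return keys;
}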
+ */ + size_t veclen() const + { + return feature_size_; + } + + /** + * Computes the index memory usage + * Returns: memory used by the index + */ + int usedMemory() const + { + return (int)(dataset_.rows * sizeof(int)); + } + + + IndexParams getParameters() const + { + return index_params_; + } + + /** + * \brief Perform k-nearest neighbor search + * \param[in] queries The query points for which to find the nearest neighbors + * \param[out] indices The indices of the nearest neighbors found + * \param[out] dists Distances to the nearest neighbors found + * \param[in] knn Number of nearest neighbors to return + * \param[in] params Search parameters + */ + virtual void knnSearch(const Matrix& queries, Matrix& indices, Matrix& dists, int knn, const SearchParams& params) + { + assert(queries.cols == veclen()); + assert(indices.rows >= queries.rows); + assert(dists.rows >= queries.rows); + assert(int(indices.cols) >= knn); + assert(int(dists.cols) >= knn); + + + KNNUniqueResultSet resultSet(knn); + for (size_t i = 0; i < queries.rows; i++) { + resultSet.clear(); + std::fill_n(indices[i], knn, -1); + std::fill_n(dists[i], knn, std::numeric_limits::max()); + findNeighbors(resultSet, queries[i], params); + if (get_param(params,"sorted",true)) resultSet.sortAndCopy(indices[i], dists[i], knn); + else resultSet.copy(indices[i], dists[i], knn); + } + } + + + /** + * Find set of nearest neighbors to vec. Their indices are stored inside + * the result object. + * + * Params: + * result = the result object in which the indices of the nearest-neighbors are stored + * vec = the vector for which to search the nearest neighbors + * maxCheck = the maximum number of restarts (in a best-bin-first manner) + */ + void findNeighbors(ResultSet& result, const ElementType* vec, const SearchParams& /*searchParams*/) + { + getNeighbors(vec, result); + } + +private: + /** Defines the comparator on score and index + */ + typedef std::pair ScoreIndexPair; + struct SortScoreIndexPairOnSecond + { + bool operator()(const ScoreIndexPair& left, const ScoreIndexPair& right) const + { + return left.second < right.second; + } + }; + + /** Fills the different xor masks to use when getting the neighbors in multi-probe LSH + * @param key the key we build neighbors from + * @param lowest_index the lowest index of the bit set + * @param level the multi-probe level we are at + * @param xor_masks all the xor mask + */ + void fill_xor_mask(lsh::BucketKey key, int lowest_index, unsigned int level, + std::vector& xor_masks) + { + xor_masks.push_back(key); + if (level == 0) return; + for (int index = lowest_index - 1; index >= 0; --index) { + // Create a new key + lsh::BucketKey new_key = key | (1 << index); + fill_xor_mask(new_key, index, level - 1, xor_masks); + } + } + + /** Performs the approximate nearest-neighbor search. 
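fill_xor_mask above enumerates, recursively, the bit patterns used by multi-probe LSH: starting from the zero mask it flips one of the bits below lowest_index at each level, so a query probes not only its own bucket key but also keys that differ in up to multi_probe_level bits. A standalone copy of that recursion over plain unsigned keys:

#include <vector>

// Enumerate XOR masks for multi-probe LSH: every mask with at most `level`
// bits set among the `lowest_index` lowest bits, mirroring fill_xor_mask().
static void fill_xor_masks(unsigned key, int lowest_index, unsigned level,
                           std::vector<unsigned>& xor_masks)
{
    xor_masks.push_back(key);
    if (level == 0) return;
    for (int index = lowest_index - 1; index >= 0; --index)
        fill_xor_masks(key | (1u << index), index, level - 1, xor_masks);
}

// Example: for a 20-bit key and multi_probe_level = 2 this produces
// 1 + 20 + 20*19/2 = 211 masks (the bucket itself, all 1-bit flips,
// and all 2-bit flips).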
+ * @param vec the feature to analyze + * @param do_radius flag indicating if we check the radius too + * @param radius the radius if it is a radius search + * @param do_k flag indicating if we limit the number of nn + * @param k_nn the number of nearest neighbors + * @param checked_average used for debugging + */ + void getNeighbors(const ElementType* vec, bool /*do_radius*/, float radius, bool do_k, unsigned int k_nn, + float& /*checked_average*/) + { + static std::vector score_index_heap; + + if (do_k) { + unsigned int worst_score = std::numeric_limits::max(); + typename std::vector >::const_iterator table = tables_.begin(); + typename std::vector >::const_iterator table_end = tables_.end(); + for (; table != table_end; ++table) { + size_t key = table->getKey(vec); + std::vector::const_iterator xor_mask = xor_masks_.begin(); + std::vector::const_iterator xor_mask_end = xor_masks_.end(); + for (; xor_mask != xor_mask_end; ++xor_mask) { + size_t sub_key = key ^ (*xor_mask); + const lsh::Bucket* bucket = table->getBucketFromKey(sub_key); + if (bucket == 0) continue; + + // Go over each descriptor index + std::vector::const_iterator training_index = bucket->begin(); + std::vector::const_iterator last_training_index = bucket->end(); + DistanceType hamming_distance; + + // Process the rest of the candidates + for (; training_index < last_training_index; ++training_index) { + hamming_distance = distance_(vec, dataset_[*training_index], dataset_.cols); + + if (hamming_distance < worst_score) { + // Insert the new element + score_index_heap.push_back(ScoreIndexPair(hamming_distance, training_index)); + std::push_heap(score_index_heap.begin(), score_index_heap.end()); + + if (score_index_heap.size() > (unsigned int)k_nn) { + // Remove the highest distance value as we have too many elements + std::pop_heap(score_index_heap.begin(), score_index_heap.end()); + score_index_heap.pop_back(); + // Keep track of the worst score + worst_score = score_index_heap.front().first; + } + } + } + } + } + } + else { + typename std::vector >::const_iterator table = tables_.begin(); + typename std::vector >::const_iterator table_end = tables_.end(); + for (; table != table_end; ++table) { + size_t key = table->getKey(vec); + std::vector::const_iterator xor_mask = xor_masks_.begin(); + std::vector::const_iterator xor_mask_end = xor_masks_.end(); + for (; xor_mask != xor_mask_end; ++xor_mask) { + size_t sub_key = key ^ (*xor_mask); + const lsh::Bucket* bucket = table->getBucketFromKey(sub_key); + if (bucket == 0) continue; + + // Go over each descriptor index + std::vector::const_iterator training_index = bucket->begin(); + std::vector::const_iterator last_training_index = bucket->end(); + DistanceType hamming_distance; + + // Process the rest of the candidates + for (; training_index < last_training_index; ++training_index) { + // Compute the Hamming distance + hamming_distance = distance_(vec, dataset_[*training_index], dataset_.cols); + if (hamming_distance < radius) score_index_heap.push_back(ScoreIndexPair(hamming_distance, training_index)); + } + } + } + } + } + + /** Performs the approximate nearest-neighbor search. 
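The k-NN branch of getNeighbors above keeps the best candidates in a std::vector managed as a max-heap with std::push_heap/std::pop_heap, so the current worst score is always at the front and can be used to reject candidates early. A standalone sketch of that bounded-heap pattern (std::pair ordering puts the largest distance on top, as in the code above):

#include <vector>
#include <algorithm>
#include <utility>
#include <limits>
#include <cstddef>

// Keep the k smallest (distance, index) pairs seen so far using a max-heap:
// the worst retained distance sits at heap.front().
static void add_candidate(std::vector<std::pair<float, int> >& heap,
                          std::size_t k, float dist, int index)
{
    float worst = heap.size() < k ? std::numeric_limits<float>::max()
                                  : heap.front().first;
    if (dist >= worst) return;                       // cannot improve the result

    heap.push_back(std::make_pair(dist, index));
    std::push_heap(heap.begin(), heap.end());        // default comparator: max-heap on .first
    if (heap.size() > k) {
        std::pop_heap(heap.begin(), heap.end());     // move worst to the back
        heap.pop_back();                             // and drop it
    }
}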
+ * This is a slower version than the above as it uses the ResultSet + * @param vec the feature to analyze + */ + void getNeighbors(const ElementType* vec, ResultSet& result) + { + typename std::vector >::const_iterator table = tables_.begin(); + typename std::vector >::const_iterator table_end = tables_.end(); + for (; table != table_end; ++table) { + size_t key = table->getKey(vec); + std::vector::const_iterator xor_mask = xor_masks_.begin(); + std::vector::const_iterator xor_mask_end = xor_masks_.end(); + for (; xor_mask != xor_mask_end; ++xor_mask) { + size_t sub_key = key ^ (*xor_mask); + const lsh::Bucket* bucket = table->getBucketFromKey((lsh::BucketKey)sub_key); + if (bucket == 0) continue; + + // Go over each descriptor index + std::vector::const_iterator training_index = bucket->begin(); + std::vector::const_iterator last_training_index = bucket->end(); + DistanceType hamming_distance; + + // Process the rest of the candidates + for (; training_index < last_training_index; ++training_index) { + // Compute the Hamming distance + hamming_distance = distance_(vec, dataset_[*training_index], (int)dataset_.cols); + result.addPoint(hamming_distance, *training_index); + } + } + } + } + + /** The different hash tables */ + std::vector > tables_; + + /** The data the LSH tables where built from */ + Matrix dataset_; + + /** The size of the features (as ElementType[]) */ + unsigned int feature_size_; + + IndexParams index_params_; + + /** table number */ + unsigned int table_number_; + /** key size */ + unsigned int key_size_; + /** How far should we look for neighbors in multi-probe LSH */ + unsigned int multi_probe_level_; + + /** The XOR masks to apply to a key to get the neighboring buckets */ + std::vector xor_masks_; + + Distance distance_; +}; +} + +#endif //OPENCV_FLANN_LSH_INDEX_H_ diff --git a/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/lsh_table.h b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/lsh_table.h new file mode 100644 index 0000000..cef01b2 --- /dev/null +++ b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/lsh_table.h @@ -0,0 +1,497 @@ +/*********************************************************************** + * Software License Agreement (BSD License) + * + * Copyright 2008-2009 Marius Muja (mariusm@cs.ubc.ca). All rights reserved. + * Copyright 2008-2009 David G. Lowe (lowe@cs.ubc.ca). All rights reserved. + * + * THE BSD LICENSE + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *************************************************************************/ + +/*********************************************************************** + * Author: Vincent Rabaud + *************************************************************************/ + +#ifndef OPENCV_FLANN_LSH_TABLE_H_ +#define OPENCV_FLANN_LSH_TABLE_H_ + +#include +#include +#include +#include +// TODO as soon as we use C++0x, use the code in USE_UNORDERED_MAP +#ifdef __GXX_EXPERIMENTAL_CXX0X__ +# define USE_UNORDERED_MAP 1 +#else +# define USE_UNORDERED_MAP 0 +#endif +#if USE_UNORDERED_MAP +#include +#else +#include +#endif +#include +#include + +#include "dynamic_bitset.h" +#include "matrix.h" + +namespace cvflann +{ + +namespace lsh +{ + +//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// + +/** What is stored in an LSH bucket + */ +typedef uint32_t FeatureIndex; +/** The id from which we can get a bucket back in an LSH table + */ +typedef unsigned int BucketKey; + +/** A bucket in an LSH table + */ +typedef std::vector Bucket; + +//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// + +/** POD for stats about an LSH table + */ +struct LshStats +{ + std::vector bucket_sizes_; + size_t n_buckets_; + size_t bucket_size_mean_; + size_t bucket_size_median_; + size_t bucket_size_min_; + size_t bucket_size_max_; + size_t bucket_size_std_dev; + /** Each contained vector contains three value: beginning/end for interval, number of elements in the bin + */ + std::vector > size_histogram_; +}; + +/** Overload the << operator for LshStats + * @param out the streams + * @param stats the stats to display + * @return the streams + */ +inline std::ostream& operator <<(std::ostream& out, const LshStats& stats) +{ + int w = 20; + out << "Lsh Table Stats:\n" << std::setw(w) << std::setiosflags(std::ios::right) << "N buckets : " + << stats.n_buckets_ << "\n" << std::setw(w) << std::setiosflags(std::ios::right) << "mean size : " + << std::setiosflags(std::ios::left) << stats.bucket_size_mean_ << "\n" << std::setw(w) + << std::setiosflags(std::ios::right) << "median size : " << stats.bucket_size_median_ << "\n" << std::setw(w) + << std::setiosflags(std::ios::right) << "min size : " << std::setiosflags(std::ios::left) + << stats.bucket_size_min_ << "\n" << std::setw(w) << std::setiosflags(std::ios::right) << "max size : " + << std::setiosflags(std::ios::left) << stats.bucket_size_max_; + + // Display the histogram + out << std::endl << std::setw(w) << std::setiosflags(std::ios::right) << "histogram : " + << std::setiosflags(std::ios::left); + for (std::vector >::const_iterator iterator = stats.size_histogram_.begin(), end = + stats.size_histogram_.end(); iterator != end; ++iterator) out << (*iterator)[0] << "-" << (*iterator)[1] << ": " << (*iterator)[2] << ", "; + + return out; +} + + 
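getStats() (declared further down and specialized for the unsigned char table) fills the LshStats structure printed by the operator<< above: it collects every bucket size, averages them, and takes the median of the sorted sizes. A standalone sketch of those calculations over a plain map of buckets (hypothetical container types, not the LshTable internals):

#include <map>
#include <vector>
#include <algorithm>
#include <cstddef>
#include <stdint.h>

struct SimpleLshStats
{
    std::size_t n_buckets;
    std::size_t bucket_size_mean;
    std::size_t bucket_size_median;
};

// Compute bucket count, mean and median bucket size from a key -> bucket map.
static SimpleLshStats compute_stats(const std::map<unsigned, std::vector<uint32_t> >& buckets)
{
    SimpleLshStats stats = { 0, 0, 0 };
    if (buckets.empty()) return stats;

    std::vector<std::size_t> sizes;
    for (std::map<unsigned, std::vector<uint32_t> >::const_iterator it = buckets.begin();
         it != buckets.end(); ++it) {
        sizes.push_back(it->second.size());
        stats.bucket_size_mean += it->second.size();
    }
    stats.n_buckets = buckets.size();
    stats.bucket_size_mean /= stats.n_buckets;

    std::sort(sizes.begin(), sizes.end());
    stats.bucket_size_median = sizes[sizes.size() / 2];
    return stats;
}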
+//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// + +/** Lsh hash table. As its key is a sub-feature, and as usually + * the size of it is pretty small, we keep it as a continuous memory array. + * The value is an index in the corpus of features (we keep it as an unsigned + * int for pure memory reasons, it could be a size_t) + */ +template +class LshTable +{ +public: + /** A container of all the feature indices. Optimized for space + */ +#if USE_UNORDERED_MAP + typedef std::unordered_map BucketsSpace; +#else + typedef std::map BucketsSpace; +#endif + + /** A container of all the feature indices. Optimized for speed + */ + typedef std::vector BucketsSpeed; + + /** Default constructor + */ + LshTable() + { + } + + /** Default constructor + * Create the mask and allocate the memory + * @param feature_size is the size of the feature (considered as a ElementType[]) + * @param key_size is the number of bits that are turned on in the feature + * @param indices + */ + LshTable(unsigned int feature_size, unsigned int key_size, std::vector & indices) + { + (void)feature_size; + (void)key_size; + (void)indices; + std::cerr << "LSH is not implemented for that type" << std::endl; + assert(0); + } + + /** Add a feature to the table + * @param value the value to store for that feature + * @param feature the feature itself + */ + void add(unsigned int value, const ElementType* feature) + { + // Add the value to the corresponding bucket + BucketKey key = (lsh::BucketKey)getKey(feature); + + switch (speed_level_) { + case kArray: + // That means we get the buckets from an array + buckets_speed_[key].push_back(value); + break; + case kBitsetHash: + // That means we can check the bitset for the presence of a key + key_bitset_.set(key); + buckets_space_[key].push_back(value); + break; + case kHash: + { + // That means we have to check for the hash table for the presence of a key + buckets_space_[key].push_back(value); + break; + } + } + } + + /** Add a set of features to the table + * @param indexed_ofst previous indexed offset + * @param dataset the values to store + */ + void add(int indexed_ofst, Matrix dataset) + { +#if USE_UNORDERED_MAP + buckets_space_.rehash((buckets_space_.size() + dataset.rows) * 1.2); +#endif + // Add the features to the table + for (unsigned int i = 0; i < dataset.rows; ++i) add(i + indexed_ofst, dataset[i]); + // Now that the table is full, optimize it for speed/space + optimize(); + } + + /** Get a bucket given the key + * @param key + * @return + */ + inline const Bucket* getBucketFromKey(BucketKey key) const + { + // Generate other buckets + switch (speed_level_) { + case kArray: + // That means we get the buckets from an array + return &buckets_speed_[key]; + break; + case kBitsetHash: + // That means we can check the bitset for the presence of a key + if (key_bitset_.test(key)) return &buckets_space_.find(key)->second; + else return 0; + break; + case kHash: + { + // That means we have to check for the hash table for the presence of a key + BucketsSpace::const_iterator bucket_it, bucket_end = buckets_space_.end(); + bucket_it = buckets_space_.find(key); + // Stop here if that bucket does not exist + if (bucket_it == bucket_end) return 0; + else return &bucket_it->second; + break; + } + } + return 0; + } + + /** Compute the sub-signature of a feature + */ + size_t getKey(const ElementType* /*feature*/) const + { + std::cerr << "LSH is not implemented for that type" << std::endl; + assert(0); + return 1; + 
} + + /** Get statistics about the table + * @return + */ + LshStats getStats() const; + +private: + /** defines the speed fo the implementation + * kArray uses a vector for storing data + * kBitsetHash uses a hash map but checks for the validity of a key with a bitset + * kHash uses a hash map only + */ + enum SpeedLevel + { + kArray, kBitsetHash, kHash + }; + + /** Initialize some variables + */ + void initialize(size_t key_size) + { + const size_t key_size_lower_bound = 1; + //a value (size_t(1) << key_size) must fit the size_t type so key_size has to be strictly less than size of size_t + const size_t key_size_upper_bound = std::min(sizeof(BucketKey) * CHAR_BIT + 1, sizeof(size_t) * CHAR_BIT); + if (key_size < key_size_lower_bound || key_size >= key_size_upper_bound) + { + std::stringstream errorMessage; + errorMessage << "Invalid key_size (=" << key_size << "). Valid values for your system are " << key_size_lower_bound << " <= key_size < " << key_size_upper_bound << "."; + CV_Error(CV_StsBadArg, errorMessage.str()); + } + + speed_level_ = kHash; + key_size_ = (unsigned)key_size; + } + + /** Optimize the table for speed/space + */ + void optimize() + { + // If we are already using the fast storage, no need to do anything + if (speed_level_ == kArray) return; + + // Use an array if it will be more than half full + if (buckets_space_.size() > ((size_t(1) << key_size_) / 2)) { + speed_level_ = kArray; + // Fill the array version of it + buckets_speed_.resize(size_t(1) << key_size_); + for (BucketsSpace::const_iterator key_bucket = buckets_space_.begin(); key_bucket != buckets_space_.end(); ++key_bucket) buckets_speed_[key_bucket->first] = key_bucket->second; + + // Empty the hash table + buckets_space_.clear(); + return; + } + + // If the bitset is going to use less than 10% of the RAM of the hash map (at least 1 size_t for the key and two + // for the vector) or less than 512MB (key_size_ <= 30) + if (((std::max(buckets_space_.size(), buckets_speed_.size()) * CHAR_BIT * 3 * sizeof(BucketKey)) / 10 + >= (size_t(1) << key_size_)) || (key_size_ <= 32)) { + speed_level_ = kBitsetHash; + key_bitset_.resize(size_t(1) << key_size_); + key_bitset_.reset(); + // Try with the BucketsSpace + for (BucketsSpace::const_iterator key_bucket = buckets_space_.begin(); key_bucket != buckets_space_.end(); ++key_bucket) key_bitset_.set(key_bucket->first); + } + else { + speed_level_ = kHash; + key_bitset_.clear(); + } + } + + /** The vector of all the buckets if they are held for speed + */ + BucketsSpeed buckets_speed_; + + /** The hash table of all the buckets in case we cannot use the speed version + */ + BucketsSpace buckets_space_; + + /** What is used to store the data */ + SpeedLevel speed_level_; + + /** If the subkey is small enough, it will keep track of which subkeys are set through that bitset + * That is just a speedup so that we don't look in the hash table (which can be mush slower that checking a bitset) + */ + DynamicBitset key_bitset_; + + /** The size of the sub-signature in bits + */ + unsigned int key_size_; + + // Members only used for the unsigned char specialization + /** The mask to apply to a feature to get the hash key + * Only used in the unsigned char case + */ + std::vector mask_; +}; + +//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// +// Specialization for unsigned char + +template<> +inline LshTable::LshTable( unsigned int feature_size, + unsigned int subsignature_size, + std::vector & indices ) +{ + 
initialize(subsignature_size); + // Allocate the mask + mask_ = std::vector((size_t)ceil((float)(feature_size * sizeof(char)) / (float)sizeof(size_t)), 0); + + // Generate a random set of order of subsignature_size_ bits + for (unsigned int i = 0; i < key_size_; ++i) { + //Ensure the Nth bit will be selected only once among the different LshTables + //to avoid having two different tables with signatures sharing many dimensions/many bits + size_t index = indices[0]; + indices.erase( indices.begin() ); + + // Set that bit in the mask + size_t divisor = CHAR_BIT * sizeof(size_t); + size_t idx = index / divisor; //pick the right size_t index + mask_[idx] |= size_t(1) << (index % divisor); //use modulo to find the bit offset + } + + // Set to 1 if you want to display the mask for debug +#if 0 + { + size_t bcount = 0; + BOOST_FOREACH(size_t mask_block, mask_){ + out << std::setw(sizeof(size_t) * CHAR_BIT / 4) << std::setfill('0') << std::hex << mask_block + << std::endl; + bcount += __builtin_popcountll(mask_block); + } + out << "bit count : " << std::dec << bcount << std::endl; + out << "mask size : " << mask_.size() << std::endl; + return out; + } +#endif +} + +/** Return the Subsignature of a feature + * @param feature the feature to analyze + */ +template<> +inline size_t LshTable::getKey(const unsigned char* feature) const +{ + // no need to check if T is dividable by sizeof(size_t) like in the Hamming + // distance computation as we have a mask + const size_t* feature_block_ptr = reinterpret_cast ((const void*)feature); + + // Figure out the subsignature of the feature + // Given the feature ABCDEF, and the mask 001011, the output will be + // 000CEF + size_t subsignature = 0; + size_t bit_index = 1; + + for (std::vector::const_iterator pmask_block = mask_.begin(); pmask_block != mask_.end(); ++pmask_block) { + // get the mask and signature blocks + size_t feature_block = *feature_block_ptr; + size_t mask_block = *pmask_block; + while (mask_block) { + // Get the lowest set bit in the mask block + size_t lowest_bit = mask_block & (-(ptrdiff_t)mask_block); + // Add it to the current subsignature if necessary + subsignature += (feature_block & lowest_bit) ? 
bit_index : 0; + // Reset the bit in the mask block + mask_block ^= lowest_bit; + // increment the bit index for the subsignature + bit_index <<= 1; + } + // Check the next feature block + ++feature_block_ptr; + } + return subsignature; +} + +template<> +inline LshStats LshTable::getStats() const +{ + LshStats stats; + stats.bucket_size_mean_ = 0; + if ((buckets_speed_.empty()) && (buckets_space_.empty())) { + stats.n_buckets_ = 0; + stats.bucket_size_median_ = 0; + stats.bucket_size_min_ = 0; + stats.bucket_size_max_ = 0; + return stats; + } + + if (!buckets_speed_.empty()) { + for (BucketsSpeed::const_iterator pbucket = buckets_speed_.begin(); pbucket != buckets_speed_.end(); ++pbucket) { + stats.bucket_sizes_.push_back((lsh::FeatureIndex)pbucket->size()); + stats.bucket_size_mean_ += pbucket->size(); + } + stats.bucket_size_mean_ /= buckets_speed_.size(); + stats.n_buckets_ = buckets_speed_.size(); + } + else { + for (BucketsSpace::const_iterator x = buckets_space_.begin(); x != buckets_space_.end(); ++x) { + stats.bucket_sizes_.push_back((lsh::FeatureIndex)x->second.size()); + stats.bucket_size_mean_ += x->second.size(); + } + stats.bucket_size_mean_ /= buckets_space_.size(); + stats.n_buckets_ = buckets_space_.size(); + } + + std::sort(stats.bucket_sizes_.begin(), stats.bucket_sizes_.end()); + + // BOOST_FOREACH(int size, stats.bucket_sizes_) + // std::cout << size << " "; + // std::cout << std::endl; + stats.bucket_size_median_ = stats.bucket_sizes_[stats.bucket_sizes_.size() / 2]; + stats.bucket_size_min_ = stats.bucket_sizes_.front(); + stats.bucket_size_max_ = stats.bucket_sizes_.back(); + + // TODO compute mean and std + /*float mean, stddev; + stats.bucket_size_mean_ = mean; + stats.bucket_size_std_dev = stddev;*/ + + // Include a histogram of the buckets + unsigned int bin_start = 0; + unsigned int bin_end = 20; + bool is_new_bin = true; + for (std::vector::iterator iterator = stats.bucket_sizes_.begin(), end = stats.bucket_sizes_.end(); iterator + != end; ) + if (*iterator < bin_end) { + if (is_new_bin) { + stats.size_histogram_.push_back(std::vector(3, 0)); + stats.size_histogram_.back()[0] = bin_start; + stats.size_histogram_.back()[1] = bin_end - 1; + is_new_bin = false; + } + ++stats.size_histogram_.back()[2]; + ++iterator; + } + else { + bin_start += 20; + bin_end += 20; + is_new_bin = true; + } + + return stats; +} + +// End the two namespaces +} +} + +//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// + +#endif /* OPENCV_FLANN_LSH_TABLE_H_ */ diff --git a/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/matrix.h b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/matrix.h new file mode 100644 index 0000000..51b6c63 --- /dev/null +++ b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/matrix.h @@ -0,0 +1,116 @@ +/*********************************************************************** + * Software License Agreement (BSD License) + * + * Copyright 2008-2009 Marius Muja (mariusm@cs.ubc.ca). All rights reserved. + * Copyright 2008-2009 David G. Lowe (lowe@cs.ubc.ca). All rights reserved. + * + * THE BSD LICENSE + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *************************************************************************/ + +#ifndef OPENCV_FLANN_DATASET_H_ +#define OPENCV_FLANN_DATASET_H_ + +#include + +#include "general.h" + +namespace cvflann +{ + +/** + * Class that implements a simple rectangular matrix stored in a memory buffer and + * provides convenient matrix-like access using the [] operators. + */ +template +class Matrix +{ +public: + typedef T type; + + size_t rows; + size_t cols; + size_t stride; + T* data; + + Matrix() : rows(0), cols(0), stride(0), data(NULL) + { + } + + Matrix(T* data_, size_t rows_, size_t cols_, size_t stride_ = 0) : + rows(rows_), cols(cols_), stride(stride_), data(data_) + { + if (stride==0) stride = cols; + } + + /** + * Convenience function for deallocating the storage data. + */ + FLANN_DEPRECATED void free() + { + fprintf(stderr, "The cvflann::Matrix::free() method is deprecated " + "and it does not do any memory deallocation any more. You are" + "responsible for deallocating the matrix memory (by doing" + "'delete[] matrix.data' for example)"); + } + + /** + * Operator that return a (pointer to a) row of the data. + */ + T* operator[](size_t index) const + { + return data+index*stride; + } +}; + + +class UntypedMatrix +{ +public: + size_t rows; + size_t cols; + void* data; + flann_datatype_t type; + + UntypedMatrix(void* data_, long rows_, long cols_) : + rows(rows_), cols(cols_), data(data_) + { + } + + ~UntypedMatrix() + { + } + + + template + Matrix as() + { + return Matrix((T*)data, rows, cols); + } +}; + + + +} + +#endif //OPENCV_FLANN_DATASET_H_ diff --git a/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/miniflann.hpp b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/miniflann.hpp new file mode 100644 index 0000000..121f8d0 --- /dev/null +++ b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/miniflann.hpp @@ -0,0 +1,163 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. 
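cvflann::Matrix above is a thin, non-owning view over a row-major buffer: it stores rows, cols, an element stride and a raw data pointer, and operator[] returns a pointer to the start of a row; the caller keeps ownership of the memory (Matrix::free() is deprecated and no longer deallocates anything). A short usage sketch, assuming the opencv2/flann/matrix.h header added by this patch:

#include <vector>
#include "opencv2/flann/matrix.h"

// Wrap an existing float buffer (4 rows x 3 columns) without copying it.
void matrix_view_example()
{
    std::vector<float> storage(4 * 3, 0.0f);
    cvflann::Matrix<float> view(&storage[0], 4, 3);   // data, rows, cols

    view[2][1] = 5.0f;            // operator[] returns a pointer to row 2
    float* row0 = view[0];        // rows are contiguous here, stride == cols
    (void)row0;
    // No cleanup needed for `view`; `storage` still owns the memory.
}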
+// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef _OPENCV_MINIFLANN_HPP_ +#define _OPENCV_MINIFLANN_HPP_ + +#ifdef __cplusplus + +#include "opencv2/core/core.hpp" +#include "opencv2/flann/defines.h" + +namespace cv +{ + +namespace flann +{ + +struct CV_EXPORTS IndexParams +{ + IndexParams(); + ~IndexParams(); + + std::string getString(const std::string& key, const std::string& defaultVal=std::string()) const; + int getInt(const std::string& key, int defaultVal=-1) const; + double getDouble(const std::string& key, double defaultVal=-1) const; + + void setString(const std::string& key, const std::string& value); + void setInt(const std::string& key, int value); + void setDouble(const std::string& key, double value); + void setFloat(const std::string& key, float value); + void setBool(const std::string& key, bool value); + void setAlgorithm(int value); + + void getAll(std::vector& names, + std::vector& types, + std::vector& strValues, + std::vector& numValues) const; + + void* params; +}; + +struct CV_EXPORTS KDTreeIndexParams : public IndexParams +{ + KDTreeIndexParams(int trees=4); +}; + +struct CV_EXPORTS LinearIndexParams : public IndexParams +{ + LinearIndexParams(); +}; + +struct CV_EXPORTS CompositeIndexParams : public IndexParams +{ + CompositeIndexParams(int trees = 4, int branching = 32, int iterations = 11, + cvflann::flann_centers_init_t centers_init = cvflann::FLANN_CENTERS_RANDOM, float cb_index = 0.2f ); +}; + +struct CV_EXPORTS AutotunedIndexParams : public IndexParams +{ + AutotunedIndexParams(float target_precision = 0.8f, float build_weight = 0.01f, + float memory_weight = 0, float sample_fraction = 0.1f); +}; + +struct CV_EXPORTS HierarchicalClusteringIndexParams : public IndexParams +{ + HierarchicalClusteringIndexParams(int branching = 32, + cvflann::flann_centers_init_t centers_init = cvflann::FLANN_CENTERS_RANDOM, int trees = 4, int leaf_size = 100 ); +}; + +struct CV_EXPORTS KMeansIndexParams : public IndexParams +{ + KMeansIndexParams(int branching = 32, int iterations = 11, + cvflann::flann_centers_init_t centers_init = 
cvflann::FLANN_CENTERS_RANDOM, float cb_index = 0.2f ); +}; + +struct CV_EXPORTS LshIndexParams : public IndexParams +{ + LshIndexParams(int table_number, int key_size, int multi_probe_level); +}; + +struct CV_EXPORTS SavedIndexParams : public IndexParams +{ + SavedIndexParams(const std::string& filename); +}; + +struct CV_EXPORTS SearchParams : public IndexParams +{ + SearchParams( int checks = 32, float eps = 0, bool sorted = true ); +}; + +class CV_EXPORTS_W Index +{ +public: + CV_WRAP Index(); + CV_WRAP Index(InputArray features, const IndexParams& params, cvflann::flann_distance_t distType=cvflann::FLANN_DIST_L2); + virtual ~Index(); + + CV_WRAP virtual void build(InputArray wholefeatures, InputArray additionalfeatures, const IndexParams& params, cvflann::flann_distance_t distType=cvflann::FLANN_DIST_L2); + + CV_WRAP virtual void knnSearch(InputArray query, OutputArray indices, + OutputArray dists, int knn, const SearchParams& params=SearchParams()); + + CV_WRAP virtual int radiusSearch(InputArray query, OutputArray indices, + OutputArray dists, double radius, int maxResults, + const SearchParams& params=SearchParams()); + + CV_WRAP virtual void save(const std::string& filename) const; + CV_WRAP virtual bool load(InputArray features, const std::string& filename); + CV_WRAP virtual void release(); + CV_WRAP cvflann::flann_distance_t getDistance() const; + CV_WRAP cvflann::flann_algorithm_t getAlgorithm() const; + +protected: + cvflann::flann_distance_t distType; + cvflann::flann_algorithm_t algo; + int featureType; + void* index; +}; + +} } // namespace cv::flann + +#endif // __cplusplus + +#endif diff --git a/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/nn_index.h b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/nn_index.h new file mode 100644 index 0000000..4a874f5 --- /dev/null +++ b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/nn_index.h @@ -0,0 +1,184 @@ +/*********************************************************************** + * Software License Agreement (BSD License) + * + * Copyright 2008-2009 Marius Muja (mariusm@cs.ubc.ca). All rights reserved. + * Copyright 2008-2009 David G. Lowe (lowe@cs.ubc.ca). All rights reserved. + * + * THE BSD LICENSE + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
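The cv::flann::Index wrapper above is the public entry point that hides the cvflann templates: construct it from a feature matrix plus one of the *IndexParams structs, then call knnSearch or radiusSearch. A hedged usage sketch for binary descriptors with the LSH parameters declared above (matrix shapes and the descriptor source are illustrative; the calls are taken from this header):

#include "opencv2/core/core.hpp"
#include "opencv2/flann/miniflann.hpp"

// Build an LSH index over CV_8U binary descriptors and query it.
void lsh_index_example(const cv::Mat& trainDescriptors,   // NxD, CV_8U
                       const cv::Mat& queryDescriptors)   // MxD, CV_8U
{
    // 12 hash tables, 20-bit keys, multi-probe level 2 (see LshIndexParams above).
    cv::flann::Index index(trainDescriptors,
                           cv::flann::LshIndexParams(12, 20, 2),
                           cvflann::FLANN_DIST_HAMMING);

    const int knn = 2;
    cv::Mat indices, dists;
    index.knnSearch(queryDescriptors, indices, dists, knn,
                    cv::flann::SearchParams(32));   // checks=32 (ignored by LSH)
}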
+ *************************************************************************/ + +#ifndef OPENCV_FLANN_NNINDEX_H +#define OPENCV_FLANN_NNINDEX_H + +#include + +#include "general.h" +#include "matrix.h" +#include "result_set.h" +#include "params.h" + +namespace cvflann +{ + +/** + * Nearest-neighbour index base class + */ +template +class NNIndex +{ + typedef typename Distance::ElementType ElementType; + typedef typename Distance::ResultType DistanceType; + +public: + + virtual ~NNIndex() {} + + /** + * \brief Builds the index + */ + virtual void buildIndex() = 0; + + /** + * \brief implementation for algorithms of addable indexes after that. + */ + virtual void addIndex(const Matrix& wholeData, const Matrix& additionalData) = 0; + + /** + * \brief Perform k-nearest neighbor search + * \param[in] queries The query points for which to find the nearest neighbors + * \param[out] indices The indices of the nearest neighbors found + * \param[out] dists Distances to the nearest neighbors found + * \param[in] knn Number of nearest neighbors to return + * \param[in] params Search parameters + */ + virtual void knnSearch(const Matrix& queries, Matrix& indices, Matrix& dists, int knn, const SearchParams& params) + { + assert(queries.cols == veclen()); + assert(indices.rows >= queries.rows); + assert(dists.rows >= queries.rows); + assert(int(indices.cols) >= knn); + assert(int(dists.cols) >= knn); + +#if 0 + KNNResultSet resultSet(knn); + for (size_t i = 0; i < queries.rows; i++) { + resultSet.init(indices[i], dists[i]); + findNeighbors(resultSet, queries[i], params); + } +#else + KNNUniqueResultSet resultSet(knn); + for (size_t i = 0; i < queries.rows; i++) { + resultSet.clear(); + findNeighbors(resultSet, queries[i], params); + if (get_param(params,"sorted",true)) resultSet.sortAndCopy(indices[i], dists[i], knn); + else resultSet.copy(indices[i], dists[i], knn); + } +#endif + } + + /** + * \brief Perform radius search + * \param[in] query The query point + * \param[out] indices The indinces of the neighbors found within the given radius + * \param[out] dists The distances to the nearest neighbors found + * \param[in] radius The radius used for search + * \param[in] params Search parameters + * \returns Number of neighbors found + */ + virtual int radiusSearch(const Matrix& query, Matrix& indices, Matrix& dists, float radius, const SearchParams& params) + { + if (query.rows != 1) { + fprintf(stderr, "I can only search one feature at a time for range search\n"); + return -1; + } + assert(query.cols == veclen()); + assert(indices.cols == dists.cols); + + int n = 0; + int* indices_ptr = NULL; + DistanceType* dists_ptr = NULL; + if (indices.cols > 0) { + n = (int)indices.cols; + indices_ptr = indices[0]; + dists_ptr = dists[0]; + } + + RadiusUniqueResultSet resultSet((DistanceType)radius); + resultSet.clear(); + findNeighbors(resultSet, query[0], params); + if (n>0) { + if (get_param(params,"sorted",true)) resultSet.sortAndCopy(indices_ptr, dists_ptr, n); + else resultSet.copy(indices_ptr, dists_ptr, n); + } + + return (int)resultSet.size(); + } + + /** + * \brief Saves the index to a stream + * \param stream The stream to save the index to + */ + virtual void saveIndex(FILE* stream) = 0; + + /** + * \brief Loads the index from a stream + * \param stream The stream from which the index is loaded + */ + virtual void loadIndex(FILE* stream) = 0; + + /** + * \returns number of features in this index. + */ + virtual size_t size() const = 0; + + /** + * \returns The dimensionality of the features in this index. 
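radiusSearch in NNIndex above handles a single query at a time: it collects every dataset point within the given radius into a result set, copies at most indices.cols of them out (sorted if requested), and returns the total number found. A standalone sketch with the same contract over plain buffers (squared L2, hypothetical helper name):

#include <vector>
#include <algorithm>
#include <cstddef>
#include <utility>

// Return how many rows lie within `radius` (a squared distance) of the query,
// and write up to `max_out` of them, sorted by distance, into the outputs.
static int radius_search(const float* data, std::size_t rows, std::size_t cols,
                         const float* query, float radius,
                         int* out_indices, float* out_dists, std::size_t max_out)
{
    std::vector<std::pair<float, int> > found;
    for (std::size_t i = 0; i < rows; ++i) {
        const float* row = data + i * cols;
        float dist = 0;
        for (std::size_t j = 0; j < cols; ++j) {
            float d = row[j] - query[j];
            dist += d * d;
        }
        if (dist < radius) found.push_back(std::make_pair(dist, (int)i));
    }
    std::sort(found.begin(), found.end());
    std::size_t n = std::min(found.size(), max_out);
    for (std::size_t k = 0; k < n; ++k) {
        out_indices[k] = found[k].second;
        out_dists[k]   = found[k].first;
    }
    return (int)found.size();   // total neighbors found, as radiusSearch() does
}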
+ */ + virtual size_t veclen() const = 0; + + /** + * \returns The amount of memory (in bytes) used by the index. + */ + virtual int usedMemory() const = 0; + + /** + * \returns The index type (kdtree, kmeans,...) + */ + virtual flann_algorithm_t getType() const = 0; + + /** + * \returns The index parameters + */ + virtual IndexParams getParameters() const = 0; + + + /** + * \brief Method that searches for nearest-neighbours + */ + virtual void findNeighbors(ResultSet& result, const ElementType* vec, const SearchParams& searchParams) = 0; +}; + +} + +#endif //OPENCV_FLANN_NNINDEX_H diff --git a/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/object_factory.h b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/object_factory.h new file mode 100644 index 0000000..7f971c5 --- /dev/null +++ b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/object_factory.h @@ -0,0 +1,91 @@ +/*********************************************************************** + * Software License Agreement (BSD License) + * + * Copyright 2008-2009 Marius Muja (mariusm@cs.ubc.ca). All rights reserved. + * Copyright 2008-2009 David G. Lowe (lowe@cs.ubc.ca). All rights reserved. + * + * THE BSD LICENSE + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ *************************************************************************/ + +#ifndef OPENCV_FLANN_OBJECT_FACTORY_H_ +#define OPENCV_FLANN_OBJECT_FACTORY_H_ + +#include + +namespace cvflann +{ + +class CreatorNotFound +{ +}; + +template +class ObjectFactory +{ + typedef ObjectFactory ThisClass; + typedef std::map ObjectRegistry; + + // singleton class, private constructor + ObjectFactory() {} + +public: + + bool subscribe(UniqueIdType id, ObjectCreator creator) + { + if (object_registry.find(id) != object_registry.end()) return false; + + object_registry[id] = creator; + return true; + } + + bool unregister(UniqueIdType id) + { + return object_registry.erase(id) == 1; + } + + ObjectCreator create(UniqueIdType id) + { + typename ObjectRegistry::const_iterator iter = object_registry.find(id); + + if (iter == object_registry.end()) { + throw CreatorNotFound(); + } + + return iter->second; + } + + static ThisClass& instance() + { + static ThisClass the_factory; + return the_factory; + } +private: + ObjectRegistry object_registry; +}; + +} + +#endif /* OPENCV_FLANN_OBJECT_FACTORY_H_ */ diff --git a/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/params.h b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/params.h new file mode 100644 index 0000000..b40c39e --- /dev/null +++ b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/params.h @@ -0,0 +1,99 @@ +/*********************************************************************** + * Software License Agreement (BSD License) + * + * Copyright 2008-2011 Marius Muja (mariusm@cs.ubc.ca). All rights reserved. + * Copyright 2008-2011 David G. Lowe (lowe@cs.ubc.ca). All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
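A toy sketch (not part of this patch) of the ObjectFactory registry above, assuming its template parameters are the base class, the id type and the creator function type; the Shape hierarchy is invented purely for illustration.

#include "opencv2/flann/object_factory.h"

struct Shape { virtual ~Shape() {} };
struct Circle : Shape {};

Shape* make_circle() { return new Circle; }

void factory_sketch()
{
    typedef cvflann::ObjectFactory<Shape, int, Shape* (*)()> ShapeFactory;

    ShapeFactory& factory = ShapeFactory::instance();  // singleton accessor
    factory.subscribe(1, &make_circle);                // register a creator under id 1
    Shape* s = factory.create(1)();                    // create(1) returns the creator; call it
    delete s;                                          // create(id) throws CreatorNotFound for unknown ids
}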
+ *************************************************************************/ + + +#ifndef OPENCV_FLANN_PARAMS_H_ +#define OPENCV_FLANN_PARAMS_H_ + +#include "any.h" +#include "general.h" +#include +#include + + +namespace cvflann +{ + +typedef std::map IndexParams; + +struct SearchParams : public IndexParams +{ + SearchParams(int checks = 32, float eps = 0, bool sorted = true ) + { + // how many leafs to visit when searching for neighbours (-1 for unlimited) + (*this)["checks"] = checks; + // search for eps-approximate neighbours (default: 0) + (*this)["eps"] = eps; + // only for radius search, require neighbours sorted by distance (default: true) + (*this)["sorted"] = sorted; + } +}; + + +template +T get_param(const IndexParams& params, std::string name, const T& default_value) +{ + IndexParams::const_iterator it = params.find(name); + if (it != params.end()) { + return it->second.cast(); + } + else { + return default_value; + } +} + +template +T get_param(const IndexParams& params, std::string name) +{ + IndexParams::const_iterator it = params.find(name); + if (it != params.end()) { + return it->second.cast(); + } + else { + throw FLANNException(std::string("Missing parameter '")+name+std::string("' in the parameters given")); + } +} + +inline void print_params(const IndexParams& params, std::ostream& stream) +{ + IndexParams::const_iterator it; + + for(it=params.begin(); it!=params.end(); ++it) { + stream << it->first << " : " << it->second << std::endl; + } +} + +inline void print_params(const IndexParams& params) +{ + print_params(params, std::cout); +} + +} + + +#endif /* OPENCV_FLANN_PARAMS_H_ */ diff --git a/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/random.h b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/random.h new file mode 100644 index 0000000..a3cf5ec --- /dev/null +++ b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/random.h @@ -0,0 +1,133 @@ +/*********************************************************************** + * Software License Agreement (BSD License) + * + * Copyright 2008-2009 Marius Muja (mariusm@cs.ubc.ca). All rights reserved. + * Copyright 2008-2009 David G. Lowe (lowe@cs.ubc.ca). All rights reserved. + * + * THE BSD LICENSE + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
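A short sketch (not part of this patch) of the parameter helpers above: SearchParams is just an IndexParams map with three pre-filled keys, and get_param() reads a key back, falling back to the supplied default when the key is missing.

#include <iostream>
#include "opencv2/flann/params.h"

void params_sketch()
{
    cvflann::SearchParams params(64 /*checks*/, 0.0f /*eps*/, true /*sorted*/);

    int   checks = cvflann::get_param(params, "checks", 32);    // -> 64 (stored above)
    bool  sorted = cvflann::get_param(params, "sorted", true);  // -> true
    float radius = cvflann::get_param(params, "radius", 1.0f);  // key absent -> default 1.0f

    cvflann::print_params(params, std::cout);                   // dumps "key : value" pairs
    (void)checks; (void)sorted; (void)radius;
}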
+ *************************************************************************/ + +#ifndef OPENCV_FLANN_RANDOM_H +#define OPENCV_FLANN_RANDOM_H + +#include +#include +#include + +#include "general.h" + +namespace cvflann +{ + +/** + * Seeds the random number generator + * @param seed Random seed + */ +inline void seed_random(unsigned int seed) +{ + srand(seed); +} + +/* + * Generates a random double value. + */ +/** + * Generates a random double value. + * @param high Upper limit + * @param low Lower limit + * @return Random double value + */ +inline double rand_double(double high = 1.0, double low = 0) +{ + return low + ((high-low) * (std::rand() / (RAND_MAX + 1.0))); +} + +/** + * Generates a random integer value. + * @param high Upper limit + * @param low Lower limit + * @return Random integer value + */ +inline int rand_int(int high = RAND_MAX, int low = 0) +{ + return low + (int) ( double(high-low) * (std::rand() / (RAND_MAX + 1.0))); +} + +/** + * Random number generator that returns a distinct number from + * the [0,n) interval each time. + */ +class UniqueRandom +{ + std::vector vals_; + int size_; + int counter_; + +public: + /** + * Constructor. + * @param n Size of the interval from which to generate + * @return + */ + UniqueRandom(int n) + { + init(n); + } + + /** + * Initializes the number generator. + * @param n the size of the interval from which to generate random numbers. + */ + void init(int n) + { + // create and initialize an array of size n + vals_.resize(n); + size_ = n; + for (int i = 0; i < size_; ++i) vals_[i] = i; + + // shuffle the elements in the array + std::random_shuffle(vals_.begin(), vals_.end()); + + counter_ = 0; + } + + /** + * Return a distinct random integer in greater or equal to 0 and less + * than 'n' on each call. It should be called maximum 'n' times. + * Returns: a random integer + */ + int next() + { + if (counter_ == size_) { + return -1; + } + else { + return vals_[counter_++]; + } + } +}; + +} + +#endif //OPENCV_FLANN_RANDOM_H diff --git a/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/result_set.h b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/result_set.h new file mode 100644 index 0000000..9750019 --- /dev/null +++ b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/result_set.h @@ -0,0 +1,543 @@ +/*********************************************************************** + * Software License Agreement (BSD License) + * + * Copyright 2008-2009 Marius Muja (mariusm@cs.ubc.ca). All rights reserved. + * Copyright 2008-2009 David G. Lowe (lowe@cs.ubc.ca). All rights reserved. + * + * THE BSD LICENSE + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
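A small sketch (not part of this patch) of the UniqueRandom generator above: after init(n) it hands out each value in [0, n) exactly once, in shuffled order, and returns -1 once the permutation is exhausted.

#include <cstdio>
#include "opencv2/flann/random.h"

void unique_random_sketch()
{
    cvflann::seed_random(42);        // seed the underlying std::rand()
    cvflann::UniqueRandom r(10);     // a shuffled permutation of 0..9

    for (int v = r.next(); v != -1; v = r.next())
        std::printf("%d ", v);       // each of 0..9 printed exactly once
    std::printf("\n");
}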
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *************************************************************************/ + +#ifndef OPENCV_FLANN_RESULTSET_H +#define OPENCV_FLANN_RESULTSET_H + +#include +#include +#include +#include +#include +#include + +namespace cvflann +{ + +/* This record represents a branch point when finding neighbors in + the tree. It contains a record of the minimum distance to the query + point, as well as the node at which the search resumes. + */ + +template +struct BranchStruct +{ + T node; /* Tree node at which search resumes */ + DistanceType mindist; /* Minimum distance to query for all nodes below. */ + + BranchStruct() {} + BranchStruct(const T& aNode, DistanceType dist) : node(aNode), mindist(dist) {} + + bool operator<(const BranchStruct& rhs) const + { + return mindist +class ResultSet +{ +public: + virtual ~ResultSet() {} + + virtual bool full() const = 0; + + virtual void addPoint(DistanceType dist, int index) = 0; + + virtual DistanceType worstDist() const = 0; + +}; + +/** + * KNNSimpleResultSet does not ensure that the element it holds are unique. + * Is used in those cases where the nearest neighbour algorithm used does not + * attempt to insert the same element multiple times. + */ +template +class KNNSimpleResultSet : public ResultSet +{ + int* indices; + DistanceType* dists; + int capacity; + int count; + DistanceType worst_distance_; + +public: + KNNSimpleResultSet(int capacity_) : capacity(capacity_), count(0) + { + } + + void init(int* indices_, DistanceType* dists_) + { + indices = indices_; + dists = dists_; + count = 0; + worst_distance_ = (std::numeric_limits::max)(); + dists[capacity-1] = worst_distance_; + } + + size_t size() const + { + return count; + } + + bool full() const + { + return count == capacity; + } + + + void addPoint(DistanceType dist, int index) + { + if (dist >= worst_distance_) return; + int i; + for (i=count; i>0; --i) { +#ifdef FLANN_FIRST_MATCH + if ( (dists[i-1]>dist) || ((dist==dists[i-1])&&(indices[i-1]>index)) ) +#else + if (dists[i-1]>dist) +#endif + { + if (i +class KNNResultSet : public ResultSet +{ + int* indices; + DistanceType* dists; + int capacity; + int count; + DistanceType worst_distance_; + +public: + KNNResultSet(int capacity_) : capacity(capacity_), count(0) + { + } + + void init(int* indices_, DistanceType* dists_) + { + indices = indices_; + dists = dists_; + count = 0; + worst_distance_ = (std::numeric_limits::max)(); + dists[capacity-1] = worst_distance_; + } + + size_t size() const + { + return count; + } + + bool full() const + { + return count == capacity; + } + + + void addPoint(DistanceType dist, int index) + { + if (dist >= worst_distance_) return; + int i; + for (i = count; i > 0; --i) { +#ifdef FLANN_FIRST_MATCH + if ( (dists[i-1]<=dist) && ((dist!=dists[i-1])||(indices[i-1]<=index)) ) +#else + if (dists[i-1]<=dist) +#endif + { + // Check for duplicate indices + int j = i - 1; + while ((j >= 0) && (dists[j] == dist)) { + if (indices[j] == index) { + return; + } + --j; + } + break; + } + } + + if (count < capacity) 
++count; + for (int j = count-1; j > i; --j) { + dists[j] = dists[j-1]; + indices[j] = indices[j-1]; + } + dists[i] = dist; + indices[i] = index; + worst_distance_ = dists[capacity-1]; + } + + DistanceType worstDist() const + { + return worst_distance_; + } +}; + + +/** + * A result-set class used when performing a radius based search. + */ +template +class RadiusResultSet : public ResultSet +{ + DistanceType radius; + int* indices; + DistanceType* dists; + size_t capacity; + size_t count; + +public: + RadiusResultSet(DistanceType radius_, int* indices_, DistanceType* dists_, int capacity_) : + radius(radius_), indices(indices_), dists(dists_), capacity(capacity_) + { + init(); + } + + ~RadiusResultSet() + { + } + + void init() + { + count = 0; + } + + size_t size() const + { + return count; + } + + bool full() const + { + return true; + } + + void addPoint(DistanceType dist, int index) + { + if (dist0)&&(count < capacity)) { + dists[count] = dist; + indices[count] = index; + } + count++; + } + } + + DistanceType worstDist() const + { + return radius; + } + +}; + +//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// + +/** Class that holds the k NN neighbors + * Faster than KNNResultSet as it uses a binary heap and does not maintain two arrays + */ +template +class UniqueResultSet : public ResultSet +{ +public: + struct DistIndex + { + DistIndex(DistanceType dist, unsigned int index) : + dist_(dist), index_(index) + { + } + bool operator<(const DistIndex dist_index) const + { + return (dist_ < dist_index.dist_) || ((dist_ == dist_index.dist_) && index_ < dist_index.index_); + } + DistanceType dist_; + unsigned int index_; + }; + + /** Default cosntructor */ + UniqueResultSet() : + worst_distance_(std::numeric_limits::max()) + { + } + + /** Check the status of the set + * @return true if we have k NN + */ + inline bool full() const + { + return is_full_; + } + + /** Remove all elements in the set + */ + virtual void clear() = 0; + + /** Copy the set to two C arrays + * @param indices pointer to a C array of indices + * @param dist pointer to a C array of distances + * @param n_neighbors the number of neighbors to copy + */ + virtual void copy(int* indices, DistanceType* dist, int n_neighbors = -1) const + { + if (n_neighbors < 0) { + for (typename std::set::const_iterator dist_index = dist_indices_.begin(), dist_index_end = + dist_indices_.end(); dist_index != dist_index_end; ++dist_index, ++indices, ++dist) { + *indices = dist_index->index_; + *dist = dist_index->dist_; + } + } + else { + int i = 0; + for (typename std::set::const_iterator dist_index = dist_indices_.begin(), dist_index_end = + dist_indices_.end(); (dist_index != dist_index_end) && (i < n_neighbors); ++dist_index, ++indices, ++dist, ++i) { + *indices = dist_index->index_; + *dist = dist_index->dist_; + } + } + } + + /** Copy the set to two C arrays but sort it according to the distance first + * @param indices pointer to a C array of indices + * @param dist pointer to a C array of distances + * @param n_neighbors the number of neighbors to copy + */ + virtual void sortAndCopy(int* indices, DistanceType* dist, int n_neighbors = -1) const + { + copy(indices, dist, n_neighbors); + } + + /** The number of neighbors in the set + * @return + */ + size_t size() const + { + return dist_indices_.size(); + } + + /** The distance of the furthest neighbor + * If we don't have enough neighbors, it returns the max possible value + * @return + */ + inline DistanceType 
worstDist() const + { + return worst_distance_; + } +protected: + /** Flag to say if the set is full */ + bool is_full_; + + /** The worst distance found so far */ + DistanceType worst_distance_; + + /** The best candidates so far */ + std::set dist_indices_; +}; + +//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// + +/** Class that holds the k NN neighbors + * Faster than KNNResultSet as it uses a binary heap and does not maintain two arrays + */ +template +class KNNUniqueResultSet : public UniqueResultSet +{ +public: + /** Constructor + * @param capacity the number of neighbors to store at max + */ + KNNUniqueResultSet(unsigned int capacity) : capacity_(capacity) + { + this->is_full_ = false; + this->clear(); + } + + /** Add a possible candidate to the best neighbors + * @param dist distance for that neighbor + * @param index index of that neighbor + */ + inline void addPoint(DistanceType dist, int index) + { + // Don't do anything if we are worse than the worst + if (dist >= worst_distance_) return; + dist_indices_.insert(DistIndex(dist, index)); + + if (is_full_) { + if (dist_indices_.size() > capacity_) { + dist_indices_.erase(*dist_indices_.rbegin()); + worst_distance_ = dist_indices_.rbegin()->dist_; + } + } + else if (dist_indices_.size() == capacity_) { + is_full_ = true; + worst_distance_ = dist_indices_.rbegin()->dist_; + } + } + + /** Remove all elements in the set + */ + void clear() + { + dist_indices_.clear(); + worst_distance_ = std::numeric_limits::max(); + is_full_ = false; + } + +protected: + typedef typename UniqueResultSet::DistIndex DistIndex; + using UniqueResultSet::is_full_; + using UniqueResultSet::worst_distance_; + using UniqueResultSet::dist_indices_; + + /** The number of neighbors to keep */ + unsigned int capacity_; +}; + +//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// + +/** Class that holds the radius nearest neighbors + * It is more accurate than RadiusResult as it is not limited in the number of neighbors + */ +template +class RadiusUniqueResultSet : public UniqueResultSet +{ +public: + /** Constructor + * @param radius the maximum distance of a neighbor + */ + RadiusUniqueResultSet(DistanceType radius) : + radius_(radius) + { + is_full_ = true; + } + + /** Add a possible candidate to the best neighbors + * @param dist distance for that neighbor + * @param index index of that neighbor + */ + void addPoint(DistanceType dist, int index) + { + if (dist <= radius_) dist_indices_.insert(DistIndex(dist, index)); + } + + /** Remove all elements in the set + */ + inline void clear() + { + dist_indices_.clear(); + } + + + /** Check the status of the set + * @return alwys false + */ + inline bool full() const + { + return true; + } + + /** The distance of the furthest neighbor + * If we don't have enough neighbors, it returns the max possible value + * @return + */ + inline DistanceType worstDist() const + { + return radius_; + } +private: + typedef typename UniqueResultSet::DistIndex DistIndex; + using UniqueResultSet::dist_indices_; + using UniqueResultSet::is_full_; + + /** The furthest distance a neighbor can be */ + DistanceType radius_; +}; + +//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// + +/** Class that holds the k NN neighbors within a radius distance + */ +template +class KNNRadiusUniqueResultSet : public KNNUniqueResultSet +{ +public: + 
/** Constructor + * @param capacity the number of neighbors to store at max + * @param radius the maximum distance of a neighbor + */ + KNNRadiusUniqueResultSet(unsigned int capacity, DistanceType radius) + { + this->capacity_ = capacity; + this->radius_ = radius; + this->dist_indices_.reserve(capacity_); + this->clear(); + } + + /** Remove all elements in the set + */ + void clear() + { + dist_indices_.clear(); + worst_distance_ = radius_; + is_full_ = false; + } +private: + using KNNUniqueResultSet::dist_indices_; + using KNNUniqueResultSet::is_full_; + using KNNUniqueResultSet::worst_distance_; + + /** The maximum number of neighbors to consider */ + unsigned int capacity_; + + /** The maximum distance of a neighbor */ + DistanceType radius_; +}; +} + +#endif //OPENCV_FLANN_RESULTSET_H diff --git a/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/sampling.h b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/sampling.h new file mode 100644 index 0000000..396f177 --- /dev/null +++ b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/sampling.h @@ -0,0 +1,81 @@ +/*********************************************************************** + * Software License Agreement (BSD License) + * + * Copyright 2008-2009 Marius Muja (mariusm@cs.ubc.ca). All rights reserved. + * Copyright 2008-2009 David G. Lowe (lowe@cs.ubc.ca). All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
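A hand-driven sketch (not part of this patch) of KNNUniqueResultSet, the result set used by NNIndex::knnSearch() above: candidates are offered with addPoint(), only the k closest are retained, and sortAndCopy() writes them out ordered by distance.

#include <cstdio>
#include "opencv2/flann/result_set.h"

void result_set_sketch()
{
    cvflann::KNNUniqueResultSet<float> rs(3);  // keep the 3 smallest distances
    rs.addPoint(0.9f, 7);
    rs.addPoint(0.1f, 2);
    rs.addPoint(0.5f, 4);
    rs.addPoint(0.3f, 9);                      // evicts the (0.9, 7) candidate

    int   indices[3];
    float dists[3];
    rs.sortAndCopy(indices, dists, 3);         // indices = {2, 9, 4}, dists = {0.1, 0.3, 0.5}
    std::printf("%d %d %d\n", indices[0], indices[1], indices[2]);
}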
+ *************************************************************************/ + + +#ifndef OPENCV_FLANN_SAMPLING_H_ +#define OPENCV_FLANN_SAMPLING_H_ + +#include "matrix.h" +#include "random.h" + +namespace cvflann +{ + +template +Matrix random_sample(Matrix& srcMatrix, long size, bool remove = false) +{ + Matrix newSet(new T[size * srcMatrix.cols], size,srcMatrix.cols); + + T* src,* dest; + for (long i=0; i +Matrix random_sample(const Matrix& srcMatrix, size_t size) +{ + UniqueRandom rand((int)srcMatrix.rows); + Matrix newSet(new T[size * srcMatrix.cols], size,srcMatrix.cols); + + T* src,* dest; + for (size_t i=0; i +#include + +#include "general.h" +#include "nn_index.h" + +#ifdef FLANN_SIGNATURE_ +#undef FLANN_SIGNATURE_ +#endif +#define FLANN_SIGNATURE_ "FLANN_INDEX" + +namespace cvflann +{ + +template +struct Datatype {}; +template<> +struct Datatype { static flann_datatype_t type() { return FLANN_INT8; } }; +template<> +struct Datatype { static flann_datatype_t type() { return FLANN_INT16; } }; +template<> +struct Datatype { static flann_datatype_t type() { return FLANN_INT32; } }; +template<> +struct Datatype { static flann_datatype_t type() { return FLANN_UINT8; } }; +template<> +struct Datatype { static flann_datatype_t type() { return FLANN_UINT16; } }; +template<> +struct Datatype { static flann_datatype_t type() { return FLANN_UINT32; } }; +template<> +struct Datatype { static flann_datatype_t type() { return FLANN_FLOAT32; } }; +template<> +struct Datatype { static flann_datatype_t type() { return FLANN_FLOAT64; } }; + + +/** + * Structure representing the index header. + */ +struct IndexHeader +{ + char signature[16]; + char version[16]; + flann_datatype_t data_type; + flann_algorithm_t index_type; + size_t rows; + size_t cols; +}; + +/** + * Saves index header to stream + * + * @param stream - Stream to save to + * @param index - The index to save + */ +template +void save_header(FILE* stream, const NNIndex& index) +{ + IndexHeader header; + memset(header.signature, 0, sizeof(header.signature)); + strcpy(header.signature, FLANN_SIGNATURE_); + memset(header.version, 0, sizeof(header.version)); + strcpy(header.version, FLANN_VERSION_); + header.data_type = Datatype::type(); + header.index_type = index.getType(); + header.rows = index.size(); + header.cols = index.veclen(); + + std::fwrite(&header, sizeof(header),1,stream); +} + + +/** + * + * @param stream - Stream to load from + * @return Index header + */ +inline IndexHeader load_header(FILE* stream) +{ + IndexHeader header; + size_t read_size = fread(&header,sizeof(header),1,stream); + + if (read_size!=(size_t)1) { + throw FLANNException("Invalid index file, cannot read"); + } + + if (strcmp(header.signature,FLANN_SIGNATURE_)!=0) { + throw FLANNException("Invalid index file, wrong signature"); + } + + return header; + +} + + +template +void save_value(FILE* stream, const T& value, size_t count = 1) +{ + fwrite(&value, sizeof(value),count, stream); +} + +template +void save_value(FILE* stream, const cvflann::Matrix& value) +{ + fwrite(&value, sizeof(value),1, stream); + fwrite(value.data, sizeof(T),value.rows*value.cols, stream); +} + +template +void save_value(FILE* stream, const std::vector& value) +{ + size_t size = value.size(); + fwrite(&size, sizeof(size_t), 1, stream); + fwrite(&value[0], sizeof(T), size, stream); +} + +template +void load_value(FILE* stream, T& value, size_t count = 1) +{ + size_t read_cnt = fread(&value, sizeof(value), count, stream); + if (read_cnt != count) { + throw FLANNException("Cannot read 
from file"); + } +} + +template +void load_value(FILE* stream, cvflann::Matrix& value) +{ + size_t read_cnt = fread(&value, sizeof(value), 1, stream); + if (read_cnt != 1) { + throw FLANNException("Cannot read from file"); + } + value.data = new T[value.rows*value.cols]; + read_cnt = fread(value.data, sizeof(T), value.rows*value.cols, stream); + if (read_cnt != (size_t)(value.rows*value.cols)) { + throw FLANNException("Cannot read from file"); + } +} + + +template +void load_value(FILE* stream, std::vector& value) +{ + size_t size; + size_t read_cnt = fread(&size, sizeof(size_t), 1, stream); + if (read_cnt!=1) { + throw FLANNException("Cannot read from file"); + } + value.resize(size); + read_cnt = fread(&value[0], sizeof(T), size, stream); + if (read_cnt != size) { + throw FLANNException("Cannot read from file"); + } +} + +} + +#endif /* OPENCV_FLANN_SAVING_H_ */ diff --git a/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/simplex_downhill.h b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/simplex_downhill.h new file mode 100644 index 0000000..145901a --- /dev/null +++ b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/simplex_downhill.h @@ -0,0 +1,186 @@ +/*********************************************************************** + * Software License Agreement (BSD License) + * + * Copyright 2008-2009 Marius Muja (mariusm@cs.ubc.ca). All rights reserved. + * Copyright 2008-2009 David G. Lowe (lowe@cs.ubc.ca). All rights reserved. + * + * THE BSD LICENSE + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *************************************************************************/ + +#ifndef OPENCV_FLANN_SIMPLEX_DOWNHILL_H_ +#define OPENCV_FLANN_SIMPLEX_DOWNHILL_H_ + +namespace cvflann +{ + +/** + Adds val to array vals (and point to array points) and keeping the arrays sorted by vals. 
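A round-trip sketch (not part of this patch) of the save_value()/load_value() helpers above, which serialize plain values, cvflann::Matrix blocks and std::vectors with raw fwrite/fread; error handling is omitted for brevity.

#include <cstdio>
#include <vector>
#include "opencv2/flann/saving.h"

void saving_sketch(const char* path)
{
    std::vector<float> v(4, 1.5f);

    FILE* out = std::fopen(path, "wb");
    cvflann::save_value(out, v);       // writes the element count, then the raw elements
    std::fclose(out);

    std::vector<float> w;
    FILE* in = std::fopen(path, "rb");
    cvflann::load_value(in, w);        // resizes w and reads the elements back
    std::fclose(in);
}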
+ */ +template +void addValue(int pos, float val, float* vals, T* point, T* points, int n) +{ + vals[pos] = val; + for (int i=0; i0 && vals[j] +float optimizeSimplexDownhill(T* points, int n, F func, float* vals = NULL ) +{ + const int MAX_ITERATIONS = 10; + + assert(n>0); + + T* p_o = new T[n]; + T* p_r = new T[n]; + T* p_e = new T[n]; + + int alpha = 1; + + int iterations = 0; + + bool ownVals = false; + if (vals == NULL) { + ownVals = true; + vals = new float[n+1]; + for (int i=0; i MAX_ITERATIONS) break; + + // compute average of simplex points (except the highest point) + for (int j=0; j=vals[0])&&(val_r=vals[n]) { + for (int i=0; i + + +namespace cvflann +{ + +/** + * A start-stop timer class. + * + * Can be used to time portions of code. + */ +class StartStopTimer +{ + clock_t startTime; + +public: + /** + * Value of the timer. + */ + double value; + + + /** + * Constructor. + */ + StartStopTimer() + { + reset(); + } + + /** + * Starts the timer. + */ + void start() + { + startTime = clock(); + } + + /** + * Stops the timer and updates timer value. + */ + void stop() + { + clock_t stopTime = clock(); + value += ( (double)stopTime - startTime) / CLOCKS_PER_SEC; + } + + /** + * Resets the timer value to 0. + */ + void reset() + { + value = 0; + } + +}; + +} + +#endif // FLANN_TIMER_H diff --git a/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/block.hpp b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/block.hpp new file mode 100644 index 0000000..6cc00ae --- /dev/null +++ b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/block.hpp @@ -0,0 +1,203 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. 
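A trivial sketch (not part of this patch) of the StartStopTimer above: value accumulates elapsed seconds over successive start()/stop() pairs until reset() is called.

#include <cstdio>
#include "opencv2/flann/timer.h"

void timer_sketch()
{
    cvflann::StartStopTimer timer;

    timer.start();
    double s = 0;
    for (int i = 0; i < 1000000; ++i) s += i * 0.5;   // the work being measured
    timer.stop();

    std::printf("elapsed: %f s (sum = %f)\n", timer.value, s);
}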
+// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef __OPENCV_GPU_DEVICE_BLOCK_HPP__ +#define __OPENCV_GPU_DEVICE_BLOCK_HPP__ + +namespace cv { namespace gpu { namespace device +{ + struct Block + { + static __device__ __forceinline__ unsigned int id() + { + return blockIdx.x; + } + + static __device__ __forceinline__ unsigned int stride() + { + return blockDim.x * blockDim.y * blockDim.z; + } + + static __device__ __forceinline__ void sync() + { + __syncthreads(); + } + + static __device__ __forceinline__ int flattenedThreadId() + { + return threadIdx.z * blockDim.x * blockDim.y + threadIdx.y * blockDim.x + threadIdx.x; + } + + template + static __device__ __forceinline__ void fill(It beg, It end, const T& value) + { + int STRIDE = stride(); + It t = beg + flattenedThreadId(); + + for(; t < end; t += STRIDE) + *t = value; + } + + template + static __device__ __forceinline__ void yota(OutIt beg, OutIt end, T value) + { + int STRIDE = stride(); + int tid = flattenedThreadId(); + value += tid; + + for(OutIt t = beg + tid; t < end; t += STRIDE, value += STRIDE) + *t = value; + } + + template + static __device__ __forceinline__ void copy(InIt beg, InIt end, OutIt out) + { + int STRIDE = stride(); + InIt t = beg + flattenedThreadId(); + OutIt o = out + (t - beg); + + for(; t < end; t += STRIDE, o += STRIDE) + *o = *t; + } + + template + static __device__ __forceinline__ void transfrom(InIt beg, InIt end, OutIt out, UnOp op) + { + int STRIDE = stride(); + InIt t = beg + flattenedThreadId(); + OutIt o = out + (t - beg); + + for(; t < end; t += STRIDE, o += STRIDE) + *o = op(*t); + } + + template + static __device__ __forceinline__ void transfrom(InIt1 beg1, InIt1 end1, InIt2 beg2, OutIt out, BinOp op) + { + int STRIDE = stride(); + InIt1 t1 = beg1 + flattenedThreadId(); + InIt2 t2 = beg2 + flattenedThreadId(); + OutIt o = out + (t1 - beg1); + + for(; t1 < end1; t1 += STRIDE, t2 += STRIDE, o += STRIDE) + *o = op(*t1, *t2); + } + + template + static __device__ __forceinline__ void reduce(volatile T* buffer, BinOp op) + { + int tid = flattenedThreadId(); + T val = buffer[tid]; + + if (CTA_SIZE >= 1024) { if (tid < 512) buffer[tid] = val = op(val, buffer[tid + 512]); __syncthreads(); } + if (CTA_SIZE >= 512) { if (tid < 256) buffer[tid] = val = op(val, buffer[tid + 256]); __syncthreads(); } + if (CTA_SIZE >= 256) { if (tid < 128) buffer[tid] = val = op(val, buffer[tid + 128]); __syncthreads(); } + if (CTA_SIZE >= 128) { if (tid < 64) buffer[tid] = val = op(val, buffer[tid + 64]); __syncthreads(); } + + if (tid < 32) + { + if (CTA_SIZE >= 64) { buffer[tid] = val = op(val, buffer[tid + 32]); } + if (CTA_SIZE >= 32) { buffer[tid] = val = op(val, buffer[tid + 16]); } + if (CTA_SIZE >= 16) { buffer[tid] = val = op(val, buffer[tid + 8]); } + if (CTA_SIZE >= 8) { buffer[tid] = val = op(val, buffer[tid + 4]); } + if (CTA_SIZE >= 4) { buffer[tid] = val = op(val, buffer[tid + 2]); } + if (CTA_SIZE >= 2) { buffer[tid] = val = op(val, buffer[tid + 1]); } + } + } + + template + static __device__ 
__forceinline__ T reduce(volatile T* buffer, T init, BinOp op) + { + int tid = flattenedThreadId(); + T val = buffer[tid] = init; + __syncthreads(); + + if (CTA_SIZE >= 1024) { if (tid < 512) buffer[tid] = val = op(val, buffer[tid + 512]); __syncthreads(); } + if (CTA_SIZE >= 512) { if (tid < 256) buffer[tid] = val = op(val, buffer[tid + 256]); __syncthreads(); } + if (CTA_SIZE >= 256) { if (tid < 128) buffer[tid] = val = op(val, buffer[tid + 128]); __syncthreads(); } + if (CTA_SIZE >= 128) { if (tid < 64) buffer[tid] = val = op(val, buffer[tid + 64]); __syncthreads(); } + + if (tid < 32) + { + if (CTA_SIZE >= 64) { buffer[tid] = val = op(val, buffer[tid + 32]); } + if (CTA_SIZE >= 32) { buffer[tid] = val = op(val, buffer[tid + 16]); } + if (CTA_SIZE >= 16) { buffer[tid] = val = op(val, buffer[tid + 8]); } + if (CTA_SIZE >= 8) { buffer[tid] = val = op(val, buffer[tid + 4]); } + if (CTA_SIZE >= 4) { buffer[tid] = val = op(val, buffer[tid + 2]); } + if (CTA_SIZE >= 2) { buffer[tid] = val = op(val, buffer[tid + 1]); } + } + __syncthreads(); + return buffer[0]; + } + + template + static __device__ __forceinline__ void reduce_n(T* data, unsigned int n, BinOp op) + { + int ftid = flattenedThreadId(); + int sft = stride(); + + if (sft < n) + { + for (unsigned int i = sft + ftid; i < n; i += sft) + data[ftid] = op(data[ftid], data[i]); + + __syncthreads(); + + n = sft; + } + + while (n > 1) + { + unsigned int half = n/2; + + if (ftid < half) + data[ftid] = op(data[ftid], data[n - ftid - 1]); + + __syncthreads(); + + n = n - half; + } + } + }; +}}} + +#endif /* __OPENCV_GPU_DEVICE_BLOCK_HPP__ */ diff --git a/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/border_interpolate.hpp b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/border_interpolate.hpp new file mode 100644 index 0000000..693ba21 --- /dev/null +++ b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/border_interpolate.hpp @@ -0,0 +1,714 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. 
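Block::reduce() above is device-only code; the plain C++ analogue below (not part of this patch) shows the same tree reduction sequentially: each pass folds the upper half of the buffer into the lower half until the combined result sits in buffer[0].

#include <cstdio>
#include <functional>

template <int CTA_SIZE, typename T, typename BinOp>
T block_reduce_host(T* buffer, BinOp op)
{
    for (int active = CTA_SIZE / 2; active > 0; active /= 2)      // halve the active range each pass
        for (int tid = 0; tid < active; ++tid)                    // what each thread of the block would do
            buffer[tid] = op(buffer[tid], buffer[tid + active]);
    return buffer[0];
}

void reduce_sketch()
{
    float data[8] = { 3, 1, 4, 1, 5, 9, 2, 6 };
    float sum = block_reduce_host<8>(data, std::plus<float>());   // 31
    std::printf("sum = %f\n", sum);
}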
+// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef __OPENCV_GPU_BORDER_INTERPOLATE_HPP__ +#define __OPENCV_GPU_BORDER_INTERPOLATE_HPP__ + +#include "saturate_cast.hpp" +#include "vec_traits.hpp" +#include "vec_math.hpp" + +namespace cv { namespace gpu { namespace device +{ + ////////////////////////////////////////////////////////////// + // BrdConstant + + template struct BrdRowConstant + { + typedef D result_type; + + explicit __host__ __device__ __forceinline__ BrdRowConstant(int width_, const D& val_ = VecTraits::all(0)) : width(width_), val(val_) {} + + template __device__ __forceinline__ D at_low(int x, const T* data) const + { + return x >= 0 ? saturate_cast(data[x]) : val; + } + + template __device__ __forceinline__ D at_high(int x, const T* data) const + { + return x < width ? saturate_cast(data[x]) : val; + } + + template __device__ __forceinline__ D at(int x, const T* data) const + { + return (x >= 0 && x < width) ? saturate_cast(data[x]) : val; + } + + const int width; + const D val; + }; + + template struct BrdColConstant + { + typedef D result_type; + + explicit __host__ __device__ __forceinline__ BrdColConstant(int height_, const D& val_ = VecTraits::all(0)) : height(height_), val(val_) {} + + template __device__ __forceinline__ D at_low(int y, const T* data, size_t step) const + { + return y >= 0 ? saturate_cast(*(const T*)((const char*)data + y * step)) : val; + } + + template __device__ __forceinline__ D at_high(int y, const T* data, size_t step) const + { + return y < height ? saturate_cast(*(const T*)((const char*)data + y * step)) : val; + } + + template __device__ __forceinline__ D at(int y, const T* data, size_t step) const + { + return (y >= 0 && y < height) ? saturate_cast(*(const T*)((const char*)data + y * step)) : val; + } + + const int height; + const D val; + }; + + template struct BrdConstant + { + typedef D result_type; + + __host__ __device__ __forceinline__ BrdConstant(int height_, int width_, const D& val_ = VecTraits::all(0)) : height(height_), width(width_), val(val_) + { + } + + template __device__ __forceinline__ D at(int y, int x, const T* data, size_t step) const + { + return (x >= 0 && x < width && y >= 0 && y < height) ? saturate_cast(((const T*)((const uchar*)data + y * step))[x]) : val; + } + + template __device__ __forceinline__ D at(typename Ptr2D::index_type y, typename Ptr2D::index_type x, const Ptr2D& src) const + { + return (x >= 0 && x < width && y >= 0 && y < height) ? 
saturate_cast(src(y, x)) : val; + } + + const int height; + const int width; + const D val; + }; + + ////////////////////////////////////////////////////////////// + // BrdReplicate + + template struct BrdRowReplicate + { + typedef D result_type; + + explicit __host__ __device__ __forceinline__ BrdRowReplicate(int width) : last_col(width - 1) {} + template __host__ __device__ __forceinline__ BrdRowReplicate(int width, U) : last_col(width - 1) {} + + __device__ __forceinline__ int idx_col_low(int x) const + { + return ::max(x, 0); + } + + __device__ __forceinline__ int idx_col_high(int x) const + { + return ::min(x, last_col); + } + + __device__ __forceinline__ int idx_col(int x) const + { + return idx_col_low(idx_col_high(x)); + } + + template __device__ __forceinline__ D at_low(int x, const T* data) const + { + return saturate_cast(data[idx_col_low(x)]); + } + + template __device__ __forceinline__ D at_high(int x, const T* data) const + { + return saturate_cast(data[idx_col_high(x)]); + } + + template __device__ __forceinline__ D at(int x, const T* data) const + { + return saturate_cast(data[idx_col(x)]); + } + + const int last_col; + }; + + template struct BrdColReplicate + { + typedef D result_type; + + explicit __host__ __device__ __forceinline__ BrdColReplicate(int height) : last_row(height - 1) {} + template __host__ __device__ __forceinline__ BrdColReplicate(int height, U) : last_row(height - 1) {} + + __device__ __forceinline__ int idx_row_low(int y) const + { + return ::max(y, 0); + } + + __device__ __forceinline__ int idx_row_high(int y) const + { + return ::min(y, last_row); + } + + __device__ __forceinline__ int idx_row(int y) const + { + return idx_row_low(idx_row_high(y)); + } + + template __device__ __forceinline__ D at_low(int y, const T* data, size_t step) const + { + return saturate_cast(*(const T*)((const char*)data + idx_row_low(y) * step)); + } + + template __device__ __forceinline__ D at_high(int y, const T* data, size_t step) const + { + return saturate_cast(*(const T*)((const char*)data + idx_row_high(y) * step)); + } + + template __device__ __forceinline__ D at(int y, const T* data, size_t step) const + { + return saturate_cast(*(const T*)((const char*)data + idx_row(y) * step)); + } + + const int last_row; + }; + + template struct BrdReplicate + { + typedef D result_type; + + __host__ __device__ __forceinline__ BrdReplicate(int height, int width) : last_row(height - 1), last_col(width - 1) {} + template __host__ __device__ __forceinline__ BrdReplicate(int height, int width, U) : last_row(height - 1), last_col(width - 1) {} + + __device__ __forceinline__ int idx_row_low(int y) const + { + return ::max(y, 0); + } + + __device__ __forceinline__ int idx_row_high(int y) const + { + return ::min(y, last_row); + } + + __device__ __forceinline__ int idx_row(int y) const + { + return idx_row_low(idx_row_high(y)); + } + + __device__ __forceinline__ int idx_col_low(int x) const + { + return ::max(x, 0); + } + + __device__ __forceinline__ int idx_col_high(int x) const + { + return ::min(x, last_col); + } + + __device__ __forceinline__ int idx_col(int x) const + { + return idx_col_low(idx_col_high(x)); + } + + template __device__ __forceinline__ D at(int y, int x, const T* data, size_t step) const + { + return saturate_cast(((const T*)((const char*)data + idx_row(y) * step))[idx_col(x)]); + } + + template __device__ __forceinline__ D at(typename Ptr2D::index_type y, typename Ptr2D::index_type x, const Ptr2D& src) const + { + return saturate_cast(src(idx_row(y), idx_col(x))); + 
} + + const int last_row; + const int last_col; + }; + + ////////////////////////////////////////////////////////////// + // BrdReflect101 + + template struct BrdRowReflect101 + { + typedef D result_type; + + explicit __host__ __device__ __forceinline__ BrdRowReflect101(int width) : last_col(width - 1) {} + template __host__ __device__ __forceinline__ BrdRowReflect101(int width, U) : last_col(width - 1) {} + + __device__ __forceinline__ int idx_col_low(int x) const + { + return ::abs(x) % (last_col + 1); + } + + __device__ __forceinline__ int idx_col_high(int x) const + { + return ::abs(last_col - ::abs(last_col - x)) % (last_col + 1); + } + + __device__ __forceinline__ int idx_col(int x) const + { + return idx_col_low(idx_col_high(x)); + } + + template __device__ __forceinline__ D at_low(int x, const T* data) const + { + return saturate_cast(data[idx_col_low(x)]); + } + + template __device__ __forceinline__ D at_high(int x, const T* data) const + { + return saturate_cast(data[idx_col_high(x)]); + } + + template __device__ __forceinline__ D at(int x, const T* data) const + { + return saturate_cast(data[idx_col(x)]); + } + + const int last_col; + }; + + template struct BrdColReflect101 + { + typedef D result_type; + + explicit __host__ __device__ __forceinline__ BrdColReflect101(int height) : last_row(height - 1) {} + template __host__ __device__ __forceinline__ BrdColReflect101(int height, U) : last_row(height - 1) {} + + __device__ __forceinline__ int idx_row_low(int y) const + { + return ::abs(y) % (last_row + 1); + } + + __device__ __forceinline__ int idx_row_high(int y) const + { + return ::abs(last_row - ::abs(last_row - y)) % (last_row + 1); + } + + __device__ __forceinline__ int idx_row(int y) const + { + return idx_row_low(idx_row_high(y)); + } + + template __device__ __forceinline__ D at_low(int y, const T* data, size_t step) const + { + return saturate_cast(*(const D*)((const char*)data + idx_row_low(y) * step)); + } + + template __device__ __forceinline__ D at_high(int y, const T* data, size_t step) const + { + return saturate_cast(*(const D*)((const char*)data + idx_row_high(y) * step)); + } + + template __device__ __forceinline__ D at(int y, const T* data, size_t step) const + { + return saturate_cast(*(const D*)((const char*)data + idx_row(y) * step)); + } + + const int last_row; + }; + + template struct BrdReflect101 + { + typedef D result_type; + + __host__ __device__ __forceinline__ BrdReflect101(int height, int width) : last_row(height - 1), last_col(width - 1) {} + template __host__ __device__ __forceinline__ BrdReflect101(int height, int width, U) : last_row(height - 1), last_col(width - 1) {} + + __device__ __forceinline__ int idx_row_low(int y) const + { + return ::abs(y) % (last_row + 1); + } + + __device__ __forceinline__ int idx_row_high(int y) const + { + return ::abs(last_row - ::abs(last_row - y)) % (last_row + 1); + } + + __device__ __forceinline__ int idx_row(int y) const + { + return idx_row_low(idx_row_high(y)); + } + + __device__ __forceinline__ int idx_col_low(int x) const + { + return ::abs(x) % (last_col + 1); + } + + __device__ __forceinline__ int idx_col_high(int x) const + { + return ::abs(last_col - ::abs(last_col - x)) % (last_col + 1); + } + + __device__ __forceinline__ int idx_col(int x) const + { + return idx_col_low(idx_col_high(x)); + } + + template __device__ __forceinline__ D at(int y, int x, const T* data, size_t step) const + { + return saturate_cast(((const T*)((const char*)data + idx_row(y) * step))[idx_col(x)]); + } + + template 
__device__ __forceinline__ D at(typename Ptr2D::index_type y, typename Ptr2D::index_type x, const Ptr2D& src) const + { + return saturate_cast(src(idx_row(y), idx_col(x))); + } + + const int last_row; + const int last_col; + }; + + ////////////////////////////////////////////////////////////// + // BrdReflect + + template struct BrdRowReflect + { + typedef D result_type; + + explicit __host__ __device__ __forceinline__ BrdRowReflect(int width) : last_col(width - 1) {} + template __host__ __device__ __forceinline__ BrdRowReflect(int width, U) : last_col(width - 1) {} + + __device__ __forceinline__ int idx_col_low(int x) const + { + return (::abs(x) - (x < 0)) % (last_col + 1); + } + + __device__ __forceinline__ int idx_col_high(int x) const + { + return ::abs(last_col - ::abs(last_col - x) + (x > last_col)) % (last_col + 1); + } + + __device__ __forceinline__ int idx_col(int x) const + { + return idx_col_high(::abs(x) - (x < 0)); + } + + template __device__ __forceinline__ D at_low(int x, const T* data) const + { + return saturate_cast(data[idx_col_low(x)]); + } + + template __device__ __forceinline__ D at_high(int x, const T* data) const + { + return saturate_cast(data[idx_col_high(x)]); + } + + template __device__ __forceinline__ D at(int x, const T* data) const + { + return saturate_cast(data[idx_col(x)]); + } + + const int last_col; + }; + + template struct BrdColReflect + { + typedef D result_type; + + explicit __host__ __device__ __forceinline__ BrdColReflect(int height) : last_row(height - 1) {} + template __host__ __device__ __forceinline__ BrdColReflect(int height, U) : last_row(height - 1) {} + + __device__ __forceinline__ int idx_row_low(int y) const + { + return (::abs(y) - (y < 0)) % (last_row + 1); + } + + __device__ __forceinline__ int idx_row_high(int y) const + { + return ::abs(last_row - ::abs(last_row - y) + (y > last_row)) % (last_row + 1); + } + + __device__ __forceinline__ int idx_row(int y) const + { + return idx_row_high(::abs(y) - (y < 0)); + } + + template __device__ __forceinline__ D at_low(int y, const T* data, size_t step) const + { + return saturate_cast(*(const D*)((const char*)data + idx_row_low(y) * step)); + } + + template __device__ __forceinline__ D at_high(int y, const T* data, size_t step) const + { + return saturate_cast(*(const D*)((const char*)data + idx_row_high(y) * step)); + } + + template __device__ __forceinline__ D at(int y, const T* data, size_t step) const + { + return saturate_cast(*(const D*)((const char*)data + idx_row(y) * step)); + } + + const int last_row; + }; + + template struct BrdReflect + { + typedef D result_type; + + __host__ __device__ __forceinline__ BrdReflect(int height, int width) : last_row(height - 1), last_col(width - 1) {} + template __host__ __device__ __forceinline__ BrdReflect(int height, int width, U) : last_row(height - 1), last_col(width - 1) {} + + __device__ __forceinline__ int idx_row_low(int y) const + { + return (::abs(y) - (y < 0)) % (last_row + 1); + } + + __device__ __forceinline__ int idx_row_high(int y) const + { + return /*::abs*/(last_row - ::abs(last_row - y) + (y > last_row)) /*% (last_row + 1)*/; + } + + __device__ __forceinline__ int idx_row(int y) const + { + return idx_row_low(idx_row_high(y)); + } + + __device__ __forceinline__ int idx_col_low(int x) const + { + return (::abs(x) - (x < 0)) % (last_col + 1); + } + + __device__ __forceinline__ int idx_col_high(int x) const + { + return (last_col - ::abs(last_col - x) + (x > last_col)); + } + + __device__ __forceinline__ int idx_col(int x) const + { 
+ return idx_col_low(idx_col_high(x)); + } + + template __device__ __forceinline__ D at(int y, int x, const T* data, size_t step) const + { + return saturate_cast(((const T*)((const char*)data + idx_row(y) * step))[idx_col(x)]); + } + + template __device__ __forceinline__ D at(typename Ptr2D::index_type y, typename Ptr2D::index_type x, const Ptr2D& src) const + { + return saturate_cast(src(idx_row(y), idx_col(x))); + } + + const int last_row; + const int last_col; + }; + + ////////////////////////////////////////////////////////////// + // BrdWrap + + template struct BrdRowWrap + { + typedef D result_type; + + explicit __host__ __device__ __forceinline__ BrdRowWrap(int width_) : width(width_) {} + template __host__ __device__ __forceinline__ BrdRowWrap(int width_, U) : width(width_) {} + + __device__ __forceinline__ int idx_col_low(int x) const + { + return (x >= 0) * x + (x < 0) * (x - ((x - width + 1) / width) * width); + } + + __device__ __forceinline__ int idx_col_high(int x) const + { + return (x < width) * x + (x >= width) * (x % width); + } + + __device__ __forceinline__ int idx_col(int x) const + { + return idx_col_high(idx_col_low(x)); + } + + template __device__ __forceinline__ D at_low(int x, const T* data) const + { + return saturate_cast(data[idx_col_low(x)]); + } + + template __device__ __forceinline__ D at_high(int x, const T* data) const + { + return saturate_cast(data[idx_col_high(x)]); + } + + template __device__ __forceinline__ D at(int x, const T* data) const + { + return saturate_cast(data[idx_col(x)]); + } + + const int width; + }; + + template struct BrdColWrap + { + typedef D result_type; + + explicit __host__ __device__ __forceinline__ BrdColWrap(int height_) : height(height_) {} + template __host__ __device__ __forceinline__ BrdColWrap(int height_, U) : height(height_) {} + + __device__ __forceinline__ int idx_row_low(int y) const + { + return (y >= 0) * y + (y < 0) * (y - ((y - height + 1) / height) * height); + } + + __device__ __forceinline__ int idx_row_high(int y) const + { + return (y < height) * y + (y >= height) * (y % height); + } + + __device__ __forceinline__ int idx_row(int y) const + { + return idx_row_high(idx_row_low(y)); + } + + template __device__ __forceinline__ D at_low(int y, const T* data, size_t step) const + { + return saturate_cast(*(const D*)((const char*)data + idx_row_low(y) * step)); + } + + template __device__ __forceinline__ D at_high(int y, const T* data, size_t step) const + { + return saturate_cast(*(const D*)((const char*)data + idx_row_high(y) * step)); + } + + template __device__ __forceinline__ D at(int y, const T* data, size_t step) const + { + return saturate_cast(*(const D*)((const char*)data + idx_row(y) * step)); + } + + const int height; + }; + + template struct BrdWrap + { + typedef D result_type; + + __host__ __device__ __forceinline__ BrdWrap(int height_, int width_) : + height(height_), width(width_) + { + } + template + __host__ __device__ __forceinline__ BrdWrap(int height_, int width_, U) : + height(height_), width(width_) + { + } + + __device__ __forceinline__ int idx_row_low(int y) const + { + return (y >= 0) ? y : (y - ((y - height + 1) / height) * height); + } + + __device__ __forceinline__ int idx_row_high(int y) const + { + return (y < height) ? y : (y % height); + } + + __device__ __forceinline__ int idx_row(int y) const + { + return idx_row_high(idx_row_low(y)); + } + + __device__ __forceinline__ int idx_col_low(int x) const + { + return (x >= 0) ? 
x : (x - ((x - width + 1) / width) * width); + } + + __device__ __forceinline__ int idx_col_high(int x) const + { + return (x < width) ? x : (x % width); + } + + __device__ __forceinline__ int idx_col(int x) const + { + return idx_col_high(idx_col_low(x)); + } + + template __device__ __forceinline__ D at(int y, int x, const T* data, size_t step) const + { + return saturate_cast(((const T*)((const char*)data + idx_row(y) * step))[idx_col(x)]); + } + + template __device__ __forceinline__ D at(typename Ptr2D::index_type y, typename Ptr2D::index_type x, const Ptr2D& src) const + { + return saturate_cast(src(idx_row(y), idx_col(x))); + } + + const int height; + const int width; + }; + + ////////////////////////////////////////////////////////////// + // BorderReader + + template struct BorderReader + { + typedef typename B::result_type elem_type; + typedef typename Ptr2D::index_type index_type; + + __host__ __device__ __forceinline__ BorderReader(const Ptr2D& ptr_, const B& b_) : ptr(ptr_), b(b_) {} + + __device__ __forceinline__ elem_type operator ()(index_type y, index_type x) const + { + return b.at(y, x, ptr); + } + + const Ptr2D ptr; + const B b; + }; + + // under win32 there is some bug with templated types that passed as kernel parameters + // with this specialization all works fine + template struct BorderReader< Ptr2D, BrdConstant > + { + typedef typename BrdConstant::result_type elem_type; + typedef typename Ptr2D::index_type index_type; + + __host__ __device__ __forceinline__ BorderReader(const Ptr2D& src_, const BrdConstant& b) : + src(src_), height(b.height), width(b.width), val(b.val) + { + } + + __device__ __forceinline__ D operator ()(index_type y, index_type x) const + { + return (x >= 0 && x < width && y >= 0 && y < height) ? saturate_cast(src(y, x)) : val; + } + + const Ptr2D src; + const int height; + const int width; + const D val; + }; +}}} // namespace cv { namespace gpu { namespace device + +#endif // __OPENCV_GPU_BORDER_INTERPOLATE_HPP__ diff --git a/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/color.hpp b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/color.hpp new file mode 100644 index 0000000..5af64bf --- /dev/null +++ b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/color.hpp @@ -0,0 +1,301 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. 
+// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef __OPENCV_GPU_COLOR_HPP__ +#define __OPENCV_GPU_COLOR_HPP__ + +#include "detail/color_detail.hpp" + +namespace cv { namespace gpu { namespace device +{ + // All OPENCV_GPU_IMPLEMENT_*_TRAITS(ColorSpace1_to_ColorSpace2, ...) macros implements + // template class ColorSpace1_to_ColorSpace2_traits + // { + // typedef ... functor_type; + // static __host__ __device__ functor_type create_functor(); + // }; + + OPENCV_GPU_IMPLEMENT_RGB2RGB_TRAITS(bgr_to_rgb, 3, 3, 2) + OPENCV_GPU_IMPLEMENT_RGB2RGB_TRAITS(bgr_to_bgra, 3, 4, 0) + OPENCV_GPU_IMPLEMENT_RGB2RGB_TRAITS(bgr_to_rgba, 3, 4, 2) + OPENCV_GPU_IMPLEMENT_RGB2RGB_TRAITS(bgra_to_bgr, 4, 3, 0) + OPENCV_GPU_IMPLEMENT_RGB2RGB_TRAITS(bgra_to_rgb, 4, 3, 2) + OPENCV_GPU_IMPLEMENT_RGB2RGB_TRAITS(bgra_to_rgba, 4, 4, 2) + + #undef OPENCV_GPU_IMPLEMENT_RGB2RGB_TRAITS + + OPENCV_GPU_IMPLEMENT_RGB2RGB5x5_TRAITS(bgr_to_bgr555, 3, 0, 5) + OPENCV_GPU_IMPLEMENT_RGB2RGB5x5_TRAITS(bgr_to_bgr565, 3, 0, 6) + OPENCV_GPU_IMPLEMENT_RGB2RGB5x5_TRAITS(rgb_to_bgr555, 3, 2, 5) + OPENCV_GPU_IMPLEMENT_RGB2RGB5x5_TRAITS(rgb_to_bgr565, 3, 2, 6) + OPENCV_GPU_IMPLEMENT_RGB2RGB5x5_TRAITS(bgra_to_bgr555, 4, 0, 5) + OPENCV_GPU_IMPLEMENT_RGB2RGB5x5_TRAITS(bgra_to_bgr565, 4, 0, 6) + OPENCV_GPU_IMPLEMENT_RGB2RGB5x5_TRAITS(rgba_to_bgr555, 4, 2, 5) + OPENCV_GPU_IMPLEMENT_RGB2RGB5x5_TRAITS(rgba_to_bgr565, 4, 2, 6) + + #undef OPENCV_GPU_IMPLEMENT_RGB2RGB5x5_TRAITS + + OPENCV_GPU_IMPLEMENT_RGB5x52RGB_TRAITS(bgr555_to_rgb, 3, 2, 5) + OPENCV_GPU_IMPLEMENT_RGB5x52RGB_TRAITS(bgr565_to_rgb, 3, 2, 6) + OPENCV_GPU_IMPLEMENT_RGB5x52RGB_TRAITS(bgr555_to_bgr, 3, 0, 5) + OPENCV_GPU_IMPLEMENT_RGB5x52RGB_TRAITS(bgr565_to_bgr, 3, 0, 6) + OPENCV_GPU_IMPLEMENT_RGB5x52RGB_TRAITS(bgr555_to_rgba, 4, 2, 5) + OPENCV_GPU_IMPLEMENT_RGB5x52RGB_TRAITS(bgr565_to_rgba, 4, 2, 6) + OPENCV_GPU_IMPLEMENT_RGB5x52RGB_TRAITS(bgr555_to_bgra, 4, 0, 5) + OPENCV_GPU_IMPLEMENT_RGB5x52RGB_TRAITS(bgr565_to_bgra, 4, 0, 6) + + #undef OPENCV_GPU_IMPLEMENT_RGB5x52RGB_TRAITS + + OPENCV_GPU_IMPLEMENT_GRAY2RGB_TRAITS(gray_to_bgr, 3) + OPENCV_GPU_IMPLEMENT_GRAY2RGB_TRAITS(gray_to_bgra, 4) + + #undef OPENCV_GPU_IMPLEMENT_GRAY2RGB_TRAITS + + OPENCV_GPU_IMPLEMENT_GRAY2RGB5x5_TRAITS(gray_to_bgr555, 5) + OPENCV_GPU_IMPLEMENT_GRAY2RGB5x5_TRAITS(gray_to_bgr565, 6) + + #undef OPENCV_GPU_IMPLEMENT_GRAY2RGB5x5_TRAITS + + OPENCV_GPU_IMPLEMENT_RGB5x52GRAY_TRAITS(bgr555_to_gray, 5) + OPENCV_GPU_IMPLEMENT_RGB5x52GRAY_TRAITS(bgr565_to_gray, 6) + + #undef OPENCV_GPU_IMPLEMENT_RGB5x52GRAY_TRAITS + + OPENCV_GPU_IMPLEMENT_RGB2GRAY_TRAITS(rgb_to_gray, 3, 2) + 
OPENCV_GPU_IMPLEMENT_RGB2GRAY_TRAITS(bgr_to_gray, 3, 0) + OPENCV_GPU_IMPLEMENT_RGB2GRAY_TRAITS(rgba_to_gray, 4, 2) + OPENCV_GPU_IMPLEMENT_RGB2GRAY_TRAITS(bgra_to_gray, 4, 0) + + #undef OPENCV_GPU_IMPLEMENT_RGB2GRAY_TRAITS + + OPENCV_GPU_IMPLEMENT_RGB2YUV_TRAITS(rgb_to_yuv, 3, 3, 2) + OPENCV_GPU_IMPLEMENT_RGB2YUV_TRAITS(rgba_to_yuv, 4, 3, 2) + OPENCV_GPU_IMPLEMENT_RGB2YUV_TRAITS(rgb_to_yuv4, 3, 4, 2) + OPENCV_GPU_IMPLEMENT_RGB2YUV_TRAITS(rgba_to_yuv4, 4, 4, 2) + OPENCV_GPU_IMPLEMENT_RGB2YUV_TRAITS(bgr_to_yuv, 3, 3, 0) + OPENCV_GPU_IMPLEMENT_RGB2YUV_TRAITS(bgra_to_yuv, 4, 3, 0) + OPENCV_GPU_IMPLEMENT_RGB2YUV_TRAITS(bgr_to_yuv4, 3, 4, 0) + OPENCV_GPU_IMPLEMENT_RGB2YUV_TRAITS(bgra_to_yuv4, 4, 4, 0) + + #undef OPENCV_GPU_IMPLEMENT_RGB2YUV_TRAITS + + OPENCV_GPU_IMPLEMENT_YUV2RGB_TRAITS(yuv_to_rgb, 3, 3, 2) + OPENCV_GPU_IMPLEMENT_YUV2RGB_TRAITS(yuv_to_rgba, 3, 4, 2) + OPENCV_GPU_IMPLEMENT_YUV2RGB_TRAITS(yuv4_to_rgb, 4, 3, 2) + OPENCV_GPU_IMPLEMENT_YUV2RGB_TRAITS(yuv4_to_rgba, 4, 4, 2) + OPENCV_GPU_IMPLEMENT_YUV2RGB_TRAITS(yuv_to_bgr, 3, 3, 0) + OPENCV_GPU_IMPLEMENT_YUV2RGB_TRAITS(yuv_to_bgra, 3, 4, 0) + OPENCV_GPU_IMPLEMENT_YUV2RGB_TRAITS(yuv4_to_bgr, 4, 3, 0) + OPENCV_GPU_IMPLEMENT_YUV2RGB_TRAITS(yuv4_to_bgra, 4, 4, 0) + + #undef OPENCV_GPU_IMPLEMENT_YUV2RGB_TRAITS + + OPENCV_GPU_IMPLEMENT_RGB2YCrCb_TRAITS(rgb_to_YCrCb, 3, 3, 2) + OPENCV_GPU_IMPLEMENT_RGB2YCrCb_TRAITS(rgba_to_YCrCb, 4, 3, 2) + OPENCV_GPU_IMPLEMENT_RGB2YCrCb_TRAITS(rgb_to_YCrCb4, 3, 4, 2) + OPENCV_GPU_IMPLEMENT_RGB2YCrCb_TRAITS(rgba_to_YCrCb4, 4, 4, 2) + OPENCV_GPU_IMPLEMENT_RGB2YCrCb_TRAITS(bgr_to_YCrCb, 3, 3, 0) + OPENCV_GPU_IMPLEMENT_RGB2YCrCb_TRAITS(bgra_to_YCrCb, 4, 3, 0) + OPENCV_GPU_IMPLEMENT_RGB2YCrCb_TRAITS(bgr_to_YCrCb4, 3, 4, 0) + OPENCV_GPU_IMPLEMENT_RGB2YCrCb_TRAITS(bgra_to_YCrCb4, 4, 4, 0) + + #undef OPENCV_GPU_IMPLEMENT_RGB2YCrCb_TRAITS + + OPENCV_GPU_IMPLEMENT_YCrCb2RGB_TRAITS(YCrCb_to_rgb, 3, 3, 2) + OPENCV_GPU_IMPLEMENT_YCrCb2RGB_TRAITS(YCrCb_to_rgba, 3, 4, 2) + OPENCV_GPU_IMPLEMENT_YCrCb2RGB_TRAITS(YCrCb4_to_rgb, 4, 3, 2) + OPENCV_GPU_IMPLEMENT_YCrCb2RGB_TRAITS(YCrCb4_to_rgba, 4, 4, 2) + OPENCV_GPU_IMPLEMENT_YCrCb2RGB_TRAITS(YCrCb_to_bgr, 3, 3, 0) + OPENCV_GPU_IMPLEMENT_YCrCb2RGB_TRAITS(YCrCb_to_bgra, 3, 4, 0) + OPENCV_GPU_IMPLEMENT_YCrCb2RGB_TRAITS(YCrCb4_to_bgr, 4, 3, 0) + OPENCV_GPU_IMPLEMENT_YCrCb2RGB_TRAITS(YCrCb4_to_bgra, 4, 4, 0) + + #undef OPENCV_GPU_IMPLEMENT_YCrCb2RGB_TRAITS + + OPENCV_GPU_IMPLEMENT_RGB2XYZ_TRAITS(rgb_to_xyz, 3, 3, 2) + OPENCV_GPU_IMPLEMENT_RGB2XYZ_TRAITS(rgba_to_xyz, 4, 3, 2) + OPENCV_GPU_IMPLEMENT_RGB2XYZ_TRAITS(rgb_to_xyz4, 3, 4, 2) + OPENCV_GPU_IMPLEMENT_RGB2XYZ_TRAITS(rgba_to_xyz4, 4, 4, 2) + OPENCV_GPU_IMPLEMENT_RGB2XYZ_TRAITS(bgr_to_xyz, 3, 3, 0) + OPENCV_GPU_IMPLEMENT_RGB2XYZ_TRAITS(bgra_to_xyz, 4, 3, 0) + OPENCV_GPU_IMPLEMENT_RGB2XYZ_TRAITS(bgr_to_xyz4, 3, 4, 0) + OPENCV_GPU_IMPLEMENT_RGB2XYZ_TRAITS(bgra_to_xyz4, 4, 4, 0) + + #undef OPENCV_GPU_IMPLEMENT_RGB2XYZ_TRAITS + + OPENCV_GPU_IMPLEMENT_XYZ2RGB_TRAITS(xyz_to_rgb, 3, 3, 2) + OPENCV_GPU_IMPLEMENT_XYZ2RGB_TRAITS(xyz4_to_rgb, 4, 3, 2) + OPENCV_GPU_IMPLEMENT_XYZ2RGB_TRAITS(xyz_to_rgba, 3, 4, 2) + OPENCV_GPU_IMPLEMENT_XYZ2RGB_TRAITS(xyz4_to_rgba, 4, 4, 2) + OPENCV_GPU_IMPLEMENT_XYZ2RGB_TRAITS(xyz_to_bgr, 3, 3, 0) + OPENCV_GPU_IMPLEMENT_XYZ2RGB_TRAITS(xyz4_to_bgr, 4, 3, 0) + OPENCV_GPU_IMPLEMENT_XYZ2RGB_TRAITS(xyz_to_bgra, 3, 4, 0) + OPENCV_GPU_IMPLEMENT_XYZ2RGB_TRAITS(xyz4_to_bgra, 4, 4, 0) + + #undef OPENCV_GPU_IMPLEMENT_XYZ2RGB_TRAITS + + OPENCV_GPU_IMPLEMENT_RGB2HSV_TRAITS(rgb_to_hsv, 3, 3, 2) + 
OPENCV_GPU_IMPLEMENT_RGB2HSV_TRAITS(rgba_to_hsv, 4, 3, 2) + OPENCV_GPU_IMPLEMENT_RGB2HSV_TRAITS(rgb_to_hsv4, 3, 4, 2) + OPENCV_GPU_IMPLEMENT_RGB2HSV_TRAITS(rgba_to_hsv4, 4, 4, 2) + OPENCV_GPU_IMPLEMENT_RGB2HSV_TRAITS(bgr_to_hsv, 3, 3, 0) + OPENCV_GPU_IMPLEMENT_RGB2HSV_TRAITS(bgra_to_hsv, 4, 3, 0) + OPENCV_GPU_IMPLEMENT_RGB2HSV_TRAITS(bgr_to_hsv4, 3, 4, 0) + OPENCV_GPU_IMPLEMENT_RGB2HSV_TRAITS(bgra_to_hsv4, 4, 4, 0) + + #undef OPENCV_GPU_IMPLEMENT_RGB2HSV_TRAITS + + OPENCV_GPU_IMPLEMENT_HSV2RGB_TRAITS(hsv_to_rgb, 3, 3, 2) + OPENCV_GPU_IMPLEMENT_HSV2RGB_TRAITS(hsv_to_rgba, 3, 4, 2) + OPENCV_GPU_IMPLEMENT_HSV2RGB_TRAITS(hsv4_to_rgb, 4, 3, 2) + OPENCV_GPU_IMPLEMENT_HSV2RGB_TRAITS(hsv4_to_rgba, 4, 4, 2) + OPENCV_GPU_IMPLEMENT_HSV2RGB_TRAITS(hsv_to_bgr, 3, 3, 0) + OPENCV_GPU_IMPLEMENT_HSV2RGB_TRAITS(hsv_to_bgra, 3, 4, 0) + OPENCV_GPU_IMPLEMENT_HSV2RGB_TRAITS(hsv4_to_bgr, 4, 3, 0) + OPENCV_GPU_IMPLEMENT_HSV2RGB_TRAITS(hsv4_to_bgra, 4, 4, 0) + + #undef OPENCV_GPU_IMPLEMENT_HSV2RGB_TRAITS + + OPENCV_GPU_IMPLEMENT_RGB2HLS_TRAITS(rgb_to_hls, 3, 3, 2) + OPENCV_GPU_IMPLEMENT_RGB2HLS_TRAITS(rgba_to_hls, 4, 3, 2) + OPENCV_GPU_IMPLEMENT_RGB2HLS_TRAITS(rgb_to_hls4, 3, 4, 2) + OPENCV_GPU_IMPLEMENT_RGB2HLS_TRAITS(rgba_to_hls4, 4, 4, 2) + OPENCV_GPU_IMPLEMENT_RGB2HLS_TRAITS(bgr_to_hls, 3, 3, 0) + OPENCV_GPU_IMPLEMENT_RGB2HLS_TRAITS(bgra_to_hls, 4, 3, 0) + OPENCV_GPU_IMPLEMENT_RGB2HLS_TRAITS(bgr_to_hls4, 3, 4, 0) + OPENCV_GPU_IMPLEMENT_RGB2HLS_TRAITS(bgra_to_hls4, 4, 4, 0) + + #undef OPENCV_GPU_IMPLEMENT_RGB2HLS_TRAITS + + OPENCV_GPU_IMPLEMENT_HLS2RGB_TRAITS(hls_to_rgb, 3, 3, 2) + OPENCV_GPU_IMPLEMENT_HLS2RGB_TRAITS(hls_to_rgba, 3, 4, 2) + OPENCV_GPU_IMPLEMENT_HLS2RGB_TRAITS(hls4_to_rgb, 4, 3, 2) + OPENCV_GPU_IMPLEMENT_HLS2RGB_TRAITS(hls4_to_rgba, 4, 4, 2) + OPENCV_GPU_IMPLEMENT_HLS2RGB_TRAITS(hls_to_bgr, 3, 3, 0) + OPENCV_GPU_IMPLEMENT_HLS2RGB_TRAITS(hls_to_bgra, 3, 4, 0) + OPENCV_GPU_IMPLEMENT_HLS2RGB_TRAITS(hls4_to_bgr, 4, 3, 0) + OPENCV_GPU_IMPLEMENT_HLS2RGB_TRAITS(hls4_to_bgra, 4, 4, 0) + + #undef OPENCV_GPU_IMPLEMENT_HLS2RGB_TRAITS + + OPENCV_GPU_IMPLEMENT_RGB2Lab_TRAITS(rgb_to_lab, 3, 3, true, 2) + OPENCV_GPU_IMPLEMENT_RGB2Lab_TRAITS(rgba_to_lab, 4, 3, true, 2) + OPENCV_GPU_IMPLEMENT_RGB2Lab_TRAITS(rgb_to_lab4, 3, 4, true, 2) + OPENCV_GPU_IMPLEMENT_RGB2Lab_TRAITS(rgba_to_lab4, 4, 4, true, 2) + OPENCV_GPU_IMPLEMENT_RGB2Lab_TRAITS(bgr_to_lab, 3, 3, true, 0) + OPENCV_GPU_IMPLEMENT_RGB2Lab_TRAITS(bgra_to_lab, 4, 3, true, 0) + OPENCV_GPU_IMPLEMENT_RGB2Lab_TRAITS(bgr_to_lab4, 3, 4, true, 0) + OPENCV_GPU_IMPLEMENT_RGB2Lab_TRAITS(bgra_to_lab4, 4, 4, true, 0) + + OPENCV_GPU_IMPLEMENT_RGB2Lab_TRAITS(lrgb_to_lab, 3, 3, false, 2) + OPENCV_GPU_IMPLEMENT_RGB2Lab_TRAITS(lrgba_to_lab, 4, 3, false, 2) + OPENCV_GPU_IMPLEMENT_RGB2Lab_TRAITS(lrgb_to_lab4, 3, 4, false, 2) + OPENCV_GPU_IMPLEMENT_RGB2Lab_TRAITS(lrgba_to_lab4, 4, 4, false, 2) + OPENCV_GPU_IMPLEMENT_RGB2Lab_TRAITS(lbgr_to_lab, 3, 3, false, 0) + OPENCV_GPU_IMPLEMENT_RGB2Lab_TRAITS(lbgra_to_lab, 4, 3, false, 0) + OPENCV_GPU_IMPLEMENT_RGB2Lab_TRAITS(lbgr_to_lab4, 3, 4, false, 0) + OPENCV_GPU_IMPLEMENT_RGB2Lab_TRAITS(lbgra_to_lab4, 4, 4, false, 0) + + #undef OPENCV_GPU_IMPLEMENT_RGB2Lab_TRAITS + + OPENCV_GPU_IMPLEMENT_Lab2RGB_TRAITS(lab_to_rgb, 3, 3, true, 2) + OPENCV_GPU_IMPLEMENT_Lab2RGB_TRAITS(lab4_to_rgb, 4, 3, true, 2) + OPENCV_GPU_IMPLEMENT_Lab2RGB_TRAITS(lab_to_rgba, 3, 4, true, 2) + OPENCV_GPU_IMPLEMENT_Lab2RGB_TRAITS(lab4_to_rgba, 4, 4, true, 2) + OPENCV_GPU_IMPLEMENT_Lab2RGB_TRAITS(lab_to_bgr, 3, 3, true, 0) + 
OPENCV_GPU_IMPLEMENT_Lab2RGB_TRAITS(lab4_to_bgr, 4, 3, true, 0) + OPENCV_GPU_IMPLEMENT_Lab2RGB_TRAITS(lab_to_bgra, 3, 4, true, 0) + OPENCV_GPU_IMPLEMENT_Lab2RGB_TRAITS(lab4_to_bgra, 4, 4, true, 0) + + OPENCV_GPU_IMPLEMENT_Lab2RGB_TRAITS(lab_to_lrgb, 3, 3, false, 2) + OPENCV_GPU_IMPLEMENT_Lab2RGB_TRAITS(lab4_to_lrgb, 4, 3, false, 2) + OPENCV_GPU_IMPLEMENT_Lab2RGB_TRAITS(lab_to_lrgba, 3, 4, false, 2) + OPENCV_GPU_IMPLEMENT_Lab2RGB_TRAITS(lab4_to_lrgba, 4, 4, false, 2) + OPENCV_GPU_IMPLEMENT_Lab2RGB_TRAITS(lab_to_lbgr, 3, 3, false, 0) + OPENCV_GPU_IMPLEMENT_Lab2RGB_TRAITS(lab4_to_lbgr, 4, 3, false, 0) + OPENCV_GPU_IMPLEMENT_Lab2RGB_TRAITS(lab_to_lbgra, 3, 4, false, 0) + OPENCV_GPU_IMPLEMENT_Lab2RGB_TRAITS(lab4_to_lbgra, 4, 4, false, 0) + + #undef OPENCV_GPU_IMPLEMENT_Lab2RGB_TRAITS + + OPENCV_GPU_IMPLEMENT_RGB2Luv_TRAITS(rgb_to_luv, 3, 3, true, 2) + OPENCV_GPU_IMPLEMENT_RGB2Luv_TRAITS(rgba_to_luv, 4, 3, true, 2) + OPENCV_GPU_IMPLEMENT_RGB2Luv_TRAITS(rgb_to_luv4, 3, 4, true, 2) + OPENCV_GPU_IMPLEMENT_RGB2Luv_TRAITS(rgba_to_luv4, 4, 4, true, 2) + OPENCV_GPU_IMPLEMENT_RGB2Luv_TRAITS(bgr_to_luv, 3, 3, true, 0) + OPENCV_GPU_IMPLEMENT_RGB2Luv_TRAITS(bgra_to_luv, 4, 3, true, 0) + OPENCV_GPU_IMPLEMENT_RGB2Luv_TRAITS(bgr_to_luv4, 3, 4, true, 0) + OPENCV_GPU_IMPLEMENT_RGB2Luv_TRAITS(bgra_to_luv4, 4, 4, true, 0) + + OPENCV_GPU_IMPLEMENT_RGB2Luv_TRAITS(lrgb_to_luv, 3, 3, false, 2) + OPENCV_GPU_IMPLEMENT_RGB2Luv_TRAITS(lrgba_to_luv, 4, 3, false, 2) + OPENCV_GPU_IMPLEMENT_RGB2Luv_TRAITS(lrgb_to_luv4, 3, 4, false, 2) + OPENCV_GPU_IMPLEMENT_RGB2Luv_TRAITS(lrgba_to_luv4, 4, 4, false, 2) + OPENCV_GPU_IMPLEMENT_RGB2Luv_TRAITS(lbgr_to_luv, 3, 3, false, 0) + OPENCV_GPU_IMPLEMENT_RGB2Luv_TRAITS(lbgra_to_luv, 4, 3, false, 0) + OPENCV_GPU_IMPLEMENT_RGB2Luv_TRAITS(lbgr_to_luv4, 3, 4, false, 0) + OPENCV_GPU_IMPLEMENT_RGB2Luv_TRAITS(lbgra_to_luv4, 4, 4, false, 0) + + #undef OPENCV_GPU_IMPLEMENT_RGB2Luv_TRAITS + + OPENCV_GPU_IMPLEMENT_Luv2RGB_TRAITS(luv_to_rgb, 3, 3, true, 2) + OPENCV_GPU_IMPLEMENT_Luv2RGB_TRAITS(luv4_to_rgb, 4, 3, true, 2) + OPENCV_GPU_IMPLEMENT_Luv2RGB_TRAITS(luv_to_rgba, 3, 4, true, 2) + OPENCV_GPU_IMPLEMENT_Luv2RGB_TRAITS(luv4_to_rgba, 4, 4, true, 2) + OPENCV_GPU_IMPLEMENT_Luv2RGB_TRAITS(luv_to_bgr, 3, 3, true, 0) + OPENCV_GPU_IMPLEMENT_Luv2RGB_TRAITS(luv4_to_bgr, 4, 3, true, 0) + OPENCV_GPU_IMPLEMENT_Luv2RGB_TRAITS(luv_to_bgra, 3, 4, true, 0) + OPENCV_GPU_IMPLEMENT_Luv2RGB_TRAITS(luv4_to_bgra, 4, 4, true, 0) + + OPENCV_GPU_IMPLEMENT_Luv2RGB_TRAITS(luv_to_lrgb, 3, 3, false, 2) + OPENCV_GPU_IMPLEMENT_Luv2RGB_TRAITS(luv4_to_lrgb, 4, 3, false, 2) + OPENCV_GPU_IMPLEMENT_Luv2RGB_TRAITS(luv_to_lrgba, 3, 4, false, 2) + OPENCV_GPU_IMPLEMENT_Luv2RGB_TRAITS(luv4_to_lrgba, 4, 4, false, 2) + OPENCV_GPU_IMPLEMENT_Luv2RGB_TRAITS(luv_to_lbgr, 3, 3, false, 0) + OPENCV_GPU_IMPLEMENT_Luv2RGB_TRAITS(luv4_to_lbgr, 4, 3, false, 0) + OPENCV_GPU_IMPLEMENT_Luv2RGB_TRAITS(luv_to_lbgra, 3, 4, false, 0) + OPENCV_GPU_IMPLEMENT_Luv2RGB_TRAITS(luv4_to_lbgra, 4, 4, false, 0) + + #undef OPENCV_GPU_IMPLEMENT_Luv2RGB_TRAITS +}}} // namespace cv { namespace gpu { namespace device + +#endif // __OPENCV_GPU_BORDER_INTERPOLATE_HPP__ diff --git a/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/common.hpp b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/common.hpp new file mode 100644 index 0000000..26a349f --- /dev/null +++ b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/common.hpp @@ -0,0 +1,118 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// 
+//
+//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+//  By downloading, copying, installing or using the software you agree to this license.
+//  If you do not agree to this license, do not download, install,
+//  copy or use the software.
+//
+//
+//                           License Agreement
+//                For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+//   * Redistribution's of source code must retain the above copyright notice,
+//     this list of conditions and the following disclaimer.
+//
+//   * Redistribution's in binary form must reproduce the above copyright notice,
+//     this list of conditions and the following disclaimer in the documentation
+//     and/or other materials provided with the distribution.
+//
+//   * The name of the copyright holders may not be used to endorse or promote products
+//     derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#ifndef __OPENCV_GPU_COMMON_HPP__
+#define __OPENCV_GPU_COMMON_HPP__
+
+#include <cuda_runtime.h>
+#include "opencv2/core/cuda_devptrs.hpp"
+
+#ifndef CV_PI
+    #define CV_PI 3.1415926535897932384626433832795
+#endif
+
+#ifndef CV_PI_F
+    #ifndef CV_PI
+        #define CV_PI_F 3.14159265f
+    #else
+        #define CV_PI_F ((float)CV_PI)
+    #endif
+#endif
+
+#if defined(__GNUC__)
+    #define cudaSafeCall(expr)  ___cudaSafeCall(expr, __FILE__, __LINE__, __func__)
+#else /* defined(__CUDACC__) || defined(__MSVC__) */
+    #define cudaSafeCall(expr)  ___cudaSafeCall(expr, __FILE__, __LINE__)
+#endif
+
+namespace cv { namespace gpu
+{
+    void error(const char *error_string, const char *file, const int line, const char *func);
+
+    template <class T> static inline bool isAligned(const T* ptr, size_t size)
+    {
+        return reinterpret_cast<size_t>(ptr) % size == 0;
+    }
+
+    static inline bool isAligned(size_t step, size_t size)
+    {
+        return step % size == 0;
+    }
+}}
+
+static inline void ___cudaSafeCall(cudaError_t err, const char *file, const int line, const char *func = "")
+{
+    if (cudaSuccess != err)
+        cv::gpu::error(cudaGetErrorString(err), file, line, func);
+}
+
+namespace cv { namespace gpu
+{
+    __host__ __device__ __forceinline__ int divUp(int total, int grain)
+    {
+        return (total + grain - 1) / grain;
+    }
+
+    namespace device
+    {
+        using cv::gpu::divUp;
+
+#ifdef __CUDACC__
+        typedef unsigned char uchar;
+        typedef unsigned short ushort;
+        typedef signed char schar;
+        #if defined (_WIN32) || defined (__APPLE__) || defined (__QNX__)
+            typedef unsigned int uint;
+        #endif
+
+        template<class T> inline void bindTexture(const textureReference* tex, const PtrStepSz<T>& img)
+        {
+            cudaChannelFormatDesc desc = cudaCreateChannelDesc<T>();
+            cudaSafeCall( cudaBindTexture2D(0, tex, img.ptr(), &desc, img.cols, img.rows, img.step) );
+        }
+#endif // __CUDACC__
+    }
+}}
+
+
+
+#endif // __OPENCV_GPU_COMMON_HPP__
diff --git a/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/datamov_utils.hpp b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/datamov_utils.hpp
new file mode 100644
index 0000000..a3f62fb
--- /dev/null
+++ b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/datamov_utils.hpp
@@ -0,0 +1,105 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+//  By downloading, copying, installing or using the software you agree to this license.
+//  If you do not agree to this license, do not download, install,
+//  copy or use the software.
+//
+//
+//                           License Agreement
+//                For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+//   * Redistribution's of source code must retain the above copyright notice,
+//     this list of conditions and the following disclaimer.
+//
+//   * Redistribution's in binary form must reproduce the above copyright notice,
+//     this list of conditions and the following disclaimer in the documentation
+//     and/or other materials provided with the distribution.
+//
+//   * The name of the copyright holders may not be used to endorse or promote products
+//     derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#ifndef __OPENCV_GPU_DATAMOV_UTILS_HPP__
+#define __OPENCV_GPU_DATAMOV_UTILS_HPP__
+
+#include "common.hpp"
+
+namespace cv { namespace gpu { namespace device
+{
+    #if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 200
+
+        // for Fermi memory space is detected automatically
+        template <typename T> struct ForceGlob
+        {
+            __device__ __forceinline__ static void Load(const T* ptr, int offset, T& val) { val = ptr[offset]; }
+        };
+
+    #else // __CUDA_ARCH__ >= 200
+
+        #if defined(_WIN64) || defined(__LP64__)
+            // 64-bit register modifier for inlined asm
+            #define OPENCV_GPU_ASM_PTR "l"
+        #else
+            // 32-bit register modifier for inlined asm
+            #define OPENCV_GPU_ASM_PTR "r"
+        #endif
+
+        template <typename T> struct ForceGlob;
+
+        #define OPENCV_GPU_DEFINE_FORCE_GLOB(base_type, ptx_type, reg_mod) \
+            template <> struct ForceGlob<base_type> \
+            { \
+                __device__ __forceinline__ static void Load(const base_type* ptr, int offset, base_type& val) \
+                { \
+                    asm("ld.global."#ptx_type" %0, [%1];" : "="#reg_mod(val) : OPENCV_GPU_ASM_PTR(ptr + offset)); \
+                } \
+            };
+
+        #define OPENCV_GPU_DEFINE_FORCE_GLOB_B(base_type, ptx_type) \
+            template <> struct ForceGlob<base_type> \
+            { \
+                __device__ __forceinline__ static void Load(const base_type* ptr, int offset, base_type& val) \
+                { \
+                    asm("ld.global."#ptx_type" %0, [%1];" : "=r"(*reinterpret_cast<uint*>(&val)) : OPENCV_GPU_ASM_PTR(ptr + offset)); \
+                } \
+            };
+
+        OPENCV_GPU_DEFINE_FORCE_GLOB_B(uchar,  u8)
+        OPENCV_GPU_DEFINE_FORCE_GLOB_B(schar,  s8)
+        OPENCV_GPU_DEFINE_FORCE_GLOB_B(char,   b8)
+        OPENCV_GPU_DEFINE_FORCE_GLOB  (ushort, u16, h)
+        OPENCV_GPU_DEFINE_FORCE_GLOB  (short,  s16, h)
+        OPENCV_GPU_DEFINE_FORCE_GLOB  (uint,   u32, r)
+        OPENCV_GPU_DEFINE_FORCE_GLOB  (int,    s32, r)
+        OPENCV_GPU_DEFINE_FORCE_GLOB  (float,  f32, f)
+        OPENCV_GPU_DEFINE_FORCE_GLOB  (double, f64, d)
+
+        #undef OPENCV_GPU_DEFINE_FORCE_GLOB
+        #undef OPENCV_GPU_DEFINE_FORCE_GLOB_B
+        #undef OPENCV_GPU_ASM_PTR
+
+    #endif // __CUDA_ARCH__ >= 200
+}}} // namespace cv { namespace gpu { namespace device
+
+#endif // __OPENCV_GPU_DATAMOV_UTILS_HPP__
diff --git a/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/detail/color_detail.hpp b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/detail/color_detail.hpp
new file mode 100644
index 0000000..c4ec64b
--- /dev/null
+++ b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/detail/color_detail.hpp
@@ -0,0 +1,2219 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+//  By downloading, copying, installing or using the software you agree to this license.
+//  If you do not agree to this license, do not download, install,
+//  copy or use the software.
+// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef __OPENCV_GPU_COLOR_DETAIL_HPP__ +#define __OPENCV_GPU_COLOR_DETAIL_HPP__ + +#include "../common.hpp" +#include "../vec_traits.hpp" +#include "../saturate_cast.hpp" +#include "../limits.hpp" +#include "../functional.hpp" + +namespace cv { namespace gpu { namespace device +{ + #ifndef CV_DESCALE + #define CV_DESCALE(x, n) (((x) + (1 << ((n)-1))) >> (n)) + #endif + + namespace color_detail + { + template struct ColorChannel + { + typedef float worktype_f; + static __device__ __forceinline__ T max() { return numeric_limits::max(); } + static __device__ __forceinline__ T half() { return (T)(max()/2 + 1); } + }; + + template<> struct ColorChannel + { + typedef float worktype_f; + static __device__ __forceinline__ float max() { return 1.f; } + static __device__ __forceinline__ float half() { return 0.5f; } + }; + + template static __device__ __forceinline__ void setAlpha(typename TypeVec::vec_type& vec, T val) + { + } + + template static __device__ __forceinline__ void setAlpha(typename TypeVec::vec_type& vec, T val) + { + vec.w = val; + } + + template static __device__ __forceinline__ T getAlpha(const typename TypeVec::vec_type& vec) + { + return ColorChannel::max(); + } + + template static __device__ __forceinline__ T getAlpha(const typename TypeVec::vec_type& vec) + { + return vec.w; + } + + enum + { + yuv_shift = 14, + xyz_shift = 12, + R2Y = 4899, + G2Y = 9617, + B2Y = 1868, + BLOCK_SIZE = 256 + }; + } + +////////////////// Various 3/4-channel to 3/4-channel RGB transformations ///////////////// + + namespace color_detail + { + template struct RGB2RGB + : unary_function::vec_type, typename TypeVec::vec_type> + { + __device__ typename TypeVec::vec_type operator()(const typename TypeVec::vec_type& src) const + { + typename TypeVec::vec_type 
dst; + + dst.x = bidx == 0 ? src.x : src.z; + dst.y = src.y; + dst.z = bidx == 0 ? src.z : src.x; + setAlpha(dst, getAlpha(src)); + + return dst; + } + + __host__ __device__ __forceinline__ RGB2RGB() {} + __host__ __device__ __forceinline__ RGB2RGB(const RGB2RGB&) {} + }; + + template <> struct RGB2RGB : unary_function + { + __device__ uint operator()(uint src) const + { + uint dst = 0; + + dst |= (0xffu & (src >> 16)); + dst |= (0xffu & (src >> 8)) << 8; + dst |= (0xffu & (src)) << 16; + dst |= (0xffu & (src >> 24)) << 24; + + return dst; + } + + __host__ __device__ __forceinline__ RGB2RGB() {} + __host__ __device__ __forceinline__ RGB2RGB(const RGB2RGB&) {} + }; + } + +#define OPENCV_GPU_IMPLEMENT_RGB2RGB_TRAITS(name, scn, dcn, bidx) \ + template struct name ## _traits \ + { \ + typedef ::cv::gpu::device::color_detail::RGB2RGB functor_type; \ + static __host__ __device__ __forceinline__ functor_type create_functor() \ + { \ + return functor_type(); \ + } \ + }; + +/////////// Transforming 16-bit (565 or 555) RGB to/from 24/32-bit (888[8]) RGB ////////// + + namespace color_detail + { + template struct RGB2RGB5x5Converter; + + template struct RGB2RGB5x5Converter<6, bidx> + { + template + static __device__ __forceinline__ ushort cvt(const T& src) + { + uint b = bidx == 0 ? src.x : src.z; + uint g = src.y; + uint r = bidx == 0 ? src.z : src.x; + return (ushort)((b >> 3) | ((g & ~3) << 3) | ((r & ~7) << 8)); + } + }; + + template struct RGB2RGB5x5Converter<5, bidx> + { + static __device__ __forceinline__ ushort cvt(const uchar3& src) + { + uint b = bidx == 0 ? src.x : src.z; + uint g = src.y; + uint r = bidx == 0 ? src.z : src.x; + return (ushort)((b >> 3) | ((g & ~7) << 2) | ((r & ~7) << 7)); + } + + static __device__ __forceinline__ ushort cvt(const uchar4& src) + { + uint b = bidx == 0 ? src.x : src.z; + uint g = src.y; + uint r = bidx == 0 ? src.z : src.x; + uint a = src.w; + return (ushort)((b >> 3) | ((g & ~7) << 2) | ((r & ~7) << 7) | (a * 0x8000)); + } + }; + + template struct RGB2RGB5x5: + unary_function::vec_type, ushort> + { + __device__ __forceinline__ ushort operator()(const typename TypeVec::vec_type& src) const + { + return RGB2RGB5x5Converter::cvt(src); + } + + __host__ __device__ __forceinline__ RGB2RGB5x5() {} + __host__ __device__ __forceinline__ RGB2RGB5x5(const RGB2RGB5x5&) {} + }; + } + +#define OPENCV_GPU_IMPLEMENT_RGB2RGB5x5_TRAITS(name, scn, bidx, green_bits) \ + struct name ## _traits \ + { \ + typedef ::cv::gpu::device::color_detail::RGB2RGB5x5 functor_type; \ + static __host__ __device__ __forceinline__ functor_type create_functor() \ + { \ + return functor_type(); \ + } \ + }; + + namespace color_detail + { + template struct RGB5x52RGBConverter; + + template struct RGB5x52RGBConverter<5, bidx> + { + static __device__ __forceinline__ void cvt(uint src, uchar3& dst) + { + (bidx == 0 ? dst.x : dst.z) = src << 3; + dst.y = (src >> 2) & ~7; + (bidx == 0 ? dst.z : dst.x) = (src >> 7) & ~7; + } + + static __device__ __forceinline__ void cvt(uint src, uint& dst) + { + dst = 0; + + dst |= (0xffu & (src << 3)) << (bidx * 8); + dst |= (0xffu & ((src >> 2) & ~7)) << 8; + dst |= (0xffu & ((src >> 7) & ~7)) << ((bidx ^ 2) * 8); + dst |= ((src & 0x8000) * 0xffu) << 24; + } + }; + + template struct RGB5x52RGBConverter<6, bidx> + { + static __device__ __forceinline__ void cvt(uint src, uchar3& dst) + { + (bidx == 0 ? dst.x : dst.z) = src << 3; + dst.y = (src >> 3) & ~3; + (bidx == 0 ? 
dst.z : dst.x) = (src >> 8) & ~7; + } + + static __device__ __forceinline__ void cvt(uint src, uint& dst) + { + dst = 0xffu << 24; + + dst |= (0xffu & (src << 3)) << (bidx * 8); + dst |= (0xffu &((src >> 3) & ~3)) << 8; + dst |= (0xffu & ((src >> 8) & ~7)) << ((bidx ^ 2) * 8); + } + }; + + template struct RGB5x52RGB; + + template struct RGB5x52RGB<3, bidx, green_bits> : unary_function + { + __device__ __forceinline__ uchar3 operator()(ushort src) const + { + uchar3 dst; + RGB5x52RGBConverter::cvt(src, dst); + return dst; + } + + __host__ __device__ __forceinline__ RGB5x52RGB() {} + __host__ __device__ __forceinline__ RGB5x52RGB(const RGB5x52RGB&) {} + }; + + template struct RGB5x52RGB<4, bidx, green_bits> : unary_function + { + __device__ __forceinline__ uint operator()(ushort src) const + { + uint dst; + RGB5x52RGBConverter::cvt(src, dst); + return dst; + } + + __host__ __device__ __forceinline__ RGB5x52RGB() {} + __host__ __device__ __forceinline__ RGB5x52RGB(const RGB5x52RGB&) {} + }; + } + +#define OPENCV_GPU_IMPLEMENT_RGB5x52RGB_TRAITS(name, dcn, bidx, green_bits) \ + struct name ## _traits \ + { \ + typedef ::cv::gpu::device::color_detail::RGB5x52RGB functor_type; \ + static __host__ __device__ __forceinline__ functor_type create_functor() \ + { \ + return functor_type(); \ + } \ + }; + +///////////////////////////////// Grayscale to Color //////////////////////////////// + + namespace color_detail + { + template struct Gray2RGB : unary_function::vec_type> + { + __device__ __forceinline__ typename TypeVec::vec_type operator()(T src) const + { + typename TypeVec::vec_type dst; + + dst.z = dst.y = dst.x = src; + setAlpha(dst, ColorChannel::max()); + + return dst; + } + + __host__ __device__ __forceinline__ Gray2RGB() {} + __host__ __device__ __forceinline__ Gray2RGB(const Gray2RGB&) {} + }; + + template <> struct Gray2RGB : unary_function + { + __device__ __forceinline__ uint operator()(uint src) const + { + uint dst = 0xffu << 24; + + dst |= src; + dst |= src << 8; + dst |= src << 16; + + return dst; + } + + __host__ __device__ __forceinline__ Gray2RGB() {} + __host__ __device__ __forceinline__ Gray2RGB(const Gray2RGB&) {} + }; + } + +#define OPENCV_GPU_IMPLEMENT_GRAY2RGB_TRAITS(name, dcn) \ + template struct name ## _traits \ + { \ + typedef ::cv::gpu::device::color_detail::Gray2RGB functor_type; \ + static __host__ __device__ __forceinline__ functor_type create_functor() \ + { \ + return functor_type(); \ + } \ + }; + + namespace color_detail + { + template struct Gray2RGB5x5Converter; + + template<> struct Gray2RGB5x5Converter<6> + { + static __device__ __forceinline__ ushort cvt(uint t) + { + return (ushort)((t >> 3) | ((t & ~3) << 3) | ((t & ~7) << 8)); + } + }; + + template<> struct Gray2RGB5x5Converter<5> + { + static __device__ __forceinline__ ushort cvt(uint t) + { + t >>= 3; + return (ushort)(t | (t << 5) | (t << 10)); + } + }; + + template struct Gray2RGB5x5 : unary_function + { + __device__ __forceinline__ ushort operator()(uint src) const + { + return Gray2RGB5x5Converter::cvt(src); + } + + __host__ __device__ __forceinline__ Gray2RGB5x5() {} + __host__ __device__ __forceinline__ Gray2RGB5x5(const Gray2RGB5x5&) {} + }; + } + +#define OPENCV_GPU_IMPLEMENT_GRAY2RGB5x5_TRAITS(name, green_bits) \ + struct name ## _traits \ + { \ + typedef ::cv::gpu::device::color_detail::Gray2RGB5x5 functor_type; \ + static __host__ __device__ __forceinline__ functor_type create_functor() \ + { \ + return functor_type(); \ + } \ + }; + +///////////////////////////////// Color to Grayscale 
//////////////////////////////// + + namespace color_detail + { + template struct RGB5x52GrayConverter; + + template <> struct RGB5x52GrayConverter<6> + { + static __device__ __forceinline__ uchar cvt(uint t) + { + return (uchar)CV_DESCALE(((t << 3) & 0xf8) * B2Y + ((t >> 3) & 0xfc) * G2Y + ((t >> 8) & 0xf8) * R2Y, yuv_shift); + } + }; + + template <> struct RGB5x52GrayConverter<5> + { + static __device__ __forceinline__ uchar cvt(uint t) + { + return (uchar)CV_DESCALE(((t << 3) & 0xf8) * B2Y + ((t >> 2) & 0xf8) * G2Y + ((t >> 7) & 0xf8) * R2Y, yuv_shift); + } + }; + + template struct RGB5x52Gray : unary_function + { + __device__ __forceinline__ uchar operator()(uint src) const + { + return RGB5x52GrayConverter::cvt(src); + } + + __host__ __device__ __forceinline__ RGB5x52Gray() {} + __host__ __device__ __forceinline__ RGB5x52Gray(const RGB5x52Gray&) {} + }; + } + +#define OPENCV_GPU_IMPLEMENT_RGB5x52GRAY_TRAITS(name, green_bits) \ + struct name ## _traits \ + { \ + typedef ::cv::gpu::device::color_detail::RGB5x52Gray functor_type; \ + static __host__ __device__ __forceinline__ functor_type create_functor() \ + { \ + return functor_type(); \ + } \ + }; + + namespace color_detail + { + template static __device__ __forceinline__ uint RGB2GrayConvert_8U(const T& src) + { + uint b = bidx == 0 ? src.x : src.z; + uint g = src.y; + uint r = bidx == 0 ? src.z : src.x; + return CV_DESCALE((uint)(b * B2Y + g * G2Y + r * R2Y), yuv_shift); + } + + template static __device__ __forceinline__ uchar RGB2GrayConvert_8UC4(uint src) + { + uint b = 0xffu & (src >> (bidx * 8)); + uint g = 0xffu & (src >> 8); + uint r = 0xffu & (src >> ((bidx ^ 2) * 8)); + return CV_DESCALE((uint)(b * B2Y + g * G2Y + r * R2Y), yuv_shift); + } + + template static __device__ __forceinline__ float RGB2GrayConvert_32F(const T& src) + { + float b = bidx == 0 ? src.x : src.z; + float g = src.y; + float r = bidx == 0 ? 
src.z : src.x; + return b * 0.114f + g * 0.587f + r * 0.299f; + } + + template struct RGB2Gray : unary_function::vec_type, T> + { + __device__ __forceinline__ T operator()(const typename TypeVec::vec_type& src) const + { + return RGB2GrayConvert_8U(src); + } + + __host__ __device__ __forceinline__ RGB2Gray() {} + __host__ __device__ __forceinline__ RGB2Gray(const RGB2Gray&) {} + }; + + template struct RGB2Gray : + unary_function::vec_type, float> + { + __device__ __forceinline__ float operator()(const typename TypeVec::vec_type& src) const + { + return RGB2GrayConvert_32F(src); + } + + __host__ __device__ __forceinline__ RGB2Gray() {} + __host__ __device__ __forceinline__ RGB2Gray(const RGB2Gray&) {} + }; + + template struct RGB2Gray : unary_function + { + __device__ __forceinline__ uchar operator()(uint src) const + { + return RGB2GrayConvert_8UC4(src); + } + + __host__ __device__ __forceinline__ RGB2Gray() {} + __host__ __device__ __forceinline__ RGB2Gray(const RGB2Gray&) {} + }; + } + +#define OPENCV_GPU_IMPLEMENT_RGB2GRAY_TRAITS(name, scn, bidx) \ + template struct name ## _traits \ + { \ + typedef ::cv::gpu::device::color_detail::RGB2Gray functor_type; \ + static __host__ __device__ __forceinline__ functor_type create_functor() \ + { \ + return functor_type(); \ + } \ + }; + +///////////////////////////////////// RGB <-> YUV ////////////////////////////////////// + + namespace color_detail + { + __constant__ float c_RGB2YUVCoeffs_f[5] = { 0.114f, 0.587f, 0.299f, 0.492f, 0.877f }; + __constant__ int c_RGB2YUVCoeffs_i[5] = { B2Y, G2Y, R2Y, 8061, 14369 }; + + template static __device__ void RGB2YUVConvert(const T& src, D& dst) + { + const int delta = ColorChannel::elem_type>::half() * (1 << yuv_shift); + + const int b = bidx == 0 ? src.x : src.z; + const int g = src.y; + const int r = bidx == 0 ? src.z : src.x; + + const int Y = CV_DESCALE(b * c_RGB2YUVCoeffs_i[2] + g * c_RGB2YUVCoeffs_i[1] + r * c_RGB2YUVCoeffs_i[0], yuv_shift); + const int Cr = CV_DESCALE((r - Y) * c_RGB2YUVCoeffs_i[3] + delta, yuv_shift); + const int Cb = CV_DESCALE((b - Y) * c_RGB2YUVCoeffs_i[4] + delta, yuv_shift); + + dst.x = saturate_cast::elem_type>(Y); + dst.y = saturate_cast::elem_type>(Cr); + dst.z = saturate_cast::elem_type>(Cb); + } + + template static __device__ __forceinline__ void RGB2YUVConvert_32F(const T& src, D& dst) + { + const float b = bidx == 0 ? src.x : src.z; + const float g = src.y; + const float r = bidx == 0 ? 
src.z : src.x; + + dst.x = b * c_RGB2YUVCoeffs_f[2] + g * c_RGB2YUVCoeffs_f[1] + r * c_RGB2YUVCoeffs_f[0]; + dst.y = (r - dst.x) * c_RGB2YUVCoeffs_f[3] + ColorChannel::half(); + dst.z = (b - dst.x) * c_RGB2YUVCoeffs_f[4] + ColorChannel::half(); + } + + template struct RGB2YUV + : unary_function::vec_type, typename TypeVec::vec_type> + { + __device__ __forceinline__ typename TypeVec::vec_type operator ()(const typename TypeVec::vec_type& src) const + { + typename TypeVec::vec_type dst; + RGB2YUVConvert(src, dst); + return dst; + } + + __host__ __device__ __forceinline__ RGB2YUV() {} + __host__ __device__ __forceinline__ RGB2YUV(const RGB2YUV&) {} + }; + + template struct RGB2YUV + : unary_function::vec_type, typename TypeVec::vec_type> + { + __device__ __forceinline__ typename TypeVec::vec_type operator ()(const typename TypeVec::vec_type& src) const + { + typename TypeVec::vec_type dst; + RGB2YUVConvert_32F(src, dst); + return dst; + } + + __host__ __device__ __forceinline__ RGB2YUV() {} + __host__ __device__ __forceinline__ RGB2YUV(const RGB2YUV&) {} + }; + } + +#define OPENCV_GPU_IMPLEMENT_RGB2YUV_TRAITS(name, scn, dcn, bidx) \ + template struct name ## _traits \ + { \ + typedef ::cv::gpu::device::color_detail::RGB2YUV functor_type; \ + static __host__ __device__ __forceinline__ functor_type create_functor() \ + { \ + return functor_type(); \ + } \ + }; + + namespace color_detail + { + __constant__ float c_YUV2RGBCoeffs_f[5] = { 2.032f, -0.395f, -0.581f, 1.140f }; + __constant__ int c_YUV2RGBCoeffs_i[5] = { 33292, -6472, -9519, 18678 }; + + template static __device__ void YUV2RGBConvert(const T& src, D& dst) + { + const int b = src.x + CV_DESCALE((src.z - ColorChannel::elem_type>::half()) * c_YUV2RGBCoeffs_i[3], yuv_shift); + + const int g = src.x + CV_DESCALE((src.z - ColorChannel::elem_type>::half()) * c_YUV2RGBCoeffs_i[2] + + (src.y - ColorChannel::elem_type>::half()) * c_YUV2RGBCoeffs_i[1], yuv_shift); + + const int r = src.x + CV_DESCALE((src.y - ColorChannel::elem_type>::half()) * c_YUV2RGBCoeffs_i[0], yuv_shift); + + (bidx == 0 ? dst.x : dst.z) = saturate_cast::elem_type>(b); + dst.y = saturate_cast::elem_type>(g); + (bidx == 0 ? dst.z : dst.x) = saturate_cast::elem_type>(r); + } + + template static __device__ uint YUV2RGBConvert_8UC4(uint src) + { + const int x = 0xff & (src); + const int y = 0xff & (src >> 8); + const int z = 0xff & (src >> 16); + + const int b = x + CV_DESCALE((z - ColorChannel::half()) * c_YUV2RGBCoeffs_i[3], yuv_shift); + + const int g = x + CV_DESCALE((z - ColorChannel::half()) * c_YUV2RGBCoeffs_i[2] + + (y - ColorChannel::half()) * c_YUV2RGBCoeffs_i[1], yuv_shift); + + const int r = x + CV_DESCALE((y - ColorChannel::half()) * c_YUV2RGBCoeffs_i[0], yuv_shift); + + uint dst = 0xffu << 24; + + dst |= saturate_cast(b) << (bidx * 8); + dst |= saturate_cast(g) << 8; + dst |= saturate_cast(r) << ((bidx ^ 2) * 8); + + return dst; + } + + template static __device__ __forceinline__ void YUV2RGBConvert_32F(const T& src, D& dst) + { + (bidx == 0 ? dst.x : dst.z) = src.x + (src.z - ColorChannel::half()) * c_YUV2RGBCoeffs_f[3]; + + dst.y = src.x + (src.z - ColorChannel::half()) * c_YUV2RGBCoeffs_f[2] + + (src.y - ColorChannel::half()) * c_YUV2RGBCoeffs_f[1]; + + (bidx == 0 ? 
dst.z : dst.x) = src.x + (src.y - ColorChannel::half()) * c_YUV2RGBCoeffs_f[0]; + } + + template struct YUV2RGB + : unary_function::vec_type, typename TypeVec::vec_type> + { + __device__ __forceinline__ typename TypeVec::vec_type operator ()(const typename TypeVec::vec_type& src) const + { + typename TypeVec::vec_type dst; + + YUV2RGBConvert(src, dst); + setAlpha(dst, ColorChannel::max()); + + return dst; + } + + __host__ __device__ __forceinline__ YUV2RGB() {} + __host__ __device__ __forceinline__ YUV2RGB(const YUV2RGB&) {} + }; + + template struct YUV2RGB + : unary_function::vec_type, typename TypeVec::vec_type> + { + __device__ __forceinline__ typename TypeVec::vec_type operator ()(const typename TypeVec::vec_type& src) const + { + typename TypeVec::vec_type dst; + + YUV2RGBConvert_32F(src, dst); + setAlpha(dst, ColorChannel::max()); + + return dst; + } + + __host__ __device__ __forceinline__ YUV2RGB() {} + __host__ __device__ __forceinline__ YUV2RGB(const YUV2RGB&) {} + }; + + template struct YUV2RGB : unary_function + { + __device__ __forceinline__ uint operator ()(uint src) const + { + return YUV2RGBConvert_8UC4(src); + } + + __host__ __device__ __forceinline__ YUV2RGB() {} + __host__ __device__ __forceinline__ YUV2RGB(const YUV2RGB&) {} + }; + } + +#define OPENCV_GPU_IMPLEMENT_YUV2RGB_TRAITS(name, scn, dcn, bidx) \ + template struct name ## _traits \ + { \ + typedef ::cv::gpu::device::color_detail::YUV2RGB functor_type; \ + static __host__ __device__ __forceinline__ functor_type create_functor() \ + { \ + return functor_type(); \ + } \ + }; + +///////////////////////////////////// RGB <-> YCrCb ////////////////////////////////////// + + namespace color_detail + { + __constant__ float c_RGB2YCrCbCoeffs_f[5] = {0.299f, 0.587f, 0.114f, 0.713f, 0.564f}; + __constant__ int c_RGB2YCrCbCoeffs_i[5] = {R2Y, G2Y, B2Y, 11682, 9241}; + + template static __device__ void RGB2YCrCbConvert(const T& src, D& dst) + { + const int delta = ColorChannel::elem_type>::half() * (1 << yuv_shift); + + const int b = bidx == 0 ? src.x : src.z; + const int g = src.y; + const int r = bidx == 0 ? src.z : src.x; + + const int Y = CV_DESCALE(b * c_RGB2YCrCbCoeffs_i[2] + g * c_RGB2YCrCbCoeffs_i[1] + r * c_RGB2YCrCbCoeffs_i[0], yuv_shift); + const int Cr = CV_DESCALE((r - Y) * c_RGB2YCrCbCoeffs_i[3] + delta, yuv_shift); + const int Cb = CV_DESCALE((b - Y) * c_RGB2YCrCbCoeffs_i[4] + delta, yuv_shift); + + dst.x = saturate_cast::elem_type>(Y); + dst.y = saturate_cast::elem_type>(Cr); + dst.z = saturate_cast::elem_type>(Cb); + } + + template static __device__ uint RGB2YCrCbConvert_8UC4(uint src) + { + const int delta = ColorChannel::half() * (1 << yuv_shift); + + const int Y = CV_DESCALE((0xffu & src) * c_RGB2YCrCbCoeffs_i[bidx^2] + (0xffu & (src >> 8)) * c_RGB2YCrCbCoeffs_i[1] + (0xffu & (src >> 16)) * c_RGB2YCrCbCoeffs_i[bidx], yuv_shift); + const int Cr = CV_DESCALE(((0xffu & (src >> ((bidx ^ 2) * 8))) - Y) * c_RGB2YCrCbCoeffs_i[3] + delta, yuv_shift); + const int Cb = CV_DESCALE(((0xffu & (src >> (bidx * 8))) - Y) * c_RGB2YCrCbCoeffs_i[4] + delta, yuv_shift); + + uint dst = 0; + + dst |= saturate_cast(Y); + dst |= saturate_cast(Cr) << 8; + dst |= saturate_cast(Cb) << 16; + + return dst; + } + + template static __device__ __forceinline__ void RGB2YCrCbConvert_32F(const T& src, D& dst) + { + const float b = bidx == 0 ? src.x : src.z; + const float g = src.y; + const float r = bidx == 0 ? 
src.z : src.x; + + dst.x = b * c_RGB2YCrCbCoeffs_f[2] + g * c_RGB2YCrCbCoeffs_f[1] + r * c_RGB2YCrCbCoeffs_f[0]; + dst.y = (r - dst.x) * c_RGB2YCrCbCoeffs_f[3] + ColorChannel::half(); + dst.z = (b - dst.x) * c_RGB2YCrCbCoeffs_f[4] + ColorChannel::half(); + } + + template struct RGB2YCrCb + : unary_function::vec_type, typename TypeVec::vec_type> + { + __device__ __forceinline__ typename TypeVec::vec_type operator ()(const typename TypeVec::vec_type& src) const + { + typename TypeVec::vec_type dst; + RGB2YCrCbConvert(src, dst); + return dst; + } + + __host__ __device__ __forceinline__ RGB2YCrCb() {} + __host__ __device__ __forceinline__ RGB2YCrCb(const RGB2YCrCb&) {} + }; + + template struct RGB2YCrCb + : unary_function::vec_type, typename TypeVec::vec_type> + { + __device__ __forceinline__ typename TypeVec::vec_type operator ()(const typename TypeVec::vec_type& src) const + { + typename TypeVec::vec_type dst; + RGB2YCrCbConvert_32F(src, dst); + return dst; + } + + __host__ __device__ __forceinline__ RGB2YCrCb() {} + __host__ __device__ __forceinline__ RGB2YCrCb(const RGB2YCrCb&) {} + }; + + template struct RGB2YCrCb : unary_function + { + __device__ __forceinline__ uint operator ()(uint src) const + { + return RGB2YCrCbConvert_8UC4(src); + } + + __host__ __device__ __forceinline__ RGB2YCrCb() {} + __host__ __device__ __forceinline__ RGB2YCrCb(const RGB2YCrCb&) {} + }; + } + +#define OPENCV_GPU_IMPLEMENT_RGB2YCrCb_TRAITS(name, scn, dcn, bidx) \ + template struct name ## _traits \ + { \ + typedef ::cv::gpu::device::color_detail::RGB2YCrCb functor_type; \ + static __host__ __device__ __forceinline__ functor_type create_functor() \ + { \ + return functor_type(); \ + } \ + }; + + namespace color_detail + { + __constant__ float c_YCrCb2RGBCoeffs_f[5] = {1.403f, -0.714f, -0.344f, 1.773f}; + __constant__ int c_YCrCb2RGBCoeffs_i[5] = {22987, -11698, -5636, 29049}; + + template static __device__ void YCrCb2RGBConvert(const T& src, D& dst) + { + const int b = src.x + CV_DESCALE((src.z - ColorChannel::elem_type>::half()) * c_YCrCb2RGBCoeffs_i[3], yuv_shift); + const int g = src.x + CV_DESCALE((src.z - ColorChannel::elem_type>::half()) * c_YCrCb2RGBCoeffs_i[2] + + (src.y - ColorChannel::elem_type>::half()) * c_YCrCb2RGBCoeffs_i[1], yuv_shift); + const int r = src.x + CV_DESCALE((src.y - ColorChannel::elem_type>::half()) * c_YCrCb2RGBCoeffs_i[0], yuv_shift); + + (bidx == 0 ? dst.x : dst.z) = saturate_cast::elem_type>(b); + dst.y = saturate_cast::elem_type>(g); + (bidx == 0 ? dst.z : dst.x) = saturate_cast::elem_type>(r); + } + + template static __device__ uint YCrCb2RGBConvert_8UC4(uint src) + { + const int x = 0xff & (src); + const int y = 0xff & (src >> 8); + const int z = 0xff & (src >> 16); + + const int b = x + CV_DESCALE((z - ColorChannel::half()) * c_YCrCb2RGBCoeffs_i[3], yuv_shift); + const int g = x + CV_DESCALE((z - ColorChannel::half()) * c_YCrCb2RGBCoeffs_i[2] + (y - ColorChannel::half()) * c_YCrCb2RGBCoeffs_i[1], yuv_shift); + const int r = x + CV_DESCALE((y - ColorChannel::half()) * c_YCrCb2RGBCoeffs_i[0], yuv_shift); + + uint dst = 0xffu << 24; + + dst |= saturate_cast(b) << (bidx * 8); + dst |= saturate_cast(g) << 8; + dst |= saturate_cast(r) << ((bidx ^ 2) * 8); + + return dst; + } + + template __device__ __forceinline__ void YCrCb2RGBConvert_32F(const T& src, D& dst) + { + (bidx == 0 ? 
dst.x : dst.z) = src.x + (src.z - ColorChannel::half()) * c_YCrCb2RGBCoeffs_f[3]; + dst.y = src.x + (src.z - ColorChannel::half()) * c_YCrCb2RGBCoeffs_f[2] + (src.y - ColorChannel::half()) * c_YCrCb2RGBCoeffs_f[1]; + (bidx == 0 ? dst.z : dst.x) = src.x + (src.y - ColorChannel::half()) * c_YCrCb2RGBCoeffs_f[0]; + } + + template struct YCrCb2RGB + : unary_function::vec_type, typename TypeVec::vec_type> + { + __device__ __forceinline__ typename TypeVec::vec_type operator ()(const typename TypeVec::vec_type& src) const + { + typename TypeVec::vec_type dst; + + YCrCb2RGBConvert(src, dst); + setAlpha(dst, ColorChannel::max()); + + return dst; + } + + __host__ __device__ __forceinline__ YCrCb2RGB() {} + __host__ __device__ __forceinline__ YCrCb2RGB(const YCrCb2RGB&) {} + }; + + template struct YCrCb2RGB + : unary_function::vec_type, typename TypeVec::vec_type> + { + __device__ __forceinline__ typename TypeVec::vec_type operator ()(const typename TypeVec::vec_type& src) const + { + typename TypeVec::vec_type dst; + + YCrCb2RGBConvert_32F(src, dst); + setAlpha(dst, ColorChannel::max()); + + return dst; + } + + __host__ __device__ __forceinline__ YCrCb2RGB() {} + __host__ __device__ __forceinline__ YCrCb2RGB(const YCrCb2RGB&) {} + }; + + template struct YCrCb2RGB : unary_function + { + __device__ __forceinline__ uint operator ()(uint src) const + { + return YCrCb2RGBConvert_8UC4(src); + } + + __host__ __device__ __forceinline__ YCrCb2RGB() {} + __host__ __device__ __forceinline__ YCrCb2RGB(const YCrCb2RGB&) {} + }; + } + +#define OPENCV_GPU_IMPLEMENT_YCrCb2RGB_TRAITS(name, scn, dcn, bidx) \ + template struct name ## _traits \ + { \ + typedef ::cv::gpu::device::color_detail::YCrCb2RGB functor_type; \ + static __host__ __device__ __forceinline__ functor_type create_functor() \ + { \ + return functor_type(); \ + } \ + }; + +////////////////////////////////////// RGB <-> XYZ /////////////////////////////////////// + + namespace color_detail + { + __constant__ float c_RGB2XYZ_D65f[9] = { 0.412453f, 0.357580f, 0.180423f, 0.212671f, 0.715160f, 0.072169f, 0.019334f, 0.119193f, 0.950227f }; + __constant__ int c_RGB2XYZ_D65i[9] = { 1689, 1465, 739, 871, 2929, 296, 79, 488, 3892 }; + + template static __device__ __forceinline__ void RGB2XYZConvert_8U(const T& src, D& dst) + { + const uint b = bidx == 0 ? src.x : src.z; + const uint g = src.y; + const uint r = bidx == 0 ? 
src.z : src.x; + + dst.x = saturate_cast::elem_type>(CV_DESCALE(r * c_RGB2XYZ_D65i[0] + g * c_RGB2XYZ_D65i[1] + b * c_RGB2XYZ_D65i[2], xyz_shift)); + dst.y = saturate_cast::elem_type>(CV_DESCALE(r * c_RGB2XYZ_D65i[3] + g * c_RGB2XYZ_D65i[4] + b * c_RGB2XYZ_D65i[5], xyz_shift)); + dst.z = saturate_cast::elem_type>(CV_DESCALE(r * c_RGB2XYZ_D65i[6] + g * c_RGB2XYZ_D65i[7] + b * c_RGB2XYZ_D65i[8], xyz_shift)); + } + + template static __device__ __forceinline__ uint RGB2XYZConvert_8UC4(uint src) + { + const uint b = 0xffu & (src >> (bidx * 8)); + const uint g = 0xffu & (src >> 8); + const uint r = 0xffu & (src >> ((bidx ^ 2) * 8)); + + const uint x = saturate_cast(CV_DESCALE(r * c_RGB2XYZ_D65i[0] + g * c_RGB2XYZ_D65i[1] + b * c_RGB2XYZ_D65i[2], xyz_shift)); + const uint y = saturate_cast(CV_DESCALE(r * c_RGB2XYZ_D65i[3] + g * c_RGB2XYZ_D65i[4] + b * c_RGB2XYZ_D65i[5], xyz_shift)); + const uint z = saturate_cast(CV_DESCALE(r * c_RGB2XYZ_D65i[6] + g * c_RGB2XYZ_D65i[7] + b * c_RGB2XYZ_D65i[8], xyz_shift)); + + uint dst = 0; + + dst |= x; + dst |= y << 8; + dst |= z << 16; + + return dst; + } + + template static __device__ __forceinline__ void RGB2XYZConvert_32F(const T& src, D& dst) + { + const float b = bidx == 0 ? src.x : src.z; + const float g = src.y; + const float r = bidx == 0 ? src.z : src.x; + + dst.x = r * c_RGB2XYZ_D65f[0] + g * c_RGB2XYZ_D65f[1] + b * c_RGB2XYZ_D65f[2]; + dst.y = r * c_RGB2XYZ_D65f[3] + g * c_RGB2XYZ_D65f[4] + b * c_RGB2XYZ_D65f[5]; + dst.z = r * c_RGB2XYZ_D65f[6] + g * c_RGB2XYZ_D65f[7] + b * c_RGB2XYZ_D65f[8]; + } + + template struct RGB2XYZ + : unary_function::vec_type, typename TypeVec::vec_type> + { + __device__ __forceinline__ typename TypeVec::vec_type operator()(const typename TypeVec::vec_type& src) const + { + typename TypeVec::vec_type dst; + + RGB2XYZConvert_8U(src, dst); + + return dst; + } + + __host__ __device__ __forceinline__ RGB2XYZ() {} + __host__ __device__ __forceinline__ RGB2XYZ(const RGB2XYZ&) {} + }; + + template struct RGB2XYZ + : unary_function::vec_type, typename TypeVec::vec_type> + { + __device__ __forceinline__ typename TypeVec::vec_type operator()(const typename TypeVec::vec_type& src) const + { + typename TypeVec::vec_type dst; + + RGB2XYZConvert_32F(src, dst); + + return dst; + } + + __host__ __device__ __forceinline__ RGB2XYZ() {} + __host__ __device__ __forceinline__ RGB2XYZ(const RGB2XYZ&) {} + }; + + template struct RGB2XYZ : unary_function + { + __device__ __forceinline__ uint operator()(uint src) const + { + return RGB2XYZConvert_8UC4(src); + } + + __host__ __device__ __forceinline__ RGB2XYZ() {} + __host__ __device__ __forceinline__ RGB2XYZ(const RGB2XYZ&) {} + }; + } + +#define OPENCV_GPU_IMPLEMENT_RGB2XYZ_TRAITS(name, scn, dcn, bidx) \ + template struct name ## _traits \ + { \ + typedef ::cv::gpu::device::color_detail::RGB2XYZ functor_type; \ + static __host__ __device__ __forceinline__ functor_type create_functor() \ + { \ + return functor_type(); \ + } \ + }; + + namespace color_detail + { + __constant__ float c_XYZ2sRGB_D65f[9] = { 3.240479f, -1.53715f, -0.498535f, -0.969256f, 1.875991f, 0.041556f, 0.055648f, -0.204043f, 1.057311f }; + __constant__ int c_XYZ2sRGB_D65i[9] = { 13273, -6296, -2042, -3970, 7684, 170, 228, -836, 4331 }; + + template static __device__ __forceinline__ void XYZ2RGBConvert_8U(const T& src, D& dst) + { + (bidx == 0 ? 
dst.z : dst.x) = saturate_cast::elem_type>(CV_DESCALE(src.x * c_XYZ2sRGB_D65i[0] + src.y * c_XYZ2sRGB_D65i[1] + src.z * c_XYZ2sRGB_D65i[2], xyz_shift)); + dst.y = saturate_cast::elem_type>(CV_DESCALE(src.x * c_XYZ2sRGB_D65i[3] + src.y * c_XYZ2sRGB_D65i[4] + src.z * c_XYZ2sRGB_D65i[5], xyz_shift)); + (bidx == 0 ? dst.x : dst.z) = saturate_cast::elem_type>(CV_DESCALE(src.x * c_XYZ2sRGB_D65i[6] + src.y * c_XYZ2sRGB_D65i[7] + src.z * c_XYZ2sRGB_D65i[8], xyz_shift)); + } + + template static __device__ __forceinline__ uint XYZ2RGBConvert_8UC4(uint src) + { + const int x = 0xff & src; + const int y = 0xff & (src >> 8); + const int z = 0xff & (src >> 16); + + const uint r = saturate_cast(CV_DESCALE(x * c_XYZ2sRGB_D65i[0] + y * c_XYZ2sRGB_D65i[1] + z * c_XYZ2sRGB_D65i[2], xyz_shift)); + const uint g = saturate_cast(CV_DESCALE(x * c_XYZ2sRGB_D65i[3] + y * c_XYZ2sRGB_D65i[4] + z * c_XYZ2sRGB_D65i[5], xyz_shift)); + const uint b = saturate_cast(CV_DESCALE(x * c_XYZ2sRGB_D65i[6] + y * c_XYZ2sRGB_D65i[7] + z * c_XYZ2sRGB_D65i[8], xyz_shift)); + + uint dst = 0xffu << 24; + + dst |= b << (bidx * 8); + dst |= g << 8; + dst |= r << ((bidx ^ 2) * 8); + + return dst; + } + + template static __device__ __forceinline__ void XYZ2RGBConvert_32F(const T& src, D& dst) + { + (bidx == 0 ? dst.z : dst.x) = src.x * c_XYZ2sRGB_D65f[0] + src.y * c_XYZ2sRGB_D65f[1] + src.z * c_XYZ2sRGB_D65f[2]; + dst.y = src.x * c_XYZ2sRGB_D65f[3] + src.y * c_XYZ2sRGB_D65f[4] + src.z * c_XYZ2sRGB_D65f[5]; + (bidx == 0 ? dst.x : dst.z) = src.x * c_XYZ2sRGB_D65f[6] + src.y * c_XYZ2sRGB_D65f[7] + src.z * c_XYZ2sRGB_D65f[8]; + } + + template struct XYZ2RGB + : unary_function::vec_type, typename TypeVec::vec_type> + { + __device__ __forceinline__ typename TypeVec::vec_type operator()(const typename TypeVec::vec_type& src) const + { + typename TypeVec::vec_type dst; + + XYZ2RGBConvert_8U(src, dst); + setAlpha(dst, ColorChannel::max()); + + return dst; + } + + __host__ __device__ __forceinline__ XYZ2RGB() {} + __host__ __device__ __forceinline__ XYZ2RGB(const XYZ2RGB&) {} + }; + + template struct XYZ2RGB + : unary_function::vec_type, typename TypeVec::vec_type> + { + __device__ __forceinline__ typename TypeVec::vec_type operator()(const typename TypeVec::vec_type& src) const + { + typename TypeVec::vec_type dst; + + XYZ2RGBConvert_32F(src, dst); + setAlpha(dst, ColorChannel::max()); + + return dst; + } + + __host__ __device__ __forceinline__ XYZ2RGB() {} + __host__ __device__ __forceinline__ XYZ2RGB(const XYZ2RGB&) {} + }; + + template struct XYZ2RGB : unary_function + { + __device__ __forceinline__ uint operator()(uint src) const + { + return XYZ2RGBConvert_8UC4(src); + } + + __host__ __device__ __forceinline__ XYZ2RGB() {} + __host__ __device__ __forceinline__ XYZ2RGB(const XYZ2RGB&) {} + }; + } + +#define OPENCV_GPU_IMPLEMENT_XYZ2RGB_TRAITS(name, scn, dcn, bidx) \ + template struct name ## _traits \ + { \ + typedef ::cv::gpu::device::color_detail::XYZ2RGB functor_type; \ + static __host__ __device__ __forceinline__ functor_type create_functor() \ + { \ + return functor_type(); \ + } \ + }; + +////////////////////////////////////// RGB <-> HSV /////////////////////////////////////// + + namespace color_detail + { + __constant__ int c_HsvDivTable [256] = {0, 1044480, 522240, 348160, 261120, 208896, 174080, 149211, 130560, 116053, 104448, 94953, 87040, 80345, 74606, 69632, 65280, 61440, 58027, 54973, 52224, 49737, 47476, 45412, 43520, 41779, 40172, 38684, 37303, 36017, 34816, 33693, 32640, 31651, 30720, 29842, 29013, 28229, 27486, 26782, 26112, 
25475, 24869, 24290, 23738, 23211, 22706, 22223, 21760, 21316, 20890, 20480, 20086, 19707, 19342, 18991, 18651, 18324, 18008, 17703, 17408, 17123, 16846, 16579, 16320, 16069, 15825, 15589, 15360, 15137, 14921, 14711, 14507, 14308, 14115, 13926, 13743, 13565, 13391, 13221, 13056, 12895, 12738, 12584, 12434, 12288, 12145, 12006, 11869, 11736, 11605, 11478, 11353, 11231, 11111, 10995, 10880, 10768, 10658, 10550, 10445, 10341, 10240, 10141, 10043, 9947, 9854, 9761, 9671, 9582, 9495, 9410, 9326, 9243, 9162, 9082, 9004, 8927, 8852, 8777, 8704, 8632, 8561, 8492, 8423, 8356, 8290, 8224, 8160, 8097, 8034, 7973, 7913, 7853, 7795, 7737, 7680, 7624, 7569, 7514, 7461, 7408, 7355, 7304, 7253, 7203, 7154, 7105, 7057, 7010, 6963, 6917, 6872, 6827, 6782, 6739, 6695, 6653, 6611, 6569, 6528, 6487, 6447, 6408, 6369, 6330, 6292, 6254, 6217, 6180, 6144, 6108, 6073, 6037, 6003, 5968, 5935, 5901, 5868, 5835, 5803, 5771, 5739, 5708, 5677, 5646, 5615, 5585, 5556, 5526, 5497, 5468, 5440, 5412, 5384, 5356, 5329, 5302, 5275, 5249, 5222, 5196, 5171, 5145, 5120, 5095, 5070, 5046, 5022, 4998, 4974, 4950, 4927, 4904, 4881, 4858, 4836, 4813, 4791, 4769, 4748, 4726, 4705, 4684, 4663, 4642, 4622, 4601, 4581, 4561, 4541, 4522, 4502, 4483, 4464, 4445, 4426, 4407, 4389, 4370, 4352, 4334, 4316, 4298, 4281, 4263, 4246, 4229, 4212, 4195, 4178, 4161, 4145, 4128, 4112, 4096}; + __constant__ int c_HsvDivTable180[256] = {0, 122880, 61440, 40960, 30720, 24576, 20480, 17554, 15360, 13653, 12288, 11171, 10240, 9452, 8777, 8192, 7680, 7228, 6827, 6467, 6144, 5851, 5585, 5343, 5120, 4915, 4726, 4551, 4389, 4237, 4096, 3964, 3840, 3724, 3614, 3511, 3413, 3321, 3234, 3151, 3072, 2997, 2926, 2858, 2793, 2731, 2671, 2614, 2560, 2508, 2458, 2409, 2363, 2318, 2276, 2234, 2194, 2156, 2119, 2083, 2048, 2014, 1982, 1950, 1920, 1890, 1862, 1834, 1807, 1781, 1755, 1731, 1707, 1683, 1661, 1638, 1617, 1596, 1575, 1555, 1536, 1517, 1499, 1480, 1463, 1446, 1429, 1412, 1396, 1381, 1365, 1350, 1336, 1321, 1307, 1293, 1280, 1267, 1254, 1241, 1229, 1217, 1205, 1193, 1182, 1170, 1159, 1148, 1138, 1127, 1117, 1107, 1097, 1087, 1078, 1069, 1059, 1050, 1041, 1033, 1024, 1016, 1007, 999, 991, 983, 975, 968, 960, 953, 945, 938, 931, 924, 917, 910, 904, 897, 890, 884, 878, 871, 865, 859, 853, 847, 842, 836, 830, 825, 819, 814, 808, 803, 798, 793, 788, 783, 778, 773, 768, 763, 759, 754, 749, 745, 740, 736, 731, 727, 723, 719, 714, 710, 706, 702, 698, 694, 690, 686, 683, 679, 675, 671, 668, 664, 661, 657, 654, 650, 647, 643, 640, 637, 633, 630, 627, 624, 621, 617, 614, 611, 608, 605, 602, 599, 597, 594, 591, 588, 585, 582, 580, 577, 574, 572, 569, 566, 564, 561, 559, 556, 554, 551, 549, 546, 544, 541, 539, 537, 534, 532, 530, 527, 525, 523, 521, 518, 516, 514, 512, 510, 508, 506, 504, 502, 500, 497, 495, 493, 492, 490, 488, 486, 484, 482}; + __constant__ int c_HsvDivTable256[256] = {0, 174763, 87381, 58254, 43691, 34953, 29127, 24966, 21845, 19418, 17476, 15888, 14564, 13443, 12483, 11651, 10923, 10280, 9709, 9198, 8738, 8322, 7944, 7598, 7282, 6991, 6722, 6473, 6242, 6026, 5825, 5638, 5461, 5296, 5140, 4993, 4855, 4723, 4599, 4481, 4369, 4263, 4161, 4064, 3972, 3884, 3799, 3718, 3641, 3567, 3495, 3427, 3361, 3297, 3236, 3178, 3121, 3066, 3013, 2962, 2913, 2865, 2819, 2774, 2731, 2689, 2648, 2608, 2570, 2533, 2497, 2461, 2427, 2394, 2362, 2330, 2300, 2270, 2241, 2212, 2185, 2158, 2131, 2106, 2081, 2056, 2032, 2009, 1986, 1964, 1942, 1920, 1900, 1879, 1859, 1840, 1820, 1802, 1783, 1765, 1748, 1730, 1713, 1697, 1680, 1664, 1649, 1633, 1618, 1603, 1589, 1574, 1560, 
1547, 1533, 1520, 1507, 1494, 1481, 1469, 1456, 1444, 1432, 1421, 1409, 1398, 1387, 1376, 1365, 1355, 1344, 1334, 1324, 1314, 1304, 1295, 1285, 1276, 1266, 1257, 1248, 1239, 1231, 1222, 1214, 1205, 1197, 1189, 1181, 1173, 1165, 1157, 1150, 1142, 1135, 1128, 1120, 1113, 1106, 1099, 1092, 1085, 1079, 1072, 1066, 1059, 1053, 1046, 1040, 1034, 1028, 1022, 1016, 1010, 1004, 999, 993, 987, 982, 976, 971, 966, 960, 955, 950, 945, 940, 935, 930, 925, 920, 915, 910, 906, 901, 896, 892, 887, 883, 878, 874, 869, 865, 861, 857, 853, 848, 844, 840, 836, 832, 828, 824, 820, 817, 813, 809, 805, 802, 798, 794, 791, 787, 784, 780, 777, 773, 770, 767, 763, 760, 757, 753, 750, 747, 744, 741, 737, 734, 731, 728, 725, 722, 719, 716, 713, 710, 708, 705, 702, 699, 696, 694, 691, 688, 685}; + + template static __device__ void RGB2HSVConvert_8U(const T& src, D& dst) + { + const int hsv_shift = 12; + const int* hdiv_table = hr == 180 ? c_HsvDivTable180 : c_HsvDivTable256; + + int b = bidx == 0 ? src.x : src.z; + int g = src.y; + int r = bidx == 0 ? src.z : src.x; + + int h, s, v = b; + int vmin = b, diff; + int vr, vg; + + v = ::max(v, g); + v = ::max(v, r); + vmin = ::min(vmin, g); + vmin = ::min(vmin, r); + + diff = v - vmin; + vr = (v == r) * -1; + vg = (v == g) * -1; + + s = (diff * c_HsvDivTable[v] + (1 << (hsv_shift-1))) >> hsv_shift; + h = (vr & (g - b)) + (~vr & ((vg & (b - r + 2 * diff)) + ((~vg) & (r - g + 4 * diff)))); + h = (h * hdiv_table[diff] + (1 << (hsv_shift-1))) >> hsv_shift; + h += (h < 0) * hr; + + dst.x = saturate_cast(h); + dst.y = (uchar)s; + dst.z = (uchar)v; + } + + template static __device__ uint RGB2HSVConvert_8UC4(uint src) + { + const int hsv_shift = 12; + const int* hdiv_table = hr == 180 ? c_HsvDivTable180 : c_HsvDivTable256; + + const int b = 0xff & (src >> (bidx * 8)); + const int g = 0xff & (src >> 8); + const int r = 0xff & (src >> ((bidx ^ 2) * 8)); + + int h, s, v = b; + int vmin = b, diff; + int vr, vg; + + v = ::max(v, g); + v = ::max(v, r); + vmin = ::min(vmin, g); + vmin = ::min(vmin, r); + + diff = v - vmin; + vr = (v == r) * -1; + vg = (v == g) * -1; + + s = (diff * c_HsvDivTable[v] + (1 << (hsv_shift-1))) >> hsv_shift; + h = (vr & (g - b)) + (~vr & ((vg & (b - r + 2 * diff)) + ((~vg) & (r - g + 4 * diff)))); + h = (h * hdiv_table[diff] + (1 << (hsv_shift-1))) >> hsv_shift; + h += (h < 0) * hr; + + uint dst = 0; + + dst |= saturate_cast(h); + dst |= (0xffu & s) << 8; + dst |= (0xffu & v) << 16; + + return dst; + } + + template static __device__ void RGB2HSVConvert_32F(const T& src, D& dst) + { + const float hscale = hr * (1.f / 360.f); + + float b = bidx == 0 ? src.x : src.z; + float g = src.y; + float r = bidx == 0 ? src.z : src.x; + + float h, s, v; + + float vmin, diff; + + v = vmin = r; + v = fmax(v, g); + v = fmax(v, b); + vmin = fmin(vmin, g); + vmin = fmin(vmin, b); + + diff = v - vmin; + s = diff / (float)(::fabs(v) + numeric_limits::epsilon()); + diff = (float)(60. 
/ (diff + numeric_limits::epsilon())); + + h = (v == r) * (g - b) * diff; + h += (v != r && v == g) * ((b - r) * diff + 120.f); + h += (v != r && v != g) * ((r - g) * diff + 240.f); + h += (h < 0) * 360.f; + + dst.x = h * hscale; + dst.y = s; + dst.z = v; + } + + template struct RGB2HSV + : unary_function::vec_type, typename TypeVec::vec_type> + { + __device__ __forceinline__ typename TypeVec::vec_type operator()(const typename TypeVec::vec_type& src) const + { + typename TypeVec::vec_type dst; + + RGB2HSVConvert_8U(src, dst); + + return dst; + } + + __host__ __device__ __forceinline__ RGB2HSV() {} + __host__ __device__ __forceinline__ RGB2HSV(const RGB2HSV&) {} + }; + + template struct RGB2HSV + : unary_function::vec_type, typename TypeVec::vec_type> + { + __device__ __forceinline__ typename TypeVec::vec_type operator()(const typename TypeVec::vec_type& src) const + { + typename TypeVec::vec_type dst; + + RGB2HSVConvert_32F(src, dst); + + return dst; + } + + __host__ __device__ __forceinline__ RGB2HSV() {} + __host__ __device__ __forceinline__ RGB2HSV(const RGB2HSV&) {} + }; + + template struct RGB2HSV : unary_function + { + __device__ __forceinline__ uint operator()(uint src) const + { + return RGB2HSVConvert_8UC4(src); + } + + __host__ __device__ __forceinline__ RGB2HSV() {} + __host__ __device__ __forceinline__ RGB2HSV(const RGB2HSV&) {} + }; + } + +#define OPENCV_GPU_IMPLEMENT_RGB2HSV_TRAITS(name, scn, dcn, bidx) \ + template struct name ## _traits \ + { \ + typedef ::cv::gpu::device::color_detail::RGB2HSV functor_type; \ + static __host__ __device__ __forceinline__ functor_type create_functor() \ + { \ + return functor_type(); \ + } \ + }; \ + template struct name ## _full_traits \ + { \ + typedef ::cv::gpu::device::color_detail::RGB2HSV functor_type; \ + static __host__ __device__ __forceinline__ functor_type create_functor() \ + { \ + return functor_type(); \ + } \ + }; \ + template <> struct name ## _traits \ + { \ + typedef ::cv::gpu::device::color_detail::RGB2HSV functor_type; \ + static __host__ __device__ __forceinline__ functor_type create_functor() \ + { \ + return functor_type(); \ + } \ + }; \ + template <> struct name ## _full_traits \ + { \ + typedef ::cv::gpu::device::color_detail::RGB2HSV functor_type; \ + static __host__ __device__ __forceinline__ functor_type create_functor() \ + { \ + return functor_type(); \ + } \ + }; + + namespace color_detail + { + __constant__ int c_HsvSectorData[6][3] = { {1,3,0}, {1,0,2}, {3,0,1}, {0,2,1}, {0,1,3}, {2,1,0} }; + + template static __device__ void HSV2RGBConvert_32F(const T& src, D& dst) + { + const float hscale = 6.f / hr; + + float h = src.x, s = src.y, v = src.z; + float b = v, g = v, r = v; + + if (s != 0) + { + h *= hscale; + + if( h < 0 ) + do h += 6; while( h < 0 ); + else if( h >= 6 ) + do h -= 6; while( h >= 6 ); + + int sector = __float2int_rd(h); + h -= sector; + + if ( (unsigned)sector >= 6u ) + { + sector = 0; + h = 0.f; + } + + float tab[4]; + tab[0] = v; + tab[1] = v * (1.f - s); + tab[2] = v * (1.f - s * h); + tab[3] = v * (1.f - s * (1.f - h)); + + b = tab[c_HsvSectorData[sector][0]]; + g = tab[c_HsvSectorData[sector][1]]; + r = tab[c_HsvSectorData[sector][2]]; + } + + dst.x = (bidx == 0 ? b : r); + dst.y = g; + dst.z = (bidx == 0 ? 
r : b); + } + + template static __device__ void HSV2RGBConvert_8U(const T& src, D& dst) + { + float3 buf; + + buf.x = src.x; + buf.y = src.y * (1.f / 255.f); + buf.z = src.z * (1.f / 255.f); + + HSV2RGBConvert_32F(buf, buf); + + dst.x = saturate_cast(buf.x * 255.f); + dst.y = saturate_cast(buf.y * 255.f); + dst.z = saturate_cast(buf.z * 255.f); + } + + template static __device__ uint HSV2RGBConvert_8UC4(uint src) + { + float3 buf; + + buf.x = src & 0xff; + buf.y = ((src >> 8) & 0xff) * (1.f/255.f); + buf.z = ((src >> 16) & 0xff) * (1.f/255.f); + + HSV2RGBConvert_32F(buf, buf); + + uint dst = 0xffu << 24; + + dst |= saturate_cast(buf.x * 255.f); + dst |= saturate_cast(buf.y * 255.f) << 8; + dst |= saturate_cast(buf.z * 255.f) << 16; + + return dst; + } + + template struct HSV2RGB + : unary_function::vec_type, typename TypeVec::vec_type> + { + __device__ __forceinline__ typename TypeVec::vec_type operator()(const typename TypeVec::vec_type& src) const + { + typename TypeVec::vec_type dst; + + HSV2RGBConvert_8U(src, dst); + setAlpha(dst, ColorChannel::max()); + + return dst; + } + + __host__ __device__ __forceinline__ HSV2RGB() {} + __host__ __device__ __forceinline__ HSV2RGB(const HSV2RGB&) {} + }; + + template struct HSV2RGB + : unary_function::vec_type, typename TypeVec::vec_type> + { + __device__ __forceinline__ typename TypeVec::vec_type operator()(const typename TypeVec::vec_type& src) const + { + typename TypeVec::vec_type dst; + + HSV2RGBConvert_32F(src, dst); + setAlpha(dst, ColorChannel::max()); + + return dst; + } + + __host__ __device__ __forceinline__ HSV2RGB() {} + __host__ __device__ __forceinline__ HSV2RGB(const HSV2RGB&) {} + }; + + template struct HSV2RGB : unary_function + { + __device__ __forceinline__ uint operator()(uint src) const + { + return HSV2RGBConvert_8UC4(src); + } + + __host__ __device__ __forceinline__ HSV2RGB() {} + __host__ __device__ __forceinline__ HSV2RGB(const HSV2RGB&) {} + }; + } + +#define OPENCV_GPU_IMPLEMENT_HSV2RGB_TRAITS(name, scn, dcn, bidx) \ + template struct name ## _traits \ + { \ + typedef ::cv::gpu::device::color_detail::HSV2RGB functor_type; \ + static __host__ __device__ __forceinline__ functor_type create_functor() \ + { \ + return functor_type(); \ + } \ + }; \ + template struct name ## _full_traits \ + { \ + typedef ::cv::gpu::device::color_detail::HSV2RGB functor_type; \ + static __host__ __device__ __forceinline__ functor_type create_functor() \ + { \ + return functor_type(); \ + } \ + }; \ + template <> struct name ## _traits \ + { \ + typedef ::cv::gpu::device::color_detail::HSV2RGB functor_type; \ + static __host__ __device__ __forceinline__ functor_type create_functor() \ + { \ + return functor_type(); \ + } \ + }; \ + template <> struct name ## _full_traits \ + { \ + typedef ::cv::gpu::device::color_detail::HSV2RGB functor_type; \ + static __host__ __device__ __forceinline__ functor_type create_functor() \ + { \ + return functor_type(); \ + } \ + }; + +/////////////////////////////////////// RGB <-> HLS //////////////////////////////////////// + + namespace color_detail + { + template static __device__ void RGB2HLSConvert_32F(const T& src, D& dst) + { + const float hscale = hr * (1.f / 360.f); + + float b = bidx == 0 ? src.x : src.z; + float g = src.y; + float r = bidx == 0 ? 
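HSV2RGBConvert_32F above rebuilds RGB from hue by splitting the hue circle into six sectors and picking the channel ordering out of c_HsvSectorData. The host-side sketch below is not part of the patch; it assumes hr = 360 (hue in degrees) and channels in [0, 1], and mirrors the same sector logic.

#include <cmath>
#include <cstdio>

// Channel-ordering table for the six hue sectors (c_HsvSectorData above).
static const int sectorData[6][3] = { {1,3,0}, {1,0,2}, {3,0,1},
                                      {0,2,1}, {0,1,3}, {2,1,0} };

// h in degrees (hr == 360), s and v in [0, 1]; outputs in [0, 1].
void hsv2rgb(float h, float s, float v, float& r, float& g, float& b)
{
    b = g = r = v;                    // grey when saturation is zero
    if (s != 0.f)
    {
        h *= 6.f / 360.f;             // hscale for hr == 360
        while (h < 0.f)  h += 6.f;    // wrap hue into [0, 6)
        while (h >= 6.f) h -= 6.f;

        int sector = (int)std::floor(h);
        h -= sector;
        if (sector < 0 || sector > 5) { sector = 0; h = 0.f; }

        const float tab[4] = { v, v * (1.f - s), v * (1.f - s * h), v * (1.f - s * (1.f - h)) };
        b = tab[sectorData[sector][0]];
        g = tab[sectorData[sector][1]];
        r = tab[sectorData[sector][2]];
    }
}

int main()
{
    float r, g, b;
    hsv2rgb(30.f, 1.f, 1.f, r, g, b);              // 30 degrees: orange
    printf("r=%.3f g=%.3f b=%.3f\n", r, g, b);     // roughly 1.0, 0.5, 0.0
    return 0;
}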
src.z : src.x; + + float h = 0.f, s = 0.f, l; + float vmin, vmax, diff; + + vmax = vmin = r; + vmax = fmax(vmax, g); + vmax = fmax(vmax, b); + vmin = fmin(vmin, g); + vmin = fmin(vmin, b); + + diff = vmax - vmin; + l = (vmax + vmin) * 0.5f; + + if (diff > numeric_limits::epsilon()) + { + s = (l < 0.5f) * diff / (vmax + vmin); + s += (l >= 0.5f) * diff / (2.0f - vmax - vmin); + + diff = 60.f / diff; + + h = (vmax == r) * (g - b) * diff; + h += (vmax != r && vmax == g) * ((b - r) * diff + 120.f); + h += (vmax != r && vmax != g) * ((r - g) * diff + 240.f); + h += (h < 0.f) * 360.f; + } + + dst.x = h * hscale; + dst.y = l; + dst.z = s; + } + + template static __device__ void RGB2HLSConvert_8U(const T& src, D& dst) + { + float3 buf; + + buf.x = src.x * (1.f / 255.f); + buf.y = src.y * (1.f / 255.f); + buf.z = src.z * (1.f / 255.f); + + RGB2HLSConvert_32F(buf, buf); + + dst.x = saturate_cast(buf.x); + dst.y = saturate_cast(buf.y*255.f); + dst.z = saturate_cast(buf.z*255.f); + } + + template static __device__ uint RGB2HLSConvert_8UC4(uint src) + { + float3 buf; + + buf.x = (0xff & src) * (1.f / 255.f); + buf.y = (0xff & (src >> 8)) * (1.f / 255.f); + buf.z = (0xff & (src >> 16)) * (1.f / 255.f); + + RGB2HLSConvert_32F(buf, buf); + + uint dst = 0xffu << 24; + + dst |= saturate_cast(buf.x); + dst |= saturate_cast(buf.y * 255.f) << 8; + dst |= saturate_cast(buf.z * 255.f) << 16; + + return dst; + } + + template struct RGB2HLS + : unary_function::vec_type, typename TypeVec::vec_type> + { + __device__ __forceinline__ typename TypeVec::vec_type operator()(const typename TypeVec::vec_type& src) const + { + typename TypeVec::vec_type dst; + + RGB2HLSConvert_8U(src, dst); + + return dst; + } + + __host__ __device__ __forceinline__ RGB2HLS() {} + __host__ __device__ __forceinline__ RGB2HLS(const RGB2HLS&) {} + }; + + template struct RGB2HLS + : unary_function::vec_type, typename TypeVec::vec_type> + { + __device__ __forceinline__ typename TypeVec::vec_type operator()(const typename TypeVec::vec_type& src) const + { + typename TypeVec::vec_type dst; + + RGB2HLSConvert_32F(src, dst); + + return dst; + } + + __host__ __device__ __forceinline__ RGB2HLS() {} + __host__ __device__ __forceinline__ RGB2HLS(const RGB2HLS&) {} + }; + + template struct RGB2HLS : unary_function + { + __device__ __forceinline__ uint operator()(uint src) const + { + return RGB2HLSConvert_8UC4(src); + } + + __host__ __device__ __forceinline__ RGB2HLS() {} + __host__ __device__ __forceinline__ RGB2HLS(const RGB2HLS&) {} + }; + } + +#define OPENCV_GPU_IMPLEMENT_RGB2HLS_TRAITS(name, scn, dcn, bidx) \ + template struct name ## _traits \ + { \ + typedef ::cv::gpu::device::color_detail::RGB2HLS functor_type; \ + static __host__ __device__ __forceinline__ functor_type create_functor() \ + { \ + return functor_type(); \ + } \ + }; \ + template struct name ## _full_traits \ + { \ + typedef ::cv::gpu::device::color_detail::RGB2HLS functor_type; \ + static __host__ __device__ __forceinline__ functor_type create_functor() \ + { \ + return functor_type(); \ + } \ + }; \ + template <> struct name ## _traits \ + { \ + typedef ::cv::gpu::device::color_detail::RGB2HLS functor_type; \ + static __host__ __device__ __forceinline__ functor_type create_functor() \ + { \ + return functor_type(); \ + } \ + }; \ + template <> struct name ## _full_traits \ + { \ + typedef ::cv::gpu::device::color_detail::RGB2HLS functor_type; \ + static __host__ __device__ __forceinline__ functor_type create_functor() \ + { \ + return functor_type(); \ + } \ + }; + + namespace 
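For the forward direction, RGB2HLSConvert_32F above derives lightness from the min/max of the channels and switches the saturation denominator at l = 0.5. A host-side sketch of the same float math, not part of the patch, with channels in [0, 1] and hue returned in degrees (hr = 360):

#include <algorithm>
#include <cstdio>

// r, g, b in [0, 1]; returns h in degrees (hr == 360), l and s in [0, 1].
void rgb2hls(float r, float g, float b, float& h, float& l, float& s)
{
    const float vmax = std::max(r, std::max(g, b));
    const float vmin = std::min(r, std::min(g, b));
    const float diff = vmax - vmin;

    h = 0.f;
    s = 0.f;
    l = (vmax + vmin) * 0.5f;

    if (diff > 1e-7f)                                 // epsilon guard, as in the device code
    {
        s = (l < 0.5f) ? diff / (vmax + vmin)
                       : diff / (2.f - vmax - vmin);

        const float k = 60.f / diff;
        if      (vmax == r) h = (g - b) * k;
        else if (vmax == g) h = (b - r) * k + 120.f;
        else                h = (r - g) * k + 240.f;
        if (h < 0.f) h += 360.f;
    }
}

int main()
{
    float h, l, s;
    rgb2hls(1.f, 0.5f, 0.f, h, l, s);                 // the orange pixel again
    printf("h=%.1f l=%.3f s=%.3f\n", h, l, s);        // roughly 30, 0.5, 1.0
    return 0;
}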
color_detail + { + __constant__ int c_HlsSectorData[6][3] = { {1,3,0}, {1,0,2}, {3,0,1}, {0,2,1}, {0,1,3}, {2,1,0} }; + + template static __device__ void HLS2RGBConvert_32F(const T& src, D& dst) + { + const float hscale = 6.0f / hr; + + float h = src.x, l = src.y, s = src.z; + float b = l, g = l, r = l; + + if (s != 0) + { + float p2 = (l <= 0.5f) * l * (1 + s); + p2 += (l > 0.5f) * (l + s - l * s); + float p1 = 2 * l - p2; + + h *= hscale; + + if( h < 0 ) + do h += 6; while( h < 0 ); + else if( h >= 6 ) + do h -= 6; while( h >= 6 ); + + int sector; + sector = __float2int_rd(h); + + h -= sector; + + float tab[4]; + tab[0] = p2; + tab[1] = p1; + tab[2] = p1 + (p2 - p1) * (1 - h); + tab[3] = p1 + (p2 - p1) * h; + + b = tab[c_HlsSectorData[sector][0]]; + g = tab[c_HlsSectorData[sector][1]]; + r = tab[c_HlsSectorData[sector][2]]; + } + + dst.x = bidx == 0 ? b : r; + dst.y = g; + dst.z = bidx == 0 ? r : b; + } + + template static __device__ void HLS2RGBConvert_8U(const T& src, D& dst) + { + float3 buf; + + buf.x = src.x; + buf.y = src.y * (1.f / 255.f); + buf.z = src.z * (1.f / 255.f); + + HLS2RGBConvert_32F(buf, buf); + + dst.x = saturate_cast(buf.x * 255.f); + dst.y = saturate_cast(buf.y * 255.f); + dst.z = saturate_cast(buf.z * 255.f); + } + + template static __device__ uint HLS2RGBConvert_8UC4(uint src) + { + float3 buf; + + buf.x = 0xff & src; + buf.y = (0xff & (src >> 8)) * (1.f / 255.f); + buf.z = (0xff & (src >> 16)) * (1.f / 255.f); + + HLS2RGBConvert_32F(buf, buf); + + uint dst = 0xffu << 24; + + dst |= saturate_cast(buf.x * 255.f); + dst |= saturate_cast(buf.y * 255.f) << 8; + dst |= saturate_cast(buf.z * 255.f) << 16; + + return dst; + } + + template struct HLS2RGB + : unary_function::vec_type, typename TypeVec::vec_type> + { + __device__ __forceinline__ typename TypeVec::vec_type operator()(const typename TypeVec::vec_type& src) const + { + typename TypeVec::vec_type dst; + + HLS2RGBConvert_8U(src, dst); + setAlpha(dst, ColorChannel::max()); + + return dst; + } + + __host__ __device__ __forceinline__ HLS2RGB() {} + __host__ __device__ __forceinline__ HLS2RGB(const HLS2RGB&) {} + }; + + template struct HLS2RGB + : unary_function::vec_type, typename TypeVec::vec_type> + { + __device__ __forceinline__ typename TypeVec::vec_type operator()(const typename TypeVec::vec_type& src) const + { + typename TypeVec::vec_type dst; + + HLS2RGBConvert_32F(src, dst); + setAlpha(dst, ColorChannel::max()); + + return dst; + } + + __host__ __device__ __forceinline__ HLS2RGB() {} + __host__ __device__ __forceinline__ HLS2RGB(const HLS2RGB&) {} + }; + + template struct HLS2RGB : unary_function + { + __device__ __forceinline__ uint operator()(uint src) const + { + return HLS2RGBConvert_8UC4(src); + } + + __host__ __device__ __forceinline__ HLS2RGB() {} + __host__ __device__ __forceinline__ HLS2RGB(const HLS2RGB&) {} + }; + } + +#define OPENCV_GPU_IMPLEMENT_HLS2RGB_TRAITS(name, scn, dcn, bidx) \ + template struct name ## _traits \ + { \ + typedef ::cv::gpu::device::color_detail::HLS2RGB functor_type; \ + static __host__ __device__ __forceinline__ functor_type create_functor() \ + { \ + return functor_type(); \ + } \ + }; \ + template struct name ## _full_traits \ + { \ + typedef ::cv::gpu::device::color_detail::HLS2RGB functor_type; \ + static __host__ __device__ __forceinline__ functor_type create_functor() \ + { \ + return functor_type(); \ + } \ + }; \ + template <> struct name ## _traits \ + { \ + typedef ::cv::gpu::device::color_detail::HLS2RGB functor_type; \ + static __host__ __device__ 
__forceinline__ functor_type create_functor() \ + { \ + return functor_type(); \ + } \ + }; \ + template <> struct name ## _full_traits \ + { \ + typedef ::cv::gpu::device::color_detail::HLS2RGB functor_type; \ + static __host__ __device__ __forceinline__ functor_type create_functor() \ + { \ + return functor_type(); \ + } \ + }; + +///////////////////////////////////// RGB <-> Lab ///////////////////////////////////// + + namespace color_detail + { + enum + { + LAB_CBRT_TAB_SIZE = 1024, + GAMMA_TAB_SIZE = 1024, + lab_shift = xyz_shift, + gamma_shift = 3, + lab_shift2 = (lab_shift + gamma_shift), + LAB_CBRT_TAB_SIZE_B = (256 * 3 / 2 * (1 << gamma_shift)) + }; + + __constant__ ushort c_sRGBGammaTab_b[] = {0,1,1,2,2,3,4,4,5,6,6,7,8,8,9,10,11,11,12,13,14,15,16,17,19,20,21,22,24,25,26,28,29,31,33,34,36,38,40,41,43,45,47,49,51,54,56,58,60,63,65,68,70,73,75,78,81,83,86,89,92,95,98,101,105,108,111,115,118,121,125,129,132,136,140,144,147,151,155,160,164,168,172,176,181,185,190,194,199,204,209,213,218,223,228,233,239,244,249,255,260,265,271,277,282,288,294,300,306,312,318,324,331,337,343,350,356,363,370,376,383,390,397,404,411,418,426,433,440,448,455,463,471,478,486,494,502,510,518,527,535,543,552,560,569,578,586,595,604,613,622,631,641,650,659,669,678,688,698,707,717,727,737,747,757,768,778,788,799,809,820,831,842,852,863,875,886,897,908,920,931,943,954,966,978,990,1002,1014,1026,1038,1050,1063,1075,1088,1101,1113,1126,1139,1152,1165,1178,1192,1205,1218,1232,1245,1259,1273,1287,1301,1315,1329,1343,1357,1372,1386,1401,1415,1430,1445,1460,1475,1490,1505,1521,1536,1551,1567,1583,1598,1614,1630,1646,1662,1678,1695,1711,1728,1744,1761,1778,1794,1811,1828,1846,1863,1880,1897,1915,1933,1950,1968,1986,2004,2022,2040}; + + __device__ __forceinline__ int LabCbrt_b(int i) + { + float x = i * (1.f / (255.f * (1 << gamma_shift))); + return (1 << lab_shift2) * (x < 0.008856f ? x * 7.787f + 0.13793103448275862f : ::cbrtf(x)); + } + + template + __device__ __forceinline__ void RGB2LabConvert_8U(const T& src, D& dst) + { + const int Lscale = (116 * 255 + 50) / 100; + const int Lshift = -((16 * 255 * (1 << lab_shift2) + 50) / 100); + + int B = blueIdx == 0 ? src.x : src.z; + int G = src.y; + int R = blueIdx == 0 ? 
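LabCbrt_b above evaluates the piecewise CIE Lab f(t) function in fixed point, returning (1 << lab_shift2) * f(x). A plain-float sketch of the same piecewise definition, not part of the patch, with a sample L* computation:

#include <cmath>
#include <cstdio>

// Piecewise CIE Lab f(t): linear below the 0.008856 threshold, cube root above.
float lab_f(float t)
{
    return t < 0.008856f ? t * 7.787f + 0.13793103448275862f   // 16/116
                         : std::cbrt(t);
}

int main()
{
    // L* = 116 * f(Y/Yn) - 16; for 18% grey (Y/Yn = 0.18) this is about 49.5.
    const float fy = lab_f(0.18f);
    printf("f(0.18) = %.4f  ->  L* = %.1f\n", fy, 116.f * fy - 16.f);
    return 0;
}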
src.z : src.x; + + if (srgb) + { + B = c_sRGBGammaTab_b[B]; + G = c_sRGBGammaTab_b[G]; + R = c_sRGBGammaTab_b[R]; + } + else + { + B <<= 3; + G <<= 3; + R <<= 3; + } + + int fX = LabCbrt_b(CV_DESCALE(B * 778 + G * 1541 + R * 1777, lab_shift)); + int fY = LabCbrt_b(CV_DESCALE(B * 296 + G * 2929 + R * 871, lab_shift)); + int fZ = LabCbrt_b(CV_DESCALE(B * 3575 + G * 448 + R * 73, lab_shift)); + + int L = CV_DESCALE(Lscale * fY + Lshift, lab_shift2); + int a = CV_DESCALE(500 * (fX - fY) + 128 * (1 << lab_shift2), lab_shift2); + int b = CV_DESCALE(200 * (fY - fZ) + 128 * (1 << lab_shift2), lab_shift2); + + dst.x = saturate_cast(L); + dst.y = saturate_cast(a); + dst.z = saturate_cast(b); + } + + __device__ __forceinline__ float splineInterpolate(float x, const float* tab, int n) + { + int ix = ::min(::max(int(x), 0), n-1); + x -= ix; + tab += ix * 4; + return ((tab[3] * x + tab[2]) * x + tab[1]) * x + tab[0]; + } + + __constant__ float c_sRGBGammaTab[] = {0,7.55853e-05,0.,-7.51331e-13,7.55853e-05,7.55853e-05,-2.25399e-12,3.75665e-12,0.000151171,7.55853e-05,9.01597e-12,-6.99932e-12,0.000226756,7.55853e-05,-1.1982e-11,2.41277e-12,0.000302341,7.55853e-05,-4.74369e-12,1.19001e-11,0.000377927,7.55853e-05,3.09568e-11,-2.09095e-11,0.000453512,7.55853e-05,-3.17718e-11,1.35303e-11,0.000529097,7.55853e-05,8.81905e-12,-4.10782e-12,0.000604683,7.55853e-05,-3.50439e-12,2.90097e-12,0.000680268,7.55853e-05,5.19852e-12,-7.49607e-12,0.000755853,7.55853e-05,-1.72897e-11,2.70833e-11,0.000831439,7.55854e-05,6.39602e-11,-4.26295e-11,0.000907024,7.55854e-05,-6.39282e-11,2.70193e-11,0.000982609,7.55853e-05,1.71298e-11,-7.24017e-12,0.00105819,7.55853e-05,-4.59077e-12,1.94137e-12,0.00113378,7.55853e-05,1.23333e-12,-5.25291e-13,0.00120937,7.55853e-05,-3.42545e-13,1.59799e-13,0.00128495,7.55853e-05,1.36852e-13,-1.13904e-13,0.00136054,7.55853e-05,-2.04861e-13,2.95818e-13,0.00143612,7.55853e-05,6.82594e-13,-1.06937e-12,0.00151171,7.55853e-05,-2.52551e-12,3.98166e-12,0.00158729,7.55853e-05,9.41946e-12,-1.48573e-11,0.00166288,7.55853e-05,-3.51523e-11,5.54474e-11,0.00173846,7.55854e-05,1.3119e-10,-9.0517e-11,0.00181405,7.55854e-05,-1.40361e-10,7.37899e-11,0.00188963,7.55853e-05,8.10085e-11,-8.82272e-11,0.00196522,7.55852e-05,-1.83673e-10,1.62704e-10,0.0020408,7.55853e-05,3.04438e-10,-2.13341e-10,0.00211639,7.55853e-05,-3.35586e-10,2.25e-10,0.00219197,7.55853e-05,3.39414e-10,-2.20997e-10,0.00226756,7.55853e-05,-3.23576e-10,1.93326e-10,0.00234315,7.55853e-05,2.564e-10,-8.66446e-11,0.00241873,7.55855e-05,-3.53328e-12,-7.9578e-11,0.00249432,7.55853e-05,-2.42267e-10,1.72126e-10,0.0025699,7.55853e-05,2.74111e-10,-1.43265e-10,0.00264549,7.55854e-05,-1.55683e-10,-6.47292e-11,0.00272107,7.55849e-05,-3.4987e-10,8.67842e-10,0.00279666,7.55868e-05,2.25366e-09,-3.8723e-09,0.00287224,7.55797e-05,-9.36325e-09,1.5087e-08,0.00294783,7.56063e-05,3.58978e-08,-5.69415e-08,0.00302341,7.55072e-05,-1.34927e-07,2.13144e-07,0.003099,7.58768e-05,5.04507e-07,1.38713e-07,0.00317552,7.7302e-05,9.20646e-07,-1.55186e-07,0.00325359,7.86777e-05,4.55087e-07,4.26813e-08,0.00333276,7.97159e-05,5.83131e-07,-1.06495e-08,0.00341305,8.08502e-05,5.51182e-07,3.87467e-09,0.00349446,8.19642e-05,5.62806e-07,-1.92586e-10,0.00357698,8.30892e-05,5.62228e-07,1.0866e-09,0.00366063,8.4217e-05,5.65488e-07,5.02818e-10,0.00374542,8.53494e-05,5.66997e-07,8.60211e-10,0.00383133,8.6486e-05,5.69577e-07,7.13044e-10,0.00391839,8.76273e-05,5.71716e-07,4.78527e-10,0.00400659,8.87722e-05,5.73152e-07,1.09818e-09,0.00409594,8.99218e-05,5.76447e-07,2.50964e-10,0.00418644,9.10754e-05,5.772e-07
,1.15762e-09,0.00427809,9.22333e-05,5.80672e-07,2.40865e-10,0.0043709,9.33954e-05,5.81395e-07,1.13854e-09,0.00446488,9.45616e-05,5.84811e-07,3.27267e-10,0.00456003,9.57322e-05,5.85792e-07,8.1197e-10,0.00465635,9.69062e-05,5.88228e-07,6.15823e-10,0.00475384,9.80845e-05,5.90076e-07,9.15747e-10,0.00485252,9.92674e-05,5.92823e-07,3.778e-10,0.00495238,0.000100454,5.93956e-07,8.32623e-10,0.00505343,0.000101645,5.96454e-07,4.82695e-10,0.00515567,0.000102839,5.97902e-07,9.61904e-10,0.00525911,0.000104038,6.00788e-07,3.26281e-10,0.00536375,0.00010524,6.01767e-07,9.926e-10,0.00546959,0.000106447,6.04745e-07,3.59933e-10,0.00557664,0.000107657,6.05824e-07,8.2728e-10,0.0056849,0.000108871,6.08306e-07,5.21898e-10,0.00579438,0.00011009,6.09872e-07,8.10492e-10,0.00590508,0.000111312,6.12303e-07,4.27046e-10,0.00601701,0.000112538,6.13585e-07,7.40878e-10,0.00613016,0.000113767,6.15807e-07,8.00469e-10,0.00624454,0.000115001,6.18209e-07,2.48178e-10,0.00636016,0.000116238,6.18953e-07,1.00073e-09,0.00647702,0.000117479,6.21955e-07,4.05654e-10,0.00659512,0.000118724,6.23172e-07,6.36192e-10,0.00671447,0.000119973,6.25081e-07,7.74927e-10,0.00683507,0.000121225,6.27406e-07,4.54975e-10,0.00695692,0.000122481,6.28771e-07,6.64841e-10,0.00708003,0.000123741,6.30765e-07,6.10972e-10,0.00720441,0.000125004,6.32598e-07,6.16543e-10,0.00733004,0.000126271,6.34448e-07,6.48204e-10,0.00745695,0.000127542,6.36392e-07,5.15835e-10,0.00758513,0.000128816,6.3794e-07,5.48103e-10,0.00771458,0.000130094,6.39584e-07,1.01706e-09,0.00784532,0.000131376,6.42635e-07,4.0283e-11,0.00797734,0.000132661,6.42756e-07,6.84471e-10,0.00811064,0.000133949,6.4481e-07,9.47144e-10,0.00824524,0.000135241,6.47651e-07,1.83472e-10,0.00838112,0.000136537,6.48201e-07,1.11296e-09,0.00851831,0.000137837,6.5154e-07,2.13163e-11,0.0086568,0.00013914,6.51604e-07,6.64462e-10,0.00879659,0.000140445,6.53598e-07,1.04613e-09,0.00893769,0.000141756,6.56736e-07,-1.92377e-10,0.0090801,0.000143069,6.56159e-07,1.58601e-09,0.00922383,0.000144386,6.60917e-07,-5.63754e-10,0.00936888,0.000145706,6.59226e-07,1.60033e-09,0.00951524,0.000147029,6.64027e-07,-2.49543e-10,0.00966294,0.000148356,6.63278e-07,1.26043e-09,0.00981196,0.000149687,6.67059e-07,-1.35572e-10,0.00996231,0.00015102,6.66653e-07,1.14458e-09,0.010114,0.000152357,6.70086e-07,2.13864e-10,0.010267,0.000153698,6.70728e-07,7.93856e-10,0.0104214,0.000155042,6.73109e-07,3.36077e-10,0.0105771,0.000156389,6.74118e-07,6.55765e-10,0.0107342,0.000157739,6.76085e-07,7.66211e-10,0.0108926,0.000159094,6.78384e-07,4.66116e-12,0.0110524,0.000160451,6.78398e-07,1.07775e-09,0.0112135,0.000161811,6.81631e-07,3.41023e-10,0.011376,0.000163175,6.82654e-07,3.5205e-10,0.0115398,0.000164541,6.8371e-07,1.04473e-09,0.0117051,0.000165912,6.86844e-07,1.25757e-10,0.0118717,0.000167286,6.87222e-07,3.14818e-10,0.0120396,0.000168661,6.88166e-07,1.40886e-09,0.012209,0.000170042,6.92393e-07,-3.62244e-10,0.0123797,0.000171425,6.91306e-07,9.71397e-10,0.0125518,0.000172811,6.9422e-07,2.02003e-10,0.0127253,0.0001742,6.94826e-07,1.01448e-09,0.0129002,0.000175593,6.97869e-07,3.96653e-10,0.0130765,0.00017699,6.99059e-07,1.92927e-10,0.0132542,0.000178388,6.99638e-07,6.94305e-10,0.0134333,0.00017979,7.01721e-07,7.55108e-10,0.0136138,0.000181195,7.03986e-07,1.05918e-11,0.0137957,0.000182603,7.04018e-07,1.06513e-09,0.013979,0.000184015,7.07214e-07,3.85512e-10,0.0141637,0.00018543,7.0837e-07,1.86769e-10,0.0143499,0.000186848,7.0893e-07,7.30116e-10,0.0145374,0.000188268,7.11121e-07,6.17983e-10,0.0147264,0.000189692,7.12975e-07,5.23282e-10,0.0149168,0.000191119,7.14
545e-07,8.28398e-11,0.0151087,0.000192549,7.14793e-07,1.0081e-09,0.0153019,0.000193981,7.17817e-07,5.41244e-10,0.0154966,0.000195418,7.19441e-07,-3.7907e-10,0.0156928,0.000196856,7.18304e-07,1.90641e-09,0.0158903,0.000198298,7.24023e-07,-7.27387e-10,0.0160893,0.000199744,7.21841e-07,1.00317e-09,0.0162898,0.000201191,7.24851e-07,4.39949e-10,0.0164917,0.000202642,7.2617e-07,9.6234e-10,0.0166951,0.000204097,7.29057e-07,-5.64019e-10,0.0168999,0.000205554,7.27365e-07,1.29374e-09,0.0171062,0.000207012,7.31247e-07,9.77025e-10,0.017314,0.000208478,7.34178e-07,-1.47651e-09,0.0175232,0.000209942,7.29748e-07,3.06636e-09,0.0177338,0.00021141,7.38947e-07,-1.47573e-09,0.017946,0.000212884,7.3452e-07,9.7386e-10,0.0181596,0.000214356,7.37442e-07,1.30562e-09,0.0183747,0.000215835,7.41358e-07,-6.08376e-10,0.0185913,0.000217315,7.39533e-07,1.12785e-09,0.0188093,0.000218798,7.42917e-07,-1.77711e-10,0.0190289,0.000220283,7.42384e-07,1.44562e-09,0.0192499,0.000221772,7.46721e-07,-1.68825e-11,0.0194724,0.000223266,7.4667e-07,4.84533e-10,0.0196964,0.000224761,7.48124e-07,-5.85298e-11,0.0199219,0.000226257,7.47948e-07,1.61217e-09,0.0201489,0.000227757,7.52785e-07,-8.02136e-10,0.0203775,0.00022926,7.50378e-07,1.59637e-09,0.0206075,0.000230766,7.55167e-07,4.47168e-12,0.020839,0.000232276,7.55181e-07,2.48387e-10,0.021072,0.000233787,7.55926e-07,8.6474e-10,0.0213066,0.000235302,7.5852e-07,1.78299e-11,0.0215426,0.000236819,7.58573e-07,9.26567e-10,0.0217802,0.000238339,7.61353e-07,1.34529e-12,0.0220193,0.000239862,7.61357e-07,9.30659e-10,0.0222599,0.000241387,7.64149e-07,1.34529e-12,0.0225021,0.000242915,7.64153e-07,9.26567e-10,0.0227458,0.000244447,7.66933e-07,1.76215e-11,0.022991,0.00024598,7.66986e-07,8.65536e-10,0.0232377,0.000247517,7.69582e-07,2.45677e-10,0.023486,0.000249057,7.70319e-07,1.44193e-11,0.0237358,0.000250598,7.70363e-07,1.55918e-09,0.0239872,0.000252143,7.7504e-07,-6.63173e-10,0.0242401,0.000253691,7.73051e-07,1.09357e-09,0.0244946,0.000255241,7.76331e-07,1.41919e-11,0.0247506,0.000256793,7.76374e-07,7.12248e-10,0.0250082,0.000258348,7.78511e-07,8.62049e-10,0.0252673,0.000259908,7.81097e-07,-4.35061e-10,0.025528,0.000261469,7.79792e-07,8.7825e-10,0.0257902,0.000263031,7.82426e-07,6.47181e-10,0.0260541,0.000264598,7.84368e-07,2.58448e-10,0.0263194,0.000266167,7.85143e-07,1.81558e-10,0.0265864,0.000267738,7.85688e-07,8.78041e-10,0.0268549,0.000269312,7.88322e-07,3.15102e-11,0.027125,0.000270889,7.88417e-07,8.58525e-10,0.0273967,0.000272468,7.90992e-07,2.59812e-10,0.02767,0.000274051,7.91772e-07,-3.5224e-11,0.0279448,0.000275634,7.91666e-07,1.74377e-09,0.0282212,0.000277223,7.96897e-07,-1.35196e-09,0.0284992,0.000278813,7.92841e-07,1.80141e-09,0.0287788,0.000280404,7.98246e-07,-2.65629e-10,0.0290601,0.000281999,7.97449e-07,1.12374e-09,0.0293428,0.000283598,8.0082e-07,-5.04106e-10,0.0296272,0.000285198,7.99308e-07,8.92764e-10,0.0299132,0.000286799,8.01986e-07,6.58379e-10,0.0302008,0.000288405,8.03961e-07,1.98971e-10,0.0304901,0.000290014,8.04558e-07,4.08382e-10,0.0307809,0.000291624,8.05783e-07,3.01839e-11,0.0310733,0.000293236,8.05874e-07,1.33343e-09,0.0313673,0.000294851,8.09874e-07,2.2419e-10,0.031663,0.000296472,8.10547e-07,-3.67606e-10,0.0319603,0.000298092,8.09444e-07,1.24624e-09,0.0322592,0.000299714,8.13182e-07,-8.92025e-10,0.0325597,0.000301338,8.10506e-07,2.32183e-09,0.0328619,0.000302966,8.17472e-07,-9.44719e-10,0.0331657,0.000304598,8.14638e-07,1.45703e-09,0.0334711,0.000306232,8.19009e-07,-1.15805e-09,0.0337781,0.000307866,8.15535e-07,3.17507e-09,0.0340868,0.000309507,8.2506e-07,-4.09161e-09,0
.0343971,0.000311145,8.12785e-07,5.74079e-09,0.0347091,0.000312788,8.30007e-07,-3.97034e-09,0.0350227,0.000314436,8.18096e-07,2.68985e-09,0.035338,0.00031608,8.26166e-07,6.61676e-10,0.0356549,0.000317734,8.28151e-07,-1.61123e-09,0.0359734,0.000319386,8.23317e-07,2.05786e-09,0.0362936,0.000321038,8.29491e-07,8.30388e-10,0.0366155,0.0003227,8.31982e-07,-1.65424e-09,0.036939,0.000324359,8.27019e-07,2.06129e-09,0.0372642,0.000326019,8.33203e-07,8.59719e-10,0.0375911,0.000327688,8.35782e-07,-1.77488e-09,0.0379196,0.000329354,8.30458e-07,2.51464e-09,0.0382498,0.000331023,8.38002e-07,-8.33135e-10,0.0385817,0.000332696,8.35502e-07,8.17825e-10,0.0389152,0.00033437,8.37956e-07,1.28718e-09,0.0392504,0.00033605,8.41817e-07,-2.2413e-09,0.0395873,0.000337727,8.35093e-07,3.95265e-09,0.0399258,0.000339409,8.46951e-07,-2.39332e-09,0.0402661,0.000341095,8.39771e-07,1.89533e-09,0.040608,0.000342781,8.45457e-07,-1.46271e-09,0.0409517,0.000344467,8.41069e-07,3.95554e-09,0.041297,0.000346161,8.52936e-07,-3.18369e-09,0.041644,0.000347857,8.43385e-07,1.32873e-09,0.0419927,0.000349548,8.47371e-07,1.59402e-09,0.0423431,0.000351248,8.52153e-07,-2.54336e-10,0.0426952,0.000352951,8.5139e-07,-5.76676e-10,0.043049,0.000354652,8.4966e-07,2.56114e-09,0.0434045,0.000356359,8.57343e-07,-2.21744e-09,0.0437617,0.000358067,8.50691e-07,2.58344e-09,0.0441206,0.000359776,8.58441e-07,-6.65826e-10,0.0444813,0.000361491,8.56444e-07,7.99218e-11,0.0448436,0.000363204,8.56684e-07,3.46063e-10,0.0452077,0.000364919,8.57722e-07,2.26116e-09,0.0455734,0.000366641,8.64505e-07,-1.94005e-09,0.045941,0.000368364,8.58685e-07,1.77384e-09,0.0463102,0.000370087,8.64007e-07,-1.43005e-09,0.0466811,0.000371811,8.59717e-07,3.94634e-09,0.0470538,0.000373542,8.71556e-07,-3.17946e-09,0.0474282,0.000375276,8.62017e-07,1.32104e-09,0.0478043,0.000377003,8.6598e-07,1.62045e-09,0.0481822,0.00037874,8.70842e-07,-3.52297e-10,0.0485618,0.000380481,8.69785e-07,-2.11211e-10,0.0489432,0.00038222,8.69151e-07,1.19716e-09,0.0493263,0.000383962,8.72743e-07,-8.52026e-10,0.0497111,0.000385705,8.70187e-07,2.21092e-09,0.0500977,0.000387452,8.76819e-07,-5.41339e-10,0.050486,0.000389204,8.75195e-07,-4.5361e-11,0.0508761,0.000390954,8.75059e-07,7.22669e-10,0.0512679,0.000392706,8.77227e-07,8.79936e-10,0.0516615,0.000394463,8.79867e-07,-5.17048e-10,0.0520568,0.000396222,8.78316e-07,1.18833e-09,0.0524539,0.000397982,8.81881e-07,-5.11022e-10,0.0528528,0.000399744,8.80348e-07,8.55683e-10,0.0532534,0.000401507,8.82915e-07,8.13562e-10,0.0536558,0.000403276,8.85356e-07,-3.84603e-10,0.05406,0.000405045,8.84202e-07,7.24962e-10,0.0544659,0.000406816,8.86377e-07,1.20986e-09,0.0548736,0.000408592,8.90006e-07,-1.83896e-09,0.0552831,0.000410367,8.84489e-07,2.42071e-09,0.0556944,0.000412143,8.91751e-07,-3.93413e-10,0.0561074,0.000413925,8.90571e-07,-8.46967e-10,0.0565222,0.000415704,8.8803e-07,3.78122e-09,0.0569388,0.000417491,8.99374e-07,-3.1021e-09,0.0573572,0.000419281,8.90068e-07,1.17658e-09,0.0577774,0.000421064,8.93597e-07,2.12117e-09,0.0581993,0.000422858,8.99961e-07,-2.21068e-09,0.0586231,0.000424651,8.93329e-07,2.9961e-09,0.0590486,0.000426447,9.02317e-07,-2.32311e-09,0.059476,0.000428244,8.95348e-07,2.57122e-09,0.0599051,0.000430043,9.03062e-07,-5.11098e-10,0.0603361,0.000431847,9.01528e-07,-5.27166e-10,0.0607688,0.000433649,8.99947e-07,2.61984e-09,0.0612034,0.000435457,9.07806e-07,-2.50141e-09,0.0616397,0.000437265,9.00302e-07,3.66045e-09,0.0620779,0.000439076,9.11283e-07,-4.68977e-09,0.0625179,0.000440885,8.97214e-07,7.64783e-09,0.0629597,0.000442702,9.20158e-07,-7.27499e-09,0.063
4033,0.000444521,8.98333e-07,6.55113e-09,0.0638487,0.000446337,9.17986e-07,-4.02844e-09,0.0642959,0.000448161,9.05901e-07,2.11196e-09,0.064745,0.000449979,9.12236e-07,3.03125e-09,0.0651959,0.000451813,9.2133e-07,-6.78648e-09,0.0656486,0.000453635,9.00971e-07,9.21375e-09,0.0661032,0.000455464,9.28612e-07,-7.71684e-09,0.0665596,0.000457299,9.05462e-07,6.7522e-09,0.0670178,0.00045913,9.25718e-07,-4.3907e-09,0.0674778,0.000460968,9.12546e-07,3.36e-09,0.0679397,0.000462803,9.22626e-07,-1.59876e-09,0.0684034,0.000464644,9.1783e-07,3.0351e-09,0.068869,0.000466488,9.26935e-07,-3.09101e-09,0.0693364,0.000468333,9.17662e-07,1.8785e-09,0.0698057,0.000470174,9.23298e-07,3.02733e-09,0.0702768,0.00047203,9.3238e-07,-6.53722e-09,0.0707497,0.000473875,9.12768e-07,8.22054e-09,0.0712245,0.000475725,9.37429e-07,-3.99325e-09,0.0717012,0.000477588,9.2545e-07,3.01839e-10,0.0721797,0.00047944,9.26355e-07,2.78597e-09,0.0726601,0.000481301,9.34713e-07,-3.99507e-09,0.0731423,0.000483158,9.22728e-07,5.7435e-09,0.0736264,0.000485021,9.39958e-07,-4.07776e-09,0.0741123,0.000486888,9.27725e-07,3.11695e-09,0.0746002,0.000488753,9.37076e-07,-9.39394e-10,0.0750898,0.000490625,9.34258e-07,6.4055e-10,0.0755814,0.000492495,9.3618e-07,-1.62265e-09,0.0760748,0.000494363,9.31312e-07,5.84995e-09,0.0765701,0.000496243,9.48861e-07,-6.87601e-09,0.0770673,0.00049812,9.28233e-07,6.75296e-09,0.0775664,0.000499997,9.48492e-07,-5.23467e-09,0.0780673,0.000501878,9.32788e-07,6.73523e-09,0.0785701,0.000503764,9.52994e-07,-6.80514e-09,0.0790748,0.000505649,9.32578e-07,5.5842e-09,0.0795814,0.000507531,9.49331e-07,-6.30583e-10,0.0800899,0.000509428,9.47439e-07,-3.0618e-09,0.0806003,0.000511314,9.38254e-07,5.4273e-09,0.0811125,0.000513206,9.54536e-07,-3.74627e-09,0.0816267,0.000515104,9.43297e-07,2.10713e-09,0.0821427,0.000516997,9.49618e-07,2.76839e-09,0.0826607,0.000518905,9.57924e-07,-5.73006e-09,0.0831805,0.000520803,9.40733e-07,5.25072e-09,0.0837023,0.0005227,9.56486e-07,-3.71718e-10,0.084226,0.000524612,9.5537e-07,-3.76404e-09,0.0847515,0.000526512,9.44078e-07,7.97735e-09,0.085279,0.000528424,9.6801e-07,-5.79367e-09,0.0858084,0.000530343,9.50629e-07,2.96268e-10,0.0863397,0.000532245,9.51518e-07,4.6086e-09,0.0868729,0.000534162,9.65344e-07,-3.82947e-09,0.087408,0.000536081,9.53856e-07,3.25861e-09,0.087945,0.000537998,9.63631e-07,-1.7543e-09,0.088484,0.00053992,9.58368e-07,3.75849e-09,0.0890249,0.000541848,9.69644e-07,-5.82891e-09,0.0895677,0.00054377,9.52157e-07,4.65593e-09,0.0901124,0.000545688,9.66125e-07,2.10643e-09,0.0906591,0.000547627,9.72444e-07,-5.63099e-09,0.0912077,0.000549555,9.55551e-07,5.51627e-09,0.0917582,0.000551483,9.721e-07,-1.53292e-09,0.0923106,0.000553422,9.67501e-07,6.15311e-10,0.092865,0.000555359,9.69347e-07,-9.28291e-10,0.0934213,0.000557295,9.66562e-07,3.09774e-09,0.0939796,0.000559237,9.75856e-07,-4.01186e-09,0.0945398,0.000561177,9.6382e-07,5.49892e-09,0.095102,0.000563121,9.80317e-07,-3.08258e-09,0.0956661,0.000565073,9.71069e-07,-6.19176e-10,0.0962321,0.000567013,9.69212e-07,5.55932e-09,0.0968001,0.000568968,9.8589e-07,-6.71704e-09,0.09737,0.00057092,9.65738e-07,6.40762e-09,0.0979419,0.00057287,9.84961e-07,-4.0122e-09,0.0985158,0.000574828,9.72925e-07,2.19059e-09,0.0990916,0.000576781,9.79496e-07,2.70048e-09,0.0996693,0.000578748,9.87598e-07,-5.54193e-09,0.100249,0.000580706,9.70972e-07,4.56597e-09,0.100831,0.000582662,9.8467e-07,2.17923e-09,0.101414,0.000584638,9.91208e-07,-5.83232e-09,0.102,0.000586603,9.73711e-07,6.24884e-09,0.102588,0.000588569,9.92457e-07,-4.26178e-09,0.103177,0.000590541,9.79672e-07,3.347
81e-09,0.103769,0.00059251,9.89715e-07,-1.67904e-09,0.104362,0.000594485,9.84678e-07,3.36839e-09,0.104958,0.000596464,9.94783e-07,-4.34397e-09,0.105555,0.000598441,9.81751e-07,6.55696e-09,0.106155,0.000600424,1.00142e-06,-6.98272e-09,0.106756,0.000602406,9.80474e-07,6.4728e-09,0.107359,0.000604386,9.99893e-07,-4.00742e-09,0.107965,0.000606374,9.8787e-07,2.10654e-09,0.108572,0.000608356,9.9419e-07,3.0318e-09,0.109181,0.000610353,1.00329e-06,-6.7832e-09,0.109793,0.00061234,9.82936e-07,9.1998e-09,0.110406,0.000614333,1.01054e-06,-7.6642e-09,0.111021,0.000616331,9.87543e-07,6.55579e-09,0.111639,0.000618326,1.00721e-06,-3.65791e-09,0.112258,0.000620329,9.96236e-07,6.25467e-10,0.112879,0.000622324,9.98113e-07,1.15593e-09,0.113503,0.000624323,1.00158e-06,2.20158e-09,0.114128,0.000626333,1.00819e-06,-2.51191e-09,0.114755,0.000628342,1.00065e-06,3.95517e-10,0.115385,0.000630345,1.00184e-06,9.29807e-10,0.116016,0.000632351,1.00463e-06,3.33599e-09,0.116649,0.00063437,1.01463e-06,-6.82329e-09,0.117285,0.000636379,9.94163e-07,9.05595e-09,0.117922,0.000638395,1.02133e-06,-7.04862e-09,0.118562,0.000640416,1.00019e-06,4.23737e-09,0.119203,0.000642429,1.0129e-06,-2.45033e-09,0.119847,0.000644448,1.00555e-06,5.56395e-09,0.120492,0.000646475,1.02224e-06,-4.9043e-09,0.121139,0.000648505,1.00753e-06,-8.47952e-10,0.121789,0.000650518,1.00498e-06,8.29622e-09,0.122441,0.000652553,1.02987e-06,-9.98538e-09,0.123094,0.000654582,9.99914e-07,9.2936e-09,0.12375,0.00065661,1.02779e-06,-4.83707e-09,0.124407,0.000658651,1.01328e-06,2.60411e-09,0.125067,0.000660685,1.0211e-06,-5.57945e-09,0.125729,0.000662711,1.00436e-06,1.22631e-08,0.126392,0.000664756,1.04115e-06,-1.36704e-08,0.127058,0.000666798,1.00014e-06,1.26161e-08,0.127726,0.000668836,1.03798e-06,-6.99155e-09,0.128396,0.000670891,1.01701e-06,4.48836e-10,0.129068,0.000672926,1.01836e-06,5.19606e-09,0.129742,0.000674978,1.03394e-06,-6.3319e-09,0.130418,0.000677027,1.01495e-06,5.2305e-09,0.131096,0.000679073,1.03064e-06,3.11123e-10,0.131776,0.000681135,1.03157e-06,-6.47511e-09,0.132458,0.000683179,1.01215e-06,1.06882e-08,0.133142,0.000685235,1.04421e-06,-6.47519e-09,0.133829,0.000687304,1.02479e-06,3.11237e-10,0.134517,0.000689355,1.02572e-06,5.23035e-09,0.135207,0.000691422,1.04141e-06,-6.3316e-09,0.1359,0.000693486,1.02242e-06,5.19484e-09,0.136594,0.000695546,1.038e-06,4.53497e-10,0.137291,0.000697623,1.03936e-06,-7.00891e-09,0.137989,0.000699681,1.01834e-06,1.2681e-08,0.13869,0.000701756,1.05638e-06,-1.39128e-08,0.139393,0.000703827,1.01464e-06,1.31679e-08,0.140098,0.000705896,1.05414e-06,-8.95659e-09,0.140805,0.000707977,1.02727e-06,7.75742e-09,0.141514,0.000710055,1.05055e-06,-7.17182e-09,0.142225,0.000712135,1.02903e-06,6.02862e-09,0.142938,0.000714211,1.04712e-06,-2.04163e-09,0.143653,0.000716299,1.04099e-06,2.13792e-09,0.144371,0.000718387,1.04741e-06,-6.51009e-09,0.14509,0.000720462,1.02787e-06,9.00123e-09,0.145812,0.000722545,1.05488e-06,3.07523e-10,0.146535,0.000724656,1.0558e-06,-1.02312e-08,0.147261,0.000726737,1.02511e-06,1.0815e-08,0.147989,0.000728819,1.05755e-06,-3.22681e-09,0.148719,0.000730925,1.04787e-06,2.09244e-09,0.14945,0.000733027,1.05415e-06,-5.143e-09,0.150185,0.00073512,1.03872e-06,3.57844e-09,0.150921,0.000737208,1.04946e-06,5.73027e-09,0.151659,0.000739324,1.06665e-06,-1.15983e-08,0.152399,0.000741423,1.03185e-06,1.08605e-08,0.153142,0.000743519,1.06443e-06,-2.04106e-09,0.153886,0.000745642,1.05831e-06,-2.69642e-09,0.154633,0.00074775,1.05022e-06,-2.07425e-09,0.155382,0.000749844,1.044e-06,1.09934e-08,0.156133,0.000751965,1.07698e-06,-1.20
972e-08,0.156886,0.000754083,1.04069e-06,7.59288e-09,0.157641,0.000756187,1.06347e-06,-3.37305e-09,0.158398,0.000758304,1.05335e-06,5.89921e-09,0.159158,0.000760428,1.07104e-06,-5.32248e-09,0.159919,0.000762554,1.05508e-06,4.8927e-10,0.160683,0.000764666,1.05654e-06,3.36547e-09,0.161448,0.000766789,1.06664e-06,9.50081e-10,0.162216,0.000768925,1.06949e-06,-7.16568e-09,0.162986,0.000771043,1.04799e-06,1.28114e-08,0.163758,0.000773177,1.08643e-06,-1.42774e-08,0.164533,0.000775307,1.0436e-06,1.44956e-08,0.165309,0.000777438,1.08708e-06,-1.39025e-08,0.166087,0.00077957,1.04538e-06,1.13118e-08,0.166868,0.000781695,1.07931e-06,-1.54224e-09,0.167651,0.000783849,1.07468e-06,-5.14312e-09,0.168436,0.000785983,1.05925e-06,7.21381e-09,0.169223,0.000788123,1.0809e-06,-8.81096e-09,0.170012,0.000790259,1.05446e-06,1.31289e-08,0.170803,0.000792407,1.09385e-06,-1.39022e-08,0.171597,0.000794553,1.05214e-06,1.26775e-08,0.172392,0.000796695,1.09018e-06,-7.00557e-09,0.17319,0.000798855,1.06916e-06,4.43796e-10,0.17399,0.000800994,1.07049e-06,5.23031e-09,0.174792,0.000803151,1.08618e-06,-6.46397e-09,0.175596,0.000805304,1.06679e-06,5.72444e-09,0.176403,0.000807455,1.08396e-06,-1.53254e-09,0.177211,0.000809618,1.07937e-06,4.05673e-10,0.178022,0.000811778,1.08058e-06,-9.01916e-11,0.178835,0.000813939,1.08031e-06,-4.49821e-11,0.17965,0.000816099,1.08018e-06,2.70234e-10,0.180467,0.00081826,1.08099e-06,-1.03603e-09,0.181286,0.000820419,1.07788e-06,3.87392e-09,0.182108,0.000822587,1.0895e-06,4.41522e-10,0.182932,0.000824767,1.09083e-06,-5.63997e-09,0.183758,0.000826932,1.07391e-06,7.21707e-09,0.184586,0.000829101,1.09556e-06,-8.32718e-09,0.185416,0.000831267,1.07058e-06,1.11907e-08,0.186248,0.000833442,1.10415e-06,-6.63336e-09,0.187083,0.00083563,1.08425e-06,4.41484e-10,0.187919,0.0008378,1.08557e-06,4.86754e-09,0.188758,0.000839986,1.10017e-06,-5.01041e-09,0.189599,0.000842171,1.08514e-06,2.72811e-10,0.190443,0.000844342,1.08596e-06,3.91916e-09,0.191288,0.000846526,1.09772e-06,-1.04819e-09,0.192136,0.000848718,1.09457e-06,2.73531e-10,0.192985,0.000850908,1.0954e-06,-4.58916e-11,0.193837,0.000853099,1.09526e-06,-9.01158e-11,0.194692,0.000855289,1.09499e-06,4.06506e-10,0.195548,0.00085748,1.09621e-06,-1.53595e-09,0.196407,0.000859668,1.0916e-06,5.73717e-09,0.197267,0.000861869,1.10881e-06,-6.51164e-09,0.19813,0.000864067,1.08928e-06,5.40831e-09,0.198995,0.000866261,1.1055e-06,-2.20401e-10,0.199863,0.000868472,1.10484e-06,-4.52652e-09,0.200732,0.000870668,1.09126e-06,3.42508e-09,0.201604,0.000872861,1.10153e-06,5.72762e-09,0.202478,0.000875081,1.11872e-06,-1.14344e-08,0.203354,0.000877284,1.08441e-06,1.02076e-08,0.204233,0.000879484,1.11504e-06,4.06355e-10,0.205113,0.000881715,1.11626e-06,-1.18329e-08,0.205996,0.000883912,1.08076e-06,1.71227e-08,0.206881,0.000886125,1.13213e-06,-1.19546e-08,0.207768,0.000888353,1.09626e-06,8.93465e-10,0.208658,0.000890548,1.09894e-06,8.38062e-09,0.209549,0.000892771,1.12408e-06,-4.61353e-09,0.210443,0.000895006,1.11024e-06,-4.82756e-09,0.211339,0.000897212,1.09576e-06,9.02245e-09,0.212238,0.00089943,1.12283e-06,-1.45997e-09,0.213138,0.000901672,1.11845e-06,-3.18255e-09,0.214041,0.000903899,1.1089e-06,-7.11073e-10,0.214946,0.000906115,1.10677e-06,6.02692e-09,0.215853,0.000908346,1.12485e-06,-8.49548e-09,0.216763,0.00091057,1.09936e-06,1.30537e-08,0.217675,0.000912808,1.13852e-06,-1.3917e-08,0.218588,0.000915044,1.09677e-06,1.28121e-08,0.219505,0.000917276,1.13521e-06,-7.5288e-09,0.220423,0.000919523,1.11262e-06,2.40205e-09,0.221344,0.000921756,1.11983e-06,-2.07941e-09,0.222267,0.000923989,
1.11359e-06,5.91551e-09,0.223192,0.000926234,1.13134e-06,-6.68149e-09,0.224119,0.000928477,1.11129e-06,5.90929e-09,0.225049,0.000930717,1.12902e-06,-2.05436e-09,0.22598,0.000932969,1.12286e-06,2.30807e-09,0.226915,0.000935222,1.12978e-06,-7.17796e-09,0.227851,0.00093746,1.10825e-06,1.15028e-08,0.228789,0.000939711,1.14276e-06,-9.03083e-09,0.22973,0.000941969,1.11566e-06,9.71932e-09,0.230673,0.00094423,1.14482e-06,-1.49452e-08,0.231619,0.000946474,1.09998e-06,2.02591e-08,0.232566,0.000948735,1.16076e-06,-2.13879e-08,0.233516,0.000950993,1.0966e-06,2.05888e-08,0.234468,0.000953247,1.15837e-06,-1.62642e-08,0.235423,0.000955515,1.10957e-06,1.46658e-08,0.236379,0.000957779,1.15357e-06,-1.25966e-08,0.237338,0.000960048,1.11578e-06,5.91793e-09,0.238299,0.000962297,1.13353e-06,3.82602e-09,0.239263,0.000964576,1.14501e-06,-6.3208e-09,0.240229,0.000966847,1.12605e-06,6.55613e-09,0.241197,0.000969119,1.14572e-06,-5.00268e-09,0.242167,0.000971395,1.13071e-06,-1.44659e-09,0.243139,0.000973652,1.12637e-06,1.07891e-08,0.244114,0.000975937,1.15874e-06,-1.19073e-08,0.245091,0.000978219,1.12302e-06,7.03782e-09,0.246071,0.000980486,1.14413e-06,-1.34276e-09,0.247052,0.00098277,1.1401e-06,-1.66669e-09,0.248036,0.000985046,1.1351e-06,8.00935e-09,0.249022,0.00098734,1.15913e-06,-1.54694e-08,0.250011,0.000989612,1.11272e-06,2.4066e-08,0.251002,0.000991909,1.18492e-06,-2.11901e-08,0.251995,0.000994215,1.12135e-06,1.08973e-09,0.25299,0.000996461,1.12462e-06,1.68311e-08,0.253988,0.000998761,1.17511e-06,-8.8094e-09,0.254987,0.00100109,1.14868e-06,-1.13958e-08,0.25599,0.00100335,1.1145e-06,2.45902e-08,0.256994,0.00100565,1.18827e-06,-2.73603e-08,0.258001,0.00100795,1.10618e-06,2.52464e-08,0.25901,0.00101023,1.18192e-06,-1.40207e-08,0.260021,0.00101256,1.13986e-06,1.03387e-09,0.261035,0.00101484,1.14296e-06,9.8853e-09,0.262051,0.00101715,1.17262e-06,-1.07726e-08,0.263069,0.00101947,1.1403e-06,3.40272e-09,0.26409,0.00102176,1.15051e-06,-2.83827e-09,0.265113,0.00102405,1.142e-06,7.95039e-09,0.266138,0.00102636,1.16585e-06,8.39047e-10,0.267166,0.00102869,1.16836e-06,-1.13066e-08,0.268196,0.00103099,1.13444e-06,1.4585e-08,0.269228,0.00103331,1.1782e-06,-1.72314e-08,0.270262,0.00103561,1.1265e-06,2.45382e-08,0.271299,0.00103794,1.20012e-06,-2.13166e-08,0.272338,0.00104028,1.13617e-06,1.12364e-09,0.273379,0.00104255,1.13954e-06,1.68221e-08,0.274423,0.00104488,1.19001e-06,-8.80736e-09,0.275469,0.00104723,1.16358e-06,-1.13948e-08,0.276518,0.00104953,1.1294e-06,2.45839e-08,0.277568,0.00105186,1.20315e-06,-2.73361e-08,0.278621,0.00105418,1.12114e-06,2.51559e-08,0.279677,0.0010565,1.19661e-06,-1.36832e-08,0.280734,0.00105885,1.15556e-06,-2.25706e-10,0.281794,0.00106116,1.15488e-06,1.45862e-08,0.282857,0.00106352,1.19864e-06,-2.83167e-08,0.283921,0.00106583,1.11369e-06,3.90759e-08,0.284988,0.00106817,1.23092e-06,-3.85801e-08,0.286058,0.00107052,1.11518e-06,2.58375e-08,0.287129,0.00107283,1.19269e-06,-5.16498e-09,0.288203,0.0010752,1.1772e-06,-5.17768e-09,0.28928,0.00107754,1.16167e-06,-3.92671e-09,0.290358,0.00107985,1.14988e-06,2.08846e-08,0.29144,0.00108221,1.21254e-06,-2.00072e-08,0.292523,0.00108458,1.15252e-06,-4.60659e-10,0.293609,0.00108688,1.15114e-06,2.18499e-08,0.294697,0.00108925,1.21669e-06,-2.73343e-08,0.295787,0.0010916,1.13468e-06,2.78826e-08,0.29688,0.00109395,1.21833e-06,-2.45915e-08,0.297975,0.00109632,1.14456e-06,1.08787e-08,0.299073,0.00109864,1.17719e-06,1.08788e-08,0.300172,0.00110102,1.20983e-06,-2.45915e-08,0.301275,0.00110337,1.13605e-06,2.78828e-08,0.302379,0.00110573,1.2197e-06,-2.73348e-08,0.303486,0.001
10808,1.1377e-06,2.18518e-08,0.304595,0.00111042,1.20325e-06,-4.67556e-10,0.305707,0.00111283,1.20185e-06,-1.99816e-08,0.306821,0.00111517,1.14191e-06,2.07891e-08,0.307937,0.00111752,1.20427e-06,-3.57026e-09,0.309056,0.00111992,1.19356e-06,-6.50797e-09,0.310177,0.00112228,1.17404e-06,-2.00165e-10,0.3113,0.00112463,1.17344e-06,7.30874e-09,0.312426,0.001127,1.19536e-06,7.67424e-10,0.313554,0.00112939,1.19767e-06,-1.03784e-08,0.314685,0.00113176,1.16653e-06,1.09437e-08,0.315818,0.00113412,1.19936e-06,-3.59406e-09,0.316953,0.00113651,1.18858e-06,3.43251e-09,0.318091,0.0011389,1.19888e-06,-1.0136e-08,0.319231,0.00114127,1.16847e-06,7.30915e-09,0.320374,0.00114363,1.1904e-06,1.07018e-08,0.321518,0.00114604,1.2225e-06,-2.03137e-08,0.322666,0.00114842,1.16156e-06,1.09484e-08,0.323815,0.00115078,1.19441e-06,6.32224e-09,0.324967,0.00115319,1.21337e-06,-6.43509e-09,0.326122,0.00115559,1.19407e-06,-1.03842e-08,0.327278,0.00115795,1.16291e-06,1.81697e-08,0.328438,0.00116033,1.21742e-06,-2.6901e-09,0.329599,0.00116276,1.20935e-06,-7.40939e-09,0.330763,0.00116515,1.18713e-06,2.52533e-09,0.331929,0.00116754,1.1947e-06,-2.69191e-09,0.333098,0.00116992,1.18663e-06,8.24218e-09,0.334269,0.00117232,1.21135e-06,-4.74377e-10,0.335443,0.00117474,1.20993e-06,-6.34471e-09,0.336619,0.00117714,1.1909e-06,-3.94922e-09,0.337797,0.00117951,1.17905e-06,2.21417e-08,0.338978,0.00118193,1.24547e-06,-2.50128e-08,0.340161,0.00118435,1.17043e-06,1.8305e-08,0.341346,0.00118674,1.22535e-06,-1.84048e-08,0.342534,0.00118914,1.17013e-06,2.55121e-08,0.343725,0.00119156,1.24667e-06,-2.40389e-08,0.344917,0.00119398,1.17455e-06,1.10389e-08,0.346113,0.00119636,1.20767e-06,9.68574e-09,0.34731,0.0011988,1.23673e-06,-1.99797e-08,0.34851,0.00120122,1.17679e-06,1.06284e-08,0.349713,0.0012036,1.20867e-06,7.26868e-09,0.350917,0.00120604,1.23048e-06,-9.90072e-09,0.352125,0.00120847,1.20078e-06,2.53177e-09,0.353334,0.00121088,1.20837e-06,-2.26199e-10,0.354546,0.0012133,1.20769e-06,-1.62705e-09,0.355761,0.00121571,1.20281e-06,6.73435e-09,0.356978,0.00121813,1.22302e-06,4.49207e-09,0.358197,0.00122059,1.23649e-06,-2.47027e-08,0.359419,0.00122299,1.16238e-06,3.47142e-08,0.360643,0.00122542,1.26653e-06,-2.47472e-08,0.36187,0.00122788,1.19229e-06,4.66965e-09,0.363099,0.00123028,1.20629e-06,6.06872e-09,0.36433,0.00123271,1.2245e-06,8.57729e-10,0.365564,0.00123516,1.22707e-06,-9.49952e-09,0.366801,0.00123759,1.19858e-06,7.33792e-09,0.36804,0.00124001,1.22059e-06,9.95025e-09,0.369281,0.00124248,1.25044e-06,-1.73366e-08,0.370525,0.00124493,1.19843e-06,-2.08464e-10,0.371771,0.00124732,1.1978e-06,1.81704e-08,0.373019,0.00124977,1.25232e-06,-1.28683e-08,0.37427,0.00125224,1.21371e-06,3.50042e-09,0.375524,0.00125468,1.22421e-06,-1.1335e-09,0.37678,0.00125712,1.22081e-06,1.03345e-09,0.378038,0.00125957,1.22391e-06,-3.00023e-09,0.379299,0.00126201,1.21491e-06,1.09676e-08,0.380562,0.00126447,1.24781e-06,-1.10676e-08,0.381828,0.00126693,1.21461e-06,3.50042e-09,0.383096,0.00126937,1.22511e-06,-2.93403e-09,0.384366,0.00127181,1.21631e-06,8.23574e-09,0.385639,0.00127427,1.24102e-06,-2.06607e-10,0.386915,0.00127675,1.2404e-06,-7.40935e-09,0.388193,0.00127921,1.21817e-06,4.1761e-11,0.389473,0.00128165,1.21829e-06,7.24223e-09,0.390756,0.0012841,1.24002e-06,7.91564e-10,0.392042,0.00128659,1.2424e-06,-1.04086e-08,0.393329,0.00128904,1.21117e-06,1.10405e-08,0.39462,0.0012915,1.24429e-06,-3.951e-09,0.395912,0.00129397,1.23244e-06,4.7634e-09,0.397208,0.00129645,1.24673e-06,-1.51025e-08,0.398505,0.0012989,1.20142e-06,2.58443e-08,0.399805,0.00130138,1.27895e-06,-2.86702e-08,0
.401108,0.00130385,1.19294e-06,2.92318e-08,0.402413,0.00130632,1.28064e-06,-2.86524e-08,0.403721,0.0013088,1.19468e-06,2.57731e-08,0.405031,0.00131127,1.272e-06,-1.48355e-08,0.406343,0.00131377,1.2275e-06,3.76652e-09,0.407658,0.00131623,1.23879e-06,-2.30784e-10,0.408976,0.00131871,1.2381e-06,-2.84331e-09,0.410296,0.00132118,1.22957e-06,1.16041e-08,0.411618,0.00132367,1.26438e-06,-1.37708e-08,0.412943,0.00132616,1.22307e-06,1.36768e-08,0.41427,0.00132865,1.2641e-06,-1.1134e-08,0.4156,0.00133114,1.2307e-06,1.05714e-09,0.416933,0.00133361,1.23387e-06,6.90538e-09,0.418267,0.00133609,1.25459e-06,1.12372e-09,0.419605,0.00133861,1.25796e-06,-1.14002e-08,0.420945,0.00134109,1.22376e-06,1.46747e-08,0.422287,0.00134358,1.26778e-06,-1.7496e-08,0.423632,0.00134606,1.21529e-06,2.5507e-08,0.424979,0.00134857,1.29182e-06,-2.49272e-08,0.426329,0.00135108,1.21703e-06,1.45972e-08,0.427681,0.00135356,1.26083e-06,-3.65935e-09,0.429036,0.00135607,1.24985e-06,4.00178e-11,0.430393,0.00135857,1.24997e-06,3.49917e-09,0.431753,0.00136108,1.26047e-06,-1.40366e-08,0.433116,0.00136356,1.21836e-06,2.28448e-08,0.43448,0.00136606,1.28689e-06,-1.77378e-08,0.435848,0.00136858,1.23368e-06,1.83043e-08,0.437218,0.0013711,1.28859e-06,-2.56769e-08,0.43859,0.0013736,1.21156e-06,2.47987e-08,0.439965,0.0013761,1.28595e-06,-1.39133e-08,0.441342,0.00137863,1.24421e-06,1.05202e-09,0.442722,0.00138112,1.24737e-06,9.70507e-09,0.444104,0.00138365,1.27649e-06,-1.00698e-08,0.445489,0.00138617,1.24628e-06,7.72123e-10,0.446877,0.00138867,1.24859e-06,6.98132e-09,0.448267,0.00139118,1.26954e-06,1.10477e-09,0.449659,0.00139373,1.27285e-06,-1.14003e-08,0.451054,0.00139624,1.23865e-06,1.4694e-08,0.452452,0.00139876,1.28273e-06,-1.75734e-08,0.453852,0.00140127,1.23001e-06,2.5797e-08,0.455254,0.00140381,1.3074e-06,-2.60097e-08,0.456659,0.00140635,1.22937e-06,1.86371e-08,0.458067,0.00140886,1.28529e-06,-1.8736e-08,0.459477,0.00141137,1.22908e-06,2.65048e-08,0.46089,0.00141391,1.30859e-06,-2.76784e-08,0.462305,0.00141645,1.22556e-06,2.46043e-08,0.463722,0.00141897,1.29937e-06,-1.11341e-08,0.465143,0.00142154,1.26597e-06,-9.87033e-09,0.466565,0.00142404,1.23636e-06,2.08131e-08,0.467991,0.00142657,1.2988e-06,-1.37773e-08,0.469419,0.00142913,1.25746e-06,4.49378e-09,0.470849,0.00143166,1.27094e-06,-4.19781e-09,0.472282,0.00143419,1.25835e-06,1.22975e-08,0.473717,0.00143674,1.29524e-06,-1.51902e-08,0.475155,0.00143929,1.24967e-06,1.86608e-08,0.476596,0.00144184,1.30566e-06,-2.96506e-08,0.478039,0.00144436,1.2167e-06,4.03368e-08,0.479485,0.00144692,1.33771e-06,-4.22896e-08,0.480933,0.00144947,1.21085e-06,3.94148e-08,0.482384,0.00145201,1.32909e-06,-2.59626e-08,0.483837,0.00145459,1.2512e-06,4.83124e-09,0.485293,0.0014571,1.2657e-06,6.63757e-09,0.486751,0.00145966,1.28561e-06,-1.57911e-09,0.488212,0.00146222,1.28087e-06,-3.21468e-10,0.489676,0.00146478,1.27991e-06,2.86517e-09,0.491142,0.00146735,1.2885e-06,-1.11392e-08,0.49261,0.00146989,1.25508e-06,1.18893e-08,0.494081,0.00147244,1.29075e-06,-6.61574e-09,0.495555,0.001475,1.27091e-06,1.45736e-08,0.497031,0.00147759,1.31463e-06,-2.18759e-08,0.49851,0.00148015,1.249e-06,1.33252e-08,0.499992,0.00148269,1.28897e-06,-1.62277e-09,0.501476,0.00148526,1.28411e-06,-6.83421e-09,0.502962,0.00148781,1.2636e-06,2.89596e-08,0.504451,0.00149042,1.35048e-06,-4.93997e-08,0.505943,0.00149298,1.20228e-06,4.94299e-08,0.507437,0.00149553,1.35057e-06,-2.91107e-08,0.508934,0.00149814,1.26324e-06,7.40848e-09,0.510434,0.00150069,1.28547e-06,-5.23187e-10,0.511936,0.00150326,1.2839e-06,-5.31585e-09,0.51344,0.00150581,1.26795e-06,2.1
7866e-08,0.514947,0.00150841,1.33331e-06,-2.22257e-08,0.516457,0.00151101,1.26663e-06,7.51178e-09,0.517969,0.00151357,1.28917e-06,-7.82128e-09,0.519484,0.00151613,1.2657e-06,2.37733e-08,0.521002,0.00151873,1.33702e-06,-2.76674e-08,0.522522,0.00152132,1.25402e-06,2.72917e-08,0.524044,0.00152391,1.3359e-06,-2.18949e-08,0.525569,0.00152652,1.27021e-06,6.83372e-10,0.527097,0.00152906,1.27226e-06,1.91613e-08,0.528628,0.00153166,1.32974e-06,-1.77241e-08,0.53016,0.00153427,1.27657e-06,-7.86963e-09,0.531696,0.0015368,1.25296e-06,4.92027e-08,0.533234,0.00153945,1.40057e-06,-6.9732e-08,0.534775,0.00154204,1.19138e-06,5.09114e-08,0.536318,0.00154458,1.34411e-06,-1.4704e-08,0.537864,0.00154722,1.3e-06,7.9048e-09,0.539413,0.00154984,1.32371e-06,-1.69152e-08,0.540964,0.00155244,1.27297e-06,1.51355e-10,0.542517,0.00155499,1.27342e-06,1.63099e-08,0.544074,0.00155758,1.32235e-06,-5.78647e-09,0.545633,0.00156021,1.30499e-06,6.83599e-09,0.547194,0.00156284,1.3255e-06,-2.15575e-08,0.548758,0.00156543,1.26083e-06,1.97892e-08,0.550325,0.00156801,1.32019e-06,2.00525e-09,0.551894,0.00157065,1.32621e-06,-2.78103e-08,0.553466,0.00157322,1.24278e-06,4.96314e-08,0.555041,0.00157586,1.39167e-06,-5.1506e-08,0.556618,0.00157849,1.23716e-06,3.71835e-08,0.558198,0.00158107,1.34871e-06,-3.76233e-08,0.55978,0.00158366,1.23584e-06,5.37052e-08,0.561365,0.00158629,1.39695e-06,-5.79884e-08,0.562953,0.00158891,1.22299e-06,5.90392e-08,0.564543,0.00159153,1.4001e-06,-5.89592e-08,0.566136,0.00159416,1.22323e-06,5.7588e-08,0.567731,0.00159678,1.39599e-06,-5.21835e-08,0.569329,0.00159941,1.23944e-06,3.19369e-08,0.57093,0.00160199,1.33525e-06,-1.59594e-08,0.572533,0.00160461,1.28737e-06,3.19006e-08,0.574139,0.00160728,1.38307e-06,-5.20383e-08,0.575748,0.00160989,1.22696e-06,5.70431e-08,0.577359,0.00161251,1.39809e-06,-5.69247e-08,0.578973,0.00161514,1.22731e-06,5.14463e-08,0.580589,0.00161775,1.38165e-06,-2.9651e-08,0.582208,0.00162042,1.2927e-06,7.55339e-09,0.58383,0.00162303,1.31536e-06,-5.62636e-10,0.585455,0.00162566,1.31367e-06,-5.30281e-09,0.587081,0.00162827,1.29776e-06,2.17738e-08,0.588711,0.00163093,1.36309e-06,-2.21875e-08,0.590343,0.00163359,1.29652e-06,7.37164e-09,0.591978,0.00163621,1.31864e-06,-7.29907e-09,0.593616,0.00163882,1.29674e-06,2.18247e-08,0.595256,0.00164148,1.36221e-06,-2.03952e-08,0.596899,0.00164414,1.30103e-06,1.51241e-10,0.598544,0.00164675,1.30148e-06,1.97902e-08,0.600192,0.00164941,1.36085e-06,-1.97074e-08,0.601843,0.00165207,1.30173e-06,-5.65175e-10,0.603496,0.00165467,1.30004e-06,2.1968e-08,0.605152,0.00165734,1.36594e-06,-2.77024e-08,0.606811,0.00165999,1.28283e-06,2.92369e-08,0.608472,0.00166264,1.37054e-06,-2.96407e-08,0.610136,0.00166529,1.28162e-06,2.97215e-08,0.611803,0.00166795,1.37079e-06,-2.96408e-08,0.613472,0.0016706,1.28186e-06,2.92371e-08,0.615144,0.00167325,1.36957e-06,-2.77031e-08,0.616819,0.00167591,1.28647e-06,2.19708e-08,0.618496,0.00167855,1.35238e-06,-5.75407e-10,0.620176,0.00168125,1.35065e-06,-1.9669e-08,0.621858,0.00168389,1.29164e-06,1.96468e-08,0.623544,0.00168653,1.35058e-06,6.86403e-10,0.625232,0.00168924,1.35264e-06,-2.23924e-08,0.626922,0.00169187,1.28547e-06,2.92788e-08,0.628615,0.00169453,1.3733e-06,-3.51181e-08,0.630311,0.00169717,1.26795e-06,5.15889e-08,0.63201,0.00169987,1.42272e-06,-5.2028e-08,0.633711,0.00170255,1.26663e-06,3.73139e-08,0.635415,0.0017052,1.37857e-06,-3.76227e-08,0.637121,0.00170784,1.2657e-06,5.35722e-08,0.63883,0.00171054,1.42642e-06,-5.74567e-08,0.640542,0.00171322,1.25405e-06,5.70456e-08,0.642257,0.0017159,1.42519e-06,-5.15163e-08,0.643974,0.00171
859,1.27064e-06,2.98103e-08,0.645694,0.00172122,1.36007e-06,-8.12016e-09,0.647417,0.00172392,1.33571e-06,2.67039e-09,0.649142,0.0017266,1.34372e-06,-2.56152e-09,0.65087,0.00172928,1.33604e-06,7.57571e-09,0.6526,0.00173197,1.35876e-06,-2.77413e-08,0.654334,0.00173461,1.27554e-06,4.3785e-08,0.65607,0.00173729,1.40689e-06,-2.81896e-08,0.657808,0.00174002,1.32233e-06,9.36893e-09,0.65955,0.00174269,1.35043e-06,-9.28617e-09,0.661294,0.00174536,1.32257e-06,2.77757e-08,0.66304,0.00174809,1.4059e-06,-4.2212e-08,0.66479,0.00175078,1.27926e-06,2.1863e-08,0.666542,0.0017534,1.34485e-06,1.43648e-08,0.668297,0.00175613,1.38795e-06,-1.97177e-08,0.670054,0.00175885,1.3288e-06,4.90115e-09,0.671814,0.00176152,1.3435e-06,1.13232e-10,0.673577,0.00176421,1.34384e-06,-5.3542e-09,0.675343,0.00176688,1.32778e-06,2.13035e-08,0.677111,0.0017696,1.39169e-06,-2.02553e-08,0.678882,0.00177232,1.33092e-06,1.13005e-10,0.680656,0.00177499,1.33126e-06,1.98031e-08,0.682432,0.00177771,1.39067e-06,-1.97211e-08,0.684211,0.00178043,1.33151e-06,-5.2349e-10,0.685993,0.00178309,1.32994e-06,2.18151e-08,0.687777,0.00178582,1.39538e-06,-2.71325e-08,0.689564,0.00178853,1.31398e-06,2.71101e-08,0.691354,0.00179124,1.39531e-06,-2.17035e-08,0.693147,0.00179396,1.3302e-06,9.92865e-11,0.694942,0.00179662,1.3305e-06,2.13063e-08,0.69674,0.00179935,1.39442e-06,-2.57198e-08,0.698541,0.00180206,1.31726e-06,2.19682e-08,0.700344,0.00180476,1.38317e-06,-2.54852e-09,0.70215,0.00180752,1.37552e-06,-1.17741e-08,0.703959,0.00181023,1.3402e-06,-9.95999e-09,0.705771,0.00181288,1.31032e-06,5.16141e-08,0.707585,0.00181566,1.46516e-06,-7.72869e-08,0.709402,0.00181836,1.2333e-06,7.87197e-08,0.711222,0.00182106,1.46946e-06,-5.87781e-08,0.713044,0.00182382,1.29312e-06,3.71834e-08,0.714869,0.00182652,1.40467e-06,-3.03511e-08,0.716697,0.00182924,1.31362e-06,2.46161e-08,0.718528,0.00183194,1.38747e-06,-8.5087e-09,0.720361,0.00183469,1.36194e-06,9.41892e-09,0.722197,0.00183744,1.3902e-06,-2.91671e-08,0.724036,0.00184014,1.3027e-06,4.76448e-08,0.725878,0.00184288,1.44563e-06,-4.22028e-08,0.727722,0.00184565,1.31902e-06,1.95682e-09,0.729569,0.00184829,1.3249e-06,3.43754e-08,0.731419,0.00185104,1.42802e-06,-2.0249e-08,0.733271,0.00185384,1.36727e-06,-1.29838e-08,0.735126,0.00185654,1.32832e-06,1.25794e-08,0.736984,0.00185923,1.36606e-06,2.22711e-08,0.738845,0.00186203,1.43287e-06,-4.20594e-08,0.740708,0.00186477,1.3067e-06,2.67571e-08,0.742574,0.00186746,1.38697e-06,-5.36424e-09,0.744443,0.00187022,1.37087e-06,-5.30023e-09,0.746315,0.00187295,1.35497e-06,2.65653e-08,0.748189,0.00187574,1.43467e-06,-4.13564e-08,0.750066,0.00187848,1.3106e-06,1.9651e-08,0.751946,0.00188116,1.36955e-06,2.23572e-08,0.753828,0.00188397,1.43663e-06,-4.9475e-08,0.755714,0.00188669,1.2882e-06,5.63335e-08,0.757602,0.00188944,1.4572e-06,-5.66499e-08,0.759493,0.00189218,1.28725e-06,5.10567e-08,0.761386,0.00189491,1.44042e-06,-2.83677e-08,0.763283,0.00189771,1.35532e-06,2.80962e-09,0.765182,0.00190042,1.36375e-06,1.71293e-08,0.767083,0.0019032,1.41513e-06,-1.17221e-08,0.768988,0.001906,1.37997e-06,-2.98453e-08,0.770895,0.00190867,1.29043e-06,7.14987e-08,0.772805,0.00191146,1.50493e-06,-7.73354e-08,0.774718,0.00191424,1.27292e-06,5.90292e-08,0.776634,0.00191697,1.45001e-06,-3.9572e-08,0.778552,0.00191975,1.33129e-06,3.9654e-08,0.780473,0.00192253,1.45026e-06,-5.94395e-08,0.782397,0.00192525,1.27194e-06,7.88945e-08,0.784324,0.00192803,1.50862e-06,-7.73249e-08,0.786253,0.00193082,1.27665e-06,5.15913e-08,0.788185,0.00193352,1.43142e-06,-9.83099e-09,0.79012,0.00193636,1.40193e-06,-1.22672e-08,0.792058
,0.00193912,1.36513e-06,-7.05275e-10,0.793999,0.00194185,1.36301e-06,1.50883e-08,0.795942,0.00194462,1.40828e-06,-4.33147e-11,0.797888,0.00194744,1.40815e-06,-1.49151e-08,0.799837,0.00195021,1.3634e-06,9.93244e-11,0.801788,0.00195294,1.3637e-06,1.45179e-08,0.803743,0.00195571,1.40725e-06,1.43363e-09,0.8057,0.00195853,1.41155e-06,-2.02525e-08,0.80766,0.00196129,1.35079e-06,1.99718e-08,0.809622,0.00196405,1.41071e-06,-3.01649e-11,0.811588,0.00196687,1.41062e-06,-1.9851e-08,0.813556,0.00196964,1.35107e-06,1.98296e-08,0.815527,0.0019724,1.41056e-06,1.37485e-10,0.817501,0.00197522,1.41097e-06,-2.03796e-08,0.819477,0.00197798,1.34983e-06,2.17763e-08,0.821457,0.00198074,1.41516e-06,-7.12085e-09,0.823439,0.00198355,1.3938e-06,6.70707e-09,0.825424,0.00198636,1.41392e-06,-1.97074e-08,0.827412,0.00198913,1.35479e-06,1.25179e-08,0.829402,0.00199188,1.39235e-06,2.92405e-08,0.831396,0.00199475,1.48007e-06,-6.98755e-08,0.833392,0.0019975,1.27044e-06,7.14477e-08,0.835391,0.00200026,1.48479e-06,-3.71014e-08,0.837392,0.00200311,1.37348e-06,1.73533e-08,0.839397,0.00200591,1.42554e-06,-3.23118e-08,0.841404,0.00200867,1.32861e-06,5.2289e-08,0.843414,0.00201148,1.48547e-06,-5.76348e-08,0.845427,0.00201428,1.31257e-06,5.9041e-08,0.847443,0.00201708,1.48969e-06,-5.93197e-08,0.849461,0.00201988,1.31173e-06,5.90289e-08,0.851482,0.00202268,1.48882e-06,-5.75864e-08,0.853507,0.00202549,1.31606e-06,5.21075e-08,0.855533,0.00202828,1.47238e-06,-3.16344e-08,0.857563,0.00203113,1.37748e-06,1.48257e-08,0.859596,0.00203393,1.42196e-06,-2.76684e-08,0.861631,0.00203669,1.33895e-06,3.62433e-08,0.863669,0.00203947,1.44768e-06,1.90463e-09,0.86571,0.00204237,1.45339e-06,-4.38617e-08,0.867754,0.00204515,1.32181e-06,5.43328e-08,0.8698,0.00204796,1.48481e-06,-5.42603e-08,0.87185,0.00205076,1.32203e-06,4.34989e-08,0.873902,0.00205354,1.45252e-06,-5.26029e-10,0.875957,0.00205644,1.45095e-06,-4.13949e-08,0.878015,0.00205922,1.32676e-06,4.68962e-08,0.880075,0.00206201,1.46745e-06,-2.69807e-08,0.882139,0.00206487,1.38651e-06,1.42181e-09,0.884205,0.00206764,1.39077e-06,2.12935e-08,0.886274,0.00207049,1.45465e-06,-2.69912e-08,0.888346,0.00207332,1.37368e-06,2.70664e-08,0.890421,0.00207615,1.45488e-06,-2.16698e-08,0.892498,0.00207899,1.38987e-06,8.14756e-12,0.894579,0.00208177,1.38989e-06,2.16371e-08,0.896662,0.00208462,1.45481e-06,-2.6952e-08,0.898748,0.00208744,1.37395e-06,2.65663e-08,0.900837,0.00209027,1.45365e-06,-1.97084e-08,0.902928,0.00209312,1.39452e-06,-7.33731e-09,0.905023,0.00209589,1.37251e-06,4.90578e-08,0.90712,0.00209878,1.51968e-06,-6.96845e-08,0.90922,0.00210161,1.31063e-06,5.08664e-08,0.911323,0.00210438,1.46323e-06,-1.45717e-08,0.913429,0.00210727,1.41952e-06,7.42038e-09,0.915538,0.00211013,1.44178e-06,-1.51097e-08,0.917649,0.00211297,1.39645e-06,-6.58618e-09,0.919764,0.00211574,1.37669e-06,4.14545e-08,0.921881,0.00211862,1.50105e-06,-4.00222e-08,0.924001,0.0021215,1.38099e-06,-5.7518e-10,0.926124,0.00212426,1.37926e-06,4.23229e-08,0.92825,0.00212714,1.50623e-06,-4.9507e-08,0.930378,0.00213001,1.35771e-06,3.64958e-08,0.93251,0.00213283,1.4672e-06,-3.68713e-08,0.934644,0.00213566,1.35658e-06,5.13848e-08,0.936781,0.00213852,1.51074e-06,-4.94585e-08,0.938921,0.0021414,1.36236e-06,2.72399e-08,0.941064,0.0021442,1.44408e-06,1.0372e-10,0.943209,0.00214709,1.44439e-06,-2.76547e-08,0.945358,0.0021499,1.36143e-06,5.09106e-08,0.947509,0.00215277,1.51416e-06,-5.67784e-08,0.949663,0.00215563,1.34382e-06,5.69935e-08,0.95182,0.00215849,1.5148e-06,-5.19861e-08,0.95398,0.00216136,1.35885e-06,3.17417e-08,0.956143,0.00216418,1.45407e-06,-1.
53758e-08,0.958309,0.00216704,1.40794e-06,2.97615e-08,0.960477,0.00216994,1.49723e-06,-4.40657e-08,0.962649,0.00217281,1.36503e-06,2.72919e-08,0.964823,0.00217562,1.44691e-06,-5.49729e-09,0.967,0.0021785,1.43041e-06,-5.30273e-09,0.96918,0.00218134,1.41451e-06,2.67084e-08,0.971363,0.00218425,1.49463e-06,-4.19265e-08,0.973548,0.00218711,1.36885e-06,2.17881e-08,0.975737,0.00218992,1.43422e-06,1.43789e-08,0.977928,0.00219283,1.47735e-06,-1.96989e-08,0.980122,0.00219572,1.41826e-06,4.81221e-09,0.98232,0.00219857,1.43269e-06,4.50048e-10,0.98452,0.00220144,1.43404e-06,-6.61237e-09,0.986722,0.00220429,1.41421e-06,2.59993e-08,0.988928,0.0022072,1.4922e-06,-3.77803e-08,0.991137,0.00221007,1.37886e-06,5.9127e-09,0.993348,0.00221284,1.3966e-06,1.33339e-07,0.995563,0.00221604,1.79662e-06,-5.98872e-07,0.99778,0.00222015,0.,0.};
+
+        template <bool srgb, int blueIdx, typename T, typename D>
+        __device__ __forceinline__ void RGB2LabConvert_32F(const T& src, D& dst)
+        {
+            const float _1_3 = 1.0f / 3.0f;
+            const float _a = 16.0f / 116.0f;
+
+            float B = blueIdx == 0 ? src.x : src.z;
+            float G = src.y;
+            float R = blueIdx == 0 ? src.z : src.x;
+
+            // linearize sRGB input through the cubic-spline gamma LUT above
+            if (srgb)
+            {
+                B = splineInterpolate(B * GAMMA_TAB_SIZE, c_sRGBGammaTab, GAMMA_TAB_SIZE);
+                G = splineInterpolate(G * GAMMA_TAB_SIZE, c_sRGBGammaTab, GAMMA_TAB_SIZE);
+                R = splineInterpolate(R * GAMMA_TAB_SIZE, c_sRGBGammaTab, GAMMA_TAB_SIZE);
+            }
+
+            // RGB -> XYZ, coefficients pre-divided by the D65 white point
+            float X = B * 0.189828f + G * 0.376219f + R * 0.433953f;
+            float Y = B * 0.072169f + G * 0.715160f + R * 0.212671f;
+            float Z = B * 0.872766f + G * 0.109477f + R * 0.017758f;
+
+            // XYZ -> CIE L*a*b* with the standard 0.008856 threshold
+            float FX = X > 0.008856f ? ::powf(X, _1_3) : (7.787f * X + _a);
+            float FY = Y > 0.008856f ? ::powf(Y, _1_3) : (7.787f * Y + _a);
+            float FZ = Z > 0.008856f ? ::powf(Z, _1_3) : (7.787f * Z + _a);
+
+            float L = Y > 0.008856f ? (116.f * FY - 16.f) : (903.3f * Y);
+            float a = 500.f * (FX - FY);
+            float b = 200.f * (FY - FZ);
+
+            dst.x = L;
+            dst.y = a;
+            dst.z = b;
+        }
+
+        template <typename T, int scn, int dcn, bool srgb, int blueIdx> struct RGB2Lab;
+
+        template <int scn, int dcn, bool srgb, int blueIdx>
+        struct RGB2Lab<uchar, scn, dcn, srgb, blueIdx>
+            : unary_function<typename TypeVec<uchar, scn>::vec_type, typename TypeVec<uchar, dcn>::vec_type>
+        {
+            __device__ __forceinline__ typename TypeVec<uchar, dcn>::vec_type operator ()(const typename TypeVec<uchar, scn>::vec_type& src) const
+            {
+                typename TypeVec<uchar, dcn>::vec_type dst;
+
+                RGB2LabConvert_8U<srgb, blueIdx>(src, dst);
+
+                return dst;
+            }
+
+            __host__ __device__ __forceinline__ RGB2Lab() {}
+            __host__ __device__ __forceinline__ RGB2Lab(const RGB2Lab&) {}
+        };
+
+        template <int scn, int dcn, bool srgb, int blueIdx>
+        struct RGB2Lab<float, scn, dcn, srgb, blueIdx>
+            : unary_function<typename TypeVec<float, scn>::vec_type, typename TypeVec<float, dcn>::vec_type>
+        {
+            __device__ __forceinline__ typename TypeVec<float, dcn>::vec_type operator ()(const typename TypeVec<float, scn>::vec_type& src) const
+            {
+                typename TypeVec<float, dcn>::vec_type dst;
+
+                RGB2LabConvert_32F<srgb, blueIdx>(src, dst);
+
+                return dst;
+            }
+
+            __host__ __device__ __forceinline__ RGB2Lab() {}
+            __host__ __device__ __forceinline__ RGB2Lab(const RGB2Lab&) {}
+        };
+    }
+
+#define OPENCV_GPU_IMPLEMENT_RGB2Lab_TRAITS(name, scn, dcn, srgb, blueIdx) \
+    template <typename T> struct name ## _traits \
+    { \
+        typedef ::cv::gpu::device::color_detail::RGB2Lab<T, scn, dcn, srgb, blueIdx> functor_type; \
+        static __host__ __device__ __forceinline__ functor_type create_functor() \
+        { \
+            return functor_type(); \
+        } \
+    };
+
+    namespace color_detail
+    {
+        // inverse gamma table (linear -> sRGB), same spline-LUT layout as c_sRGBGammaTab above
+        __constant__ float c_sRGBInvGammaTab[] =
{0,0.0126255,0.,-8.33961e-06,0.0126172,0.0126005,-2.50188e-05,4.1698e-05,0.0252344,0.0126756,0.000100075,-0.000158451,0.0378516,0.0124004,-0.000375277,-0.000207393,0.0496693,0.0110276,-0.000997456,0.00016837,0.0598678,0.00953783,-0.000492346,2.07235e-05,0.068934,0.00861531,-0.000430176,3.62876e-05,0.0771554,0.00786382,-0.000321313,1.87625e-05,0.0847167,0.00727748,-0.000265025,1.53594e-05,0.0917445,0.00679351,-0.000218947,1.10545e-05,0.0983301,0.00638877,-0.000185784,8.66984e-06,0.104542,0.00604322,-0.000159774,6.82996e-06,0.110432,0.00574416,-0.000139284,5.51008e-06,0.116042,0.00548212,-0.000122754,4.52322e-06,0.121406,0.00525018,-0.000109184,3.75557e-06,0.126551,0.00504308,-9.79177e-05,3.17134e-06,0.131499,0.00485676,-8.84037e-05,2.68469e-06,0.13627,0.004688,-8.03496e-05,2.31725e-06,0.14088,0.00453426,-7.33978e-05,2.00868e-06,0.145343,0.00439349,-6.73718e-05,1.74775e-06,0.149671,0.00426399,-6.21286e-05,1.53547e-06,0.153875,0.00414434,-5.75222e-05,1.364e-06,0.157963,0.00403338,-5.34301e-05,1.20416e-06,0.161944,0.00393014,-4.98177e-05,1.09114e-06,0.165825,0.00383377,-4.65443e-05,9.57987e-07,0.169613,0.00374356,-4.36703e-05,8.88359e-07,0.173314,0.00365888,-4.10052e-05,7.7849e-07,0.176933,0.00357921,-3.86697e-05,7.36254e-07,0.180474,0.00350408,-3.6461e-05,6.42534e-07,0.183942,0.00343308,-3.45334e-05,6.12614e-07,0.187342,0.00336586,-3.26955e-05,5.42894e-07,0.190675,0.00330209,-3.10669e-05,5.08967e-07,0.193947,0.00324149,-2.954e-05,4.75977e-07,0.197159,0.00318383,-2.8112e-05,4.18343e-07,0.200315,0.00312887,-2.6857e-05,4.13651e-07,0.203418,0.00307639,-2.5616e-05,3.70847e-07,0.206469,0.00302627,-2.45035e-05,3.3813e-07,0.209471,0.00297828,-2.34891e-05,3.32999e-07,0.212426,0.0029323,-2.24901e-05,2.96826e-07,0.215336,0.00288821,-2.15996e-05,2.82736e-07,0.218203,0.00284586,-2.07514e-05,2.70961e-07,0.221029,0.00280517,-1.99385e-05,2.42744e-07,0.223814,0.00276602,-1.92103e-05,2.33277e-07,0.226561,0.0027283,-1.85105e-05,2.2486e-07,0.229271,0.00269195,-1.78359e-05,2.08383e-07,0.231945,0.00265691,-1.72108e-05,1.93305e-07,0.234585,0.00262307,-1.66308e-05,1.80687e-07,0.237192,0.00259035,-1.60888e-05,1.86632e-07,0.239766,0.00255873,-1.55289e-05,1.60569e-07,0.24231,0.00252815,-1.50472e-05,1.54566e-07,0.244823,0.00249852,-1.45835e-05,1.59939e-07,0.247307,0.00246983,-1.41037e-05,1.29549e-07,0.249763,0.00244202,-1.3715e-05,1.41429e-07,0.252191,0.00241501,-1.32907e-05,1.39198e-07,0.254593,0.00238885,-1.28731e-05,1.06444e-07,0.256969,0.00236342,-1.25538e-05,1.2048e-07,0.25932,0.00233867,-1.21924e-05,1.26892e-07,0.261647,0.00231467,-1.18117e-05,8.72084e-08,0.26395,0.00229131,-1.15501e-05,1.20323e-07,0.26623,0.00226857,-1.11891e-05,8.71514e-08,0.268487,0.00224645,-1.09276e-05,9.73165e-08,0.270723,0.00222489,-1.06357e-05,8.98259e-08,0.272937,0.00220389,-1.03662e-05,7.98218e-08,0.275131,0.00218339,-1.01267e-05,9.75254e-08,0.277304,0.00216343,-9.83416e-06,6.65195e-08,0.279458,0.00214396,-9.63461e-06,8.34313e-08,0.281592,0.00212494,-9.38431e-06,7.65919e-08,0.283708,0.00210641,-9.15454e-06,5.7236e-08,0.285805,0.00208827,-8.98283e-06,8.18939e-08,0.287885,0.00207055,-8.73715e-06,6.2224e-08,0.289946,0.00205326,-8.55047e-06,5.66388e-08,0.291991,0.00203633,-8.38056e-06,6.88491e-08,0.294019,0.00201978,-8.17401e-06,5.53955e-08,0.296031,0.00200359,-8.00782e-06,6.71971e-08,0.298027,0.00198778,-7.80623e-06,3.34439e-08,0.300007,0.00197227,-7.7059e-06,6.7248e-08,0.301971,0.00195706,-7.50416e-06,5.51915e-08,0.303921,0.00194221,-7.33858e-06,3.98124e-08,0.305856,0.00192766,-7.21915e-06,5.37795e-08,0.307776,0.00191338,-7.05781e-06,4.3091
9e-08,0.309683,0.00189939,-6.92853e-06,4.20744e-08,0.311575,0.00188566,-6.80231e-06,5.68321e-08,0.313454,0.00187223,-6.63181e-06,2.86195e-08,0.31532,0.00185905,-6.54595e-06,3.73075e-08,0.317172,0.00184607,-6.43403e-06,6.05684e-08,0.319012,0.00183338,-6.25233e-06,1.84426e-08,0.320839,0.00182094,-6.197e-06,4.44757e-08,0.322654,0.00180867,-6.06357e-06,4.20729e-08,0.324456,0.00179667,-5.93735e-06,2.56511e-08,0.326247,0.00178488,-5.8604e-06,3.41368e-08,0.328026,0.00177326,-5.75799e-06,4.64177e-08,0.329794,0.00176188,-5.61874e-06,1.86107e-08,0.33155,0.0017507,-5.5629e-06,2.81511e-08,0.333295,0.00173966,-5.47845e-06,4.75987e-08,0.335029,0.00172884,-5.33565e-06,1.98726e-08,0.336753,0.00171823,-5.27604e-06,2.19226e-08,0.338466,0.00170775,-5.21027e-06,4.14483e-08,0.340169,0.00169745,-5.08592e-06,2.09017e-08,0.341861,0.00168734,-5.02322e-06,2.39561e-08,0.343543,0.00167737,-4.95135e-06,3.22852e-08,0.345216,0.00166756,-4.85449e-06,2.57173e-08,0.346878,0.00165793,-4.77734e-06,1.38569e-08,0.348532,0.00164841,-4.73577e-06,3.80634e-08,0.350175,0.00163906,-4.62158e-06,1.27043e-08,0.35181,0.00162985,-4.58347e-06,3.03279e-08,0.353435,0.00162078,-4.49249e-06,1.49961e-08,0.355051,0.00161184,-4.4475e-06,2.88977e-08,0.356659,0.00160303,-4.3608e-06,1.84241e-08,0.358257,0.00159436,-4.30553e-06,1.6616e-08,0.359848,0.0015858,-4.25568e-06,3.43218e-08,0.361429,0.00157739,-4.15272e-06,-4.89172e-09,0.363002,0.00156907,-4.16739e-06,4.48498e-08,0.364567,0.00156087,-4.03284e-06,4.30676e-09,0.366124,0.00155282,-4.01992e-06,2.73303e-08,0.367673,0.00154486,-3.93793e-06,5.58036e-09,0.369214,0.001537,-3.92119e-06,3.97554e-08,0.370747,0.00152928,-3.80193e-06,-1.55904e-08,0.372272,0.00152163,-3.8487e-06,5.24081e-08,0.37379,0.00151409,-3.69147e-06,-1.52272e-08,0.375301,0.00150666,-3.73715e-06,3.83028e-08,0.376804,0.0014993,-3.62225e-06,1.10278e-08,0.378299,0.00149209,-3.58916e-06,6.99326e-09,0.379788,0.00148493,-3.56818e-06,2.06038e-08,0.381269,0.00147786,-3.50637e-06,2.98009e-08,0.382744,0.00147093,-3.41697e-06,-2.05978e-08,0.384211,0.00146404,-3.47876e-06,5.25899e-08,0.385672,0.00145724,-3.32099e-06,-1.09471e-08,0.387126,0.00145056,-3.35383e-06,2.10009e-08,0.388573,0.00144392,-3.29083e-06,1.63501e-08,0.390014,0.00143739,-3.24178e-06,3.00641e-09,0.391448,0.00143091,-3.23276e-06,3.12282e-08,0.392875,0.00142454,-3.13908e-06,-8.70932e-09,0.394297,0.00141824,-3.16521e-06,3.34114e-08,0.395712,0.00141201,-3.06497e-06,-5.72754e-09,0.397121,0.00140586,-3.08215e-06,1.9301e-08,0.398524,0.00139975,-3.02425e-06,1.7931e-08,0.39992,0.00139376,-2.97046e-06,-1.61822e-09,0.401311,0.00138781,-2.97531e-06,1.83442e-08,0.402696,0.00138192,-2.92028e-06,1.76485e-08,0.404075,0.00137613,-2.86733e-06,4.68617e-10,0.405448,0.00137039,-2.86593e-06,1.02794e-08,0.406816,0.00136469,-2.83509e-06,1.80179e-08,0.408178,0.00135908,-2.78104e-06,7.05594e-09,0.409534,0.00135354,-2.75987e-06,1.33633e-08,0.410885,0.00134806,-2.71978e-06,-9.04568e-10,0.41223,0.00134261,-2.72249e-06,2.0057e-08,0.41357,0.00133723,-2.66232e-06,1.00841e-08,0.414905,0.00133194,-2.63207e-06,-7.88835e-10,0.416234,0.00132667,-2.63444e-06,2.28734e-08,0.417558,0.00132147,-2.56582e-06,-1.29785e-09,0.418877,0.00131633,-2.56971e-06,1.21205e-08,0.420191,0.00131123,-2.53335e-06,1.24202e-08,0.421499,0.0013062,-2.49609e-06,-2.19681e-09,0.422803,0.0013012,-2.50268e-06,2.61696e-08,0.424102,0.00129628,-2.42417e-06,-1.30747e-08,0.425396,0.00129139,-2.46339e-06,2.6129e-08,0.426685,0.00128654,-2.38501e-06,-2.03454e-09,0.427969,0.00128176,-2.39111e-06,1.18115e-08,0.429248,0.00127702,-2.35567e-06,1.43932e-08,0.4305
23,0.00127235,-2.31249e-06,-9.77965e-09,0.431793,0.00126769,-2.34183e-06,2.47253e-08,0.433058,0.00126308,-2.26766e-06,2.85278e-10,0.434319,0.00125855,-2.2668e-06,3.93614e-09,0.435575,0.00125403,-2.25499e-06,1.37722e-08,0.436827,0.00124956,-2.21368e-06,5.79803e-10,0.438074,0.00124513,-2.21194e-06,1.37112e-08,0.439317,0.00124075,-2.1708e-06,4.17973e-09,0.440556,0.00123642,-2.15826e-06,-6.27703e-10,0.44179,0.0012321,-2.16015e-06,2.81332e-08,0.44302,0.00122787,-2.07575e-06,-2.24985e-08,0.444246,0.00122365,-2.14324e-06,3.20586e-08,0.445467,0.00121946,-2.04707e-06,-1.6329e-08,0.446685,0.00121532,-2.09605e-06,3.32573e-08,0.447898,0.00121122,-1.99628e-06,-2.72927e-08,0.449107,0.00120715,-2.07816e-06,4.6111e-08,0.450312,0.00120313,-1.93983e-06,-3.79416e-08,0.451514,0.00119914,-2.05365e-06,4.60507e-08,0.452711,0.00119517,-1.9155e-06,-2.7052e-08,0.453904,0.00119126,-1.99666e-06,3.23551e-08,0.455093,0.00118736,-1.89959e-06,-1.29613e-08,0.456279,0.00118352,-1.93848e-06,1.94905e-08,0.45746,0.0011797,-1.88e-06,-5.39588e-09,0.458638,0.00117593,-1.89619e-06,2.09282e-09,0.459812,0.00117214,-1.88991e-06,2.68267e-08,0.460982,0.00116844,-1.80943e-06,-1.99925e-08,0.462149,0.00116476,-1.86941e-06,2.3341e-08,0.463312,0.00116109,-1.79939e-06,-1.37674e-08,0.464471,0.00115745,-1.84069e-06,3.17287e-08,0.465627,0.00115387,-1.7455e-06,-2.37407e-08,0.466779,0.00115031,-1.81673e-06,3.34315e-08,0.467927,0.00114677,-1.71643e-06,-2.05786e-08,0.469073,0.00114328,-1.77817e-06,1.90802e-08,0.470214,0.00113978,-1.72093e-06,3.86247e-09,0.471352,0.00113635,-1.70934e-06,-4.72759e-09,0.472487,0.00113292,-1.72352e-06,1.50478e-08,0.473618,0.00112951,-1.67838e-06,4.14108e-09,0.474746,0.00112617,-1.66595e-06,-1.80986e-09,0.47587,0.00112283,-1.67138e-06,3.09816e-09,0.476991,0.0011195,-1.66209e-06,1.92198e-08,0.478109,0.00111623,-1.60443e-06,-2.03726e-08,0.479224,0.00111296,-1.66555e-06,3.2468e-08,0.480335,0.00110973,-1.56814e-06,-2.00922e-08,0.481443,0.00110653,-1.62842e-06,1.80983e-08,0.482548,0.00110333,-1.57413e-06,7.30362e-09,0.48365,0.0011002,-1.55221e-06,-1.75107e-08,0.484749,0.00109705,-1.60475e-06,3.29373e-08,0.485844,0.00109393,-1.50594e-06,-2.48315e-08,0.486937,0.00109085,-1.58043e-06,3.65865e-08,0.488026,0.0010878,-1.47067e-06,-3.21078e-08,0.489112,0.00108476,-1.56699e-06,3.22397e-08,0.490195,0.00108172,-1.47027e-06,-7.44391e-09,0.491276,0.00107876,-1.49261e-06,-2.46428e-09,0.492353,0.00107577,-1.5e-06,1.73011e-08,0.493427,0.00107282,-1.4481e-06,-7.13552e-09,0.494499,0.0010699,-1.4695e-06,1.1241e-08,0.495567,0.001067,-1.43578e-06,-8.02637e-09,0.496633,0.0010641,-1.45986e-06,2.08645e-08,0.497695,0.00106124,-1.39726e-06,-1.58271e-08,0.498755,0.0010584,-1.44475e-06,1.26415e-08,0.499812,0.00105555,-1.40682e-06,2.48655e-08,0.500866,0.00105281,-1.33222e-06,-5.24988e-08,0.501918,0.00104999,-1.48972e-06,6.59206e-08,0.502966,0.00104721,-1.29196e-06,-3.237e-08,0.504012,0.00104453,-1.38907e-06,3.95479e-09,0.505055,0.00104176,-1.3772e-06,1.65509e-08,0.506096,0.00103905,-1.32755e-06,-1.05539e-08,0.507133,0.00103637,-1.35921e-06,2.56648e-08,0.508168,0.00103373,-1.28222e-06,-3.25007e-08,0.509201,0.00103106,-1.37972e-06,4.47336e-08,0.51023,0.00102844,-1.24552e-06,-2.72245e-08,0.511258,0.00102587,-1.32719e-06,4.55952e-09,0.512282,0.00102323,-1.31352e-06,8.98645e-09,0.513304,0.00102063,-1.28656e-06,1.90992e-08,0.514323,0.00101811,-1.22926e-06,-2.57786e-08,0.51534,0.00101557,-1.30659e-06,2.44104e-08,0.516355,0.00101303,-1.23336e-06,-1.22581e-08,0.517366,0.00101053,-1.27014e-06,2.4622e-08,0.518376,0.00100806,-1.19627e-06,-2.66253e-08,0.519383,0.00
100559,-1.27615e-06,2.22744e-08,0.520387,0.00100311,-1.20932e-06,-2.8679e-09,0.521389,0.00100068,-1.21793e-06,-1.08029e-08,0.522388,0.000998211,-1.25034e-06,4.60795e-08,0.523385,0.000995849,-1.1121e-06,-5.4306e-08,0.52438,0.000993462,-1.27502e-06,5.19354e-08,0.525372,0.000991067,-1.11921e-06,-3.42262e-08,0.526362,0.000988726,-1.22189e-06,2.53646e-08,0.52735,0.000986359,-1.14579e-06,-7.62782e-09,0.528335,0.000984044,-1.16868e-06,5.14668e-09,0.529318,0.000981722,-1.15324e-06,-1.29589e-08,0.530298,0.000979377,-1.19211e-06,4.66888e-08,0.531276,0.000977133,-1.05205e-06,-5.45868e-08,0.532252,0.000974865,-1.21581e-06,5.24495e-08,0.533226,0.000972591,-1.05846e-06,-3.60019e-08,0.534198,0.000970366,-1.16647e-06,3.19537e-08,0.535167,0.000968129,-1.07061e-06,-3.2208e-08,0.536134,0.000965891,-1.16723e-06,3.72738e-08,0.537099,0.000963668,-1.05541e-06,2.32205e-09,0.538061,0.000961564,-1.04844e-06,-4.65618e-08,0.539022,0.000959328,-1.18813e-06,6.47159e-08,0.53998,0.000957146,-9.93979e-07,-3.3488e-08,0.540936,0.000955057,-1.09444e-06,9.63166e-09,0.54189,0.000952897,-1.06555e-06,-5.03871e-09,0.542842,0.000950751,-1.08066e-06,1.05232e-08,0.543792,0.000948621,-1.04909e-06,2.25503e-08,0.544739,0.000946591,-9.81444e-07,-4.11195e-08,0.545685,0.000944504,-1.1048e-06,2.27182e-08,0.546628,0.000942363,-1.03665e-06,9.85146e-09,0.54757,0.000940319,-1.00709e-06,-2.51938e-09,0.548509,0.000938297,-1.01465e-06,2.25858e-10,0.549446,0.000936269,-1.01397e-06,1.61598e-09,0.550381,0.000934246,-1.00913e-06,-6.68983e-09,0.551315,0.000932207,-1.0292e-06,2.51434e-08,0.552246,0.000930224,-9.53765e-07,-3.42793e-08,0.553175,0.000928214,-1.0566e-06,5.23688e-08,0.554102,0.000926258,-8.99497e-07,-5.59865e-08,0.555028,0.000924291,-1.06746e-06,5.23679e-08,0.555951,0.000922313,-9.10352e-07,-3.42763e-08,0.556872,0.00092039,-1.01318e-06,2.51326e-08,0.557792,0.000918439,-9.37783e-07,-6.64954e-09,0.558709,0.000916543,-9.57732e-07,1.46554e-09,0.559625,0.000914632,-9.53335e-07,7.87281e-10,0.560538,0.000912728,-9.50973e-07,-4.61466e-09,0.56145,0.000910812,-9.64817e-07,1.76713e-08,0.56236,0.000908935,-9.11804e-07,-6.46564e-09,0.563268,0.000907092,-9.312e-07,8.19121e-09,0.564174,0.000905255,-9.06627e-07,-2.62992e-08,0.565078,0.000903362,-9.85524e-07,3.74007e-08,0.565981,0.000901504,-8.73322e-07,-4.0942e-09,0.566882,0.000899745,-8.85605e-07,-2.1024e-08,0.56778,0.00089791,-9.48677e-07,2.85854e-08,0.568677,0.000896099,-8.62921e-07,-3.3713e-08,0.569573,0.000894272,-9.64059e-07,4.6662e-08,0.570466,0.000892484,-8.24073e-07,-3.37258e-08,0.571358,0.000890734,-9.25251e-07,2.86365e-08,0.572247,0.00088897,-8.39341e-07,-2.12155e-08,0.573135,0.000887227,-9.02988e-07,-3.37913e-09,0.574022,0.000885411,-9.13125e-07,3.47319e-08,0.574906,0.000883689,-8.08929e-07,-1.63394e-08,0.575789,0.000882022,-8.57947e-07,-2.8979e-08,0.57667,0.00088022,-9.44885e-07,7.26509e-08,0.57755,0.000878548,-7.26932e-07,-8.28106e-08,0.578427,0.000876845,-9.75364e-07,7.97774e-08,0.579303,0.000875134,-7.36032e-07,-5.74849e-08,0.580178,0.00087349,-9.08486e-07,3.09529e-08,0.58105,0.000871765,-8.15628e-07,-6.72206e-09,0.581921,0.000870114,-8.35794e-07,-4.06451e-09,0.582791,0.00086843,-8.47987e-07,2.29799e-08,0.583658,0.000866803,-7.79048e-07,-2.82503e-08,0.584524,0.00086516,-8.63799e-07,3.04167e-08,0.585388,0.000863524,-7.72548e-07,-3.38119e-08,0.586251,0.000861877,-8.73984e-07,4.52264e-08,0.587112,0.000860265,-7.38305e-07,-2.78842e-08,0.587972,0.000858705,-8.21958e-07,6.70567e-09,0.58883,0.000857081,-8.01841e-07,1.06161e-09,0.589686,0.000855481,-7.98656e-07,-1.09521e-08,0.590541,0.00085385,-8.31
512e-07,4.27468e-08,0.591394,0.000852316,-7.03272e-07,-4.08257e-08,0.592245,0.000850787,-8.25749e-07,1.34677e-09,0.593095,0.000849139,-8.21709e-07,3.54387e-08,0.593944,0.000847602,-7.15393e-07,-2.38924e-08,0.59479,0.0008461,-7.8707e-07,5.26143e-10,0.595636,0.000844527,-7.85491e-07,2.17879e-08,0.596479,0.000843021,-7.20127e-07,-2.80733e-08,0.597322,0.000841497,-8.04347e-07,3.09005e-08,0.598162,0.000839981,-7.11646e-07,-3.5924e-08,0.599002,0.00083845,-8.19418e-07,5.3191e-08,0.599839,0.000836971,-6.59845e-07,-5.76307e-08,0.600676,0.000835478,-8.32737e-07,5.81227e-08,0.60151,0.000833987,-6.58369e-07,-5.56507e-08,0.602344,0.000832503,-8.25321e-07,4.52706e-08,0.603175,0.000830988,-6.89509e-07,-6.22236e-09,0.604006,0.000829591,-7.08176e-07,-2.03811e-08,0.604834,0.000828113,-7.6932e-07,2.8142e-08,0.605662,0.000826659,-6.84894e-07,-3.25822e-08,0.606488,0.000825191,-7.8264e-07,4.25823e-08,0.607312,0.000823754,-6.54893e-07,-1.85376e-08,0.608135,0.000822389,-7.10506e-07,-2.80365e-08,0.608957,0.000820883,-7.94616e-07,7.1079e-08,0.609777,0.000819507,-5.81379e-07,-7.74655e-08,0.610596,0.000818112,-8.13775e-07,5.9969e-08,0.611413,0.000816665,-6.33868e-07,-4.32013e-08,0.612229,0.000815267,-7.63472e-07,5.32313e-08,0.613044,0.0008139,-6.03778e-07,-5.05148e-08,0.613857,0.000812541,-7.55323e-07,2.96187e-08,0.614669,0.000811119,-6.66466e-07,-8.35545e-09,0.615479,0.000809761,-6.91533e-07,3.80301e-09,0.616288,0.00080839,-6.80124e-07,-6.85666e-09,0.617096,0.000807009,-7.00694e-07,2.36237e-08,0.617903,0.000805678,-6.29822e-07,-2.80336e-08,0.618708,0.000804334,-7.13923e-07,2.8906e-08,0.619511,0.000802993,-6.27205e-07,-2.79859e-08,0.620314,0.000801655,-7.11163e-07,2.34329e-08,0.621114,0.000800303,-6.40864e-07,-6.14108e-09,0.621914,0.000799003,-6.59287e-07,1.13151e-09,0.622712,0.000797688,-6.55893e-07,1.61507e-09,0.62351,0.000796381,-6.51048e-07,-7.59186e-09,0.624305,0.000795056,-6.73823e-07,2.87524e-08,0.6251,0.000793794,-5.87566e-07,-4.7813e-08,0.625893,0.000792476,-7.31005e-07,4.32901e-08,0.626685,0.000791144,-6.01135e-07,-6.13814e-09,0.627475,0.000789923,-6.19549e-07,-1.87376e-08,0.628264,0.000788628,-6.75762e-07,2.14837e-08,0.629052,0.000787341,-6.11311e-07,-7.59265e-09,0.629839,0.000786095,-6.34089e-07,8.88692e-09,0.630625,0.000784854,-6.07428e-07,-2.7955e-08,0.631409,0.000783555,-6.91293e-07,4.33285e-08,0.632192,0.000782302,-5.61307e-07,-2.61497e-08,0.632973,0.000781101,-6.39757e-07,1.6658e-09,0.633754,0.000779827,-6.34759e-07,1.94866e-08,0.634533,0.000778616,-5.76299e-07,-2.00076e-08,0.635311,0.000777403,-6.36322e-07,9.39091e-10,0.636088,0.000776133,-6.33505e-07,1.62512e-08,0.636863,0.000774915,-5.84751e-07,-6.33937e-09,0.637638,0.000773726,-6.03769e-07,9.10609e-09,0.638411,0.000772546,-5.76451e-07,-3.00849e-08,0.639183,0.000771303,-6.66706e-07,5.1629e-08,0.639953,0.000770125,-5.11819e-07,-5.7222e-08,0.640723,0.000768929,-6.83485e-07,5.80497e-08,0.641491,0.000767736,-5.09336e-07,-5.57674e-08,0.642259,0.000766551,-6.76638e-07,4.58105e-08,0.643024,0.000765335,-5.39206e-07,-8.26541e-09,0.643789,0.000764231,-5.64002e-07,-1.27488e-08,0.644553,0.000763065,-6.02249e-07,-3.44168e-10,0.645315,0.00076186,-6.03281e-07,1.41254e-08,0.646077,0.000760695,-5.60905e-07,3.44727e-09,0.646837,0.000759584,-5.50563e-07,-2.79144e-08,0.647596,0.000758399,-6.34307e-07,4.86057e-08,0.648354,0.000757276,-4.88489e-07,-4.72989e-08,0.64911,0.000756158,-6.30386e-07,2.13807e-08,0.649866,0.000754961,-5.66244e-07,2.13808e-08,0.65062,0.000753893,-5.02102e-07,-4.7299e-08,0.651374,0.000752746,-6.43999e-07,4.86059e-08,0.652126,0.000751604,-4.98181e-
07,-2.79154e-08,0.652877,0.000750524,-5.81927e-07,3.45089e-09,0.653627,0.000749371,-5.71575e-07,1.41119e-08,0.654376,0.00074827,-5.29239e-07,-2.93748e-10,0.655123,0.00074721,-5.3012e-07,-1.29368e-08,0.65587,0.000746111,-5.68931e-07,-7.56355e-09,0.656616,0.000744951,-5.91621e-07,4.3191e-08,0.65736,0.000743897,-4.62048e-07,-4.59911e-08,0.658103,0.000742835,-6.00022e-07,2.15642e-08,0.658846,0.0007417,-5.35329e-07,1.93389e-08,0.659587,0.000740687,-4.77312e-07,-3.93152e-08,0.660327,0.000739615,-5.95258e-07,1.87126e-08,0.661066,0.00073848,-5.3912e-07,2.40695e-08,0.661804,0.000737474,-4.66912e-07,-5.53859e-08,0.662541,0.000736374,-6.33069e-07,7.82648e-08,0.663277,0.000735343,-3.98275e-07,-7.88593e-08,0.664012,0.00073431,-6.34853e-07,5.83585e-08,0.664745,0.000733215,-4.59777e-07,-3.53656e-08,0.665478,0.000732189,-5.65874e-07,2.34994e-08,0.66621,0.000731128,-4.95376e-07,9.72743e-10,0.66694,0.00073014,-4.92458e-07,-2.73903e-08,0.66767,0.000729073,-5.74629e-07,4.89839e-08,0.668398,0.000728071,-4.27677e-07,-4.93359e-08,0.669126,0.000727068,-5.75685e-07,2.91504e-08,0.669853,0.000726004,-4.88234e-07,-7.66109e-09,0.670578,0.000725004,-5.11217e-07,1.49392e-09,0.671303,0.000723986,-5.06735e-07,1.68533e-09,0.672026,0.000722978,-5.01679e-07,-8.23525e-09,0.672749,0.00072195,-5.26385e-07,3.12556e-08,0.67347,0.000720991,-4.32618e-07,-5.71825e-08,0.674191,0.000719954,-6.04166e-07,7.8265e-08,0.67491,0.00071898,-3.69371e-07,-7.70634e-08,0.675628,0.00071801,-6.00561e-07,5.11747e-08,0.676346,0.000716963,-4.47037e-07,-8.42615e-09,0.677062,0.000716044,-4.72315e-07,-1.747e-08,0.677778,0.000715046,-5.24725e-07,1.87015e-08,0.678493,0.000714053,-4.68621e-07,2.26856e-09,0.679206,0.000713123,-4.61815e-07,-2.77758e-08,0.679919,0.000712116,-5.45142e-07,4.92298e-08,0.68063,0.000711173,-3.97453e-07,-4.99339e-08,0.681341,0.000710228,-5.47255e-07,3.12967e-08,0.682051,0.000709228,-4.53365e-07,-1.56481e-08,0.68276,0.000708274,-5.00309e-07,3.12958e-08,0.683467,0.000707367,-4.06422e-07,-4.99303e-08,0.684174,0.000706405,-5.56213e-07,4.9216e-08,0.68488,0.00070544,-4.08565e-07,-2.77245e-08,0.685585,0.00070454,-4.91738e-07,2.07748e-09,0.686289,0.000703562,-4.85506e-07,1.94146e-08,0.686992,0.00070265,-4.27262e-07,-2.01314e-08,0.687695,0.000701735,-4.87656e-07,1.50616e-09,0.688396,0.000700764,-4.83137e-07,1.41067e-08,0.689096,0.00069984,-4.40817e-07,1.67168e-09,0.689795,0.000698963,-4.35802e-07,-2.07934e-08,0.690494,0.000698029,-4.98182e-07,2.18972e-08,0.691192,0.000697099,-4.32491e-07,-7.19092e-09,0.691888,0.000696212,-4.54064e-07,6.86642e-09,0.692584,0.000695325,-4.33464e-07,-2.02747e-08,0.693279,0.000694397,-4.94288e-07,1.46279e-08,0.693973,0.000693452,-4.50405e-07,2.13678e-08,0.694666,0.000692616,-3.86301e-07,-4.04945e-08,0.695358,0.000691721,-5.07785e-07,2.14009e-08,0.696049,0.00069077,-4.43582e-07,1.44955e-08,0.69674,0.000689926,-4.00096e-07,-1.97783e-08,0.697429,0.000689067,-4.5943e-07,5.01296e-09,0.698118,0.000688163,-4.44392e-07,-2.73521e-10,0.698805,0.000687273,-4.45212e-07,-3.91893e-09,0.699492,0.000686371,-4.56969e-07,1.59493e-08,0.700178,0.000685505,-4.09121e-07,-2.73351e-10,0.700863,0.000684686,-4.09941e-07,-1.4856e-08,0.701548,0.000683822,-4.54509e-07,9.25979e-11,0.702231,0.000682913,-4.54231e-07,1.44855e-08,0.702913,0.000682048,-4.10775e-07,1.56992e-09,0.703595,0.000681231,-4.06065e-07,-2.07652e-08,0.704276,0.000680357,-4.68361e-07,2.18864e-08,0.704956,0.000679486,-4.02701e-07,-7.17595e-09,0.705635,0.000678659,-4.24229e-07,6.81748e-09,0.706313,0.000677831,-4.03777e-07,-2.0094e-08,0.70699,0.000676963,-4.64059e-07,1.39538e-08,
0.707667,0.000676077,-4.22197e-07,2.38835e-08,0.708343,0.000675304,-3.50547e-07,-4.98831e-08,0.709018,0.000674453,-5.00196e-07,5.64395e-08,0.709692,0.000673622,-3.30878e-07,-5.66657e-08,0.710365,0.00067279,-5.00875e-07,5.1014e-08,0.711037,0.000671942,-3.47833e-07,-2.81809e-08,0.711709,0.000671161,-4.32376e-07,2.10513e-09,0.712379,0.000670303,-4.2606e-07,1.97604e-08,0.713049,0.00066951,-3.66779e-07,-2.15422e-08,0.713718,0.000668712,-4.31406e-07,6.8038e-09,0.714387,0.000667869,-4.10994e-07,-5.67295e-09,0.715054,0.00066703,-4.28013e-07,1.5888e-08,0.715721,0.000666222,-3.80349e-07,1.72576e-09,0.716387,0.000665467,-3.75172e-07,-2.27911e-08,0.717052,0.000664648,-4.43545e-07,2.9834e-08,0.717716,0.00066385,-3.54043e-07,-3.69401e-08,0.718379,0.000663031,-4.64864e-07,5.83219e-08,0.719042,0.000662277,-2.89898e-07,-7.71382e-08,0.719704,0.000661465,-5.21313e-07,7.14171e-08,0.720365,0.000660637,-3.07061e-07,-2.97161e-08,0.721025,0.000659934,-3.96209e-07,-1.21575e-08,0.721685,0.000659105,-4.32682e-07,1.87412e-08,0.722343,0.000658296,-3.76458e-07,-3.2029e-09,0.723001,0.000657533,-3.86067e-07,-5.9296e-09,0.723659,0.000656743,-4.03856e-07,2.69213e-08,0.724315,0.000656016,-3.23092e-07,-4.21511e-08,0.724971,0.000655244,-4.49545e-07,2.24737e-08,0.725625,0.000654412,-3.82124e-07,1.18611e-08,0.726279,0.000653683,-3.46541e-07,-1.03132e-08,0.726933,0.000652959,-3.7748e-07,-3.02128e-08,0.727585,0.000652114,-4.68119e-07,7.15597e-08,0.728237,0.000651392,-2.5344e-07,-7.72119e-08,0.728888,0.000650654,-4.85075e-07,5.8474e-08,0.729538,0.000649859,-3.09654e-07,-3.74746e-08,0.730188,0.000649127,-4.22077e-07,3.18197e-08,0.730837,0.000648379,-3.26618e-07,-3.01997e-08,0.731485,0.000647635,-4.17217e-07,2.93747e-08,0.732132,0.000646888,-3.29093e-07,-2.76943e-08,0.732778,0.000646147,-4.12176e-07,2.17979e-08,0.733424,0.000645388,-3.46783e-07,1.07292e-10,0.734069,0.000644695,-3.46461e-07,-2.22271e-08,0.734713,0.000643935,-4.13142e-07,2.91963e-08,0.735357,0.000643197,-3.25553e-07,-3.49536e-08,0.736,0.000642441,-4.30414e-07,5.10133e-08,0.736642,0.000641733,-2.77374e-07,-4.98904e-08,0.737283,0.000641028,-4.27045e-07,2.93392e-08,0.737924,0.000640262,-3.39028e-07,-7.86156e-09,0.738564,0.000639561,-3.62612e-07,2.10703e-09,0.739203,0.000638842,-3.56291e-07,-5.6653e-10,0.739842,0.000638128,-3.57991e-07,1.59086e-10,0.740479,0.000637412,-3.57513e-07,-6.98321e-11,0.741116,0.000636697,-3.57723e-07,1.20214e-10,0.741753,0.000635982,-3.57362e-07,-4.10987e-10,0.742388,0.000635266,-3.58595e-07,1.5237e-09,0.743023,0.000634553,-3.54024e-07,-5.68376e-09,0.743657,0.000633828,-3.71075e-07,2.12113e-08,0.744291,0.00063315,-3.07441e-07,-1.95569e-08,0.744924,0.000632476,-3.66112e-07,-2.58816e-09,0.745556,0.000631736,-3.73877e-07,2.99096e-08,0.746187,0.000631078,-2.84148e-07,-5.74454e-08,0.746818,0.000630337,-4.56484e-07,8.06629e-08,0.747448,0.000629666,-2.14496e-07,-8.63922e-08,0.748077,0.000628978,-4.73672e-07,8.60918e-08,0.748706,0.000628289,-2.15397e-07,-7.91613e-08,0.749334,0.000627621,-4.5288e-07,5.17393e-08,0.749961,0.00062687,-2.97663e-07,-8.58662e-09,0.750588,0.000626249,-3.23422e-07,-1.73928e-08,0.751214,0.00062555,-3.75601e-07,1.85532e-08,0.751839,0.000624855,-3.19941e-07,2.78479e-09,0.752463,0.000624223,-3.11587e-07,-2.96923e-08,0.753087,0.000623511,-4.00664e-07,5.63799e-08,0.75371,0.000622879,-2.31524e-07,-7.66179e-08,0.754333,0.000622186,-4.61378e-07,7.12778e-08,0.754955,0.000621477,-2.47545e-07,-2.96794e-08,0.755576,0.000620893,-3.36583e-07,-1.21648e-08,0.756196,0.000620183,-3.73077e-07,1.87339e-08,0.756816,0.000619493,-3.16875e-07,-3.16622e-0
9,0.757435,0.00061885,-3.26374e-07,-6.0691e-09,0.758054,0.000618179,-3.44581e-07,2.74426e-08,0.758672,0.000617572,-2.62254e-07,-4.40968e-08,0.759289,0.000616915,-3.94544e-07,2.97352e-08,0.759906,0.000616215,-3.05338e-07,-1.52393e-08,0.760522,0.000615559,-3.51056e-07,3.12221e-08,0.761137,0.000614951,-2.5739e-07,-5.00443e-08,0.761751,0.000614286,-4.07523e-07,4.9746e-08,0.762365,0.00061362,-2.58285e-07,-2.97303e-08,0.762979,0.000613014,-3.47476e-07,9.57079e-09,0.763591,0.000612348,-3.18764e-07,-8.55287e-09,0.764203,0.000611685,-3.44422e-07,2.46407e-08,0.764815,0.00061107,-2.705e-07,-3.04053e-08,0.765426,0.000610437,-3.61716e-07,3.73759e-08,0.766036,0.000609826,-2.49589e-07,-5.94935e-08,0.766645,0.000609149,-4.28069e-07,8.13889e-08,0.767254,0.000608537,-1.83902e-07,-8.72483e-08,0.767862,0.000607907,-4.45647e-07,8.87901e-08,0.76847,0.000607282,-1.79277e-07,-8.90983e-08,0.769077,0.000606656,-4.46572e-07,8.87892e-08,0.769683,0.000606029,-1.80204e-07,-8.72446e-08,0.770289,0.000605407,-4.41938e-07,8.13752e-08,0.770894,0.000604768,-1.97812e-07,-5.94423e-08,0.771498,0.000604194,-3.76139e-07,3.71848e-08,0.772102,0.000603553,-2.64585e-07,-2.96922e-08,0.772705,0.000602935,-3.53661e-07,2.19793e-08,0.773308,0.000602293,-2.87723e-07,1.37955e-09,0.77391,0.000601722,-2.83585e-07,-2.74976e-08,0.774512,0.000601072,-3.66077e-07,4.9006e-08,0.775112,0.000600487,-2.19059e-07,-4.93171e-08,0.775712,0.000599901,-3.67011e-07,2.90531e-08,0.776312,0.000599254,-2.79851e-07,-7.29081e-09,0.776911,0.000598673,-3.01724e-07,1.10077e-10,0.777509,0.00059807,-3.01393e-07,6.85053e-09,0.778107,0.000597487,-2.80842e-07,-2.75123e-08,0.778704,0.000596843,-3.63379e-07,4.35939e-08,0.779301,0.000596247,-2.32597e-07,-2.7654e-08,0.779897,0.000595699,-3.15559e-07,7.41741e-09,0.780492,0.00059509,-2.93307e-07,-2.01562e-09,0.781087,0.000594497,-2.99354e-07,6.45059e-10,0.781681,0.000593901,-2.97418e-07,-5.64635e-10,0.782275,0.000593304,-2.99112e-07,1.61347e-09,0.782868,0.000592711,-2.94272e-07,-5.88926e-09,0.78346,0.000592105,-3.1194e-07,2.19436e-08,0.784052,0.000591546,-2.46109e-07,-2.22805e-08,0.784643,0.000590987,-3.1295e-07,7.57368e-09,0.785234,0.000590384,-2.90229e-07,-8.01428e-09,0.785824,0.00058978,-3.14272e-07,2.44834e-08,0.786414,0.000589225,-2.40822e-07,-3.03148e-08,0.787003,0.000588652,-3.31766e-07,3.7171e-08,0.787591,0.0005881,-2.20253e-07,-5.87646e-08,0.788179,0.000587483,-3.96547e-07,7.86782e-08,0.788766,0.000586926,-1.60512e-07,-7.71342e-08,0.789353,0.000586374,-3.91915e-07,5.10444e-08,0.789939,0.000585743,-2.38782e-07,-7.83422e-09,0.790524,0.000585242,-2.62284e-07,-1.97076e-08,0.791109,0.000584658,-3.21407e-07,2.70598e-08,0.791693,0.000584097,-2.40228e-07,-2.89269e-08,0.792277,0.000583529,-3.27008e-07,2.90431e-08,0.792861,0.000582963,-2.39879e-07,-2.76409e-08,0.793443,0.0005824,-3.22802e-07,2.1916e-08,0.794025,0.00058182,-2.57054e-07,-4.18368e-10,0.794607,0.000581305,-2.58309e-07,-2.02425e-08,0.795188,0.000580727,-3.19036e-07,2.17838e-08,0.795768,0.000580155,-2.53685e-07,-7.28814e-09,0.796348,0.000579625,-2.75549e-07,7.36871e-09,0.796928,0.000579096,-2.53443e-07,-2.21867e-08,0.797506,0.000578523,-3.20003e-07,2.17736e-08,0.798085,0.000577948,-2.54683e-07,-5.30296e-09,0.798662,0.000577423,-2.70592e-07,-5.61698e-10,0.799239,0.00057688,-2.72277e-07,7.54977e-09,0.799816,0.000576358,-2.49627e-07,-2.96374e-08,0.800392,0.00057577,-3.38539e-07,5.1395e-08,0.800968,0.000575247,-1.84354e-07,-5.67335e-08,0.801543,0.000574708,-3.54555e-07,5.63297e-08,0.802117,0.000574168,-1.85566e-07,-4.93759e-08,0.802691,0.000573649,-3.33693e-07,2.19646e-08,
0.803264,0.000573047,-2.678e-07,2.1122e-08,0.803837,0.000572575,-2.04433e-07,-4.68482e-08,0.804409,0.000572026,-3.44978e-07,4.70613e-08,0.804981,0.000571477,-2.03794e-07,-2.21877e-08,0.805552,0.000571003,-2.70357e-07,-1.79153e-08,0.806123,0.000570408,-3.24103e-07,3.42443e-08,0.806693,0.000569863,-2.2137e-07,1.47556e-10,0.807263,0.000569421,-2.20928e-07,-3.48345e-08,0.807832,0.000568874,-3.25431e-07,1.99812e-08,0.808401,0.000568283,-2.65487e-07,1.45143e-08,0.808969,0.000567796,-2.21945e-07,-1.84338e-08,0.809536,0.000567297,-2.77246e-07,-3.83608e-10,0.810103,0.000566741,-2.78397e-07,1.99683e-08,0.81067,0.000566244,-2.18492e-07,-1.98848e-08,0.811236,0.000565747,-2.78146e-07,-3.38976e-11,0.811801,0.000565191,-2.78248e-07,2.00204e-08,0.812366,0.000564695,-2.18187e-07,-2.04429e-08,0.812931,0.000564197,-2.79516e-07,2.1467e-09,0.813495,0.000563644,-2.73076e-07,1.18561e-08,0.814058,0.000563134,-2.37507e-07,1.00334e-08,0.814621,0.000562689,-2.07407e-07,-5.19898e-08,0.815183,0.000562118,-3.63376e-07,7.87163e-08,0.815745,0.000561627,-1.27227e-07,-8.40616e-08,0.816306,0.000561121,-3.79412e-07,7.87163e-08,0.816867,0.000560598,-1.43263e-07,-5.19898e-08,0.817428,0.000560156,-2.99233e-07,1.00335e-08,0.817988,0.000559587,-2.69132e-07,1.18559e-08,0.818547,0.000559085,-2.33564e-07,2.14764e-09,0.819106,0.000558624,-2.27122e-07,-2.04464e-08,0.819664,0.000558108,-2.88461e-07,2.00334e-08,0.820222,0.000557591,-2.28361e-07,-8.24277e-11,0.820779,0.000557135,-2.28608e-07,-1.97037e-08,0.821336,0.000556618,-2.87719e-07,1.92925e-08,0.821893,0.000556101,-2.29841e-07,2.13831e-09,0.822448,0.000555647,-2.23427e-07,-2.78458e-08,0.823004,0.000555117,-3.06964e-07,4.96402e-08,0.823559,0.000554652,-1.58043e-07,-5.15058e-08,0.824113,0.000554181,-3.12561e-07,3.71737e-08,0.824667,0.000553668,-2.0104e-07,-3.75844e-08,0.82522,0.000553153,-3.13793e-07,5.35592e-08,0.825773,0.000552686,-1.53115e-07,-5.74431e-08,0.826326,0.000552207,-3.25444e-07,5.7004e-08,0.826878,0.000551728,-1.54433e-07,-5.13635e-08,0.827429,0.000551265,-3.08523e-07,2.92406e-08,0.82798,0.000550735,-2.20801e-07,-5.99424e-09,0.828531,0.000550276,-2.38784e-07,-5.26363e-09,0.829081,0.000549782,-2.54575e-07,2.70488e-08,0.82963,0.000549354,-1.73429e-07,-4.33268e-08,0.83018,0.000548878,-3.03409e-07,2.7049e-08,0.830728,0.000548352,-2.22262e-07,-5.26461e-09,0.831276,0.000547892,-2.38056e-07,-5.99057e-09,0.831824,0.000547397,-2.56027e-07,2.92269e-08,0.832371,0.000546973,-1.68347e-07,-5.13125e-08,0.832918,0.000546482,-3.22284e-07,5.68139e-08,0.833464,0.000546008,-1.51843e-07,-5.67336e-08,0.83401,0.000545534,-3.22043e-07,5.09113e-08,0.834555,0.000545043,-1.6931e-07,-2.77022e-08,0.8351,0.000544621,-2.52416e-07,2.92924e-10,0.835644,0.000544117,-2.51537e-07,2.65305e-08,0.836188,0.000543694,-1.71946e-07,-4.68105e-08,0.836732,0.00054321,-3.12377e-07,4.15021e-08,0.837275,0.000542709,-1.87871e-07,1.13355e-11,0.837817,0.000542334,-1.87837e-07,-4.15474e-08,0.838359,0.000541833,-3.12479e-07,4.69691e-08,0.838901,0.000541349,-1.71572e-07,-2.71196e-08,0.839442,0.000540925,-2.52931e-07,1.90462e-09,0.839983,0.000540425,-2.47217e-07,1.95011e-08,0.840523,0.000539989,-1.88713e-07,-2.03045e-08,0.841063,0.00053955,-2.49627e-07,2.11216e-09,0.841602,0.000539057,-2.4329e-07,1.18558e-08,0.842141,0.000538606,-2.07723e-07,1.00691e-08,0.842679,0.000538221,-1.77516e-07,-5.21324e-08,0.843217,0.00053771,-3.33913e-07,7.92513e-08,0.843755,0.00053728,-9.6159e-08,-8.60587e-08,0.844292,0.000536829,-3.54335e-07,8.61696e-08,0.844828,0.000536379,-9.58263e-08,-7.98057e-08,0.845364,0.000535948,-3.35243e-07,5.42394e-08,0
.8459,0.00053544,-1.72525e-07,-1.79426e-08,0.846435,0.000535041,-2.26353e-07,1.75308e-08,0.84697,0.000534641,-1.73761e-07,-5.21806e-08,0.847505,0.000534137,-3.30302e-07,7.19824e-08,0.848038,0.000533692,-1.14355e-07,-5.69349e-08,0.848572,0.000533293,-2.8516e-07,3.65479e-08,0.849105,0.000532832,-1.75516e-07,-2.96519e-08,0.849638,0.000532392,-2.64472e-07,2.2455e-08,0.85017,0.000531931,-1.97107e-07,-5.63451e-10,0.850702,0.000531535,-1.98797e-07,-2.02011e-08,0.851233,0.000531077,-2.59401e-07,2.17634e-08,0.851764,0.000530623,-1.94111e-07,-7.24794e-09,0.852294,0.000530213,-2.15854e-07,7.22832e-09,0.852824,0.000529803,-1.94169e-07,-2.16653e-08,0.853354,0.00052935,-2.59165e-07,1.98283e-08,0.853883,0.000528891,-1.9968e-07,1.95678e-09,0.854412,0.000528497,-1.9381e-07,-2.76554e-08,0.85494,0.000528027,-2.76776e-07,4.90603e-08,0.855468,0.00052762,-1.29596e-07,-4.93764e-08,0.855995,0.000527213,-2.77725e-07,2.92361e-08,0.856522,0.000526745,-1.90016e-07,-7.96341e-09,0.857049,0.000526341,-2.13907e-07,2.61752e-09,0.857575,0.000525922,-2.06054e-07,-2.50665e-09,0.8581,0.000525502,-2.13574e-07,7.40906e-09,0.858626,0.000525097,-1.91347e-07,-2.71296e-08,0.859151,0.000524633,-2.72736e-07,4.15048e-08,0.859675,0.000524212,-1.48221e-07,-1.96802e-08,0.860199,0.000523856,-2.07262e-07,-2.23886e-08,0.860723,0.000523375,-2.74428e-07,4.96299e-08,0.861246,0.000522975,-1.25538e-07,-5.69216e-08,0.861769,0.000522553,-2.96303e-07,5.88473e-08,0.862291,0.000522137,-1.19761e-07,-5.92584e-08,0.862813,0.00052172,-2.97536e-07,5.8977e-08,0.863334,0.000521301,-1.20605e-07,-5.74403e-08,0.863855,0.000520888,-2.92926e-07,5.15751e-08,0.864376,0.000520457,-1.38201e-07,-2.96506e-08,0.864896,0.000520091,-2.27153e-07,7.42277e-09,0.865416,0.000519659,-2.04885e-07,-4.05057e-11,0.865936,0.00051925,-2.05006e-07,-7.26074e-09,0.866455,0.000518818,-2.26788e-07,2.90835e-08,0.866973,0.000518451,-1.39538e-07,-4.94686e-08,0.867492,0.000518024,-2.87944e-07,4.95814e-08,0.868009,0.000517597,-1.39199e-07,-2.96479e-08,0.868527,0.000517229,-2.28143e-07,9.40539e-09,0.869044,0.000516801,-1.99927e-07,-7.9737e-09,0.86956,0.000516378,-2.23848e-07,2.24894e-08,0.870077,0.000515997,-1.5638e-07,-2.23793e-08,0.870592,0.000515617,-2.23517e-07,7.42302e-09,0.871108,0.000515193,-2.01248e-07,-7.31283e-09,0.871623,0.000514768,-2.23187e-07,2.18283e-08,0.872137,0.000514387,-1.57702e-07,-2.03959e-08,0.872652,0.000514011,-2.1889e-07,1.50711e-10,0.873165,0.000513573,-2.18437e-07,1.97931e-08,0.873679,0.000513196,-1.59058e-07,-1.97183e-08,0.874192,0.000512819,-2.18213e-07,-5.24324e-10,0.874704,0.000512381,-2.19786e-07,2.18156e-08,0.875217,0.000512007,-1.54339e-07,-2.71336e-08,0.875728,0.000511616,-2.3574e-07,2.71141e-08,0.87624,0.000511226,-1.54398e-07,-2.17182e-08,0.876751,0.000510852,-2.19552e-07,1.54131e-10,0.877262,0.000510414,-2.1909e-07,2.11017e-08,0.877772,0.000510039,-1.55785e-07,-2.49562e-08,0.878282,0.000509652,-2.30654e-07,1.91183e-08,0.878791,0.000509248,-1.73299e-07,8.08751e-09,0.8793,0.000508926,-1.49036e-07,-5.14684e-08,0.879809,0.000508474,-3.03441e-07,7.85766e-08,0.880317,0.000508103,-6.77112e-08,-8.40242e-08,0.880825,0.000507715,-3.19784e-07,7.87063e-08,0.881333,0.000507312,-8.36649e-08,-5.19871e-08,0.88184,0.000506988,-2.39626e-07,1.00327e-08,0.882346,0.000506539,-2.09528e-07,1.18562e-08,0.882853,0.000506156,-1.73959e-07,2.14703e-09,0.883359,0.000505814,-1.67518e-07,-2.04444e-08,0.883864,0.000505418,-2.28851e-07,2.00258e-08,0.88437,0.00050502,-1.68774e-07,-5.42855e-11,0.884874,0.000504682,-1.68937e-07,-1.98087e-08,0.885379,0.000504285,-2.28363e-07,1.96842e-08,0.885
883,0.000503887,-1.6931e-07,6.76342e-10,0.886387,0.000503551,-1.67281e-07,-2.23896e-08,0.88689,0.000503149,-2.3445e-07,2.92774e-08,0.887393,0.000502768,-1.46618e-07,-3.51152e-08,0.887896,0.00050237,-2.51963e-07,5.15787e-08,0.888398,0.00050202,-9.72271e-08,-5.19903e-08,0.8889,0.00050167,-2.53198e-07,3.71732e-08,0.889401,0.000501275,-1.41678e-07,-3.70978e-08,0.889902,0.00050088,-2.52972e-07,5.16132e-08,0.890403,0.000500529,-9.81321e-08,-5.01459e-08,0.890903,0.000500183,-2.4857e-07,2.9761e-08,0.891403,0.000499775,-1.59287e-07,-9.29351e-09,0.891903,0.000499428,-1.87167e-07,7.41301e-09,0.892402,0.000499076,-1.64928e-07,-2.03585e-08,0.892901,0.000498685,-2.26004e-07,1.44165e-08,0.893399,0.000498276,-1.82754e-07,2.22974e-08,0.893898,0.000497978,-1.15862e-07,-4.40013e-08,0.894395,0.000497614,-2.47866e-07,3.44985e-08,0.894893,0.000497222,-1.44371e-07,-3.43882e-08,0.89539,0.00049683,-2.47535e-07,4.34497e-08,0.895886,0.000496465,-1.17186e-07,-2.02012e-08,0.896383,0.00049617,-1.7779e-07,-2.22497e-08,0.896879,0.000495748,-2.44539e-07,4.95952e-08,0.897374,0.000495408,-9.57532e-08,-5.69217e-08,0.89787,0.000495045,-2.66518e-07,5.88823e-08,0.898364,0.000494689,-8.98713e-08,-5.93983e-08,0.898859,0.000494331,-2.68066e-07,5.95017e-08,0.899353,0.000493973,-8.95613e-08,-5.9399e-08,0.899847,0.000493616,-2.67758e-07,5.8885e-08,0.90034,0.000493257,-9.11033e-08,-5.69317e-08,0.900833,0.000492904,-2.61898e-07,4.96326e-08,0.901326,0.000492529,-1.13001e-07,-2.23893e-08,0.901819,0.000492236,-1.80169e-07,-1.968e-08,0.902311,0.000491817,-2.39209e-07,4.15047e-08,0.902802,0.000491463,-1.14694e-07,-2.71296e-08,0.903293,0.000491152,-1.96083e-07,7.409e-09,0.903784,0.000490782,-1.73856e-07,-2.50645e-09,0.904275,0.000490427,-1.81376e-07,2.61679e-09,0.904765,0.000490072,-1.73525e-07,-7.96072e-09,0.905255,0.000489701,-1.97407e-07,2.92261e-08,0.905745,0.000489394,-1.09729e-07,-4.93389e-08,0.906234,0.000489027,-2.57746e-07,4.89204e-08,0.906723,0.000488658,-1.10985e-07,-2.71333e-08,0.907211,0.000488354,-1.92385e-07,8.30861e-12,0.907699,0.00048797,-1.9236e-07,2.71001e-08,0.908187,0.000487666,-1.1106e-07,-4.88041e-08,0.908675,0.000487298,-2.57472e-07,4.89069e-08,0.909162,0.000486929,-1.10751e-07,-2.76143e-08,0.909649,0.000486625,-1.93594e-07,1.9457e-09,0.910135,0.000486244,-1.87757e-07,1.98315e-08,0.910621,0.000485928,-1.28262e-07,-2.16671e-08,0.911107,0.000485606,-1.93264e-07,7.23216e-09,0.911592,0.000485241,-1.71567e-07,-7.26152e-09,0.912077,0.000484877,-1.93352e-07,2.18139e-08,0.912562,0.000484555,-1.2791e-07,-2.03895e-08,0.913047,0.000484238,-1.89078e-07,1.39494e-10,0.913531,0.000483861,-1.8866e-07,1.98315e-08,0.914014,0.000483543,-1.29165e-07,-1.98609e-08,0.914498,0.000483225,-1.88748e-07,7.39912e-12,0.914981,0.000482847,-1.88726e-07,1.98313e-08,0.915463,0.000482529,-1.29232e-07,-1.9728e-08,0.915946,0.000482212,-1.88416e-07,-5.24035e-10,0.916428,0.000481833,-1.89988e-07,2.18241e-08,0.916909,0.000481519,-1.24516e-07,-2.71679e-08,0.917391,0.000481188,-2.06019e-07,2.72427e-08,0.917872,0.000480858,-1.24291e-07,-2.21985e-08,0.918353,0.000480543,-1.90886e-07,1.94644e-09,0.918833,0.000480167,-1.85047e-07,1.44127e-08,0.919313,0.00047984,-1.41809e-07,7.39438e-12,0.919793,0.000479556,-1.41787e-07,-1.44423e-08,0.920272,0.000479229,-1.85114e-07,-1.84291e-09,0.920751,0.000478854,-1.90642e-07,2.18139e-08,0.92123,0.000478538,-1.25201e-07,-2.58081e-08,0.921708,0.00047821,-2.02625e-07,2.18139e-08,0.922186,0.00047787,-1.37183e-07,-1.84291e-09,0.922664,0.00047759,-1.42712e-07,-1.44423e-08,0.923141,0.000477262,-1.86039e-07,7.34701e-12,0.923618,0.00047
689,-1.86017e-07,1.44129e-08,0.924095,0.000476561,-1.42778e-07,1.94572e-09,0.924572,0.000476281,-1.36941e-07,-2.21958e-08,0.925048,0.000475941,-2.03528e-07,2.72327e-08,0.925523,0.000475615,-1.2183e-07,-2.71304e-08,0.925999,0.00047529,-2.03221e-07,2.16843e-08,0.926474,0.000474949,-1.38168e-07,-2.16005e-12,0.926949,0.000474672,-1.38175e-07,-2.16756e-08,0.927423,0.000474331,-2.03202e-07,2.71001e-08,0.927897,0.000474006,-1.21902e-07,-2.71201e-08,0.928371,0.000473681,-2.03262e-07,2.17757e-08,0.928845,0.00047334,-1.37935e-07,-3.78028e-10,0.929318,0.000473063,-1.39069e-07,-2.02636e-08,0.929791,0.000472724,-1.9986e-07,2.18276e-08,0.930263,0.000472389,-1.34377e-07,-7.44231e-09,0.930736,0.000472098,-1.56704e-07,7.94165e-09,0.931208,0.000471809,-1.32879e-07,-2.43243e-08,0.931679,0.00047147,-2.05851e-07,2.97508e-08,0.932151,0.000471148,-1.16599e-07,-3.50742e-08,0.932622,0.000470809,-2.21822e-07,5.09414e-08,0.933092,0.000470518,-6.89976e-08,-4.94821e-08,0.933563,0.000470232,-2.17444e-07,2.77775e-08,0.934033,0.00046988,-1.34111e-07,-2.02351e-09,0.934502,0.000469606,-1.40182e-07,-1.96835e-08,0.934972,0.000469267,-1.99232e-07,2.11529e-08,0.935441,0.000468932,-1.35774e-07,-5.32332e-09,0.93591,0.000468644,-1.51743e-07,1.40413e-10,0.936378,0.000468341,-1.51322e-07,4.76166e-09,0.936846,0.000468053,-1.37037e-07,-1.9187e-08,0.937314,0.000467721,-1.94598e-07,1.23819e-08,0.937782,0.000467369,-1.57453e-07,2.92642e-08,0.938249,0.000467142,-6.96601e-08,-6.98342e-08,0.938716,0.000466793,-2.79163e-07,7.12586e-08,0.939183,0.000466449,-6.53869e-08,-3.63863e-08,0.939649,0.000466209,-1.74546e-07,1.46818e-08,0.940115,0.000465904,-1.305e-07,-2.2341e-08,0.940581,0.000465576,-1.97523e-07,1.50774e-08,0.941046,0.000465226,-1.52291e-07,2.16359e-08,0.941511,0.000464986,-8.73832e-08,-4.20162e-08,0.941976,0.000464685,-2.13432e-07,2.72198e-08,0.942441,0.00046434,-1.31773e-07,-7.2581e-09,0.942905,0.000464055,-1.53547e-07,1.81263e-09,0.943369,0.000463753,-1.48109e-07,7.58386e-12,0.943832,0.000463457,-1.48086e-07,-1.84298e-09,0.944296,0.000463155,-1.53615e-07,7.36433e-09,0.944759,0.00046287,-1.31522e-07,-2.76143e-08,0.945221,0.000462524,-2.14365e-07,4.34883e-08,0.945684,0.000462226,-8.39003e-08,-2.71297e-08,0.946146,0.000461977,-1.65289e-07,5.42595e-09,0.946608,0.000461662,-1.49012e-07,5.42593e-09,0.947069,0.000461381,-1.32734e-07,-2.71297e-08,0.94753,0.000461034,-2.14123e-07,4.34881e-08,0.947991,0.000460736,-8.36585e-08,-2.76134e-08,0.948452,0.000460486,-1.66499e-07,7.36083e-09,0.948912,0.000460175,-1.44416e-07,-1.82993e-09,0.949372,0.000459881,-1.49906e-07,-4.11073e-11,0.949832,0.000459581,-1.50029e-07,1.99434e-09,0.950291,0.000459287,-1.44046e-07,-7.93627e-09,0.950751,0.000458975,-1.67855e-07,2.97507e-08,0.951209,0.000458728,-7.86029e-08,-5.1462e-08,0.951668,0.000458417,-2.32989e-07,5.6888e-08,0.952126,0.000458121,-6.2325e-08,-5.68806e-08,0.952584,0.000457826,-2.32967e-07,5.14251e-08,0.953042,0.000457514,-7.86914e-08,-2.96107e-08,0.953499,0.000457268,-1.67523e-07,7.41296e-09,0.953956,0.000456955,-1.45285e-07,-4.11262e-11,0.954413,0.000456665,-1.45408e-07,-7.24847e-09,0.95487,0.000456352,-1.67153e-07,2.9035e-08,0.955326,0.000456105,-8.00484e-08,-4.92869e-08,0.955782,0.000455797,-2.27909e-07,4.89032e-08,0.956238,0.000455488,-8.11994e-08,-2.71166e-08,0.956693,0.000455244,-1.62549e-07,-4.13678e-11,0.957148,0.000454919,-1.62673e-07,2.72821e-08,0.957603,0.000454675,-8.0827e-08,-4.94824e-08,0.958057,0.000454365,-2.29274e-07,5.14382e-08,0.958512,0.000454061,-7.49597e-08,-3.7061e-08,0.958965,0.0004538,-1.86143e-07,3.72013e-08,0.959419,0.00045
3539,-7.45389e-08,-5.21396e-08,0.959873,0.000453234,-2.30958e-07,5.21476e-08,0.960326,0.000452928,-7.45146e-08,-3.72416e-08,0.960778,0.000452667,-1.8624e-07,3.72143e-08,0.961231,0.000452407,-7.45967e-08,-5.20109e-08,0.961683,0.000452101,-2.30629e-07,5.16199e-08,0.962135,0.000451795,-7.57696e-08,-3.52595e-08,0.962587,0.000451538,-1.81548e-07,2.98133e-08,0.963038,0.000451264,-9.2108e-08,-2.43892e-08,0.963489,0.000451007,-1.65276e-07,8.13892e-09,0.96394,0.000450701,-1.40859e-07,-8.16647e-09,0.964391,0.000450394,-1.65358e-07,2.45269e-08,0.964841,0.000450137,-9.17775e-08,-3.03367e-08,0.965291,0.000449863,-1.82787e-07,3.7215e-08,0.965741,0.000449609,-7.11424e-08,-5.89188e-08,0.96619,0.00044929,-2.47899e-07,7.92509e-08,0.966639,0.000449032,-1.01462e-08,-7.92707e-08,0.967088,0.000448773,-2.47958e-07,5.90181e-08,0.967537,0.000448455,-7.0904e-08,-3.75925e-08,0.967985,0.0004482,-1.83681e-07,3.17471e-08,0.968433,0.000447928,-8.84401e-08,-2.97913e-08,0.968881,0.000447662,-1.77814e-07,2.78133e-08,0.969329,0.000447389,-9.4374e-08,-2.18572e-08,0.969776,0.000447135,-1.59946e-07,1.10134e-11,0.970223,0.000446815,-1.59913e-07,2.18132e-08,0.97067,0.000446561,-9.44732e-08,-2.76591e-08,0.971116,0.000446289,-1.7745e-07,2.92185e-08,0.971562,0.000446022,-8.97948e-08,-2.96104e-08,0.972008,0.000445753,-1.78626e-07,2.96185e-08,0.972454,0.000445485,-8.97706e-08,-2.92588e-08,0.972899,0.000445218,-1.77547e-07,2.78123e-08,0.973344,0.000444946,-9.41103e-08,-2.23856e-08,0.973789,0.000444691,-1.61267e-07,2.12559e-09,0.974233,0.000444374,-1.5489e-07,1.38833e-08,0.974678,0.000444106,-1.13241e-07,1.94591e-09,0.975122,0.000443886,-1.07403e-07,-2.16669e-08,0.975565,0.000443606,-1.72404e-07,2.5117e-08,0.976009,0.000443336,-9.70526e-08,-1.91963e-08,0.976452,0.000443085,-1.54642e-07,-7.93627e-09,0.976895,0.000442752,-1.7845e-07,5.09414e-08,0.977338,0.000442548,-2.56262e-08,-7.66201e-08,0.97778,0.000442266,-2.55486e-07,7.67249e-08,0.978222,0.000441986,-2.53118e-08,-5.14655e-08,0.978664,0.000441781,-1.79708e-07,9.92773e-09,0.979106,0.000441451,-1.49925e-07,1.17546e-08,0.979547,0.000441186,-1.14661e-07,2.65868e-09,0.979988,0.000440965,-1.06685e-07,-2.23893e-08,0.980429,0.000440684,-1.73853e-07,2.72939e-08,0.980869,0.000440419,-9.19716e-08,-2.71816e-08,0.98131,0.000440153,-1.73516e-07,2.18278e-08,0.98175,0.000439872,-1.08033e-07,-5.24833e-10,0.982189,0.000439654,-1.09607e-07,-1.97284e-08,0.982629,0.000439376,-1.68793e-07,1.98339e-08,0.983068,0.000439097,-1.09291e-07,-2.62901e-12,0.983507,0.000438879,-1.09299e-07,-1.98234e-08,0.983946,0.000438601,-1.68769e-07,1.96916e-08,0.984384,0.000438322,-1.09694e-07,6.6157e-10,0.984823,0.000438105,-1.0771e-07,-2.23379e-08,0.985261,0.000437823,-1.74723e-07,2.90855e-08,0.985698,0.00043756,-8.74669e-08,-3.43992e-08,0.986136,0.000437282,-1.90665e-07,4.89068e-08,0.986573,0.000437048,-4.39442e-08,-4.20188e-08,0.98701,0.000436834,-1.7e-07,-4.11073e-11,0.987446,0.000436494,-1.70124e-07,4.21832e-08,0.987883,0.00043628,-4.35742e-08,-4.94824e-08,0.988319,0.000436044,-1.92021e-07,3.6537e-08,0.988755,0.00043577,-8.24102e-08,-3.70611e-08,0.989191,0.000435494,-1.93593e-07,5.21026e-08,0.989626,0.000435263,-3.72855e-08,-5.21402e-08,0.990061,0.000435032,-1.93706e-07,3.7249e-08,0.990496,0.000434756,-8.19592e-08,-3.72512e-08,0.990931,0.000434481,-1.93713e-07,5.21511e-08,0.991365,0.00043425,-3.72595e-08,-5.21439e-08,0.991799,0.000434019,-1.93691e-07,3.72152e-08,0.992233,0.000433743,-8.20456e-08,-3.71123e-08,0.992667,0.000433468,-1.93382e-07,5.16292e-08,0.9931,0.000433236,-3.84947e-08,-5.01953e-08,0.993533,0.000433008,-1.
89081e-07,2.99427e-08,0.993966,0.00043272,-9.92525e-08,-9.9708e-09,0.994399,0.000432491,-1.29165e-07,9.94051e-09,0.994831,0.000432263,-9.93434e-08,-2.97912e-08,0.995263,0.000431975,-1.88717e-07,4.96198e-08,0.995695,0.000431746,-3.98578e-08,-4.94785e-08,0.996127,0.000431518,-1.88293e-07,2.9085e-08,0.996558,0.000431229,-1.01038e-07,-7.25675e-09,0.996989,0.000431005,-1.22809e-07,-5.79945e-11,0.99742,0.000430759,-1.22983e-07,7.48873e-09,0.997851,0.000430536,-1.00516e-07,-2.98969e-08,0.998281,0.000430245,-1.90207e-07,5.24942e-08,0.998711,0.000430022,-3.27246e-08,-6.08706e-08,0.999141,0.000429774,-2.15336e-07,7.17788e-08,0.999571,0.000429392,0.,0.}; + + template + __device__ __forceinline__ void Lab2RGBConvert_32F(const T& src, D& dst) + { + const float lThresh = 0.008856f * 903.3f; + const float fThresh = 7.787f * 0.008856f + 16.0f / 116.0f; + + float Y, fy; + + if (src.x <= lThresh) + { + Y = src.x / 903.3f; + fy = 7.787f * Y + 16.0f / 116.0f; + } + else + { + fy = (src.x + 16.0f) / 116.0f; + Y = fy * fy * fy; + } + + float X = src.y / 500.0f + fy; + float Z = fy - src.z / 200.0f; + + if (X <= fThresh) + X = (X - 16.0f / 116.0f) / 7.787f; + else + X = X * X * X; + + if (Z <= fThresh) + Z = (Z - 16.0f / 116.0f) / 7.787f; + else + Z = Z * Z * Z; + + float B = 0.052891f * X - 0.204043f * Y + 1.151152f * Z; + float G = -0.921235f * X + 1.875991f * Y + 0.045244f * Z; + float R = 3.079933f * X - 1.537150f * Y - 0.542782f * Z; + + if (srgb) + { + B = splineInterpolate(B * GAMMA_TAB_SIZE, c_sRGBInvGammaTab, GAMMA_TAB_SIZE); + G = splineInterpolate(G * GAMMA_TAB_SIZE, c_sRGBInvGammaTab, GAMMA_TAB_SIZE); + R = splineInterpolate(R * GAMMA_TAB_SIZE, c_sRGBInvGammaTab, GAMMA_TAB_SIZE); + } + + dst.x = blueIdx == 0 ? B : R; + dst.y = G; + dst.z = blueIdx == 0 ? R : B; + setAlpha(dst, ColorChannel::max()); + } + + template + __device__ __forceinline__ void Lab2RGBConvert_8U(const T& src, D& dst) + { + float3 srcf, dstf; + + srcf.x = src.x * (100.f / 255.f); + srcf.y = src.y - 128; + srcf.z = src.z - 128; + + Lab2RGBConvert_32F(srcf, dstf); + + dst.x = saturate_cast(dstf.x * 255.f); + dst.y = saturate_cast(dstf.y * 255.f); + dst.z = saturate_cast(dstf.z * 255.f); + setAlpha(dst, ColorChannel::max()); + } + + template struct Lab2RGB; + + template + struct Lab2RGB + : unary_function::vec_type, typename TypeVec::vec_type> + { + __device__ __forceinline__ typename TypeVec::vec_type operator ()(const typename TypeVec::vec_type& src) const + { + typename TypeVec::vec_type dst; + + Lab2RGBConvert_8U(src, dst); + + return dst; + } + + __host__ __device__ __forceinline__ Lab2RGB() {} + __host__ __device__ __forceinline__ Lab2RGB(const Lab2RGB&) {} + }; + + template + struct Lab2RGB + : unary_function::vec_type, typename TypeVec::vec_type> + { + __device__ __forceinline__ typename TypeVec::vec_type operator ()(const typename TypeVec::vec_type& src) const + { + typename TypeVec::vec_type dst; + + Lab2RGBConvert_32F(src, dst); + + return dst; + } + + __host__ __device__ __forceinline__ Lab2RGB() {} + __host__ __device__ __forceinline__ Lab2RGB(const Lab2RGB&) {} + }; + } + +#define OPENCV_GPU_IMPLEMENT_Lab2RGB_TRAITS(name, scn, dcn, srgb, blueIdx) \ + template struct name ## _traits \ + { \ + typedef ::cv::gpu::device::color_detail::Lab2RGB functor_type; \ + static __host__ __device__ __forceinline__ functor_type create_functor() \ + { \ + return functor_type(); \ + } \ + }; + +///////////////////////////////////// RGB <-> Luv ///////////////////////////////////// + + namespace color_detail + { + __constant__ float 
c_LabCbrtTab[] = {0.137931,0.0114066,0.,1.18859e-07,0.149338,0.011407,3.56578e-07,-5.79396e-07,0.160745,0.0114059,-1.38161e-06,2.16892e-06,0.172151,0.0114097,5.12516e-06,-8.0814e-06,0.183558,0.0113957,-1.9119e-05,3.01567e-05,0.194965,0.0114479,7.13509e-05,-0.000112545,0.206371,0.011253,-0.000266285,-0.000106493,0.217252,0.0104009,-0.000585765,7.32149e-05,0.22714,0.00944906,-0.00036612,1.21917e-05,0.236235,0.0087534,-0.000329545,2.01753e-05,0.244679,0.00815483,-0.000269019,1.24435e-05,0.252577,0.00765412,-0.000231689,1.05618e-05,0.26001,0.00722243,-0.000200003,8.26662e-06,0.267041,0.00684723,-0.000175203,6.76746e-06,0.27372,0.00651712,-0.000154901,5.61192e-06,0.280088,0.00622416,-0.000138065,4.67009e-06,0.286179,0.00596204,-0.000124055,3.99012e-06,0.292021,0.0057259,-0.000112085,3.36032e-06,0.297638,0.00551181,-0.000102004,2.95338e-06,0.30305,0.00531666,-9.31435e-05,2.52875e-06,0.308277,0.00513796,-8.55572e-05,2.22022e-06,0.313331,0.00497351,-7.88966e-05,1.97163e-06,0.318228,0.00482163,-7.29817e-05,1.7248e-06,0.322978,0.00468084,-6.78073e-05,1.55998e-06,0.327593,0.0045499,-6.31274e-05,1.36343e-06,0.332081,0.00442774,-5.90371e-05,1.27136e-06,0.336451,0.00431348,-5.5223e-05,1.09111e-06,0.34071,0.00420631,-5.19496e-05,1.0399e-06,0.344866,0.00410553,-4.88299e-05,9.18347e-07,0.348923,0.00401062,-4.60749e-05,8.29942e-07,0.352889,0.00392096,-4.35851e-05,7.98478e-07,0.356767,0.00383619,-4.11896e-05,6.84917e-07,0.360562,0.00375586,-3.91349e-05,6.63976e-07,0.36428,0.00367959,-3.7143e-05,5.93086e-07,0.367923,0.00360708,-3.53637e-05,5.6976e-07,0.371495,0.00353806,-3.36544e-05,4.95533e-07,0.375,0.00347224,-3.21678e-05,4.87951e-07,0.378441,0.00340937,-3.0704e-05,4.4349e-07,0.38182,0.00334929,-2.93735e-05,4.20297e-07,0.38514,0.0032918,-2.81126e-05,3.7872e-07,0.388404,0.00323671,-2.69764e-05,3.596e-07,0.391614,0.00318384,-2.58976e-05,3.5845e-07,0.394772,0.00313312,-2.48223e-05,2.92765e-07,0.397881,0.00308435,-2.3944e-05,3.18232e-07,0.400942,0.00303742,-2.29893e-05,2.82046e-07,0.403957,0.00299229,-2.21432e-05,2.52315e-07,0.406927,0.00294876,-2.13862e-05,2.58416e-07,0.409855,0.00290676,-2.0611e-05,2.33939e-07,0.412741,0.00286624,-1.99092e-05,2.36342e-07,0.415587,0.00282713,-1.92001e-05,1.916e-07,0.418396,0.00278931,-1.86253e-05,2.1915e-07,0.421167,0.00275271,-1.79679e-05,1.83498e-07,0.423901,0.00271733,-1.74174e-05,1.79343e-07,0.426602,0.00268303,-1.68794e-05,1.72013e-07,0.429268,0.00264979,-1.63633e-05,1.75686e-07,0.431901,0.00261759,-1.58363e-05,1.3852e-07,0.434503,0.00258633,-1.54207e-05,1.64304e-07,0.437074,0.00255598,-1.49278e-05,1.28136e-07,0.439616,0.00252651,-1.45434e-05,1.57618e-07,0.442128,0.0024979,-1.40705e-05,1.0566e-07,0.444612,0.00247007,-1.37535e-05,1.34998e-07,0.447068,0.00244297,-1.33485e-05,1.29207e-07,0.449498,0.00241666,-1.29609e-05,9.32347e-08,0.451902,0.00239102,-1.26812e-05,1.23703e-07,0.45428,0.00236603,-1.23101e-05,9.74072e-08,0.456634,0.0023417,-1.20179e-05,1.12518e-07,0.458964,0.002318,-1.16803e-05,7.83681e-08,0.46127,0.00229488,-1.14452e-05,1.10452e-07,0.463554,0.00227232,-1.11139e-05,7.58719e-08,0.465815,0.00225032,-1.08863e-05,9.2699e-08,0.468055,0.00222882,-1.06082e-05,8.97738e-08,0.470273,0.00220788,-1.03388e-05,5.4845e-08,0.47247,0.00218736,-1.01743e-05,1.0808e-07,0.474648,0.00216734,-9.85007e-06,4.9277e-08,0.476805,0.00214779,-9.70224e-06,8.22408e-08,0.478943,0.00212863,-9.45551e-06,6.87942e-08,0.481063,0.00210993,-9.24913e-06,5.98144e-08,0.483163,0.00209161,-9.06969e-06,7.93789e-08,0.485246,0.00207371,-8.83155e-06,3.99032e-08,0.487311,0.00205616,-8.71184e-06,8.88325e-08,0.4
89358,0.002039,-8.44534e-06,2.20004e-08,0.491389,0.00202218,-8.37934e-06,9.13872e-08,0.493403,0.0020057,-8.10518e-06,2.96829e-08,0.495401,0.00198957,-8.01613e-06,5.81028e-08,0.497382,0.00197372,-7.84183e-06,6.5731e-08,0.499348,0.00195823,-7.64463e-06,3.66019e-08,0.501299,0.00194305,-7.53483e-06,2.62811e-08,0.503234,0.00192806,-7.45598e-06,9.66907e-08,0.505155,0.00191344,-7.16591e-06,4.18928e-09,0.507061,0.00189912,-7.15334e-06,6.53665e-08,0.508953,0.00188501,-6.95724e-06,3.23686e-08,0.510831,0.00187119,-6.86014e-06,4.35774e-08,0.512696,0.0018576,-6.72941e-06,3.17406e-08,0.514547,0.00184424,-6.63418e-06,6.78785e-08,0.516384,0.00183117,-6.43055e-06,-5.23126e-09,0.518209,0.0018183,-6.44624e-06,7.22562e-08,0.520021,0.00180562,-6.22947e-06,1.42292e-08,0.52182,0.0017932,-6.18679e-06,4.9641e-08,0.523607,0.00178098,-6.03786e-06,2.56259e-08,0.525382,0.00176898,-5.96099e-06,2.66696e-08,0.527145,0.00175714,-5.88098e-06,4.65094e-08,0.528897,0.00174552,-5.74145e-06,2.57114e-08,0.530637,0.00173411,-5.66431e-06,2.94588e-08,0.532365,0.00172287,-5.57594e-06,3.52667e-08,0.534082,0.00171182,-5.47014e-06,8.28868e-09,0.535789,0.00170091,-5.44527e-06,5.07871e-08,0.537484,0.00169017,-5.29291e-06,2.69817e-08,0.539169,0.00167967,-5.21197e-06,2.01009e-08,0.540844,0.0016693,-5.15166e-06,1.18237e-08,0.542508,0.00165903,-5.11619e-06,5.18135e-08,0.544162,0.00164896,-4.96075e-06,1.9341e-08,0.545806,0.00163909,-4.90273e-06,-9.96867e-09,0.54744,0.00162926,-4.93263e-06,8.01382e-08,0.549064,0.00161963,-4.69222e-06,-1.25601e-08,0.550679,0.00161021,-4.7299e-06,2.97067e-08,0.552285,0.00160084,-4.64078e-06,1.29426e-08,0.553881,0.0015916,-4.60195e-06,3.77327e-08,0.555468,0.00158251,-4.48875e-06,1.49412e-08,0.557046,0.00157357,-4.44393e-06,2.17118e-08,0.558615,0.00156475,-4.3788e-06,1.74206e-08,0.560176,0.00155605,-4.32653e-06,2.78152e-08,0.561727,0.00154748,-4.24309e-06,-9.47239e-09,0.563271,0.00153896,-4.27151e-06,6.9679e-08,0.564805,0.00153063,-4.06247e-06,-3.08246e-08,0.566332,0.00152241,-4.15494e-06,5.36188e-08,0.56785,0.00151426,-3.99409e-06,-4.83594e-09,0.56936,0.00150626,-4.00859e-06,2.53293e-08,0.570863,0.00149832,-3.93261e-06,2.27286e-08,0.572357,0.00149052,-3.86442e-06,2.96541e-09,0.573844,0.0014828,-3.85552e-06,2.50147e-08,0.575323,0.00147516,-3.78048e-06,1.61842e-08,0.576794,0.00146765,-3.73193e-06,2.94582e-08,0.578258,0.00146028,-3.64355e-06,-1.48076e-08,0.579715,0.00145295,-3.68798e-06,2.97724e-08,0.581164,0.00144566,-3.59866e-06,1.49272e-08,0.582606,0.00143851,-3.55388e-06,2.97285e-08,0.584041,0.00143149,-3.46469e-06,-1.46323e-08,0.585469,0.00142451,-3.50859e-06,2.88004e-08,0.58689,0.00141758,-3.42219e-06,1.864e-08,0.588304,0.00141079,-3.36627e-06,1.58482e-08,0.589712,0.00140411,-3.31872e-06,-2.24279e-08,0.591112,0.00139741,-3.38601e-06,7.38639e-08,0.592507,0.00139085,-3.16441e-06,-3.46088e-08,0.593894,0.00138442,-3.26824e-06,4.96675e-09,0.595275,0.0013779,-3.25334e-06,7.4346e-08,0.59665,0.00137162,-3.0303e-06,-6.39319e-08,0.598019,0.00136536,-3.2221e-06,6.21725e-08,0.599381,0.00135911,-3.03558e-06,-5.94423e-09,0.600737,0.00135302,-3.05341e-06,2.12091e-08,0.602087,0.00134697,-2.98979e-06,-1.92876e-08,0.603431,0.00134094,-3.04765e-06,5.5941e-08,0.604769,0.00133501,-2.87983e-06,-2.56622e-08,0.606101,0.00132917,-2.95681e-06,4.67078e-08,0.607427,0.0013234,-2.81669e-06,-4.19592e-08,0.608748,0.00131764,-2.94257e-06,6.15243e-08,0.610062,0.00131194,-2.75799e-06,-2.53244e-08,0.611372,0.00130635,-2.83397e-06,3.97739e-08,0.612675,0.0013008,-2.71465e-06,-1.45618e-08,0.613973,0.00129533,-2.75833e-06,1.84733e-08,0.615266,0.00128
986,-2.70291e-06,2.73606e-10,0.616553,0.00128446,-2.70209e-06,4.00367e-08,0.617835,0.00127918,-2.58198e-06,-4.12113e-08,0.619111,0.00127389,-2.70561e-06,6.52039e-08,0.620383,0.00126867,-2.51e-06,-4.07901e-08,0.621649,0.00126353,-2.63237e-06,3.83516e-08,0.62291,0.00125838,-2.51732e-06,6.59315e-09,0.624166,0.00125337,-2.49754e-06,-5.11939e-09,0.625416,0.00124836,-2.5129e-06,1.38846e-08,0.626662,0.00124337,-2.47124e-06,9.18514e-09,0.627903,0.00123846,-2.44369e-06,8.97952e-09,0.629139,0.0012336,-2.41675e-06,1.45012e-08,0.63037,0.00122881,-2.37325e-06,-7.37949e-09,0.631597,0.00122404,-2.39538e-06,1.50169e-08,0.632818,0.00121929,-2.35033e-06,6.91648e-09,0.634035,0.00121461,-2.32958e-06,1.69219e-08,0.635248,0.00121,-2.27882e-06,-1.49997e-08,0.636455,0.0012054,-2.32382e-06,4.30769e-08,0.637659,0.00120088,-2.19459e-06,-3.80986e-08,0.638857,0.00119638,-2.30888e-06,4.97134e-08,0.640051,0.00119191,-2.15974e-06,-4.15463e-08,0.641241,0.00118747,-2.28438e-06,5.68667e-08,0.642426,0.00118307,-2.11378e-06,-7.10641e-09,0.643607,0.00117882,-2.1351e-06,-2.8441e-08,0.644784,0.00117446,-2.22042e-06,6.12658e-08,0.645956,0.00117021,-2.03663e-06,-3.78083e-08,0.647124,0.00116602,-2.15005e-06,3.03627e-08,0.648288,0.00116181,-2.05896e-06,-2.40379e-08,0.649448,0.00115762,-2.13108e-06,6.57887e-08,0.650603,0.00115356,-1.93371e-06,-6.03028e-08,0.651755,0.00114951,-2.11462e-06,5.62134e-08,0.652902,0.00114545,-1.94598e-06,-4.53417e-08,0.654046,0.00114142,-2.082e-06,6.55489e-08,0.655185,0.00113745,-1.88536e-06,-3.80396e-08,0.656321,0.00113357,-1.99948e-06,2.70049e-08,0.657452,0.00112965,-1.91846e-06,-1.03755e-08,0.65858,0.00112578,-1.94959e-06,1.44973e-08,0.659704,0.00112192,-1.9061e-06,1.1991e-08,0.660824,0.00111815,-1.87012e-06,-2.85634e-09,0.66194,0.0011144,-1.87869e-06,-5.65782e-10,0.663053,0.00111064,-1.88039e-06,5.11947e-09,0.664162,0.0011069,-1.86503e-06,3.96924e-08,0.665267,0.00110328,-1.74595e-06,-4.46795e-08,0.666368,0.00109966,-1.87999e-06,1.98161e-08,0.667466,0.00109596,-1.82054e-06,2.502e-08,0.66856,0.00109239,-1.74548e-06,-6.86593e-10,0.669651,0.0010889,-1.74754e-06,-2.22739e-08,0.670738,0.00108534,-1.81437e-06,3.01776e-08,0.671821,0.0010818,-1.72383e-06,2.07732e-08,0.672902,0.00107841,-1.66151e-06,-5.36658e-08,0.673978,0.00107493,-1.82251e-06,7.46802e-08,0.675051,0.00107151,-1.59847e-06,-6.62411e-08,0.676121,0.00106811,-1.79719e-06,7.10748e-08,0.677188,0.00106473,-1.58397e-06,-3.92441e-08,0.678251,0.00106145,-1.7017e-06,2.62973e-08,0.679311,0.00105812,-1.62281e-06,-6.34035e-09,0.680367,0.00105486,-1.64183e-06,-9.36249e-10,0.68142,0.00105157,-1.64464e-06,1.00854e-08,0.68247,0.00104831,-1.61438e-06,2.01995e-08,0.683517,0.00104514,-1.55378e-06,-3.1279e-08,0.68456,0.00104194,-1.64762e-06,4.53114e-08,0.685601,0.00103878,-1.51169e-06,-3.07573e-08,0.686638,0.00103567,-1.60396e-06,1.81133e-08,0.687672,0.00103251,-1.54962e-06,1.79085e-08,0.688703,0.00102947,-1.49589e-06,-3.01428e-08,0.689731,0.00102639,-1.58632e-06,4.30583e-08,0.690756,0.00102334,-1.45715e-06,-2.28814e-08,0.691778,0.00102036,-1.52579e-06,-1.11373e-08,0.692797,0.00101727,-1.5592e-06,6.74305e-08,0.693812,0.00101436,-1.35691e-06,-7.97709e-08,0.694825,0.0010114,-1.59622e-06,7.28391e-08,0.695835,0.00100843,-1.37771e-06,-3.27715e-08,0.696842,0.00100558,-1.47602e-06,-1.35807e-09,0.697846,0.00100262,-1.48009e-06,3.82037e-08,0.698847,0.000999775,-1.36548e-06,-3.22474e-08,0.699846,0.000996948,-1.46223e-06,3.11809e-08,0.700841,0.000994117,-1.36868e-06,-3.28714e-08,0.701834,0.000991281,-1.4673e-06,4.07001e-08,0.702824,0.000988468,-1.3452e-06,-1.07197e-08,0.703811,0
.000985746,-1.37736e-06,2.17866e-09,0.704795,0.000982998,-1.37082e-06,2.00521e-09,0.705777,0.000980262,-1.3648e-06,-1.01996e-08,0.706756,0.000977502,-1.3954e-06,3.87931e-08,0.707732,0.000974827,-1.27902e-06,-2.57632e-08,0.708706,0.000972192,-1.35631e-06,4.65513e-09,0.709676,0.000969493,-1.34235e-06,7.14257e-09,0.710645,0.00096683,-1.32092e-06,2.63791e-08,0.71161,0.000964267,-1.24178e-06,-5.30543e-08,0.712573,0.000961625,-1.40095e-06,6.66289e-08,0.713533,0.000959023,-1.20106e-06,-3.46474e-08,0.714491,0.000956517,-1.305e-06,1.23559e-08,0.715446,0.000953944,-1.26793e-06,-1.47763e-08,0.716399,0.000951364,-1.31226e-06,4.67494e-08,0.717349,0.000948879,-1.17201e-06,-5.3012e-08,0.718297,0.000946376,-1.33105e-06,4.60894e-08,0.719242,0.000943852,-1.19278e-06,-1.21366e-08,0.720185,0.00094143,-1.22919e-06,2.45673e-09,0.721125,0.000938979,-1.22182e-06,2.30966e-09,0.722063,0.000936543,-1.21489e-06,-1.16954e-08,0.722998,0.000934078,-1.24998e-06,4.44718e-08,0.723931,0.000931711,-1.11656e-06,-4.69823e-08,0.724861,0.000929337,-1.25751e-06,2.4248e-08,0.725789,0.000926895,-1.18477e-06,9.5949e-09,0.726715,0.000924554,-1.15598e-06,-3.02286e-09,0.727638,0.000922233,-1.16505e-06,2.49649e-09,0.72856,0.00091991,-1.15756e-06,-6.96321e-09,0.729478,0.000917575,-1.17845e-06,2.53564e-08,0.730395,0.000915294,-1.10238e-06,-3.48578e-08,0.731309,0.000912984,-1.20695e-06,5.44704e-08,0.732221,0.000910734,-1.04354e-06,-6.38144e-08,0.73313,0.000908455,-1.23499e-06,8.15781e-08,0.734038,0.00090623,-9.90253e-07,-8.3684e-08,0.734943,0.000903999,-1.2413e-06,7.43441e-08,0.735846,0.000901739,-1.01827e-06,-3.48787e-08,0.736746,0.000899598,-1.12291e-06,5.56596e-09,0.737645,0.000897369,-1.10621e-06,1.26148e-08,0.738541,0.000895194,-1.06837e-06,3.57935e-09,0.739435,0.000893068,-1.05763e-06,-2.69322e-08,0.740327,0.000890872,-1.13842e-06,4.45448e-08,0.741217,0.000888729,-1.00479e-06,-3.20376e-08,0.742105,0.000886623,-1.1009e-06,2.40011e-08,0.74299,0.000884493,-1.0289e-06,-4.36209e-09,0.743874,0.000882422,-1.04199e-06,-6.55268e-09,0.744755,0.000880319,-1.06164e-06,3.05728e-08,0.745634,0.000878287,-9.69926e-07,-5.61338e-08,0.746512,0.000876179,-1.13833e-06,7.4753e-08,0.747387,0.000874127,-9.14068e-07,-6.40644e-08,0.74826,0.000872106,-1.10626e-06,6.22955e-08,0.749131,0.000870081,-9.19375e-07,-6.59083e-08,0.75,0.000868044,-1.1171e-06,8.21284e-08,0.750867,0.000866056,-8.70714e-07,-8.37915e-08,0.751732,0.000864064,-1.12209e-06,7.42237e-08,0.752595,0.000862042,-8.99418e-07,-3.42894e-08,0.753456,0.00086014,-1.00229e-06,3.32955e-09,0.754315,0.000858146,-9.92297e-07,2.09712e-08,0.755173,0.000856224,-9.29384e-07,-2.76096e-08,0.756028,0.000854282,-1.01221e-06,2.98627e-08,0.756881,0.000852348,-9.22625e-07,-3.22365e-08,0.757733,0.000850406,-1.01933e-06,3.94786e-08,0.758582,0.000848485,-9.00898e-07,-6.46833e-09,0.75943,0.000846664,-9.20303e-07,-1.36052e-08,0.760275,0.000844783,-9.61119e-07,1.28447e-09,0.761119,0.000842864,-9.57266e-07,8.4674e-09,0.761961,0.000840975,-9.31864e-07,2.44506e-08,0.762801,0.000839185,-8.58512e-07,-4.6665e-08,0.763639,0.000837328,-9.98507e-07,4.30001e-08,0.764476,0.00083546,-8.69507e-07,-6.12609e-09,0.76531,0.000833703,-8.87885e-07,-1.84959e-08,0.766143,0.000831871,-9.43372e-07,2.05052e-08,0.766974,0.000830046,-8.81857e-07,-3.92026e-09,0.767803,0.000828271,-8.93618e-07,-4.82426e-09,0.768631,0.000826469,-9.0809e-07,2.32172e-08,0.769456,0.000824722,-8.38439e-07,-2.84401e-08,0.77028,0.00082296,-9.23759e-07,3.09386e-08,0.771102,0.000821205,-8.30943e-07,-3.57099e-08,0.771922,0.000819436,-9.38073e-07,5.22963e-08,0.772741,0.000817717,-7
.81184e-07,-5.42658e-08,0.773558,0.000815992,-9.43981e-07,4.55579e-08,0.774373,0.000814241,-8.07308e-07,-8.75656e-09,0.775186,0.0008126,-8.33578e-07,-1.05315e-08,0.775998,0.000810901,-8.65172e-07,-8.72188e-09,0.776808,0.000809145,-8.91338e-07,4.54191e-08,0.777616,0.000807498,-7.5508e-07,-5.37454e-08,0.778423,0.000805827,-9.16317e-07,5.03532e-08,0.779228,0.000804145,-7.65257e-07,-2.84584e-08,0.780031,0.000802529,-8.50632e-07,3.87579e-09,0.780833,0.00080084,-8.39005e-07,1.29552e-08,0.781633,0.0007992,-8.00139e-07,3.90804e-09,0.782432,0.000797612,-7.88415e-07,-2.85874e-08,0.783228,0.000795949,-8.74177e-07,5.0837e-08,0.784023,0.000794353,-7.21666e-07,-5.55513e-08,0.784817,0.000792743,-8.8832e-07,5.21587e-08,0.785609,0.000791123,-7.31844e-07,-3.38744e-08,0.786399,0.000789558,-8.33467e-07,2.37342e-08,0.787188,0.000787962,-7.62264e-07,-1.45775e-09,0.787975,0.000786433,-7.66638e-07,-1.79034e-08,0.788761,0.000784846,-8.20348e-07,1.34665e-08,0.789545,0.000783246,-7.79948e-07,2.3642e-08,0.790327,0.000781757,-7.09022e-07,-4.84297e-08,0.791108,0.000780194,-8.54311e-07,5.08674e-08,0.791888,0.000778638,-7.01709e-07,-3.58303e-08,0.792666,0.000777127,-8.092e-07,3.28493e-08,0.793442,0.000775607,-7.10652e-07,-3.59624e-08,0.794217,0.000774078,-8.1854e-07,5.13959e-08,0.79499,0.000772595,-6.64352e-07,-5.04121e-08,0.795762,0.000771115,-8.15588e-07,3.10431e-08,0.796532,0.000769577,-7.22459e-07,-1.41557e-08,0.797301,0.00076809,-7.64926e-07,2.55795e-08,0.798069,0.000766636,-6.88187e-07,-2.85578e-08,0.798835,0.000765174,-7.73861e-07,2.90472e-08,0.799599,0.000763714,-6.86719e-07,-2.80262e-08,0.800362,0.000762256,-7.70798e-07,2.34531e-08,0.801123,0.000760785,-7.00438e-07,-6.18144e-09,0.801884,0.000759366,-7.18983e-07,1.27263e-09,0.802642,0.000757931,-7.15165e-07,1.09101e-09,0.803399,0.000756504,-7.11892e-07,-5.63675e-09,0.804155,0.000755064,-7.28802e-07,2.14559e-08,0.80491,0.00075367,-6.64434e-07,-2.05821e-08,0.805663,0.00075228,-7.26181e-07,1.26812e-09,0.806414,0.000750831,-7.22377e-07,1.55097e-08,0.807164,0.000749433,-6.75848e-07,-3.70216e-09,0.807913,0.00074807,-6.86954e-07,-7.0105e-10,0.80866,0.000746694,-6.89057e-07,6.5063e-09,0.809406,0.000745336,-6.69538e-07,-2.53242e-08,0.810151,0.000743921,-7.45511e-07,3.51858e-08,0.810894,0.000742535,-6.39953e-07,3.79034e-09,0.811636,0.000741267,-6.28582e-07,-5.03471e-08,0.812377,0.000739858,-7.79624e-07,7.83886e-08,0.813116,0.000738534,-5.44458e-07,-8.43935e-08,0.813854,0.000737192,-7.97638e-07,8.03714e-08,0.81459,0.000735838,-5.56524e-07,-5.82784e-08,0.815325,0.00073455,-7.31359e-07,3.35329e-08,0.816059,0.000733188,-6.3076e-07,-1.62486e-08,0.816792,0.000731878,-6.79506e-07,3.14614e-08,0.817523,0.000730613,-5.85122e-07,-4.99925e-08,0.818253,0.000729293,-7.35099e-07,4.92994e-08,0.818982,0.000727971,-5.87201e-07,-2.79959e-08,0.819709,0.000726712,-6.71189e-07,3.07959e-09,0.820435,0.000725379,-6.6195e-07,1.56777e-08,0.82116,0.000724102,-6.14917e-07,-6.18564e-09,0.821883,0.000722854,-6.33474e-07,9.06488e-09,0.822606,0.000721614,-6.06279e-07,-3.00739e-08,0.823327,0.000720311,-6.96501e-07,5.16262e-08,0.824046,0.000719073,-5.41623e-07,-5.72214e-08,0.824765,0.000717818,-7.13287e-07,5.80503e-08,0.825482,0.000716566,-5.39136e-07,-5.57703e-08,0.826198,0.00071532,-7.06447e-07,4.58215e-08,0.826912,0.000714045,-5.68983e-07,-8.30636e-09,0.827626,0.000712882,-5.93902e-07,-1.25961e-08,0.828338,0.000711656,-6.3169e-07,-9.13985e-10,0.829049,0.00071039,-6.34432e-07,1.62519e-08,0.829759,0.00070917,-5.85676e-07,-4.48904e-09,0.830468,0.000707985,-5.99143e-07,1.70418e-09,0.831175,0.000706792,-5.9403
e-07,-2.32768e-09,0.831881,0.000705597,-6.01014e-07,7.60648e-09,0.832586,0.000704418,-5.78194e-07,-2.80982e-08,0.83329,0.000703177,-6.62489e-07,4.51817e-08,0.833993,0.000701988,-5.26944e-07,-3.34192e-08,0.834694,0.000700834,-6.27201e-07,2.88904e-08,0.835394,0.000699666,-5.4053e-07,-2.25378e-08,0.836093,0.000698517,-6.08143e-07,1.65589e-09,0.836791,0.000697306,-6.03176e-07,1.59142e-08,0.837488,0.000696147,-5.55433e-07,-5.70801e-09,0.838184,0.000695019,-5.72557e-07,6.91792e-09,0.838878,0.000693895,-5.51803e-07,-2.19637e-08,0.839571,0.000692725,-6.17694e-07,2.13321e-08,0.840263,0.000691554,-5.53698e-07,-3.75996e-09,0.840954,0.000690435,-5.64978e-07,-6.29219e-09,0.841644,0.000689287,-5.83855e-07,2.89287e-08,0.842333,0.000688206,-4.97068e-07,-4.98181e-08,0.843021,0.000687062,-6.46523e-07,5.11344e-08,0.843707,0.000685922,-4.9312e-07,-3.55102e-08,0.844393,0.00068483,-5.9965e-07,3.13019e-08,0.845077,0.000683724,-5.05745e-07,-3.00925e-08,0.84576,0.000682622,-5.96022e-07,2.94636e-08,0.846442,0.000681519,-5.07631e-07,-2.81572e-08,0.847123,0.000680419,-5.92103e-07,2.35606e-08,0.847803,0.000679306,-5.21421e-07,-6.48045e-09,0.848482,0.000678243,-5.40863e-07,2.36124e-09,0.849159,0.000677169,-5.33779e-07,-2.96461e-09,0.849836,0.000676092,-5.42673e-07,9.49728e-09,0.850512,0.000675035,-5.14181e-07,-3.50245e-08,0.851186,0.000673902,-6.19254e-07,7.09959e-08,0.851859,0.000672876,-4.06267e-07,-7.01453e-08,0.852532,0.000671853,-6.16703e-07,3.07714e-08,0.853203,0.000670712,-5.24388e-07,6.66423e-09,0.853873,0.000669684,-5.04396e-07,2.17629e-09,0.854542,0.000668681,-4.97867e-07,-1.53693e-08,0.855211,0.000667639,-5.43975e-07,-3.03752e-10,0.855878,0.000666551,-5.44886e-07,1.65844e-08,0.856544,0.000665511,-4.95133e-07,-6.42907e-09,0.857209,0.000664501,-5.1442e-07,9.13195e-09,0.857873,0.0006635,-4.87024e-07,-3.00987e-08,0.858536,0.000662435,-5.7732e-07,5.16584e-08,0.859198,0.000661436,-4.22345e-07,-5.73255e-08,0.859859,0.000660419,-5.94322e-07,5.84343e-08,0.860518,0.000659406,-4.19019e-07,-5.72022e-08,0.861177,0.000658396,-5.90626e-07,5.11653e-08,0.861835,0.000657368,-4.3713e-07,-2.82495e-08,0.862492,0.000656409,-5.21878e-07,2.22788e-09,0.863148,0.000655372,-5.15195e-07,1.9338e-08,0.863803,0.0006544,-4.5718e-07,-1.99754e-08,0.864457,0.000653425,-5.17107e-07,9.59024e-10,0.86511,0.000652394,-5.1423e-07,1.61393e-08,0.865762,0.000651414,-4.65812e-07,-5.91149e-09,0.866413,0.000650465,-4.83546e-07,7.50665e-09,0.867063,0.00064952,-4.61026e-07,-2.4115e-08,0.867712,0.000648526,-5.33371e-07,2.93486e-08,0.86836,0.000647547,-4.45325e-07,-3.36748e-08,0.869007,0.000646555,-5.4635e-07,4.57461e-08,0.869653,0.0006456,-4.09112e-07,-3.01002e-08,0.870298,0.000644691,-4.99412e-07,1.50501e-08,0.870942,0.000643738,-4.54262e-07,-3.01002e-08,0.871585,0.000642739,-5.44563e-07,4.57461e-08,0.872228,0.000641787,-4.07324e-07,-3.36748e-08,0.872869,0.000640871,-5.08349e-07,2.93486e-08,0.873509,0.000639943,-4.20303e-07,-2.4115e-08,0.874149,0.00063903,-4.92648e-07,7.50655e-09,0.874787,0.000638067,-4.70128e-07,-5.91126e-09,0.875425,0.000637109,-4.87862e-07,1.61385e-08,0.876062,0.000636182,-4.39447e-07,9.61961e-10,0.876697,0.000635306,-4.36561e-07,-1.99863e-08,0.877332,0.000634373,-4.9652e-07,1.93785e-08,0.877966,0.000633438,-4.38384e-07,2.07697e-09,0.878599,0.000632567,-4.32153e-07,-2.76864e-08,0.879231,0.00063162,-5.15212e-07,4.90641e-08,0.879862,0.000630737,-3.6802e-07,-4.93606e-08,0.880493,0.000629852,-5.16102e-07,2.9169e-08,0.881122,0.000628908,-4.28595e-07,-7.71083e-09,0.881751,0.000628027,-4.51727e-07,1.6744e-09,0.882378,0.000627129,-4.46704e-07,1.
01317e-09,0.883005,0.000626239,-4.43665e-07,-5.72703e-09,0.883631,0.000625334,-4.60846e-07,2.1895e-08,0.884255,0.000624478,-3.95161e-07,-2.22481e-08,0.88488,0.000623621,-4.61905e-07,7.4928e-09,0.885503,0.00062272,-4.39427e-07,-7.72306e-09,0.886125,0.000621818,-4.62596e-07,2.33995e-08,0.886746,0.000620963,-3.92398e-07,-2.62704e-08,0.887367,0.000620099,-4.71209e-07,2.20775e-08,0.887987,0.000619223,-4.04976e-07,-2.43496e-09,0.888605,0.000618406,-4.12281e-07,-1.23377e-08,0.889223,0.000617544,-4.49294e-07,-7.81876e-09,0.88984,0.000616622,-4.72751e-07,4.36128e-08,0.890457,0.000615807,-3.41912e-07,-4.7423e-08,0.891072,0.000614981,-4.84181e-07,2.68698e-08,0.891687,0.000614093,-4.03572e-07,-4.51384e-10,0.8923,0.000613285,-4.04926e-07,-2.50643e-08,0.892913,0.0006124,-4.80119e-07,4.11038e-08,0.893525,0.000611563,-3.56808e-07,-2.01414e-08,0.894136,0.000610789,-4.17232e-07,-2.01426e-08,0.894747,0.000609894,-4.7766e-07,4.11073e-08,0.895356,0.000609062,-3.54338e-07,-2.50773e-08,0.895965,0.000608278,-4.2957e-07,-4.02954e-10,0.896573,0.000607418,-4.30779e-07,2.66891e-08,0.89718,0.000606636,-3.50711e-07,-4.67489e-08,0.897786,0.000605795,-4.90958e-07,4.10972e-08,0.898391,0.000604936,-3.67666e-07,1.56948e-09,0.898996,0.000604205,-3.62958e-07,-4.73751e-08,0.8996,0.000603337,-5.05083e-07,6.87214e-08,0.900202,0.000602533,-2.98919e-07,-4.86966e-08,0.900805,0.000601789,-4.45009e-07,6.85589e-09,0.901406,0.00060092,-4.24441e-07,2.1273e-08,0.902007,0.000600135,-3.60622e-07,-3.23434e-08,0.902606,0.000599317,-4.57652e-07,4.84959e-08,0.903205,0.000598547,-3.12164e-07,-4.24309e-08,0.903803,0.000597795,-4.39457e-07,2.01844e-09,0.904401,0.000596922,-4.33402e-07,3.43571e-08,0.904997,0.000596159,-3.30331e-07,-2.02374e-08,0.905593,0.000595437,-3.91043e-07,-1.30123e-08,0.906188,0.000594616,-4.3008e-07,1.26819e-08,0.906782,0.000593794,-3.92034e-07,2.18894e-08,0.907376,0.000593076,-3.26366e-07,-4.06349e-08,0.907968,0.000592301,-4.4827e-07,2.1441e-08,0.90856,0.000591469,-3.83947e-07,1.44754e-08,0.909151,0.000590744,-3.40521e-07,-1.97379e-08,0.909742,0.000590004,-3.99735e-07,4.87161e-09,0.910331,0.000589219,-3.8512e-07,2.51532e-10,0.91092,0.00058845,-3.84366e-07,-5.87776e-09,0.911508,0.000587663,-4.01999e-07,2.32595e-08,0.912096,0.000586929,-3.3222e-07,-2.75554e-08,0.912682,0.000586182,-4.14887e-07,2.73573e-08,0.913268,0.000585434,-3.32815e-07,-2.22692e-08,0.913853,0.000584702,-3.99622e-07,2.11486e-09,0.914437,0.000583909,-3.93278e-07,1.38098e-08,0.915021,0.000583164,-3.51848e-07,2.25042e-09,0.915604,0.000582467,-3.45097e-07,-2.28115e-08,0.916186,0.000581708,-4.13531e-07,2.93911e-08,0.916767,0.000580969,-3.25358e-07,-3.51481e-08,0.917348,0.000580213,-4.30803e-07,5.15967e-08,0.917928,0.000579506,-2.76012e-07,-5.20296e-08,0.918507,0.000578798,-4.32101e-07,3.73124e-08,0.919085,0.000578046,-3.20164e-07,-3.76154e-08,0.919663,0.000577293,-4.3301e-07,5.35447e-08,0.92024,0.000576587,-2.72376e-07,-5.7354e-08,0.920816,0.000575871,-4.44438e-07,5.66621e-08,0.921391,0.000575152,-2.74452e-07,-5.00851e-08,0.921966,0.000574453,-4.24707e-07,2.4469e-08,0.92254,0.000573677,-3.513e-07,1.18138e-08,0.923114,0.000573009,-3.15859e-07,-1.21195e-08,0.923686,0.000572341,-3.52217e-07,-2.29403e-08,0.924258,0.000571568,-4.21038e-07,4.4276e-08,0.924829,0.000570859,-2.8821e-07,-3.49546e-08,0.9254,0.000570178,-3.93074e-07,3.59377e-08,0.92597,0.000569499,-2.85261e-07,-4.91915e-08,0.926539,0.000568781,-4.32835e-07,4.16189e-08,0.927107,0.00056804,-3.07979e-07,1.92523e-09,0.927675,0.00056743,-3.02203e-07,-4.93198e-08,0.928242,0.000566678,-4.50162e-07,7.61447e-08,0.928
809,0.000566006,-2.21728e-07,-7.6445e-08,0.929374,0.000565333,-4.51063e-07,5.08216e-08,0.929939,0.000564583,-2.98599e-07,-7.63212e-09,0.930503,0.000563963,-3.21495e-07,-2.02931e-08,0.931067,0.000563259,-3.82374e-07,2.92001e-08,0.93163,0.000562582,-2.94774e-07,-3.69025e-08,0.932192,0.000561882,-4.05482e-07,5.88053e-08,0.932754,0.000561247,-2.29066e-07,-7.91094e-08,0.933315,0.000560552,-4.66394e-07,7.88184e-08,0.933875,0.000559856,-2.29939e-07,-5.73501e-08,0.934434,0.000559224,-4.01989e-07,3.13727e-08,0.934993,0.000558514,-3.07871e-07,-8.53611e-09,0.935551,0.000557873,-3.33479e-07,2.77175e-09,0.936109,0.000557214,-3.25164e-07,-2.55091e-09,0.936666,0.000556556,-3.32817e-07,7.43188e-09,0.937222,0.000555913,-3.10521e-07,-2.71766e-08,0.937778,0.00055521,-3.92051e-07,4.167e-08,0.938333,0.000554551,-2.67041e-07,-2.02941e-08,0.938887,0.000553956,-3.27923e-07,-2.00984e-08,0.93944,0.00055324,-3.88218e-07,4.10828e-08,0.939993,0.000552587,-2.6497e-07,-2.50237e-08,0.940546,0.000551982,-3.40041e-07,-5.92583e-10,0.941097,0.0005513,-3.41819e-07,2.7394e-08,0.941648,0.000550698,-2.59637e-07,-4.93788e-08,0.942199,0.000550031,-4.07773e-07,5.09119e-08,0.942748,0.000549368,-2.55038e-07,-3.50595e-08,0.943297,0.000548753,-3.60216e-07,2.97214e-08,0.943846,0.000548122,-2.71052e-07,-2.42215e-08,0.944394,0.000547507,-3.43716e-07,7.55985e-09,0.944941,0.000546842,-3.21037e-07,-6.01796e-09,0.945487,0.000546182,-3.3909e-07,1.65119e-08,0.946033,0.000545553,-2.89555e-07,-4.2498e-10,0.946578,0.000544973,-2.9083e-07,-1.4812e-08,0.947123,0.000544347,-3.35266e-07,6.83068e-11,0.947667,0.000543676,-3.35061e-07,1.45388e-08,0.94821,0.00054305,-2.91444e-07,1.38123e-09,0.948753,0.000542471,-2.87301e-07,-2.00637e-08,0.949295,0.000541836,-3.47492e-07,1.92688e-08,0.949837,0.000541199,-2.89685e-07,2.59298e-09,0.950378,0.000540628,-2.81906e-07,-2.96407e-08,0.950918,0.000539975,-3.70829e-07,5.63652e-08,0.951458,0.000539402,-2.01733e-07,-7.66107e-08,0.951997,0.000538769,-4.31565e-07,7.12638e-08,0.952535,0.00053812,-2.17774e-07,-2.96305e-08,0.953073,0.000537595,-3.06665e-07,-1.23464e-08,0.95361,0.000536945,-3.43704e-07,1.94114e-08,0.954147,0.000536316,-2.8547e-07,-5.69451e-09,0.954683,0.000535728,-3.02554e-07,3.36666e-09,0.955219,0.000535133,-2.92454e-07,-7.77208e-09,0.955753,0.000534525,-3.1577e-07,2.77216e-08,0.956288,0.000533976,-2.32605e-07,-4.35097e-08,0.956821,0.00053338,-3.63134e-07,2.7108e-08,0.957354,0.000532735,-2.8181e-07,-5.31772e-09,0.957887,0.000532156,-2.97764e-07,-5.83718e-09,0.958419,0.000531543,-3.15275e-07,2.86664e-08,0.95895,0.000530998,-2.29276e-07,-4.9224e-08,0.959481,0.000530392,-3.76948e-07,4.90201e-08,0.960011,0.000529785,-2.29887e-07,-2.76471e-08,0.96054,0.000529243,-3.12829e-07,1.96385e-09,0.961069,0.000528623,-3.06937e-07,1.97917e-08,0.961598,0.000528068,-2.47562e-07,-2.15261e-08,0.962125,0.000527508,-3.1214e-07,6.70795e-09,0.962653,0.000526904,-2.92016e-07,-5.30573e-09,0.963179,0.000526304,-3.07934e-07,1.4515e-08,0.963705,0.000525732,-2.64389e-07,6.85048e-09,0.964231,0.000525224,-2.43837e-07,-4.19169e-08,0.964756,0.00052461,-3.69588e-07,4.1608e-08,0.96528,0.000523996,-2.44764e-07,-5.30598e-09,0.965804,0.000523491,-2.60682e-07,-2.03841e-08,0.966327,0.000522908,-3.21834e-07,2.72378e-08,0.966849,0.000522346,-2.40121e-07,-2.89625e-08,0.967371,0.000521779,-3.27008e-07,2.90075e-08,0.967893,0.000521212,-2.39986e-07,-2.74629e-08,0.968414,0.00052065,-3.22374e-07,2.12396e-08,0.968934,0.000520069,-2.58656e-07,2.10922e-09,0.969454,0.000519558,-2.52328e-07,-2.96765e-08,0.969973,0.000518964,-3.41357e-07,5.6992e-08,0.970492,0.00
0518452,-1.70382e-07,-7.90821e-08,0.97101,0.000517874,-4.07628e-07,8.05224e-08,0.971528,0.000517301,-1.66061e-07,-6.41937e-08,0.972045,0.000516776,-3.58642e-07,5.70429e-08,0.972561,0.00051623,-1.87513e-07,-4.47686e-08,0.973077,0.00051572,-3.21819e-07,2.82237e-09,0.973593,0.000515085,-3.13352e-07,3.34792e-08,0.974108,0.000514559,-2.12914e-07,-1.75298e-08,0.974622,0.000514081,-2.65503e-07,-2.29648e-08,0.975136,0.000513481,-3.34398e-07,4.97843e-08,0.975649,0.000512961,-1.85045e-07,-5.6963e-08,0.976162,0.00051242,-3.55934e-07,5.88585e-08,0.976674,0.000511885,-1.79359e-07,-5.92616e-08,0.977185,0.000511348,-3.57143e-07,5.89785e-08,0.977696,0.000510811,-1.80208e-07,-5.74433e-08,0.978207,0.000510278,-3.52538e-07,5.15854e-08,0.978717,0.000509728,-1.97781e-07,-2.9689e-08,0.979226,0.000509243,-2.86848e-07,7.56591e-09,0.979735,0.000508692,-2.64151e-07,-5.74649e-10,0.980244,0.000508162,-2.65875e-07,-5.26732e-09,0.980752,0.000507615,-2.81677e-07,2.16439e-08,0.981259,0.000507116,-2.16745e-07,-2.17037e-08,0.981766,0.000506618,-2.81856e-07,5.56636e-09,0.982272,0.000506071,-2.65157e-07,-5.61689e-10,0.982778,0.000505539,-2.66842e-07,-3.31963e-09,0.983283,0.000504995,-2.76801e-07,1.38402e-08,0.983788,0.000504483,-2.3528e-07,7.56339e-09,0.984292,0.000504035,-2.1259e-07,-4.40938e-08,0.984796,0.000503478,-3.44871e-07,4.96026e-08,0.985299,0.000502937,-1.96064e-07,-3.51071e-08,0.985802,0.000502439,-3.01385e-07,3.12212e-08,0.986304,0.00050193,-2.07721e-07,-3.0173e-08,0.986806,0.000501424,-2.9824e-07,2.9866e-08,0.987307,0.000500917,-2.08642e-07,-2.96865e-08,0.987808,0.000500411,-2.97702e-07,2.92753e-08,0.988308,0.000499903,-2.09876e-07,-2.78101e-08,0.988807,0.0004994,-2.93306e-07,2.23604e-08,0.989307,0.000498881,-2.26225e-07,-2.02681e-09,0.989805,0.000498422,-2.32305e-07,-1.42531e-08,0.990303,0.000497915,-2.75065e-07,-5.65232e-10,0.990801,0.000497363,-2.76761e-07,1.65141e-08,0.991298,0.000496859,-2.27218e-07,-5.88639e-09,0.991795,0.000496387,-2.44878e-07,7.0315e-09,0.992291,0.000495918,-2.23783e-07,-2.22396e-08,0.992787,0.000495404,-2.90502e-07,2.23224e-08,0.993282,0.00049489,-2.23535e-07,-7.44543e-09,0.993776,0.000494421,-2.45871e-07,7.45924e-09,0.994271,0.000493951,-2.23493e-07,-2.23915e-08,0.994764,0.000493437,-2.90668e-07,2.25021e-08,0.995257,0.000492923,-2.23161e-07,-8.01218e-09,0.99575,0.000492453,-2.47198e-07,9.54669e-09,0.996242,0.000491987,-2.18558e-07,-3.01746e-08,0.996734,0.000491459,-3.09082e-07,5.1547e-08,0.997225,0.000490996,-1.54441e-07,-5.68039e-08,0.997716,0.000490517,-3.24853e-07,5.64594e-08,0.998206,0.000490036,-1.55474e-07,-4.98245e-08,0.998696,0.000489576,-3.04948e-07,2.36292e-08,0.999186,0.000489037,-2.3406e-07,1.49121e-08,0.999674,0.000488613,-1.89324e-07,-2.3673e-08,1.00016,0.000488164,-2.60343e-07,2.01754e-08,1.00065,0.000487704,-1.99816e-07,-5.70288e-08,1.00114,0.000487133,-3.70903e-07,8.87303e-08,1.00162,0.000486657,-1.04712e-07,-5.94737e-08,1.00211,0.000486269,-2.83133e-07,2.99553e-08,1.0026,0.000485793,-1.93267e-07,-6.03474e-08,1.00308,0.000485225,-3.74309e-07,9.2225e-08,1.00357,0.000484754,-9.76345e-08,-7.0134e-08,1.00405,0.000484348,-3.08036e-07,6.91016e-08,1.00454,0.000483939,-1.00731e-07,-8.70633e-08,1.00502,0.000483476,-3.61921e-07,4.07328e-08,1.0055,0.000482875,-2.39723e-07,4.33413e-08,1.00599,0.000482525,-1.09699e-07,-9.48886e-08,1.00647,0.000482021,-3.94365e-07,9.77947e-08,1.00695,0.000481526,-1.00981e-07,-5.78713e-08,1.00743,0.00048115,-2.74595e-07,1.44814e-08,1.00791,0.000480645,-2.31151e-07,-5.42665e-11,1.00839,0.000480182,-2.31314e-07,-1.42643e-08,1.00887,0.000479677,-2.74106e
-07,5.71115e-08,1.00935,0.0004793,-1.02772e-07,-9.49724e-08,1.00983,0.000478809,-3.87689e-07,8.43596e-08,1.01031,0.000478287,-1.3461e-07,-4.04755e-09,1.01079,0.000478006,-1.46753e-07,-6.81694e-08,1.01127,0.000477508,-3.51261e-07,3.83067e-08,1.01174,0.00047692,-2.36341e-07,3.41521e-08,1.01222,0.00047655,-1.33885e-07,-5.57058e-08,1.0127,0.000476115,-3.01002e-07,6.94616e-08,1.01317,0.000475721,-9.26174e-08,-1.02931e-07,1.01365,0.000475227,-4.01412e-07,1.03846e-07,1.01412,0.000474736,-8.98751e-08,-7.40321e-08,1.0146,0.000474334,-3.11971e-07,7.30735e-08,1.01507,0.00047393,-9.27508e-08,-9.90527e-08,1.01554,0.000473447,-3.89909e-07,8.47188e-08,1.01602,0.000472921,-1.35753e-07,-1.40381e-09,1.01649,0.000472645,-1.39964e-07,-7.91035e-08,1.01696,0.000472128,-3.77275e-07,7.93993e-08,1.01744,0.000471612,-1.39077e-07,-7.52607e-11,1.01791,0.000471334,-1.39302e-07,-7.90983e-08,1.01838,0.000470818,-3.76597e-07,7.80499e-08,1.01885,0.000470299,-1.42448e-07,5.31733e-09,1.01932,0.00047003,-1.26496e-07,-9.93193e-08,1.01979,0.000469479,-4.24453e-07,1.53541e-07,1.02026,0.00046909,3.617e-08,-1.57217e-07,1.02073,0.000468691,-4.35482e-07,1.177e-07,1.02119,0.000468173,-8.23808e-08,-7.51659e-08,1.02166,0.000467783,-3.07878e-07,6.37538e-08,1.02213,0.000467358,-1.16617e-07,-6.064e-08,1.0226,0.000466943,-2.98537e-07,5.9597e-08,1.02306,0.000466525,-1.19746e-07,-5.85386e-08,1.02353,0.00046611,-2.95362e-07,5.53482e-08,1.024,0.000465685,-1.29317e-07,-4.36449e-08,1.02446,0.000465296,-2.60252e-07,2.20268e-11,1.02493,0.000464775,-2.60186e-07,4.35568e-08,1.02539,0.000464386,-1.29516e-07,-5.50398e-08,1.02586,0.000463961,-2.94635e-07,5.73932e-08,1.02632,0.000463544,-1.22456e-07,-5.53236e-08,1.02678,0.000463133,-2.88426e-07,4.46921e-08,1.02725,0.000462691,-1.5435e-07,-4.23534e-09,1.02771,0.000462369,-1.67056e-07,-2.77507e-08,1.02817,0.000461952,-2.50308e-07,-3.97101e-09,1.02863,0.000461439,-2.62221e-07,4.36348e-08,1.02909,0.000461046,-1.31317e-07,-5.13589e-08,1.02955,0.000460629,-2.85394e-07,4.25913e-08,1.03001,0.000460186,-1.5762e-07,2.0285e-10,1.03047,0.000459871,-1.57011e-07,-4.34027e-08,1.03093,0.000459427,-2.87219e-07,5.41987e-08,1.03139,0.000459015,-1.24623e-07,-5.4183e-08,1.03185,0.000458604,-2.87172e-07,4.33239e-08,1.03231,0.000458159,-1.572e-07,9.65817e-11,1.03277,0.000457845,-1.56911e-07,-4.37103e-08,1.03323,0.0004574,-2.88041e-07,5.55351e-08,1.03368,0.000456991,-1.21436e-07,-5.9221e-08,1.03414,0.00045657,-2.99099e-07,6.21394e-08,1.0346,0.000456158,-1.1268e-07,-7.01275e-08,1.03505,0.000455723,-3.23063e-07,9.91614e-08,1.03551,0.000455374,-2.55788e-08,-8.80996e-08,1.03596,0.000455058,-2.89878e-07,1.48184e-08,1.03642,0.000454523,-2.45422e-07,2.88258e-08,1.03687,0.000454119,-1.58945e-07,-1.09125e-08,1.03733,0.000453768,-1.91682e-07,1.48241e-08,1.03778,0.000453429,-1.4721e-07,-4.83838e-08,1.03823,0.00045299,-2.92361e-07,5.95019e-08,1.03869,0.000452584,-1.13856e-07,-7.04146e-08,1.03914,0.000452145,-3.25099e-07,1.02947e-07,1.03959,0.000451803,-1.62583e-08,-1.02955e-07,1.04004,0.000451462,-3.25123e-07,7.04544e-08,1.04049,0.000451023,-1.1376e-07,-5.96534e-08,1.04094,0.000450616,-2.9272e-07,4.89499e-08,1.04139,0.000450178,-1.45871e-07,-1.69369e-08,1.04184,0.000449835,-1.96681e-07,1.87977e-08,1.04229,0.000449498,-1.40288e-07,-5.82539e-08,1.04274,0.000449043,-3.1505e-07,9.50087e-08,1.04319,0.000448698,-3.00238e-08,-8.33623e-08,1.04364,0.000448388,-2.80111e-07,2.20363e-11,1.04409,0.000447828,-2.80045e-07,8.32742e-08,1.04454,0.000447517,-3.02221e-08,-9.47002e-08,1.04498,0.000447173,-3.14323e-07,5.7108e-08,1.04543,0.000446716,-1.42999e-0
7,-1.45225e-08,1.04588,0.000446386,-1.86566e-07,9.82022e-10,1.04632,0.000446016,-1.8362e-07,1.05944e-08,1.04677,0.00044568,-1.51837e-07,-4.33597e-08,1.04721,0.000445247,-2.81916e-07,4.36352e-08,1.04766,0.000444814,-1.51011e-07,-1.19717e-08,1.0481,0.000444476,-1.86926e-07,4.25158e-09,1.04855,0.000444115,-1.74171e-07,-5.03461e-09,1.04899,0.000443751,-1.89275e-07,1.58868e-08,1.04944,0.00044342,-1.41614e-07,-5.85127e-08,1.04988,0.000442961,-3.17152e-07,9.89548e-08,1.05032,0.000442624,-2.0288e-08,-9.88878e-08,1.05076,0.000442287,-3.16951e-07,5.81779e-08,1.05121,0.000441827,-1.42418e-07,-1.46144e-08,1.05165,0.000441499,-1.86261e-07,2.79892e-10,1.05209,0.000441127,-1.85421e-07,1.34949e-08,1.05253,0.000440797,-1.44937e-07,-5.42594e-08,1.05297,0.000440344,-3.07715e-07,8.43335e-08,1.05341,0.000439982,-5.47146e-08,-4.46558e-08,1.05385,0.000439738,-1.88682e-07,-2.49193e-08,1.05429,0.000439286,-2.6344e-07,2.5124e-08,1.05473,0.000438835,-1.88068e-07,4.36328e-08,1.05517,0.000438589,-5.71699e-08,-8.04459e-08,1.05561,0.000438234,-2.98508e-07,3.97324e-08,1.05605,0.000437756,-1.79311e-07,4.07258e-08,1.05648,0.000437519,-5.71332e-08,-8.34263e-08,1.05692,0.000437155,-3.07412e-07,5.45608e-08,1.05736,0.000436704,-1.4373e-07,-1.56078e-08,1.05779,0.000436369,-1.90553e-07,7.87043e-09,1.05823,0.000436012,-1.66942e-07,-1.58739e-08,1.05867,0.00043563,-2.14563e-07,5.56251e-08,1.0591,0.000435368,-4.76881e-08,-8.74172e-08,1.05954,0.000435011,-3.0994e-07,5.56251e-08,1.05997,0.000434558,-1.43064e-07,-1.58739e-08,1.06041,0.000434224,-1.90686e-07,7.87042e-09,1.06084,0.000433866,-1.67075e-07,-1.56078e-08,1.06127,0.000433485,-2.13898e-07,5.45609e-08,1.06171,0.000433221,-5.02157e-08,-8.34263e-08,1.06214,0.00043287,-3.00495e-07,4.07258e-08,1.06257,0.000432391,-1.78317e-07,3.97325e-08,1.063,0.000432154,-5.91198e-08,-8.04464e-08,1.06344,0.000431794,-3.00459e-07,4.36347e-08,1.06387,0.000431324,-1.69555e-07,2.5117e-08,1.0643,0.000431061,-9.42041e-08,-2.48934e-08,1.06473,0.000430798,-1.68884e-07,-4.47527e-08,1.06516,0.000430326,-3.03142e-07,8.46951e-08,1.06559,0.000429973,-4.90573e-08,-5.56089e-08,1.06602,0.000429708,-2.15884e-07,1.85314e-08,1.06645,0.000429332,-1.6029e-07,-1.85166e-08,1.06688,0.000428956,-2.1584e-07,5.5535e-08,1.06731,0.000428691,-4.92347e-08,-8.44142e-08,1.06774,0.000428339,-3.02477e-07,4.37032e-08,1.06816,0.000427865,-1.71368e-07,2.88107e-08,1.06859,0.000427609,-8.49356e-08,-3.97367e-08,1.06902,0.00042732,-2.04146e-07,1.09267e-08,1.06945,0.000426945,-1.71365e-07,-3.97023e-09,1.06987,0.00042659,-1.83276e-07,4.9542e-09,1.0703,0.000426238,-1.68414e-07,-1.58466e-08,1.07073,0.000425854,-2.15953e-07,5.84321e-08,1.07115,0.000425597,-4.0657e-08,-9.86725e-08,1.07158,0.00042522,-3.36674e-07,9.78392e-08,1.072,0.00042484,-4.31568e-08,-5.42658e-08,1.07243,0.000424591,-2.05954e-07,1.45377e-11,1.07285,0.000424179,-2.0591e-07,5.42076e-08,1.07328,0.00042393,-4.32877e-08,-9.76357e-08,1.0737,0.00042355,-3.36195e-07,9.79165e-08,1.07412,0.000423172,-4.24451e-08,-5.56118e-08,1.07455,0.00042292,-2.09281e-07,5.32143e-09,1.07497,0.000422518,-1.93316e-07,3.43261e-08,1.07539,0.000422234,-9.0338e-08,-2.34165e-08,1.07581,0.000421983,-1.60588e-07,-5.98692e-08,1.07623,0.000421482,-3.40195e-07,1.43684e-07,1.07666,0.000421233,9.08574e-08,-1.5724e-07,1.07708,0.000420943,-3.80862e-07,1.27647e-07,1.0775,0.000420564,2.0791e-09,-1.1493e-07,1.07792,0.000420223,-3.4271e-07,9.36534e-08,1.07834,0.000419819,-6.17499e-08,-2.12653e-08,1.07876,0.000419632,-1.25546e-07,-8.59219e-09,1.07918,0.000419355,-1.51322e-07,-6.35752e-08,1.0796,0.000418861,-3.42048e-07,1.
43684e-07,1.08002,0.000418608,8.90034e-08,-1.53532e-07,1.08043,0.000418326,-3.71593e-07,1.12817e-07,1.08085,0.000417921,-3.31414e-08,-5.93184e-08,1.08127,0.000417677,-2.11097e-07,5.24697e-09,1.08169,0.00041727,-1.95356e-07,3.83305e-08,1.0821,0.000416995,-8.03642e-08,-3.93597e-08,1.08252,0.000416716,-1.98443e-07,-1.0094e-10,1.08294,0.000416319,-1.98746e-07,3.97635e-08,1.08335,0.00041604,-7.94557e-08,-3.97437e-08,1.08377,0.000415762,-1.98687e-07,1.94215e-12,1.08419,0.000415365,-1.98681e-07,3.97359e-08,1.0846,0.000415087,-7.94732e-08,-3.97362e-08,1.08502,0.000414809,-1.98682e-07,-4.31063e-13,1.08543,0.000414411,-1.98683e-07,3.97379e-08,1.08584,0.000414133,-7.94694e-08,-3.97418e-08,1.08626,0.000413855,-1.98695e-07,2.00563e-11,1.08667,0.000413458,-1.98635e-07,3.96616e-08,1.08709,0.000413179,-7.965e-08,-3.9457e-08,1.0875,0.000412902,-1.98021e-07,-1.04281e-09,1.08791,0.000412502,-2.01149e-07,4.36282e-08,1.08832,0.000412231,-7.02648e-08,-5.42608e-08,1.08874,0.000411928,-2.33047e-07,5.42057e-08,1.08915,0.000411624,-7.04301e-08,-4.33527e-08,1.08956,0.000411353,-2.00488e-07,-4.07378e-12,1.08997,0.000410952,-2.005e-07,4.3369e-08,1.09038,0.000410681,-7.03934e-08,-5.42627e-08,1.09079,0.000410378,-2.33182e-07,5.44726e-08,1.0912,0.000410075,-6.97637e-08,-4.44186e-08,1.09161,0.000409802,-2.03019e-07,3.99235e-09,1.09202,0.000409408,-1.91042e-07,2.84491e-08,1.09243,0.000409111,-1.05695e-07,1.42043e-09,1.09284,0.000408904,-1.01434e-07,-3.41308e-08,1.09325,0.000408599,-2.03826e-07,1.58937e-08,1.09366,0.000408239,-1.56145e-07,-2.94438e-08,1.09406,0.000407838,-2.44476e-07,1.01881e-07,1.09447,0.000407655,6.11676e-08,-1.39663e-07,1.09488,0.000407358,-3.57822e-07,9.91432e-08,1.09529,0.00040694,-6.03921e-08,-1.84912e-08,1.09569,0.000406764,-1.15866e-07,-2.51785e-08,1.0961,0.000406457,-1.91401e-07,-4.03115e-12,1.09651,0.000406074,-1.91413e-07,2.51947e-08,1.09691,0.000405767,-1.15829e-07,1.84346e-08,1.09732,0.00040559,-6.05254e-08,-9.89332e-08,1.09772,0.000405172,-3.57325e-07,1.3888e-07,1.09813,0.000404874,5.93136e-08,-9.8957e-08,1.09853,0.000404696,-2.37557e-07,1.853e-08,1.09894,0.000404277,-1.81968e-07,2.48372e-08,1.09934,0.000403987,-1.07456e-07,1.33047e-09,1.09975,0.000403776,-1.03465e-07,-3.01591e-08,1.10015,0.000403479,-1.93942e-07,9.66054e-11,1.10055,0.000403091,-1.93652e-07,2.97727e-08,1.10096,0.000402793,-1.04334e-07,2.19273e-11,1.10136,0.000402585,-1.04268e-07,-2.98604e-08,1.10176,0.000402287,-1.93849e-07,2.10325e-10,1.10216,0.0004019,-1.93218e-07,2.90191e-08,1.10256,0.0004016,-1.06161e-07,2.92264e-09,1.10297,0.000401397,-9.73931e-08,-4.07096e-08,1.10337,0.00040108,-2.19522e-07,4.07067e-08,1.10377,0.000400763,-9.7402e-08,-2.90783e-09,1.10417,0.000400559,-1.06126e-07,-2.90754e-08,1.10457,0.00040026,-1.93352e-07,9.00021e-14,1.10497,0.000399873,-1.93351e-07,2.9075e-08,1.10537,0.000399574,-1.06126e-07,2.90902e-09,1.10577,0.00039937,-9.73992e-08,-4.07111e-08,1.10617,0.000399053,-2.19533e-07,4.07262e-08,1.10657,0.000398736,-9.73541e-08,-2.98424e-09,1.10697,0.000398533,-1.06307e-07,-2.87892e-08,1.10736,0.000398234,-1.92674e-07,-1.06824e-09,1.10776,0.000397845,-1.95879e-07,3.30622e-08,1.10816,0.000397552,-9.66926e-08,-1.19712e-08,1.10856,0.000397323,-1.32606e-07,1.48225e-08,1.10895,0.000397102,-8.81387e-08,-4.73187e-08,1.10935,0.000396784,-2.30095e-07,5.52429e-08,1.10975,0.00039649,-6.4366e-08,-5.44437e-08,1.11014,0.000396198,-2.27697e-07,4.33226e-08,1.11054,0.000395872,-9.77293e-08,3.62656e-10,1.11094,0.000395678,-9.66414e-08,-4.47732e-08,1.11133,0.00039535,-2.30961e-07,5.95208e-08,1.11173,0.000395067,-5.23985e-08,-
7.41008e-08,1.11212,0.00039474,-2.74701e-07,1.17673e-07,1.11252,0.000394543,7.83181e-08,-1.58172e-07,1.11291,0.000394225,-3.96199e-07,1.57389e-07,1.1133,0.000393905,7.59679e-08,-1.13756e-07,1.1137,0.000393716,-2.653e-07,5.92165e-08,1.11409,0.000393363,-8.76507e-08,-3.90074e-09,1.11449,0.000393176,-9.93529e-08,-4.36136e-08,1.11488,0.000392846,-2.30194e-07,5.91457e-08,1.11527,0.000392563,-5.27564e-08,-7.376e-08,1.11566,0.000392237,-2.74037e-07,1.16685e-07,1.11606,0.000392039,7.60189e-08,-1.54562e-07,1.11645,0.000391727,-3.87667e-07,1.43935e-07,1.11684,0.000391384,4.4137e-08,-6.35487e-08,1.11723,0.000391281,-1.46509e-07,-8.94896e-09,1.11762,0.000390961,-1.73356e-07,-1.98647e-08,1.11801,0.000390555,-2.3295e-07,8.8408e-08,1.1184,0.000390354,3.22736e-08,-9.53486e-08,1.11879,0.000390133,-2.53772e-07,5.45677e-08,1.11918,0.000389789,-9.0069e-08,-3.71296e-09,1.11957,0.000389598,-1.01208e-07,-3.97159e-08,1.11996,0.000389276,-2.20355e-07,4.33671e-08,1.12035,0.000388966,-9.02542e-08,-1.45431e-08,1.12074,0.000388741,-1.33883e-07,1.48052e-08,1.12113,0.000388518,-8.94678e-08,-4.46778e-08,1.12152,0.000388205,-2.23501e-07,4.46966e-08,1.12191,0.000387892,-8.94114e-08,-1.48992e-08,1.12229,0.000387669,-1.34109e-07,1.49003e-08,1.12268,0.000387445,-8.94082e-08,-4.47019e-08,1.12307,0.000387132,-2.23514e-07,4.4698e-08,1.12345,0.000386819,-8.942e-08,-1.48806e-08,1.12384,0.000386596,-1.34062e-07,1.48245e-08,1.12423,0.000386372,-8.95885e-08,-4.44172e-08,1.12461,0.00038606,-2.2284e-07,4.36351e-08,1.125,0.000385745,-9.19348e-08,-1.09139e-08,1.12539,0.000385528,-1.24677e-07,2.05584e-11,1.12577,0.000385279,-1.24615e-07,1.08317e-08,1.12616,0.000385062,-9.21198e-08,-4.33473e-08,1.12654,0.000384748,-2.22162e-07,4.33481e-08,1.12693,0.000384434,-9.21174e-08,-1.08356e-08,1.12731,0.000384217,-1.24624e-07,-5.50907e-12,1.12769,0.000383968,-1.24641e-07,1.08577e-08,1.12808,0.000383751,-9.20679e-08,-4.34252e-08,1.12846,0.000383437,-2.22343e-07,4.36337e-08,1.12884,0.000383123,-9.14422e-08,-1.19005e-08,1.12923,0.000382904,-1.27144e-07,3.96813e-09,1.12961,0.000382662,-1.15239e-07,-3.97207e-09,1.12999,0.000382419,-1.27155e-07,1.19201e-08,1.13038,0.000382201,-9.1395e-08,-4.37085e-08,1.13076,0.000381887,-2.2252e-07,4.37046e-08,1.13114,0.000381573,-9.14068e-08,-1.19005e-08,1.13152,0.000381355,-1.27108e-07,3.89734e-09,1.1319,0.000381112,-1.15416e-07,-3.68887e-09,1.13228,0.00038087,-1.26483e-07,1.08582e-08,1.13266,0.00038065,-9.39083e-08,-3.97438e-08,1.13304,0.000380343,-2.1314e-07,2.89076e-08,1.13342,0.000380003,-1.26417e-07,4.33225e-08,1.1338,0.00037988,3.55072e-09,-8.29883e-08,1.13418,0.000379638,-2.45414e-07,5.0212e-08,1.13456,0.000379298,-9.47781e-08,1.34964e-09,1.13494,0.000379113,-9.07292e-08,-5.56105e-08,1.13532,0.000378764,-2.57561e-07,1.01883e-07,1.1357,0.000378555,4.80889e-08,-1.13504e-07,1.13608,0.000378311,-2.92423e-07,1.13713e-07,1.13646,0.000378067,4.87176e-08,-1.02931e-07,1.13683,0.000377856,-2.60076e-07,5.95923e-08,1.13721,0.000377514,-8.12988e-08,-1.62288e-08,1.13759,0.000377303,-1.29985e-07,5.32278e-09,1.13797,0.000377059,-1.14017e-07,-5.06237e-09,1.13834,0.000376816,-1.29204e-07,1.49267e-08,1.13872,0.000376602,-8.44237e-08,-5.46444e-08,1.1391,0.000376269,-2.48357e-07,8.44417e-08,1.13947,0.000376026,4.96815e-09,-4.47039e-08,1.13985,0.000375902,-1.29143e-07,-2.48355e-08,1.14023,0.000375569,-2.0365e-07,2.48368e-08,1.1406,0.000375236,-1.2914e-07,4.46977e-08,1.14098,0.000375112,4.95341e-09,-8.44184e-08,1.14135,0.000374869,-2.48302e-07,5.45572e-08,1.14173,0.000374536,-8.463e-08,-1.46013e-08,1.1421,0.000374323,-1.28434e-07,3.8478
e-09,1.14247,0.000374077,-1.1689e-07,-7.89941e-10,1.14285,0.000373841,-1.1926e-07,-6.88042e-10,1.14322,0.0003736,-1.21324e-07,3.54213e-09,1.1436,0.000373368,-1.10698e-07,-1.34805e-08,1.14397,0.000373107,-1.51139e-07,5.03798e-08,1.14434,0.000372767,0.,0.}; + + template + __device__ __forceinline__ void RGB2LuvConvert_32F(const T& src, D& dst) + { + const float _d = 1.f / (0.950456f + 15 + 1.088754f * 3); + const float _un = 13 * (4 * 0.950456f * _d); + const float _vn = 13 * (9 * _d); + + float B = blueIdx == 0 ? src.x : src.z; + float G = src.y; + float R = blueIdx == 0 ? src.z : src.x; + + if (srgb) + { + B = splineInterpolate(B * GAMMA_TAB_SIZE, c_sRGBGammaTab, GAMMA_TAB_SIZE); + G = splineInterpolate(G * GAMMA_TAB_SIZE, c_sRGBGammaTab, GAMMA_TAB_SIZE); + R = splineInterpolate(R * GAMMA_TAB_SIZE, c_sRGBGammaTab, GAMMA_TAB_SIZE); + } + + float X = R * 0.412453f + G * 0.357580f + B * 0.180423f; + float Y = R * 0.212671f + G * 0.715160f + B * 0.072169f; + float Z = R * 0.019334f + G * 0.119193f + B * 0.950227f; + + float L = splineInterpolate(Y * (LAB_CBRT_TAB_SIZE / 1.5f), c_LabCbrtTab, LAB_CBRT_TAB_SIZE); + L = 116.f * L - 16.f; + + const float d = (4 * 13) / ::fmaxf(X + 15 * Y + 3 * Z, numeric_limits::epsilon()); + float u = L * (X * d - _un); + float v = L * ((9 * 0.25f) * Y * d - _vn); + + dst.x = L; + dst.y = u; + dst.z = v; + } + + template + __device__ __forceinline__ void RGB2LuvConvert_8U(const T& src, D& dst) + { + float3 srcf, dstf; + + srcf.x = src.x * (1.f / 255.f); + srcf.y = src.y * (1.f / 255.f); + srcf.z = src.z * (1.f / 255.f); + + RGB2LuvConvert_32F(srcf, dstf); + + dst.x = saturate_cast(dstf.x * 2.55f); + dst.y = saturate_cast(dstf.y * 0.72033898305084743f + 96.525423728813564f); + dst.z = saturate_cast(dstf.z * 0.9732824427480916f + 136.259541984732824f); + } + + template struct RGB2Luv; + + template + struct RGB2Luv + : unary_function::vec_type, typename TypeVec::vec_type> + { + __device__ __forceinline__ typename TypeVec::vec_type operator ()(const typename TypeVec::vec_type& src) const + { + typename TypeVec::vec_type dst; + + RGB2LuvConvert_8U(src, dst); + + return dst; + } + + __host__ __device__ __forceinline__ RGB2Luv() {} + __host__ __device__ __forceinline__ RGB2Luv(const RGB2Luv&) {} + }; + + template + struct RGB2Luv + : unary_function::vec_type, typename TypeVec::vec_type> + { + __device__ __forceinline__ typename TypeVec::vec_type operator ()(const typename TypeVec::vec_type& src) const + { + typename TypeVec::vec_type dst; + + RGB2LuvConvert_32F(src, dst); + + return dst; + } + + __host__ __device__ __forceinline__ RGB2Luv() {} + __host__ __device__ __forceinline__ RGB2Luv(const RGB2Luv&) {} + }; + } + +#define OPENCV_GPU_IMPLEMENT_RGB2Luv_TRAITS(name, scn, dcn, srgb, blueIdx) \ + template struct name ## _traits \ + { \ + typedef ::cv::gpu::device::color_detail::RGB2Luv functor_type; \ + static __host__ __device__ __forceinline__ functor_type create_functor() \ + { \ + return functor_type(); \ + } \ + }; + + namespace color_detail + { + template + __device__ __forceinline__ void Luv2RGBConvert_32F(const T& src, D& dst) + { + const float _d = 1.f / (0.950456f + 15 + 1.088754f * 3); + const float _un = 4 * 0.950456f * _d; + const float _vn = 9 * _d; + + float L = src.x; + float u = src.y; + float v = src.z; + + float Y = (L + 16.f) * (1.f / 116.f); + Y = Y * Y * Y; + + float d = (1.f / 13.f) / L; + u = u * d + _un; + v = v * d + _vn; + + float iv = 1.f / v; + float X = 2.25f * u * Y * iv; + float Z = (12 - 3 * u - 20 * v) * Y * 0.25f * iv; + + float B = 
0.055648f * X - 0.204043f * Y + 1.057311f * Z;
+            float G = -0.969256f * X + 1.875991f * Y + 0.041556f * Z;
+            float R = 3.240479f * X - 1.537150f * Y - 0.498535f * Z;
+
+            if (srgb)
+            {
+                B = splineInterpolate(B * GAMMA_TAB_SIZE, c_sRGBInvGammaTab, GAMMA_TAB_SIZE);
+                G = splineInterpolate(G * GAMMA_TAB_SIZE, c_sRGBInvGammaTab, GAMMA_TAB_SIZE);
+                R = splineInterpolate(R * GAMMA_TAB_SIZE, c_sRGBInvGammaTab, GAMMA_TAB_SIZE);
+            }
+
+            dst.x = blueIdx == 0 ? B : R;
+            dst.y = G;
+            dst.z = blueIdx == 0 ? R : B;
+            setAlpha(dst, ColorChannel<float>::max());
+        }
+
+        template <bool srgb, int blueIdx, typename T, typename D>
+        __device__ __forceinline__ void Luv2RGBConvert_8U(const T& src, D& dst)
+        {
+            float3 srcf, dstf;
+
+            srcf.x = src.x * (100.f / 255.f);
+            srcf.y = src.y * 1.388235294117647f - 134.f;
+            srcf.z = src.z * 1.027450980392157f - 140.f;
+
+            Luv2RGBConvert_32F<srgb, blueIdx>(srcf, dstf);
+
+            dst.x = saturate_cast<uchar>(dstf.x * 255.f);
+            dst.y = saturate_cast<uchar>(dstf.y * 255.f);
+            dst.z = saturate_cast<uchar>(dstf.z * 255.f);
+            setAlpha(dst, ColorChannel<uchar>::max());
+        }
+
+        template <typename T, int scn, int dcn, bool srgb, int blueIdx> struct Luv2RGB;
+
+        template <int scn, int dcn, bool srgb, int blueIdx>
+        struct Luv2RGB<uchar, scn, dcn, srgb, blueIdx>
+            : unary_function<typename TypeVec<uchar, scn>::vec_type, typename TypeVec<uchar, dcn>::vec_type>
+        {
+            __device__ __forceinline__ typename TypeVec<uchar, dcn>::vec_type operator ()(const typename TypeVec<uchar, scn>::vec_type& src) const
+            {
+                typename TypeVec<uchar, dcn>::vec_type dst;
+
+                Luv2RGBConvert_8U<srgb, blueIdx>(src, dst);
+
+                return dst;
+            }
+
+            __host__ __device__ __forceinline__ Luv2RGB() {}
+            __host__ __device__ __forceinline__ Luv2RGB(const Luv2RGB&) {}
+        };
+
+        template <int scn, int dcn, bool srgb, int blueIdx>
+        struct Luv2RGB<float, scn, dcn, srgb, blueIdx>
+            : unary_function<typename TypeVec<float, scn>::vec_type, typename TypeVec<float, dcn>::vec_type>
+        {
+            __device__ __forceinline__ typename TypeVec<float, dcn>::vec_type operator ()(const typename TypeVec<float, scn>::vec_type& src) const
+            {
+                typename TypeVec<float, dcn>::vec_type dst;
+
+                Luv2RGBConvert_32F<srgb, blueIdx>(src, dst);
+
+                return dst;
+            }
+
+            __host__ __device__ __forceinline__ Luv2RGB() {}
+            __host__ __device__ __forceinline__ Luv2RGB(const Luv2RGB&) {}
+        };
+    }
+
+#define OPENCV_GPU_IMPLEMENT_Luv2RGB_TRAITS(name, scn, dcn, srgb, blueIdx) \
+    template <typename T> struct name ## _traits \
+    { \
+        typedef ::cv::gpu::device::color_detail::Luv2RGB<T, scn, dcn, srgb, blueIdx> functor_type; \
+        static __host__ __device__ __forceinline__ functor_type create_functor() \
+        { \
+            return functor_type(); \
+        } \
+    };
+
+    #undef CV_DESCALE
+
+}}} // namespace cv { namespace gpu { namespace device
+
+#endif // __OPENCV_GPU_COLOR_DETAIL_HPP__
diff --git a/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/detail/reduce.hpp b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/detail/reduce.hpp
new file mode 100644
index 0000000..091a160
--- /dev/null
+++ b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/detail/reduce.hpp
@@ -0,0 +1,361 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+//  By downloading, copying, installing or using the software you agree to this license.
+//  If you do not agree to this license, do not download, install,
+//  copy or use the software.
+//
+//
+//                           License Agreement
+//                For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef __OPENCV_GPU_REDUCE_DETAIL_HPP__ +#define __OPENCV_GPU_REDUCE_DETAIL_HPP__ + +#include +#include "../warp.hpp" +#include "../warp_shuffle.hpp" + +namespace cv { namespace gpu { namespace device +{ + namespace reduce_detail + { + template struct GetType; + template struct GetType + { + typedef T type; + }; + template struct GetType + { + typedef T type; + }; + template struct GetType + { + typedef T type; + }; + + template + struct For + { + template + static __device__ void loadToSmem(const PointerTuple& smem, const ValTuple& val, unsigned int tid) + { + thrust::get(smem)[tid] = thrust::get(val); + + For::loadToSmem(smem, val, tid); + } + template + static __device__ void loadFromSmem(const PointerTuple& smem, const ValTuple& val, unsigned int tid) + { + thrust::get(val) = thrust::get(smem)[tid]; + + For::loadFromSmem(smem, val, tid); + } + + template + static __device__ void merge(const PointerTuple& smem, const ValTuple& val, unsigned int tid, unsigned int delta, const OpTuple& op) + { + typename GetType::type>::type reg = thrust::get(smem)[tid + delta]; + thrust::get(smem)[tid] = thrust::get(val) = thrust::get(op)(thrust::get(val), reg); + + For::merge(smem, val, tid, delta, op); + } + template + static __device__ void mergeShfl(const ValTuple& val, unsigned int delta, unsigned int width, const OpTuple& op) + { + typename GetType::type>::type reg = shfl_down(thrust::get(val), delta, width); + thrust::get(val) = thrust::get(op)(thrust::get(val), reg); + + For::mergeShfl(val, delta, width, op); + } + }; + template + struct For + { + template + static __device__ void loadToSmem(const PointerTuple&, const ValTuple&, unsigned int) + { + } + template + static __device__ void loadFromSmem(const PointerTuple&, const ValTuple&, unsigned int) + { + } + + template + static __device__ void merge(const PointerTuple&, const ValTuple&, unsigned int, unsigned int, const OpTuple&) + { + } + template + static __device__ void mergeShfl(const ValTuple&, unsigned int, unsigned int, const OpTuple&) + { + } + }; + + template + 
__device__ __forceinline__ void loadToSmem(volatile T* smem, T& val, unsigned int tid) + { + smem[tid] = val; + } + template + __device__ __forceinline__ void loadFromSmem(volatile T* smem, T& val, unsigned int tid) + { + val = smem[tid]; + } + template + __device__ __forceinline__ void loadToSmem(const thrust::tuple& smem, + const thrust::tuple& val, + unsigned int tid) + { + For<0, thrust::tuple_size >::value>::loadToSmem(smem, val, tid); + } + template + __device__ __forceinline__ void loadFromSmem(const thrust::tuple& smem, + const thrust::tuple& val, + unsigned int tid) + { + For<0, thrust::tuple_size >::value>::loadFromSmem(smem, val, tid); + } + + template + __device__ __forceinline__ void merge(volatile T* smem, T& val, unsigned int tid, unsigned int delta, const Op& op) + { + T reg = smem[tid + delta]; + smem[tid] = val = op(val, reg); + } + template + __device__ __forceinline__ void mergeShfl(T& val, unsigned int delta, unsigned int width, const Op& op) + { + T reg = shfl_down(val, delta, width); + val = op(val, reg); + } + template + __device__ __forceinline__ void merge(const thrust::tuple& smem, + const thrust::tuple& val, + unsigned int tid, + unsigned int delta, + const thrust::tuple& op) + { + For<0, thrust::tuple_size >::value>::merge(smem, val, tid, delta, op); + } + template + __device__ __forceinline__ void mergeShfl(const thrust::tuple& val, + unsigned int delta, + unsigned int width, + const thrust::tuple& op) + { + For<0, thrust::tuple_size >::value>::mergeShfl(val, delta, width, op); + } + + template struct Generic + { + template + static __device__ void reduce(Pointer smem, Reference val, unsigned int tid, Op op) + { + loadToSmem(smem, val, tid); + if (N >= 32) + __syncthreads(); + + if (N >= 2048) + { + if (tid < 1024) + merge(smem, val, tid, 1024, op); + + __syncthreads(); + } + if (N >= 1024) + { + if (tid < 512) + merge(smem, val, tid, 512, op); + + __syncthreads(); + } + if (N >= 512) + { + if (tid < 256) + merge(smem, val, tid, 256, op); + + __syncthreads(); + } + if (N >= 256) + { + if (tid < 128) + merge(smem, val, tid, 128, op); + + __syncthreads(); + } + if (N >= 128) + { + if (tid < 64) + merge(smem, val, tid, 64, op); + + __syncthreads(); + } + if (N >= 64) + { + if (tid < 32) + merge(smem, val, tid, 32, op); + } + + if (tid < 16) + { + merge(smem, val, tid, 16, op); + merge(smem, val, tid, 8, op); + merge(smem, val, tid, 4, op); + merge(smem, val, tid, 2, op); + merge(smem, val, tid, 1, op); + } + } + }; + + template + struct Unroll + { + static __device__ void loopShfl(Reference val, Op op, unsigned int N) + { + mergeShfl(val, I, N, op); + Unroll::loopShfl(val, op, N); + } + static __device__ void loop(Pointer smem, Reference val, unsigned int tid, Op op) + { + merge(smem, val, tid, I, op); + Unroll::loop(smem, val, tid, op); + } + }; + template + struct Unroll<0, Pointer, Reference, Op> + { + static __device__ void loopShfl(Reference, Op, unsigned int) + { + } + static __device__ void loop(Pointer, Reference, unsigned int, Op) + { + } + }; + + template struct WarpOptimized + { + template + static __device__ void reduce(Pointer smem, Reference val, unsigned int tid, Op op) + { + #if __CUDA_ARCH__ >= 300 + (void) smem; + (void) tid; + + Unroll::loopShfl(val, op, N); + #else + loadToSmem(smem, val, tid); + + if (tid < N / 2) + Unroll::loop(smem, val, tid, op); + #endif + } + }; + + template struct GenericOptimized32 + { + enum { M = N / 32 }; + + template + static __device__ void reduce(Pointer smem, Reference val, unsigned int tid, Op op) + { + const 
unsigned int laneId = Warp::laneId(); + + #if __CUDA_ARCH__ >= 300 + Unroll<16, Pointer, Reference, Op>::loopShfl(val, op, warpSize); + + if (laneId == 0) + loadToSmem(smem, val, tid / 32); + #else + loadToSmem(smem, val, tid); + + if (laneId < 16) + Unroll<16, Pointer, Reference, Op>::loop(smem, val, tid, op); + + __syncthreads(); + + if (laneId == 0) + loadToSmem(smem, val, tid / 32); + #endif + + __syncthreads(); + + loadFromSmem(smem, val, tid); + + if (tid < 32) + { + #if __CUDA_ARCH__ >= 300 + Unroll::loopShfl(val, op, M); + #else + Unroll::loop(smem, val, tid, op); + #endif + } + } + }; + + template struct StaticIf; + template struct StaticIf + { + typedef T1 type; + }; + template struct StaticIf + { + typedef T2 type; + }; + + template struct IsPowerOf2 + { + enum { value = ((N != 0) && !(N & (N - 1))) }; + }; + + template struct Dispatcher + { + typedef typename StaticIf< + (N <= 32) && IsPowerOf2::value, + WarpOptimized, + typename StaticIf< + (N <= 1024) && IsPowerOf2::value, + GenericOptimized32, + Generic + >::type + >::type reductor; + }; + } +}}} + +#endif // __OPENCV_GPU_REDUCE_DETAIL_HPP__ diff --git a/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/detail/reduce_key_val.hpp b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/detail/reduce_key_val.hpp new file mode 100644 index 0000000..a84e0c2 --- /dev/null +++ b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/detail/reduce_key_val.hpp @@ -0,0 +1,498 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. 
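The reduce.hpp helpers completed just above implement the classic block reduction: values are staged in shared memory, the number of active threads is halved at each step, and the last 32 threads finish without further barriers (or, on CC >= 3.0, with warp shuffles instead of shared memory). A condensed CUDA sketch of that shape, assuming one block of N threads summing N floats, N a power of two >= 64, and pre-Volta warp-synchronous behaviour; this is an illustration of the pattern, not the library's API:

    // Launch with blockDim.x == N; each block writes one partial sum.
    template <unsigned int N>
    __global__ void blockSum(const float* in, float* out)
    {
        __shared__ volatile float smem[N];
        const unsigned int tid = threadIdx.x;

        smem[tid] = in[blockIdx.x * N + tid];
        __syncthreads();

        // Tree phase: halve the active threads until only one warp remains.
        for (unsigned int delta = N / 2; delta > 32; delta >>= 1)
        {
            if (tid < delta)
                smem[tid] += smem[tid + delta];
            __syncthreads();
        }

        // Last warp runs in lock-step, so no barrier is needed (pre-Volta assumption).
        if (tid < 32)
            for (unsigned int delta = 32; delta > 0; delta >>= 1)
                smem[tid] += smem[tid + delta];

        if (tid == 0)
            out[blockIdx.x] = smem[0];
    }

The Dispatcher at the end of that header simply selects between this generic form, the shuffle-based WarpOptimized form, and GenericOptimized32, depending on N.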
+// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef __OPENCV_GPU_PRED_VAL_REDUCE_DETAIL_HPP__ +#define __OPENCV_GPU_PRED_VAL_REDUCE_DETAIL_HPP__ + +#include +#include "../warp.hpp" +#include "../warp_shuffle.hpp" + +namespace cv { namespace gpu { namespace device +{ + namespace reduce_key_val_detail + { + template struct GetType; + template struct GetType + { + typedef T type; + }; + template struct GetType + { + typedef T type; + }; + template struct GetType + { + typedef T type; + }; + + template + struct For + { + template + static __device__ void loadToSmem(const PointerTuple& smem, const ReferenceTuple& data, unsigned int tid) + { + thrust::get(smem)[tid] = thrust::get(data); + + For::loadToSmem(smem, data, tid); + } + template + static __device__ void loadFromSmem(const PointerTuple& smem, const ReferenceTuple& data, unsigned int tid) + { + thrust::get(data) = thrust::get(smem)[tid]; + + For::loadFromSmem(smem, data, tid); + } + + template + static __device__ void copyShfl(const ReferenceTuple& val, unsigned int delta, int width) + { + thrust::get(val) = shfl_down(thrust::get(val), delta, width); + + For::copyShfl(val, delta, width); + } + template + static __device__ void copy(const PointerTuple& svals, const ReferenceTuple& val, unsigned int tid, unsigned int delta) + { + thrust::get(svals)[tid] = thrust::get(val) = thrust::get(svals)[tid + delta]; + + For::copy(svals, val, tid, delta); + } + + template + static __device__ void mergeShfl(const KeyReferenceTuple& key, const ValReferenceTuple& val, const CmpTuple& cmp, unsigned int delta, int width) + { + typename GetType::type>::type reg = shfl_down(thrust::get(key), delta, width); + + if (thrust::get(cmp)(reg, thrust::get(key))) + { + thrust::get(key) = reg; + thrust::get(val) = shfl_down(thrust::get(val), delta, width); + } + + For::mergeShfl(key, val, cmp, delta, width); + } + template + static __device__ void merge(const KeyPointerTuple& skeys, const KeyReferenceTuple& key, + const ValPointerTuple& svals, const ValReferenceTuple& val, + const CmpTuple& cmp, + unsigned int tid, unsigned int delta) + { + typename GetType::type>::type reg = thrust::get(skeys)[tid + delta]; + + if (thrust::get(cmp)(reg, thrust::get(key))) + { + thrust::get(skeys)[tid] = thrust::get(key) = reg; + thrust::get(svals)[tid] = thrust::get(val) = thrust::get(svals)[tid + delta]; + } + + For::merge(skeys, key, svals, val, cmp, tid, delta); + } + }; + template + struct For + { + template + static __device__ void loadToSmem(const PointerTuple&, const ReferenceTuple&, unsigned int) + { + } + template + static __device__ void loadFromSmem(const PointerTuple&, const ReferenceTuple&, unsigned int) + { + } + + template + static __device__ void copyShfl(const ReferenceTuple&, unsigned int, int) + { + } + template + static __device__ void copy(const PointerTuple&, const ReferenceTuple&, unsigned int, unsigned int) + { + } + + template + static __device__ void mergeShfl(const KeyReferenceTuple&, const ValReferenceTuple&, const CmpTuple&, unsigned int, int) + { + 
} + template + static __device__ void merge(const KeyPointerTuple&, const KeyReferenceTuple&, + const ValPointerTuple&, const ValReferenceTuple&, + const CmpTuple&, + unsigned int, unsigned int) + { + } + }; + + ////////////////////////////////////////////////////// + // loadToSmem + + template + __device__ __forceinline__ void loadToSmem(volatile T* smem, T& data, unsigned int tid) + { + smem[tid] = data; + } + template + __device__ __forceinline__ void loadFromSmem(volatile T* smem, T& data, unsigned int tid) + { + data = smem[tid]; + } + template + __device__ __forceinline__ void loadToSmem(const thrust::tuple& smem, + const thrust::tuple& data, + unsigned int tid) + { + For<0, thrust::tuple_size >::value>::loadToSmem(smem, data, tid); + } + template + __device__ __forceinline__ void loadFromSmem(const thrust::tuple& smem, + const thrust::tuple& data, + unsigned int tid) + { + For<0, thrust::tuple_size >::value>::loadFromSmem(smem, data, tid); + } + + ////////////////////////////////////////////////////// + // copyVals + + template + __device__ __forceinline__ void copyValsShfl(V& val, unsigned int delta, int width) + { + val = shfl_down(val, delta, width); + } + template + __device__ __forceinline__ void copyVals(volatile V* svals, V& val, unsigned int tid, unsigned int delta) + { + svals[tid] = val = svals[tid + delta]; + } + template + __device__ __forceinline__ void copyValsShfl(const thrust::tuple& val, + unsigned int delta, + int width) + { + For<0, thrust::tuple_size >::value>::copyShfl(val, delta, width); + } + template + __device__ __forceinline__ void copyVals(const thrust::tuple& svals, + const thrust::tuple& val, + unsigned int tid, unsigned int delta) + { + For<0, thrust::tuple_size >::value>::copy(svals, val, tid, delta); + } + + ////////////////////////////////////////////////////// + // merge + + template + __device__ __forceinline__ void mergeShfl(K& key, V& val, const Cmp& cmp, unsigned int delta, int width) + { + K reg = shfl_down(key, delta, width); + + if (cmp(reg, key)) + { + key = reg; + copyValsShfl(val, delta, width); + } + } + template + __device__ __forceinline__ void merge(volatile K* skeys, K& key, volatile V* svals, V& val, const Cmp& cmp, unsigned int tid, unsigned int delta) + { + K reg = skeys[tid + delta]; + + if (cmp(reg, key)) + { + skeys[tid] = key = reg; + copyVals(svals, val, tid, delta); + } + } + template + __device__ __forceinline__ void mergeShfl(K& key, + const thrust::tuple& val, + const Cmp& cmp, + unsigned int delta, int width) + { + K reg = shfl_down(key, delta, width); + + if (cmp(reg, key)) + { + key = reg; + copyValsShfl(val, delta, width); + } + } + template + __device__ __forceinline__ void merge(volatile K* skeys, K& key, + const thrust::tuple& svals, + const thrust::tuple& val, + const Cmp& cmp, unsigned int tid, unsigned int delta) + { + K reg = skeys[tid + delta]; + + if (cmp(reg, key)) + { + skeys[tid] = key = reg; + copyVals(svals, val, tid, delta); + } + } + template + __device__ __forceinline__ void mergeShfl(const thrust::tuple& key, + const thrust::tuple& val, + const thrust::tuple& cmp, + unsigned int delta, int width) + { + For<0, thrust::tuple_size >::value>::mergeShfl(key, val, cmp, delta, width); + } + template + __device__ __forceinline__ void merge(const thrust::tuple& skeys, + const thrust::tuple& key, + const thrust::tuple& svals, + const thrust::tuple& val, + const thrust::tuple& cmp, + unsigned int tid, unsigned int delta) + { + For<0, thrust::tuple_size >::value>::merge(skeys, key, svals, val, cmp, tid, delta); + 
} + + ////////////////////////////////////////////////////// + // Generic + + template struct Generic + { + template + static __device__ void reduce(KP skeys, KR key, VP svals, VR val, unsigned int tid, Cmp cmp) + { + loadToSmem(skeys, key, tid); + loadValsToSmem(svals, val, tid); + if (N >= 32) + __syncthreads(); + + if (N >= 2048) + { + if (tid < 1024) + merge(skeys, key, svals, val, cmp, tid, 1024); + + __syncthreads(); + } + if (N >= 1024) + { + if (tid < 512) + merge(skeys, key, svals, val, cmp, tid, 512); + + __syncthreads(); + } + if (N >= 512) + { + if (tid < 256) + merge(skeys, key, svals, val, cmp, tid, 256); + + __syncthreads(); + } + if (N >= 256) + { + if (tid < 128) + merge(skeys, key, svals, val, cmp, tid, 128); + + __syncthreads(); + } + if (N >= 128) + { + if (tid < 64) + merge(skeys, key, svals, val, cmp, tid, 64); + + __syncthreads(); + } + if (N >= 64) + { + if (tid < 32) + merge(skeys, key, svals, val, cmp, tid, 32); + } + + if (tid < 16) + { + merge(skeys, key, svals, val, cmp, tid, 16); + merge(skeys, key, svals, val, cmp, tid, 8); + merge(skeys, key, svals, val, cmp, tid, 4); + merge(skeys, key, svals, val, cmp, tid, 2); + merge(skeys, key, svals, val, cmp, tid, 1); + } + } + }; + + template + struct Unroll + { + static __device__ void loopShfl(KR key, VR val, Cmp cmp, unsigned int N) + { + mergeShfl(key, val, cmp, I, N); + Unroll::loopShfl(key, val, cmp, N); + } + static __device__ void loop(KP skeys, KR key, VP svals, VR val, unsigned int tid, Cmp cmp) + { + merge(skeys, key, svals, val, cmp, tid, I); + Unroll::loop(skeys, key, svals, val, tid, cmp); + } + }; + template + struct Unroll<0, KP, KR, VP, VR, Cmp> + { + static __device__ void loopShfl(KR, VR, Cmp, unsigned int) + { + } + static __device__ void loop(KP, KR, VP, VR, unsigned int, Cmp) + { + } + }; + + template struct WarpOptimized + { + template + static __device__ void reduce(KP skeys, KR key, VP svals, VR val, unsigned int tid, Cmp cmp) + { + #if 0 // __CUDA_ARCH__ >= 300 + (void) skeys; + (void) svals; + (void) tid; + + Unroll::loopShfl(key, val, cmp, N); + #else + loadToSmem(skeys, key, tid); + loadToSmem(svals, val, tid); + + if (tid < N / 2) + Unroll::loop(skeys, key, svals, val, tid, cmp); + #endif + } + }; + + template struct GenericOptimized32 + { + enum { M = N / 32 }; + + template + static __device__ void reduce(KP skeys, KR key, VP svals, VR val, unsigned int tid, Cmp cmp) + { + const unsigned int laneId = Warp::laneId(); + + #if 0 // __CUDA_ARCH__ >= 300 + Unroll<16, KP, KR, VP, VR, Cmp>::loopShfl(key, val, cmp, warpSize); + + if (laneId == 0) + { + loadToSmem(skeys, key, tid / 32); + loadToSmem(svals, val, tid / 32); + } + #else + loadToSmem(skeys, key, tid); + loadToSmem(svals, val, tid); + + if (laneId < 16) + Unroll<16, KP, KR, VP, VR, Cmp>::loop(skeys, key, svals, val, tid, cmp); + + __syncthreads(); + + if (laneId == 0) + { + loadToSmem(skeys, key, tid / 32); + loadToSmem(svals, val, tid / 32); + } + #endif + + __syncthreads(); + + loadFromSmem(skeys, key, tid); + + if (tid < 32) + { + #if 0 // __CUDA_ARCH__ >= 300 + loadFromSmem(svals, val, tid); + + Unroll::loopShfl(key, val, cmp, M); + #else + Unroll::loop(skeys, key, svals, val, tid, cmp); + #endif + } + } + }; + + template struct StaticIf; + template struct StaticIf + { + typedef T1 type; + }; + template struct StaticIf + { + typedef T2 type; + }; + + template struct IsPowerOf2 + { + enum { value = ((N != 0) && !(N & (N - 1))) }; + }; + + template struct Dispatcher + { + typedef typename StaticIf< + (N <= 32) && IsPowerOf2::value, 
+ WarpOptimized, + typename StaticIf< + (N <= 1024) && IsPowerOf2::value, + GenericOptimized32, + Generic + >::type + >::type reductor; + }; + } +}}} + +#endif // __OPENCV_GPU_PRED_VAL_REDUCE_DETAIL_HPP__ diff --git a/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/detail/transform_detail.hpp b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/detail/transform_detail.hpp new file mode 100644 index 0000000..10da593 --- /dev/null +++ b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/detail/transform_detail.hpp @@ -0,0 +1,395 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef __OPENCV_GPU_TRANSFORM_DETAIL_HPP__ +#define __OPENCV_GPU_TRANSFORM_DETAIL_HPP__ + +#include "../common.hpp" +#include "../vec_traits.hpp" +#include "../functional.hpp" + +namespace cv { namespace gpu { namespace device +{ + namespace transform_detail + { + //! Read Write Traits + + template struct UnaryReadWriteTraits + { + typedef typename TypeVec::vec_type read_type; + typedef typename TypeVec::vec_type write_type; + }; + + template struct BinaryReadWriteTraits + { + typedef typename TypeVec::vec_type read_type1; + typedef typename TypeVec::vec_type read_type2; + typedef typename TypeVec::vec_type write_type; + }; + + //! 
Transform kernels + + template struct OpUnroller; + template <> struct OpUnroller<1> + { + template + static __device__ __forceinline__ void unroll(const T& src, D& dst, const Mask& mask, UnOp& op, int x_shifted, int y) + { + if (mask(y, x_shifted)) + dst.x = op(src.x); + } + + template + static __device__ __forceinline__ void unroll(const T1& src1, const T2& src2, D& dst, const Mask& mask, BinOp& op, int x_shifted, int y) + { + if (mask(y, x_shifted)) + dst.x = op(src1.x, src2.x); + } + }; + template <> struct OpUnroller<2> + { + template + static __device__ __forceinline__ void unroll(const T& src, D& dst, const Mask& mask, UnOp& op, int x_shifted, int y) + { + if (mask(y, x_shifted)) + dst.x = op(src.x); + if (mask(y, x_shifted + 1)) + dst.y = op(src.y); + } + + template + static __device__ __forceinline__ void unroll(const T1& src1, const T2& src2, D& dst, const Mask& mask, BinOp& op, int x_shifted, int y) + { + if (mask(y, x_shifted)) + dst.x = op(src1.x, src2.x); + if (mask(y, x_shifted + 1)) + dst.y = op(src1.y, src2.y); + } + }; + template <> struct OpUnroller<3> + { + template + static __device__ __forceinline__ void unroll(const T& src, D& dst, const Mask& mask, const UnOp& op, int x_shifted, int y) + { + if (mask(y, x_shifted)) + dst.x = op(src.x); + if (mask(y, x_shifted + 1)) + dst.y = op(src.y); + if (mask(y, x_shifted + 2)) + dst.z = op(src.z); + } + + template + static __device__ __forceinline__ void unroll(const T1& src1, const T2& src2, D& dst, const Mask& mask, const BinOp& op, int x_shifted, int y) + { + if (mask(y, x_shifted)) + dst.x = op(src1.x, src2.x); + if (mask(y, x_shifted + 1)) + dst.y = op(src1.y, src2.y); + if (mask(y, x_shifted + 2)) + dst.z = op(src1.z, src2.z); + } + }; + template <> struct OpUnroller<4> + { + template + static __device__ __forceinline__ void unroll(const T& src, D& dst, const Mask& mask, const UnOp& op, int x_shifted, int y) + { + if (mask(y, x_shifted)) + dst.x = op(src.x); + if (mask(y, x_shifted + 1)) + dst.y = op(src.y); + if (mask(y, x_shifted + 2)) + dst.z = op(src.z); + if (mask(y, x_shifted + 3)) + dst.w = op(src.w); + } + + template + static __device__ __forceinline__ void unroll(const T1& src1, const T2& src2, D& dst, const Mask& mask, const BinOp& op, int x_shifted, int y) + { + if (mask(y, x_shifted)) + dst.x = op(src1.x, src2.x); + if (mask(y, x_shifted + 1)) + dst.y = op(src1.y, src2.y); + if (mask(y, x_shifted + 2)) + dst.z = op(src1.z, src2.z); + if (mask(y, x_shifted + 3)) + dst.w = op(src1.w, src2.w); + } + }; + template <> struct OpUnroller<8> + { + template + static __device__ __forceinline__ void unroll(const T& src, D& dst, const Mask& mask, const UnOp& op, int x_shifted, int y) + { + if (mask(y, x_shifted)) + dst.a0 = op(src.a0); + if (mask(y, x_shifted + 1)) + dst.a1 = op(src.a1); + if (mask(y, x_shifted + 2)) + dst.a2 = op(src.a2); + if (mask(y, x_shifted + 3)) + dst.a3 = op(src.a3); + if (mask(y, x_shifted + 4)) + dst.a4 = op(src.a4); + if (mask(y, x_shifted + 5)) + dst.a5 = op(src.a5); + if (mask(y, x_shifted + 6)) + dst.a6 = op(src.a6); + if (mask(y, x_shifted + 7)) + dst.a7 = op(src.a7); + } + + template + static __device__ __forceinline__ void unroll(const T1& src1, const T2& src2, D& dst, const Mask& mask, const BinOp& op, int x_shifted, int y) + { + if (mask(y, x_shifted)) + dst.a0 = op(src1.a0, src2.a0); + if (mask(y, x_shifted + 1)) + dst.a1 = op(src1.a1, src2.a1); + if (mask(y, x_shifted + 2)) + dst.a2 = op(src1.a2, src2.a2); + if (mask(y, x_shifted + 3)) + dst.a3 = op(src1.a3, src2.a3); + if (mask(y, 
x_shifted + 4)) + dst.a4 = op(src1.a4, src2.a4); + if (mask(y, x_shifted + 5)) + dst.a5 = op(src1.a5, src2.a5); + if (mask(y, x_shifted + 6)) + dst.a6 = op(src1.a6, src2.a6); + if (mask(y, x_shifted + 7)) + dst.a7 = op(src1.a7, src2.a7); + } + }; + + template + static __global__ void transformSmart(const PtrStepSz src_, PtrStep dst_, const Mask mask, const UnOp op) + { + typedef TransformFunctorTraits ft; + typedef typename UnaryReadWriteTraits::read_type read_type; + typedef typename UnaryReadWriteTraits::write_type write_type; + + const int x = threadIdx.x + blockIdx.x * blockDim.x; + const int y = threadIdx.y + blockIdx.y * blockDim.y; + const int x_shifted = x * ft::smart_shift; + + if (y < src_.rows) + { + const T* src = src_.ptr(y); + D* dst = dst_.ptr(y); + + if (x_shifted + ft::smart_shift - 1 < src_.cols) + { + const read_type src_n_el = ((const read_type*)src)[x]; + write_type dst_n_el = ((const write_type*)dst)[x]; + + OpUnroller::unroll(src_n_el, dst_n_el, mask, op, x_shifted, y); + + ((write_type*)dst)[x] = dst_n_el; + } + else + { + for (int real_x = x_shifted; real_x < src_.cols; ++real_x) + { + if (mask(y, real_x)) + dst[real_x] = op(src[real_x]); + } + } + } + } + + template + __global__ static void transformSimple(const PtrStepSz src, PtrStep dst, const Mask mask, const UnOp op) + { + const int x = blockDim.x * blockIdx.x + threadIdx.x; + const int y = blockDim.y * blockIdx.y + threadIdx.y; + + if (x < src.cols && y < src.rows && mask(y, x)) + { + dst.ptr(y)[x] = op(src.ptr(y)[x]); + } + } + + template + static __global__ void transformSmart(const PtrStepSz src1_, const PtrStep src2_, PtrStep dst_, + const Mask mask, const BinOp op) + { + typedef TransformFunctorTraits ft; + typedef typename BinaryReadWriteTraits::read_type1 read_type1; + typedef typename BinaryReadWriteTraits::read_type2 read_type2; + typedef typename BinaryReadWriteTraits::write_type write_type; + + const int x = threadIdx.x + blockIdx.x * blockDim.x; + const int y = threadIdx.y + blockIdx.y * blockDim.y; + const int x_shifted = x * ft::smart_shift; + + if (y < src1_.rows) + { + const T1* src1 = src1_.ptr(y); + const T2* src2 = src2_.ptr(y); + D* dst = dst_.ptr(y); + + if (x_shifted + ft::smart_shift - 1 < src1_.cols) + { + const read_type1 src1_n_el = ((const read_type1*)src1)[x]; + const read_type2 src2_n_el = ((const read_type2*)src2)[x]; + write_type dst_n_el = ((const write_type*)dst)[x]; + + OpUnroller::unroll(src1_n_el, src2_n_el, dst_n_el, mask, op, x_shifted, y); + + ((write_type*)dst)[x] = dst_n_el; + } + else + { + for (int real_x = x_shifted; real_x < src1_.cols; ++real_x) + { + if (mask(y, real_x)) + dst[real_x] = op(src1[real_x], src2[real_x]); + } + } + } + } + + template + static __global__ void transformSimple(const PtrStepSz src1, const PtrStep src2, PtrStep dst, + const Mask mask, const BinOp op) + { + const int x = blockDim.x * blockIdx.x + threadIdx.x; + const int y = blockDim.y * blockIdx.y + threadIdx.y; + + if (x < src1.cols && y < src1.rows && mask(y, x)) + { + const T1 src1_data = src1.ptr(y)[x]; + const T2 src2_data = src2.ptr(y)[x]; + dst.ptr(y)[x] = op(src1_data, src2_data); + } + } + + template struct TransformDispatcher; + template<> struct TransformDispatcher + { + template + static void call(PtrStepSz src, PtrStepSz dst, UnOp op, Mask mask, cudaStream_t stream) + { + typedef TransformFunctorTraits ft; + + const dim3 threads(ft::simple_block_dim_x, ft::simple_block_dim_y, 1); + const dim3 grid(divUp(src.cols, threads.x), divUp(src.rows, threads.y), 1); + + 
transformSimple<<>>(src, dst, mask, op); + cudaSafeCall( cudaGetLastError() ); + + if (stream == 0) + cudaSafeCall( cudaDeviceSynchronize() ); + } + + template + static void call(PtrStepSz src1, PtrStepSz src2, PtrStepSz dst, BinOp op, Mask mask, cudaStream_t stream) + { + typedef TransformFunctorTraits ft; + + const dim3 threads(ft::simple_block_dim_x, ft::simple_block_dim_y, 1); + const dim3 grid(divUp(src1.cols, threads.x), divUp(src1.rows, threads.y), 1); + + transformSimple<<>>(src1, src2, dst, mask, op); + cudaSafeCall( cudaGetLastError() ); + + if (stream == 0) + cudaSafeCall( cudaDeviceSynchronize() ); + } + }; + template<> struct TransformDispatcher + { + template + static void call(PtrStepSz src, PtrStepSz dst, UnOp op, Mask mask, cudaStream_t stream) + { + typedef TransformFunctorTraits ft; + + StaticAssert::check(); + + if (!isAligned(src.data, ft::smart_shift * sizeof(T)) || !isAligned(src.step, ft::smart_shift * sizeof(T)) || + !isAligned(dst.data, ft::smart_shift * sizeof(D)) || !isAligned(dst.step, ft::smart_shift * sizeof(D))) + { + TransformDispatcher::call(src, dst, op, mask, stream); + return; + } + + const dim3 threads(ft::smart_block_dim_x, ft::smart_block_dim_y, 1); + const dim3 grid(divUp(src.cols, threads.x * ft::smart_shift), divUp(src.rows, threads.y), 1); + + transformSmart<<>>(src, dst, mask, op); + cudaSafeCall( cudaGetLastError() ); + + if (stream == 0) + cudaSafeCall( cudaDeviceSynchronize() ); + } + + template + static void call(PtrStepSz src1, PtrStepSz src2, PtrStepSz dst, BinOp op, Mask mask, cudaStream_t stream) + { + typedef TransformFunctorTraits ft; + + StaticAssert::check(); + + if (!isAligned(src1.data, ft::smart_shift * sizeof(T1)) || !isAligned(src1.step, ft::smart_shift * sizeof(T1)) || + !isAligned(src2.data, ft::smart_shift * sizeof(T2)) || !isAligned(src2.step, ft::smart_shift * sizeof(T2)) || + !isAligned(dst.data, ft::smart_shift * sizeof(D)) || !isAligned(dst.step, ft::smart_shift * sizeof(D))) + { + TransformDispatcher::call(src1, src2, dst, op, mask, stream); + return; + } + + const dim3 threads(ft::smart_block_dim_x, ft::smart_block_dim_y, 1); + const dim3 grid(divUp(src1.cols, threads.x * ft::smart_shift), divUp(src1.rows, threads.y), 1); + + transformSmart<<>>(src1, src2, dst, mask, op); + cudaSafeCall( cudaGetLastError() ); + + if (stream == 0) + cudaSafeCall( cudaDeviceSynchronize() ); + } + }; + } // namespace transform_detail +}}} // namespace cv { namespace gpu { namespace device + +#endif // __OPENCV_GPU_TRANSFORM_DETAIL_HPP__ diff --git a/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/detail/type_traits_detail.hpp b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/detail/type_traits_detail.hpp new file mode 100644 index 0000000..97ff00d --- /dev/null +++ b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/detail/type_traits_detail.hpp @@ -0,0 +1,187 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. 
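transform_detail.hpp, which ends just above, dispatches element-wise GPU operations: the simple path runs one thread per pixel behind a mask test, while the smart path first checks that the data pointers and row steps are aligned to smart_shift elements and then lets each thread handle smart_shift consecutive pixels through one wide load. A stripped-down sketch of the simple path and its launch arithmetic, assuming an 8-bit single-channel image and an inverted-pixel stand-in for the functor; all names here are illustrative:

    // One thread per pixel; step is in bytes, which equals elements for uchar data.
    __global__ void transformSimpleSketch(const unsigned char* src, unsigned char* dst,
                                          int cols, int rows, size_t step)
    {
        const int x = blockDim.x * blockIdx.x + threadIdx.x;
        const int y = blockDim.y * blockIdx.y + threadIdx.y;

        if (x < cols && y < rows)
            dst[y * step + x] = 255 - src[y * step + x];   // stand-in for mask(y, x) && op(src(y, x))
    }

    static int divUpSketch(int total, int grain) { return (total + grain - 1) / grain; }

    // Host side (sketch):
    //   dim3 threads(32, 8);
    //   dim3 grid(divUpSketch(cols, threads.x), divUpSketch(rows, threads.y));
    //   transformSimpleSketch<<<grid, threads>>>(d_src, d_dst, cols, rows, step);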
+// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef __OPENCV_GPU_TYPE_TRAITS_DETAIL_HPP__ +#define __OPENCV_GPU_TYPE_TRAITS_DETAIL_HPP__ + +#include "../common.hpp" +#include "../vec_traits.hpp" + +namespace cv { namespace gpu { namespace device +{ + namespace type_traits_detail + { + template struct Select { typedef T1 type; }; + template struct Select { typedef T2 type; }; + + template struct IsSignedIntergral { enum {value = 0}; }; + template <> struct IsSignedIntergral { enum {value = 1}; }; + template <> struct IsSignedIntergral { enum {value = 1}; }; + template <> struct IsSignedIntergral { enum {value = 1}; }; + template <> struct IsSignedIntergral { enum {value = 1}; }; + template <> struct IsSignedIntergral { enum {value = 1}; }; + template <> struct IsSignedIntergral { enum {value = 1}; }; + + template struct IsUnsignedIntegral { enum {value = 0}; }; + template <> struct IsUnsignedIntegral { enum {value = 1}; }; + template <> struct IsUnsignedIntegral { enum {value = 1}; }; + template <> struct IsUnsignedIntegral { enum {value = 1}; }; + template <> struct IsUnsignedIntegral { enum {value = 1}; }; + template <> struct IsUnsignedIntegral { enum {value = 1}; }; + template <> struct IsUnsignedIntegral { enum {value = 1}; }; + + template struct IsIntegral { enum {value = IsSignedIntergral::value || IsUnsignedIntegral::value}; }; + template <> struct IsIntegral { enum {value = 1}; }; + template <> struct IsIntegral { enum {value = 1}; }; + + template struct IsFloat { enum {value = 0}; }; + template <> struct IsFloat { enum {value = 1}; }; + template <> struct IsFloat { enum {value = 1}; }; + + template struct IsVec { enum {value = 0}; }; + template <> struct IsVec { enum {value = 1}; }; + template <> struct IsVec { enum {value = 1}; }; + template <> struct IsVec { enum {value = 1}; }; + template <> struct IsVec { enum {value = 1}; }; + template <> struct IsVec { enum {value = 1}; }; + template <> struct IsVec { enum {value = 1}; }; + template <> struct IsVec { enum {value = 1}; }; + template <> struct IsVec { enum {value = 1}; }; + template <> struct IsVec { 
enum {value = 1}; }; + template <> struct IsVec { enum {value = 1}; }; + template <> struct IsVec { enum {value = 1}; }; + template <> struct IsVec { enum {value = 1}; }; + template <> struct IsVec { enum {value = 1}; }; + template <> struct IsVec { enum {value = 1}; }; + template <> struct IsVec { enum {value = 1}; }; + template <> struct IsVec { enum {value = 1}; }; + template <> struct IsVec { enum {value = 1}; }; + template <> struct IsVec { enum {value = 1}; }; + template <> struct IsVec { enum {value = 1}; }; + template <> struct IsVec { enum {value = 1}; }; + template <> struct IsVec { enum {value = 1}; }; + template <> struct IsVec { enum {value = 1}; }; + template <> struct IsVec { enum {value = 1}; }; + template <> struct IsVec { enum {value = 1}; }; + template <> struct IsVec { enum {value = 1}; }; + template <> struct IsVec { enum {value = 1}; }; + template <> struct IsVec { enum {value = 1}; }; + template <> struct IsVec { enum {value = 1}; }; + template <> struct IsVec { enum {value = 1}; }; + template <> struct IsVec { enum {value = 1}; }; + template <> struct IsVec { enum {value = 1}; }; + template <> struct IsVec { enum {value = 1}; }; + template <> struct IsVec { enum {value = 1}; }; + template <> struct IsVec { enum {value = 1}; }; + template <> struct IsVec { enum {value = 1}; }; + template <> struct IsVec { enum {value = 1}; }; + template <> struct IsVec { enum {value = 1}; }; + template <> struct IsVec { enum {value = 1}; }; + template <> struct IsVec { enum {value = 1}; }; + template <> struct IsVec { enum {value = 1}; }; + + template struct AddParameterType { typedef const U& type; }; + template struct AddParameterType { typedef U& type; }; + template <> struct AddParameterType { typedef void type; }; + + template struct ReferenceTraits + { + enum { value = false }; + typedef U type; + }; + template struct ReferenceTraits + { + enum { value = true }; + typedef U type; + }; + + template struct PointerTraits + { + enum { value = false }; + typedef void type; + }; + template struct PointerTraits + { + enum { value = true }; + typedef U type; + }; + template struct PointerTraits + { + enum { value = true }; + typedef U type; + }; + + template struct UnConst + { + typedef U type; + enum { value = 0 }; + }; + template struct UnConst + { + typedef U type; + enum { value = 1 }; + }; + template struct UnConst + { + typedef U& type; + enum { value = 1 }; + }; + + template struct UnVolatile + { + typedef U type; + enum { value = 0 }; + }; + template struct UnVolatile + { + typedef U type; + enum { value = 1 }; + }; + template struct UnVolatile + { + typedef U& type; + enum { value = 1 }; + }; + } // namespace type_traits_detail +}}} // namespace cv { namespace gpu { namespace device + +#endif // __OPENCV_GPU_TYPE_TRAITS_DETAIL_HPP__ diff --git a/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/detail/vec_distance_detail.hpp b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/detail/vec_distance_detail.hpp new file mode 100644 index 0000000..78ab556 --- /dev/null +++ b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/detail/vec_distance_detail.hpp @@ -0,0 +1,117 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. 
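type_traits_detail.hpp, completed above, is a small pre-C++11 traits toolbox; its Select trait is what later headers use to pick an implementation type at compile time (for example the reduction Dispatcher earlier in this patch). A standalone illustration of the same idiom, using C++11 only for the static_assert checks; the sketch names are mine, not the library's:

    #include <type_traits>

    // Same shape as device::type_traits_detail::Select: the primary template yields T1,
    // the partial specialisation on false yields T2.
    template <bool B, typename T1, typename T2> struct SelectSketch                { typedef T1 type; };
    template <typename T1, typename T2>         struct SelectSketch<false, T1, T2> { typedef T2 type; };

    static_assert(std::is_same<SelectSketch<true,  int, float>::type, int>::value,   "true picks T1");
    static_assert(std::is_same<SelectSketch<false, int, float>::type, float>::value, "false picks T2");

    int main() { return 0; }

std::conditional provides the same facility in C++11 and later.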
+// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef __OPENCV_GPU_VEC_DISTANCE_DETAIL_HPP__ +#define __OPENCV_GPU_VEC_DISTANCE_DETAIL_HPP__ + +#include "../datamov_utils.hpp" + +namespace cv { namespace gpu { namespace device +{ + namespace vec_distance_detail + { + template struct UnrollVecDiffCached + { + template + static __device__ void calcCheck(const T1* vecCached, const T2* vecGlob, int len, Dist& dist, int ind) + { + if (ind < len) + { + T1 val1 = *vecCached++; + + T2 val2; + ForceGlob::Load(vecGlob, ind, val2); + + dist.reduceIter(val1, val2); + + UnrollVecDiffCached::calcCheck(vecCached, vecGlob, len, dist, ind + THREAD_DIM); + } + } + + template + static __device__ void calcWithoutCheck(const T1* vecCached, const T2* vecGlob, Dist& dist) + { + T1 val1 = *vecCached++; + + T2 val2; + ForceGlob::Load(vecGlob, 0, val2); + vecGlob += THREAD_DIM; + + dist.reduceIter(val1, val2); + + UnrollVecDiffCached::calcWithoutCheck(vecCached, vecGlob, dist); + } + }; + template struct UnrollVecDiffCached + { + template + static __device__ __forceinline__ void calcCheck(const T1*, const T2*, int, Dist&, int) + { + } + + template + static __device__ __forceinline__ void calcWithoutCheck(const T1*, const T2*, Dist&) + { + } + }; + + template struct VecDiffCachedCalculator; + template struct VecDiffCachedCalculator + { + template + static __device__ __forceinline__ void calc(const T1* vecCached, const T2* vecGlob, int len, Dist& dist, int tid) + { + UnrollVecDiffCached::calcCheck(vecCached, vecGlob, len, dist, tid); + } + }; + template struct VecDiffCachedCalculator + { + template + static __device__ __forceinline__ void calc(const T1* vecCached, const T2* vecGlob, int len, Dist& dist, int tid) + { + UnrollVecDiffCached::calcWithoutCheck(vecCached, vecGlob + tid, dist); + } + }; + } // namespace 
vec_distance_detail +}}} // namespace cv { namespace gpu { namespace device + +#endif // __OPENCV_GPU_VEC_DISTANCE_DETAIL_HPP__ diff --git a/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/dynamic_smem.hpp b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/dynamic_smem.hpp new file mode 100644 index 0000000..cf431d9 --- /dev/null +++ b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/dynamic_smem.hpp @@ -0,0 +1,80 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. 
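vec_distance_detail.hpp, which closes just above, unrolls a cooperative descriptor-distance loop: THREAD_DIM threads stride through one descriptor, each accumulating a partial result through Dist::reduceIter, and the per-thread partials are reduced afterwards. A scalar sketch of one thread's share of a squared-L2 distance, assuming the caller later reduces the partials; the function and parameter names are illustrative:

    // queryCached holds this thread's slice of the query descriptor (one element per stride step);
    // train points at the full candidate descriptor in global memory.
    __device__ float partialSqDist(const float* queryCached, const float* train,
                                   int len, int tid, int threadDim /* plays the role of THREAD_DIM */)
    {
        float acc = 0.f;
        for (int i = tid, j = 0; i < len; i += threadDim, ++j)
        {
            const float d = queryCached[j] - train[i];
            acc += d * d;                   // what Dist::reduceIter(val1, val2) does for an L2 distance
        }
        return acc;                         // partials are then block/warp-reduced, as in reduce.hpp above
    }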
+// +//M*/ + +#ifndef __OPENCV_GPU_DYNAMIC_SMEM_HPP__ +#define __OPENCV_GPU_DYNAMIC_SMEM_HPP__ + +namespace cv { namespace gpu { namespace device +{ + template struct DynamicSharedMem + { + __device__ __forceinline__ operator T*() + { + extern __shared__ int __smem[]; + return (T*)__smem; + } + + __device__ __forceinline__ operator const T*() const + { + extern __shared__ int __smem[]; + return (T*)__smem; + } + }; + + // specialize for double to avoid unaligned memory access compile errors + template<> struct DynamicSharedMem + { + __device__ __forceinline__ operator double*() + { + extern __shared__ double __smem_d[]; + return (double*)__smem_d; + } + + __device__ __forceinline__ operator const double*() const + { + extern __shared__ double __smem_d[]; + return (double*)__smem_d; + } + }; +}}} + +#endif // __OPENCV_GPU_DYNAMIC_SMEM_HPP__ diff --git a/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/emulation.hpp b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/emulation.hpp new file mode 100644 index 0000000..bf47bc5 --- /dev/null +++ b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/emulation.hpp @@ -0,0 +1,138 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. 
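dynamic_smem.hpp, completed above, exists so kernels can view the launch-time dynamic shared-memory buffer as a typed pointer without redeclaring extern __shared__ arrays themselves (the double specialisation avoids alignment complaints). A small usage sketch, assuming the header is included; the kernel and launch parameters are illustrative:

    template <typename T>
    __global__ void scaleWithSmem(const T* in, T* out, int n, T factor)
    {
        cv::gpu::device::DynamicSharedMem<T> shared;
        T* buf = shared;                            // operator T*() hands back the dynamic buffer

        const int i = blockDim.x * blockIdx.x + threadIdx.x;
        if (i < n)
        {
            buf[threadIdx.x] = in[i] * factor;      // stage through shared memory just to exercise it
            out[i] = buf[threadIdx.x];
        }
    }

    // Launch with the buffer size passed as the third <<<...>>> argument:
    //   scaleWithSmem<float><<<grid, block, block.x * sizeof(float)>>>(d_in, d_out, n, 2.f);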
+// +//M*/ + +#ifndef OPENCV_GPU_EMULATION_HPP_ +#define OPENCV_GPU_EMULATION_HPP_ + +#include "warp_reduce.hpp" + +namespace cv { namespace gpu { namespace device +{ + struct Emulation + { + + static __device__ __forceinline__ int syncthreadsOr(int pred) + { +#if defined (__CUDA_ARCH__) && (__CUDA_ARCH__ < 200) + // just campilation stab + return 0; +#else + return __syncthreads_or(pred); +#endif + } + + template + static __forceinline__ __device__ int Ballot(int predicate) + { +#if defined (__CUDA_ARCH__) && (__CUDA_ARCH__ >= 200) + return __ballot(predicate); +#else + __shared__ volatile int cta_buffer[CTA_SIZE]; + + int tid = threadIdx.x; + cta_buffer[tid] = predicate ? (1 << (tid & 31)) : 0; + return warp_reduce(cta_buffer); +#endif + } + + struct smem + { + enum { TAG_MASK = (1U << ( (sizeof(unsigned int) << 3) - 5U)) - 1U }; + + template + static __device__ __forceinline__ T atomicInc(T* address, T val) + { +#if defined (__CUDA_ARCH__) && (__CUDA_ARCH__ < 120) + T count; + unsigned int tag = threadIdx.x << ( (sizeof(unsigned int) << 3) - 5U); + do + { + count = *address & TAG_MASK; + count = tag | (count + 1); + *address = count; + } while (*address != count); + + return (count & TAG_MASK) - 1; +#else + return ::atomicInc(address, val); +#endif + } + + template + static __device__ __forceinline__ T atomicAdd(T* address, T val) + { +#if defined (__CUDA_ARCH__) && (__CUDA_ARCH__ < 120) + T count; + unsigned int tag = threadIdx.x << ( (sizeof(unsigned int) << 3) - 5U); + do + { + count = *address & TAG_MASK; + count = tag | (count + val); + *address = count; + } while (*address != count); + + return (count & TAG_MASK) - val; +#else + return ::atomicAdd(address, val); +#endif + } + + template + static __device__ __forceinline__ T atomicMin(T* address, T val) + { +#if defined (__CUDA_ARCH__) && (__CUDA_ARCH__ < 120) + T count = ::min(*address, val); + do + { + *address = count; + } while (*address > count); + + return count; +#else + return ::atomicMin(address, val); +#endif + } + }; + }; +}}} // namespace cv { namespace gpu { namespace device + +#endif /* OPENCV_GPU_EMULATION_HPP_ */ diff --git a/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/filters.hpp b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/filters.hpp new file mode 100644 index 0000000..d193969 --- /dev/null +++ b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/filters.hpp @@ -0,0 +1,278 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. 
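emulation.hpp, completed above, backfills __ballot, __syncthreads_or and shared-memory atomics on older compute capabilities. Its atomic emulation relies on a tagging trick: the top five bits of the 32-bit cell carry the writer's lane id and the rest carry the counter, so a thread can re-read the cell and tell whether its write survived the race. The arithmetic restated on the host as a sketch, with illustrative values:

    #include <cassert>

    int main()
    {
        const unsigned int TAG_MASK = (1u << ((sizeof(unsigned int) << 3) - 5u)) - 1u;  // 0x07ffffff

        unsigned int cell = 0;            // the shared-memory word
        unsigned int lane = 13;           // threadIdx.x of the emulated writer
        unsigned int tag  = lane << ((sizeof(unsigned int) << 3) - 5u);

        unsigned int count = cell & TAG_MASK;   // current counter value
        cell = tag | (count + 1);               // write back counter + 1, signed with our tag
        assert((cell & TAG_MASK) == 1);         // counter advanced
        assert((cell >> 27) == lane);           // and carries the winning writer's id
        return 0;
    }

The emulated atomicInc then returns (count & TAG_MASK) - 1, i.e. the pre-increment value, matching what ::atomicInc returns on hardware that supports it.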
+// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef __OPENCV_GPU_FILTERS_HPP__ +#define __OPENCV_GPU_FILTERS_HPP__ + +#include "saturate_cast.hpp" +#include "vec_traits.hpp" +#include "vec_math.hpp" +#include "type_traits.hpp" + +namespace cv { namespace gpu { namespace device +{ + template struct PointFilter + { + typedef typename Ptr2D::elem_type elem_type; + typedef float index_type; + + explicit __host__ __device__ __forceinline__ PointFilter(const Ptr2D& src_, float fx = 0.f, float fy = 0.f) + : src(src_) + { + (void)fx; + (void)fy; + } + + __device__ __forceinline__ elem_type operator ()(float y, float x) const + { + return src(__float2int_rz(y), __float2int_rz(x)); + } + + const Ptr2D src; + }; + + template struct LinearFilter + { + typedef typename Ptr2D::elem_type elem_type; + typedef float index_type; + + explicit __host__ __device__ __forceinline__ LinearFilter(const Ptr2D& src_, float fx = 0.f, float fy = 0.f) + : src(src_) + { + (void)fx; + (void)fy; + } + __device__ __forceinline__ elem_type operator ()(float y, float x) const + { + typedef typename TypeVec::cn>::vec_type work_type; + + work_type out = VecTraits::all(0); + + const int x1 = __float2int_rd(x); + const int y1 = __float2int_rd(y); + const int x2 = x1 + 1; + const int y2 = y1 + 1; + + elem_type src_reg = src(y1, x1); + out = out + src_reg * ((x2 - x) * (y2 - y)); + + src_reg = src(y1, x2); + out = out + src_reg * ((x - x1) * (y2 - y)); + + src_reg = src(y2, x1); + out = out + src_reg * ((x2 - x) * (y - y1)); + + src_reg = src(y2, x2); + out = out + src_reg * ((x - x1) * (y - y1)); + + return saturate_cast(out); + } + + const Ptr2D src; + }; + + template struct CubicFilter + { + typedef typename Ptr2D::elem_type elem_type; + typedef float index_type; + typedef typename TypeVec::cn>::vec_type work_type; + + explicit __host__ __device__ __forceinline__ CubicFilter(const Ptr2D& src_, float fx = 0.f, float fy = 0.f) + : src(src_) + { + (void)fx; + (void)fy; + } + + static __device__ __forceinline__ float bicubicCoeff(float x_) + { + float x = fabsf(x_); + if (x <= 1.0f) + { + return x * x * (1.5f * x - 2.5f) + 1.0f; + } + else if (x < 2.0f) + { + return x * (x * (-0.5f * x + 2.5f) - 4.0f) + 2.0f; + } + else + { + return 0.0f; + } + } + + __device__ elem_type operator ()(float y, float x) const + { + const float xmin = ::ceilf(x - 2.0f); + const float xmax = ::floorf(x + 2.0f); + + 
const float ymin = ::ceilf(y - 2.0f); + const float ymax = ::floorf(y + 2.0f); + + work_type sum = VecTraits::all(0); + float wsum = 0.0f; + + for (float cy = ymin; cy <= ymax; cy += 1.0f) + { + for (float cx = xmin; cx <= xmax; cx += 1.0f) + { + const float w = bicubicCoeff(x - cx) * bicubicCoeff(y - cy); + sum = sum + w * src(__float2int_rd(cy), __float2int_rd(cx)); + wsum += w; + } + } + + work_type res = (!wsum)? VecTraits::all(0) : sum / wsum; + + return saturate_cast(res); + } + + const Ptr2D src; + }; + // for integer scaling + template struct IntegerAreaFilter + { + typedef typename Ptr2D::elem_type elem_type; + typedef float index_type; + + explicit __host__ __device__ __forceinline__ IntegerAreaFilter(const Ptr2D& src_, float scale_x_, float scale_y_) + : src(src_), scale_x(scale_x_), scale_y(scale_y_), scale(1.f / (scale_x * scale_y)) {} + + __device__ __forceinline__ elem_type operator ()(float y, float x) const + { + float fsx1 = x * scale_x; + float fsx2 = fsx1 + scale_x; + + int sx1 = __float2int_ru(fsx1); + int sx2 = __float2int_rd(fsx2); + + float fsy1 = y * scale_y; + float fsy2 = fsy1 + scale_y; + + int sy1 = __float2int_ru(fsy1); + int sy2 = __float2int_rd(fsy2); + + typedef typename TypeVec::cn>::vec_type work_type; + work_type out = VecTraits::all(0.f); + + for(int dy = sy1; dy < sy2; ++dy) + for(int dx = sx1; dx < sx2; ++dx) + { + out = out + src(dy, dx) * scale; + } + + return saturate_cast(out); + } + + const Ptr2D src; + float scale_x, scale_y ,scale; + }; + + template struct AreaFilter + { + typedef typename Ptr2D::elem_type elem_type; + typedef float index_type; + + explicit __host__ __device__ __forceinline__ AreaFilter(const Ptr2D& src_, float scale_x_, float scale_y_) + : src(src_), scale_x(scale_x_), scale_y(scale_y_){} + + __device__ __forceinline__ elem_type operator ()(float y, float x) const + { + float fsx1 = x * scale_x; + float fsx2 = fsx1 + scale_x; + + int sx1 = __float2int_ru(fsx1); + int sx2 = __float2int_rd(fsx2); + + float fsy1 = y * scale_y; + float fsy2 = fsy1 + scale_y; + + int sy1 = __float2int_ru(fsy1); + int sy2 = __float2int_rd(fsy2); + + float scale = 1.f / (fminf(scale_x, src.width - fsx1) * fminf(scale_y, src.height - fsy1)); + + typedef typename TypeVec::cn>::vec_type work_type; + work_type out = VecTraits::all(0.f); + + for (int dy = sy1; dy < sy2; ++dy) + { + for (int dx = sx1; dx < sx2; ++dx) + out = out + src(dy, dx) * scale; + + if (sx1 > fsx1) + out = out + src(dy, (sx1 -1) ) * ((sx1 - fsx1) * scale); + + if (sx2 < fsx2) + out = out + src(dy, sx2) * ((fsx2 -sx2) * scale); + } + + if (sy1 > fsy1) + for (int dx = sx1; dx < sx2; ++dx) + out = out + src( (sy1 - 1) , dx) * ((sy1 -fsy1) * scale); + + if (sy2 < fsy2) + for (int dx = sx1; dx < sx2; ++dx) + out = out + src(sy2, dx) * ((fsy2 -sy2) * scale); + + if ((sy1 > fsy1) && (sx1 > fsx1)) + out = out + src( (sy1 - 1) , (sx1 - 1)) * ((sy1 -fsy1) * (sx1 -fsx1) * scale); + + if ((sy1 > fsy1) && (sx2 < fsx2)) + out = out + src( (sy1 - 1) , sx2) * ((sy1 -fsy1) * (fsx2 -sx2) * scale); + + if ((sy2 < fsy2) && (sx2 < fsx2)) + out = out + src(sy2, sx2) * ((fsy2 -sy2) * (fsx2 -sx2) * scale); + + if ((sy2 < fsy2) && (sx1 > fsx1)) + out = out + src(sy2, (sx1 - 1)) * ((fsy2 -sy2) * (sx1 -fsx1) * scale); + + return saturate_cast(out); + } + + const Ptr2D src; + float scale_x, scale_y; + int width, haight; + }; +}}} // namespace cv { namespace gpu { namespace device + +#endif // __OPENCV_GPU_FILTERS_HPP__ diff --git a/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/funcattrib.hpp 
b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/funcattrib.hpp new file mode 100644 index 0000000..2ed7980 --- /dev/null +++ b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/funcattrib.hpp @@ -0,0 +1,71 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. 
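For the interpolation filters added just above in filters.hpp, a rough usage sketch follows. The FloatPlane wrapper, kernel and coordinate mapping are assumptions made only to keep the sketch self-contained; in the real GPU module the source argument is normally a border-aware reader from the neighbouring device headers, which also absorbs the out-of-range reads a bare pointer wrapper would make near the right and bottom edges.

#include <opencv2/gpu/device/filters.hpp>

// Minimal Ptr2D-style wrapper: the filters only need elem_type and operator()(y, x).
struct FloatPlane
{
    typedef float elem_type;

    const float* data;
    int step;  // row stride in elements

    __device__ __forceinline__ float operator ()(int y, int x) const
    {
        return data[y * step + x];
    }
};

// Bilinear sampling of a single-channel float image into a destination grid.
__global__ void resizeBilinear(FloatPlane src, float* dst, int dstStep,
                               int dstRows, int dstCols, float fx, float fy)
{
    const int x = blockIdx.x * blockDim.x + threadIdx.x;
    const int y = blockIdx.y * blockDim.y + threadIdx.y;
    if (x >= dstCols || y >= dstRows)
        return;

    // LinearFilter blends the four source neighbours of the fractional coordinate.
    cv::gpu::device::LinearFilter<FloatPlane> filter(src);
    dst[y * dstStep + x] = filter(y * fy, x * fx);
}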
+// +//M*/ + +#ifndef __OPENCV_GPU_DEVICE_FUNCATTRIB_HPP_ +#define __OPENCV_GPU_DEVICE_FUNCATTRIB_HPP_ + +#include + +namespace cv { namespace gpu { namespace device +{ + template + void printFuncAttrib(Func& func) + { + + cudaFuncAttributes attrs; + cudaFuncGetAttributes(&attrs, func); + + printf("=== Function stats ===\n"); + printf("Name: \n"); + printf("sharedSizeBytes = %d\n", attrs.sharedSizeBytes); + printf("constSizeBytes = %d\n", attrs.constSizeBytes); + printf("localSizeBytes = %d\n", attrs.localSizeBytes); + printf("maxThreadsPerBlock = %d\n", attrs.maxThreadsPerBlock); + printf("numRegs = %d\n", attrs.numRegs); + printf("ptxVersion = %d\n", attrs.ptxVersion); + printf("binaryVersion = %d\n", attrs.binaryVersion); + printf("\n"); + fflush(stdout); + } +}}} // namespace cv { namespace gpu { namespace device + +#endif /* __OPENCV_GPU_DEVICE_FUNCATTRIB_HPP_ */ diff --git a/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/functional.hpp b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/functional.hpp new file mode 100644 index 0000000..db26473 --- /dev/null +++ b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/functional.hpp @@ -0,0 +1,789 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. 
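The printFuncAttrib helper above is host-side: it asks cudaFuncGetAttributes for a kernel's attributes and prints its resource usage. A small sketch of calling it, with a made-up kernel and the opencv2/gpu/device include prefix assumed:

#include <opencv2/gpu/device/funcattrib.hpp>

__global__ void scaleKernel(float* data, float s)
{
    data[threadIdx.x] *= s;
}

int main()
{
    // Prints shared/constant/local memory sizes, register count and the PTX and
    // binary versions that cudaFuncGetAttributes() reports for the kernel.
    cv::gpu::device::printFuncAttrib(scaleKernel);
    return 0;
}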
+// +//M*/ + +#ifndef __OPENCV_GPU_FUNCTIONAL_HPP__ +#define __OPENCV_GPU_FUNCTIONAL_HPP__ + +#include +#include "saturate_cast.hpp" +#include "vec_traits.hpp" +#include "type_traits.hpp" +#include "device_functions.h" + +namespace cv { namespace gpu { namespace device +{ + // Function Objects + template struct unary_function : public std::unary_function {}; + template struct binary_function : public std::binary_function {}; + + // Arithmetic Operations + template struct plus : binary_function + { + __device__ __forceinline__ T operator ()(typename TypeTraits::ParameterType a, + typename TypeTraits::ParameterType b) const + { + return a + b; + } + __host__ __device__ __forceinline__ plus() {} + __host__ __device__ __forceinline__ plus(const plus&) {} + }; + + template struct minus : binary_function + { + __device__ __forceinline__ T operator ()(typename TypeTraits::ParameterType a, + typename TypeTraits::ParameterType b) const + { + return a - b; + } + __host__ __device__ __forceinline__ minus() {} + __host__ __device__ __forceinline__ minus(const minus&) {} + }; + + template struct multiplies : binary_function + { + __device__ __forceinline__ T operator ()(typename TypeTraits::ParameterType a, + typename TypeTraits::ParameterType b) const + { + return a * b; + } + __host__ __device__ __forceinline__ multiplies() {} + __host__ __device__ __forceinline__ multiplies(const multiplies&) {} + }; + + template struct divides : binary_function + { + __device__ __forceinline__ T operator ()(typename TypeTraits::ParameterType a, + typename TypeTraits::ParameterType b) const + { + return a / b; + } + __host__ __device__ __forceinline__ divides() {} + __host__ __device__ __forceinline__ divides(const divides&) {} + }; + + template struct modulus : binary_function + { + __device__ __forceinline__ T operator ()(typename TypeTraits::ParameterType a, + typename TypeTraits::ParameterType b) const + { + return a % b; + } + __host__ __device__ __forceinline__ modulus() {} + __host__ __device__ __forceinline__ modulus(const modulus&) {} + }; + + template struct negate : unary_function + { + __device__ __forceinline__ T operator ()(typename TypeTraits::ParameterType a) const + { + return -a; + } + __host__ __device__ __forceinline__ negate() {} + __host__ __device__ __forceinline__ negate(const negate&) {} + }; + + // Comparison Operations + template struct equal_to : binary_function + { + __device__ __forceinline__ bool operator ()(typename TypeTraits::ParameterType a, + typename TypeTraits::ParameterType b) const + { + return a == b; + } + __host__ __device__ __forceinline__ equal_to() {} + __host__ __device__ __forceinline__ equal_to(const equal_to&) {} + }; + + template struct not_equal_to : binary_function + { + __device__ __forceinline__ bool operator ()(typename TypeTraits::ParameterType a, + typename TypeTraits::ParameterType b) const + { + return a != b; + } + __host__ __device__ __forceinline__ not_equal_to() {} + __host__ __device__ __forceinline__ not_equal_to(const not_equal_to&) {} + }; + + template struct greater : binary_function + { + __device__ __forceinline__ bool operator ()(typename TypeTraits::ParameterType a, + typename TypeTraits::ParameterType b) const + { + return a > b; + } + __host__ __device__ __forceinline__ greater() {} + __host__ __device__ __forceinline__ greater(const greater&) {} + }; + + template struct less : binary_function + { + __device__ __forceinline__ bool operator ()(typename TypeTraits::ParameterType a, + typename TypeTraits::ParameterType b) const + { + return a < 
b; + } + __host__ __device__ __forceinline__ less() {} + __host__ __device__ __forceinline__ less(const less&) {} + }; + + template struct greater_equal : binary_function + { + __device__ __forceinline__ bool operator ()(typename TypeTraits::ParameterType a, + typename TypeTraits::ParameterType b) const + { + return a >= b; + } + __host__ __device__ __forceinline__ greater_equal() {} + __host__ __device__ __forceinline__ greater_equal(const greater_equal&) {} + }; + + template struct less_equal : binary_function + { + __device__ __forceinline__ bool operator ()(typename TypeTraits::ParameterType a, + typename TypeTraits::ParameterType b) const + { + return a <= b; + } + __host__ __device__ __forceinline__ less_equal() {} + __host__ __device__ __forceinline__ less_equal(const less_equal&) {} + }; + + // Logical Operations + template struct logical_and : binary_function + { + __device__ __forceinline__ bool operator ()(typename TypeTraits::ParameterType a, + typename TypeTraits::ParameterType b) const + { + return a && b; + } + __host__ __device__ __forceinline__ logical_and() {} + __host__ __device__ __forceinline__ logical_and(const logical_and&) {} + }; + + template struct logical_or : binary_function + { + __device__ __forceinline__ bool operator ()(typename TypeTraits::ParameterType a, + typename TypeTraits::ParameterType b) const + { + return a || b; + } + __host__ __device__ __forceinline__ logical_or() {} + __host__ __device__ __forceinline__ logical_or(const logical_or&) {} + }; + + template struct logical_not : unary_function + { + __device__ __forceinline__ bool operator ()(typename TypeTraits::ParameterType a) const + { + return !a; + } + __host__ __device__ __forceinline__ logical_not() {} + __host__ __device__ __forceinline__ logical_not(const logical_not&) {} + }; + + // Bitwise Operations + template struct bit_and : binary_function + { + __device__ __forceinline__ T operator ()(typename TypeTraits::ParameterType a, + typename TypeTraits::ParameterType b) const + { + return a & b; + } + __host__ __device__ __forceinline__ bit_and() {} + __host__ __device__ __forceinline__ bit_and(const bit_and&) {} + }; + + template struct bit_or : binary_function + { + __device__ __forceinline__ T operator ()(typename TypeTraits::ParameterType a, + typename TypeTraits::ParameterType b) const + { + return a | b; + } + __host__ __device__ __forceinline__ bit_or() {} + __host__ __device__ __forceinline__ bit_or(const bit_or&) {} + }; + + template struct bit_xor : binary_function + { + __device__ __forceinline__ T operator ()(typename TypeTraits::ParameterType a, + typename TypeTraits::ParameterType b) const + { + return a ^ b; + } + __host__ __device__ __forceinline__ bit_xor() {} + __host__ __device__ __forceinline__ bit_xor(const bit_xor&) {} + }; + + template struct bit_not : unary_function + { + __device__ __forceinline__ T operator ()(typename TypeTraits::ParameterType v) const + { + return ~v; + } + __host__ __device__ __forceinline__ bit_not() {} + __host__ __device__ __forceinline__ bit_not(const bit_not&) {} + }; + + // Generalized Identity Operations + template struct identity : unary_function + { + __device__ __forceinline__ typename TypeTraits::ParameterType operator()(typename TypeTraits::ParameterType x) const + { + return x; + } + __host__ __device__ __forceinline__ identity() {} + __host__ __device__ __forceinline__ identity(const identity&) {} + }; + + template struct project1st : binary_function + { + __device__ __forceinline__ typename TypeTraits::ParameterType 
operator()(typename TypeTraits::ParameterType lhs, typename TypeTraits::ParameterType rhs) const + { + return lhs; + } + __host__ __device__ __forceinline__ project1st() {} + __host__ __device__ __forceinline__ project1st(const project1st&) {} + }; + + template struct project2nd : binary_function + { + __device__ __forceinline__ typename TypeTraits::ParameterType operator()(typename TypeTraits::ParameterType lhs, typename TypeTraits::ParameterType rhs) const + { + return rhs; + } + __host__ __device__ __forceinline__ project2nd() {} + __host__ __device__ __forceinline__ project2nd(const project2nd&) {} + }; + + // Min/Max Operations + +#define OPENCV_GPU_IMPLEMENT_MINMAX(name, type, op) \ + template <> struct name : binary_function \ + { \ + __device__ __forceinline__ type operator()(type lhs, type rhs) const {return op(lhs, rhs);} \ + __host__ __device__ __forceinline__ name() {}\ + __host__ __device__ __forceinline__ name(const name&) {}\ + }; + + template struct maximum : binary_function + { + __device__ __forceinline__ T operator()(typename TypeTraits::ParameterType lhs, typename TypeTraits::ParameterType rhs) const + { + return max(lhs, rhs); + } + __host__ __device__ __forceinline__ maximum() {} + __host__ __device__ __forceinline__ maximum(const maximum&) {} + }; + + OPENCV_GPU_IMPLEMENT_MINMAX(maximum, uchar, ::max) + OPENCV_GPU_IMPLEMENT_MINMAX(maximum, schar, ::max) + OPENCV_GPU_IMPLEMENT_MINMAX(maximum, char, ::max) + OPENCV_GPU_IMPLEMENT_MINMAX(maximum, ushort, ::max) + OPENCV_GPU_IMPLEMENT_MINMAX(maximum, short, ::max) + OPENCV_GPU_IMPLEMENT_MINMAX(maximum, int, ::max) + OPENCV_GPU_IMPLEMENT_MINMAX(maximum, uint, ::max) + OPENCV_GPU_IMPLEMENT_MINMAX(maximum, float, ::fmax) + OPENCV_GPU_IMPLEMENT_MINMAX(maximum, double, ::fmax) + + template struct minimum : binary_function + { + __device__ __forceinline__ T operator()(typename TypeTraits::ParameterType lhs, typename TypeTraits::ParameterType rhs) const + { + return min(lhs, rhs); + } + __host__ __device__ __forceinline__ minimum() {} + __host__ __device__ __forceinline__ minimum(const minimum&) {} + }; + + OPENCV_GPU_IMPLEMENT_MINMAX(minimum, uchar, ::min) + OPENCV_GPU_IMPLEMENT_MINMAX(minimum, schar, ::min) + OPENCV_GPU_IMPLEMENT_MINMAX(minimum, char, ::min) + OPENCV_GPU_IMPLEMENT_MINMAX(minimum, ushort, ::min) + OPENCV_GPU_IMPLEMENT_MINMAX(minimum, short, ::min) + OPENCV_GPU_IMPLEMENT_MINMAX(minimum, int, ::min) + OPENCV_GPU_IMPLEMENT_MINMAX(minimum, uint, ::min) + OPENCV_GPU_IMPLEMENT_MINMAX(minimum, float, ::fmin) + OPENCV_GPU_IMPLEMENT_MINMAX(minimum, double, ::fmin) + +#undef OPENCV_GPU_IMPLEMENT_MINMAX + + // Math functions + + template struct abs_func : unary_function + { + __device__ __forceinline__ T operator ()(typename TypeTraits::ParameterType x) const + { + return abs(x); + } + + __host__ __device__ __forceinline__ abs_func() {} + __host__ __device__ __forceinline__ abs_func(const abs_func&) {} + }; + template <> struct abs_func : unary_function + { + __device__ __forceinline__ unsigned char operator ()(unsigned char x) const + { + return x; + } + + __host__ __device__ __forceinline__ abs_func() {} + __host__ __device__ __forceinline__ abs_func(const abs_func&) {} + }; + template <> struct abs_func : unary_function + { + __device__ __forceinline__ signed char operator ()(signed char x) const + { + return ::abs((int)x); + } + + __host__ __device__ __forceinline__ abs_func() {} + __host__ __device__ __forceinline__ abs_func(const abs_func&) {} + }; + template <> struct abs_func : unary_function + { + __device__ 
__forceinline__ char operator ()(char x) const + { + return ::abs((int)x); + } + + __host__ __device__ __forceinline__ abs_func() {} + __host__ __device__ __forceinline__ abs_func(const abs_func&) {} + }; + template <> struct abs_func : unary_function + { + __device__ __forceinline__ unsigned short operator ()(unsigned short x) const + { + return x; + } + + __host__ __device__ __forceinline__ abs_func() {} + __host__ __device__ __forceinline__ abs_func(const abs_func&) {} + }; + template <> struct abs_func : unary_function + { + __device__ __forceinline__ short operator ()(short x) const + { + return ::abs((int)x); + } + + __host__ __device__ __forceinline__ abs_func() {} + __host__ __device__ __forceinline__ abs_func(const abs_func&) {} + }; + template <> struct abs_func : unary_function + { + __device__ __forceinline__ unsigned int operator ()(unsigned int x) const + { + return x; + } + + __host__ __device__ __forceinline__ abs_func() {} + __host__ __device__ __forceinline__ abs_func(const abs_func&) {} + }; + template <> struct abs_func : unary_function + { + __device__ __forceinline__ int operator ()(int x) const + { + return ::abs(x); + } + + __host__ __device__ __forceinline__ abs_func() {} + __host__ __device__ __forceinline__ abs_func(const abs_func&) {} + }; + template <> struct abs_func : unary_function + { + __device__ __forceinline__ float operator ()(float x) const + { + return ::fabsf(x); + } + + __host__ __device__ __forceinline__ abs_func() {} + __host__ __device__ __forceinline__ abs_func(const abs_func&) {} + }; + template <> struct abs_func : unary_function + { + __device__ __forceinline__ double operator ()(double x) const + { + return ::fabs(x); + } + + __host__ __device__ __forceinline__ abs_func() {} + __host__ __device__ __forceinline__ abs_func(const abs_func&) {} + }; + +#define OPENCV_GPU_IMPLEMENT_UN_FUNCTOR(name, func) \ + template struct name ## _func : unary_function \ + { \ + __device__ __forceinline__ float operator ()(typename TypeTraits::ParameterType v) const \ + { \ + return func ## f(v); \ + } \ + __host__ __device__ __forceinline__ name ## _func() {} \ + __host__ __device__ __forceinline__ name ## _func(const name ## _func&) {} \ + }; \ + template <> struct name ## _func : unary_function \ + { \ + __device__ __forceinline__ double operator ()(double v) const \ + { \ + return func(v); \ + } \ + __host__ __device__ __forceinline__ name ## _func() {} \ + __host__ __device__ __forceinline__ name ## _func(const name ## _func&) {} \ + }; + +#define OPENCV_GPU_IMPLEMENT_BIN_FUNCTOR(name, func) \ + template struct name ## _func : binary_function \ + { \ + __device__ __forceinline__ float operator ()(typename TypeTraits::ParameterType v1, typename TypeTraits::ParameterType v2) const \ + { \ + return func ## f(v1, v2); \ + } \ + __host__ __device__ __forceinline__ name ## _func() {} \ + __host__ __device__ __forceinline__ name ## _func(const name ## _func&) {} \ + }; \ + template <> struct name ## _func : binary_function \ + { \ + __device__ __forceinline__ double operator ()(double v1, double v2) const \ + { \ + return func(v1, v2); \ + } \ + __host__ __device__ __forceinline__ name ## _func() {} \ + __host__ __device__ __forceinline__ name ## _func(const name ## _func&) {} \ + }; + + OPENCV_GPU_IMPLEMENT_UN_FUNCTOR(sqrt, ::sqrt) + OPENCV_GPU_IMPLEMENT_UN_FUNCTOR(exp, ::exp) + OPENCV_GPU_IMPLEMENT_UN_FUNCTOR(exp2, ::exp2) + OPENCV_GPU_IMPLEMENT_UN_FUNCTOR(exp10, ::exp10) + OPENCV_GPU_IMPLEMENT_UN_FUNCTOR(log, ::log) + OPENCV_GPU_IMPLEMENT_UN_FUNCTOR(log2, 
::log2) + OPENCV_GPU_IMPLEMENT_UN_FUNCTOR(log10, ::log10) + OPENCV_GPU_IMPLEMENT_UN_FUNCTOR(sin, ::sin) + OPENCV_GPU_IMPLEMENT_UN_FUNCTOR(cos, ::cos) + OPENCV_GPU_IMPLEMENT_UN_FUNCTOR(tan, ::tan) + OPENCV_GPU_IMPLEMENT_UN_FUNCTOR(asin, ::asin) + OPENCV_GPU_IMPLEMENT_UN_FUNCTOR(acos, ::acos) + OPENCV_GPU_IMPLEMENT_UN_FUNCTOR(atan, ::atan) + OPENCV_GPU_IMPLEMENT_UN_FUNCTOR(sinh, ::sinh) + OPENCV_GPU_IMPLEMENT_UN_FUNCTOR(cosh, ::cosh) + OPENCV_GPU_IMPLEMENT_UN_FUNCTOR(tanh, ::tanh) + OPENCV_GPU_IMPLEMENT_UN_FUNCTOR(asinh, ::asinh) + OPENCV_GPU_IMPLEMENT_UN_FUNCTOR(acosh, ::acosh) + OPENCV_GPU_IMPLEMENT_UN_FUNCTOR(atanh, ::atanh) + + OPENCV_GPU_IMPLEMENT_BIN_FUNCTOR(hypot, ::hypot) + OPENCV_GPU_IMPLEMENT_BIN_FUNCTOR(atan2, ::atan2) + OPENCV_GPU_IMPLEMENT_BIN_FUNCTOR(pow, ::pow) + + #undef OPENCV_GPU_IMPLEMENT_UN_FUNCTOR + #undef OPENCV_GPU_IMPLEMENT_UN_FUNCTOR_NO_DOUBLE + #undef OPENCV_GPU_IMPLEMENT_BIN_FUNCTOR + + template struct hypot_sqr_func : binary_function + { + __device__ __forceinline__ T operator ()(typename TypeTraits::ParameterType src1, typename TypeTraits::ParameterType src2) const + { + return src1 * src1 + src2 * src2; + } + __host__ __device__ __forceinline__ hypot_sqr_func() {} + __host__ __device__ __forceinline__ hypot_sqr_func(const hypot_sqr_func&) {} + }; + + // Saturate Cast Functor + template struct saturate_cast_func : unary_function + { + __device__ __forceinline__ D operator ()(typename TypeTraits::ParameterType v) const + { + return saturate_cast(v); + } + __host__ __device__ __forceinline__ saturate_cast_func() {} + __host__ __device__ __forceinline__ saturate_cast_func(const saturate_cast_func&) {} + }; + + // Threshold Functors + template struct thresh_binary_func : unary_function + { + __host__ __device__ __forceinline__ thresh_binary_func(T thresh_, T maxVal_) : thresh(thresh_), maxVal(maxVal_) {} + + __device__ __forceinline__ T operator()(typename TypeTraits::ParameterType src) const + { + return (src > thresh) * maxVal; + } + + __host__ __device__ __forceinline__ thresh_binary_func() {} + __host__ __device__ __forceinline__ thresh_binary_func(const thresh_binary_func& other) + : thresh(other.thresh), maxVal(other.maxVal) {} + + const T thresh; + const T maxVal; + }; + + template struct thresh_binary_inv_func : unary_function + { + __host__ __device__ __forceinline__ thresh_binary_inv_func(T thresh_, T maxVal_) : thresh(thresh_), maxVal(maxVal_) {} + + __device__ __forceinline__ T operator()(typename TypeTraits::ParameterType src) const + { + return (src <= thresh) * maxVal; + } + + __host__ __device__ __forceinline__ thresh_binary_inv_func() {} + __host__ __device__ __forceinline__ thresh_binary_inv_func(const thresh_binary_inv_func& other) + : thresh(other.thresh), maxVal(other.maxVal) {} + + const T thresh; + const T maxVal; + }; + + template struct thresh_trunc_func : unary_function + { + explicit __host__ __device__ __forceinline__ thresh_trunc_func(T thresh_, T maxVal_ = 0) : thresh(thresh_) {(void)maxVal_;} + + __device__ __forceinline__ T operator()(typename TypeTraits::ParameterType src) const + { + return minimum()(src, thresh); + } + + __host__ __device__ __forceinline__ thresh_trunc_func() {} + __host__ __device__ __forceinline__ thresh_trunc_func(const thresh_trunc_func& other) + : thresh(other.thresh) {} + + const T thresh; + }; + + template struct thresh_to_zero_func : unary_function + { + explicit __host__ __device__ __forceinline__ thresh_to_zero_func(T thresh_, T maxVal_ = 0) : thresh(thresh_) {(void)maxVal_;} + + __device__ __forceinline__ 
T operator()(typename TypeTraits::ParameterType src) const + { + return (src > thresh) * src; + } + + __host__ __device__ __forceinline__ thresh_to_zero_func() {} + __host__ __device__ __forceinline__ thresh_to_zero_func(const thresh_to_zero_func& other) + : thresh(other.thresh) {} + + const T thresh; + }; + + template struct thresh_to_zero_inv_func : unary_function + { + explicit __host__ __device__ __forceinline__ thresh_to_zero_inv_func(T thresh_, T maxVal_ = 0) : thresh(thresh_) {(void)maxVal_;} + + __device__ __forceinline__ T operator()(typename TypeTraits::ParameterType src) const + { + return (src <= thresh) * src; + } + + __host__ __device__ __forceinline__ thresh_to_zero_inv_func() {} + __host__ __device__ __forceinline__ thresh_to_zero_inv_func(const thresh_to_zero_inv_func& other) + : thresh(other.thresh) {} + + const T thresh; + }; + + // Function Object Adaptors + template struct unary_negate : unary_function + { + explicit __host__ __device__ __forceinline__ unary_negate(const Predicate& p) : pred(p) {} + + __device__ __forceinline__ bool operator()(typename TypeTraits::ParameterType x) const + { + return !pred(x); + } + + __host__ __device__ __forceinline__ unary_negate() {} + __host__ __device__ __forceinline__ unary_negate(const unary_negate& other) : pred(other.pred) {} + + const Predicate pred; + }; + + template __host__ __device__ __forceinline__ unary_negate not1(const Predicate& pred) + { + return unary_negate(pred); + } + + template struct binary_negate : binary_function + { + explicit __host__ __device__ __forceinline__ binary_negate(const Predicate& p) : pred(p) {} + + __device__ __forceinline__ bool operator()(typename TypeTraits::ParameterType x, + typename TypeTraits::ParameterType y) const + { + return !pred(x,y); + } + + __host__ __device__ __forceinline__ binary_negate() {} + __host__ __device__ __forceinline__ binary_negate(const binary_negate& other) : pred(other.pred) {} + + const Predicate pred; + }; + + template __host__ __device__ __forceinline__ binary_negate not2(const BinaryPredicate& pred) + { + return binary_negate(pred); + } + + template struct binder1st : unary_function + { + __host__ __device__ __forceinline__ binder1st(const Op& op_, const typename Op::first_argument_type& arg1_) : op(op_), arg1(arg1_) {} + + __device__ __forceinline__ typename Op::result_type operator ()(typename TypeTraits::ParameterType a) const + { + return op(arg1, a); + } + + __host__ __device__ __forceinline__ binder1st() {} + __host__ __device__ __forceinline__ binder1st(const binder1st& other) : op(other.op), arg1(other.arg1) {} + + const Op op; + const typename Op::first_argument_type arg1; + }; + + template __host__ __device__ __forceinline__ binder1st bind1st(const Op& op, const T& x) + { + return binder1st(op, typename Op::first_argument_type(x)); + } + + template struct binder2nd : unary_function + { + __host__ __device__ __forceinline__ binder2nd(const Op& op_, const typename Op::second_argument_type& arg2_) : op(op_), arg2(arg2_) {} + + __forceinline__ __device__ typename Op::result_type operator ()(typename TypeTraits::ParameterType a) const + { + return op(a, arg2); + } + + __host__ __device__ __forceinline__ binder2nd() {} + __host__ __device__ __forceinline__ binder2nd(const binder2nd& other) : op(other.op), arg2(other.arg2) {} + + const Op op; + const typename Op::second_argument_type arg2; + }; + + template __host__ __device__ __forceinline__ binder2nd bind2nd(const Op& op, const T& x) + { + return binder2nd(op, typename Op::second_argument_type(x)); + } 
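The functors and binder adaptors above are designed to be passed by value into kernels. A small sketch follows, with a hand-written element-wise kernel standing in for the generic transform machinery used elsewhere in the GPU module; the kernel, names, values and block size are assumptions.

#include <opencv2/gpu/device/functional.hpp>

using namespace cv::gpu::device;

// Applies any unary device functor element-wise.
template <typename T, class UnOp>
__global__ void applyUnary(const T* src, T* dst, int n, UnOp op)
{
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < n)
        dst[idx] = op(src[idx]);
}

void addThenThreshold(const int* d_src, int* d_dst, int n)
{
    const int block = 256;
    const int grid = (n + block - 1) / block;

    // bind2nd turns the binary plus<int> into a unary "add 10" functor ...
    applyUnary<<<grid, block>>>(d_src, d_dst, n, bind2nd(plus<int>(), 10));

    // ... and thresh_binary_func computes dst = (src > thresh) ? maxVal : 0.
    applyUnary<<<grid, block>>>(d_dst, d_dst, n, thresh_binary_func<int>(100, 255));
}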
+ + // Functor Traits + template struct IsUnaryFunction + { + typedef char Yes; + struct No {Yes a[2];}; + + template static Yes check(unary_function); + static No check(...); + + static F makeF(); + + enum { value = (sizeof(check(makeF())) == sizeof(Yes)) }; + }; + + template struct IsBinaryFunction + { + typedef char Yes; + struct No {Yes a[2];}; + + template static Yes check(binary_function); + static No check(...); + + static F makeF(); + + enum { value = (sizeof(check(makeF())) == sizeof(Yes)) }; + }; + + namespace functional_detail + { + template struct UnOpShift { enum { shift = 1 }; }; + template struct UnOpShift { enum { shift = 4 }; }; + template struct UnOpShift { enum { shift = 2 }; }; + + template struct DefaultUnaryShift + { + enum { shift = UnOpShift::shift }; + }; + + template struct BinOpShift { enum { shift = 1 }; }; + template struct BinOpShift { enum { shift = 4 }; }; + template struct BinOpShift { enum { shift = 2 }; }; + + template struct DefaultBinaryShift + { + enum { shift = BinOpShift::shift }; + }; + + template ::value> struct ShiftDispatcher; + template struct ShiftDispatcher + { + enum { shift = DefaultUnaryShift::shift }; + }; + template struct ShiftDispatcher + { + enum { shift = DefaultBinaryShift::shift }; + }; + } + + template struct DefaultTransformShift + { + enum { shift = functional_detail::ShiftDispatcher::shift }; + }; + + template struct DefaultTransformFunctorTraits + { + enum { simple_block_dim_x = 16 }; + enum { simple_block_dim_y = 16 }; + + enum { smart_block_dim_x = 16 }; + enum { smart_block_dim_y = 16 }; + enum { smart_shift = DefaultTransformShift::shift }; + }; + + template struct TransformFunctorTraits : DefaultTransformFunctorTraits {}; + +#define OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(type) \ + template <> struct TransformFunctorTraits< type > : DefaultTransformFunctorTraits< type > +}}} // namespace cv { namespace gpu { namespace device + +#endif // __OPENCV_GPU_FUNCTIONAL_HPP__ diff --git a/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/limits.hpp b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/limits.hpp new file mode 100644 index 0000000..5959780 --- /dev/null +++ b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/limits.hpp @@ -0,0 +1,122 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. 
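The TransformFunctorTraits machinery that closes functional.hpp above lets callers tune how the transform framework launches a particular functor. A hedged sketch of such a specialization; the functor and the chosen constant are invented purely for illustration.

#include <opencv2/gpu/device/functional.hpp>

// A user-defined unary functor, derived from the device-side unary_function base.
struct ScaleBy : cv::gpu::device::unary_function<float, float>
{
    float s;
    explicit __host__ __device__ ScaleBy(float s_) : s(s_) {}
    __device__ __forceinline__ float operator ()(float v) const { return v * s; }
};

namespace cv { namespace gpu { namespace device
{
    // Keep the default 16x16 block but ask the "smart" path to process
    // four elements per thread for this functor.
    OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(ScaleBy)
    {
        enum { smart_shift = 4 };
    };
}}}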
+// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef __OPENCV_GPU_LIMITS_GPU_HPP__ +#define __OPENCV_GPU_LIMITS_GPU_HPP__ + +#include +#include +#include "common.hpp" + +namespace cv { namespace gpu { namespace device +{ + +template struct numeric_limits; + +template <> struct numeric_limits +{ + __device__ __forceinline__ static bool min() { return false; } + __device__ __forceinline__ static bool max() { return true; } + static const bool is_signed = false; +}; + +template <> struct numeric_limits +{ + __device__ __forceinline__ static signed char min() { return SCHAR_MIN; } + __device__ __forceinline__ static signed char max() { return SCHAR_MAX; } + static const bool is_signed = true; +}; + +template <> struct numeric_limits +{ + __device__ __forceinline__ static unsigned char min() { return 0; } + __device__ __forceinline__ static unsigned char max() { return UCHAR_MAX; } + static const bool is_signed = false; +}; + +template <> struct numeric_limits +{ + __device__ __forceinline__ static short min() { return SHRT_MIN; } + __device__ __forceinline__ static short max() { return SHRT_MAX; } + static const bool is_signed = true; +}; + +template <> struct numeric_limits +{ + __device__ __forceinline__ static unsigned short min() { return 0; } + __device__ __forceinline__ static unsigned short max() { return USHRT_MAX; } + static const bool is_signed = false; +}; + +template <> struct numeric_limits +{ + __device__ __forceinline__ static int min() { return INT_MIN; } + __device__ __forceinline__ static int max() { return INT_MAX; } + static const bool is_signed = true; +}; + +template <> struct numeric_limits +{ + __device__ __forceinline__ static unsigned int min() { return 0; } + __device__ __forceinline__ static unsigned int max() { return UINT_MAX; } + static const bool is_signed = false; +}; + +template <> struct numeric_limits +{ + __device__ __forceinline__ static float min() { return FLT_MIN; } + __device__ __forceinline__ static float max() { return FLT_MAX; } + __device__ __forceinline__ static float epsilon() { return FLT_EPSILON; } + static const bool is_signed = true; +}; + +template <> struct numeric_limits +{ + __device__ __forceinline__ static double min() { return DBL_MIN; } + __device__ __forceinline__ static double max() { return DBL_MAX; } + __device__ __forceinline__ static double epsilon() { return DBL_EPSILON; } + static const bool is_signed = true; +}; + +}}} // namespace cv { namespace gpu { namespace device { + +#endif // __OPENCV_GPU_LIMITS_GPU_HPP__ diff --git a/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/reduce.hpp 
b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/reduce.hpp new file mode 100644 index 0000000..2161b06 --- /dev/null +++ b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/reduce.hpp @@ -0,0 +1,197 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. 
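The device-side numeric_limits specializations defined above in limits.hpp are typically used to seed reductions. A minimal sketch, where the kernel and the fixed power-of-two block of 256 threads are assumptions:

#include <opencv2/gpu/device/limits.hpp>

// Per-block minimum; expects a power-of-two block of exactly 256 threads.
__global__ void blockMinimum(const float* data, int n, float* blockMins)
{
    __shared__ float smem[256];

    const int tid = threadIdx.x;
    const int idx = blockIdx.x * blockDim.x + tid;

    // Out-of-range threads contribute the largest representable float,
    // so they can never win the minimum.
    smem[tid] = (idx < n) ? data[idx] : cv::gpu::device::numeric_limits<float>::max();
    __syncthreads();

    for (int stride = blockDim.x / 2; stride > 0; stride /= 2)
    {
        if (tid < stride)
            smem[tid] = fminf(smem[tid], smem[tid + stride]);
        __syncthreads();
    }

    if (tid == 0)
        blockMins[blockIdx.x] = smem[0];
}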
+// +//M*/ + +#ifndef __OPENCV_GPU_REDUCE_HPP__ +#define __OPENCV_GPU_REDUCE_HPP__ + +#include +#include "detail/reduce.hpp" +#include "detail/reduce_key_val.hpp" + +namespace cv { namespace gpu { namespace device +{ + template + __device__ __forceinline__ void reduce(volatile T* smem, T& val, unsigned int tid, const Op& op) + { + reduce_detail::Dispatcher::reductor::template reduce(smem, val, tid, op); + } + template + __device__ __forceinline__ void reduce(const thrust::tuple& smem, + const thrust::tuple& val, + unsigned int tid, + const thrust::tuple& op) + { + reduce_detail::Dispatcher::reductor::template reduce< + const thrust::tuple&, + const thrust::tuple&, + const thrust::tuple&>(smem, val, tid, op); + } + + template + __device__ __forceinline__ void reduceKeyVal(volatile K* skeys, K& key, volatile V* svals, V& val, unsigned int tid, const Cmp& cmp) + { + reduce_key_val_detail::Dispatcher::reductor::template reduce(skeys, key, svals, val, tid, cmp); + } + template + __device__ __forceinline__ void reduceKeyVal(volatile K* skeys, K& key, + const thrust::tuple& svals, + const thrust::tuple& val, + unsigned int tid, const Cmp& cmp) + { + reduce_key_val_detail::Dispatcher::reductor::template reduce&, + const thrust::tuple&, + const Cmp&>(skeys, key, svals, val, tid, cmp); + } + template + __device__ __forceinline__ void reduceKeyVal(const thrust::tuple& skeys, + const thrust::tuple& key, + const thrust::tuple& svals, + const thrust::tuple& val, + unsigned int tid, + const thrust::tuple& cmp) + { + reduce_key_val_detail::Dispatcher::reductor::template reduce< + const thrust::tuple&, + const thrust::tuple&, + const thrust::tuple&, + const thrust::tuple&, + const thrust::tuple& + >(skeys, key, svals, val, tid, cmp); + } + + // smem_tuple + + template + __device__ __forceinline__ + thrust::tuple + smem_tuple(T0* t0) + { + return thrust::make_tuple((volatile T0*) t0); + } + + template + __device__ __forceinline__ + thrust::tuple + smem_tuple(T0* t0, T1* t1) + { + return thrust::make_tuple((volatile T0*) t0, (volatile T1*) t1); + } + + template + __device__ __forceinline__ + thrust::tuple + smem_tuple(T0* t0, T1* t1, T2* t2) + { + return thrust::make_tuple((volatile T0*) t0, (volatile T1*) t1, (volatile T2*) t2); + } + + template + __device__ __forceinline__ + thrust::tuple + smem_tuple(T0* t0, T1* t1, T2* t2, T3* t3) + { + return thrust::make_tuple((volatile T0*) t0, (volatile T1*) t1, (volatile T2*) t2, (volatile T3*) t3); + } + + template + __device__ __forceinline__ + thrust::tuple + smem_tuple(T0* t0, T1* t1, T2* t2, T3* t3, T4* t4) + { + return thrust::make_tuple((volatile T0*) t0, (volatile T1*) t1, (volatile T2*) t2, (volatile T3*) t3, (volatile T4*) t4); + } + + template + __device__ __forceinline__ + thrust::tuple + smem_tuple(T0* t0, T1* t1, T2* t2, T3* t3, T4* t4, T5* t5) + { + return thrust::make_tuple((volatile T0*) t0, (volatile T1*) t1, (volatile T2*) t2, (volatile T3*) t3, (volatile T4*) t4, (volatile T5*) t5); + } + + template + __device__ __forceinline__ + thrust::tuple + smem_tuple(T0* t0, T1* t1, T2* t2, T3* t3, T4* t4, T5* t5, T6* t6) + { + return thrust::make_tuple((volatile T0*) t0, (volatile T1*) t1, (volatile T2*) t2, (volatile T3*) t3, (volatile T4*) t4, (volatile T5*) t5, (volatile T6*) t6); + } + + template + __device__ __forceinline__ + thrust::tuple + smem_tuple(T0* t0, T1* t1, T2* t2, T3* t3, T4* t4, T5* t5, T6* t6, T7* t7) + { + return thrust::make_tuple((volatile T0*) t0, (volatile T1*) t1, (volatile T2*) t2, (volatile T3*) t3, (volatile T4*) t4, (volatile 
T5*) t5, (volatile T6*) t6, (volatile T7*) t7); + } + + template + __device__ __forceinline__ + thrust::tuple + smem_tuple(T0* t0, T1* t1, T2* t2, T3* t3, T4* t4, T5* t5, T6* t6, T7* t7, T8* t8) + { + return thrust::make_tuple((volatile T0*) t0, (volatile T1*) t1, (volatile T2*) t2, (volatile T3*) t3, (volatile T4*) t4, (volatile T5*) t5, (volatile T6*) t6, (volatile T7*) t7, (volatile T8*) t8); + } + + template + __device__ __forceinline__ + thrust::tuple + smem_tuple(T0* t0, T1* t1, T2* t2, T3* t3, T4* t4, T5* t5, T6* t6, T7* t7, T8* t8, T9* t9) + { + return thrust::make_tuple((volatile T0*) t0, (volatile T1*) t1, (volatile T2*) t2, (volatile T3*) t3, (volatile T4*) t4, (volatile T5*) t5, (volatile T6*) t6, (volatile T7*) t7, (volatile T8*) t8, (volatile T9*) t9); + } +}}} + +#endif // __OPENCV_GPU_UTILITY_HPP__ diff --git a/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/saturate_cast.hpp b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/saturate_cast.hpp new file mode 100644 index 0000000..7a2799f --- /dev/null +++ b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/saturate_cast.hpp @@ -0,0 +1,284 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. 
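The block-wide reduce() helper added above in reduce.hpp pairs with the functors from functional.hpp. A minimal sum-per-block sketch, assuming a fixed 256-thread block that matches the template argument; kernel and names are illustrative.

#include <opencv2/gpu/device/reduce.hpp>
#include <opencv2/gpu/device/functional.hpp>

using namespace cv::gpu::device;

__global__ void blockSum(const float* data, int n, float* blockSums)
{
    __shared__ float smem[256];

    const int tid = threadIdx.x;
    const int idx = blockIdx.x * 256 + tid;

    float val = (idx < n) ? data[idx] : 0.0f;

    // Combines the per-thread values across the whole block; afterwards
    // thread 0 holds the block total in val.
    reduce<256>(smem, val, tid, plus<float>());

    if (tid == 0)
        blockSums[blockIdx.x] = val;
}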
+// +//M*/ + +#ifndef __OPENCV_GPU_SATURATE_CAST_HPP__ +#define __OPENCV_GPU_SATURATE_CAST_HPP__ + +#include "common.hpp" + +namespace cv { namespace gpu { namespace device +{ + template __device__ __forceinline__ _Tp saturate_cast(uchar v) { return _Tp(v); } + template __device__ __forceinline__ _Tp saturate_cast(schar v) { return _Tp(v); } + template __device__ __forceinline__ _Tp saturate_cast(ushort v) { return _Tp(v); } + template __device__ __forceinline__ _Tp saturate_cast(short v) { return _Tp(v); } + template __device__ __forceinline__ _Tp saturate_cast(uint v) { return _Tp(v); } + template __device__ __forceinline__ _Tp saturate_cast(int v) { return _Tp(v); } + template __device__ __forceinline__ _Tp saturate_cast(float v) { return _Tp(v); } + template __device__ __forceinline__ _Tp saturate_cast(double v) { return _Tp(v); } + + template<> __device__ __forceinline__ uchar saturate_cast(schar v) + { + uint res = 0; + int vi = v; + asm("cvt.sat.u8.s8 %0, %1;" : "=r"(res) : "r"(vi)); + return res; + } + template<> __device__ __forceinline__ uchar saturate_cast(short v) + { + uint res = 0; + asm("cvt.sat.u8.s16 %0, %1;" : "=r"(res) : "h"(v)); + return res; + } + template<> __device__ __forceinline__ uchar saturate_cast(ushort v) + { + uint res = 0; + asm("cvt.sat.u8.u16 %0, %1;" : "=r"(res) : "h"(v)); + return res; + } + template<> __device__ __forceinline__ uchar saturate_cast(int v) + { + uint res = 0; + asm("cvt.sat.u8.s32 %0, %1;" : "=r"(res) : "r"(v)); + return res; + } + template<> __device__ __forceinline__ uchar saturate_cast(uint v) + { + uint res = 0; + asm("cvt.sat.u8.u32 %0, %1;" : "=r"(res) : "r"(v)); + return res; + } + template<> __device__ __forceinline__ uchar saturate_cast(float v) + { + uint res = 0; + asm("cvt.rni.sat.u8.f32 %0, %1;" : "=r"(res) : "f"(v)); + return res; + } + template<> __device__ __forceinline__ uchar saturate_cast(double v) + { + #if __CUDA_ARCH__ >= 130 + uint res = 0; + asm("cvt.rni.sat.u8.f64 %0, %1;" : "=r"(res) : "d"(v)); + return res; + #else + return saturate_cast((float)v); + #endif + } + + template<> __device__ __forceinline__ schar saturate_cast(uchar v) + { + uint res = 0; + uint vi = v; + asm("cvt.sat.s8.u8 %0, %1;" : "=r"(res) : "r"(vi)); + return res; + } + template<> __device__ __forceinline__ schar saturate_cast(short v) + { + uint res = 0; + asm("cvt.sat.s8.s16 %0, %1;" : "=r"(res) : "h"(v)); + return res; + } + template<> __device__ __forceinline__ schar saturate_cast(ushort v) + { + uint res = 0; + asm("cvt.sat.s8.u16 %0, %1;" : "=r"(res) : "h"(v)); + return res; + } + template<> __device__ __forceinline__ schar saturate_cast(int v) + { + uint res = 0; + asm("cvt.sat.s8.s32 %0, %1;" : "=r"(res) : "r"(v)); + return res; + } + template<> __device__ __forceinline__ schar saturate_cast(uint v) + { + uint res = 0; + asm("cvt.sat.s8.u32 %0, %1;" : "=r"(res) : "r"(v)); + return res; + } + template<> __device__ __forceinline__ schar saturate_cast(float v) + { + uint res = 0; + asm("cvt.rni.sat.s8.f32 %0, %1;" : "=r"(res) : "f"(v)); + return res; + } + template<> __device__ __forceinline__ schar saturate_cast(double v) + { + #if __CUDA_ARCH__ >= 130 + uint res = 0; + asm("cvt.rni.sat.s8.f64 %0, %1;" : "=r"(res) : "d"(v)); + return res; + #else + return saturate_cast((float)v); + #endif + } + + template<> __device__ __forceinline__ ushort saturate_cast(schar v) + { + ushort res = 0; + int vi = v; + asm("cvt.sat.u16.s8 %0, %1;" : "=h"(res) : "r"(vi)); + return res; + } + template<> __device__ __forceinline__ ushort saturate_cast(short v) 
+ { + ushort res = 0; + asm("cvt.sat.u16.s16 %0, %1;" : "=h"(res) : "h"(v)); + return res; + } + template<> __device__ __forceinline__ ushort saturate_cast(int v) + { + ushort res = 0; + asm("cvt.sat.u16.s32 %0, %1;" : "=h"(res) : "r"(v)); + return res; + } + template<> __device__ __forceinline__ ushort saturate_cast(uint v) + { + ushort res = 0; + asm("cvt.sat.u16.u32 %0, %1;" : "=h"(res) : "r"(v)); + return res; + } + template<> __device__ __forceinline__ ushort saturate_cast(float v) + { + ushort res = 0; + asm("cvt.rni.sat.u16.f32 %0, %1;" : "=h"(res) : "f"(v)); + return res; + } + template<> __device__ __forceinline__ ushort saturate_cast(double v) + { + #if __CUDA_ARCH__ >= 130 + ushort res = 0; + asm("cvt.rni.sat.u16.f64 %0, %1;" : "=h"(res) : "d"(v)); + return res; + #else + return saturate_cast((float)v); + #endif + } + + template<> __device__ __forceinline__ short saturate_cast(ushort v) + { + short res = 0; + asm("cvt.sat.s16.u16 %0, %1;" : "=h"(res) : "h"(v)); + return res; + } + template<> __device__ __forceinline__ short saturate_cast(int v) + { + short res = 0; + asm("cvt.sat.s16.s32 %0, %1;" : "=h"(res) : "r"(v)); + return res; + } + template<> __device__ __forceinline__ short saturate_cast(uint v) + { + short res = 0; + asm("cvt.sat.s16.u32 %0, %1;" : "=h"(res) : "r"(v)); + return res; + } + template<> __device__ __forceinline__ short saturate_cast(float v) + { + short res = 0; + asm("cvt.rni.sat.s16.f32 %0, %1;" : "=h"(res) : "f"(v)); + return res; + } + template<> __device__ __forceinline__ short saturate_cast(double v) + { + #if __CUDA_ARCH__ >= 130 + short res = 0; + asm("cvt.rni.sat.s16.f64 %0, %1;" : "=h"(res) : "d"(v)); + return res; + #else + return saturate_cast((float)v); + #endif + } + + template<> __device__ __forceinline__ int saturate_cast(uint v) + { + int res = 0; + asm("cvt.sat.s32.u32 %0, %1;" : "=r"(res) : "r"(v)); + return res; + } + template<> __device__ __forceinline__ int saturate_cast(float v) + { + return __float2int_rn(v); + } + template<> __device__ __forceinline__ int saturate_cast(double v) + { + #if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 130 + return __double2int_rn(v); + #else + return saturate_cast((float)v); + #endif + } + + template<> __device__ __forceinline__ uint saturate_cast(schar v) + { + uint res = 0; + int vi = v; + asm("cvt.sat.u32.s8 %0, %1;" : "=r"(res) : "r"(vi)); + return res; + } + template<> __device__ __forceinline__ uint saturate_cast(short v) + { + uint res = 0; + asm("cvt.sat.u32.s16 %0, %1;" : "=r"(res) : "h"(v)); + return res; + } + template<> __device__ __forceinline__ uint saturate_cast(int v) + { + uint res = 0; + asm("cvt.sat.u32.s32 %0, %1;" : "=r"(res) : "r"(v)); + return res; + } + template<> __device__ __forceinline__ uint saturate_cast(float v) + { + return __float2uint_rn(v); + } + template<> __device__ __forceinline__ uint saturate_cast(double v) + { + #if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 130 + return __double2uint_rn(v); + #else + return saturate_cast((float)v); + #endif + } +}}} + +#endif /* __OPENCV_GPU_SATURATE_CAST_HPP__ */ diff --git a/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/scan.hpp b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/scan.hpp new file mode 100644 index 0000000..3d8da16 --- /dev/null +++ b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/scan.hpp @@ -0,0 +1,250 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. 
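The saturate_cast specializations above map to single PTX cvt instructions that clamp to the destination range and, for floating-point sources, round to nearest. A small conversion kernel as a sketch; the kernel and its names are assumptions.

#include <opencv2/gpu/device/saturate_cast.hpp>

// Scales a float image and stores it as 8-bit, clamping to [0, 255] and
// rounding to the nearest integer instead of truncating.
__global__ void convertToU8(const float* src, unsigned char* dst, int n, float scale)
{
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < n)
        dst[idx] = cv::gpu::device::saturate_cast<unsigned char>(src[idx] * scale);
}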
+// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef __OPENCV_GPU_SCAN_HPP__ +#define __OPENCV_GPU_SCAN_HPP__ + +#include "opencv2/gpu/device/common.hpp" +#include "opencv2/gpu/device/utility.hpp" +#include "opencv2/gpu/device/warp.hpp" +#include "opencv2/gpu/device/warp_shuffle.hpp" + +namespace cv { namespace gpu { namespace device +{ + enum ScanKind { EXCLUSIVE = 0, INCLUSIVE = 1 }; + + template struct WarpScan + { + __device__ __forceinline__ WarpScan() {} + __device__ __forceinline__ WarpScan(const WarpScan& other) { (void)other; } + + __device__ __forceinline__ T operator()( volatile T *ptr , const unsigned int idx) + { + const unsigned int lane = idx & 31; + F op; + + if ( lane >= 1) ptr [idx ] = op(ptr [idx - 1], ptr [idx]); + if ( lane >= 2) ptr [idx ] = op(ptr [idx - 2], ptr [idx]); + if ( lane >= 4) ptr [idx ] = op(ptr [idx - 4], ptr [idx]); + if ( lane >= 8) ptr [idx ] = op(ptr [idx - 8], ptr [idx]); + if ( lane >= 16) ptr [idx ] = op(ptr [idx - 16], ptr [idx]); + + if( Kind == INCLUSIVE ) + return ptr [idx]; + else + return (lane > 0) ? 
ptr [idx - 1] : 0; + } + + __device__ __forceinline__ unsigned int index(const unsigned int tid) + { + return tid; + } + + __device__ __forceinline__ void init(volatile T *ptr){} + + static const int warp_offset = 0; + + typedef WarpScan merge; + }; + + template struct WarpScanNoComp + { + __device__ __forceinline__ WarpScanNoComp() {} + __device__ __forceinline__ WarpScanNoComp(const WarpScanNoComp& other) { (void)other; } + + __device__ __forceinline__ T operator()( volatile T *ptr , const unsigned int idx) + { + const unsigned int lane = threadIdx.x & 31; + F op; + + ptr [idx ] = op(ptr [idx - 1], ptr [idx]); + ptr [idx ] = op(ptr [idx - 2], ptr [idx]); + ptr [idx ] = op(ptr [idx - 4], ptr [idx]); + ptr [idx ] = op(ptr [idx - 8], ptr [idx]); + ptr [idx ] = op(ptr [idx - 16], ptr [idx]); + + if( Kind == INCLUSIVE ) + return ptr [idx]; + else + return (lane > 0) ? ptr [idx - 1] : 0; + } + + __device__ __forceinline__ unsigned int index(const unsigned int tid) + { + return (tid >> warp_log) * warp_smem_stride + 16 + (tid & warp_mask); + } + + __device__ __forceinline__ void init(volatile T *ptr) + { + ptr[threadIdx.x] = 0; + } + + static const int warp_smem_stride = 32 + 16 + 1; + static const int warp_offset = 16; + static const int warp_log = 5; + static const int warp_mask = 31; + + typedef WarpScanNoComp merge; + }; + + template struct BlockScan + { + __device__ __forceinline__ BlockScan() {} + __device__ __forceinline__ BlockScan(const BlockScan& other) { (void)other; } + + __device__ __forceinline__ T operator()(volatile T *ptr) + { + const unsigned int tid = threadIdx.x; + const unsigned int lane = tid & warp_mask; + const unsigned int warp = tid >> warp_log; + + Sc scan; + typename Sc::merge merge_scan; + const unsigned int idx = scan.index(tid); + + T val = scan(ptr, idx); + __syncthreads (); + + if( warp == 0) + scan.init(ptr); + __syncthreads (); + + if( lane == 31 ) + ptr [scan.warp_offset + warp ] = (Kind == INCLUSIVE) ? 
val : ptr [idx]; + __syncthreads (); + + if( warp == 0 ) + merge_scan(ptr, idx); + __syncthreads(); + + if ( warp > 0) + val = ptr [scan.warp_offset + warp - 1] + val; + __syncthreads (); + + ptr[idx] = val; + __syncthreads (); + + return val ; + } + + static const int warp_log = 5; + static const int warp_mask = 31; + }; + + template + __device__ T warpScanInclusive(T idata, volatile T* s_Data, unsigned int tid) + { + #if __CUDA_ARCH__ >= 300 + const unsigned int laneId = cv::gpu::device::Warp::laneId(); + + // scan on shuffl functions + #pragma unroll + for (int i = 1; i <= (OPENCV_GPU_WARP_SIZE / 2); i *= 2) + { + const T n = cv::gpu::device::shfl_up(idata, i); + if (laneId >= i) + idata += n; + } + + return idata; + #else + unsigned int pos = 2 * tid - (tid & (OPENCV_GPU_WARP_SIZE - 1)); + s_Data[pos] = 0; + pos += OPENCV_GPU_WARP_SIZE; + s_Data[pos] = idata; + + s_Data[pos] += s_Data[pos - 1]; + s_Data[pos] += s_Data[pos - 2]; + s_Data[pos] += s_Data[pos - 4]; + s_Data[pos] += s_Data[pos - 8]; + s_Data[pos] += s_Data[pos - 16]; + + return s_Data[pos]; + #endif + } + + template + __device__ __forceinline__ T warpScanExclusive(T idata, volatile T* s_Data, unsigned int tid) + { + return warpScanInclusive(idata, s_Data, tid) - idata; + } + + template + __device__ T blockScanInclusive(T idata, volatile T* s_Data, unsigned int tid) + { + if (tiNumScanThreads > OPENCV_GPU_WARP_SIZE) + { + //Bottom-level inclusive warp scan + T warpResult = warpScanInclusive(idata, s_Data, tid); + + //Save top elements of each warp for exclusive warp scan + //sync to wait for warp scans to complete (because s_Data is being overwritten) + __syncthreads(); + if ((tid & (OPENCV_GPU_WARP_SIZE - 1)) == (OPENCV_GPU_WARP_SIZE - 1)) + { + s_Data[tid >> OPENCV_GPU_LOG_WARP_SIZE] = warpResult; + } + + //wait for warp scans to complete + __syncthreads(); + + if (tid < (tiNumScanThreads / OPENCV_GPU_WARP_SIZE) ) + { + //grab top warp elements + T val = s_Data[tid]; + //calculate exclusive scan and write back to shared memory + s_Data[tid] = warpScanExclusive(val, s_Data, tid); + } + + //return updated warp scans with exclusive scan results + __syncthreads(); + + return warpResult + s_Data[tid >> OPENCV_GPU_LOG_WARP_SIZE]; + } + else + { + return warpScanInclusive(idata, s_Data, tid); + } + } +}}} + +#endif // __OPENCV_GPU_SCAN_HPP__ diff --git a/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/simd_functions.hpp b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/simd_functions.hpp new file mode 100644 index 0000000..b0377e5 --- /dev/null +++ b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/simd_functions.hpp @@ -0,0 +1,909 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. 
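A quick illustration of the scan primitives defined in the header above. The template parameter lists appear to have been lost in this patch text; judging from the `tiNumScanThreads` identifier that survives, the upstream `blockScanInclusive` takes the number of scanning threads as a compile-time integer parameter, which is what the sketch below assumes. The kernel name and launch shape are hypothetical, not part of the patch.

    #include "opencv2/gpu/device/scan.hpp"

    // Hypothetical demo kernel: each block computes an inclusive prefix sum of its
    // BLOCK_SIZE elements with blockScanInclusive. The pre-Kepler fallback path in
    // warpScanInclusive indexes shared memory up to 2 * BLOCK_SIZE - 1, so the
    // scratch buffer is sized accordingly.
    template <int BLOCK_SIZE>
    __global__ void blockPrefixSum(const int* in, int* out, int n)
    {
        __shared__ int s_data[2 * BLOCK_SIZE];

        const unsigned int tid = threadIdx.x;
        const int idx = blockIdx.x * BLOCK_SIZE + tid;
        const int val = (idx < n) ? in[idx] : 0;

        const int scanned = cv::gpu::device::blockScanInclusive<BLOCK_SIZE>(val, s_data, tid);

        if (idx < n)
            out[idx] = scanned;
    }

On sm_30 and newer the warp-level step uses shuffle instructions and barely touches shared memory; on older devices the same call falls back to the shared-memory ladder shown in the header.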
+// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +/* + * Copyright (c) 2013 NVIDIA Corporation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * Neither the name of NVIDIA Corporation nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef __OPENCV_GPU_SIMD_FUNCTIONS_HPP__ +#define __OPENCV_GPU_SIMD_FUNCTIONS_HPP__ + +#include "common.hpp" + +/* + This header file contains inline functions that implement intra-word SIMD + operations, that are hardware accelerated on sm_3x (Kepler) GPUs. Efficient + emulation code paths are provided for earlier architectures (sm_1x, sm_2x) + to make the code portable across all GPUs supported by CUDA. 
The following + functions are currently implemented: + + vadd2(a,b) per-halfword unsigned addition, with wrap-around: a + b + vsub2(a,b) per-halfword unsigned subtraction, with wrap-around: a - b + vabsdiff2(a,b) per-halfword unsigned absolute difference: |a - b| + vavg2(a,b) per-halfword unsigned average: (a + b) / 2 + vavrg2(a,b) per-halfword unsigned rounded average: (a + b + 1) / 2 + vseteq2(a,b) per-halfword unsigned comparison: a == b ? 1 : 0 + vcmpeq2(a,b) per-halfword unsigned comparison: a == b ? 0xffff : 0 + vsetge2(a,b) per-halfword unsigned comparison: a >= b ? 1 : 0 + vcmpge2(a,b) per-halfword unsigned comparison: a >= b ? 0xffff : 0 + vsetgt2(a,b) per-halfword unsigned comparison: a > b ? 1 : 0 + vcmpgt2(a,b) per-halfword unsigned comparison: a > b ? 0xffff : 0 + vsetle2(a,b) per-halfword unsigned comparison: a <= b ? 1 : 0 + vcmple2(a,b) per-halfword unsigned comparison: a <= b ? 0xffff : 0 + vsetlt2(a,b) per-halfword unsigned comparison: a < b ? 1 : 0 + vcmplt2(a,b) per-halfword unsigned comparison: a < b ? 0xffff : 0 + vsetne2(a,b) per-halfword unsigned comparison: a != b ? 1 : 0 + vcmpne2(a,b) per-halfword unsigned comparison: a != b ? 0xffff : 0 + vmax2(a,b) per-halfword unsigned maximum: max(a, b) + vmin2(a,b) per-halfword unsigned minimum: min(a, b) + + vadd4(a,b) per-byte unsigned addition, with wrap-around: a + b + vsub4(a,b) per-byte unsigned subtraction, with wrap-around: a - b + vabsdiff4(a,b) per-byte unsigned absolute difference: |a - b| + vavg4(a,b) per-byte unsigned average: (a + b) / 2 + vavrg4(a,b) per-byte unsigned rounded average: (a + b + 1) / 2 + vseteq4(a,b) per-byte unsigned comparison: a == b ? 1 : 0 + vcmpeq4(a,b) per-byte unsigned comparison: a == b ? 0xff : 0 + vsetge4(a,b) per-byte unsigned comparison: a >= b ? 1 : 0 + vcmpge4(a,b) per-byte unsigned comparison: a >= b ? 0xff : 0 + vsetgt4(a,b) per-byte unsigned comparison: a > b ? 1 : 0 + vcmpgt4(a,b) per-byte unsigned comparison: a > b ? 0xff : 0 + vsetle4(a,b) per-byte unsigned comparison: a <= b ? 1 : 0 + vcmple4(a,b) per-byte unsigned comparison: a <= b ? 0xff : 0 + vsetlt4(a,b) per-byte unsigned comparison: a < b ? 1 : 0 + vcmplt4(a,b) per-byte unsigned comparison: a < b ? 0xff : 0 + vsetne4(a,b) per-byte unsigned comparison: a != b ? 1: 0 + vcmpne4(a,b) per-byte unsigned comparison: a != b ? 
0xff: 0 + vmax4(a,b) per-byte unsigned maximum: max(a, b) + vmin4(a,b) per-byte unsigned minimum: min(a, b) +*/ + +namespace cv { namespace gpu { namespace device +{ + // 2 + + static __device__ __forceinline__ unsigned int vadd2(unsigned int a, unsigned int b) + { + unsigned int r = 0; + + #if __CUDA_ARCH__ >= 300 + asm("vadd2.u32.u32.u32.sat %0, %1, %2, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r)); + #elif __CUDA_ARCH__ >= 200 + asm("vadd.u32.u32.u32.sat %0.h0, %1.h0, %2.h0, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r)); + asm("vadd.u32.u32.u32.sat %0.h1, %1.h1, %2.h1, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r)); + #else + unsigned int s; + s = a ^ b; // sum bits + r = a + b; // actual sum + s = s ^ r; // determine carry-ins for each bit position + s = s & 0x00010000; // carry-in to high word (= carry-out from low word) + r = r - s; // subtract out carry-out from low word + #endif + + return r; + } + + static __device__ __forceinline__ unsigned int vsub2(unsigned int a, unsigned int b) + { + unsigned int r = 0; + + #if __CUDA_ARCH__ >= 300 + asm("vsub2.u32.u32.u32.sat %0, %1, %2, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r)); + #elif __CUDA_ARCH__ >= 200 + asm("vsub.u32.u32.u32.sat %0.h0, %1.h0, %2.h0, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r)); + asm("vsub.u32.u32.u32.sat %0.h1, %1.h1, %2.h1, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r)); + #else + unsigned int s; + s = a ^ b; // sum bits + r = a - b; // actual sum + s = s ^ r; // determine carry-ins for each bit position + s = s & 0x00010000; // borrow to high word + r = r + s; // compensate for borrow from low word + #endif + + return r; + } + + static __device__ __forceinline__ unsigned int vabsdiff2(unsigned int a, unsigned int b) + { + unsigned int r = 0; + + #if __CUDA_ARCH__ >= 300 + asm("vabsdiff2.u32.u32.u32.sat %0, %1, %2, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r)); + #elif __CUDA_ARCH__ >= 200 + asm("vabsdiff.u32.u32.u32.sat %0.h0, %1.h0, %2.h0, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r)); + asm("vabsdiff.u32.u32.u32.sat %0.h1, %1.h1, %2.h1, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r)); + #else + unsigned int s, t, u, v; + s = a & 0x0000ffff; // extract low halfword + r = b & 0x0000ffff; // extract low halfword + u = ::max(r, s); // maximum of low halfwords + v = ::min(r, s); // minimum of low halfwords + s = a & 0xffff0000; // extract high halfword + r = b & 0xffff0000; // extract high halfword + t = ::max(r, s); // maximum of high halfwords + s = ::min(r, s); // minimum of high halfwords + r = u | t; // maximum of both halfwords + s = v | s; // minimum of both halfwords + r = r - s; // |a - b| = max(a,b) - min(a,b); + #endif + + return r; + } + + static __device__ __forceinline__ unsigned int vavg2(unsigned int a, unsigned int b) + { + unsigned int r, s; + + // HAKMEM #23: a + b = 2 * (a & b) + (a ^ b) ==> + // (a + b) / 2 = (a & b) + ((a ^ b) >> 1) + s = a ^ b; + r = a & b; + s = s & 0xfffefffe; // ensure shift doesn't cross halfword boundaries + s = s >> 1; + s = r + s; + + return s; + } + + static __device__ __forceinline__ unsigned int vavrg2(unsigned int a, unsigned int b) + { + unsigned int r = 0; + + #if __CUDA_ARCH__ >= 300 + asm("vavrg2.u32.u32.u32 %0, %1, %2, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r)); + #else + // HAKMEM #23: a + b = 2 * (a | b) - (a ^ b) ==> + // (a + b + 1) / 2 = (a | b) - ((a ^ b) >> 1) + unsigned int s; + s = a ^ b; + r = a | b; + s = s & 0xfffefffe; // ensure shift doesn't cross half-word boundaries + s = s >> 1; + r = r - s; + #endif + + return r; + } + + static __device__ __forceinline__ unsigned int vseteq2(unsigned int a, 
unsigned int b) + { + unsigned int r = 0; + + #if __CUDA_ARCH__ >= 300 + asm("vset2.u32.u32.eq %0, %1, %2, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r)); + #else + // inspired by Alan Mycroft's null-byte detection algorithm: + // null_byte(x) = ((x - 0x01010101) & (~x & 0x80808080)) + unsigned int c; + r = a ^ b; // 0x0000 if a == b + c = r | 0x80008000; // set msbs, to catch carry out + r = r ^ c; // extract msbs, msb = 1 if r < 0x8000 + c = c - 0x00010001; // msb = 0, if r was 0x0000 or 0x8000 + c = r & ~c; // msb = 1, if r was 0x0000 + r = c >> 15; // convert to bool + #endif + + return r; + } + + static __device__ __forceinline__ unsigned int vcmpeq2(unsigned int a, unsigned int b) + { + unsigned int r, c; + + #if __CUDA_ARCH__ >= 300 + r = vseteq2(a, b); + c = r << 16; // convert bool + r = c - r; // into mask + #else + // inspired by Alan Mycroft's null-byte detection algorithm: + // null_byte(x) = ((x - 0x01010101) & (~x & 0x80808080)) + r = a ^ b; // 0x0000 if a == b + c = r | 0x80008000; // set msbs, to catch carry out + r = r ^ c; // extract msbs, msb = 1 if r < 0x8000 + c = c - 0x00010001; // msb = 0, if r was 0x0000 or 0x8000 + c = r & ~c; // msb = 1, if r was 0x0000 + r = c >> 15; // convert + r = c - r; // msbs to + r = c | r; // mask + #endif + + return r; + } + + static __device__ __forceinline__ unsigned int vsetge2(unsigned int a, unsigned int b) + { + unsigned int r = 0; + + #if __CUDA_ARCH__ >= 300 + asm("vset2.u32.u32.ge %0, %1, %2, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r)); + #else + unsigned int c; + asm("not.b32 %0, %0;" : "+r"(b)); + c = vavrg2(a, b); // (a + ~b + 1) / 2 = (a - b) / 2 + c = c & 0x80008000; // msb = carry-outs + r = c >> 15; // convert to bool + #endif + + return r; + } + + static __device__ __forceinline__ unsigned int vcmpge2(unsigned int a, unsigned int b) + { + unsigned int r, c; + + #if __CUDA_ARCH__ >= 300 + r = vsetge2(a, b); + c = r << 16; // convert bool + r = c - r; // into mask + #else + asm("not.b32 %0, %0;" : "+r"(b)); + c = vavrg2(a, b); // (a + ~b + 1) / 2 = (a - b) / 2 + c = c & 0x80008000; // msb = carry-outs + r = c >> 15; // convert + r = c - r; // msbs to + r = c | r; // mask + #endif + + return r; + } + + static __device__ __forceinline__ unsigned int vsetgt2(unsigned int a, unsigned int b) + { + unsigned int r = 0; + + #if __CUDA_ARCH__ >= 300 + asm("vset2.u32.u32.gt %0, %1, %2, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r)); + #else + unsigned int c; + asm("not.b32 %0, %0;" : "+r"(b)); + c = vavg2(a, b); // (a + ~b) / 2 = (a - b) / 2 [rounded down] + c = c & 0x80008000; // msbs = carry-outs + r = c >> 15; // convert to bool + #endif + + return r; + } + + static __device__ __forceinline__ unsigned int vcmpgt2(unsigned int a, unsigned int b) + { + unsigned int r, c; + + #if __CUDA_ARCH__ >= 300 + r = vsetgt2(a, b); + c = r << 16; // convert bool + r = c - r; // into mask + #else + asm("not.b32 %0, %0;" : "+r"(b)); + c = vavg2(a, b); // (a + ~b) / 2 = (a - b) / 2 [rounded down] + c = c & 0x80008000; // msbs = carry-outs + r = c >> 15; // convert + r = c - r; // msbs to + r = c | r; // mask + #endif + + return r; + } + + static __device__ __forceinline__ unsigned int vsetle2(unsigned int a, unsigned int b) + { + unsigned int r = 0; + + #if __CUDA_ARCH__ >= 300 + asm("vset2.u32.u32.le %0, %1, %2, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r)); + #else + unsigned int c; + asm("not.b32 %0, %0;" : "+r"(a)); + c = vavrg2(a, b); // (b + ~a + 1) / 2 = (b - a) / 2 + c = c & 0x80008000; // msb = carry-outs + r = c >> 15; // convert to bool + #endif + + 
return r; + } + + static __device__ __forceinline__ unsigned int vcmple2(unsigned int a, unsigned int b) + { + unsigned int r, c; + + #if __CUDA_ARCH__ >= 300 + r = vsetle2(a, b); + c = r << 16; // convert bool + r = c - r; // into mask + #else + asm("not.b32 %0, %0;" : "+r"(a)); + c = vavrg2(a, b); // (b + ~a + 1) / 2 = (b - a) / 2 + c = c & 0x80008000; // msb = carry-outs + r = c >> 15; // convert + r = c - r; // msbs to + r = c | r; // mask + #endif + + return r; + } + + static __device__ __forceinline__ unsigned int vsetlt2(unsigned int a, unsigned int b) + { + unsigned int r = 0; + + #if __CUDA_ARCH__ >= 300 + asm("vset2.u32.u32.lt %0, %1, %2, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r)); + #else + unsigned int c; + asm("not.b32 %0, %0;" : "+r"(a)); + c = vavg2(a, b); // (b + ~a) / 2 = (b - a) / 2 [rounded down] + c = c & 0x80008000; // msb = carry-outs + r = c >> 15; // convert to bool + #endif + + return r; + } + + static __device__ __forceinline__ unsigned int vcmplt2(unsigned int a, unsigned int b) + { + unsigned int r, c; + + #if __CUDA_ARCH__ >= 300 + r = vsetlt2(a, b); + c = r << 16; // convert bool + r = c - r; // into mask + #else + asm("not.b32 %0, %0;" : "+r"(a)); + c = vavg2(a, b); // (b + ~a) / 2 = (b - a) / 2 [rounded down] + c = c & 0x80008000; // msb = carry-outs + r = c >> 15; // convert + r = c - r; // msbs to + r = c | r; // mask + #endif + + return r; + } + + static __device__ __forceinline__ unsigned int vsetne2(unsigned int a, unsigned int b) + { + unsigned int r = 0; + + #if __CUDA_ARCH__ >= 300 + asm ("vset2.u32.u32.ne %0, %1, %2, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r)); + #else + // inspired by Alan Mycroft's null-byte detection algorithm: + // null_byte(x) = ((x - 0x01010101) & (~x & 0x80808080)) + unsigned int c; + r = a ^ b; // 0x0000 if a == b + c = r | 0x80008000; // set msbs, to catch carry out + c = c - 0x00010001; // msb = 0, if r was 0x0000 or 0x8000 + c = r | c; // msb = 1, if r was not 0x0000 + c = c & 0x80008000; // extract msbs + r = c >> 15; // convert to bool + #endif + + return r; + } + + static __device__ __forceinline__ unsigned int vcmpne2(unsigned int a, unsigned int b) + { + unsigned int r, c; + + #if __CUDA_ARCH__ >= 300 + r = vsetne2(a, b); + c = r << 16; // convert bool + r = c - r; // into mask + #else + // inspired by Alan Mycroft's null-byte detection algorithm: + // null_byte(x) = ((x - 0x01010101) & (~x & 0x80808080)) + r = a ^ b; // 0x0000 if a == b + c = r | 0x80008000; // set msbs, to catch carry out + c = c - 0x00010001; // msb = 0, if r was 0x0000 or 0x8000 + c = r | c; // msb = 1, if r was not 0x0000 + c = c & 0x80008000; // extract msbs + r = c >> 15; // convert + r = c - r; // msbs to + r = c | r; // mask + #endif + + return r; + } + + static __device__ __forceinline__ unsigned int vmax2(unsigned int a, unsigned int b) + { + unsigned int r = 0; + + #if __CUDA_ARCH__ >= 300 + asm("vmax2.u32.u32.u32 %0, %1, %2, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r)); + #elif __CUDA_ARCH__ >= 200 + asm("vmax.u32.u32.u32 %0.h0, %1.h0, %2.h0, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r)); + asm("vmax.u32.u32.u32 %0.h1, %1.h1, %2.h1, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r)); + #else + unsigned int s, t, u; + r = a & 0x0000ffff; // extract low halfword + s = b & 0x0000ffff; // extract low halfword + t = ::max(r, s); // maximum of low halfwords + r = a & 0xffff0000; // extract high halfword + s = b & 0xffff0000; // extract high halfword + u = ::max(r, s); // maximum of high halfwords + r = t | u; // combine halfword maximums + #endif + + return r; + } + 
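The averaging emulations in this header rest on the HAKMEM #23 identity a + b = 2*(a & b) + (a ^ b): the AND collects the carry bits and the XOR the sum bits, so halving both sides gives (a + b) / 2 = (a & b) + ((a ^ b) >> 1), with the 0xfffefffe mask stopping shifted bits from leaking across halfword lanes. A small host-side reference check (illustrative only, not part of the library):

    #include <cassert>

    // Reference of the vavg2 bit trick on one packed pair of halfwords:
    // high lane floor((7 + 3) / 2) = 5, low lane floor((0xFFFF + 1) / 2) = 0x8000.
    static unsigned int vavg2_ref(unsigned int a, unsigned int b)
    {
        const unsigned int s = (a ^ b) & 0xfffefffe; // sum bits, lane-safe shift
        return (a & b) + (s >> 1);                   // carries + half the sum bits
    }

    int main()
    {
        assert(vavg2_ref(0x0007FFFFu, 0x00030001u) == 0x00058000u);
        return 0;
    }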
+ static __device__ __forceinline__ unsigned int vmin2(unsigned int a, unsigned int b) + { + unsigned int r = 0; + + #if __CUDA_ARCH__ >= 300 + asm("vmin2.u32.u32.u32 %0, %1, %2, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r)); + #elif __CUDA_ARCH__ >= 200 + asm("vmin.u32.u32.u32 %0.h0, %1.h0, %2.h0, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r)); + asm("vmin.u32.u32.u32 %0.h1, %1.h1, %2.h1, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r)); + #else + unsigned int s, t, u; + r = a & 0x0000ffff; // extract low halfword + s = b & 0x0000ffff; // extract low halfword + t = ::min(r, s); // minimum of low halfwords + r = a & 0xffff0000; // extract high halfword + s = b & 0xffff0000; // extract high halfword + u = ::min(r, s); // minimum of high halfwords + r = t | u; // combine halfword minimums + #endif + + return r; + } + + // 4 + + static __device__ __forceinline__ unsigned int vadd4(unsigned int a, unsigned int b) + { + unsigned int r = 0; + + #if __CUDA_ARCH__ >= 300 + asm("vadd4.u32.u32.u32.sat %0, %1, %2, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r)); + #elif __CUDA_ARCH__ >= 200 + asm("vadd.u32.u32.u32.sat %0.b0, %1.b0, %2.b0, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r)); + asm("vadd.u32.u32.u32.sat %0.b1, %1.b1, %2.b1, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r)); + asm("vadd.u32.u32.u32.sat %0.b2, %1.b2, %2.b2, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r)); + asm("vadd.u32.u32.u32.sat %0.b3, %1.b3, %2.b3, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r)); + #else + unsigned int s, t; + s = a ^ b; // sum bits + r = a & 0x7f7f7f7f; // clear msbs + t = b & 0x7f7f7f7f; // clear msbs + s = s & 0x80808080; // msb sum bits + r = r + t; // add without msbs, record carry-out in msbs + r = r ^ s; // sum of msb sum and carry-in bits, w/o carry-out + #endif /* __CUDA_ARCH__ >= 300 */ + + return r; + } + + static __device__ __forceinline__ unsigned int vsub4(unsigned int a, unsigned int b) + { + unsigned int r = 0; + + #if __CUDA_ARCH__ >= 300 + asm("vsub4.u32.u32.u32.sat %0, %1, %2, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r)); + #elif __CUDA_ARCH__ >= 200 + asm("vsub.u32.u32.u32.sat %0.b0, %1.b0, %2.b0, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r)); + asm("vsub.u32.u32.u32.sat %0.b1, %1.b1, %2.b1, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r)); + asm("vsub.u32.u32.u32.sat %0.b2, %1.b2, %2.b2, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r)); + asm("vsub.u32.u32.u32.sat %0.b3, %1.b3, %2.b3, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r)); + #else + unsigned int s, t; + s = a ^ ~b; // inverted sum bits + r = a | 0x80808080; // set msbs + t = b & 0x7f7f7f7f; // clear msbs + s = s & 0x80808080; // inverted msb sum bits + r = r - t; // subtract w/o msbs, record inverted borrows in msb + r = r ^ s; // combine inverted msb sum bits and borrows + #endif + + return r; + } + + static __device__ __forceinline__ unsigned int vavg4(unsigned int a, unsigned int b) + { + unsigned int r, s; + + // HAKMEM #23: a + b = 2 * (a & b) + (a ^ b) ==> + // (a + b) / 2 = (a & b) + ((a ^ b) >> 1) + s = a ^ b; + r = a & b; + s = s & 0xfefefefe; // ensure following shift doesn't cross byte boundaries + s = s >> 1; + s = r + s; + + return s; + } + + static __device__ __forceinline__ unsigned int vavrg4(unsigned int a, unsigned int b) + { + unsigned int r = 0; + + #if __CUDA_ARCH__ >= 300 + asm("vavrg4.u32.u32.u32 %0, %1, %2, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r)); + #else + // HAKMEM #23: a + b = 2 * (a | b) - (a ^ b) ==> + // (a + b + 1) / 2 = (a | b) - ((a ^ b) >> 1) + unsigned int c; + c = a ^ b; + r = a | b; + c = c & 0xfefefefe; // ensure following shift doesn't cross byte boundaries + c = c >> 
1; + r = r - c; + #endif + + return r; + } + + static __device__ __forceinline__ unsigned int vseteq4(unsigned int a, unsigned int b) + { + unsigned int r = 0; + + #if __CUDA_ARCH__ >= 300 + asm("vset4.u32.u32.eq %0, %1, %2, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r)); + #else + // inspired by Alan Mycroft's null-byte detection algorithm: + // null_byte(x) = ((x - 0x01010101) & (~x & 0x80808080)) + unsigned int c; + r = a ^ b; // 0x00 if a == b + c = r | 0x80808080; // set msbs, to catch carry out + r = r ^ c; // extract msbs, msb = 1 if r < 0x80 + c = c - 0x01010101; // msb = 0, if r was 0x00 or 0x80 + c = r & ~c; // msb = 1, if r was 0x00 + r = c >> 7; // convert to bool + #endif + + return r; + } + + static __device__ __forceinline__ unsigned int vcmpeq4(unsigned int a, unsigned int b) + { + unsigned int r, t; + + #if __CUDA_ARCH__ >= 300 + r = vseteq4(a, b); + t = r << 8; // convert bool + r = t - r; // to mask + #else + // inspired by Alan Mycroft's null-byte detection algorithm: + // null_byte(x) = ((x - 0x01010101) & (~x & 0x80808080)) + t = a ^ b; // 0x00 if a == b + r = t | 0x80808080; // set msbs, to catch carry out + t = t ^ r; // extract msbs, msb = 1 if t < 0x80 + r = r - 0x01010101; // msb = 0, if t was 0x00 or 0x80 + r = t & ~r; // msb = 1, if t was 0x00 + t = r >> 7; // build mask + t = r - t; // from + r = t | r; // msbs + #endif + + return r; + } + + static __device__ __forceinline__ unsigned int vsetle4(unsigned int a, unsigned int b) + { + unsigned int r = 0; + + #if __CUDA_ARCH__ >= 300 + asm("vset4.u32.u32.le %0, %1, %2, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r)); + #else + unsigned int c; + asm("not.b32 %0, %0;" : "+r"(a)); + c = vavrg4(a, b); // (b + ~a + 1) / 2 = (b - a) / 2 + c = c & 0x80808080; // msb = carry-outs + r = c >> 7; // convert to bool + #endif + + return r; + } + + static __device__ __forceinline__ unsigned int vcmple4(unsigned int a, unsigned int b) + { + unsigned int r, c; + + #if __CUDA_ARCH__ >= 300 + r = vsetle4(a, b); + c = r << 8; // convert bool + r = c - r; // to mask + #else + asm("not.b32 %0, %0;" : "+r"(a)); + c = vavrg4(a, b); // (b + ~a + 1) / 2 = (b - a) / 2 + c = c & 0x80808080; // msbs = carry-outs + r = c >> 7; // convert + r = c - r; // msbs to + r = c | r; // mask + #endif + + return r; + } + + static __device__ __forceinline__ unsigned int vsetlt4(unsigned int a, unsigned int b) + { + unsigned int r = 0; + + #if __CUDA_ARCH__ >= 300 + asm("vset4.u32.u32.lt %0, %1, %2, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r)); + #else + unsigned int c; + asm("not.b32 %0, %0;" : "+r"(a)); + c = vavg4(a, b); // (b + ~a) / 2 = (b - a) / 2 [rounded down] + c = c & 0x80808080; // msb = carry-outs + r = c >> 7; // convert to bool + #endif + + return r; + } + + static __device__ __forceinline__ unsigned int vcmplt4(unsigned int a, unsigned int b) + { + unsigned int r, c; + + #if __CUDA_ARCH__ >= 300 + r = vsetlt4(a, b); + c = r << 8; // convert bool + r = c - r; // to mask + #else + asm("not.b32 %0, %0;" : "+r"(a)); + c = vavg4(a, b); // (b + ~a) / 2 = (b - a) / 2 [rounded down] + c = c & 0x80808080; // msbs = carry-outs + r = c >> 7; // convert + r = c - r; // msbs to + r = c | r; // mask + #endif + + return r; + } + + static __device__ __forceinline__ unsigned int vsetge4(unsigned int a, unsigned int b) + { + unsigned int r = 0; + + #if __CUDA_ARCH__ >= 300 + asm("vset4.u32.u32.ge %0, %1, %2, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r)); + #else + unsigned int c; + asm("not.b32 %0, %0;" : "+r"(b)); + c = vavrg4(a, b); // (a + ~b + 1) / 2 = (a - b) / 2 + c = c & 
0x80808080; // msb = carry-outs + r = c >> 7; // convert to bool + #endif + + return r; + } + + static __device__ __forceinline__ unsigned int vcmpge4(unsigned int a, unsigned int b) + { + unsigned int r, s; + + #if __CUDA_ARCH__ >= 300 + r = vsetge4(a, b); + s = r << 8; // convert bool + r = s - r; // to mask + #else + asm ("not.b32 %0,%0;" : "+r"(b)); + r = vavrg4 (a, b); // (a + ~b + 1) / 2 = (a - b) / 2 + r = r & 0x80808080; // msb = carry-outs + s = r >> 7; // build mask + s = r - s; // from + r = s | r; // msbs + #endif + + return r; + } + + static __device__ __forceinline__ unsigned int vsetgt4(unsigned int a, unsigned int b) + { + unsigned int r = 0; + + #if __CUDA_ARCH__ >= 300 + asm("vset4.u32.u32.gt %0, %1, %2, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r)); + #else + unsigned int c; + asm("not.b32 %0, %0;" : "+r"(b)); + c = vavg4(a, b); // (a + ~b) / 2 = (a - b) / 2 [rounded down] + c = c & 0x80808080; // msb = carry-outs + r = c >> 7; // convert to bool + #endif + + return r; + } + + static __device__ __forceinline__ unsigned int vcmpgt4(unsigned int a, unsigned int b) + { + unsigned int r, c; + + #if __CUDA_ARCH__ >= 300 + r = vsetgt4(a, b); + c = r << 8; // convert bool + r = c - r; // to mask + #else + asm("not.b32 %0, %0;" : "+r"(b)); + c = vavg4(a, b); // (a + ~b) / 2 = (a - b) / 2 [rounded down] + c = c & 0x80808080; // msb = carry-outs + r = c >> 7; // convert + r = c - r; // msbs to + r = c | r; // mask + #endif + + return r; + } + + static __device__ __forceinline__ unsigned int vsetne4(unsigned int a, unsigned int b) + { + unsigned int r = 0; + + #if __CUDA_ARCH__ >= 300 + asm("vset4.u32.u32.ne %0, %1, %2, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r)); + #else + // inspired by Alan Mycroft's null-byte detection algorithm: + // null_byte(x) = ((x - 0x01010101) & (~x & 0x80808080)) + unsigned int c; + r = a ^ b; // 0x00 if a == b + c = r | 0x80808080; // set msbs, to catch carry out + c = c - 0x01010101; // msb = 0, if r was 0x00 or 0x80 + c = r | c; // msb = 1, if r was not 0x00 + c = c & 0x80808080; // extract msbs + r = c >> 7; // convert to bool + #endif + + return r; + } + + static __device__ __forceinline__ unsigned int vcmpne4(unsigned int a, unsigned int b) + { + unsigned int r, c; + + #if __CUDA_ARCH__ >= 300 + r = vsetne4(a, b); + c = r << 8; // convert bool + r = c - r; // to mask + #else + // inspired by Alan Mycroft's null-byte detection algorithm: + // null_byte(x) = ((x - 0x01010101) & (~x & 0x80808080)) + r = a ^ b; // 0x00 if a == b + c = r | 0x80808080; // set msbs, to catch carry out + c = c - 0x01010101; // msb = 0, if r was 0x00 or 0x80 + c = r | c; // msb = 1, if r was not 0x00 + c = c & 0x80808080; // extract msbs + r = c >> 7; // convert + r = c - r; // msbs to + r = c | r; // mask + #endif + + return r; + } + + static __device__ __forceinline__ unsigned int vabsdiff4(unsigned int a, unsigned int b) + { + unsigned int r = 0; + + #if __CUDA_ARCH__ >= 300 + asm("vabsdiff4.u32.u32.u32.sat %0, %1, %2, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r)); + #elif __CUDA_ARCH__ >= 200 + asm("vabsdiff.u32.u32.u32.sat %0.b0, %1.b0, %2.b0, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r)); + asm("vabsdiff.u32.u32.u32.sat %0.b1, %1.b1, %2.b1, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r)); + asm("vabsdiff.u32.u32.u32.sat %0.b2, %1.b2, %2.b2, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r)); + asm("vabsdiff.u32.u32.u32.sat %0.b3, %1.b3, %2.b3, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r)); + #else + unsigned int s; + s = vcmpge4(a, b); // mask = 0xff if a >= b + r = a ^ b; // + s = (r & s) ^ b; // select a 
when a >= b, else select b => max(a,b) + r = s ^ r; // select a when b >= a, else select b => min(a,b) + r = s - r; // |a - b| = max(a,b) - min(a,b); + #endif + + return r; + } + + static __device__ __forceinline__ unsigned int vmax4(unsigned int a, unsigned int b) + { + unsigned int r = 0; + + #if __CUDA_ARCH__ >= 300 + asm("vmax4.u32.u32.u32 %0, %1, %2, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r)); + #elif __CUDA_ARCH__ >= 200 + asm("vmax.u32.u32.u32 %0.b0, %1.b0, %2.b0, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r)); + asm("vmax.u32.u32.u32 %0.b1, %1.b1, %2.b1, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r)); + asm("vmax.u32.u32.u32 %0.b2, %1.b2, %2.b2, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r)); + asm("vmax.u32.u32.u32 %0.b3, %1.b3, %2.b3, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r)); + #else + unsigned int s; + s = vcmpge4(a, b); // mask = 0xff if a >= b + r = a & s; // select a when b >= a + s = b & ~s; // select b when b < a + r = r | s; // combine byte selections + #endif + + return r; // byte-wise unsigned maximum + } + + static __device__ __forceinline__ unsigned int vmin4(unsigned int a, unsigned int b) + { + unsigned int r = 0; + + #if __CUDA_ARCH__ >= 300 + asm("vmin4.u32.u32.u32 %0, %1, %2, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r)); + #elif __CUDA_ARCH__ >= 200 + asm("vmin.u32.u32.u32 %0.b0, %1.b0, %2.b0, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r)); + asm("vmin.u32.u32.u32 %0.b1, %1.b1, %2.b1, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r)); + asm("vmin.u32.u32.u32 %0.b2, %1.b2, %2.b2, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r)); + asm("vmin.u32.u32.u32 %0.b3, %1.b3, %2.b3, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r)); + #else + unsigned int s; + s = vcmpge4(b, a); // mask = 0xff if a >= b + r = a & s; // select a when b >= a + s = b & ~s; // select b when b < a + r = r | s; // combine byte selections + #endif + + return r; + } +}}} + +#endif // __OPENCV_GPU_SIMD_FUNCTIONS_HPP__ diff --git a/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/static_check.hpp b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/static_check.hpp new file mode 100644 index 0000000..e77691b --- /dev/null +++ b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/static_check.hpp @@ -0,0 +1,67 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. 
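The SIMD header above packs four bytes (or two halfwords) into one 32-bit register, using the hardware vadd4/vabsdiff4-style instructions on sm_30, per-lane PTX ops on sm_20, and the bit tricks shown in the emulation branches elsewhere. A hedged usage sketch, with hypothetical kernel and buffer names, computing a packed-byte sum of absolute differences:

    #include "opencv2/gpu/device/simd_functions.hpp"

    // Hypothetical kernel: per-byte |a - b| over two packed rows (4 pixels per
    // 32-bit word) via vabsdiff4, then a per-word horizontal sum accumulated
    // into a single SAD value.
    __global__ void packedRowSAD(const unsigned int* a, const unsigned int* b,
                                 unsigned int* sad, int nWords)
    {
        const int i = blockIdx.x * blockDim.x + threadIdx.x;
        if (i >= nWords)
            return;

        const unsigned int d = cv::gpu::device::vabsdiff4(a[i], b[i]); // 4 byte-wise differences

        const unsigned int s = (d & 0xff) + ((d >> 8) & 0xff) + ((d >> 16) & 0xff) + (d >> 24);
        atomicAdd(sad, s);
    }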
+// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef __OPENCV_GPU_GPU_DEVICE_STATIC_CHECK_HPP__ +#define __OPENCV_GPU_GPU_DEVICE_STATIC_CHECK_HPP__ + +#if defined(__CUDACC__) + #define __OPENCV_GPU_HOST_DEVICE__ __host__ __device__ __forceinline__ +#else + #define __OPENCV_GPU_HOST_DEVICE__ +#endif + +namespace cv { namespace gpu +{ + namespace device + { + template struct Static {}; + + template<> struct Static + { + __OPENCV_GPU_HOST_DEVICE__ static void check() {}; + }; + } +}} + +#undef __OPENCV_GPU_HOST_DEVICE__ + +#endif /* __OPENCV_GPU_GPU_DEVICE_STATIC_CHECK_HPP__ */ diff --git a/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/transform.hpp b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/transform.hpp new file mode 100644 index 0000000..636caac --- /dev/null +++ b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/transform.hpp @@ -0,0 +1,67 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. 
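The static check header above is a pre-C++11 compile-time assertion: only the true specialization defines a check() member (the template parameter lists look stripped in this patch text; upstream the primary template is parameterized on a bool), so instantiating the false case fails the build. A minimal hypothetical use:

    #include "opencv2/gpu/device/static_check.hpp"

    // Compiles only for 4-byte element types; otherwise Static<false> has no
    // check() member and compilation stops at this line.
    template <typename T>
    void requireFourByteElems()
    {
        cv::gpu::device::Static<sizeof(T) == 4>::check();
    }

    // requireFourByteElems<int>();   // OK
    // requireFourByteElems<char>();  // compile error: Static<false> has no check()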
+// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef __OPENCV_GPU_TRANSFORM_HPP__ +#define __OPENCV_GPU_TRANSFORM_HPP__ + +#include "common.hpp" +#include "utility.hpp" +#include "detail/transform_detail.hpp" + +namespace cv { namespace gpu { namespace device +{ + template + static inline void transform(PtrStepSz src, PtrStepSz dst, UnOp op, const Mask& mask, cudaStream_t stream) + { + typedef TransformFunctorTraits ft; + transform_detail::TransformDispatcher::cn == 1 && VecTraits::cn == 1 && ft::smart_shift != 1>::call(src, dst, op, mask, stream); + } + + template + static inline void transform(PtrStepSz src1, PtrStepSz src2, PtrStepSz dst, BinOp op, const Mask& mask, cudaStream_t stream) + { + typedef TransformFunctorTraits ft; + transform_detail::TransformDispatcher::cn == 1 && VecTraits::cn == 1 && VecTraits::cn == 1 && ft::smart_shift != 1>::call(src1, src2, dst, op, mask, stream); + } +}}} + +#endif // __OPENCV_GPU_TRANSFORM_HPP__ diff --git a/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/type_traits.hpp b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/type_traits.hpp new file mode 100644 index 0000000..1b36acc --- /dev/null +++ b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/type_traits.hpp @@ -0,0 +1,82 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. 
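The transform header above is the host-side entry point that picks a "simple" or "smart" (vectorized-shift) kernel based on TransformFunctorTraits and launches it over a whole image. A sketch of the intended usage, assuming the default traits from functional.hpp apply and compiling in a .cu translation unit; the functor and wrapper names are hypothetical:

    #include "opencv2/gpu/device/transform.hpp"
    #include "opencv2/gpu/device/functional.hpp"   // default TransformFunctorTraits

    // Hypothetical per-pixel functor: add a constant with saturation.
    struct AddScalar
    {
        uchar s;
        __device__ __forceinline__ uchar operator()(uchar v) const
        {
            return cv::gpu::device::saturate_cast<uchar>(v + s);
        }
    };

    // Host-side wrapper: applies the functor to every pixel of an 8-bit image,
    // unmasked, on the given stream.
    void addScalar(cv::gpu::PtrStepSzb src, cv::gpu::PtrStepSzb dst, uchar s, cudaStream_t stream)
    {
        AddScalar op;
        op.s = s;
        cv::gpu::device::transform(src, dst, op, cv::gpu::device::WithOutMask(), stream);
    }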
+// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef __OPENCV_GPU_TYPE_TRAITS_HPP__ +#define __OPENCV_GPU_TYPE_TRAITS_HPP__ + +#include "detail/type_traits_detail.hpp" + +namespace cv { namespace gpu { namespace device +{ + template struct IsSimpleParameter + { + enum {value = type_traits_detail::IsIntegral::value || type_traits_detail::IsFloat::value || + type_traits_detail::PointerTraits::type>::value}; + }; + + template struct TypeTraits + { + typedef typename type_traits_detail::UnConst::type NonConstType; + typedef typename type_traits_detail::UnVolatile::type NonVolatileType; + typedef typename type_traits_detail::UnVolatile::type>::type UnqualifiedType; + typedef typename type_traits_detail::PointerTraits::type PointeeType; + typedef typename type_traits_detail::ReferenceTraits::type ReferredType; + + enum { isConst = type_traits_detail::UnConst::value }; + enum { isVolatile = type_traits_detail::UnVolatile::value }; + + enum { isReference = type_traits_detail::ReferenceTraits::value }; + enum { isPointer = type_traits_detail::PointerTraits::type>::value }; + + enum { isUnsignedInt = type_traits_detail::IsUnsignedIntegral::value }; + enum { isSignedInt = type_traits_detail::IsSignedIntergral::value }; + enum { isIntegral = type_traits_detail::IsIntegral::value }; + enum { isFloat = type_traits_detail::IsFloat::value }; + enum { isArith = isIntegral || isFloat }; + enum { isVec = type_traits_detail::IsVec::value }; + + typedef typename type_traits_detail::Select::value, + T, typename type_traits_detail::AddParameterType::type>::type ParameterType; + }; +}}} + +#endif // __OPENCV_GPU_TYPE_TRAITS_HPP__ diff --git a/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/utility.hpp b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/utility.hpp new file mode 100644 index 0000000..85e81ac --- /dev/null +++ b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/utility.hpp @@ -0,0 +1,213 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. 
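The type traits header above exposes compile-time queries (isIntegral, isFloat, isArith, isVec, ParameterType and so on) that the rest of the device code uses for dispatch. A small hypothetical guard combining it with the static check header, as a sketch only:

    #include "opencv2/gpu/device/type_traits.hpp"
    #include "opencv2/gpu/device/static_check.hpp"

    // Compile-time guard: only integral or floating-point element types instantiate.
    template <typename T>
    void requireArithmetic()
    {
        cv::gpu::device::Static<cv::gpu::device::TypeTraits<T>::isArith != 0>::check();
    }

    // requireArithmetic<float>();  // OK
    // requireArithmetic<int2>();   // compile error: int2 is a vector type, not arithmetic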
+// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef __OPENCV_GPU_UTILITY_HPP__ +#define __OPENCV_GPU_UTILITY_HPP__ + +#include "saturate_cast.hpp" +#include "datamov_utils.hpp" + +namespace cv { namespace gpu { namespace device +{ + #define OPENCV_GPU_LOG_WARP_SIZE (5) + #define OPENCV_GPU_WARP_SIZE (1 << OPENCV_GPU_LOG_WARP_SIZE) + #define OPENCV_GPU_LOG_MEM_BANKS ((__CUDA_ARCH__ >= 200) ? 5 : 4) // 32 banks on fermi, 16 on tesla + #define OPENCV_GPU_MEM_BANKS (1 << OPENCV_GPU_LOG_MEM_BANKS) + + /////////////////////////////////////////////////////////////////////////////// + // swap + + template void __device__ __host__ __forceinline__ swap(T& a, T& b) + { + const T temp = a; + a = b; + b = temp; + } + + /////////////////////////////////////////////////////////////////////////////// + // Mask Reader + + struct SingleMask + { + explicit __host__ __device__ __forceinline__ SingleMask(PtrStepb mask_) : mask(mask_) {} + __host__ __device__ __forceinline__ SingleMask(const SingleMask& mask_): mask(mask_.mask){} + + __device__ __forceinline__ bool operator()(int y, int x) const + { + return mask.ptr(y)[x] != 0; + } + + PtrStepb mask; + }; + + struct SingleMaskChannels + { + __host__ __device__ __forceinline__ SingleMaskChannels(PtrStepb mask_, int channels_) + : mask(mask_), channels(channels_) {} + __host__ __device__ __forceinline__ SingleMaskChannels(const SingleMaskChannels& mask_) + :mask(mask_.mask), channels(mask_.channels){} + + __device__ __forceinline__ bool operator()(int y, int x) const + { + return mask.ptr(y)[x / channels] != 0; + } + + PtrStepb mask; + int channels; + }; + + struct MaskCollection + { + explicit __host__ __device__ __forceinline__ MaskCollection(PtrStepb* maskCollection_) + : maskCollection(maskCollection_) {} + + __device__ __forceinline__ MaskCollection(const MaskCollection& masks_) + : maskCollection(masks_.maskCollection), curMask(masks_.curMask){} + + __device__ __forceinline__ void next() + { + curMask = *maskCollection++; + } + __device__ __forceinline__ void setMask(int z) + { + curMask = maskCollection[z]; + } + + __device__ __forceinline__ bool operator()(int y, int x) const + { + uchar val; + return curMask.data == 0 || (ForceGlob::Load(curMask.ptr(y), x, val), (val != 0)); + } + + const PtrStepb* maskCollection; + PtrStepb curMask; + }; + + struct WithOutMask + { + __host__ __device__ __forceinline__ WithOutMask(){} 
+ __host__ __device__ __forceinline__ WithOutMask(const WithOutMask&){} + + __device__ __forceinline__ void next() const + { + } + __device__ __forceinline__ void setMask(int) const + { + } + + __device__ __forceinline__ bool operator()(int, int) const + { + return true; + } + + __device__ __forceinline__ bool operator()(int, int, int) const + { + return true; + } + + static __device__ __forceinline__ bool check(int, int) + { + return true; + } + + static __device__ __forceinline__ bool check(int, int, int) + { + return true; + } + }; + + /////////////////////////////////////////////////////////////////////////////// + // Solve linear system + + // solve 2x2 linear system Ax=b + template __device__ __forceinline__ bool solve2x2(const T A[2][2], const T b[2], T x[2]) + { + T det = A[0][0] * A[1][1] - A[1][0] * A[0][1]; + + if (det != 0) + { + double invdet = 1.0 / det; + + x[0] = saturate_cast(invdet * (b[0] * A[1][1] - b[1] * A[0][1])); + + x[1] = saturate_cast(invdet * (A[0][0] * b[1] - A[1][0] * b[0])); + + return true; + } + + return false; + } + + // solve 3x3 linear system Ax=b + template __device__ __forceinline__ bool solve3x3(const T A[3][3], const T b[3], T x[3]) + { + T det = A[0][0] * (A[1][1] * A[2][2] - A[1][2] * A[2][1]) + - A[0][1] * (A[1][0] * A[2][2] - A[1][2] * A[2][0]) + + A[0][2] * (A[1][0] * A[2][1] - A[1][1] * A[2][0]); + + if (det != 0) + { + double invdet = 1.0 / det; + + x[0] = saturate_cast(invdet * + (b[0] * (A[1][1] * A[2][2] - A[1][2] * A[2][1]) - + A[0][1] * (b[1] * A[2][2] - A[1][2] * b[2] ) + + A[0][2] * (b[1] * A[2][1] - A[1][1] * b[2] ))); + + x[1] = saturate_cast(invdet * + (A[0][0] * (b[1] * A[2][2] - A[1][2] * b[2] ) - + b[0] * (A[1][0] * A[2][2] - A[1][2] * A[2][0]) + + A[0][2] * (A[1][0] * b[2] - b[1] * A[2][0]))); + + x[2] = saturate_cast(invdet * + (A[0][0] * (A[1][1] * b[2] - b[1] * A[2][1]) - + A[0][1] * (A[1][0] * b[2] - b[1] * A[2][0]) + + b[0] * (A[1][0] * A[2][1] - A[1][1] * A[2][0]))); + + return true; + } + + return false; + } +}}} // namespace cv { namespace gpu { namespace device + +#endif // __OPENCV_GPU_UTILITY_HPP__ diff --git a/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/vec_distance.hpp b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/vec_distance.hpp new file mode 100644 index 0000000..d5b4bb2 --- /dev/null +++ b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/vec_distance.hpp @@ -0,0 +1,224 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. 
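The utility header above ends with small Cramer's-rule solvers guarded by a determinant check. A single-thread demo of solve2x2 with the arithmetic worked out in the comments (kernel name and launch are hypothetical):

    #include "opencv2/gpu/device/utility.hpp"

    // Solves A*x = b for the 2x2 system below; solve2x2 returns false if det == 0.
    __global__ void solve2x2Demo(float* out)
    {
        if (threadIdx.x != 0 || blockIdx.x != 0)
            return;

        const float A[2][2] = { { 3.f, 2.f },
                                { 1.f, 4.f } };
        const float b[2] = { 5.f, 6.f };
        float x[2];

        if (cv::gpu::device::solve2x2(A, b, x))  // det = 3*4 - 1*2 = 10
        {
            out[0] = x[0];  // (5*4 - 6*2) / 10 = 0.8
            out[1] = x[1];  // (3*6 - 1*5) / 10 = 1.3
        }
    }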
+// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef __OPENCV_GPU_VEC_DISTANCE_HPP__ +#define __OPENCV_GPU_VEC_DISTANCE_HPP__ + +#include "reduce.hpp" +#include "functional.hpp" +#include "detail/vec_distance_detail.hpp" + +namespace cv { namespace gpu { namespace device +{ + template struct L1Dist + { + typedef int value_type; + typedef int result_type; + + __device__ __forceinline__ L1Dist() : mySum(0) {} + + __device__ __forceinline__ void reduceIter(int val1, int val2) + { + mySum = __sad(val1, val2, mySum); + } + + template __device__ __forceinline__ void reduceAll(int* smem, int tid) + { + reduce(smem, mySum, tid, plus()); + } + + __device__ __forceinline__ operator int() const + { + return mySum; + } + + int mySum; + }; + template <> struct L1Dist + { + typedef float value_type; + typedef float result_type; + + __device__ __forceinline__ L1Dist() : mySum(0.0f) {} + + __device__ __forceinline__ void reduceIter(float val1, float val2) + { + mySum += ::fabs(val1 - val2); + } + + template __device__ __forceinline__ void reduceAll(float* smem, int tid) + { + reduce(smem, mySum, tid, plus()); + } + + __device__ __forceinline__ operator float() const + { + return mySum; + } + + float mySum; + }; + + struct L2Dist + { + typedef float value_type; + typedef float result_type; + + __device__ __forceinline__ L2Dist() : mySum(0.0f) {} + + __device__ __forceinline__ void reduceIter(float val1, float val2) + { + float reg = val1 - val2; + mySum += reg * reg; + } + + template __device__ __forceinline__ void reduceAll(float* smem, int tid) + { + reduce(smem, mySum, tid, plus()); + } + + __device__ __forceinline__ operator float() const + { + return sqrtf(mySum); + } + + float mySum; + }; + + struct HammingDist + { + typedef int value_type; + typedef int result_type; + + __device__ __forceinline__ HammingDist() : mySum(0) {} + + __device__ __forceinline__ void reduceIter(int val1, int val2) + { + mySum += __popc(val1 ^ val2); + } + + template __device__ __forceinline__ void reduceAll(int* smem, int tid) + { + reduce(smem, mySum, tid, plus()); + } + + __device__ __forceinline__ operator int() const + { + return mySum; + } + + int mySum; + }; + + // calc distance between two vectors in global memory + template + __device__ void calcVecDiffGlobal(const T1* vec1, const T2* vec2, int len, Dist& dist, typename Dist::result_type* smem, int tid) + { + for (int i = tid; i < 
len; i += THREAD_DIM) + { + T1 val1; + ForceGlob::Load(vec1, i, val1); + + T2 val2; + ForceGlob::Load(vec2, i, val2); + + dist.reduceIter(val1, val2); + } + + dist.reduceAll(smem, tid); + } + + // calc distance between two vectors, first vector is cached in register or shared memory, second vector is in global memory + template + __device__ __forceinline__ void calcVecDiffCached(const T1* vecCached, const T2* vecGlob, int len, Dist& dist, typename Dist::result_type* smem, int tid) + { + vec_distance_detail::VecDiffCachedCalculator::calc(vecCached, vecGlob, len, dist, tid); + + dist.reduceAll(smem, tid); + } + + // calc distance between two vectors in global memory + template struct VecDiffGlobal + { + explicit __device__ __forceinline__ VecDiffGlobal(const T1* vec1_, int = 0, void* = 0, int = 0, int = 0) + { + vec1 = vec1_; + } + + template + __device__ __forceinline__ void calc(const T2* vec2, int len, Dist& dist, typename Dist::result_type* smem, int tid) const + { + calcVecDiffGlobal(vec1, vec2, len, dist, smem, tid); + } + + const T1* vec1; + }; + + // calc distance between two vectors, first vector is cached in register memory, second vector is in global memory + template struct VecDiffCachedRegister + { + template __device__ __forceinline__ VecDiffCachedRegister(const T1* vec1, int len, U* smem, int glob_tid, int tid) + { + if (glob_tid < len) + smem[glob_tid] = vec1[glob_tid]; + __syncthreads(); + + U* vec1ValsPtr = vec1Vals; + + #pragma unroll + for (int i = tid; i < MAX_LEN; i += THREAD_DIM) + *vec1ValsPtr++ = smem[i]; + + __syncthreads(); + } + + template + __device__ __forceinline__ void calc(const T2* vec2, int len, Dist& dist, typename Dist::result_type* smem, int tid) const + { + calcVecDiffCached(vec1Vals, vec2, len, dist, smem, tid); + } + + U vec1Vals[MAX_LEN / THREAD_DIM]; + }; +}}} // namespace cv { namespace gpu { namespace device + +#endif // __OPENCV_GPU_VEC_DISTANCE_HPP__ diff --git a/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/vec_math.hpp b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/vec_math.hpp new file mode 100644 index 0000000..a6cb43a --- /dev/null +++ b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/vec_math.hpp @@ -0,0 +1,922 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. 
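The vector distance header above follows an accumulate-then-reduce pattern: each thread feeds element pairs into a distance functor (L1Dist, L2Dist, HammingDist), then reduceAll folds the per-thread partials across the block. A hedged sketch of a single-block L2 distance, assuming the thread count is the compile-time parameter (named THREAD_DIM upstream, judging by the loop stride visible above); the kernel name is hypothetical:

    #include "opencv2/gpu/device/vec_distance.hpp"

    // One block accumulates squared differences of two float vectors with L2Dist,
    // reduces across the block, and thread 0 reads back the distance
    // (operator float applies the final sqrtf).
    template <int THREADS>
    __global__ void l2DistanceKernel(const float* a, const float* b, int len, float* result)
    {
        __shared__ float smem[THREADS];

        cv::gpu::device::L2Dist dist;
        cv::gpu::device::calcVecDiffGlobal<THREADS>(a, b, len, dist, smem, threadIdx.x);

        if (threadIdx.x == 0)
            *result = dist;
    }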
+// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef __OPENCV_GPU_VECMATH_HPP__ +#define __OPENCV_GPU_VECMATH_HPP__ + +#include "vec_traits.hpp" +#include "saturate_cast.hpp" + +namespace cv { namespace gpu { namespace device +{ + +// saturate_cast + +namespace vec_math_detail +{ + template struct SatCastHelper; + template struct SatCastHelper<1, VecD> + { + template static __device__ __forceinline__ VecD cast(const VecS& v) + { + typedef typename VecTraits::elem_type D; + return VecTraits::make(saturate_cast(v.x)); + } + }; + template struct SatCastHelper<2, VecD> + { + template static __device__ __forceinline__ VecD cast(const VecS& v) + { + typedef typename VecTraits::elem_type D; + return VecTraits::make(saturate_cast(v.x), saturate_cast(v.y)); + } + }; + template struct SatCastHelper<3, VecD> + { + template static __device__ __forceinline__ VecD cast(const VecS& v) + { + typedef typename VecTraits::elem_type D; + return VecTraits::make(saturate_cast(v.x), saturate_cast(v.y), saturate_cast(v.z)); + } + }; + template struct SatCastHelper<4, VecD> + { + template static __device__ __forceinline__ VecD cast(const VecS& v) + { + typedef typename VecTraits::elem_type D; + return VecTraits::make(saturate_cast(v.x), saturate_cast(v.y), saturate_cast(v.z), saturate_cast(v.w)); + } + }; + + template static __device__ __forceinline__ VecD saturate_cast_helper(const VecS& v) + { + return SatCastHelper::cn, VecD>::cast(v); + } +} + +template static __device__ __forceinline__ T saturate_cast(const uchar1& v) {return vec_math_detail::saturate_cast_helper(v);} +template static __device__ __forceinline__ T saturate_cast(const char1& v) {return vec_math_detail::saturate_cast_helper(v);} +template static __device__ __forceinline__ T saturate_cast(const ushort1& v) {return vec_math_detail::saturate_cast_helper(v);} +template static __device__ __forceinline__ T saturate_cast(const short1& v) {return vec_math_detail::saturate_cast_helper(v);} +template static __device__ __forceinline__ T saturate_cast(const uint1& v) {return vec_math_detail::saturate_cast_helper(v);} +template static __device__ __forceinline__ T saturate_cast(const int1& v) {return vec_math_detail::saturate_cast_helper(v);} +template static __device__ __forceinline__ T saturate_cast(const float1& v) {return vec_math_detail::saturate_cast_helper(v);} +template static __device__ __forceinline__ T saturate_cast(const double1& v) {return vec_math_detail::saturate_cast_helper(v);} + +template static __device__ __forceinline__ T saturate_cast(const uchar2& v) {return vec_math_detail::saturate_cast_helper(v);} +template static 
__device__ __forceinline__ T saturate_cast(const char2& v) {return vec_math_detail::saturate_cast_helper(v);} +template static __device__ __forceinline__ T saturate_cast(const ushort2& v) {return vec_math_detail::saturate_cast_helper(v);} +template static __device__ __forceinline__ T saturate_cast(const short2& v) {return vec_math_detail::saturate_cast_helper(v);} +template static __device__ __forceinline__ T saturate_cast(const uint2& v) {return vec_math_detail::saturate_cast_helper(v);} +template static __device__ __forceinline__ T saturate_cast(const int2& v) {return vec_math_detail::saturate_cast_helper(v);} +template static __device__ __forceinline__ T saturate_cast(const float2& v) {return vec_math_detail::saturate_cast_helper(v);} +template static __device__ __forceinline__ T saturate_cast(const double2& v) {return vec_math_detail::saturate_cast_helper(v);} + +template static __device__ __forceinline__ T saturate_cast(const uchar3& v) {return vec_math_detail::saturate_cast_helper(v);} +template static __device__ __forceinline__ T saturate_cast(const char3& v) {return vec_math_detail::saturate_cast_helper(v);} +template static __device__ __forceinline__ T saturate_cast(const ushort3& v) {return vec_math_detail::saturate_cast_helper(v);} +template static __device__ __forceinline__ T saturate_cast(const short3& v) {return vec_math_detail::saturate_cast_helper(v);} +template static __device__ __forceinline__ T saturate_cast(const uint3& v) {return vec_math_detail::saturate_cast_helper(v);} +template static __device__ __forceinline__ T saturate_cast(const int3& v) {return vec_math_detail::saturate_cast_helper(v);} +template static __device__ __forceinline__ T saturate_cast(const float3& v) {return vec_math_detail::saturate_cast_helper(v);} +template static __device__ __forceinline__ T saturate_cast(const double3& v) {return vec_math_detail::saturate_cast_helper(v);} + +template static __device__ __forceinline__ T saturate_cast(const uchar4& v) {return vec_math_detail::saturate_cast_helper(v);} +template static __device__ __forceinline__ T saturate_cast(const char4& v) {return vec_math_detail::saturate_cast_helper(v);} +template static __device__ __forceinline__ T saturate_cast(const ushort4& v) {return vec_math_detail::saturate_cast_helper(v);} +template static __device__ __forceinline__ T saturate_cast(const short4& v) {return vec_math_detail::saturate_cast_helper(v);} +template static __device__ __forceinline__ T saturate_cast(const uint4& v) {return vec_math_detail::saturate_cast_helper(v);} +template static __device__ __forceinline__ T saturate_cast(const int4& v) {return vec_math_detail::saturate_cast_helper(v);} +template static __device__ __forceinline__ T saturate_cast(const float4& v) {return vec_math_detail::saturate_cast_helper(v);} +template static __device__ __forceinline__ T saturate_cast(const double4& v) {return vec_math_detail::saturate_cast_helper(v);} + +// unary operators + +#define CV_CUDEV_IMPLEMENT_VEC_UNARY_OP(op, input_type, output_type) \ + __device__ __forceinline__ output_type ## 1 operator op(const input_type ## 1 & a) \ + { \ + return VecTraits::make(op (a.x)); \ + } \ + __device__ __forceinline__ output_type ## 2 operator op(const input_type ## 2 & a) \ + { \ + return VecTraits::make(op (a.x), op (a.y)); \ + } \ + __device__ __forceinline__ output_type ## 3 operator op(const input_type ## 3 & a) \ + { \ + return VecTraits::make(op (a.x), op (a.y), op (a.z)); \ + } \ + __device__ __forceinline__ output_type ## 4 operator op(const input_type ## 4 & a) \ + { \ 
+ return VecTraits::make(op (a.x), op (a.y), op (a.z), op (a.w)); \ + } + +CV_CUDEV_IMPLEMENT_VEC_UNARY_OP(-, char, char) +CV_CUDEV_IMPLEMENT_VEC_UNARY_OP(-, short, short) +CV_CUDEV_IMPLEMENT_VEC_UNARY_OP(-, int, int) +CV_CUDEV_IMPLEMENT_VEC_UNARY_OP(-, float, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_OP(-, double, double) + +CV_CUDEV_IMPLEMENT_VEC_UNARY_OP(!, uchar, uchar) +CV_CUDEV_IMPLEMENT_VEC_UNARY_OP(!, char, uchar) +CV_CUDEV_IMPLEMENT_VEC_UNARY_OP(!, ushort, uchar) +CV_CUDEV_IMPLEMENT_VEC_UNARY_OP(!, short, uchar) +CV_CUDEV_IMPLEMENT_VEC_UNARY_OP(!, int, uchar) +CV_CUDEV_IMPLEMENT_VEC_UNARY_OP(!, uint, uchar) +CV_CUDEV_IMPLEMENT_VEC_UNARY_OP(!, float, uchar) +CV_CUDEV_IMPLEMENT_VEC_UNARY_OP(!, double, uchar) + +CV_CUDEV_IMPLEMENT_VEC_UNARY_OP(~, uchar, uchar) +CV_CUDEV_IMPLEMENT_VEC_UNARY_OP(~, char, char) +CV_CUDEV_IMPLEMENT_VEC_UNARY_OP(~, ushort, ushort) +CV_CUDEV_IMPLEMENT_VEC_UNARY_OP(~, short, short) +CV_CUDEV_IMPLEMENT_VEC_UNARY_OP(~, int, int) +CV_CUDEV_IMPLEMENT_VEC_UNARY_OP(~, uint, uint) + +#undef CV_CUDEV_IMPLEMENT_VEC_UNARY_OP + +// unary functions + +#define CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(func_name, func, input_type, output_type) \ + __device__ __forceinline__ output_type ## 1 func_name(const input_type ## 1 & a) \ + { \ + return VecTraits::make(func (a.x)); \ + } \ + __device__ __forceinline__ output_type ## 2 func_name(const input_type ## 2 & a) \ + { \ + return VecTraits::make(func (a.x), func (a.y)); \ + } \ + __device__ __forceinline__ output_type ## 3 func_name(const input_type ## 3 & a) \ + { \ + return VecTraits::make(func (a.x), func (a.y), func (a.z)); \ + } \ + __device__ __forceinline__ output_type ## 4 func_name(const input_type ## 4 & a) \ + { \ + return VecTraits::make(func (a.x), func (a.y), func (a.z), func (a.w)); \ + } + +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(abs, /*::abs*/, uchar, uchar) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(abs, ::abs, char, char) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(abs, /*::abs*/, ushort, ushort) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(abs, ::abs, short, short) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(abs, ::abs, int, int) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(abs, /*::abs*/, uint, uint) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(abs, ::fabsf, float, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(abs, ::fabs, double, double) + +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(sqrt, ::sqrtf, uchar, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(sqrt, ::sqrtf, char, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(sqrt, ::sqrtf, ushort, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(sqrt, ::sqrtf, short, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(sqrt, ::sqrtf, int, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(sqrt, ::sqrtf, uint, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(sqrt, ::sqrtf, float, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(sqrt, ::sqrt, double, double) + +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(exp, ::expf, uchar, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(exp, ::expf, char, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(exp, ::expf, ushort, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(exp, ::expf, short, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(exp, ::expf, int, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(exp, ::expf, uint, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(exp, ::expf, float, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(exp, ::exp, double, double) + +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(exp2, ::exp2f, uchar, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(exp2, ::exp2f, char, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(exp2, ::exp2f, ushort, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(exp2, ::exp2f, short, float) 
+CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(exp2, ::exp2f, int, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(exp2, ::exp2f, uint, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(exp2, ::exp2f, float, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(exp2, ::exp2, double, double) + +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(exp10, ::exp10f, uchar, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(exp10, ::exp10f, char, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(exp10, ::exp10f, ushort, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(exp10, ::exp10f, short, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(exp10, ::exp10f, int, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(exp10, ::exp10f, uint, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(exp10, ::exp10f, float, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(exp10, ::exp10, double, double) + +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(log, ::logf, uchar, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(log, ::logf, char, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(log, ::logf, ushort, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(log, ::logf, short, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(log, ::logf, int, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(log, ::logf, uint, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(log, ::logf, float, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(log, ::log, double, double) + +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(log2, ::log2f, uchar, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(log2, ::log2f, char, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(log2, ::log2f, ushort, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(log2, ::log2f, short, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(log2, ::log2f, int, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(log2, ::log2f, uint, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(log2, ::log2f, float, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(log2, ::log2, double, double) + +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(log10, ::log10f, uchar, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(log10, ::log10f, char, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(log10, ::log10f, ushort, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(log10, ::log10f, short, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(log10, ::log10f, int, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(log10, ::log10f, uint, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(log10, ::log10f, float, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(log10, ::log10, double, double) + +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(sin, ::sinf, uchar, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(sin, ::sinf, char, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(sin, ::sinf, ushort, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(sin, ::sinf, short, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(sin, ::sinf, int, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(sin, ::sinf, uint, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(sin, ::sinf, float, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(sin, ::sin, double, double) + +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(cos, ::cosf, uchar, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(cos, ::cosf, char, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(cos, ::cosf, ushort, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(cos, ::cosf, short, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(cos, ::cosf, int, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(cos, ::cosf, uint, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(cos, ::cosf, float, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(cos, ::cos, double, double) + +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(tan, ::tanf, uchar, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(tan, ::tanf, char, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(tan, ::tanf, ushort, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(tan, ::tanf, short, 
float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(tan, ::tanf, int, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(tan, ::tanf, uint, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(tan, ::tanf, float, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(tan, ::tan, double, double) + +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(asin, ::asinf, uchar, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(asin, ::asinf, char, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(asin, ::asinf, ushort, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(asin, ::asinf, short, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(asin, ::asinf, int, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(asin, ::asinf, uint, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(asin, ::asinf, float, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(asin, ::asin, double, double) + +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(acos, ::acosf, uchar, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(acos, ::acosf, char, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(acos, ::acosf, ushort, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(acos, ::acosf, short, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(acos, ::acosf, int, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(acos, ::acosf, uint, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(acos, ::acosf, float, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(acos, ::acos, double, double) + +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(atan, ::atanf, uchar, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(atan, ::atanf, char, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(atan, ::atanf, ushort, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(atan, ::atanf, short, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(atan, ::atanf, int, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(atan, ::atanf, uint, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(atan, ::atanf, float, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(atan, ::atan, double, double) + +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(sinh, ::sinhf, uchar, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(sinh, ::sinhf, char, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(sinh, ::sinhf, ushort, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(sinh, ::sinhf, short, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(sinh, ::sinhf, int, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(sinh, ::sinhf, uint, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(sinh, ::sinhf, float, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(sinh, ::sinh, double, double) + +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(cosh, ::coshf, uchar, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(cosh, ::coshf, char, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(cosh, ::coshf, ushort, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(cosh, ::coshf, short, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(cosh, ::coshf, int, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(cosh, ::coshf, uint, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(cosh, ::coshf, float, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(cosh, ::cosh, double, double) + +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(tanh, ::tanhf, uchar, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(tanh, ::tanhf, char, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(tanh, ::tanhf, ushort, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(tanh, ::tanhf, short, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(tanh, ::tanhf, int, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(tanh, ::tanhf, uint, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(tanh, ::tanhf, float, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(tanh, ::tanh, double, double) + +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(asinh, ::asinhf, uchar, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(asinh, ::asinhf, char, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(asinh, ::asinhf, ushort, float) 
+CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(asinh, ::asinhf, short, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(asinh, ::asinhf, int, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(asinh, ::asinhf, uint, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(asinh, ::asinhf, float, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(asinh, ::asinh, double, double) + +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(acosh, ::acoshf, uchar, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(acosh, ::acoshf, char, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(acosh, ::acoshf, ushort, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(acosh, ::acoshf, short, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(acosh, ::acoshf, int, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(acosh, ::acoshf, uint, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(acosh, ::acoshf, float, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(acosh, ::acosh, double, double) + +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(atanh, ::atanhf, uchar, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(atanh, ::atanhf, char, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(atanh, ::atanhf, ushort, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(atanh, ::atanhf, short, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(atanh, ::atanhf, int, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(atanh, ::atanhf, uint, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(atanh, ::atanhf, float, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(atanh, ::atanh, double, double) + +#undef CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC + +// binary operators (vec & vec) + +#define CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(op, input_type, output_type) \ + __device__ __forceinline__ output_type ## 1 operator op(const input_type ## 1 & a, const input_type ## 1 & b) \ + { \ + return VecTraits::make(a.x op b.x); \ + } \ + __device__ __forceinline__ output_type ## 2 operator op(const input_type ## 2 & a, const input_type ## 2 & b) \ + { \ + return VecTraits::make(a.x op b.x, a.y op b.y); \ + } \ + __device__ __forceinline__ output_type ## 3 operator op(const input_type ## 3 & a, const input_type ## 3 & b) \ + { \ + return VecTraits::make(a.x op b.x, a.y op b.y, a.z op b.z); \ + } \ + __device__ __forceinline__ output_type ## 4 operator op(const input_type ## 4 & a, const input_type ## 4 & b) \ + { \ + return VecTraits::make(a.x op b.x, a.y op b.y, a.z op b.z, a.w op b.w); \ + } + +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(+, uchar, int) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(+, char, int) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(+, ushort, int) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(+, short, int) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(+, int, int) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(+, uint, uint) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(+, float, float) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(+, double, double) + +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(-, uchar, int) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(-, char, int) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(-, ushort, int) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(-, short, int) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(-, int, int) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(-, uint, uint) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(-, float, float) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(-, double, double) + +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(*, uchar, int) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(*, char, int) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(*, ushort, int) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(*, short, int) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(*, int, int) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(*, uint, uint) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(*, float, float) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(*, double, double) + +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(/, uchar, int) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(/, 
char, int) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(/, ushort, int) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(/, short, int) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(/, int, int) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(/, uint, uint) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(/, float, float) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(/, double, double) + +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(==, uchar, uchar) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(==, char, uchar) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(==, ushort, uchar) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(==, short, uchar) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(==, int, uchar) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(==, uint, uchar) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(==, float, uchar) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(==, double, uchar) + +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(!=, uchar, uchar) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(!=, char, uchar) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(!=, ushort, uchar) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(!=, short, uchar) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(!=, int, uchar) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(!=, uint, uchar) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(!=, float, uchar) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(!=, double, uchar) + +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(>, uchar, uchar) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(>, char, uchar) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(>, ushort, uchar) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(>, short, uchar) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(>, int, uchar) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(>, uint, uchar) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(>, float, uchar) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(>, double, uchar) + +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(<, uchar, uchar) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(<, char, uchar) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(<, ushort, uchar) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(<, short, uchar) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(<, int, uchar) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(<, uint, uchar) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(<, float, uchar) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(<, double, uchar) + +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(>=, uchar, uchar) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(>=, char, uchar) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(>=, ushort, uchar) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(>=, short, uchar) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(>=, int, uchar) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(>=, uint, uchar) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(>=, float, uchar) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(>=, double, uchar) + +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(<=, uchar, uchar) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(<=, char, uchar) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(<=, ushort, uchar) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(<=, short, uchar) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(<=, int, uchar) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(<=, uint, uchar) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(<=, float, uchar) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(<=, double, uchar) + +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(&&, uchar, uchar) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(&&, char, uchar) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(&&, ushort, uchar) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(&&, short, uchar) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(&&, int, uchar) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(&&, uint, uchar) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(&&, float, uchar) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(&&, double, uchar) + +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(||, uchar, uchar) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(||, char, uchar) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(||, ushort, uchar) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(||, short, uchar) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(||, int, uchar) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(||, uint, uchar) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(||, 
float, uchar) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(||, double, uchar) + +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(&, uchar, uchar) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(&, char, char) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(&, ushort, ushort) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(&, short, short) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(&, int, int) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(&, uint, uint) + +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(|, uchar, uchar) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(|, char, char) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(|, ushort, ushort) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(|, short, short) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(|, int, int) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(|, uint, uint) + +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(^, uchar, uchar) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(^, char, char) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(^, ushort, ushort) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(^, short, short) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(^, int, int) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(^, uint, uint) + +#undef CV_CUDEV_IMPLEMENT_VEC_BINARY_OP + +// binary operators (vec & scalar) + +#define CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(op, input_type, scalar_type, output_type) \ + __device__ __forceinline__ output_type ## 1 operator op(const input_type ## 1 & a, scalar_type s) \ + { \ + return VecTraits::make(a.x op s); \ + } \ + __device__ __forceinline__ output_type ## 1 operator op(scalar_type s, const input_type ## 1 & b) \ + { \ + return VecTraits::make(s op b.x); \ + } \ + __device__ __forceinline__ output_type ## 2 operator op(const input_type ## 2 & a, scalar_type s) \ + { \ + return VecTraits::make(a.x op s, a.y op s); \ + } \ + __device__ __forceinline__ output_type ## 2 operator op(scalar_type s, const input_type ## 2 & b) \ + { \ + return VecTraits::make(s op b.x, s op b.y); \ + } \ + __device__ __forceinline__ output_type ## 3 operator op(const input_type ## 3 & a, scalar_type s) \ + { \ + return VecTraits::make(a.x op s, a.y op s, a.z op s); \ + } \ + __device__ __forceinline__ output_type ## 3 operator op(scalar_type s, const input_type ## 3 & b) \ + { \ + return VecTraits::make(s op b.x, s op b.y, s op b.z); \ + } \ + __device__ __forceinline__ output_type ## 4 operator op(const input_type ## 4 & a, scalar_type s) \ + { \ + return VecTraits::make(a.x op s, a.y op s, a.z op s, a.w op s); \ + } \ + __device__ __forceinline__ output_type ## 4 operator op(scalar_type s, const input_type ## 4 & b) \ + { \ + return VecTraits::make(s op b.x, s op b.y, s op b.z, s op b.w); \ + } + +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(+, uchar, int, int) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(+, uchar, float, float) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(+, uchar, double, double) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(+, char, int, int) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(+, char, float, float) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(+, char, double, double) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(+, ushort, int, int) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(+, ushort, float, float) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(+, ushort, double, double) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(+, short, int, int) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(+, short, float, float) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(+, short, double, double) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(+, int, int, int) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(+, int, float, float) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(+, int, double, double) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(+, uint, uint, uint) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(+, uint, float, float) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(+, uint, double, 
double) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(+, float, float, float) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(+, float, double, double) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(+, double, double, double) + +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(-, uchar, int, int) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(-, uchar, float, float) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(-, uchar, double, double) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(-, char, int, int) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(-, char, float, float) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(-, char, double, double) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(-, ushort, int, int) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(-, ushort, float, float) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(-, ushort, double, double) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(-, short, int, int) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(-, short, float, float) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(-, short, double, double) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(-, int, int, int) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(-, int, float, float) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(-, int, double, double) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(-, uint, uint, uint) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(-, uint, float, float) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(-, uint, double, double) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(-, float, float, float) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(-, float, double, double) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(-, double, double, double) + +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(*, uchar, int, int) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(*, uchar, float, float) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(*, uchar, double, double) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(*, char, int, int) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(*, char, float, float) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(*, char, double, double) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(*, ushort, int, int) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(*, ushort, float, float) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(*, ushort, double, double) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(*, short, int, int) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(*, short, float, float) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(*, short, double, double) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(*, int, int, int) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(*, int, float, float) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(*, int, double, double) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(*, uint, uint, uint) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(*, uint, float, float) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(*, uint, double, double) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(*, float, float, float) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(*, float, double, double) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(*, double, double, double) + +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(/, uchar, int, int) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(/, uchar, float, float) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(/, uchar, double, double) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(/, char, int, int) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(/, char, float, float) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(/, char, double, double) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(/, ushort, int, int) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(/, ushort, float, float) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(/, ushort, double, double) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(/, short, int, int) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(/, short, float, float) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(/, short, double, double) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(/, int, int, int) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(/, 
int, float, float) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(/, int, double, double) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(/, uint, uint, uint) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(/, uint, float, float) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(/, uint, double, double) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(/, float, float, float) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(/, float, double, double) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(/, double, double, double) + +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(==, uchar, uchar, uchar) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(==, char, char, uchar) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(==, ushort, ushort, uchar) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(==, short, short, uchar) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(==, int, int, uchar) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(==, uint, uint, uchar) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(==, float, float, uchar) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(==, double, double, uchar) + +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(!=, uchar, uchar, uchar) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(!=, char, char, uchar) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(!=, ushort, ushort, uchar) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(!=, short, short, uchar) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(!=, int, int, uchar) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(!=, uint, uint, uchar) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(!=, float, float, uchar) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(!=, double, double, uchar) + +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(>, uchar, uchar, uchar) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(>, char, char, uchar) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(>, ushort, ushort, uchar) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(>, short, short, uchar) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(>, int, int, uchar) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(>, uint, uint, uchar) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(>, float, float, uchar) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(>, double, double, uchar) + +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(<, uchar, uchar, uchar) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(<, char, char, uchar) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(<, ushort, ushort, uchar) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(<, short, short, uchar) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(<, int, int, uchar) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(<, uint, uint, uchar) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(<, float, float, uchar) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(<, double, double, uchar) + +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(>=, uchar, uchar, uchar) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(>=, char, char, uchar) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(>=, ushort, ushort, uchar) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(>=, short, short, uchar) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(>=, int, int, uchar) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(>=, uint, uint, uchar) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(>=, float, float, uchar) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(>=, double, double, uchar) + +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(<=, uchar, uchar, uchar) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(<=, char, char, uchar) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(<=, ushort, ushort, uchar) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(<=, short, short, uchar) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(<=, int, int, uchar) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(<=, uint, uint, uchar) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(<=, float, float, uchar) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(<=, double, double, uchar) + +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(&&, uchar, uchar, uchar) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(&&, char, char, uchar) 
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(&&, ushort, ushort, uchar) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(&&, short, short, uchar) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(&&, int, int, uchar) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(&&, uint, uint, uchar) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(&&, float, float, uchar) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(&&, double, double, uchar) + +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(||, uchar, uchar, uchar) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(||, char, char, uchar) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(||, ushort, ushort, uchar) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(||, short, short, uchar) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(||, int, int, uchar) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(||, uint, uint, uchar) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(||, float, float, uchar) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(||, double, double, uchar) + +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(&, uchar, uchar, uchar) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(&, char, char, char) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(&, ushort, ushort, ushort) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(&, short, short, short) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(&, int, int, int) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(&, uint, uint, uint) + +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(|, uchar, uchar, uchar) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(|, char, char, char) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(|, ushort, ushort, ushort) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(|, short, short, short) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(|, int, int, int) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(|, uint, uint, uint) + +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(^, uchar, uchar, uchar) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(^, char, char, char) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(^, ushort, ushort, ushort) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(^, short, short, short) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(^, int, int, int) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(^, uint, uint, uint) + +#undef CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP + +// binary function (vec & vec) + +#define CV_CUDEV_IMPLEMENT_VEC_BINARY_FUNC(func_name, func, input_type, output_type) \ + __device__ __forceinline__ output_type ## 1 func_name(const input_type ## 1 & a, const input_type ## 1 & b) \ + { \ + return VecTraits::make(func (a.x, b.x)); \ + } \ + __device__ __forceinline__ output_type ## 2 func_name(const input_type ## 2 & a, const input_type ## 2 & b) \ + { \ + return VecTraits::make(func (a.x, b.x), func (a.y, b.y)); \ + } \ + __device__ __forceinline__ output_type ## 3 func_name(const input_type ## 3 & a, const input_type ## 3 & b) \ + { \ + return VecTraits::make(func (a.x, b.x), func (a.y, b.y), func (a.z, b.z)); \ + } \ + __device__ __forceinline__ output_type ## 4 func_name(const input_type ## 4 & a, const input_type ## 4 & b) \ + { \ + return VecTraits::make(func (a.x, b.x), func (a.y, b.y), func (a.z, b.z), func (a.w, b.w)); \ + } + +CV_CUDEV_IMPLEMENT_VEC_BINARY_FUNC(max, ::max, uchar, uchar) +CV_CUDEV_IMPLEMENT_VEC_BINARY_FUNC(max, ::max, char, char) +CV_CUDEV_IMPLEMENT_VEC_BINARY_FUNC(max, ::max, ushort, ushort) +CV_CUDEV_IMPLEMENT_VEC_BINARY_FUNC(max, ::max, short, short) +CV_CUDEV_IMPLEMENT_VEC_BINARY_FUNC(max, ::max, uint, uint) +CV_CUDEV_IMPLEMENT_VEC_BINARY_FUNC(max, ::max, int, int) +CV_CUDEV_IMPLEMENT_VEC_BINARY_FUNC(max, ::fmaxf, float, float) +CV_CUDEV_IMPLEMENT_VEC_BINARY_FUNC(max, ::fmax, double, double) + +CV_CUDEV_IMPLEMENT_VEC_BINARY_FUNC(min, ::min, uchar, uchar) +CV_CUDEV_IMPLEMENT_VEC_BINARY_FUNC(min, ::min, char, char) +CV_CUDEV_IMPLEMENT_VEC_BINARY_FUNC(min, 
::min, ushort, ushort) +CV_CUDEV_IMPLEMENT_VEC_BINARY_FUNC(min, ::min, short, short) +CV_CUDEV_IMPLEMENT_VEC_BINARY_FUNC(min, ::min, uint, uint) +CV_CUDEV_IMPLEMENT_VEC_BINARY_FUNC(min, ::min, int, int) +CV_CUDEV_IMPLEMENT_VEC_BINARY_FUNC(min, ::fminf, float, float) +CV_CUDEV_IMPLEMENT_VEC_BINARY_FUNC(min, ::fmin, double, double) + +CV_CUDEV_IMPLEMENT_VEC_BINARY_FUNC(hypot, ::hypotf, uchar, float) +CV_CUDEV_IMPLEMENT_VEC_BINARY_FUNC(hypot, ::hypotf, char, float) +CV_CUDEV_IMPLEMENT_VEC_BINARY_FUNC(hypot, ::hypotf, ushort, float) +CV_CUDEV_IMPLEMENT_VEC_BINARY_FUNC(hypot, ::hypotf, short, float) +CV_CUDEV_IMPLEMENT_VEC_BINARY_FUNC(hypot, ::hypotf, uint, float) +CV_CUDEV_IMPLEMENT_VEC_BINARY_FUNC(hypot, ::hypotf, int, float) +CV_CUDEV_IMPLEMENT_VEC_BINARY_FUNC(hypot, ::hypotf, float, float) +CV_CUDEV_IMPLEMENT_VEC_BINARY_FUNC(hypot, ::hypot, double, double) + +CV_CUDEV_IMPLEMENT_VEC_BINARY_FUNC(atan2, ::atan2f, uchar, float) +CV_CUDEV_IMPLEMENT_VEC_BINARY_FUNC(atan2, ::atan2f, char, float) +CV_CUDEV_IMPLEMENT_VEC_BINARY_FUNC(atan2, ::atan2f, ushort, float) +CV_CUDEV_IMPLEMENT_VEC_BINARY_FUNC(atan2, ::atan2f, short, float) +CV_CUDEV_IMPLEMENT_VEC_BINARY_FUNC(atan2, ::atan2f, uint, float) +CV_CUDEV_IMPLEMENT_VEC_BINARY_FUNC(atan2, ::atan2f, int, float) +CV_CUDEV_IMPLEMENT_VEC_BINARY_FUNC(atan2, ::atan2f, float, float) +CV_CUDEV_IMPLEMENT_VEC_BINARY_FUNC(atan2, ::atan2, double, double) + +#undef CV_CUDEV_IMPLEMENT_VEC_BINARY_FUNC + +// binary function (vec & scalar) + +#define CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(func_name, func, input_type, scalar_type, output_type) \ + __device__ __forceinline__ output_type ## 1 func_name(const input_type ## 1 & a, scalar_type s) \ + { \ + return VecTraits::make(func ((output_type) a.x, (output_type) s)); \ + } \ + __device__ __forceinline__ output_type ## 1 func_name(scalar_type s, const input_type ## 1 & b) \ + { \ + return VecTraits::make(func ((output_type) s, (output_type) b.x)); \ + } \ + __device__ __forceinline__ output_type ## 2 func_name(const input_type ## 2 & a, scalar_type s) \ + { \ + return VecTraits::make(func ((output_type) a.x, (output_type) s), func ((output_type) a.y, (output_type) s)); \ + } \ + __device__ __forceinline__ output_type ## 2 func_name(scalar_type s, const input_type ## 2 & b) \ + { \ + return VecTraits::make(func ((output_type) s, (output_type) b.x), func ((output_type) s, (output_type) b.y)); \ + } \ + __device__ __forceinline__ output_type ## 3 func_name(const input_type ## 3 & a, scalar_type s) \ + { \ + return VecTraits::make(func ((output_type) a.x, (output_type) s), func ((output_type) a.y, (output_type) s), func ((output_type) a.z, (output_type) s)); \ + } \ + __device__ __forceinline__ output_type ## 3 func_name(scalar_type s, const input_type ## 3 & b) \ + { \ + return VecTraits::make(func ((output_type) s, (output_type) b.x), func ((output_type) s, (output_type) b.y), func ((output_type) s, (output_type) b.z)); \ + } \ + __device__ __forceinline__ output_type ## 4 func_name(const input_type ## 4 & a, scalar_type s) \ + { \ + return VecTraits::make(func ((output_type) a.x, (output_type) s), func ((output_type) a.y, (output_type) s), func ((output_type) a.z, (output_type) s), func ((output_type) a.w, (output_type) s)); \ + } \ + __device__ __forceinline__ output_type ## 4 func_name(scalar_type s, const input_type ## 4 & b) \ + { \ + return VecTraits::make(func ((output_type) s, (output_type) b.x), func ((output_type) s, (output_type) b.y), func ((output_type) s, (output_type) b.z), func ((output_type) s, 
(output_type) b.w)); \ + } + +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(max, ::max, uchar, uchar, uchar) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(max, ::fmaxf, uchar, float, float) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(max, ::fmax, uchar, double, double) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(max, ::max, char, char, char) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(max, ::fmaxf, char, float, float) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(max, ::fmax, char, double, double) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(max, ::max, ushort, ushort, ushort) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(max, ::fmaxf, ushort, float, float) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(max, ::fmax, ushort, double, double) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(max, ::max, short, short, short) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(max, ::fmaxf, short, float, float) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(max, ::fmax, short, double, double) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(max, ::max, uint, uint, uint) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(max, ::fmaxf, uint, float, float) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(max, ::fmax, uint, double, double) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(max, ::max, int, int, int) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(max, ::fmaxf, int, float, float) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(max, ::fmax, int, double, double) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(max, ::fmaxf, float, float, float) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(max, ::fmax, float, double, double) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(max, ::fmax, double, double, double) + +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(min, ::min, uchar, uchar, uchar) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(min, ::fminf, uchar, float, float) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(min, ::fmin, uchar, double, double) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(min, ::min, char, char, char) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(min, ::fminf, char, float, float) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(min, ::fmin, char, double, double) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(min, ::min, ushort, ushort, ushort) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(min, ::fminf, ushort, float, float) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(min, ::fmin, ushort, double, double) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(min, ::min, short, short, short) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(min, ::fminf, short, float, float) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(min, ::fmin, short, double, double) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(min, ::min, uint, uint, uint) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(min, ::fminf, uint, float, float) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(min, ::fmin, uint, double, double) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(min, ::min, int, int, int) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(min, ::fminf, int, float, float) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(min, ::fmin, int, double, double) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(min, ::fminf, float, float, float) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(min, ::fmin, float, double, double) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(min, ::fmin, double, double, double) + +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(hypot, ::hypotf, uchar, float, float) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(hypot, ::hypot, uchar, double, double) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(hypot, ::hypotf, char, float, float) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(hypot, ::hypot, char, double, double) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(hypot, ::hypotf, ushort, float, float) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(hypot, ::hypot, ushort, 
double, double) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(hypot, ::hypotf, short, float, float) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(hypot, ::hypot, short, double, double) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(hypot, ::hypotf, uint, float, float) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(hypot, ::hypot, uint, double, double) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(hypot, ::hypotf, int, float, float) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(hypot, ::hypot, int, double, double) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(hypot, ::hypotf, float, float, float) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(hypot, ::hypot, float, double, double) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(hypot, ::hypot, double, double, double) + +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(atan2, ::atan2f, uchar, float, float) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(atan2, ::atan2, uchar, double, double) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(atan2, ::atan2f, char, float, float) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(atan2, ::atan2, char, double, double) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(atan2, ::atan2f, ushort, float, float) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(atan2, ::atan2, ushort, double, double) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(atan2, ::atan2f, short, float, float) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(atan2, ::atan2, short, double, double) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(atan2, ::atan2f, uint, float, float) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(atan2, ::atan2, uint, double, double) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(atan2, ::atan2f, int, float, float) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(atan2, ::atan2, int, double, double) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(atan2, ::atan2f, float, float, float) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(atan2, ::atan2, float, double, double) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(atan2, ::atan2, double, double, double) + +#undef CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC + +}}} // namespace cv { namespace gpu { namespace device + +#endif // __OPENCV_GPU_VECMATH_HPP__ diff --git a/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/vec_traits.hpp b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/vec_traits.hpp new file mode 100644 index 0000000..8d179c8 --- /dev/null +++ b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/vec_traits.hpp @@ -0,0 +1,280 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. 
+// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef __OPENCV_GPU_VEC_TRAITS_HPP__ +#define __OPENCV_GPU_VEC_TRAITS_HPP__ + +#include "common.hpp" + +namespace cv { namespace gpu { namespace device +{ + template struct TypeVec; + + struct __align__(8) uchar8 + { + uchar a0, a1, a2, a3, a4, a5, a6, a7; + }; + static __host__ __device__ __forceinline__ uchar8 make_uchar8(uchar a0, uchar a1, uchar a2, uchar a3, uchar a4, uchar a5, uchar a6, uchar a7) + { + uchar8 val = {a0, a1, a2, a3, a4, a5, a6, a7}; + return val; + } + struct __align__(8) char8 + { + schar a0, a1, a2, a3, a4, a5, a6, a7; + }; + static __host__ __device__ __forceinline__ char8 make_char8(schar a0, schar a1, schar a2, schar a3, schar a4, schar a5, schar a6, schar a7) + { + char8 val = {a0, a1, a2, a3, a4, a5, a6, a7}; + return val; + } + struct __align__(16) ushort8 + { + ushort a0, a1, a2, a3, a4, a5, a6, a7; + }; + static __host__ __device__ __forceinline__ ushort8 make_ushort8(ushort a0, ushort a1, ushort a2, ushort a3, ushort a4, ushort a5, ushort a6, ushort a7) + { + ushort8 val = {a0, a1, a2, a3, a4, a5, a6, a7}; + return val; + } + struct __align__(16) short8 + { + short a0, a1, a2, a3, a4, a5, a6, a7; + }; + static __host__ __device__ __forceinline__ short8 make_short8(short a0, short a1, short a2, short a3, short a4, short a5, short a6, short a7) + { + short8 val = {a0, a1, a2, a3, a4, a5, a6, a7}; + return val; + } + struct __align__(32) uint8 + { + uint a0, a1, a2, a3, a4, a5, a6, a7; + }; + static __host__ __device__ __forceinline__ uint8 make_uint8(uint a0, uint a1, uint a2, uint a3, uint a4, uint a5, uint a6, uint a7) + { + uint8 val = {a0, a1, a2, a3, a4, a5, a6, a7}; + return val; + } + struct __align__(32) int8 + { + int a0, a1, a2, a3, a4, a5, a6, a7; + }; + static __host__ __device__ __forceinline__ int8 make_int8(int a0, int a1, int a2, int a3, int a4, int a5, int a6, int a7) + { + int8 val = {a0, a1, a2, a3, a4, a5, a6, a7}; + return val; + } + struct __align__(32) float8 + { + float a0, a1, a2, a3, a4, a5, a6, a7; + }; + static __host__ __device__ __forceinline__ float8 make_float8(float a0, float a1, float a2, float a3, float a4, float a5, float a6, float a7) + { + float8 val = {a0, a1, a2, a3, a4, a5, a6, a7}; + return val; + } + struct double8 + { + double a0, a1, a2, a3, a4, a5, a6, a7; + }; + static __host__ __device__ __forceinline__ double8 make_double8(double a0, double a1, double a2, double a3, double a4, double a5, double a6, double a7) + { + double8 val = {a0, a1, a2, a3, a4, a5, a6, a7}; + return val; + } + +#define OPENCV_GPU_IMPLEMENT_TYPE_VEC(type) \ + template<> struct 
TypeVec { typedef type vec_type; }; \ + template<> struct TypeVec { typedef type ## 1 vec_type; }; \ + template<> struct TypeVec { typedef type ## 2 vec_type; }; \ + template<> struct TypeVec { typedef type ## 2 vec_type; }; \ + template<> struct TypeVec { typedef type ## 3 vec_type; }; \ + template<> struct TypeVec { typedef type ## 3 vec_type; }; \ + template<> struct TypeVec { typedef type ## 4 vec_type; }; \ + template<> struct TypeVec { typedef type ## 4 vec_type; }; \ + template<> struct TypeVec { typedef type ## 8 vec_type; }; \ + template<> struct TypeVec { typedef type ## 8 vec_type; }; + + OPENCV_GPU_IMPLEMENT_TYPE_VEC(uchar) + OPENCV_GPU_IMPLEMENT_TYPE_VEC(char) + OPENCV_GPU_IMPLEMENT_TYPE_VEC(ushort) + OPENCV_GPU_IMPLEMENT_TYPE_VEC(short) + OPENCV_GPU_IMPLEMENT_TYPE_VEC(int) + OPENCV_GPU_IMPLEMENT_TYPE_VEC(uint) + OPENCV_GPU_IMPLEMENT_TYPE_VEC(float) + OPENCV_GPU_IMPLEMENT_TYPE_VEC(double) + + #undef OPENCV_GPU_IMPLEMENT_TYPE_VEC + + template<> struct TypeVec { typedef schar vec_type; }; + template<> struct TypeVec { typedef char2 vec_type; }; + template<> struct TypeVec { typedef char3 vec_type; }; + template<> struct TypeVec { typedef char4 vec_type; }; + template<> struct TypeVec { typedef char8 vec_type; }; + + template<> struct TypeVec { typedef uchar vec_type; }; + template<> struct TypeVec { typedef uchar2 vec_type; }; + template<> struct TypeVec { typedef uchar3 vec_type; }; + template<> struct TypeVec { typedef uchar4 vec_type; }; + template<> struct TypeVec { typedef uchar8 vec_type; }; + + template struct VecTraits; + +#define OPENCV_GPU_IMPLEMENT_VEC_TRAITS(type) \ + template<> struct VecTraits \ + { \ + typedef type elem_type; \ + enum {cn=1}; \ + static __device__ __host__ __forceinline__ type all(type v) {return v;} \ + static __device__ __host__ __forceinline__ type make(type x) {return x;} \ + static __device__ __host__ __forceinline__ type make(const type* v) {return *v;} \ + }; \ + template<> struct VecTraits \ + { \ + typedef type elem_type; \ + enum {cn=1}; \ + static __device__ __host__ __forceinline__ type ## 1 all(type v) {return make_ ## type ## 1(v);} \ + static __device__ __host__ __forceinline__ type ## 1 make(type x) {return make_ ## type ## 1(x);} \ + static __device__ __host__ __forceinline__ type ## 1 make(const type* v) {return make_ ## type ## 1(*v);} \ + }; \ + template<> struct VecTraits \ + { \ + typedef type elem_type; \ + enum {cn=2}; \ + static __device__ __host__ __forceinline__ type ## 2 all(type v) {return make_ ## type ## 2(v, v);} \ + static __device__ __host__ __forceinline__ type ## 2 make(type x, type y) {return make_ ## type ## 2(x, y);} \ + static __device__ __host__ __forceinline__ type ## 2 make(const type* v) {return make_ ## type ## 2(v[0], v[1]);} \ + }; \ + template<> struct VecTraits \ + { \ + typedef type elem_type; \ + enum {cn=3}; \ + static __device__ __host__ __forceinline__ type ## 3 all(type v) {return make_ ## type ## 3(v, v, v);} \ + static __device__ __host__ __forceinline__ type ## 3 make(type x, type y, type z) {return make_ ## type ## 3(x, y, z);} \ + static __device__ __host__ __forceinline__ type ## 3 make(const type* v) {return make_ ## type ## 3(v[0], v[1], v[2]);} \ + }; \ + template<> struct VecTraits \ + { \ + typedef type elem_type; \ + enum {cn=4}; \ + static __device__ __host__ __forceinline__ type ## 4 all(type v) {return make_ ## type ## 4(v, v, v, v);} \ + static __device__ __host__ __forceinline__ type ## 4 make(type x, type y, type z, type w) {return make_ ## type ## 4(x, y, z, w);} \ + static 
__device__ __host__ __forceinline__ type ## 4 make(const type* v) {return make_ ## type ## 4(v[0], v[1], v[2], v[3]);} \ + }; \ + template<> struct VecTraits \ + { \ + typedef type elem_type; \ + enum {cn=8}; \ + static __device__ __host__ __forceinline__ type ## 8 all(type v) {return make_ ## type ## 8(v, v, v, v, v, v, v, v);} \ + static __device__ __host__ __forceinline__ type ## 8 make(type a0, type a1, type a2, type a3, type a4, type a5, type a6, type a7) {return make_ ## type ## 8(a0, a1, a2, a3, a4, a5, a6, a7);} \ + static __device__ __host__ __forceinline__ type ## 8 make(const type* v) {return make_ ## type ## 8(v[0], v[1], v[2], v[3], v[4], v[5], v[6], v[7]);} \ + }; + + OPENCV_GPU_IMPLEMENT_VEC_TRAITS(uchar) + OPENCV_GPU_IMPLEMENT_VEC_TRAITS(ushort) + OPENCV_GPU_IMPLEMENT_VEC_TRAITS(short) + OPENCV_GPU_IMPLEMENT_VEC_TRAITS(int) + OPENCV_GPU_IMPLEMENT_VEC_TRAITS(uint) + OPENCV_GPU_IMPLEMENT_VEC_TRAITS(float) + OPENCV_GPU_IMPLEMENT_VEC_TRAITS(double) + + #undef OPENCV_GPU_IMPLEMENT_VEC_TRAITS + + template<> struct VecTraits + { + typedef char elem_type; + enum {cn=1}; + static __device__ __host__ __forceinline__ char all(char v) {return v;} + static __device__ __host__ __forceinline__ char make(char x) {return x;} + static __device__ __host__ __forceinline__ char make(const char* x) {return *x;} + }; + template<> struct VecTraits + { + typedef schar elem_type; + enum {cn=1}; + static __device__ __host__ __forceinline__ schar all(schar v) {return v;} + static __device__ __host__ __forceinline__ schar make(schar x) {return x;} + static __device__ __host__ __forceinline__ schar make(const schar* x) {return *x;} + }; + template<> struct VecTraits + { + typedef schar elem_type; + enum {cn=1}; + static __device__ __host__ __forceinline__ char1 all(schar v) {return make_char1(v);} + static __device__ __host__ __forceinline__ char1 make(schar x) {return make_char1(x);} + static __device__ __host__ __forceinline__ char1 make(const schar* v) {return make_char1(v[0]);} + }; + template<> struct VecTraits + { + typedef schar elem_type; + enum {cn=2}; + static __device__ __host__ __forceinline__ char2 all(schar v) {return make_char2(v, v);} + static __device__ __host__ __forceinline__ char2 make(schar x, schar y) {return make_char2(x, y);} + static __device__ __host__ __forceinline__ char2 make(const schar* v) {return make_char2(v[0], v[1]);} + }; + template<> struct VecTraits + { + typedef schar elem_type; + enum {cn=3}; + static __device__ __host__ __forceinline__ char3 all(schar v) {return make_char3(v, v, v);} + static __device__ __host__ __forceinline__ char3 make(schar x, schar y, schar z) {return make_char3(x, y, z);} + static __device__ __host__ __forceinline__ char3 make(const schar* v) {return make_char3(v[0], v[1], v[2]);} + }; + template<> struct VecTraits + { + typedef schar elem_type; + enum {cn=4}; + static __device__ __host__ __forceinline__ char4 all(schar v) {return make_char4(v, v, v, v);} + static __device__ __host__ __forceinline__ char4 make(schar x, schar y, schar z, schar w) {return make_char4(x, y, z, w);} + static __device__ __host__ __forceinline__ char4 make(const schar* v) {return make_char4(v[0], v[1], v[2], v[3]);} + }; + template<> struct VecTraits + { + typedef schar elem_type; + enum {cn=8}; + static __device__ __host__ __forceinline__ char8 all(schar v) {return make_char8(v, v, v, v, v, v, v, v);} + static __device__ __host__ __forceinline__ char8 make(schar a0, schar a1, schar a2, schar a3, schar a4, schar a5, schar a6, schar a7) {return make_char8(a0, a1, 
a2, a3, a4, a5, a6, a7);} + static __device__ __host__ __forceinline__ char8 make(const schar* v) {return make_char8(v[0], v[1], v[2], v[3], v[4], v[5], v[6], v[7]);} + }; +}}} // namespace cv { namespace gpu { namespace device + +#endif // __OPENCV_GPU_VEC_TRAITS_HPP__ diff --git a/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/warp.hpp b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/warp.hpp new file mode 100644 index 0000000..0f1dc79 --- /dev/null +++ b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/warp.hpp @@ -0,0 +1,131 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef __OPENCV_GPU_DEVICE_WARP_HPP__ +#define __OPENCV_GPU_DEVICE_WARP_HPP__ + +namespace cv { namespace gpu { namespace device +{ + struct Warp + { + enum + { + LOG_WARP_SIZE = 5, + WARP_SIZE = 1 << LOG_WARP_SIZE, + STRIDE = WARP_SIZE + }; + + /** \brief Returns the warp lane ID of the calling thread. 
*/ + static __device__ __forceinline__ unsigned int laneId() + { + unsigned int ret; + asm("mov.u32 %0, %laneid;" : "=r"(ret) ); + return ret; + } + + template + static __device__ __forceinline__ void fill(It beg, It end, const T& value) + { + for(It t = beg + laneId(); t < end; t += STRIDE) + *t = value; + } + + template + static __device__ __forceinline__ OutIt copy(InIt beg, InIt end, OutIt out) + { + for(InIt t = beg + laneId(); t < end; t += STRIDE, out += STRIDE) + *out = *t; + return out; + } + + template + static __device__ __forceinline__ OutIt transform(InIt beg, InIt end, OutIt out, UnOp op) + { + for(InIt t = beg + laneId(); t < end; t += STRIDE, out += STRIDE) + *out = op(*t); + return out; + } + + template + static __device__ __forceinline__ OutIt transform(InIt1 beg1, InIt1 end1, InIt2 beg2, OutIt out, BinOp op) + { + unsigned int lane = laneId(); + + InIt1 t1 = beg1 + lane; + InIt2 t2 = beg2 + lane; + for(; t1 < end1; t1 += STRIDE, t2 += STRIDE, out += STRIDE) + *out = op(*t1, *t2); + return out; + } + + template + static __device__ __forceinline__ T reduce(volatile T *ptr, BinOp op) + { + const unsigned int lane = laneId(); + + if (lane < 16) + { + T partial = ptr[lane]; + + ptr[lane] = partial = op(partial, ptr[lane + 16]); + ptr[lane] = partial = op(partial, ptr[lane + 8]); + ptr[lane] = partial = op(partial, ptr[lane + 4]); + ptr[lane] = partial = op(partial, ptr[lane + 2]); + ptr[lane] = partial = op(partial, ptr[lane + 1]); + } + + return *ptr; + } + + template + static __device__ __forceinline__ void yota(OutIt beg, OutIt end, T value) + { + unsigned int lane = laneId(); + value += lane; + + for(OutIt t = beg + lane; t < end; t += STRIDE, value += STRIDE) + *t = value; + } + }; +}}} // namespace cv { namespace gpu { namespace device + +#endif /* __OPENCV_GPU_DEVICE_WARP_HPP__ */ diff --git a/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/warp_reduce.hpp b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/warp_reduce.hpp new file mode 100644 index 0000000..d4e64c4 --- /dev/null +++ b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/warp_reduce.hpp @@ -0,0 +1,68 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. 
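A minimal kernel sketch of how the Warp helpers above are typically used; this is illustrative only and not part of the patch. The kernel name, the AddOp functor and the one-warp-per-block launch shape are assumptions, and correctness relies on the usual warp-synchronous conventions these headers target.

    #include "opencv2/gpu/device/warp.hpp"

    struct AddOp
    {
        __device__ float operator()(float a, float b) const { return a + b; }
    };

    // one 32-thread warp per block: each lane loads one element, the warp reduces them
    __global__ void warpSumSketch(const float* in, float* out)
    {
        using namespace cv::gpu::device;

        __shared__ volatile float buf[Warp::WARP_SIZE];
        buf[Warp::laneId()] = in[blockIdx.x * Warp::WARP_SIZE + Warp::laneId()];

        float sum = Warp::reduce(buf, AddOp());
        if (Warp::laneId() == 0)
            out[blockIdx.x] = sum;
    }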
+// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef OPENCV_GPU_WARP_REDUCE_HPP__ +#define OPENCV_GPU_WARP_REDUCE_HPP__ + +namespace cv { namespace gpu { namespace device +{ + template + __device__ __forceinline__ T warp_reduce(volatile T *ptr , const unsigned int tid = threadIdx.x) + { + const unsigned int lane = tid & 31; // index of thread in warp (0..31) + + if (lane < 16) + { + T partial = ptr[tid]; + + ptr[tid] = partial = partial + ptr[tid + 16]; + ptr[tid] = partial = partial + ptr[tid + 8]; + ptr[tid] = partial = partial + ptr[tid + 4]; + ptr[tid] = partial = partial + ptr[tid + 2]; + ptr[tid] = partial = partial + ptr[tid + 1]; + } + + return ptr[tid - lane]; + } +}}} // namespace cv { namespace gpu { namespace device { + +#endif /* OPENCV_GPU_WARP_REDUCE_HPP__ */ diff --git a/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/warp_shuffle.hpp b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/warp_shuffle.hpp new file mode 100644 index 0000000..8b4479a --- /dev/null +++ b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/warp_shuffle.hpp @@ -0,0 +1,145 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. 
+// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef __OPENCV_GPU_WARP_SHUFFLE_HPP__ +#define __OPENCV_GPU_WARP_SHUFFLE_HPP__ + +namespace cv { namespace gpu { namespace device +{ + template + __device__ __forceinline__ T shfl(T val, int srcLane, int width = warpSize) + { + #if __CUDA_ARCH__ >= 300 + return __shfl(val, srcLane, width); + #else + return T(); + #endif + } + __device__ __forceinline__ unsigned int shfl(unsigned int val, int srcLane, int width = warpSize) + { + #if __CUDA_ARCH__ >= 300 + return (unsigned int) __shfl((int) val, srcLane, width); + #else + return 0; + #endif + } + __device__ __forceinline__ double shfl(double val, int srcLane, int width = warpSize) + { + #if __CUDA_ARCH__ >= 300 + int lo = __double2loint(val); + int hi = __double2hiint(val); + + lo = __shfl(lo, srcLane, width); + hi = __shfl(hi, srcLane, width); + + return __hiloint2double(hi, lo); + #else + return 0.0; + #endif + } + + template + __device__ __forceinline__ T shfl_down(T val, unsigned int delta, int width = warpSize) + { + #if __CUDA_ARCH__ >= 300 + return __shfl_down(val, delta, width); + #else + return T(); + #endif + } + __device__ __forceinline__ unsigned int shfl_down(unsigned int val, unsigned int delta, int width = warpSize) + { + #if __CUDA_ARCH__ >= 300 + return (unsigned int) __shfl_down((int) val, delta, width); + #else + return 0; + #endif + } + __device__ __forceinline__ double shfl_down(double val, unsigned int delta, int width = warpSize) + { + #if __CUDA_ARCH__ >= 300 + int lo = __double2loint(val); + int hi = __double2hiint(val); + + lo = __shfl_down(lo, delta, width); + hi = __shfl_down(hi, delta, width); + + return __hiloint2double(hi, lo); + #else + return 0.0; + #endif + } + + template + __device__ __forceinline__ T shfl_up(T val, unsigned int delta, int width = warpSize) + { + #if __CUDA_ARCH__ >= 300 + return __shfl_up(val, delta, width); + #else + return T(); + #endif + } + __device__ __forceinline__ unsigned int shfl_up(unsigned int val, unsigned int delta, int width = warpSize) + { + #if __CUDA_ARCH__ >= 300 + return (unsigned int) __shfl_up((int) val, delta, width); + #else + return 0; + #endif + } + __device__ __forceinline__ double shfl_up(double val, unsigned int delta, int width = warpSize) + { + #if __CUDA_ARCH__ >= 300 + int lo = __double2loint(val); + int hi = __double2hiint(val); + + lo = __shfl_up(lo, delta, width); + hi = __shfl_up(hi, delta, width); + + return __hiloint2double(hi, lo); + #else + return 0.0; + #endif + } +}}} + +#endif // __OPENCV_GPU_WARP_SHUFFLE_HPP__ diff --git a/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/devmem2d.hpp b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/devmem2d.hpp new file mode 100644 index 0000000..18dfcd8 --- /dev/null +++ b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/devmem2d.hpp @@ -0,0 +1,43 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. 
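The shuffle wrappers above fall back to returning zero below compute capability 3.0, so real code keeps a shared-memory path as well. A hedged device-side sketch of the common shfl_down reduction pattern (the function name is hypothetical):

    #include "opencv2/gpu/device/warp_shuffle.hpp"

    // sums one value per lane across a full warp; only meaningful on __CUDA_ARCH__ >= 300,
    // where the wrappers map to the hardware __shfl_down instruction
    __device__ __forceinline__ float warpSumShfl(float val)
    {
        for (int delta = 16; delta > 0; delta /= 2)
            val += cv::gpu::device::shfl_down(val, delta);
        return val; // lane 0 ends up with the warp-wide sum
    }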
+// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#include "opencv2/core/cuda_devptrs.hpp" diff --git a/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/gpu.hpp b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/gpu.hpp new file mode 100644 index 0000000..de16982 --- /dev/null +++ b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/gpu.hpp @@ -0,0 +1,2530 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. 
+// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef __OPENCV_GPU_HPP__ +#define __OPENCV_GPU_HPP__ + +#ifndef SKIP_INCLUDES +#include +#include +#include +#endif + +#include "opencv2/core/gpumat.hpp" +#include "opencv2/imgproc/imgproc.hpp" +#include "opencv2/objdetect/objdetect.hpp" +#include "opencv2/features2d/features2d.hpp" + +namespace cv { namespace gpu { + +//////////////////////////////// CudaMem //////////////////////////////// +// CudaMem is limited cv::Mat with page locked memory allocation. +// Page locked memory is only needed for async and faster coping to GPU. +// It is convertable to cv::Mat header without reference counting +// so you can use it with other opencv functions. + +// Page-locks the matrix m memory and maps it for the device(s) +CV_EXPORTS void registerPageLocked(Mat& m); +// Unmaps the memory of matrix m, and makes it pageable again. +CV_EXPORTS void unregisterPageLocked(Mat& m); + +class CV_EXPORTS CudaMem +{ +public: + enum { ALLOC_PAGE_LOCKED = 1, ALLOC_ZEROCOPY = 2, ALLOC_WRITE_COMBINED = 4 }; + + CudaMem(); + CudaMem(const CudaMem& m); + + CudaMem(int rows, int cols, int type, int _alloc_type = ALLOC_PAGE_LOCKED); + CudaMem(Size size, int type, int alloc_type = ALLOC_PAGE_LOCKED); + + + //! creates from cv::Mat with coping data + explicit CudaMem(const Mat& m, int alloc_type = ALLOC_PAGE_LOCKED); + + ~CudaMem(); + + CudaMem& operator = (const CudaMem& m); + + //! returns deep copy of the matrix, i.e. the data is copied + CudaMem clone() const; + + //! allocates new matrix data unless the matrix already has specified size and type. + void create(int rows, int cols, int type, int alloc_type = ALLOC_PAGE_LOCKED); + void create(Size size, int type, int alloc_type = ALLOC_PAGE_LOCKED); + + //! decrements reference counter and released memory if needed. + void release(); + + //! returns matrix header with disabled reference counting for CudaMem data. + Mat createMatHeader() const; + operator Mat() const; + + //! maps host memory into device address space and returns GpuMat header for it. Throws exception if not supported by hardware. 
+ GpuMat createGpuMatHeader() const; + operator GpuMat() const; + + //returns if host memory can be mapperd to gpu address space; + static bool canMapHostMemory(); + + // Please see cv::Mat for descriptions + bool isContinuous() const; + size_t elemSize() const; + size_t elemSize1() const; + int type() const; + int depth() const; + int channels() const; + size_t step1() const; + Size size() const; + bool empty() const; + + + // Please see cv::Mat for descriptions + int flags; + int rows, cols; + size_t step; + + uchar* data; + int* refcount; + + uchar* datastart; + uchar* dataend; + + int alloc_type; +}; + +//////////////////////////////// CudaStream //////////////////////////////// +// Encapculates Cuda Stream. Provides interface for async coping. +// Passed to each function that supports async kernel execution. +// Reference counting is enabled + +class CV_EXPORTS Stream +{ +public: + Stream(); + ~Stream(); + + Stream(const Stream&); + Stream& operator =(const Stream&); + + bool queryIfComplete(); + void waitForCompletion(); + + //! downloads asynchronously + // Warning! cv::Mat must point to page locked memory (i.e. to CudaMem data or to its subMat) + void enqueueDownload(const GpuMat& src, CudaMem& dst); + void enqueueDownload(const GpuMat& src, Mat& dst); + + //! uploads asynchronously + // Warning! cv::Mat must point to page locked memory (i.e. to CudaMem data or to its ROI) + void enqueueUpload(const CudaMem& src, GpuMat& dst); + void enqueueUpload(const Mat& src, GpuMat& dst); + + //! copy asynchronously + void enqueueCopy(const GpuMat& src, GpuMat& dst); + + //! memory set asynchronously + void enqueueMemSet(GpuMat& src, Scalar val); + void enqueueMemSet(GpuMat& src, Scalar val, const GpuMat& mask); + + //! converts matrix type, ex from float to uchar depending on type + void enqueueConvert(const GpuMat& src, GpuMat& dst, int dtype, double a = 1, double b = 0); + + //! adds a callback to be called on the host after all currently enqueued items in the stream have completed + typedef void (*StreamCallback)(Stream& stream, int status, void* userData); + void enqueueHostCallback(StreamCallback callback, void* userData); + + static Stream& Null(); + + operator bool() const; + +private: + struct Impl; + + explicit Stream(Impl* impl); + void create(); + void release(); + + Impl *impl; + + friend struct StreamAccessor; +}; + + +//////////////////////////////// Filter Engine //////////////////////////////// + +/*! +The Base Class for 1D or Row-wise Filters + +This is the base class for linear or non-linear filters that process 1D data. +In particular, such filters are used for the "horizontal" filtering parts in separable filters. +*/ +class CV_EXPORTS BaseRowFilter_GPU +{ +public: + BaseRowFilter_GPU(int ksize_, int anchor_) : ksize(ksize_), anchor(anchor_) {} + virtual ~BaseRowFilter_GPU() {} + virtual void operator()(const GpuMat& src, GpuMat& dst, Stream& stream = Stream::Null()) = 0; + int ksize, anchor; +}; + +/*! +The Base Class for Column-wise Filters + +This is the base class for linear or non-linear filters that process columns of 2D arrays. +Such filters are used for the "vertical" filtering parts in separable filters. +*/ +class CV_EXPORTS BaseColumnFilter_GPU +{ +public: + BaseColumnFilter_GPU(int ksize_, int anchor_) : ksize(ksize_), anchor(anchor_) {} + virtual ~BaseColumnFilter_GPU() {} + virtual void operator()(const GpuMat& src, GpuMat& dst, Stream& stream = Stream::Null()) = 0; + int ksize, anchor; +}; + +/*! +The Base Class for Non-Separable 2D Filters. 
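The two classes above exist so that transfers can overlap with kernel work: page-locked CudaMem is what makes enqueueUpload/enqueueDownload truly asynchronous. A host-side sketch (sizes and the conversion step are arbitrary examples):

    using namespace cv;
    using namespace cv::gpu;

    CudaMem pinnedIn(480, 640, CV_8UC1, CudaMem::ALLOC_PAGE_LOCKED);
    CudaMem pinnedOut;                       // filled by enqueueDownload below
    Mat host = pinnedIn.createMatHeader();   // fill 'host' as an ordinary cv::Mat

    GpuMat d_src, d_dst;
    Stream stream;
    stream.enqueueUpload(pinnedIn, d_src);                     // async: source is page-locked
    stream.enqueueConvert(d_src, d_dst, CV_32F, 1.0 / 255.0);  // 8-bit -> normalized float
    stream.enqueueDownload(d_dst, pinnedOut);
    stream.waitForCompletion();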
+ +This is the base class for linear or non-linear 2D filters. +*/ +class CV_EXPORTS BaseFilter_GPU +{ +public: + BaseFilter_GPU(const Size& ksize_, const Point& anchor_) : ksize(ksize_), anchor(anchor_) {} + virtual ~BaseFilter_GPU() {} + virtual void operator()(const GpuMat& src, GpuMat& dst, Stream& stream = Stream::Null()) = 0; + Size ksize; + Point anchor; +}; + +/*! +The Base Class for Filter Engine. + +The class can be used to apply an arbitrary filtering operation to an image. +It contains all the necessary intermediate buffers. +*/ +class CV_EXPORTS FilterEngine_GPU +{ +public: + virtual ~FilterEngine_GPU() {} + + virtual void apply(const GpuMat& src, GpuMat& dst, Rect roi = Rect(0,0,-1,-1), Stream& stream = Stream::Null()) = 0; +}; + +//! returns the non-separable filter engine with the specified filter +CV_EXPORTS Ptr createFilter2D_GPU(const Ptr& filter2D, int srcType, int dstType); + +//! returns the separable filter engine with the specified filters +CV_EXPORTS Ptr createSeparableFilter_GPU(const Ptr& rowFilter, + const Ptr& columnFilter, int srcType, int bufType, int dstType); +CV_EXPORTS Ptr createSeparableFilter_GPU(const Ptr& rowFilter, + const Ptr& columnFilter, int srcType, int bufType, int dstType, GpuMat& buf); + +//! returns horizontal 1D box filter +//! supports only CV_8UC1 source type and CV_32FC1 sum type +CV_EXPORTS Ptr getRowSumFilter_GPU(int srcType, int sumType, int ksize, int anchor = -1); + +//! returns vertical 1D box filter +//! supports only CV_8UC1 sum type and CV_32FC1 dst type +CV_EXPORTS Ptr getColumnSumFilter_GPU(int sumType, int dstType, int ksize, int anchor = -1); + +//! returns 2D box filter +//! supports CV_8UC1 and CV_8UC4 source type, dst type must be the same as source type +CV_EXPORTS Ptr getBoxFilter_GPU(int srcType, int dstType, const Size& ksize, Point anchor = Point(-1, -1)); + +//! returns box filter engine +CV_EXPORTS Ptr createBoxFilter_GPU(int srcType, int dstType, const Size& ksize, + const Point& anchor = Point(-1,-1)); + +//! returns 2D morphological filter +//! only MORPH_ERODE and MORPH_DILATE are supported +//! supports CV_8UC1 and CV_8UC4 types +//! kernel must have CV_8UC1 type, one rows and cols == ksize.width * ksize.height +CV_EXPORTS Ptr getMorphologyFilter_GPU(int op, int type, const Mat& kernel, const Size& ksize, + Point anchor=Point(-1,-1)); + +//! returns morphological filter engine. Only MORPH_ERODE and MORPH_DILATE are supported. +CV_EXPORTS Ptr createMorphologyFilter_GPU(int op, int type, const Mat& kernel, + const Point& anchor = Point(-1,-1), int iterations = 1); +CV_EXPORTS Ptr createMorphologyFilter_GPU(int op, int type, const Mat& kernel, GpuMat& buf, + const Point& anchor = Point(-1,-1), int iterations = 1); + +//! returns 2D filter with the specified kernel +//! supports CV_8U, CV_16U and CV_32F one and four channel image +CV_EXPORTS Ptr getLinearFilter_GPU(int srcType, int dstType, const Mat& kernel, Point anchor = Point(-1, -1), int borderType = BORDER_DEFAULT); + +//! returns the non-separable linear filter engine +CV_EXPORTS Ptr createLinearFilter_GPU(int srcType, int dstType, const Mat& kernel, + Point anchor = Point(-1,-1), int borderType = BORDER_DEFAULT); + +//! returns the primitive row filter with the specified kernel. +//! supports only CV_8UC1, CV_8UC4, CV_16SC1, CV_16SC2, CV_32SC1, CV_32FC1 source type. +//! there are two version of algorithm: NPP and OpenCV. +//! NPP calls when srcType == CV_8UC1 or srcType == CV_8UC4 and bufType == srcType, +//! otherwise calls OpenCV version. +//! 
NPP supports only BORDER_CONSTANT border type. +//! OpenCV version supports only CV_32F as buffer depth and +//! BORDER_REFLECT101, BORDER_REPLICATE and BORDER_CONSTANT border types. +CV_EXPORTS Ptr getLinearRowFilter_GPU(int srcType, int bufType, const Mat& rowKernel, + int anchor = -1, int borderType = BORDER_DEFAULT); + +//! returns the primitive column filter with the specified kernel. +//! supports only CV_8UC1, CV_8UC4, CV_16SC1, CV_16SC2, CV_32SC1, CV_32FC1 dst type. +//! there are two version of algorithm: NPP and OpenCV. +//! NPP calls when dstType == CV_8UC1 or dstType == CV_8UC4 and bufType == dstType, +//! otherwise calls OpenCV version. +//! NPP supports only BORDER_CONSTANT border type. +//! OpenCV version supports only CV_32F as buffer depth and +//! BORDER_REFLECT101, BORDER_REPLICATE and BORDER_CONSTANT border types. +CV_EXPORTS Ptr getLinearColumnFilter_GPU(int bufType, int dstType, const Mat& columnKernel, + int anchor = -1, int borderType = BORDER_DEFAULT); + +//! returns the separable linear filter engine +CV_EXPORTS Ptr createSeparableLinearFilter_GPU(int srcType, int dstType, const Mat& rowKernel, + const Mat& columnKernel, const Point& anchor = Point(-1,-1), int rowBorderType = BORDER_DEFAULT, + int columnBorderType = -1); +CV_EXPORTS Ptr createSeparableLinearFilter_GPU(int srcType, int dstType, const Mat& rowKernel, + const Mat& columnKernel, GpuMat& buf, const Point& anchor = Point(-1,-1), int rowBorderType = BORDER_DEFAULT, + int columnBorderType = -1); + +//! returns filter engine for the generalized Sobel operator +CV_EXPORTS Ptr createDerivFilter_GPU(int srcType, int dstType, int dx, int dy, int ksize, + int rowBorderType = BORDER_DEFAULT, int columnBorderType = -1); +CV_EXPORTS Ptr createDerivFilter_GPU(int srcType, int dstType, int dx, int dy, int ksize, GpuMat& buf, + int rowBorderType = BORDER_DEFAULT, int columnBorderType = -1); + +//! returns the Gaussian filter engine +CV_EXPORTS Ptr createGaussianFilter_GPU(int type, Size ksize, double sigma1, double sigma2 = 0, + int rowBorderType = BORDER_DEFAULT, int columnBorderType = -1); +CV_EXPORTS Ptr createGaussianFilter_GPU(int type, Size ksize, GpuMat& buf, double sigma1, double sigma2 = 0, + int rowBorderType = BORDER_DEFAULT, int columnBorderType = -1); + +//! returns maximum filter +CV_EXPORTS Ptr getMaxFilter_GPU(int srcType, int dstType, const Size& ksize, Point anchor = Point(-1,-1)); + +//! returns minimum filter +CV_EXPORTS Ptr getMinFilter_GPU(int srcType, int dstType, const Size& ksize, Point anchor = Point(-1,-1)); + +//! smooths the image using the normalized box filter +//! supports CV_8UC1, CV_8UC4 types +CV_EXPORTS void boxFilter(const GpuMat& src, GpuMat& dst, int ddepth, Size ksize, Point anchor = Point(-1,-1), Stream& stream = Stream::Null()); + +//! a synonym for normalized box filter +static inline void blur(const GpuMat& src, GpuMat& dst, Size ksize, Point anchor = Point(-1,-1), Stream& stream = Stream::Null()) +{ + boxFilter(src, dst, -1, ksize, anchor, stream); +} + +//! erodes the image (applies the local minimum operator) +CV_EXPORTS void erode(const GpuMat& src, GpuMat& dst, const Mat& kernel, Point anchor = Point(-1, -1), int iterations = 1); +CV_EXPORTS void erode(const GpuMat& src, GpuMat& dst, const Mat& kernel, GpuMat& buf, + Point anchor = Point(-1, -1), int iterations = 1, + Stream& stream = Stream::Null()); + +//! 
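A host-side sketch of the box-filter and morphology entry points declared above; the kernel size is a placeholder and the input is assumed to be CV_8UC1 or CV_8UC4:

    cv::gpu::GpuMat d_img;                            // assumed to hold a CV_8UC1 image uploaded earlier
    cv::gpu::GpuMat d_smooth, d_eroded;
    cv::gpu::blur(d_img, d_smooth, cv::Size(5, 5));   // normalized box filter
    cv::Mat se = cv::getStructuringElement(cv::MORPH_RECT, cv::Size(3, 3));
    cv::gpu::erode(d_smooth, d_eroded, se);           // local-minimum operator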
dilates the image (applies the local maximum operator) +CV_EXPORTS void dilate(const GpuMat& src, GpuMat& dst, const Mat& kernel, Point anchor = Point(-1, -1), int iterations = 1); +CV_EXPORTS void dilate(const GpuMat& src, GpuMat& dst, const Mat& kernel, GpuMat& buf, + Point anchor = Point(-1, -1), int iterations = 1, + Stream& stream = Stream::Null()); + +//! applies an advanced morphological operation to the image +CV_EXPORTS void morphologyEx(const GpuMat& src, GpuMat& dst, int op, const Mat& kernel, Point anchor = Point(-1, -1), int iterations = 1); +CV_EXPORTS void morphologyEx(const GpuMat& src, GpuMat& dst, int op, const Mat& kernel, GpuMat& buf1, GpuMat& buf2, + Point anchor = Point(-1, -1), int iterations = 1, Stream& stream = Stream::Null()); + +//! applies non-separable 2D linear filter to the image +CV_EXPORTS void filter2D(const GpuMat& src, GpuMat& dst, int ddepth, const Mat& kernel, Point anchor=Point(-1,-1), int borderType = BORDER_DEFAULT, Stream& stream = Stream::Null()); + +//! applies separable 2D linear filter to the image +CV_EXPORTS void sepFilter2D(const GpuMat& src, GpuMat& dst, int ddepth, const Mat& kernelX, const Mat& kernelY, + Point anchor = Point(-1,-1), int rowBorderType = BORDER_DEFAULT, int columnBorderType = -1); +CV_EXPORTS void sepFilter2D(const GpuMat& src, GpuMat& dst, int ddepth, const Mat& kernelX, const Mat& kernelY, GpuMat& buf, + Point anchor = Point(-1,-1), int rowBorderType = BORDER_DEFAULT, int columnBorderType = -1, + Stream& stream = Stream::Null()); + +//! applies generalized Sobel operator to the image +CV_EXPORTS void Sobel(const GpuMat& src, GpuMat& dst, int ddepth, int dx, int dy, int ksize = 3, double scale = 1, + int rowBorderType = BORDER_DEFAULT, int columnBorderType = -1); +CV_EXPORTS void Sobel(const GpuMat& src, GpuMat& dst, int ddepth, int dx, int dy, GpuMat& buf, int ksize = 3, double scale = 1, + int rowBorderType = BORDER_DEFAULT, int columnBorderType = -1, Stream& stream = Stream::Null()); + +//! applies the vertical or horizontal Scharr operator to the image +CV_EXPORTS void Scharr(const GpuMat& src, GpuMat& dst, int ddepth, int dx, int dy, double scale = 1, + int rowBorderType = BORDER_DEFAULT, int columnBorderType = -1); +CV_EXPORTS void Scharr(const GpuMat& src, GpuMat& dst, int ddepth, int dx, int dy, GpuMat& buf, double scale = 1, + int rowBorderType = BORDER_DEFAULT, int columnBorderType = -1, Stream& stream = Stream::Null()); + +//! smooths the image using Gaussian filter. +CV_EXPORTS void GaussianBlur(const GpuMat& src, GpuMat& dst, Size ksize, double sigma1, double sigma2 = 0, + int rowBorderType = BORDER_DEFAULT, int columnBorderType = -1); +CV_EXPORTS void GaussianBlur(const GpuMat& src, GpuMat& dst, Size ksize, GpuMat& buf, double sigma1, double sigma2 = 0, + int rowBorderType = BORDER_DEFAULT, int columnBorderType = -1, Stream& stream = Stream::Null()); + +//! applies Laplacian operator to the image +//! supports only ksize = 1 and ksize = 3 +CV_EXPORTS void Laplacian(const GpuMat& src, GpuMat& dst, int ddepth, int ksize = 1, double scale = 1, int borderType = BORDER_DEFAULT, Stream& stream = Stream::Null()); + + +////////////////////////////// Arithmetics /////////////////////////////////// + +//! implements generalized matrix product algorithm GEMM from BLAS +CV_EXPORTS void gemm(const GpuMat& src1, const GpuMat& src2, double alpha, + const GpuMat& src3, double beta, GpuMat& dst, int flags = 0, Stream& stream = Stream::Null()); + +//! transposes the matrix +//! 
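A sketch of chaining the Gaussian and Sobel routines above; the sigma, kernel size and CV_32F output depth are assumptions, not the only supported combination:

    cv::gpu::GpuMat d_gray;                                // CV_8UC1 input
    cv::gpu::GpuMat d_blurred, d_dx;
    cv::gpu::GaussianBlur(d_gray, d_blurred, cv::Size(5, 5), 1.2);
    cv::gpu::Sobel(d_blurred, d_dx, CV_32F, 1, 0, 3);      // x-derivative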
supports matrix with element size = 1, 4 and 8 bytes (CV_8UC1, CV_8UC4, CV_16UC2, CV_32FC1, etc) +CV_EXPORTS void transpose(const GpuMat& src1, GpuMat& dst, Stream& stream = Stream::Null()); + +//! reverses the order of the rows, columns or both in a matrix +//! supports 1, 3 and 4 channels images with CV_8U, CV_16U, CV_32S or CV_32F depth +CV_EXPORTS void flip(const GpuMat& a, GpuMat& b, int flipCode, Stream& stream = Stream::Null()); + +//! transforms 8-bit unsigned integers using lookup table: dst(i)=lut(src(i)) +//! destination array will have the depth type as lut and the same channels number as source +//! supports CV_8UC1, CV_8UC3 types +CV_EXPORTS void LUT(const GpuMat& src, const Mat& lut, GpuMat& dst, Stream& stream = Stream::Null()); + +//! makes multi-channel array out of several single-channel arrays +CV_EXPORTS void merge(const GpuMat* src, size_t n, GpuMat& dst, Stream& stream = Stream::Null()); + +//! makes multi-channel array out of several single-channel arrays +CV_EXPORTS void merge(const vector& src, GpuMat& dst, Stream& stream = Stream::Null()); + +//! copies each plane of a multi-channel array to a dedicated array +CV_EXPORTS void split(const GpuMat& src, GpuMat* dst, Stream& stream = Stream::Null()); + +//! copies each plane of a multi-channel array to a dedicated array +CV_EXPORTS void split(const GpuMat& src, vector& dst, Stream& stream = Stream::Null()); + +//! computes magnitude of complex (x(i).re, x(i).im) vector +//! supports only CV_32FC2 type +CV_EXPORTS void magnitude(const GpuMat& xy, GpuMat& magnitude, Stream& stream = Stream::Null()); + +//! computes squared magnitude of complex (x(i).re, x(i).im) vector +//! supports only CV_32FC2 type +CV_EXPORTS void magnitudeSqr(const GpuMat& xy, GpuMat& magnitude, Stream& stream = Stream::Null()); + +//! computes magnitude of each (x(i), y(i)) vector +//! supports only floating-point source +CV_EXPORTS void magnitude(const GpuMat& x, const GpuMat& y, GpuMat& magnitude, Stream& stream = Stream::Null()); + +//! computes squared magnitude of each (x(i), y(i)) vector +//! supports only floating-point source +CV_EXPORTS void magnitudeSqr(const GpuMat& x, const GpuMat& y, GpuMat& magnitude, Stream& stream = Stream::Null()); + +//! computes angle (angle(i)) of each (x(i), y(i)) vector +//! supports only floating-point source +CV_EXPORTS void phase(const GpuMat& x, const GpuMat& y, GpuMat& angle, bool angleInDegrees = false, Stream& stream = Stream::Null()); + +//! converts Cartesian coordinates to polar +//! supports only floating-point source +CV_EXPORTS void cartToPolar(const GpuMat& x, const GpuMat& y, GpuMat& magnitude, GpuMat& angle, bool angleInDegrees = false, Stream& stream = Stream::Null()); + +//! converts polar coordinates to Cartesian +//! supports only floating-point source +CV_EXPORTS void polarToCart(const GpuMat& magnitude, const GpuMat& angle, GpuMat& x, GpuMat& y, bool angleInDegrees = false, Stream& stream = Stream::Null()); + +//! 
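A short sketch of the channel-splitting and polar-conversion helpers above; d_xy is assumed to be a CV_32FC2 matrix of packed x/y components:

    cv::gpu::GpuMat d_xy;                  // CV_32FC2, e.g. packed gradient components
    std::vector<cv::gpu::GpuMat> planes;
    cv::gpu::split(d_xy, planes);          // planes[0] = x, planes[1] = y
    cv::gpu::GpuMat d_mag, d_angle;
    cv::gpu::cartToPolar(planes[0], planes[1], d_mag, d_angle, true /* degrees */);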
scales and shifts array elements so that either the specified norm (alpha) or the minimum (alpha) and maximum (beta) array values get the specified values +CV_EXPORTS void normalize(const GpuMat& src, GpuMat& dst, double alpha = 1, double beta = 0, + int norm_type = NORM_L2, int dtype = -1, const GpuMat& mask = GpuMat()); +CV_EXPORTS void normalize(const GpuMat& src, GpuMat& dst, double a, double b, + int norm_type, int dtype, const GpuMat& mask, GpuMat& norm_buf, GpuMat& cvt_buf); + + +//////////////////////////// Per-element operations //////////////////////////////////// + +//! adds one matrix to another (c = a + b) +CV_EXPORTS void add(const GpuMat& a, const GpuMat& b, GpuMat& c, const GpuMat& mask = GpuMat(), int dtype = -1, Stream& stream = Stream::Null()); +//! adds scalar to a matrix (c = a + s) +CV_EXPORTS void add(const GpuMat& a, const Scalar& sc, GpuMat& c, const GpuMat& mask = GpuMat(), int dtype = -1, Stream& stream = Stream::Null()); + +//! subtracts one matrix from another (c = a - b) +CV_EXPORTS void subtract(const GpuMat& a, const GpuMat& b, GpuMat& c, const GpuMat& mask = GpuMat(), int dtype = -1, Stream& stream = Stream::Null()); +//! subtracts scalar from a matrix (c = a - s) +CV_EXPORTS void subtract(const GpuMat& a, const Scalar& sc, GpuMat& c, const GpuMat& mask = GpuMat(), int dtype = -1, Stream& stream = Stream::Null()); + +//! computes element-wise weighted product of the two arrays (c = scale * a * b) +CV_EXPORTS void multiply(const GpuMat& a, const GpuMat& b, GpuMat& c, double scale = 1, int dtype = -1, Stream& stream = Stream::Null()); +//! weighted multiplies matrix to a scalar (c = scale * a * s) +CV_EXPORTS void multiply(const GpuMat& a, const Scalar& sc, GpuMat& c, double scale = 1, int dtype = -1, Stream& stream = Stream::Null()); + +//! computes element-wise weighted quotient of the two arrays (c = a / b) +CV_EXPORTS void divide(const GpuMat& a, const GpuMat& b, GpuMat& c, double scale = 1, int dtype = -1, Stream& stream = Stream::Null()); +//! computes element-wise weighted quotient of matrix and scalar (c = a / s) +CV_EXPORTS void divide(const GpuMat& a, const Scalar& sc, GpuMat& c, double scale = 1, int dtype = -1, Stream& stream = Stream::Null()); +//! computes element-wise weighted reciprocal of an array (dst = scale/src2) +CV_EXPORTS void divide(double scale, const GpuMat& b, GpuMat& c, int dtype = -1, Stream& stream = Stream::Null()); + +//! computes the weighted sum of two arrays (dst = alpha*src1 + beta*src2 + gamma) +CV_EXPORTS void addWeighted(const GpuMat& src1, double alpha, const GpuMat& src2, double beta, double gamma, GpuMat& dst, + int dtype = -1, Stream& stream = Stream::Null()); + +//! adds scaled array to another one (dst = alpha*src1 + src2) +static inline void scaleAdd(const GpuMat& src1, double alpha, const GpuMat& src2, GpuMat& dst, Stream& stream = Stream::Null()) +{ + addWeighted(src1, alpha, src2, 1.0, 0.0, dst, -1, stream); +} + +//! computes element-wise absolute difference of two arrays (c = abs(a - b)) +CV_EXPORTS void absdiff(const GpuMat& a, const GpuMat& b, GpuMat& c, Stream& stream = Stream::Null()); +//! computes element-wise absolute difference of array and scalar (c = abs(a - s)) +CV_EXPORTS void absdiff(const GpuMat& a, const Scalar& s, GpuMat& c, Stream& stream = Stream::Null()); + +//! computes absolute value of each matrix element +//! supports CV_16S and CV_32F depth +CV_EXPORTS void abs(const GpuMat& src, GpuMat& dst, Stream& stream = Stream::Null()); + +//! computes square of each pixel in an image +//! 
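A sketch of blending two device images with the weighted-sum and normalization routines above (the weights are arbitrary):

    cv::gpu::GpuMat d_a, d_b;              // same size and type, e.g. CV_8UC1
    cv::gpu::GpuMat d_blend, d_norm;
    cv::gpu::addWeighted(d_a, 0.7, d_b, 0.3, 0.0, d_blend);        // 0.7*a + 0.3*b
    cv::gpu::normalize(d_blend, d_norm, 0, 255, cv::NORM_MINMAX, CV_8U);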
supports CV_8U, CV_16U, CV_16S and CV_32F depth +CV_EXPORTS void sqr(const GpuMat& src, GpuMat& dst, Stream& stream = Stream::Null()); + +//! computes square root of each pixel in an image +//! supports CV_8U, CV_16U, CV_16S and CV_32F depth +CV_EXPORTS void sqrt(const GpuMat& src, GpuMat& dst, Stream& stream = Stream::Null()); + +//! computes exponent of each matrix element (b = e**a) +//! supports CV_8U, CV_16U, CV_16S and CV_32F depth +CV_EXPORTS void exp(const GpuMat& a, GpuMat& b, Stream& stream = Stream::Null()); + +//! computes natural logarithm of absolute value of each matrix element: b = log(abs(a)) +//! supports CV_8U, CV_16U, CV_16S and CV_32F depth +CV_EXPORTS void log(const GpuMat& a, GpuMat& b, Stream& stream = Stream::Null()); + +//! computes power of each matrix element: +// (dst(i,j) = pow( src(i,j) , power), if src.type() is integer +// (dst(i,j) = pow(fabs(src(i,j)), power), otherwise +//! supports all, except depth == CV_64F +CV_EXPORTS void pow(const GpuMat& src, double power, GpuMat& dst, Stream& stream = Stream::Null()); + +//! compares elements of two arrays (c = a \ b) +CV_EXPORTS void compare(const GpuMat& a, const GpuMat& b, GpuMat& c, int cmpop, Stream& stream = Stream::Null()); +CV_EXPORTS void compare(const GpuMat& a, Scalar sc, GpuMat& c, int cmpop, Stream& stream = Stream::Null()); + +//! performs per-elements bit-wise inversion +CV_EXPORTS void bitwise_not(const GpuMat& src, GpuMat& dst, const GpuMat& mask=GpuMat(), Stream& stream = Stream::Null()); + +//! calculates per-element bit-wise disjunction of two arrays +CV_EXPORTS void bitwise_or(const GpuMat& src1, const GpuMat& src2, GpuMat& dst, const GpuMat& mask=GpuMat(), Stream& stream = Stream::Null()); +//! calculates per-element bit-wise disjunction of array and scalar +//! supports 1, 3 and 4 channels images with CV_8U, CV_16U or CV_32S depth +CV_EXPORTS void bitwise_or(const GpuMat& src1, const Scalar& sc, GpuMat& dst, Stream& stream = Stream::Null()); + +//! calculates per-element bit-wise conjunction of two arrays +CV_EXPORTS void bitwise_and(const GpuMat& src1, const GpuMat& src2, GpuMat& dst, const GpuMat& mask=GpuMat(), Stream& stream = Stream::Null()); +//! calculates per-element bit-wise conjunction of array and scalar +//! supports 1, 3 and 4 channels images with CV_8U, CV_16U or CV_32S depth +CV_EXPORTS void bitwise_and(const GpuMat& src1, const Scalar& sc, GpuMat& dst, Stream& stream = Stream::Null()); + +//! calculates per-element bit-wise "exclusive or" operation +CV_EXPORTS void bitwise_xor(const GpuMat& src1, const GpuMat& src2, GpuMat& dst, const GpuMat& mask=GpuMat(), Stream& stream = Stream::Null()); +//! calculates per-element bit-wise "exclusive or" of array and scalar +//! supports 1, 3 and 4 channels images with CV_8U, CV_16U or CV_32S depth +CV_EXPORTS void bitwise_xor(const GpuMat& src1, const Scalar& sc, GpuMat& dst, Stream& stream = Stream::Null()); + +//! pixel by pixel right shift of an image by a constant value +//! supports 1, 3 and 4 channels images with integers elements +CV_EXPORTS void rshift(const GpuMat& src, Scalar_ sc, GpuMat& dst, Stream& stream = Stream::Null()); + +//! pixel by pixel left shift of an image by a constant value +//! supports 1, 3 and 4 channels images with CV_8U, CV_16U or CV_32S depth +CV_EXPORTS void lshift(const GpuMat& src, Scalar_ sc, GpuMat& dst, Stream& stream = Stream::Null()); + +//! 
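A sketch of building a mask with the comparison and bit-wise routines above:

    cv::gpu::GpuMat d_a, d_b;              // same size and type
    cv::gpu::GpuMat d_mask, d_inv;
    cv::gpu::compare(d_a, d_b, d_mask, cv::CMP_GT);   // 255 where a > b, 0 elsewhere
    cv::gpu::bitwise_not(d_mask, d_inv);              // complement of the mask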
computes per-element minimum of two arrays (dst = min(src1, src2)) +CV_EXPORTS void min(const GpuMat& src1, const GpuMat& src2, GpuMat& dst, Stream& stream = Stream::Null()); + +//! computes per-element minimum of array and scalar (dst = min(src1, src2)) +CV_EXPORTS void min(const GpuMat& src1, double src2, GpuMat& dst, Stream& stream = Stream::Null()); + +//! computes per-element maximum of two arrays (dst = max(src1, src2)) +CV_EXPORTS void max(const GpuMat& src1, const GpuMat& src2, GpuMat& dst, Stream& stream = Stream::Null()); + +//! computes per-element maximum of array and scalar (dst = max(src1, src2)) +CV_EXPORTS void max(const GpuMat& src1, double src2, GpuMat& dst, Stream& stream = Stream::Null()); + +enum { ALPHA_OVER, ALPHA_IN, ALPHA_OUT, ALPHA_ATOP, ALPHA_XOR, ALPHA_PLUS, ALPHA_OVER_PREMUL, ALPHA_IN_PREMUL, ALPHA_OUT_PREMUL, + ALPHA_ATOP_PREMUL, ALPHA_XOR_PREMUL, ALPHA_PLUS_PREMUL, ALPHA_PREMUL}; + +//! Composite two images using alpha opacity values contained in each image +//! Supports CV_8UC4, CV_16UC4, CV_32SC4 and CV_32FC4 types +CV_EXPORTS void alphaComp(const GpuMat& img1, const GpuMat& img2, GpuMat& dst, int alpha_op, Stream& stream = Stream::Null()); + + +////////////////////////////// Image processing ////////////////////////////// + +//! DST[x,y] = SRC[xmap[x,y],ymap[x,y]] +//! supports only CV_32FC1 map type +CV_EXPORTS void remap(const GpuMat& src, GpuMat& dst, const GpuMat& xmap, const GpuMat& ymap, + int interpolation, int borderMode = BORDER_CONSTANT, Scalar borderValue = Scalar(), + Stream& stream = Stream::Null()); + +//! Does mean shift filtering on GPU. +CV_EXPORTS void meanShiftFiltering(const GpuMat& src, GpuMat& dst, int sp, int sr, + TermCriteria criteria = TermCriteria(TermCriteria::MAX_ITER + TermCriteria::EPS, 5, 1), + Stream& stream = Stream::Null()); + +//! Does mean shift procedure on GPU. +CV_EXPORTS void meanShiftProc(const GpuMat& src, GpuMat& dstr, GpuMat& dstsp, int sp, int sr, + TermCriteria criteria = TermCriteria(TermCriteria::MAX_ITER + TermCriteria::EPS, 5, 1), + Stream& stream = Stream::Null()); + +//! Does mean shift segmentation with elimination of small regions. +CV_EXPORTS void meanShiftSegmentation(const GpuMat& src, Mat& dst, int sp, int sr, int minsize, + TermCriteria criteria = TermCriteria(TermCriteria::MAX_ITER + TermCriteria::EPS, 5, 1)); + +//! Does coloring of disparity image: [0..ndisp) -> [0..240, 1, 1] in HSV. +//! Supported types of input disparity: CV_8U, CV_16S. +//! Output disparity has CV_8UC4 type in BGRA format (alpha = 255). +CV_EXPORTS void drawColorDisp(const GpuMat& src_disp, GpuMat& dst_disp, int ndisp, Stream& stream = Stream::Null()); + +//! Reprojects disparity image to 3D space. +//! Supports CV_8U and CV_16S types of input disparity. +//! The output is a 3- or 4-channel floating-point matrix. +//! Each element of this matrix will contain the 3D coordinates of the point (x,y,z,1), computed from the disparity map. +//! Q is the 4x4 perspective transformation matrix that can be obtained with cvStereoRectify. +CV_EXPORTS void reprojectImageTo3D(const GpuMat& disp, GpuMat& xyzw, const Mat& Q, int dst_cn = 4, Stream& stream = Stream::Null()); + +//! 
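A sketch of the remap entry point above; the CV_32FC1 maps would normally come from one of the buildWarp*Maps helpers declared further down or be computed on the host:

    cv::gpu::GpuMat d_src, d_dst;
    cv::gpu::GpuMat d_xmap, d_ymap;        // CV_32FC1, same size as the desired output
    cv::gpu::remap(d_src, d_dst, d_xmap, d_ymap, cv::INTER_LINEAR, cv::BORDER_CONSTANT);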
converts image from one color space to another +CV_EXPORTS void cvtColor(const GpuMat& src, GpuMat& dst, int code, int dcn = 0, Stream& stream = Stream::Null()); + +enum +{ + // Bayer Demosaicing (Malvar, He, and Cutler) + COLOR_BayerBG2BGR_MHT = 256, + COLOR_BayerGB2BGR_MHT = 257, + COLOR_BayerRG2BGR_MHT = 258, + COLOR_BayerGR2BGR_MHT = 259, + + COLOR_BayerBG2RGB_MHT = COLOR_BayerRG2BGR_MHT, + COLOR_BayerGB2RGB_MHT = COLOR_BayerGR2BGR_MHT, + COLOR_BayerRG2RGB_MHT = COLOR_BayerBG2BGR_MHT, + COLOR_BayerGR2RGB_MHT = COLOR_BayerGB2BGR_MHT, + + COLOR_BayerBG2GRAY_MHT = 260, + COLOR_BayerGB2GRAY_MHT = 261, + COLOR_BayerRG2GRAY_MHT = 262, + COLOR_BayerGR2GRAY_MHT = 263 +}; +CV_EXPORTS void demosaicing(const GpuMat& src, GpuMat& dst, int code, int dcn = -1, Stream& stream = Stream::Null()); + +//! swap channels +//! dstOrder - Integer array describing how channel values are permutated. The n-th entry +//! of the array contains the number of the channel that is stored in the n-th channel of +//! the output image. E.g. Given an RGBA image, aDstOrder = [3,2,1,0] converts this to ABGR +//! channel order. +CV_EXPORTS void swapChannels(GpuMat& image, const int dstOrder[4], Stream& stream = Stream::Null()); + +//! Routines for correcting image color gamma +CV_EXPORTS void gammaCorrection(const GpuMat& src, GpuMat& dst, bool forward = true, Stream& stream = Stream::Null()); + +//! applies fixed threshold to the image +CV_EXPORTS double threshold(const GpuMat& src, GpuMat& dst, double thresh, double maxval, int type, Stream& stream = Stream::Null()); + +//! resizes the image +//! Supports INTER_NEAREST, INTER_LINEAR, INTER_CUBIC, INTER_AREA +CV_EXPORTS void resize(const GpuMat& src, GpuMat& dst, Size dsize, double fx=0, double fy=0, int interpolation = INTER_LINEAR, Stream& stream = Stream::Null()); + +//! warps the image using affine transformation +//! Supports INTER_NEAREST, INTER_LINEAR, INTER_CUBIC +CV_EXPORTS void warpAffine(const GpuMat& src, GpuMat& dst, const Mat& M, Size dsize, int flags = INTER_LINEAR, + int borderMode = BORDER_CONSTANT, Scalar borderValue = Scalar(), Stream& stream = Stream::Null()); + +CV_EXPORTS void buildWarpAffineMaps(const Mat& M, bool inverse, Size dsize, GpuMat& xmap, GpuMat& ymap, Stream& stream = Stream::Null()); + +//! warps the image using perspective transformation +//! Supports INTER_NEAREST, INTER_LINEAR, INTER_CUBIC +CV_EXPORTS void warpPerspective(const GpuMat& src, GpuMat& dst, const Mat& M, Size dsize, int flags = INTER_LINEAR, + int borderMode = BORDER_CONSTANT, Scalar borderValue = Scalar(), Stream& stream = Stream::Null()); + +CV_EXPORTS void buildWarpPerspectiveMaps(const Mat& M, bool inverse, Size dsize, GpuMat& xmap, GpuMat& ymap, Stream& stream = Stream::Null()); + +//! builds plane warping maps +CV_EXPORTS void buildWarpPlaneMaps(Size src_size, Rect dst_roi, const Mat &K, const Mat& R, const Mat &T, float scale, + GpuMat& map_x, GpuMat& map_y, Stream& stream = Stream::Null()); + +//! builds cylindrical warping maps +CV_EXPORTS void buildWarpCylindricalMaps(Size src_size, Rect dst_roi, const Mat &K, const Mat& R, float scale, + GpuMat& map_x, GpuMat& map_y, Stream& stream = Stream::Null()); + +//! builds spherical warping maps +CV_EXPORTS void buildWarpSphericalMaps(Size src_size, Rect dst_roi, const Mat &K, const Mat& R, float scale, + GpuMat& map_x, GpuMat& map_y, Stream& stream = Stream::Null()); + +//! rotates an image around the origin (0,0) and then shifts it +//! supports INTER_NEAREST, INTER_LINEAR, INTER_CUBIC +//! 
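A sketch of chaining the color-conversion and geometric routines above; the rotation center, angle and scale factor are placeholders:

    cv::gpu::GpuMat d_bgr, d_gray, d_half, d_rot;
    cv::gpu::cvtColor(d_bgr, d_gray, CV_BGR2GRAY);
    cv::gpu::resize(d_gray, d_half, cv::Size(), 0.5, 0.5, cv::INTER_AREA);
    cv::Mat M = cv::getRotationMatrix2D(cv::Point2f(160.f, 120.f), 30.0, 1.0);
    cv::gpu::warpAffine(d_half, d_rot, M, d_half.size());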
supports 1, 3 or 4 channels images with CV_8U, CV_16U or CV_32F depth +CV_EXPORTS void rotate(const GpuMat& src, GpuMat& dst, Size dsize, double angle, double xShift = 0, double yShift = 0, + int interpolation = INTER_LINEAR, Stream& stream = Stream::Null()); + +//! copies 2D array to a larger destination array and pads borders with user-specifiable constant +CV_EXPORTS void copyMakeBorder(const GpuMat& src, GpuMat& dst, int top, int bottom, int left, int right, int borderType, + const Scalar& value = Scalar(), Stream& stream = Stream::Null()); + +//! computes the integral image +//! sum will have CV_32S type, but will contain unsigned int values +//! supports only CV_8UC1 source type +CV_EXPORTS void integral(const GpuMat& src, GpuMat& sum, Stream& stream = Stream::Null()); +//! buffered version +CV_EXPORTS void integralBuffered(const GpuMat& src, GpuMat& sum, GpuMat& buffer, Stream& stream = Stream::Null()); + +//! computes squared integral image +//! result matrix will have 64F type, but will contain 64U values +//! supports source images of 8UC1 type only +CV_EXPORTS void sqrIntegral(const GpuMat& src, GpuMat& sqsum, Stream& stream = Stream::Null()); + +//! computes vertical sum, supports only CV_32FC1 images +CV_EXPORTS void columnSum(const GpuMat& src, GpuMat& sum); + +//! computes the standard deviation of integral images +//! supports only CV_32SC1 source type and CV_32FC1 sqr type +//! output will have CV_32FC1 type +CV_EXPORTS void rectStdDev(const GpuMat& src, const GpuMat& sqr, GpuMat& dst, const Rect& rect, Stream& stream = Stream::Null()); + +//! computes Harris cornerness criteria at each image pixel +CV_EXPORTS void cornerHarris(const GpuMat& src, GpuMat& dst, int blockSize, int ksize, double k, int borderType = BORDER_REFLECT101); +CV_EXPORTS void cornerHarris(const GpuMat& src, GpuMat& dst, GpuMat& Dx, GpuMat& Dy, int blockSize, int ksize, double k, int borderType = BORDER_REFLECT101); +CV_EXPORTS void cornerHarris(const GpuMat& src, GpuMat& dst, GpuMat& Dx, GpuMat& Dy, GpuMat& buf, int blockSize, int ksize, double k, + int borderType = BORDER_REFLECT101, Stream& stream = Stream::Null()); + +//! computes minimum eigen value of 2x2 derivative covariation matrix at each pixel - the cornerness criteria +CV_EXPORTS void cornerMinEigenVal(const GpuMat& src, GpuMat& dst, int blockSize, int ksize, int borderType=BORDER_REFLECT101); +CV_EXPORTS void cornerMinEigenVal(const GpuMat& src, GpuMat& dst, GpuMat& Dx, GpuMat& Dy, int blockSize, int ksize, int borderType=BORDER_REFLECT101); +CV_EXPORTS void cornerMinEigenVal(const GpuMat& src, GpuMat& dst, GpuMat& Dx, GpuMat& Dy, GpuMat& buf, int blockSize, int ksize, + int borderType=BORDER_REFLECT101, Stream& stream = Stream::Null()); + +//! performs per-element multiplication of two full (not packed) Fourier spectrums +//! supports 32FC2 matrices only (interleaved format) +CV_EXPORTS void mulSpectrums(const GpuMat& a, const GpuMat& b, GpuMat& c, int flags, bool conjB=false, Stream& stream = Stream::Null()); + +//! performs per-element multiplication of two full (not packed) Fourier spectrums +//! supports 32FC2 matrices only (interleaved format) +CV_EXPORTS void mulAndScaleSpectrums(const GpuMat& a, const GpuMat& b, GpuMat& c, int flags, float scale, bool conjB=false, Stream& stream = Stream::Null()); + +//! Performs a forward or inverse discrete Fourier transform (1D or 2D) of floating point matrix. +//! Param dft_size is the size of DFT transform. +//! +//! 
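A sketch of the integral-image and Harris-response routines above; the parameter values are the usual textbook defaults, not prescriptions:

    cv::gpu::GpuMat d_gray;                // CV_8UC1 input
    cv::gpu::GpuMat d_sum, d_response;
    cv::gpu::integral(d_gray, d_sum);                        // CV_32S sum image
    cv::gpu::cornerHarris(d_gray, d_response, 2, 3, 0.04);   // blockSize, ksize, k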
If the source matrix is not continous, then additional copy will be done, +//! so to avoid copying ensure the source matrix is continous one. If you want to use +//! preallocated output ensure it is continuous too, otherwise it will be reallocated. +//! +//! Being implemented via CUFFT real-to-complex transform result contains only non-redundant values +//! in CUFFT's format. Result as full complex matrix for such kind of transform cannot be retrieved. +//! +//! For complex-to-real transform it is assumed that the source matrix is packed in CUFFT's format. +CV_EXPORTS void dft(const GpuMat& src, GpuMat& dst, Size dft_size, int flags=0, Stream& stream = Stream::Null()); + +struct CV_EXPORTS ConvolveBuf +{ + Size result_size; + Size block_size; + Size user_block_size; + Size dft_size; + int spect_len; + + GpuMat image_spect, templ_spect, result_spect; + GpuMat image_block, templ_block, result_data; + + void create(Size image_size, Size templ_size); + static Size estimateBlockSize(Size result_size, Size templ_size); +}; + + +//! computes convolution (or cross-correlation) of two images using discrete Fourier transform +//! supports source images of 32FC1 type only +//! result matrix will have 32FC1 type +CV_EXPORTS void convolve(const GpuMat& image, const GpuMat& templ, GpuMat& result, bool ccorr = false); +CV_EXPORTS void convolve(const GpuMat& image, const GpuMat& templ, GpuMat& result, bool ccorr, ConvolveBuf& buf, Stream& stream = Stream::Null()); + +struct CV_EXPORTS MatchTemplateBuf +{ + Size user_block_size; + GpuMat imagef, templf; + std::vector images; + std::vector image_sums; + std::vector image_sqsums; +}; + +//! computes the proximity map for the raster template and the image where the template is searched for +CV_EXPORTS void matchTemplate(const GpuMat& image, const GpuMat& templ, GpuMat& result, int method, Stream &stream = Stream::Null()); + +//! computes the proximity map for the raster template and the image where the template is searched for +CV_EXPORTS void matchTemplate(const GpuMat& image, const GpuMat& templ, GpuMat& result, int method, MatchTemplateBuf &buf, Stream& stream = Stream::Null()); + +//! smoothes the source image and downsamples it +CV_EXPORTS void pyrDown(const GpuMat& src, GpuMat& dst, Stream& stream = Stream::Null()); + +//! upsamples the source image and then smoothes it +CV_EXPORTS void pyrUp(const GpuMat& src, GpuMat& dst, Stream& stream = Stream::Null()); + +//! performs linear blending of two images +//! to avoid accuracy errors sum of weigths shouldn't be very close to zero +CV_EXPORTS void blendLinear(const GpuMat& img1, const GpuMat& img2, const GpuMat& weights1, const GpuMat& weights2, + GpuMat& result, Stream& stream = Stream::Null()); + +//! Performa bilateral filtering of passsed image +CV_EXPORTS void bilateralFilter(const GpuMat& src, GpuMat& dst, int kernel_size, float sigma_color, float sigma_spatial, + int borderMode = BORDER_DEFAULT, Stream& stream = Stream::Null()); + +//! Brute force non-local means algorith (slow but universal) +CV_EXPORTS void nonLocalMeans(const GpuMat& src, GpuMat& dst, float h, int search_window = 21, int block_size = 7, int borderMode = BORDER_DEFAULT, Stream& s = Stream::Null()); + +//! Fast (but approximate)version of non-local means algorith similar to CPU function (running sums technique) +class CV_EXPORTS FastNonLocalMeansDenoising +{ +public: + //! 
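A sketch of template matching with the buffered overload above, which lets the FFT scratch buffers be reused across calls; CV_TM_CCORR_NORMED is one commonly supported method, not the only choice:

    cv::gpu::GpuMat d_image, d_templ, d_result;
    cv::gpu::MatchTemplateBuf buf;                 // reused across frames
    cv::gpu::matchTemplate(d_image, d_templ, d_result, CV_TM_CCORR_NORMED, buf);
    // d_result can then be scanned with gpu::minMax or downloaded to the host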
Simple method, recommended for grayscale images (though it supports multichannel images) + void simpleMethod(const GpuMat& src, GpuMat& dst, float h, int search_window = 21, int block_size = 7, Stream& s = Stream::Null()); + + //! Processes luminance and color components separatelly + void labMethod(const GpuMat& src, GpuMat& dst, float h_luminance, float h_color, int search_window = 21, int block_size = 7, Stream& s = Stream::Null()); + +private: + + GpuMat buffer, extended_src_buffer; + GpuMat lab, l, ab; +}; + +struct CV_EXPORTS CannyBuf +{ + void create(const Size& image_size, int apperture_size = 3); + void release(); + + GpuMat dx, dy; + GpuMat mag; + GpuMat map; + GpuMat st1, st2; + GpuMat unused; + Ptr filterDX, filterDY; + + CannyBuf() {} + explicit CannyBuf(const Size& image_size, int apperture_size = 3) {create(image_size, apperture_size);} + CannyBuf(const GpuMat& dx_, const GpuMat& dy_); +}; + +CV_EXPORTS void Canny(const GpuMat& image, GpuMat& edges, double low_thresh, double high_thresh, int apperture_size = 3, bool L2gradient = false); +CV_EXPORTS void Canny(const GpuMat& image, CannyBuf& buf, GpuMat& edges, double low_thresh, double high_thresh, int apperture_size = 3, bool L2gradient = false); +CV_EXPORTS void Canny(const GpuMat& dx, const GpuMat& dy, GpuMat& edges, double low_thresh, double high_thresh, bool L2gradient = false); +CV_EXPORTS void Canny(const GpuMat& dx, const GpuMat& dy, CannyBuf& buf, GpuMat& edges, double low_thresh, double high_thresh, bool L2gradient = false); + +class CV_EXPORTS ImagePyramid +{ +public: + inline ImagePyramid() : nLayers_(0) {} + inline ImagePyramid(const GpuMat& img, int nLayers, Stream& stream = Stream::Null()) + { + build(img, nLayers, stream); + } + + void build(const GpuMat& img, int nLayers, Stream& stream = Stream::Null()); + + void getLayer(GpuMat& outImg, Size outRoi, Stream& stream = Stream::Null()) const; + + inline void release() + { + layer0_.release(); + pyramid_.clear(); + nLayers_ = 0; + } + +private: + GpuMat layer0_; + std::vector pyramid_; + int nLayers_; +}; + +//! HoughLines + +struct HoughLinesBuf +{ + GpuMat accum; + GpuMat list; +}; + +CV_EXPORTS void HoughLines(const GpuMat& src, GpuMat& lines, float rho, float theta, int threshold, bool doSort = false, int maxLines = 4096); +CV_EXPORTS void HoughLines(const GpuMat& src, GpuMat& lines, HoughLinesBuf& buf, float rho, float theta, int threshold, bool doSort = false, int maxLines = 4096); +CV_EXPORTS void HoughLinesDownload(const GpuMat& d_lines, OutputArray h_lines, OutputArray h_votes = noArray()); + +//! HoughLinesP + +//! finds line segments in the black-n-white image using probabalistic Hough transform +CV_EXPORTS void HoughLinesP(const GpuMat& image, GpuMat& lines, HoughLinesBuf& buf, float rho, float theta, int minLineLength, int maxLineGap, int maxLines = 4096); + +//! HoughCircles + +struct HoughCirclesBuf +{ + GpuMat edges; + GpuMat accum; + GpuMat list; + CannyBuf cannyBuf; +}; + +CV_EXPORTS void HoughCircles(const GpuMat& src, GpuMat& circles, int method, float dp, float minDist, int cannyThreshold, int votesThreshold, int minRadius, int maxRadius, int maxCircles = 4096); +CV_EXPORTS void HoughCircles(const GpuMat& src, GpuMat& circles, HoughCirclesBuf& buf, int method, float dp, float minDist, int cannyThreshold, int votesThreshold, int minRadius, int maxRadius, int maxCircles = 4096); +CV_EXPORTS void HoughCirclesDownload(const GpuMat& d_circles, OutputArray h_circles); + +//! 
finds arbitrary template in the grayscale image using Generalized Hough Transform +//! Ballard, D.H. (1981). Generalizing the Hough transform to detect arbitrary shapes. Pattern Recognition 13 (2): 111-122. +//! Guil, N., González-Linares, J.M. and Zapata, E.L. (1999). Bidimensional shape detection using an invariant approach. Pattern Recognition 32 (6): 1025-1038. +class CV_EXPORTS GeneralizedHough_GPU : public Algorithm +{ +public: + static Ptr create(int method); + + virtual ~GeneralizedHough_GPU(); + + //! set template to search + void setTemplate(const GpuMat& templ, int cannyThreshold = 100, Point templCenter = Point(-1, -1)); + void setTemplate(const GpuMat& edges, const GpuMat& dx, const GpuMat& dy, Point templCenter = Point(-1, -1)); + + //! find template on image + void detect(const GpuMat& image, GpuMat& positions, int cannyThreshold = 100); + void detect(const GpuMat& edges, const GpuMat& dx, const GpuMat& dy, GpuMat& positions); + + void download(const GpuMat& d_positions, OutputArray h_positions, OutputArray h_votes = noArray()); + + void release(); + +protected: + virtual void setTemplateImpl(const GpuMat& edges, const GpuMat& dx, const GpuMat& dy, Point templCenter) = 0; + virtual void detectImpl(const GpuMat& edges, const GpuMat& dx, const GpuMat& dy, GpuMat& positions) = 0; + virtual void releaseImpl() = 0; + +private: + GpuMat edges_; + CannyBuf cannyBuf_; +}; + +////////////////////////////// Matrix reductions ////////////////////////////// + +//! computes mean value and standard deviation of all or selected array elements +//! supports only CV_8UC1 type +CV_EXPORTS void meanStdDev(const GpuMat& mtx, Scalar& mean, Scalar& stddev); +//! buffered version +CV_EXPORTS void meanStdDev(const GpuMat& mtx, Scalar& mean, Scalar& stddev, GpuMat& buf); + +//! computes norm of array +//! supports NORM_INF, NORM_L1, NORM_L2 +//! supports all matrices except 64F +CV_EXPORTS double norm(const GpuMat& src1, int normType=NORM_L2); +CV_EXPORTS double norm(const GpuMat& src1, int normType, GpuMat& buf); +CV_EXPORTS double norm(const GpuMat& src1, int normType, const GpuMat& mask, GpuMat& buf); + +//! computes norm of the difference between two arrays +//! supports NORM_INF, NORM_L1, NORM_L2 +//! supports only CV_8UC1 type +CV_EXPORTS double norm(const GpuMat& src1, const GpuMat& src2, int normType=NORM_L2); + +//! computes sum of array elements +//! supports only single channel images +CV_EXPORTS Scalar sum(const GpuMat& src); +CV_EXPORTS Scalar sum(const GpuMat& src, GpuMat& buf); +CV_EXPORTS Scalar sum(const GpuMat& src, const GpuMat& mask, GpuMat& buf); + +//! computes sum of array elements absolute values +//! supports only single channel images +CV_EXPORTS Scalar absSum(const GpuMat& src); +CV_EXPORTS Scalar absSum(const GpuMat& src, GpuMat& buf); +CV_EXPORTS Scalar absSum(const GpuMat& src, const GpuMat& mask, GpuMat& buf); + +//! computes squared sum of array elements +//! supports only single channel images +CV_EXPORTS Scalar sqrSum(const GpuMat& src); +CV_EXPORTS Scalar sqrSum(const GpuMat& src, GpuMat& buf); +CV_EXPORTS Scalar sqrSum(const GpuMat& src, const GpuMat& mask, GpuMat& buf); + +//! finds global minimum and maximum array elements and returns their values +CV_EXPORTS void minMax(const GpuMat& src, double* minVal, double* maxVal=0, const GpuMat& mask=GpuMat()); +CV_EXPORTS void minMax(const GpuMat& src, double* minVal, double* maxVal, const GpuMat& mask, GpuMat& buf); + +//! 
finds global minimum and maximum array elements and returns their values with locations
+CV_EXPORTS void minMaxLoc(const GpuMat& src, double* minVal, double* maxVal=0, Point* minLoc=0, Point* maxLoc=0,
+                          const GpuMat& mask=GpuMat());
+CV_EXPORTS void minMaxLoc(const GpuMat& src, double* minVal, double* maxVal, Point* minLoc, Point* maxLoc,
+                          const GpuMat& mask, GpuMat& valbuf, GpuMat& locbuf);
+
+//! counts non-zero array elements
+CV_EXPORTS int countNonZero(const GpuMat& src);
+CV_EXPORTS int countNonZero(const GpuMat& src, GpuMat& buf);
+
+//! reduces a matrix to a vector
+CV_EXPORTS void reduce(const GpuMat& mtx, GpuMat& vec, int dim, int reduceOp, int dtype = -1, Stream& stream = Stream::Null());
+
+
+///////////////////////////// Calibration 3D //////////////////////////////////
+
+CV_EXPORTS void transformPoints(const GpuMat& src, const Mat& rvec, const Mat& tvec,
+                                GpuMat& dst, Stream& stream = Stream::Null());
+
+CV_EXPORTS void projectPoints(const GpuMat& src, const Mat& rvec, const Mat& tvec,
+                              const Mat& camera_mat, const Mat& dist_coef, GpuMat& dst,
+                              Stream& stream = Stream::Null());
+
+CV_EXPORTS void solvePnPRansac(const Mat& object, const Mat& image, const Mat& camera_mat,
+                               const Mat& dist_coef, Mat& rvec, Mat& tvec, bool use_extrinsic_guess=false,
+                               int num_iters=100, float max_dist=8.0, int min_inlier_count=100,
+                               std::vector<int>* inliers=NULL);
+
+//////////////////////////////// Image Labeling ////////////////////////////////
+
+//! performs labeling via graph cuts of a 2D regular 4-connected graph.
+CV_EXPORTS void graphcut(GpuMat& terminals, GpuMat& leftTransp, GpuMat& rightTransp, GpuMat& top, GpuMat& bottom, GpuMat& labels,
+                         GpuMat& buf, Stream& stream = Stream::Null());
+
+//! performs labeling via graph cuts of a 2D regular 8-connected graph.
+CV_EXPORTS void graphcut(GpuMat& terminals, GpuMat& leftTransp, GpuMat& rightTransp, GpuMat& top, GpuMat& topLeft, GpuMat& topRight,
+                         GpuMat& bottom, GpuMat& bottomLeft, GpuMat& bottomRight,
+                         GpuMat& labels,
+                         GpuMat& buf, Stream& stream = Stream::Null());
+
+//! computes the mask for Generalized Flood fill components labeling.
+CV_EXPORTS void connectivityMask(const GpuMat& image, GpuMat& mask, const cv::Scalar& lo, const cv::Scalar& hi, Stream& stream = Stream::Null());
+
+//! performs connected components labeling.
+CV_EXPORTS void labelComponents(const GpuMat& mask, GpuMat& components, int flags = 0, Stream& stream = Stream::Null());
+
+////////////////////////////////// Histograms //////////////////////////////////
+
+//! Compute levels with even distribution. levels will have 1 row and nLevels cols and CV_32SC1 type.
+CV_EXPORTS void evenLevels(GpuMat& levels, int nLevels, int lowerLevel, int upperLevel);
+//! Calculates histogram with evenly distributed bins for a single channel source.
+//! Supports CV_8UC1, CV_16UC1 and CV_16SC1 source types.
+//! Output hist will have one row and histSize cols and CV_32SC1 type.
+CV_EXPORTS void histEven(const GpuMat& src, GpuMat& hist, int histSize, int lowerLevel, int upperLevel, Stream& stream = Stream::Null());
+CV_EXPORTS void histEven(const GpuMat& src, GpuMat& hist, GpuMat& buf, int histSize, int lowerLevel, int upperLevel, Stream& stream = Stream::Null());
+//! Calculates histogram with evenly distributed bins for a four-channel source.
+//! All channels of source are processed separately.
+//! Supports CV_8UC4, CV_16UC4 and CV_16SC4 source types.
+//! Output hist[i] will have one row and histSize[i] cols and CV_32SC1 type.
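+//!
+//! Usage sketch (illustrative; d_bgra stands for a hypothetical CV_8UC4 GpuMat):
+//!     cv::gpu::GpuMat d_hist[4];
+//!     int histSize[4]   = {256, 256, 256, 256};
+//!     int lowerLevel[4] = {0, 0, 0, 0};
+//!     int upperLevel[4] = {256, 256, 256, 256};
+//!     cv::gpu::histEven(d_bgra, d_hist, histSize, lowerLevel, upperLevel);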
+CV_EXPORTS void histEven(const GpuMat& src, GpuMat hist[4], int histSize[4], int lowerLevel[4], int upperLevel[4], Stream& stream = Stream::Null()); +CV_EXPORTS void histEven(const GpuMat& src, GpuMat hist[4], GpuMat& buf, int histSize[4], int lowerLevel[4], int upperLevel[4], Stream& stream = Stream::Null()); +//! Calculates histogram with bins determined by levels array. +//! levels must have one row and CV_32SC1 type if source has integer type or CV_32FC1 otherwise. +//! Supports CV_8UC1, CV_16UC1, CV_16SC1 and CV_32FC1 source types. +//! Output hist will have one row and (levels.cols-1) cols and CV_32SC1 type. +CV_EXPORTS void histRange(const GpuMat& src, GpuMat& hist, const GpuMat& levels, Stream& stream = Stream::Null()); +CV_EXPORTS void histRange(const GpuMat& src, GpuMat& hist, const GpuMat& levels, GpuMat& buf, Stream& stream = Stream::Null()); +//! Calculates histogram with bins determined by levels array. +//! All levels must have one row and CV_32SC1 type if source has integer type or CV_32FC1 otherwise. +//! All channels of source are processed separately. +//! Supports CV_8UC4, CV_16UC4, CV_16SC4 and CV_32FC4 source types. +//! Output hist[i] will have one row and (levels[i].cols-1) cols and CV_32SC1 type. +CV_EXPORTS void histRange(const GpuMat& src, GpuMat hist[4], const GpuMat levels[4], Stream& stream = Stream::Null()); +CV_EXPORTS void histRange(const GpuMat& src, GpuMat hist[4], const GpuMat levels[4], GpuMat& buf, Stream& stream = Stream::Null()); + +//! Calculates histogram for 8u one channel image +//! Output hist will have one row, 256 cols and CV32SC1 type. +CV_EXPORTS void calcHist(const GpuMat& src, GpuMat& hist, Stream& stream = Stream::Null()); +CV_EXPORTS void calcHist(const GpuMat& src, GpuMat& hist, GpuMat& buf, Stream& stream = Stream::Null()); + +//! normalizes the grayscale image brightness and contrast by normalizing its histogram +CV_EXPORTS void equalizeHist(const GpuMat& src, GpuMat& dst, Stream& stream = Stream::Null()); +CV_EXPORTS void equalizeHist(const GpuMat& src, GpuMat& dst, GpuMat& hist, Stream& stream = Stream::Null()); +CV_EXPORTS void equalizeHist(const GpuMat& src, GpuMat& dst, GpuMat& hist, GpuMat& buf, Stream& stream = Stream::Null()); + +class CV_EXPORTS CLAHE : public cv::CLAHE +{ +public: + using cv::CLAHE::apply; + virtual void apply(InputArray src, OutputArray dst, Stream& stream) = 0; +}; +CV_EXPORTS Ptr createCLAHE(double clipLimit = 40.0, Size tileGridSize = Size(8, 8)); + +//////////////////////////////// StereoBM_GPU //////////////////////////////// + +class CV_EXPORTS StereoBM_GPU +{ +public: + enum { BASIC_PRESET = 0, PREFILTER_XSOBEL = 1 }; + + enum { DEFAULT_NDISP = 64, DEFAULT_WINSZ = 19 }; + + //! the default constructor + StereoBM_GPU(); + //! the full constructor taking the camera-specific preset, number of disparities and the SAD window size. ndisparities must be multiple of 8. + StereoBM_GPU(int preset, int ndisparities = DEFAULT_NDISP, int winSize = DEFAULT_WINSZ); + + //! the stereo correspondence operator. Finds the disparity for the specified rectified stereo pair + //! Output disparity has CV_8U type. + void operator()(const GpuMat& left, const GpuMat& right, GpuMat& disparity, Stream& stream = Stream::Null()); + + //! Some heuristics that tries to estmate + // if current GPU will be faster than CPU in this algorithm. + // It queries current active device. 
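+    // Usage sketch (illustrative): the check can be used to fall back to the CPU block matcher
+    // when the current device is unlikely to be faster, e.g.
+    //     if (!cv::gpu::StereoBM_GPU::checkIfGpuCallReasonable())
+    //         /* run cv::StereoBM on the CPU instead */;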
+ static bool checkIfGpuCallReasonable(); + + int preset; + int ndisp; + int winSize; + + // If avergeTexThreshold == 0 => post procesing is disabled + // If avergeTexThreshold != 0 then disparity is set 0 in each point (x,y) where for left image + // SumOfHorizontalGradiensInWindow(x, y, winSize) < (winSize * winSize) * avergeTexThreshold + // i.e. input left image is low textured. + float avergeTexThreshold; + +private: + GpuMat minSSD, leBuf, riBuf; +}; + +////////////////////////// StereoBeliefPropagation /////////////////////////// +// "Efficient Belief Propagation for Early Vision" +// P.Felzenszwalb + +class CV_EXPORTS StereoBeliefPropagation +{ +public: + enum { DEFAULT_NDISP = 64 }; + enum { DEFAULT_ITERS = 5 }; + enum { DEFAULT_LEVELS = 5 }; + + static void estimateRecommendedParams(int width, int height, int& ndisp, int& iters, int& levels); + + //! the default constructor + explicit StereoBeliefPropagation(int ndisp = DEFAULT_NDISP, + int iters = DEFAULT_ITERS, + int levels = DEFAULT_LEVELS, + int msg_type = CV_32F); + + //! the full constructor taking the number of disparities, number of BP iterations on each level, + //! number of levels, truncation of data cost, data weight, + //! truncation of discontinuity cost and discontinuity single jump + //! DataTerm = data_weight * min(fabs(I2-I1), max_data_term) + //! DiscTerm = min(disc_single_jump * fabs(f1-f2), max_disc_term) + //! please see paper for more details + StereoBeliefPropagation(int ndisp, int iters, int levels, + float max_data_term, float data_weight, + float max_disc_term, float disc_single_jump, + int msg_type = CV_32F); + + //! the stereo correspondence operator. Finds the disparity for the specified rectified stereo pair, + //! if disparity is empty output type will be CV_16S else output type will be disparity.type(). + void operator()(const GpuMat& left, const GpuMat& right, GpuMat& disparity, Stream& stream = Stream::Null()); + + + //! version for user specified data term + void operator()(const GpuMat& data, GpuMat& disparity, Stream& stream = Stream::Null()); + + int ndisp; + + int iters; + int levels; + + float max_data_term; + float data_weight; + float max_disc_term; + float disc_single_jump; + + int msg_type; +private: + GpuMat u, d, l, r, u2, d2, l2, r2; + std::vector datas; + GpuMat out; +}; + +/////////////////////////// StereoConstantSpaceBP /////////////////////////// +// "A Constant-Space Belief Propagation Algorithm for Stereo Matching" +// Qingxiong Yang, Liang Wang, Narendra Ahuja +// http://vision.ai.uiuc.edu/~qyang6/ + +class CV_EXPORTS StereoConstantSpaceBP +{ +public: + enum { DEFAULT_NDISP = 128 }; + enum { DEFAULT_ITERS = 8 }; + enum { DEFAULT_LEVELS = 4 }; + enum { DEFAULT_NR_PLANE = 4 }; + + static void estimateRecommendedParams(int width, int height, int& ndisp, int& iters, int& levels, int& nr_plane); + + //! the default constructor + explicit StereoConstantSpaceBP(int ndisp = DEFAULT_NDISP, + int iters = DEFAULT_ITERS, + int levels = DEFAULT_LEVELS, + int nr_plane = DEFAULT_NR_PLANE, + int msg_type = CV_32F); + + //! the full constructor taking the number of disparities, number of BP iterations on each level, + //! number of levels, number of active disparity on the first level, truncation of data cost, data weight, + //! 
truncation of discontinuity cost, discontinuity single jump and minimum disparity threshold + StereoConstantSpaceBP(int ndisp, int iters, int levels, int nr_plane, + float max_data_term, float data_weight, float max_disc_term, float disc_single_jump, + int min_disp_th = 0, + int msg_type = CV_32F); + + //! the stereo correspondence operator. Finds the disparity for the specified rectified stereo pair, + //! if disparity is empty output type will be CV_16S else output type will be disparity.type(). + void operator()(const GpuMat& left, const GpuMat& right, GpuMat& disparity, Stream& stream = Stream::Null()); + + int ndisp; + + int iters; + int levels; + + int nr_plane; + + float max_data_term; + float data_weight; + float max_disc_term; + float disc_single_jump; + + int min_disp_th; + + int msg_type; + + bool use_local_init_data_cost; +private: + GpuMat messages_buffers; + + GpuMat temp; + GpuMat out; +}; + +/////////////////////////// DisparityBilateralFilter /////////////////////////// +// Disparity map refinement using joint bilateral filtering given a single color image. +// Qingxiong Yang, Liang Wang, Narendra Ahuja +// http://vision.ai.uiuc.edu/~qyang6/ + +class CV_EXPORTS DisparityBilateralFilter +{ +public: + enum { DEFAULT_NDISP = 64 }; + enum { DEFAULT_RADIUS = 3 }; + enum { DEFAULT_ITERS = 1 }; + + //! the default constructor + explicit DisparityBilateralFilter(int ndisp = DEFAULT_NDISP, int radius = DEFAULT_RADIUS, int iters = DEFAULT_ITERS); + + //! the full constructor taking the number of disparities, filter radius, + //! number of iterations, truncation of data continuity, truncation of disparity continuity + //! and filter range sigma + DisparityBilateralFilter(int ndisp, int radius, int iters, float edge_threshold, float max_disc_threshold, float sigma_range); + + //! the disparity map refinement operator. Refine disparity map using joint bilateral filtering given a single color image. + //! disparity must have CV_8U or CV_16S type, image must have CV_8UC1 or CV_8UC3 type. 
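+    //!
+    //! Usage sketch (illustrative; d_disp and d_left are hypothetical disparity/left-view GpuMats):
+    //!     cv::gpu::DisparityBilateralFilter refiner(64);
+    //!     cv::gpu::GpuMat d_refined;
+    //!     refiner(d_disp, d_left, d_refined);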
+ void operator()(const GpuMat& disparity, const GpuMat& image, GpuMat& dst, Stream& stream = Stream::Null()); + +private: + int ndisp; + int radius; + int iters; + + float edge_threshold; + float max_disc_threshold; + float sigma_range; + + GpuMat table_color; + GpuMat table_space; +}; + + +//////////////// HOG (Histogram-of-Oriented-Gradients) Descriptor and Object Detector ////////////// +struct CV_EXPORTS HOGConfidence +{ + double scale; + vector locations; + vector confidences; + vector part_scores[4]; +}; + +struct CV_EXPORTS HOGDescriptor +{ + enum { DEFAULT_WIN_SIGMA = -1 }; + enum { DEFAULT_NLEVELS = 64 }; + enum { DESCR_FORMAT_ROW_BY_ROW, DESCR_FORMAT_COL_BY_COL }; + + HOGDescriptor(Size win_size=Size(64, 128), Size block_size=Size(16, 16), + Size block_stride=Size(8, 8), Size cell_size=Size(8, 8), + int nbins=9, double win_sigma=DEFAULT_WIN_SIGMA, + double threshold_L2hys=0.2, bool gamma_correction=true, + int nlevels=DEFAULT_NLEVELS); + + size_t getDescriptorSize() const; + size_t getBlockHistogramSize() const; + + void setSVMDetector(const vector& detector); + + static vector getDefaultPeopleDetector(); + static vector getPeopleDetector48x96(); + static vector getPeopleDetector64x128(); + + void detect(const GpuMat& img, vector& found_locations, + double hit_threshold=0, Size win_stride=Size(), + Size padding=Size()); + + void detectMultiScale(const GpuMat& img, vector& found_locations, + double hit_threshold=0, Size win_stride=Size(), + Size padding=Size(), double scale0=1.05, + int group_threshold=2); + + void computeConfidence(const GpuMat& img, vector& hits, double hit_threshold, + Size win_stride, Size padding, vector& locations, vector& confidences); + + void computeConfidenceMultiScale(const GpuMat& img, vector& found_locations, + double hit_threshold, Size win_stride, Size padding, + vector &conf_out, int group_threshold); + + void getDescriptors(const GpuMat& img, Size win_stride, + GpuMat& descriptors, + int descr_format=DESCR_FORMAT_COL_BY_COL); + + Size win_size; + Size block_size; + Size block_stride; + Size cell_size; + int nbins; + double win_sigma; + double threshold_L2hys; + bool gamma_correction; + int nlevels; + +protected: + void computeBlockHistograms(const GpuMat& img); + void computeGradient(const GpuMat& img, GpuMat& grad, GpuMat& qangle); + + double getWinSigma() const; + bool checkDetectorSize() const; + + static int numPartsWithin(int size, int part_size, int stride); + static Size numPartsWithin(Size size, Size part_size, Size stride); + + // Coefficients of the separating plane + float free_coef; + GpuMat detector; + + // Results of the last classification step + GpuMat labels, labels_buf; + Mat labels_host; + + // Results of the last histogram evaluation step + GpuMat block_hists, block_hists_buf; + + // Gradients conputation results + GpuMat grad, qangle, grad_buf, qangle_buf; + + // returns subbuffer with required size, reallocates buffer if nessesary. 
+ static GpuMat getBuffer(const Size& sz, int type, GpuMat& buf); + static GpuMat getBuffer(int rows, int cols, int type, GpuMat& buf); + + std::vector image_scales; +}; + + +////////////////////////////////// BruteForceMatcher ////////////////////////////////// + +class CV_EXPORTS BruteForceMatcher_GPU_base +{ +public: + enum DistType {L1Dist = 0, L2Dist, HammingDist}; + + explicit BruteForceMatcher_GPU_base(DistType distType = L2Dist); + + // Add descriptors to train descriptor collection + void add(const std::vector& descCollection); + + // Get train descriptors collection + const std::vector& getTrainDescriptors() const; + + // Clear train descriptors collection + void clear(); + + // Return true if there are not train descriptors in collection + bool empty() const; + + // Return true if the matcher supports mask in match methods + bool isMaskSupported() const; + + // Find one best match for each query descriptor + void matchSingle(const GpuMat& query, const GpuMat& train, + GpuMat& trainIdx, GpuMat& distance, + const GpuMat& mask = GpuMat(), Stream& stream = Stream::Null()); + + // Download trainIdx and distance and convert it to CPU vector with DMatch + static void matchDownload(const GpuMat& trainIdx, const GpuMat& distance, std::vector& matches); + // Convert trainIdx and distance to vector with DMatch + static void matchConvert(const Mat& trainIdx, const Mat& distance, std::vector& matches); + + // Find one best match for each query descriptor + void match(const GpuMat& query, const GpuMat& train, std::vector& matches, const GpuMat& mask = GpuMat()); + + // Make gpu collection of trains and masks in suitable format for matchCollection function + void makeGpuCollection(GpuMat& trainCollection, GpuMat& maskCollection, const std::vector& masks = std::vector()); + + // Find one best match from train collection for each query descriptor + void matchCollection(const GpuMat& query, const GpuMat& trainCollection, + GpuMat& trainIdx, GpuMat& imgIdx, GpuMat& distance, + const GpuMat& masks = GpuMat(), Stream& stream = Stream::Null()); + + // Download trainIdx, imgIdx and distance and convert it to vector with DMatch + static void matchDownload(const GpuMat& trainIdx, const GpuMat& imgIdx, const GpuMat& distance, std::vector& matches); + // Convert trainIdx, imgIdx and distance to vector with DMatch + static void matchConvert(const Mat& trainIdx, const Mat& imgIdx, const Mat& distance, std::vector& matches); + + // Find one best match from train collection for each query descriptor. + void match(const GpuMat& query, std::vector& matches, const std::vector& masks = std::vector()); + + // Find k best matches for each query descriptor (in increasing order of distances) + void knnMatchSingle(const GpuMat& query, const GpuMat& train, + GpuMat& trainIdx, GpuMat& distance, GpuMat& allDist, int k, + const GpuMat& mask = GpuMat(), Stream& stream = Stream::Null()); + + // Download trainIdx and distance and convert it to vector with DMatch + // compactResult is used when mask is not empty. If compactResult is false matches + // vector will have the same size as queryDescriptors rows. If compactResult is true + // matches vector will not contain matches for fully masked out query descriptors. 
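+    //
+    // Usage sketch (illustrative; d_query and d_train are hypothetical GpuMat descriptor matrices):
+    //     cv::gpu::BruteForceMatcher_GPU_base matcher;   // defaults to L2Dist
+    //     cv::gpu::GpuMat d_trainIdx, d_distance, d_allDist;
+    //     matcher.knnMatchSingle(d_query, d_train, d_trainIdx, d_distance, d_allDist, 2);
+    //     std::vector< std::vector<cv::DMatch> > matches;
+    //     matcher.knnMatchDownload(d_trainIdx, d_distance, matches);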
+    static void knnMatchDownload(const GpuMat& trainIdx, const GpuMat& distance,
+                                 std::vector< std::vector<DMatch> >& matches, bool compactResult = false);
+    // Convert trainIdx and distance to vector with DMatch
+    static void knnMatchConvert(const Mat& trainIdx, const Mat& distance,
+                                std::vector< std::vector<DMatch> >& matches, bool compactResult = false);
+
+    // Find k best matches for each query descriptor (in increasing order of distances).
+    // compactResult is used when mask is not empty. If compactResult is false the matches
+    // vector will have the same size as queryDescriptors rows. If compactResult is true the
+    // matches vector will not contain matches for fully masked-out query descriptors.
+    void knnMatch(const GpuMat& query, const GpuMat& train,
+                  std::vector< std::vector<DMatch> >& matches, int k, const GpuMat& mask = GpuMat(),
+                  bool compactResult = false);
+
+    // Find k best matches from train collection for each query descriptor (in increasing order of distances)
+    void knnMatch2Collection(const GpuMat& query, const GpuMat& trainCollection,
+                             GpuMat& trainIdx, GpuMat& imgIdx, GpuMat& distance,
+                             const GpuMat& maskCollection = GpuMat(), Stream& stream = Stream::Null());
+
+    // Download trainIdx and distance and convert them to vector with DMatch
+    // compactResult is used when mask is not empty. If compactResult is false the matches
+    // vector will have the same size as queryDescriptors rows. If compactResult is true the
+    // matches vector will not contain matches for fully masked-out query descriptors.
+    static void knnMatch2Download(const GpuMat& trainIdx, const GpuMat& imgIdx, const GpuMat& distance,
+                                  std::vector< std::vector<DMatch> >& matches, bool compactResult = false);
+    // Convert trainIdx and distance to vector with DMatch
+    static void knnMatch2Convert(const Mat& trainIdx, const Mat& imgIdx, const Mat& distance,
+                                 std::vector< std::vector<DMatch> >& matches, bool compactResult = false);
+
+    // Find k best matches for each query descriptor (in increasing order of distances).
+    // compactResult is used when mask is not empty. If compactResult is false the matches
+    // vector will have the same size as queryDescriptors rows. If compactResult is true the
+    // matches vector will not contain matches for fully masked-out query descriptors.
+    void knnMatch(const GpuMat& query, std::vector< std::vector<DMatch> >& matches, int k,
+                  const std::vector<GpuMat>& masks = std::vector<GpuMat>(), bool compactResult = false);
+
+    // Find best matches for each query descriptor which have distance less than maxDistance.
+    // nMatches.at<int>(0, queryIdx) will contain the match count for queryIdx.
+    // Be careful: nMatches can be greater than trainIdx.cols - it means that the matcher didn't find all matches,
+    // because it didn't have enough memory.
+    // If trainIdx is empty, then trainIdx and distance will be created with size nQuery x max((nTrain / 100), 10),
+    // otherwise the user can pass preallocated trainIdx and distance with size nQuery x nMaxMatches.
+    // Matches are not sorted.
+    void radiusMatchSingle(const GpuMat& query, const GpuMat& train,
+                           GpuMat& trainIdx, GpuMat& distance, GpuMat& nMatches, float maxDistance,
+                           const GpuMat& mask = GpuMat(), Stream& stream = Stream::Null());
+
+    // Download trainIdx, nMatches and distance and convert them to vector with DMatch.
+    // matches will be sorted in increasing order of distances.
+    // compactResult is used when mask is not empty. If compactResult is false the matches
+    // vector will have the same size as queryDescriptors rows. If compactResult is true
+    // the matches vector will not contain matches for fully masked-out query descriptors.
+    static void radiusMatchDownload(const GpuMat& trainIdx, const GpuMat& distance, const GpuMat& nMatches,
+                                    std::vector< std::vector<DMatch> >& matches, bool compactResult = false);
+    // Convert trainIdx, nMatches and distance to vector with DMatch.
+    static void radiusMatchConvert(const Mat& trainIdx, const Mat& distance, const Mat& nMatches,
+                                   std::vector< std::vector<DMatch> >& matches, bool compactResult = false);
+
+    // Find best matches for each query descriptor which have distance less than maxDistance
+    // (in increasing order of distances).
+    void radiusMatch(const GpuMat& query, const GpuMat& train,
+                     std::vector< std::vector<DMatch> >& matches, float maxDistance,
+                     const GpuMat& mask = GpuMat(), bool compactResult = false);
+
+    // Find best matches for each query descriptor which have distance less than maxDistance.
+    // If trainIdx is empty, then trainIdx and distance will be created with size nQuery x max((nQuery / 100), 10),
+    // otherwise the user can pass preallocated trainIdx and distance with size nQuery x nMaxMatches.
+    // Matches are not sorted.
+    void radiusMatchCollection(const GpuMat& query, GpuMat& trainIdx, GpuMat& imgIdx, GpuMat& distance, GpuMat& nMatches, float maxDistance,
+                               const std::vector<GpuMat>& masks = std::vector<GpuMat>(), Stream& stream = Stream::Null());
+
+    // Download trainIdx, imgIdx, nMatches and distance and convert them to vector with DMatch.
+    // matches will be sorted in increasing order of distances.
+    // compactResult is used when mask is not empty. If compactResult is false the matches
+    // vector will have the same size as queryDescriptors rows. If compactResult is true the
+    // matches vector will not contain matches for fully masked-out query descriptors.
+    static void radiusMatchDownload(const GpuMat& trainIdx, const GpuMat& imgIdx, const GpuMat& distance, const GpuMat& nMatches,
+                                    std::vector< std::vector<DMatch> >& matches, bool compactResult = false);
+    // Convert trainIdx, imgIdx, nMatches and distance to vector with DMatch.
+    static void radiusMatchConvert(const Mat& trainIdx, const Mat& imgIdx, const Mat& distance, const Mat& nMatches,
+                                   std::vector< std::vector<DMatch> >& matches, bool compactResult = false);
+
+    // Find best matches from train collection for each query descriptor which have distance less than
+    // maxDistance (in increasing order of distances).
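+    //
+    // Usage sketch (illustrative; d_query and d_train are hypothetical GpuMat descriptor matrices):
+    //     cv::gpu::BruteForceMatcher_GPU_base matcher;
+    //     matcher.add(std::vector<cv::gpu::GpuMat>(1, d_train));
+    //     std::vector< std::vector<cv::DMatch> > matches;
+    //     matcher.radiusMatch(d_query, matches, 0.5f);   // 0.5f is an arbitrary example radius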
+ void radiusMatch(const GpuMat& query, std::vector< std::vector >& matches, float maxDistance, + const std::vector& masks = std::vector(), bool compactResult = false); + + DistType distType; + +private: + std::vector trainDescCollection; +}; + +template +class CV_EXPORTS BruteForceMatcher_GPU; + +template +class CV_EXPORTS BruteForceMatcher_GPU< L1 > : public BruteForceMatcher_GPU_base +{ +public: + explicit BruteForceMatcher_GPU() : BruteForceMatcher_GPU_base(L1Dist) {} + explicit BruteForceMatcher_GPU(L1 /*d*/) : BruteForceMatcher_GPU_base(L1Dist) {} +}; +template +class CV_EXPORTS BruteForceMatcher_GPU< L2 > : public BruteForceMatcher_GPU_base +{ +public: + explicit BruteForceMatcher_GPU() : BruteForceMatcher_GPU_base(L2Dist) {} + explicit BruteForceMatcher_GPU(L2 /*d*/) : BruteForceMatcher_GPU_base(L2Dist) {} +}; +template <> class CV_EXPORTS BruteForceMatcher_GPU< Hamming > : public BruteForceMatcher_GPU_base +{ +public: + explicit BruteForceMatcher_GPU() : BruteForceMatcher_GPU_base(HammingDist) {} + explicit BruteForceMatcher_GPU(Hamming /*d*/) : BruteForceMatcher_GPU_base(HammingDist) {} +}; + +class CV_EXPORTS BFMatcher_GPU : public BruteForceMatcher_GPU_base +{ +public: + explicit BFMatcher_GPU(int norm = NORM_L2) : BruteForceMatcher_GPU_base(norm == NORM_L1 ? L1Dist : norm == NORM_L2 ? L2Dist : HammingDist) {} +}; + +////////////////////////////////// CascadeClassifier_GPU ////////////////////////////////////////// +// The cascade classifier class for object detection: supports old haar and new lbp xlm formats and nvbin for haar cascades olny. +class CV_EXPORTS CascadeClassifier_GPU +{ +public: + CascadeClassifier_GPU(); + CascadeClassifier_GPU(const std::string& filename); + ~CascadeClassifier_GPU(); + + bool empty() const; + bool load(const std::string& filename); + void release(); + + /* returns number of detected objects */ + int detectMultiScale(const GpuMat& image, GpuMat& objectsBuf, double scaleFactor = 1.2, int minNeighbors = 4, Size minSize = Size()); + int detectMultiScale(const GpuMat& image, GpuMat& objectsBuf, Size maxObjectSize, Size minSize = Size(), double scaleFactor = 1.1, int minNeighbors = 4); + + bool findLargestObject; + bool visualizeInPlace; + + Size getClassifierSize() const; + +private: + struct CascadeClassifierImpl; + CascadeClassifierImpl* impl; + struct HaarCascade; + struct LbpCascade; + friend class CascadeClassifier_GPU_LBP; +}; + +////////////////////////////////// FAST ////////////////////////////////////////// + +class CV_EXPORTS FAST_GPU +{ +public: + enum + { + LOCATION_ROW = 0, + RESPONSE_ROW, + ROWS_COUNT + }; + + // all features have same size + static const int FEATURE_SIZE = 7; + + explicit FAST_GPU(int threshold, bool nonmaxSuppression = true, double keypointsRatio = 0.05); + + //! finds the keypoints using FAST detector + //! supports only CV_8UC1 images + void operator ()(const GpuMat& image, const GpuMat& mask, GpuMat& keypoints); + void operator ()(const GpuMat& image, const GpuMat& mask, std::vector& keypoints); + + //! download keypoints from device to host memory + void downloadKeypoints(const GpuMat& d_keypoints, std::vector& keypoints); + + //! convert keypoints to KeyPoint vector + void convertKeypoints(const Mat& h_keypoints, std::vector& keypoints); + + //! release temporary buffer's memory + void release(); + + bool nonmaxSuppression; + + int threshold; + + //! max keypoints = keypointsRatio * img.size().area() + double keypointsRatio; + + //! find keypoints and compute it's response if nonmaxSuppression is true + //! 
return count of detected keypoints + int calcKeyPointsLocation(const GpuMat& image, const GpuMat& mask); + + //! get final array of keypoints + //! performs nonmax suppression if needed + //! return final count of keypoints + int getKeyPoints(GpuMat& keypoints); + +private: + GpuMat kpLoc_; + int count_; + + GpuMat score_; + + GpuMat d_keypoints_; +}; + +////////////////////////////////// ORB ////////////////////////////////////////// + +class CV_EXPORTS ORB_GPU +{ +public: + enum + { + X_ROW = 0, + Y_ROW, + RESPONSE_ROW, + ANGLE_ROW, + OCTAVE_ROW, + SIZE_ROW, + ROWS_COUNT + }; + + enum + { + DEFAULT_FAST_THRESHOLD = 20 + }; + + //! Constructor + explicit ORB_GPU(int nFeatures = 500, float scaleFactor = 1.2f, int nLevels = 8, int edgeThreshold = 31, + int firstLevel = 0, int WTA_K = 2, int scoreType = 0, int patchSize = 31); + + //! Compute the ORB features on an image + //! image - the image to compute the features (supports only CV_8UC1 images) + //! mask - the mask to apply + //! keypoints - the resulting keypoints + void operator()(const GpuMat& image, const GpuMat& mask, std::vector& keypoints); + void operator()(const GpuMat& image, const GpuMat& mask, GpuMat& keypoints); + + //! Compute the ORB features and descriptors on an image + //! image - the image to compute the features (supports only CV_8UC1 images) + //! mask - the mask to apply + //! keypoints - the resulting keypoints + //! descriptors - descriptors array + void operator()(const GpuMat& image, const GpuMat& mask, std::vector& keypoints, GpuMat& descriptors); + void operator()(const GpuMat& image, const GpuMat& mask, GpuMat& keypoints, GpuMat& descriptors); + + //! download keypoints from device to host memory + void downloadKeyPoints(GpuMat& d_keypoints, std::vector& keypoints); + + //! convert keypoints to KeyPoint vector + void convertKeyPoints(Mat& d_keypoints, std::vector& keypoints); + + //! returns the descriptor size in bytes + inline int descriptorSize() const { return kBytes; } + + inline void setFastParams(int threshold, bool nonmaxSuppression = true) + { + fastDetector_.threshold = threshold; + fastDetector_.nonmaxSuppression = nonmaxSuppression; + } + + //! release temporary buffer's memory + void release(); + + //! if true, image will be blurred before descriptors calculation + bool blurForDescriptor; + +private: + enum { kBytes = 32 }; + + void buildScalePyramids(const GpuMat& image, const GpuMat& mask); + + void computeKeyPointsPyramid(); + + void computeDescriptors(GpuMat& descriptors); + + void mergeKeyPoints(GpuMat& keypoints); + + int nFeatures_; + float scaleFactor_; + int nLevels_; + int edgeThreshold_; + int firstLevel_; + int WTA_K_; + int scoreType_; + int patchSize_; + + // The number of desired features per scale + std::vector n_features_per_level_; + + // Points to compute BRIEF descriptors from + GpuMat pattern_; + + std::vector imagePyr_; + std::vector maskPyr_; + + GpuMat buf_; + + std::vector keyPointsPyr_; + std::vector keyPointsCount_; + + FAST_GPU fastDetector_; + + Ptr blurFilter; + + GpuMat d_keypoints_; +}; + +////////////////////////////////// Optical Flow ////////////////////////////////////////// + +class CV_EXPORTS BroxOpticalFlow +{ +public: + BroxOpticalFlow(float alpha_, float gamma_, float scale_factor_, int inner_iterations_, int outer_iterations_, int solver_iterations_) : + alpha(alpha_), gamma(gamma_), scale_factor(scale_factor_), + inner_iterations(inner_iterations_), outer_iterations(outer_iterations_), solver_iterations(solver_iterations_) + { + } + + //! 
Compute optical flow + //! frame0 - source frame (supports only CV_32FC1 type) + //! frame1 - frame to track (with the same size and type as frame0) + //! u - flow horizontal component (along x axis) + //! v - flow vertical component (along y axis) + void operator ()(const GpuMat& frame0, const GpuMat& frame1, GpuMat& u, GpuMat& v, Stream& stream = Stream::Null()); + + //! flow smoothness + float alpha; + + //! gradient constancy importance + float gamma; + + //! pyramid scale factor + float scale_factor; + + //! number of lagged non-linearity iterations (inner loop) + int inner_iterations; + + //! number of warping iterations (number of pyramid levels) + int outer_iterations; + + //! number of linear system solver iterations + int solver_iterations; + + GpuMat buf; +}; + +class CV_EXPORTS GoodFeaturesToTrackDetector_GPU +{ +public: + explicit GoodFeaturesToTrackDetector_GPU(int maxCorners = 1000, double qualityLevel = 0.01, double minDistance = 0.0, + int blockSize = 3, bool useHarrisDetector = false, double harrisK = 0.04); + + //! return 1 rows matrix with CV_32FC2 type + void operator ()(const GpuMat& image, GpuMat& corners, const GpuMat& mask = GpuMat()); + + int maxCorners; + double qualityLevel; + double minDistance; + + int blockSize; + bool useHarrisDetector; + double harrisK; + + void releaseMemory() + { + Dx_.release(); + Dy_.release(); + buf_.release(); + eig_.release(); + minMaxbuf_.release(); + tmpCorners_.release(); + } + +private: + GpuMat Dx_; + GpuMat Dy_; + GpuMat buf_; + GpuMat eig_; + GpuMat minMaxbuf_; + GpuMat tmpCorners_; +}; + +inline GoodFeaturesToTrackDetector_GPU::GoodFeaturesToTrackDetector_GPU(int maxCorners_, double qualityLevel_, double minDistance_, + int blockSize_, bool useHarrisDetector_, double harrisK_) +{ + maxCorners = maxCorners_; + qualityLevel = qualityLevel_; + minDistance = minDistance_; + blockSize = blockSize_; + useHarrisDetector = useHarrisDetector_; + harrisK = harrisK_; +} + + +class CV_EXPORTS PyrLKOpticalFlow +{ +public: + PyrLKOpticalFlow(); + + void sparse(const GpuMat& prevImg, const GpuMat& nextImg, const GpuMat& prevPts, GpuMat& nextPts, + GpuMat& status, GpuMat* err = 0); + + void dense(const GpuMat& prevImg, const GpuMat& nextImg, GpuMat& u, GpuMat& v, GpuMat* err = 0); + + void releaseMemory(); + + Size winSize; + int maxLevel; + int iters; + double derivLambda; //unused + bool useInitialFlow; + float minEigThreshold; //unused + bool getMinEigenVals; //unused + +private: + GpuMat uPyr_[2]; + vector prevPyr_; + vector nextPyr_; + GpuMat vPyr_[2]; + vector buf_; + vector unused; + bool isDeviceArch11_; +}; + + +class CV_EXPORTS FarnebackOpticalFlow +{ +public: + FarnebackOpticalFlow() + { + numLevels = 5; + pyrScale = 0.5; + fastPyramids = false; + winSize = 13; + numIters = 10; + polyN = 5; + polySigma = 1.1; + flags = 0; + isDeviceArch11_ = !DeviceInfo().supports(FEATURE_SET_COMPUTE_12); + } + + int numLevels; + double pyrScale; + bool fastPyramids; + int winSize; + int numIters; + int polyN; + double polySigma; + int flags; + + void operator ()(const GpuMat &frame0, const GpuMat &frame1, GpuMat &flowx, GpuMat &flowy, Stream &s = Stream::Null()); + + void releaseMemory() + { + frames_[0].release(); + frames_[1].release(); + pyrLevel_[0].release(); + pyrLevel_[1].release(); + M_.release(); + bufM_.release(); + R_[0].release(); + R_[1].release(); + blurredFrame_[0].release(); + blurredFrame_[1].release(); + pyramid0_.clear(); + pyramid1_.clear(); + } + +private: + void prepareGaussian( + int n, double sigma, float *g, float *xg, 
float *xxg, + double &ig11, double &ig03, double &ig33, double &ig55); + + void setPolynomialExpansionConsts(int n, double sigma); + + void updateFlow_boxFilter( + const GpuMat& R0, const GpuMat& R1, GpuMat& flowx, GpuMat &flowy, + GpuMat& M, GpuMat &bufM, int blockSize, bool updateMatrices, Stream streams[]); + + void updateFlow_gaussianBlur( + const GpuMat& R0, const GpuMat& R1, GpuMat& flowx, GpuMat& flowy, + GpuMat& M, GpuMat &bufM, int blockSize, bool updateMatrices, Stream streams[]); + + GpuMat frames_[2]; + GpuMat pyrLevel_[2], M_, bufM_, R_[2], blurredFrame_[2]; + std::vector pyramid0_, pyramid1_; + + bool isDeviceArch11_; +}; + + +// Implementation of the Zach, Pock and Bischof Dual TV-L1 Optical Flow method +// +// see reference: +// [1] C. Zach, T. Pock and H. Bischof, "A Duality Based Approach for Realtime TV-L1 Optical Flow". +// [2] Javier Sanchez, Enric Meinhardt-Llopis and Gabriele Facciolo. "TV-L1 Optical Flow Estimation". +class CV_EXPORTS OpticalFlowDual_TVL1_GPU +{ +public: + OpticalFlowDual_TVL1_GPU(); + + void operator ()(const GpuMat& I0, const GpuMat& I1, GpuMat& flowx, GpuMat& flowy); + + void collectGarbage(); + + /** + * Time step of the numerical scheme. + */ + double tau; + + /** + * Weight parameter for the data term, attachment parameter. + * This is the most relevant parameter, which determines the smoothness of the output. + * The smaller this parameter is, the smoother the solutions we obtain. + * It depends on the range of motions of the images, so its value should be adapted to each image sequence. + */ + double lambda; + + /** + * Weight parameter for (u - v)^2, tightness parameter. + * It serves as a link between the attachment and the regularization terms. + * In theory, it should have a small value in order to maintain both parts in correspondence. + * The method is stable for a large range of values of this parameter. + */ + double theta; + + /** + * Number of scales used to create the pyramid of images. + */ + int nscales; + + /** + * Number of warpings per scale. + * Represents the number of times that I1(x+u0) and grad( I1(x+u0) ) are computed per scale. + * This is a parameter that assures the stability of the method. + * It also affects the running time, so it is a compromise between speed and accuracy. + */ + int warps; + + /** + * Stopping criterion threshold used in the numerical scheme, which is a trade-off between precision and running time. + * A small value will yield more accurate solutions at the expense of a slower convergence. + */ + double epsilon; + + /** + * Stopping criterion iterations number used in the numerical scheme. + */ + int iterations; + + bool useInitialFlow; + +private: + void procOneScale(const GpuMat& I0, const GpuMat& I1, GpuMat& u1, GpuMat& u2); + + std::vector I0s; + std::vector I1s; + std::vector u1s; + std::vector u2s; + + GpuMat I1x_buf; + GpuMat I1y_buf; + + GpuMat I1w_buf; + GpuMat I1wx_buf; + GpuMat I1wy_buf; + + GpuMat grad_buf; + GpuMat rho_c_buf; + + GpuMat p11_buf; + GpuMat p12_buf; + GpuMat p21_buf; + GpuMat p22_buf; + + GpuMat diff_buf; + GpuMat norm_buf; +}; + + +//! 
Calculates optical flow for 2 images using block matching algorithm */ +CV_EXPORTS void calcOpticalFlowBM(const GpuMat& prev, const GpuMat& curr, + Size block_size, Size shift_size, Size max_range, bool use_previous, + GpuMat& velx, GpuMat& vely, GpuMat& buf, + Stream& stream = Stream::Null()); + +class CV_EXPORTS FastOpticalFlowBM +{ +public: + void operator ()(const GpuMat& I0, const GpuMat& I1, GpuMat& flowx, GpuMat& flowy, int search_window = 21, int block_window = 7, Stream& s = Stream::Null()); + +private: + GpuMat buffer; + GpuMat extended_I0; + GpuMat extended_I1; +}; + + +//! Interpolate frames (images) using provided optical flow (displacement field). +//! frame0 - frame 0 (32-bit floating point images, single channel) +//! frame1 - frame 1 (the same type and size) +//! fu - forward horizontal displacement +//! fv - forward vertical displacement +//! bu - backward horizontal displacement +//! bv - backward vertical displacement +//! pos - new frame position +//! newFrame - new frame +//! buf - temporary buffer, will have width x 6*height size, CV_32FC1 type and contain 6 GpuMat; +//! occlusion masks 0, occlusion masks 1, +//! interpolated forward flow 0, interpolated forward flow 1, +//! interpolated backward flow 0, interpolated backward flow 1 +//! +CV_EXPORTS void interpolateFrames(const GpuMat& frame0, const GpuMat& frame1, + const GpuMat& fu, const GpuMat& fv, + const GpuMat& bu, const GpuMat& bv, + float pos, GpuMat& newFrame, GpuMat& buf, + Stream& stream = Stream::Null()); + +CV_EXPORTS void createOpticalFlowNeedleMap(const GpuMat& u, const GpuMat& v, GpuMat& vertex, GpuMat& colors); + + +//////////////////////// Background/foreground segmentation //////////////////////// + +// Foreground Object Detection from Videos Containing Complex Background. +// Liyuan Li, Weimin Huang, Irene Y.H. Gu, and Qi Tian. +// ACM MM2003 9p +class CV_EXPORTS FGDStatModel +{ +public: + struct CV_EXPORTS Params + { + int Lc; // Quantized levels per 'color' component. Power of two, typically 32, 64 or 128. + int N1c; // Number of color vectors used to model normal background color variation at a given pixel. + int N2c; // Number of color vectors retained at given pixel. Must be > N1c, typically ~ 5/3 of N1c. + // Used to allow the first N1c vectors to adapt over time to changing background. + + int Lcc; // Quantized levels per 'color co-occurrence' component. Power of two, typically 16, 32 or 64. + int N1cc; // Number of color co-occurrence vectors used to model normal background color variation at a given pixel. + int N2cc; // Number of color co-occurrence vectors retained at given pixel. Must be > N1cc, typically ~ 5/3 of N1cc. + // Used to allow the first N1cc vectors to adapt over time to changing background. + + bool is_obj_without_holes; // If TRUE we ignore holes within foreground blobs. Defaults to TRUE. + int perform_morphing; // Number of erode-dilate-erode foreground-blob cleanup iterations. + // These erase one-pixel junk blobs and merge almost-touching blobs. Default value is 1. + + float alpha1; // How quickly we forget old background pixel values seen. Typically set to 0.1. + float alpha2; // "Controls speed of feature learning". Depends on T. Typical value circa 0.005. + float alpha3; // Alternate to alpha2, used (e.g.) for quicker initial convergence. Typical value 0.1. + + float delta; // Affects color and color co-occurrence quantization, typically set to 2. + float T; // A percentage value which determines when new features can be recognized as new background. 
(Typically 0.9). + float minArea; // Discard foreground blobs whose bounding box is smaller than this threshold. + + // default Params + Params(); + }; + + // out_cn - channels count in output result (can be 3 or 4) + // 4-channels require more memory, but a bit faster + explicit FGDStatModel(int out_cn = 3); + explicit FGDStatModel(const cv::gpu::GpuMat& firstFrame, const Params& params = Params(), int out_cn = 3); + + ~FGDStatModel(); + + void create(const cv::gpu::GpuMat& firstFrame, const Params& params = Params()); + void release(); + + int update(const cv::gpu::GpuMat& curFrame); + + //8UC3 or 8UC4 reference background image + cv::gpu::GpuMat background; + + //8UC1 foreground image + cv::gpu::GpuMat foreground; + + std::vector< std::vector > foreground_regions; + +private: + FGDStatModel(const FGDStatModel&); + FGDStatModel& operator=(const FGDStatModel&); + + class Impl; + std::auto_ptr impl_; +}; + +/*! + Gaussian Mixture-based Backbround/Foreground Segmentation Algorithm + + The class implements the following algorithm: + "An improved adaptive background mixture model for real-time tracking with shadow detection" + P. KadewTraKuPong and R. Bowden, + Proc. 2nd European Workshp on Advanced Video-Based Surveillance Systems, 2001." + http://personal.ee.surrey.ac.uk/Personal/R.Bowden/publications/avbs01/avbs01.pdf +*/ +class CV_EXPORTS MOG_GPU +{ +public: + //! the default constructor + MOG_GPU(int nmixtures = -1); + + //! re-initiaization method + void initialize(Size frameSize, int frameType); + + //! the update operator + void operator()(const GpuMat& frame, GpuMat& fgmask, float learningRate = 0.0f, Stream& stream = Stream::Null()); + + //! computes a background image which are the mean of all background gaussians + void getBackgroundImage(GpuMat& backgroundImage, Stream& stream = Stream::Null()) const; + + //! releases all inner buffers + void release(); + + int history; + float varThreshold; + float backgroundRatio; + float noiseSigma; + +private: + int nmixtures_; + + Size frameSize_; + int frameType_; + int nframes_; + + GpuMat weight_; + GpuMat sortKey_; + GpuMat mean_; + GpuMat var_; +}; + +/*! + The class implements the following algorithm: + "Improved adaptive Gausian mixture model for background subtraction" + Z.Zivkovic + International Conference Pattern Recognition, UK, August, 2004. + http://www.zoranz.net/Publications/zivkovic2004ICPR.pdf +*/ +class CV_EXPORTS MOG2_GPU +{ +public: + //! the default constructor + MOG2_GPU(int nmixtures = -1); + + //! re-initiaization method + void initialize(Size frameSize, int frameType); + + //! the update operator + void operator()(const GpuMat& frame, GpuMat& fgmask, float learningRate = -1.0f, Stream& stream = Stream::Null()); + + //! computes a background image which are the mean of all background gaussians + void getBackgroundImage(GpuMat& backgroundImage, Stream& stream = Stream::Null()) const; + + //! releases all inner buffers + void release(); + + // parameters + // you should call initialize after parameters changes + + int history; + + //! here it is the maximum allowed number of mixture components. + //! Actual number is determined dynamically per pixel + float varThreshold; + // threshold on the squared Mahalanobis distance to decide if it is well described + // by the background model or not. Related to Cthr from the paper. + // This does not influence the update of the background. A typical value could be 4 sigma + // and that is varThreshold=4*4=16; Corresponds to Tb in the paper. 
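+
+    // Usage sketch (illustrative; d_frame is a hypothetical GpuMat refilled with each new video frame):
+    //     cv::gpu::MOG2_GPU bgsub;
+    //     cv::gpu::GpuMat d_fgmask, d_background;
+    //     for (;;) {
+    //         /* upload the next frame into d_frame */
+    //         bgsub(d_frame, d_fgmask);               // default learning rate
+    //         bgsub.getBackgroundImage(d_background);
+    //     }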
+ + ///////////////////////// + // less important parameters - things you might change but be carefull + //////////////////////// + + float backgroundRatio; + // corresponds to fTB=1-cf from the paper + // TB - threshold when the component becomes significant enough to be included into + // the background model. It is the TB=1-cf from the paper. So I use cf=0.1 => TB=0. + // For alpha=0.001 it means that the mode should exist for approximately 105 frames before + // it is considered foreground + // float noiseSigma; + float varThresholdGen; + + //correspondts to Tg - threshold on the squared Mahalan. dist. to decide + //when a sample is close to the existing components. If it is not close + //to any a new component will be generated. I use 3 sigma => Tg=3*3=9. + //Smaller Tg leads to more generated components and higher Tg might make + //lead to small number of components but they can grow too large + float fVarInit; + float fVarMin; + float fVarMax; + + //initial variance for the newly generated components. + //It will will influence the speed of adaptation. A good guess should be made. + //A simple way is to estimate the typical standard deviation from the images. + //I used here 10 as a reasonable value + // min and max can be used to further control the variance + float fCT; //CT - complexity reduction prior + //this is related to the number of samples needed to accept that a component + //actually exists. We use CT=0.05 of all the samples. By setting CT=0 you get + //the standard Stauffer&Grimson algorithm (maybe not exact but very similar) + + //shadow detection parameters + bool bShadowDetection; //default 1 - do shadow detection + unsigned char nShadowDetection; //do shadow detection - insert this value as the detection result - 127 default value + float fTau; + // Tau - shadow threshold. The shadow is detected if the pixel is darker + //version of the background. Tau is a threshold on how much darker the shadow can be. + //Tau= 0.5 means that if pixel is more than 2 times darker then it is not shadow + //See: Prati,Mikic,Trivedi,Cucchiarra,"Detecting Moving Shadows...",IEEE PAMI,2003. + +private: + int nmixtures_; + + Size frameSize_; + int frameType_; + int nframes_; + + GpuMat weight_; + GpuMat variance_; + GpuMat mean_; + + GpuMat bgmodelUsedModes_; //keep track of number of modes per pixel +}; + +/** + * Background Subtractor module. Takes a series of images and returns a sequence of mask (8UC1) + * images of the same size, where 255 indicates Foreground and 0 represents Background. + * This class implements an algorithm described in "Visual Tracking of Human Visitors under + * Variable-Lighting Conditions for a Responsive Audio Art Installation," A. Godbehere, + * A. Matsukawa, K. Goldberg, American Control Conference, Montreal, June 2012. + */ +class CV_EXPORTS GMG_GPU +{ +public: + GMG_GPU(); + + /** + * Validate parameters and set up data structures for appropriate frame size. + * @param frameSize Input frame size + * @param min Minimum value taken on by pixels in image sequence. Usually 0 + * @param max Maximum value taken on by pixels in image sequence. e.g. 1.0 or 255 + */ + void initialize(Size frameSize, float min = 0.0f, float max = 255.0f); + + /** + * Performs single-frame background subtraction and builds up a statistical background image + * model. 
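+     *
+     * Usage sketch (illustrative; d_frame is a hypothetical GpuMat video frame):
+     *     cv::gpu::GMG_GPU gmg;
+     *     gmg.initialize(d_frame.size());
+     *     cv::gpu::GpuMat d_fgmask;
+     *     gmg(d_frame, d_fgmask);
+     *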
+ * @param frame Input frame + * @param fgmask Output mask image representing foreground and background pixels + * @param learningRate determines how quickly features are “forgotten” from histograms + * @param stream Stream for the asynchronous version + */ + void operator ()(const GpuMat& frame, GpuMat& fgmask, float learningRate = -1.0f, Stream& stream = Stream::Null()); + + //! Releases all inner buffers + void release(); + + //! Total number of distinct colors to maintain in histogram. + int maxFeatures; + + //! Set between 0.0 and 1.0, determines how quickly features are "forgotten" from histograms. + float learningRate; + + //! Number of frames of video to use to initialize histograms. + int numInitializationFrames; + + //! Number of discrete levels in each channel to be used in histograms. + int quantizationLevels; + + //! Prior probability that any given pixel is a background pixel. A sensitivity parameter. + float backgroundPrior; + + //! Value above which pixel is determined to be FG. + float decisionThreshold; + + //! Smoothing radius, in pixels, for cleaning up FG image. + int smoothingRadius; + + //! Perform background model update. + bool updateBackgroundModel; + +private: + float maxVal_, minVal_; + + Size frameSize_; + + int frameNum_; + + GpuMat nfeatures_; + GpuMat colors_; + GpuMat weights_; + + Ptr boxFilter_; + GpuMat buf_; +}; + +////////////////////////////////// Video Encoding ////////////////////////////////// + +// Works only under Windows +// Supports olny H264 video codec and AVI files +class CV_EXPORTS VideoWriter_GPU +{ +public: + struct EncoderParams; + + // Callbacks for video encoder, use it if you want to work with raw video stream + class EncoderCallBack; + + enum SurfaceFormat + { + SF_UYVY = 0, + SF_YUY2, + SF_YV12, + SF_NV12, + SF_IYUV, + SF_BGR, + SF_GRAY = SF_BGR + }; + + VideoWriter_GPU(); + VideoWriter_GPU(const std::string& fileName, cv::Size frameSize, double fps, SurfaceFormat format = SF_BGR); + VideoWriter_GPU(const std::string& fileName, cv::Size frameSize, double fps, const EncoderParams& params, SurfaceFormat format = SF_BGR); + VideoWriter_GPU(const cv::Ptr& encoderCallback, cv::Size frameSize, double fps, SurfaceFormat format = SF_BGR); + VideoWriter_GPU(const cv::Ptr& encoderCallback, cv::Size frameSize, double fps, const EncoderParams& params, SurfaceFormat format = SF_BGR); + ~VideoWriter_GPU(); + + // all methods throws cv::Exception if error occurs + void open(const std::string& fileName, cv::Size frameSize, double fps, SurfaceFormat format = SF_BGR); + void open(const std::string& fileName, cv::Size frameSize, double fps, const EncoderParams& params, SurfaceFormat format = SF_BGR); + void open(const cv::Ptr& encoderCallback, cv::Size frameSize, double fps, SurfaceFormat format = SF_BGR); + void open(const cv::Ptr& encoderCallback, cv::Size frameSize, double fps, const EncoderParams& params, SurfaceFormat format = SF_BGR); + + bool isOpened() const; + void close(); + + void write(const cv::gpu::GpuMat& image, bool lastFrame = false); + + struct CV_EXPORTS EncoderParams + { + int P_Interval; // NVVE_P_INTERVAL, + int IDR_Period; // NVVE_IDR_PERIOD, + int DynamicGOP; // NVVE_DYNAMIC_GOP, + int RCType; // NVVE_RC_TYPE, + int AvgBitrate; // NVVE_AVG_BITRATE, + int PeakBitrate; // NVVE_PEAK_BITRATE, + int QP_Level_Intra; // NVVE_QP_LEVEL_INTRA, + int QP_Level_InterP; // NVVE_QP_LEVEL_INTER_P, + int QP_Level_InterB; // NVVE_QP_LEVEL_INTER_B, + int DeblockMode; // NVVE_DEBLOCK_MODE, + int ProfileLevel; // NVVE_PROFILE_LEVEL, + int 
ForceIntra; // NVVE_FORCE_INTRA, + int ForceIDR; // NVVE_FORCE_IDR, + int ClearStat; // NVVE_CLEAR_STAT, + int DIMode; // NVVE_SET_DEINTERLACE, + int Presets; // NVVE_PRESETS, + int DisableCabac; // NVVE_DISABLE_CABAC, + int NaluFramingType; // NVVE_CONFIGURE_NALU_FRAMING_TYPE + int DisableSPSPPS; // NVVE_DISABLE_SPS_PPS + + EncoderParams(); + explicit EncoderParams(const std::string& configFile); + + void load(const std::string& configFile); + void save(const std::string& configFile) const; + }; + + EncoderParams getParams() const; + + class CV_EXPORTS EncoderCallBack + { + public: + enum PicType + { + IFRAME = 1, + PFRAME = 2, + BFRAME = 3 + }; + + virtual ~EncoderCallBack() {} + + // callback function to signal the start of bitstream that is to be encoded + // must return pointer to buffer + virtual uchar* acquireBitStream(int* bufferSize) = 0; + + // callback function to signal that the encoded bitstream is ready to be written to file + virtual void releaseBitStream(unsigned char* data, int size) = 0; + + // callback function to signal that the encoding operation on the frame has started + virtual void onBeginFrame(int frameNumber, PicType picType) = 0; + + // callback function signals that the encoding operation on the frame has finished + virtual void onEndFrame(int frameNumber, PicType picType) = 0; + }; + +private: + VideoWriter_GPU(const VideoWriter_GPU&); + VideoWriter_GPU& operator=(const VideoWriter_GPU&); + + class Impl; + std::auto_ptr impl_; +}; + + +////////////////////////////////// Video Decoding ////////////////////////////////////////// + +namespace detail +{ + class FrameQueue; + class VideoParser; +} + +class CV_EXPORTS VideoReader_GPU +{ +public: + enum Codec + { + MPEG1 = 0, + MPEG2, + MPEG4, + VC1, + H264, + JPEG, + H264_SVC, + H264_MVC, + + Uncompressed_YUV420 = (('I'<<24)|('Y'<<16)|('U'<<8)|('V')), // Y,U,V (4:2:0) + Uncompressed_YV12 = (('Y'<<24)|('V'<<16)|('1'<<8)|('2')), // Y,V,U (4:2:0) + Uncompressed_NV12 = (('N'<<24)|('V'<<16)|('1'<<8)|('2')), // Y,UV (4:2:0) + Uncompressed_YUYV = (('Y'<<24)|('U'<<16)|('Y'<<8)|('V')), // YUYV/YUY2 (4:2:2) + Uncompressed_UYVY = (('U'<<24)|('Y'<<16)|('V'<<8)|('Y')) // UYVY (4:2:2) + }; + + enum ChromaFormat + { + Monochrome=0, + YUV420, + YUV422, + YUV444 + }; + + struct FormatInfo + { + Codec codec; + ChromaFormat chromaFormat; + int width; + int height; + }; + + class VideoSource; + + VideoReader_GPU(); + explicit VideoReader_GPU(const std::string& filename); + explicit VideoReader_GPU(const cv::Ptr& source); + + ~VideoReader_GPU(); + + void open(const std::string& filename); + void open(const cv::Ptr& source); + bool isOpened() const; + + void close(); + + bool read(GpuMat& image); + + FormatInfo format() const; + void dumpFormat(std::ostream& st); + + class CV_EXPORTS VideoSource + { + public: + VideoSource() : frameQueue_(0), videoParser_(0) {} + virtual ~VideoSource() {} + + virtual FormatInfo format() const = 0; + virtual void start() = 0; + virtual void stop() = 0; + virtual bool isStarted() const = 0; + virtual bool hasError() const = 0; + + void setFrameQueue(detail::FrameQueue* frameQueue) { frameQueue_ = frameQueue; } + void setVideoParser(detail::VideoParser* videoParser) { videoParser_ = videoParser; } + + protected: + bool parseVideoData(const uchar* data, size_t size, bool endOfStream = false); + + private: + VideoSource(const VideoSource&); + VideoSource& operator =(const VideoSource&); + + detail::FrameQueue* frameQueue_; + detail::VideoParser* videoParser_; + }; + +private: + VideoReader_GPU(const 
VideoReader_GPU&); + VideoReader_GPU& operator =(const VideoReader_GPU&); + + class Impl; + std::auto_ptr impl_; +}; + +//! removes points (CV_32FC2, single row matrix) with zero mask value +CV_EXPORTS void compactPoints(GpuMat &points0, GpuMat &points1, const GpuMat &mask); + +CV_EXPORTS void calcWobbleSuppressionMaps( + int left, int idx, int right, Size size, const Mat &ml, const Mat &mr, + GpuMat &mapx, GpuMat &mapy); + +} // namespace gpu + +} // namespace cv + +#endif /* __OPENCV_GPU_HPP__ */ diff --git a/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/gpumat.hpp b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/gpumat.hpp new file mode 100644 index 0000000..840398b --- /dev/null +++ b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/gpumat.hpp @@ -0,0 +1,43 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#include "opencv2/core/gpumat.hpp" diff --git a/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/stream_accessor.hpp b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/stream_accessor.hpp new file mode 100644 index 0000000..bcd58ba --- /dev/null +++ b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/stream_accessor.hpp @@ -0,0 +1,65 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. 
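// Illustrative sketch (not part of the original headers) of decoding with VideoReader_GPU and
// re-encoding with VideoWriter_GPU, as declared above. File names are placeholders; per the
// comments above, the encoder works only under Windows and only with H264 in AVI containers,
// and the decoded surface format may need conversion before it matches what the writer expects.
cv::gpu::VideoReader_GPU reader("in.avi");
cv::gpu::VideoWriter_GPU writer("out.avi", cv::Size(1280, 720), 25.0);

cv::gpu::GpuMat d_frame;
while (reader.read(d_frame))
    writer.write(d_frame);              // pass lastFrame = true on the final frame if it is known

reader.close();
writer.close();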
+// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef __OPENCV_GPU_STREAM_ACCESSOR_HPP__ +#define __OPENCV_GPU_STREAM_ACCESSOR_HPP__ + +#include "opencv2/gpu/gpu.hpp" +#include "cuda_runtime_api.h" + +namespace cv +{ + namespace gpu + { + // This is only header file that depends on Cuda. All other headers are independent. + // So if you use OpenCV binaries you do noot need to install Cuda Toolkit. + // But of you wanna use GPU by yourself, may get cuda stream instance using the class below. + // In this case you have to install Cuda Toolkit. + struct StreamAccessor + { + CV_EXPORTS static cudaStream_t getStream(const Stream& stream); + CV_EXPORTS static Stream wrapStream(cudaStream_t stream); + }; + } +} + +#endif /* __OPENCV_GPU_STREAM_ACCESSOR_HPP__ */ diff --git a/src/c/hardware/rasberrypi/libraries/opencv/opencv2/highgui.hpp b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/highgui.hpp new file mode 100644 index 0000000..c76a020 --- /dev/null +++ b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/highgui.hpp @@ -0,0 +1,43 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. 
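// Illustrative sketch (not part of the original header): obtaining the raw cudaStream_t from a
// cv::gpu::Stream via the StreamAccessor declared above, so user CUDA code can be enqueued on
// the same stream. 'myKernel' is a hypothetical user kernel; this path requires the CUDA Toolkit.
cv::gpu::Stream cvStream;
cudaStream_t rawStream = cv::gpu::StreamAccessor::getStream(cvStream);
// myKernel<<<grid, block, 0, rawStream>>>(...);   // user work ordered after OpenCV's GPU calls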
+// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#include "opencv2/highgui/highgui.hpp" diff --git a/src/c/hardware/rasberrypi/libraries/opencv/opencv2/highgui/cap_ios.h b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/highgui/cap_ios.h new file mode 100644 index 0000000..4c931d4 --- /dev/null +++ b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/highgui/cap_ios.h @@ -0,0 +1,171 @@ +/* For iOS video I/O + * by Eduard Feicho on 29/07/12 + * Copyright 2012. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR "AS IS" AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO + * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; + * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR + * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF + * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + */ + +#import +#import +#import +#import +#include "opencv2/core/core.hpp" + +/////////////////////////////////////// CvAbstractCamera ///////////////////////////////////// + +@class CvAbstractCamera; + +@interface CvAbstractCamera : NSObject +{ + AVCaptureSession* captureSession; + AVCaptureConnection* videoCaptureConnection; + AVCaptureVideoPreviewLayer *captureVideoPreviewLayer; + + UIDeviceOrientation currentDeviceOrientation; + + BOOL cameraAvailable; + BOOL captureSessionLoaded; + BOOL running; + BOOL useAVCaptureVideoPreviewLayer; + + AVCaptureDevicePosition defaultAVCaptureDevicePosition; + AVCaptureVideoOrientation defaultAVCaptureVideoOrientation; + NSString *const defaultAVCaptureSessionPreset; + + int defaultFPS; + + UIView* parentView; + + int imageWidth; + int imageHeight; +} + +@property (nonatomic, retain) AVCaptureSession* captureSession; +@property (nonatomic, retain) AVCaptureConnection* videoCaptureConnection; + +@property (nonatomic, readonly) BOOL running; +@property (nonatomic, readonly) BOOL captureSessionLoaded; + +@property (nonatomic, assign) int defaultFPS; +@property (nonatomic, readonly) AVCaptureVideoPreviewLayer *captureVideoPreviewLayer; +@property (nonatomic, assign) AVCaptureDevicePosition defaultAVCaptureDevicePosition; +@property (nonatomic, assign) AVCaptureVideoOrientation defaultAVCaptureVideoOrientation; +@property (nonatomic, assign) BOOL useAVCaptureVideoPreviewLayer; +@property (nonatomic, strong) NSString *const defaultAVCaptureSessionPreset; + +@property (nonatomic, assign) int imageWidth; +@property (nonatomic, assign) int imageHeight; + +@property (nonatomic, retain) UIView* parentView; + +- (void)start; +- (void)stop; +- (void)switchCameras; + +- (id)initWithParentView:(UIView*)parent; + +- (void)createCaptureOutput; +- (void)createVideoPreviewLayer; +- (void)updateOrientation; + +- (void)lockFocus; +- (void)unlockFocus; +- (void)lockExposure; +- (void)unlockExposure; +- (void)lockBalance; +- (void)unlockBalance; + +@end + +///////////////////////////////// CvVideoCamera /////////////////////////////////////////// + +@class CvVideoCamera; + +@protocol CvVideoCameraDelegate + +#ifdef __cplusplus +// delegate method for processing image frames +- (void)processImage:(cv::Mat&)image; +#endif + +@end + +@interface CvVideoCamera : CvAbstractCamera +{ + AVCaptureVideoDataOutput *videoDataOutput; + + dispatch_queue_t videoDataOutputQueue; + CALayer *customPreviewLayer; + + BOOL grayscaleMode; + + BOOL recordVideo; + BOOL rotateVideo; + AVAssetWriterInput* recordAssetWriterInput; + AVAssetWriterInputPixelBufferAdaptor* recordPixelBufferAdaptor; + AVAssetWriter* recordAssetWriter; + + CMTime lastSampleTime; + +} + +@property (nonatomic, assign) id delegate; +@property (nonatomic, assign) BOOL grayscaleMode; + +@property (nonatomic, assign) BOOL recordVideo; +@property (nonatomic, assign) BOOL rotateVideo; +@property (nonatomic, retain) AVAssetWriterInput* recordAssetWriterInput; +@property (nonatomic, retain) AVAssetWriterInputPixelBufferAdaptor* recordPixelBufferAdaptor; +@property (nonatomic, retain) AVAssetWriter* recordAssetWriter; + +- (void)adjustLayoutToInterfaceOrientation:(UIInterfaceOrientation)interfaceOrientation; +- (void)layoutPreviewLayer; +- (void)saveVideo; +- (NSURL *)videoFileURL; +- (NSString *)videoFileString; + + +@end + +///////////////////////////////// CvPhotoCamera /////////////////////////////////////////// + +@class CvPhotoCamera; + +@protocol CvPhotoCameraDelegate + +- (void)photoCamera:(CvPhotoCamera*)photoCamera 
capturedImage:(UIImage *)image; +- (void)photoCameraCancel:(CvPhotoCamera*)photoCamera; + +@end + +@interface CvPhotoCamera : CvAbstractCamera +{ + AVCaptureStillImageOutput *stillImageOutput; +} + +@property (nonatomic, assign) id delegate; + +- (void)takePicture; + +@end diff --git a/src/c/hardware/rasberrypi/libraries/opencv/opencv2/highgui/highgui.hpp b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/highgui/highgui.hpp new file mode 100644 index 0000000..f6f2293 --- /dev/null +++ b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/highgui/highgui.hpp @@ -0,0 +1,255 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. 
+// +//M*/ + +#ifndef __OPENCV_HIGHGUI_HPP__ +#define __OPENCV_HIGHGUI_HPP__ + +#include "opencv2/core/core.hpp" +#include "opencv2/highgui/highgui_c.h" + +#ifdef __cplusplus + +struct CvCapture; +struct CvVideoWriter; + +namespace cv +{ + +enum { + // Flags for namedWindow + WINDOW_NORMAL = CV_WINDOW_NORMAL, // the user can resize the window (no constraint) / also use to switch a fullscreen window to a normal size + WINDOW_AUTOSIZE = CV_WINDOW_AUTOSIZE, // the user cannot resize the window, the size is constrainted by the image displayed + WINDOW_OPENGL = CV_WINDOW_OPENGL, // window with opengl support + + // Flags for set / getWindowProperty + WND_PROP_FULLSCREEN = CV_WND_PROP_FULLSCREEN, // fullscreen property + WND_PROP_AUTOSIZE = CV_WND_PROP_AUTOSIZE, // autosize property + WND_PROP_ASPECT_RATIO = CV_WND_PROP_ASPECTRATIO, // window's aspect ration + WND_PROP_OPENGL = CV_WND_PROP_OPENGL // opengl support +}; + +CV_EXPORTS_W void namedWindow(const string& winname, int flags = WINDOW_AUTOSIZE); +CV_EXPORTS_W void destroyWindow(const string& winname); +CV_EXPORTS_W void destroyAllWindows(); + +CV_EXPORTS_W int startWindowThread(); + +CV_EXPORTS_W int waitKey(int delay = 0); + +CV_EXPORTS_W void imshow(const string& winname, InputArray mat); + +CV_EXPORTS_W void resizeWindow(const string& winname, int width, int height); +CV_EXPORTS_W void moveWindow(const string& winname, int x, int y); + +CV_EXPORTS_W void setWindowProperty(const string& winname, int prop_id, double prop_value);//YV +CV_EXPORTS_W double getWindowProperty(const string& winname, int prop_id);//YV + +enum +{ + EVENT_MOUSEMOVE =0, + EVENT_LBUTTONDOWN =1, + EVENT_RBUTTONDOWN =2, + EVENT_MBUTTONDOWN =3, + EVENT_LBUTTONUP =4, + EVENT_RBUTTONUP =5, + EVENT_MBUTTONUP =6, + EVENT_LBUTTONDBLCLK =7, + EVENT_RBUTTONDBLCLK =8, + EVENT_MBUTTONDBLCLK =9 +}; + +enum +{ + EVENT_FLAG_LBUTTON =1, + EVENT_FLAG_RBUTTON =2, + EVENT_FLAG_MBUTTON =4, + EVENT_FLAG_CTRLKEY =8, + EVENT_FLAG_SHIFTKEY =16, + EVENT_FLAG_ALTKEY =32 +}; + +typedef void (*MouseCallback)(int event, int x, int y, int flags, void* userdata); + +//! 
assigns callback for mouse events +CV_EXPORTS void setMouseCallback(const string& winname, MouseCallback onMouse, void* userdata = 0); + + +typedef void (CV_CDECL *TrackbarCallback)(int pos, void* userdata); + +CV_EXPORTS int createTrackbar(const string& trackbarname, const string& winname, + int* value, int count, + TrackbarCallback onChange = 0, + void* userdata = 0); + +CV_EXPORTS_W int getTrackbarPos(const string& trackbarname, const string& winname); +CV_EXPORTS_W void setTrackbarPos(const string& trackbarname, const string& winname, int pos); + +// OpenGL support + +typedef void (*OpenGlDrawCallback)(void* userdata); +CV_EXPORTS void setOpenGlDrawCallback(const string& winname, OpenGlDrawCallback onOpenGlDraw, void* userdata = 0); + +CV_EXPORTS void setOpenGlContext(const string& winname); + +CV_EXPORTS void updateWindow(const string& winname); + +// < Deperecated +CV_EXPORTS void pointCloudShow(const string& winname, const GlCamera& camera, const GlArrays& arr); +CV_EXPORTS void pointCloudShow(const string& winname, const GlCamera& camera, InputArray points, InputArray colors = noArray()); +// > + +//Only for Qt + +CV_EXPORTS CvFont fontQt(const string& nameFont, int pointSize=-1, + Scalar color=Scalar::all(0), int weight=CV_FONT_NORMAL, + int style=CV_STYLE_NORMAL, int spacing=0); +CV_EXPORTS void addText( const Mat& img, const string& text, Point org, CvFont font); + +CV_EXPORTS void displayOverlay(const string& winname, const string& text, int delayms CV_DEFAULT(0)); +CV_EXPORTS void displayStatusBar(const string& winname, const string& text, int delayms CV_DEFAULT(0)); + +CV_EXPORTS void saveWindowParameters(const string& windowName); +CV_EXPORTS void loadWindowParameters(const string& windowName); +CV_EXPORTS int startLoop(int (*pt2Func)(int argc, char *argv[]), int argc, char* argv[]); +CV_EXPORTS void stopLoop(); + +typedef void (CV_CDECL *ButtonCallback)(int state, void* userdata); +CV_EXPORTS int createButton( const string& bar_name, ButtonCallback on_change, + void* userdata=NULL, int type=CV_PUSH_BUTTON, + bool initial_button_state=0); + +//------------------------- + +enum +{ + // 8bit, color or not + IMREAD_UNCHANGED =-1, + // 8bit, gray + IMREAD_GRAYSCALE =0, + // ?, color + IMREAD_COLOR =1, + // any depth, ? 
+ IMREAD_ANYDEPTH =2, + // ?, any color + IMREAD_ANYCOLOR =4 +}; + +enum +{ + IMWRITE_JPEG_QUALITY =1, + IMWRITE_PNG_COMPRESSION =16, + IMWRITE_PNG_STRATEGY =17, + IMWRITE_PNG_BILEVEL =18, + IMWRITE_PNG_STRATEGY_DEFAULT =0, + IMWRITE_PNG_STRATEGY_FILTERED =1, + IMWRITE_PNG_STRATEGY_HUFFMAN_ONLY =2, + IMWRITE_PNG_STRATEGY_RLE =3, + IMWRITE_PNG_STRATEGY_FIXED =4, + IMWRITE_PXM_BINARY =32 +}; + +CV_EXPORTS_W Mat imread( const string& filename, int flags=1 ); +CV_EXPORTS_W bool imwrite( const string& filename, InputArray img, + const vector& params=vector()); +CV_EXPORTS_W Mat imdecode( InputArray buf, int flags ); +CV_EXPORTS Mat imdecode( InputArray buf, int flags, Mat* dst ); +CV_EXPORTS_W bool imencode( const string& ext, InputArray img, + CV_OUT vector& buf, + const vector& params=vector()); + +#ifndef CV_NO_VIDEO_CAPTURE_CPP_API + +template<> void CV_EXPORTS Ptr::delete_obj(); +template<> void CV_EXPORTS Ptr::delete_obj(); + +class CV_EXPORTS_W VideoCapture +{ +public: + CV_WRAP VideoCapture(); + CV_WRAP VideoCapture(const string& filename); + CV_WRAP VideoCapture(int device); + + virtual ~VideoCapture(); + CV_WRAP virtual bool open(const string& filename); + CV_WRAP virtual bool open(int device); + CV_WRAP virtual bool isOpened() const; + CV_WRAP virtual void release(); + + CV_WRAP virtual bool grab(); + CV_WRAP virtual bool retrieve(CV_OUT Mat& image, int channel=0); + virtual VideoCapture& operator >> (CV_OUT Mat& image); + CV_WRAP virtual bool read(CV_OUT Mat& image); + + CV_WRAP virtual bool set(int propId, double value); + CV_WRAP virtual double get(int propId); + +protected: + Ptr cap; +}; + + +class CV_EXPORTS_W VideoWriter +{ +public: + CV_WRAP VideoWriter(); + CV_WRAP VideoWriter(const string& filename, int fourcc, double fps, + Size frameSize, bool isColor=true); + + virtual ~VideoWriter(); + CV_WRAP virtual bool open(const string& filename, int fourcc, double fps, + Size frameSize, bool isColor=true); + CV_WRAP virtual bool isOpened() const; + CV_WRAP virtual void release(); + virtual VideoWriter& operator << (const Mat& image); + CV_WRAP virtual void write(const Mat& image); + +protected: + Ptr writer; +}; + +#endif + +} + +#endif + +#endif diff --git a/src/c/hardware/rasberrypi/libraries/opencv/opencv2/highgui/highgui_c.h b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/highgui/highgui_c.h new file mode 100644 index 0000000..85a59bb --- /dev/null +++ b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/highgui/highgui_c.h @@ -0,0 +1,660 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// Intel License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000, Intel Corporation, all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. 
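// Illustrative sketch (not part of the original header) of the C++ highgui API declared above.
// The file name and camera index are placeholders.
cv::Mat img = cv::imread("input.jpg", 1);
if (!img.empty())
{
    cv::namedWindow("preview", cv::WINDOW_AUTOSIZE);
    cv::imshow("preview", img);
    cv::waitKey(0);                     // wait for a key press before continuing
}

cv::VideoCapture cap(0);                // open the default camera
cv::Mat frame;
while (cap.isOpened() && cap.read(frame))
{
    cv::imshow("preview", frame);
    if (cv::waitKey(30) >= 0) break;    // stop on any key press
}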
+// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of Intel Corporation may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef __OPENCV_HIGHGUI_H__ +#define __OPENCV_HIGHGUI_H__ + +#include "opencv2/core/core_c.h" + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +/****************************************************************************************\ +* Basic GUI functions * +\****************************************************************************************/ +//YV +//-----------New for Qt +/* For font */ +enum { CV_FONT_LIGHT = 25,//QFont::Light, + CV_FONT_NORMAL = 50,//QFont::Normal, + CV_FONT_DEMIBOLD = 63,//QFont::DemiBold, + CV_FONT_BOLD = 75,//QFont::Bold, + CV_FONT_BLACK = 87 //QFont::Black +}; + +enum { CV_STYLE_NORMAL = 0,//QFont::StyleNormal, + CV_STYLE_ITALIC = 1,//QFont::StyleItalic, + CV_STYLE_OBLIQUE = 2 //QFont::StyleOblique +}; +/* ---------*/ + +//for color cvScalar(blue_component, green_component, red\_component[, alpha_component]) +//and alpha= 0 <-> 0xFF (not transparent <-> transparent) +CVAPI(CvFont) cvFontQt(const char* nameFont, int pointSize CV_DEFAULT(-1), CvScalar color CV_DEFAULT(cvScalarAll(0)), int weight CV_DEFAULT(CV_FONT_NORMAL), int style CV_DEFAULT(CV_STYLE_NORMAL), int spacing CV_DEFAULT(0)); + +CVAPI(void) cvAddText(const CvArr* img, const char* text, CvPoint org, CvFont *arg2); + +CVAPI(void) cvDisplayOverlay(const char* name, const char* text, int delayms CV_DEFAULT(0)); +CVAPI(void) cvDisplayStatusBar(const char* name, const char* text, int delayms CV_DEFAULT(0)); + +CVAPI(void) cvSaveWindowParameters(const char* name); +CVAPI(void) cvLoadWindowParameters(const char* name); +CVAPI(int) cvStartLoop(int (*pt2Func)(int argc, char *argv[]), int argc, char* argv[]); +CVAPI(void) cvStopLoop( void ); + +typedef void (CV_CDECL *CvButtonCallback)(int state, void* userdata); +enum {CV_PUSH_BUTTON = 0, CV_CHECKBOX = 1, CV_RADIOBOX = 2}; +CVAPI(int) cvCreateButton( const char* button_name CV_DEFAULT(NULL),CvButtonCallback on_change CV_DEFAULT(NULL), void* userdata CV_DEFAULT(NULL) , int button_type CV_DEFAULT(CV_PUSH_BUTTON), int initial_button_state CV_DEFAULT(0)); +//---------------------- + + +/* this function is used to set some external parameters in case of X Window */ +CVAPI(int) cvInitSystem( int argc, char** argv ); + +CVAPI(int) cvStartWindowThread( void ); + +// --------- YV --------- +enum +{ + //These 3 flags are used by cvSet/GetWindowProperty + CV_WND_PROP_FULLSCREEN 
= 0, //to change/get window's fullscreen property + CV_WND_PROP_AUTOSIZE = 1, //to change/get window's autosize property + CV_WND_PROP_ASPECTRATIO= 2, //to change/get window's aspectratio property + CV_WND_PROP_OPENGL = 3, //to change/get window's opengl support + + //These 2 flags are used by cvNamedWindow and cvSet/GetWindowProperty + CV_WINDOW_NORMAL = 0x00000000, //the user can resize the window (no constraint) / also use to switch a fullscreen window to a normal size + CV_WINDOW_AUTOSIZE = 0x00000001, //the user cannot resize the window, the size is constrainted by the image displayed + CV_WINDOW_OPENGL = 0x00001000, //window with opengl support + + //Those flags are only for Qt + CV_GUI_EXPANDED = 0x00000000, //status bar and tool bar + CV_GUI_NORMAL = 0x00000010, //old fashious way + + //These 3 flags are used by cvNamedWindow and cvSet/GetWindowProperty + CV_WINDOW_FULLSCREEN = 1,//change the window to fullscreen + CV_WINDOW_FREERATIO = 0x00000100,//the image expends as much as it can (no ratio constraint) + CV_WINDOW_KEEPRATIO = 0x00000000//the ration image is respected. +}; + +/* create window */ +CVAPI(int) cvNamedWindow( const char* name, int flags CV_DEFAULT(CV_WINDOW_AUTOSIZE) ); + +/* Set and Get Property of the window */ +CVAPI(void) cvSetWindowProperty(const char* name, int prop_id, double prop_value); +CVAPI(double) cvGetWindowProperty(const char* name, int prop_id); + +/* display image within window (highgui windows remember their content) */ +CVAPI(void) cvShowImage( const char* name, const CvArr* image ); + +/* resize/move window */ +CVAPI(void) cvResizeWindow( const char* name, int width, int height ); +CVAPI(void) cvMoveWindow( const char* name, int x, int y ); + + +/* destroy window and all the trackers associated with it */ +CVAPI(void) cvDestroyWindow( const char* name ); + +CVAPI(void) cvDestroyAllWindows(void); + +/* get native window handle (HWND in case of Win32 and Widget in case of X Window) */ +CVAPI(void*) cvGetWindowHandle( const char* name ); + +/* get name of highgui window given its native handle */ +CVAPI(const char*) cvGetWindowName( void* window_handle ); + + +typedef void (CV_CDECL *CvTrackbarCallback)(int pos); + +/* create trackbar and display it on top of given window, set callback */ +CVAPI(int) cvCreateTrackbar( const char* trackbar_name, const char* window_name, + int* value, int count, CvTrackbarCallback on_change CV_DEFAULT(NULL)); + +typedef void (CV_CDECL *CvTrackbarCallback2)(int pos, void* userdata); + +CVAPI(int) cvCreateTrackbar2( const char* trackbar_name, const char* window_name, + int* value, int count, CvTrackbarCallback2 on_change, + void* userdata CV_DEFAULT(0)); + +/* retrieve or set trackbar position */ +CVAPI(int) cvGetTrackbarPos( const char* trackbar_name, const char* window_name ); +CVAPI(void) cvSetTrackbarPos( const char* trackbar_name, const char* window_name, int pos ); +CVAPI(void) cvSetTrackbarMax(const char* trackbar_name, const char* window_name, int maxval); + +enum +{ + CV_EVENT_MOUSEMOVE =0, + CV_EVENT_LBUTTONDOWN =1, + CV_EVENT_RBUTTONDOWN =2, + CV_EVENT_MBUTTONDOWN =3, + CV_EVENT_LBUTTONUP =4, + CV_EVENT_RBUTTONUP =5, + CV_EVENT_MBUTTONUP =6, + CV_EVENT_LBUTTONDBLCLK =7, + CV_EVENT_RBUTTONDBLCLK =8, + CV_EVENT_MBUTTONDBLCLK =9 +}; + +enum +{ + CV_EVENT_FLAG_LBUTTON =1, + CV_EVENT_FLAG_RBUTTON =2, + CV_EVENT_FLAG_MBUTTON =4, + CV_EVENT_FLAG_CTRLKEY =8, + CV_EVENT_FLAG_SHIFTKEY =16, + CV_EVENT_FLAG_ALTKEY =32 +}; + +typedef void (CV_CDECL *CvMouseCallback )(int event, int x, int y, int flags, void* param); + +/* 
assign callback for mouse events */ +CVAPI(void) cvSetMouseCallback( const char* window_name, CvMouseCallback on_mouse, + void* param CV_DEFAULT(NULL)); + +enum +{ +/* 8bit, color or not */ + CV_LOAD_IMAGE_UNCHANGED =-1, +/* 8bit, gray */ + CV_LOAD_IMAGE_GRAYSCALE =0, +/* ?, color */ + CV_LOAD_IMAGE_COLOR =1, +/* any depth, ? */ + CV_LOAD_IMAGE_ANYDEPTH =2, +/* ?, any color */ + CV_LOAD_IMAGE_ANYCOLOR =4 +}; + +/* load image from file + iscolor can be a combination of above flags where CV_LOAD_IMAGE_UNCHANGED + overrides the other flags + using CV_LOAD_IMAGE_ANYCOLOR alone is equivalent to CV_LOAD_IMAGE_UNCHANGED + unless CV_LOAD_IMAGE_ANYDEPTH is specified images are converted to 8bit +*/ +CVAPI(IplImage*) cvLoadImage( const char* filename, int iscolor CV_DEFAULT(CV_LOAD_IMAGE_COLOR)); +CVAPI(CvMat*) cvLoadImageM( const char* filename, int iscolor CV_DEFAULT(CV_LOAD_IMAGE_COLOR)); + +enum +{ + CV_IMWRITE_JPEG_QUALITY =1, + CV_IMWRITE_PNG_COMPRESSION =16, + CV_IMWRITE_PNG_STRATEGY =17, + CV_IMWRITE_PNG_BILEVEL =18, + CV_IMWRITE_PNG_STRATEGY_DEFAULT =0, + CV_IMWRITE_PNG_STRATEGY_FILTERED =1, + CV_IMWRITE_PNG_STRATEGY_HUFFMAN_ONLY =2, + CV_IMWRITE_PNG_STRATEGY_RLE =3, + CV_IMWRITE_PNG_STRATEGY_FIXED =4, + CV_IMWRITE_PXM_BINARY =32 +}; + +/* save image to file */ +CVAPI(int) cvSaveImage( const char* filename, const CvArr* image, + const int* params CV_DEFAULT(0) ); + +/* decode image stored in the buffer */ +CVAPI(IplImage*) cvDecodeImage( const CvMat* buf, int iscolor CV_DEFAULT(CV_LOAD_IMAGE_COLOR)); +CVAPI(CvMat*) cvDecodeImageM( const CvMat* buf, int iscolor CV_DEFAULT(CV_LOAD_IMAGE_COLOR)); + +/* encode image and store the result as a byte vector (single-row 8uC1 matrix) */ +CVAPI(CvMat*) cvEncodeImage( const char* ext, const CvArr* image, + const int* params CV_DEFAULT(0) ); + +enum +{ + CV_CVTIMG_FLIP =1, + CV_CVTIMG_SWAP_RB =2 +}; + +/* utility function: convert one image to another with optional vertical flip */ +CVAPI(void) cvConvertImage( const CvArr* src, CvArr* dst, int flags CV_DEFAULT(0)); + +/* wait for key event infinitely (delay<=0) or for "delay" milliseconds */ +CVAPI(int) cvWaitKey(int delay CV_DEFAULT(0)); + +// OpenGL support + +typedef void (CV_CDECL *CvOpenGlDrawCallback)(void* userdata); +CVAPI(void) cvSetOpenGlDrawCallback(const char* window_name, CvOpenGlDrawCallback callback, void* userdata CV_DEFAULT(NULL)); + +CVAPI(void) cvSetOpenGlContext(const char* window_name); +CVAPI(void) cvUpdateWindow(const char* window_name); + + +/****************************************************************************************\ +* Working with Video Files and Cameras * +\****************************************************************************************/ + +/* "black box" capture structure */ +typedef struct CvCapture CvCapture; + +/* start capturing frames from video file */ +CVAPI(CvCapture*) cvCreateFileCapture( const char* filename ); + +enum +{ + CV_CAP_ANY =0, // autodetect + + CV_CAP_MIL =100, // MIL proprietary drivers + + CV_CAP_VFW =200, // platform native + CV_CAP_V4L =200, + CV_CAP_V4L2 =200, + + CV_CAP_FIREWARE =300, // IEEE 1394 drivers + CV_CAP_FIREWIRE =300, + CV_CAP_IEEE1394 =300, + CV_CAP_DC1394 =300, + CV_CAP_CMU1394 =300, + + CV_CAP_STEREO =400, // TYZX proprietary drivers + CV_CAP_TYZX =400, + CV_TYZX_LEFT =400, + CV_TYZX_RIGHT =401, + CV_TYZX_COLOR =402, + CV_TYZX_Z =403, + + CV_CAP_QT =500, // QuickTime + + CV_CAP_UNICAP =600, // Unicap drivers + + CV_CAP_DSHOW =700, // DirectShow (via videoInput) + CV_CAP_MSMF =1400, // Microsoft Media Foundation 
(via videoInput) + + CV_CAP_PVAPI =800, // PvAPI, Prosilica GigE SDK + + CV_CAP_OPENNI =900, // OpenNI (for Kinect) + CV_CAP_OPENNI_ASUS =910, // OpenNI (for Asus Xtion) + + CV_CAP_ANDROID =1000, // Android + CV_CAP_ANDROID_BACK =CV_CAP_ANDROID+99, // Android back camera + CV_CAP_ANDROID_FRONT =CV_CAP_ANDROID+98, // Android front camera + + CV_CAP_XIAPI =1100, // XIMEA Camera API + + CV_CAP_AVFOUNDATION = 1200, // AVFoundation framework for iOS (OS X Lion will have the same API) + + CV_CAP_GIGANETIX = 1300, // Smartek Giganetix GigEVisionSDK + + CV_CAP_INTELPERC = 1500 // Intel Perceptual Computing SDK +}; + +/* start capturing frames from camera: index = camera_index + domain_offset (CV_CAP_*) */ +CVAPI(CvCapture*) cvCreateCameraCapture( int index ); + +/* grab a frame, return 1 on success, 0 on fail. + this function is thought to be fast */ +CVAPI(int) cvGrabFrame( CvCapture* capture ); + +/* get the frame grabbed with cvGrabFrame(..) + This function may apply some frame processing like + frame decompression, flipping etc. + !!!DO NOT RELEASE or MODIFY the retrieved frame!!! */ +CVAPI(IplImage*) cvRetrieveFrame( CvCapture* capture, int streamIdx CV_DEFAULT(0) ); + +/* Just a combination of cvGrabFrame and cvRetrieveFrame + !!!DO NOT RELEASE or MODIFY the retrieved frame!!! */ +CVAPI(IplImage*) cvQueryFrame( CvCapture* capture ); + +/* stop capturing/reading and free resources */ +CVAPI(void) cvReleaseCapture( CvCapture** capture ); + +enum +{ + // modes of the controlling registers (can be: auto, manual, auto single push, absolute Latter allowed with any other mode) + // every feature can have only one mode turned on at a time + CV_CAP_PROP_DC1394_OFF = -4, //turn the feature off (not controlled manually nor automatically) + CV_CAP_PROP_DC1394_MODE_MANUAL = -3, //set automatically when a value of the feature is set by the user + CV_CAP_PROP_DC1394_MODE_AUTO = -2, + CV_CAP_PROP_DC1394_MODE_ONE_PUSH_AUTO = -1, + CV_CAP_PROP_POS_MSEC =0, + CV_CAP_PROP_POS_FRAMES =1, + CV_CAP_PROP_POS_AVI_RATIO =2, + CV_CAP_PROP_FRAME_WIDTH =3, + CV_CAP_PROP_FRAME_HEIGHT =4, + CV_CAP_PROP_FPS =5, + CV_CAP_PROP_FOURCC =6, + CV_CAP_PROP_FRAME_COUNT =7, + CV_CAP_PROP_FORMAT =8, + CV_CAP_PROP_MODE =9, + CV_CAP_PROP_BRIGHTNESS =10, + CV_CAP_PROP_CONTRAST =11, + CV_CAP_PROP_SATURATION =12, + CV_CAP_PROP_HUE =13, + CV_CAP_PROP_GAIN =14, + CV_CAP_PROP_EXPOSURE =15, + CV_CAP_PROP_CONVERT_RGB =16, + CV_CAP_PROP_WHITE_BALANCE_U =17, + CV_CAP_PROP_RECTIFICATION =18, + CV_CAP_PROP_MONOCROME =19, + CV_CAP_PROP_SHARPNESS =20, + CV_CAP_PROP_AUTO_EXPOSURE =21, // exposure control done by camera, + // user can adjust refernce level + // using this feature + CV_CAP_PROP_GAMMA =22, + CV_CAP_PROP_TEMPERATURE =23, + CV_CAP_PROP_TRIGGER =24, + CV_CAP_PROP_TRIGGER_DELAY =25, + CV_CAP_PROP_WHITE_BALANCE_V =26, + CV_CAP_PROP_ZOOM =27, + CV_CAP_PROP_FOCUS =28, + CV_CAP_PROP_GUID =29, + CV_CAP_PROP_ISO_SPEED =30, + CV_CAP_PROP_MAX_DC1394 =31, + CV_CAP_PROP_BACKLIGHT =32, + CV_CAP_PROP_PAN =33, + CV_CAP_PROP_TILT =34, + CV_CAP_PROP_ROLL =35, + CV_CAP_PROP_IRIS =36, + CV_CAP_PROP_SETTINGS =37, + CV_CAP_PROP_BUFFERSIZE =38, + + CV_CAP_PROP_AUTOGRAB =1024, // property for highgui class CvCapture_Android only + CV_CAP_PROP_SUPPORTED_PREVIEW_SIZES_STRING=1025, // readonly, tricky property, returns cpnst char* indeed + CV_CAP_PROP_PREVIEW_FORMAT=1026, // readonly, tricky property, returns cpnst char* indeed + + // OpenNI map generators + CV_CAP_OPENNI_DEPTH_GENERATOR = 1 << 31, + CV_CAP_OPENNI_IMAGE_GENERATOR = 1 << 30, + 
CV_CAP_OPENNI_GENERATORS_MASK = CV_CAP_OPENNI_DEPTH_GENERATOR + CV_CAP_OPENNI_IMAGE_GENERATOR, + + // Properties of cameras available through OpenNI interfaces + CV_CAP_PROP_OPENNI_OUTPUT_MODE = 100, + CV_CAP_PROP_OPENNI_FRAME_MAX_DEPTH = 101, // in mm + CV_CAP_PROP_OPENNI_BASELINE = 102, // in mm + CV_CAP_PROP_OPENNI_FOCAL_LENGTH = 103, // in pixels + CV_CAP_PROP_OPENNI_REGISTRATION = 104, // flag + CV_CAP_PROP_OPENNI_REGISTRATION_ON = CV_CAP_PROP_OPENNI_REGISTRATION, // flag that synchronizes the remapping depth map to image map + // by changing depth generator's view point (if the flag is "on") or + // sets this view point to its normal one (if the flag is "off"). + CV_CAP_PROP_OPENNI_APPROX_FRAME_SYNC = 105, + CV_CAP_PROP_OPENNI_MAX_BUFFER_SIZE = 106, + CV_CAP_PROP_OPENNI_CIRCLE_BUFFER = 107, + CV_CAP_PROP_OPENNI_MAX_TIME_DURATION = 108, + + CV_CAP_PROP_OPENNI_GENERATOR_PRESENT = 109, + + CV_CAP_OPENNI_IMAGE_GENERATOR_PRESENT = CV_CAP_OPENNI_IMAGE_GENERATOR + CV_CAP_PROP_OPENNI_GENERATOR_PRESENT, + CV_CAP_OPENNI_IMAGE_GENERATOR_OUTPUT_MODE = CV_CAP_OPENNI_IMAGE_GENERATOR + CV_CAP_PROP_OPENNI_OUTPUT_MODE, + CV_CAP_OPENNI_DEPTH_GENERATOR_BASELINE = CV_CAP_OPENNI_DEPTH_GENERATOR + CV_CAP_PROP_OPENNI_BASELINE, + CV_CAP_OPENNI_DEPTH_GENERATOR_FOCAL_LENGTH = CV_CAP_OPENNI_DEPTH_GENERATOR + CV_CAP_PROP_OPENNI_FOCAL_LENGTH, + CV_CAP_OPENNI_DEPTH_GENERATOR_REGISTRATION = CV_CAP_OPENNI_DEPTH_GENERATOR + CV_CAP_PROP_OPENNI_REGISTRATION, + CV_CAP_OPENNI_DEPTH_GENERATOR_REGISTRATION_ON = CV_CAP_OPENNI_DEPTH_GENERATOR_REGISTRATION, + + // Properties of cameras available through GStreamer interface + CV_CAP_GSTREAMER_QUEUE_LENGTH = 200, // default is 1 + + // PVAPI + CV_CAP_PROP_PVAPI_MULTICASTIP = 300, // ip for anable multicast master mode. 0 for disable multicast + CV_CAP_PROP_PVAPI_FRAMESTARTTRIGGERMODE = 301, // FrameStartTriggerMode: Determines how a frame is initiated + CV_CAP_PROP_PVAPI_DECIMATIONHORIZONTAL = 302, // Horizontal sub-sampling of the image + CV_CAP_PROP_PVAPI_DECIMATIONVERTICAL = 303, // Vertical sub-sampling of the image + CV_CAP_PROP_PVAPI_BINNINGX = 304, // Horizontal binning factor + CV_CAP_PROP_PVAPI_BINNINGY = 305, // Vertical binning factor + CV_CAP_PROP_PVAPI_PIXELFORMAT = 306, // Pixel format + + // Properties of cameras available through XIMEA SDK interface + CV_CAP_PROP_XI_DOWNSAMPLING = 400, // Change image resolution by binning or skipping. + CV_CAP_PROP_XI_DATA_FORMAT = 401, // Output data format. + CV_CAP_PROP_XI_OFFSET_X = 402, // Horizontal offset from the origin to the area of interest (in pixels). + CV_CAP_PROP_XI_OFFSET_Y = 403, // Vertical offset from the origin to the area of interest (in pixels). + CV_CAP_PROP_XI_TRG_SOURCE = 404, // Defines source of trigger. + CV_CAP_PROP_XI_TRG_SOFTWARE = 405, // Generates an internal trigger. PRM_TRG_SOURCE must be set to TRG_SOFTWARE. 
+ CV_CAP_PROP_XI_GPI_SELECTOR = 406, // Selects general purpose input + CV_CAP_PROP_XI_GPI_MODE = 407, // Set general purpose input mode + CV_CAP_PROP_XI_GPI_LEVEL = 408, // Get general purpose level + CV_CAP_PROP_XI_GPO_SELECTOR = 409, // Selects general purpose output + CV_CAP_PROP_XI_GPO_MODE = 410, // Set general purpose output mode + CV_CAP_PROP_XI_LED_SELECTOR = 411, // Selects camera signalling LED + CV_CAP_PROP_XI_LED_MODE = 412, // Define camera signalling LED functionality + CV_CAP_PROP_XI_MANUAL_WB = 413, // Calculates White Balance(must be called during acquisition) + CV_CAP_PROP_XI_AUTO_WB = 414, // Automatic white balance + CV_CAP_PROP_XI_AEAG = 415, // Automatic exposure/gain + CV_CAP_PROP_XI_EXP_PRIORITY = 416, // Exposure priority (0.5 - exposure 50%, gain 50%). + CV_CAP_PROP_XI_AE_MAX_LIMIT = 417, // Maximum limit of exposure in AEAG procedure + CV_CAP_PROP_XI_AG_MAX_LIMIT = 418, // Maximum limit of gain in AEAG procedure + CV_CAP_PROP_XI_AEAG_LEVEL = 419, // Average intensity of output signal AEAG should achieve(in %) + CV_CAP_PROP_XI_TIMEOUT = 420, // Image capture timeout in milliseconds + + // Properties for Android cameras + CV_CAP_PROP_ANDROID_FLASH_MODE = 8001, + CV_CAP_PROP_ANDROID_FOCUS_MODE = 8002, + CV_CAP_PROP_ANDROID_WHITE_BALANCE = 8003, + CV_CAP_PROP_ANDROID_ANTIBANDING = 8004, + CV_CAP_PROP_ANDROID_FOCAL_LENGTH = 8005, + CV_CAP_PROP_ANDROID_FOCUS_DISTANCE_NEAR = 8006, + CV_CAP_PROP_ANDROID_FOCUS_DISTANCE_OPTIMAL = 8007, + CV_CAP_PROP_ANDROID_FOCUS_DISTANCE_FAR = 8008, + CV_CAP_PROP_ANDROID_EXPOSE_LOCK = 8009, + CV_CAP_PROP_ANDROID_WHITEBALANCE_LOCK = 8010, + + // Properties of cameras available through AVFOUNDATION interface + CV_CAP_PROP_IOS_DEVICE_FOCUS = 9001, + CV_CAP_PROP_IOS_DEVICE_EXPOSURE = 9002, + CV_CAP_PROP_IOS_DEVICE_FLASH = 9003, + CV_CAP_PROP_IOS_DEVICE_WHITEBALANCE = 9004, + CV_CAP_PROP_IOS_DEVICE_TORCH = 9005, + + // Properties of cameras available through Smartek Giganetix Ethernet Vision interface + /* --- Vladimir Litvinenko (litvinenko.vladimir@gmail.com) --- */ + CV_CAP_PROP_GIGA_FRAME_OFFSET_X = 10001, + CV_CAP_PROP_GIGA_FRAME_OFFSET_Y = 10002, + CV_CAP_PROP_GIGA_FRAME_WIDTH_MAX = 10003, + CV_CAP_PROP_GIGA_FRAME_HEIGH_MAX = 10004, + CV_CAP_PROP_GIGA_FRAME_SENS_WIDTH = 10005, + CV_CAP_PROP_GIGA_FRAME_SENS_HEIGH = 10006, + + CV_CAP_PROP_INTELPERC_PROFILE_COUNT = 11001, + CV_CAP_PROP_INTELPERC_PROFILE_IDX = 11002, + CV_CAP_PROP_INTELPERC_DEPTH_LOW_CONFIDENCE_VALUE = 11003, + CV_CAP_PROP_INTELPERC_DEPTH_SATURATION_VALUE = 11004, + CV_CAP_PROP_INTELPERC_DEPTH_CONFIDENCE_THRESHOLD = 11005, + CV_CAP_PROP_INTELPERC_DEPTH_FOCAL_LENGTH_HORZ = 11006, + CV_CAP_PROP_INTELPERC_DEPTH_FOCAL_LENGTH_VERT = 11007, + + // Intel PerC streams + CV_CAP_INTELPERC_DEPTH_GENERATOR = 1 << 29, + CV_CAP_INTELPERC_IMAGE_GENERATOR = 1 << 28, + CV_CAP_INTELPERC_GENERATORS_MASK = CV_CAP_INTELPERC_DEPTH_GENERATOR + CV_CAP_INTELPERC_IMAGE_GENERATOR +}; + +enum +{ + // Data given from depth generator. + CV_CAP_OPENNI_DEPTH_MAP = 0, // Depth values in mm (CV_16UC1) + CV_CAP_OPENNI_POINT_CLOUD_MAP = 1, // XYZ in meters (CV_32FC3) + CV_CAP_OPENNI_DISPARITY_MAP = 2, // Disparity in pixels (CV_8UC1) + CV_CAP_OPENNI_DISPARITY_MAP_32F = 3, // Disparity in pixels (CV_32FC1) + CV_CAP_OPENNI_VALID_DEPTH_MASK = 4, // CV_8UC1 + + // Data given from RGB image generator. 
+ CV_CAP_OPENNI_BGR_IMAGE = 5, + CV_CAP_OPENNI_GRAY_IMAGE = 6 +}; + +// Supported output modes of OpenNI image generator +enum +{ + CV_CAP_OPENNI_VGA_30HZ = 0, + CV_CAP_OPENNI_SXGA_15HZ = 1, + CV_CAP_OPENNI_SXGA_30HZ = 2, + CV_CAP_OPENNI_QVGA_30HZ = 3, + CV_CAP_OPENNI_QVGA_60HZ = 4 +}; + +//supported by Android camera output formats +enum +{ + CV_CAP_ANDROID_COLOR_FRAME_BGR = 0, //BGR + CV_CAP_ANDROID_COLOR_FRAME = CV_CAP_ANDROID_COLOR_FRAME_BGR, + CV_CAP_ANDROID_GREY_FRAME = 1, //Y + CV_CAP_ANDROID_COLOR_FRAME_RGB = 2, + CV_CAP_ANDROID_COLOR_FRAME_BGRA = 3, + CV_CAP_ANDROID_COLOR_FRAME_RGBA = 4 +}; + +// supported Android camera flash modes +enum +{ + CV_CAP_ANDROID_FLASH_MODE_AUTO = 0, + CV_CAP_ANDROID_FLASH_MODE_OFF, + CV_CAP_ANDROID_FLASH_MODE_ON, + CV_CAP_ANDROID_FLASH_MODE_RED_EYE, + CV_CAP_ANDROID_FLASH_MODE_TORCH +}; + +// supported Android camera focus modes +enum +{ + CV_CAP_ANDROID_FOCUS_MODE_AUTO = 0, + CV_CAP_ANDROID_FOCUS_MODE_CONTINUOUS_PICTURE, + CV_CAP_ANDROID_FOCUS_MODE_CONTINUOUS_VIDEO, + CV_CAP_ANDROID_FOCUS_MODE_EDOF, + CV_CAP_ANDROID_FOCUS_MODE_FIXED, + CV_CAP_ANDROID_FOCUS_MODE_INFINITY, + CV_CAP_ANDROID_FOCUS_MODE_MACRO +}; + +// supported Android camera white balance modes +enum +{ + CV_CAP_ANDROID_WHITE_BALANCE_AUTO = 0, + CV_CAP_ANDROID_WHITE_BALANCE_CLOUDY_DAYLIGHT, + CV_CAP_ANDROID_WHITE_BALANCE_DAYLIGHT, + CV_CAP_ANDROID_WHITE_BALANCE_FLUORESCENT, + CV_CAP_ANDROID_WHITE_BALANCE_INCANDESCENT, + CV_CAP_ANDROID_WHITE_BALANCE_SHADE, + CV_CAP_ANDROID_WHITE_BALANCE_TWILIGHT, + CV_CAP_ANDROID_WHITE_BALANCE_WARM_FLUORESCENT +}; + +// supported Android camera antibanding modes +enum +{ + CV_CAP_ANDROID_ANTIBANDING_50HZ = 0, + CV_CAP_ANDROID_ANTIBANDING_60HZ, + CV_CAP_ANDROID_ANTIBANDING_AUTO, + CV_CAP_ANDROID_ANTIBANDING_OFF +}; + +enum +{ + CV_CAP_INTELPERC_DEPTH_MAP = 0, // Each pixel is a 16-bit integer. The value indicates the distance from an object to the camera's XY plane or the Cartesian depth. + CV_CAP_INTELPERC_UVDEPTH_MAP = 1, // Each pixel contains two 32-bit floating point values in the range of 0-1, representing the mapping of depth coordinates to the color coordinates. + CV_CAP_INTELPERC_IR_MAP = 2, // Each pixel is a 16-bit integer. The value indicates the intensity of the reflected laser beam. 
+ CV_CAP_INTELPERC_IMAGE = 3 +}; + +/* retrieve or set capture properties */ +CVAPI(double) cvGetCaptureProperty( CvCapture* capture, int property_id ); +CVAPI(int) cvSetCaptureProperty( CvCapture* capture, int property_id, double value ); + +// Return the type of the capturer (eg, CV_CAP_V4W, CV_CAP_UNICAP), which is unknown if created with CV_CAP_ANY +CVAPI(int) cvGetCaptureDomain( CvCapture* capture); + +/* "black box" video file writer structure */ +typedef struct CvVideoWriter CvVideoWriter; + +#define CV_FOURCC_MACRO(c1, c2, c3, c4) (((c1) & 255) + (((c2) & 255) << 8) + (((c3) & 255) << 16) + (((c4) & 255) << 24)) + +CV_INLINE int CV_FOURCC(char c1, char c2, char c3, char c4) +{ + return CV_FOURCC_MACRO(c1, c2, c3, c4); +} + +#define CV_FOURCC_PROMPT -1 /* Open Codec Selection Dialog (Windows only) */ +#define CV_FOURCC_DEFAULT CV_FOURCC('I', 'Y', 'U', 'V') /* Use default codec for specified filename (Linux only) */ + +/* initialize video file writer */ +CVAPI(CvVideoWriter*) cvCreateVideoWriter( const char* filename, int fourcc, + double fps, CvSize frame_size, + int is_color CV_DEFAULT(1)); + +//CVAPI(CvVideoWriter*) cvCreateImageSequenceWriter( const char* filename, +// int is_color CV_DEFAULT(1)); + +/* write frame to video file */ +CVAPI(int) cvWriteFrame( CvVideoWriter* writer, const IplImage* image ); + +/* close video file writer */ +CVAPI(void) cvReleaseVideoWriter( CvVideoWriter** writer ); + +/****************************************************************************************\ +* Obsolete functions/synonyms * +\****************************************************************************************/ + +#define cvCaptureFromFile cvCreateFileCapture +#define cvCaptureFromCAM cvCreateCameraCapture +#define cvCaptureFromAVI cvCaptureFromFile +#define cvCreateAVIWriter cvCreateVideoWriter +#define cvWriteToAVI cvWriteFrame +#define cvAddSearchPath(path) +#define cvvInitSystem cvInitSystem +#define cvvNamedWindow cvNamedWindow +#define cvvShowImage cvShowImage +#define cvvResizeWindow cvResizeWindow +#define cvvDestroyWindow cvDestroyWindow +#define cvvCreateTrackbar cvCreateTrackbar +#define cvvLoadImage(name) cvLoadImage((name),1) +#define cvvSaveImage cvSaveImage +#define cvvAddSearchPath cvAddSearchPath +#define cvvWaitKey(name) cvWaitKey(0) +#define cvvWaitKeyEx(name,delay) cvWaitKey(delay) +#define cvvConvertImage cvConvertImage +#define HG_AUTOSIZE CV_WINDOW_AUTOSIZE +#define set_preprocess_func cvSetPreprocessFuncWin32 +#define set_postprocess_func cvSetPostprocessFuncWin32 + +#if defined WIN32 || defined _WIN32 + +CVAPI(void) cvSetPreprocessFuncWin32_(const void* callback); +CVAPI(void) cvSetPostprocessFuncWin32_(const void* callback); +#define cvSetPreprocessFuncWin32(callback) cvSetPreprocessFuncWin32_((const void*)(callback)) +#define cvSetPostprocessFuncWin32(callback) cvSetPostprocessFuncWin32_((const void*)(callback)) + +#endif + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/src/c/hardware/rasberrypi/libraries/opencv/opencv2/highgui/ios.h b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/highgui/ios.h new file mode 100644 index 0000000..a7f0395 --- /dev/null +++ b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/highgui/ios.h @@ -0,0 +1,49 @@ + +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. 
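/* Illustrative sketch (not part of the original header) of the C capture/writer API declared
   above. File names are placeholders, and the writer's frame size is assumed to match the
   frames returned by the capture. */
CvCapture* cap = cvCreateFileCapture("in.avi");
CvVideoWriter* writer = cvCreateVideoWriter("out.avi", CV_FOURCC('M','J','P','G'),
                                            25.0, cvSize(640, 480), 1);
IplImage* frame;
while ((frame = cvQueryFrame(cap)) != NULL)   /* do not release or modify the returned frame */
    cvWriteFrame(writer, frame);

cvReleaseVideoWriter(&writer);
cvReleaseCapture(&cap);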
+// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#include "opencv2/core/core.hpp" +#import "opencv2/highgui/cap_ios.h" + +UIImage* MatToUIImage(const cv::Mat& image); +void UIImageToMat(const UIImage* image, + cv::Mat& m, bool alphaExist = false); diff --git a/src/c/hardware/rasberrypi/libraries/opencv/opencv2/imgproc.hpp b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/imgproc.hpp new file mode 100644 index 0000000..112f723 --- /dev/null +++ b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/imgproc.hpp @@ -0,0 +1,43 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. 
+// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#include "opencv2/imgproc/imgproc.hpp" diff --git a/src/c/hardware/rasberrypi/libraries/opencv/opencv2/imgproc/imgproc.hpp b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/imgproc/imgproc.hpp new file mode 100644 index 0000000..aa6a5f6 --- /dev/null +++ b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/imgproc/imgproc.hpp @@ -0,0 +1,1299 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. 
+// +//M*/ + +#ifndef __OPENCV_IMGPROC_HPP__ +#define __OPENCV_IMGPROC_HPP__ + +#include "opencv2/core/core.hpp" +#include "opencv2/imgproc/types_c.h" + +#ifdef __cplusplus + +/*! \namespace cv + Namespace where all the C++ OpenCV functionality resides + */ +namespace cv +{ + +//! various border interpolation methods +enum { BORDER_REPLICATE=IPL_BORDER_REPLICATE, BORDER_CONSTANT=IPL_BORDER_CONSTANT, + BORDER_REFLECT=IPL_BORDER_REFLECT, BORDER_WRAP=IPL_BORDER_WRAP, + BORDER_REFLECT_101=IPL_BORDER_REFLECT_101, BORDER_REFLECT101=BORDER_REFLECT_101, + BORDER_TRANSPARENT=IPL_BORDER_TRANSPARENT, + BORDER_DEFAULT=BORDER_REFLECT_101, BORDER_ISOLATED=16 }; + +//! 1D interpolation function: returns coordinate of the "donor" pixel for the specified location p. +CV_EXPORTS_W int borderInterpolate( int p, int len, int borderType ); + +/*! + The Base Class for 1D or Row-wise Filters + + This is the base class for linear or non-linear filters that process 1D data. + In particular, such filters are used for the "horizontal" filtering parts in separable filters. + + Several functions in OpenCV return Ptr for the specific types of filters, + and those pointers can be used directly or within cv::FilterEngine. +*/ +class CV_EXPORTS BaseRowFilter +{ +public: + //! the default constructor + BaseRowFilter(); + //! the destructor + virtual ~BaseRowFilter(); + //! the filtering operator. Must be overridden in the derived classes. The horizontal border interpolation is done outside of the class. + virtual void operator()(const uchar* src, uchar* dst, + int width, int cn) = 0; + int ksize, anchor; +}; + + +/*! + The Base Class for Column-wise Filters + + This is the base class for linear or non-linear filters that process columns of 2D arrays. + Such filters are used for the "vertical" filtering parts in separable filters. + + Several functions in OpenCV return Ptr for the specific types of filters, + and those pointers can be used directly or within cv::FilterEngine. + + Unlike cv::BaseRowFilter, cv::BaseColumnFilter may have some context information, + i.e. box filter keeps the sliding sum of elements. To reset the state BaseColumnFilter::reset() + must be called (e.g. the method is called by cv::FilterEngine) + */ +class CV_EXPORTS BaseColumnFilter +{ +public: + //! the default constructor + BaseColumnFilter(); + //! the destructor + virtual ~BaseColumnFilter(); + //! the filtering operator. Must be overridden in the derived classes. The vertical border interpolation is done outside of the class. + virtual void operator()(const uchar** src, uchar* dst, int dststep, + int dstcount, int width) = 0; + //! resets the internal buffers, if any + virtual void reset(); + int ksize, anchor; +}; + +/*! + The Base Class for Non-Separable 2D Filters. + + This is the base class for linear or non-linear 2D filters. + + Several functions in OpenCV return Ptr for the specific types of filters, + and those pointers can be used directly or within cv::FilterEngine. + + Similar to cv::BaseColumnFilter, the class may have some context information, + that should be reset using BaseFilter::reset() method before processing the new array. +*/ +class CV_EXPORTS BaseFilter +{ +public: + //! the default constructor + BaseFilter(); + //! the destructor + virtual ~BaseFilter(); + //! the filtering operator. The horizontal and the vertical border interpolation is done outside of the class. + virtual void operator()(const uchar** src, uchar* dst, int dststep, + int dstcount, int width, int cn) = 0; + //! 
resets the internal buffers, if any + virtual void reset(); + Size ksize; + Point anchor; +}; + +/*! + The Main Class for Image Filtering. + + The class can be used to apply an arbitrary filtering operation to an image. + It contains all the necessary intermediate buffers, it computes extrapolated values + of the "virtual" pixels outside of the image etc. + Pointers to the initialized cv::FilterEngine instances + are returned by various OpenCV functions, such as cv::createSeparableLinearFilter(), + cv::createLinearFilter(), cv::createGaussianFilter(), cv::createDerivFilter(), + cv::createBoxFilter() and cv::createMorphologyFilter(). + + Using the class you can process large images by parts and build complex pipelines + that include filtering as some of the stages. If all you need is to apply some pre-defined + filtering operation, you may use cv::filter2D(), cv::erode(), cv::dilate() etc. + functions that create FilterEngine internally. + + Here is the example on how to use the class to implement Laplacian operator, which is the sum of + second-order derivatives. More complex variant for different types is implemented in cv::Laplacian(). + + \code + void laplace_f(const Mat& src, Mat& dst) + { + CV_Assert( src.type() == CV_32F ); + // make sure the destination array has the proper size and type + dst.create(src.size(), src.type()); + + // get the derivative and smooth kernels for d2I/dx2. + // for d2I/dy2 we could use the same kernels, just swapped + Mat kd, ks; + getSobelKernels( kd, ks, 2, 0, ksize, false, ktype ); + + // let's process 10 source rows at once + int DELTA = std::min(10, src.rows); + Ptr Fxx = createSeparableLinearFilter(src.type(), + dst.type(), kd, ks, Point(-1,-1), 0, borderType, borderType, Scalar() ); + Ptr Fyy = createSeparableLinearFilter(src.type(), + dst.type(), ks, kd, Point(-1,-1), 0, borderType, borderType, Scalar() ); + + int y = Fxx->start(src), dsty = 0, dy = 0; + Fyy->start(src); + const uchar* sptr = src.data + y*src.step; + + // allocate the buffers for the spatial image derivatives; + // the buffers need to have more than DELTA rows, because at the + // last iteration the output may take max(kd.rows-1,ks.rows-1) + // rows more than the input. + Mat Ixx( DELTA + kd.rows - 1, src.cols, dst.type() ); + Mat Iyy( DELTA + kd.rows - 1, src.cols, dst.type() ); + + // inside the loop we always pass DELTA rows to the filter + // (note that the "proceed" method takes care of possibe overflow, since + // it was given the actual image height in the "start" method) + // on output we can get: + // * < DELTA rows (the initial buffer accumulation stage) + // * = DELTA rows (settled state in the middle) + // * > DELTA rows (then the input image is over, but we generate + // "virtual" rows using the border mode and filter them) + // this variable number of output rows is dy. + // dsty is the current output row. + // sptr is the pointer to the first input row in the portion to process + for( ; dsty < dst.rows; sptr += DELTA*src.step, dsty += dy ) + { + Fxx->proceed( sptr, (int)src.step, DELTA, Ixx.data, (int)Ixx.step ); + dy = Fyy->proceed( sptr, (int)src.step, DELTA, d2y.data, (int)Iyy.step ); + if( dy > 0 ) + { + Mat dstripe = dst.rowRange(dsty, dsty + dy); + add(Ixx.rowRange(0, dy), Iyy.rowRange(0, dy), dstripe); + } + } + } + \endcode +*/ +class CV_EXPORTS FilterEngine +{ +public: + //! the default constructor + FilterEngine(); + //! the full constructor. Either _filter2D or both _rowFilter and _columnFilter must be non-empty. 
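    // Illustrative sketch (not part of the OpenCV declarations above): what borderInterpolate()
    // returns when it maps an out-of-range coordinate back into [0, len) for the border modes
    // defined at the top of this header, using len == 5:
    //
    //     cv::borderInterpolate(-2, 5, cv::BORDER_REPLICATE)   == 0    // clamp to the first pixel
    //     cv::borderInterpolate(-2, 5, cv::BORDER_REFLECT_101) == 2    // mirror without repeating the edge pixel
    //     cv::borderInterpolate( 6, 5, cv::BORDER_WRAP)        == 1    // wrap around to the start
    //     cv::borderInterpolate( 6, 5, cv::BORDER_CONSTANT)    == -1   // caller substitutes the border value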
+ FilterEngine(const Ptr& _filter2D, + const Ptr& _rowFilter, + const Ptr& _columnFilter, + int srcType, int dstType, int bufType, + int _rowBorderType=BORDER_REPLICATE, + int _columnBorderType=-1, + const Scalar& _borderValue=Scalar()); + //! the destructor + virtual ~FilterEngine(); + //! reinitializes the engine. The previously assigned filters are released. + void init(const Ptr& _filter2D, + const Ptr& _rowFilter, + const Ptr& _columnFilter, + int srcType, int dstType, int bufType, + int _rowBorderType=BORDER_REPLICATE, int _columnBorderType=-1, + const Scalar& _borderValue=Scalar()); + //! starts filtering of the specified ROI of an image of size wholeSize. + virtual int start(Size wholeSize, Rect roi, int maxBufRows=-1); + //! starts filtering of the specified ROI of the specified image. + virtual int start(const Mat& src, const Rect& srcRoi=Rect(0,0,-1,-1), + bool isolated=false, int maxBufRows=-1); + //! processes the next srcCount rows of the image. + virtual int proceed(const uchar* src, int srcStep, int srcCount, + uchar* dst, int dstStep); + //! applies filter to the specified ROI of the image. if srcRoi=(0,0,-1,-1), the whole image is filtered. + virtual void apply( const Mat& src, Mat& dst, + const Rect& srcRoi=Rect(0,0,-1,-1), + Point dstOfs=Point(0,0), + bool isolated=false); + //! returns true if the filter is separable + bool isSeparable() const { return (const BaseFilter*)filter2D == 0; } + //! returns the number + int remainingInputRows() const; + int remainingOutputRows() const; + + int srcType, dstType, bufType; + Size ksize; + Point anchor; + int maxWidth; + Size wholeSize; + Rect roi; + int dx1, dx2; + int rowBorderType, columnBorderType; + vector borderTab; + int borderElemSize; + vector ringBuf; + vector srcRow; + vector constBorderValue; + vector constBorderRow; + int bufStep, startY, startY0, endY, rowCount, dstY; + vector rows; + + Ptr filter2D; + Ptr rowFilter; + Ptr columnFilter; +}; + +//! type of the kernel +enum { KERNEL_GENERAL=0, KERNEL_SYMMETRICAL=1, KERNEL_ASYMMETRICAL=2, + KERNEL_SMOOTH=4, KERNEL_INTEGER=8 }; + +//! returns type (one of KERNEL_*) of 1D or 2D kernel specified by its coefficients. +CV_EXPORTS int getKernelType(InputArray kernel, Point anchor); + +//! returns the primitive row filter with the specified kernel +CV_EXPORTS Ptr getLinearRowFilter(int srcType, int bufType, + InputArray kernel, int anchor, + int symmetryType); + +//! returns the primitive column filter with the specified kernel +CV_EXPORTS Ptr getLinearColumnFilter(int bufType, int dstType, + InputArray kernel, int anchor, + int symmetryType, double delta=0, + int bits=0); + +//! returns 2D filter with the specified kernel +CV_EXPORTS Ptr getLinearFilter(int srcType, int dstType, + InputArray kernel, + Point anchor=Point(-1,-1), + double delta=0, int bits=0); + +//! returns the separable linear filter engine +CV_EXPORTS Ptr createSeparableLinearFilter(int srcType, int dstType, + InputArray rowKernel, InputArray columnKernel, + Point anchor=Point(-1,-1), double delta=0, + int rowBorderType=BORDER_DEFAULT, + int columnBorderType=-1, + const Scalar& borderValue=Scalar()); + +//! returns the non-separable linear filter engine +CV_EXPORTS Ptr createLinearFilter(int srcType, int dstType, + InputArray kernel, Point _anchor=Point(-1,-1), + double delta=0, int rowBorderType=BORDER_DEFAULT, + int columnBorderType=-1, const Scalar& borderValue=Scalar()); + +//! 
returns the Gaussian kernel with the specified parameters +CV_EXPORTS_W Mat getGaussianKernel( int ksize, double sigma, int ktype=CV_64F ); + +//! returns the Gaussian filter engine +CV_EXPORTS Ptr createGaussianFilter( int type, Size ksize, + double sigma1, double sigma2=0, + int borderType=BORDER_DEFAULT); +//! initializes kernels of the generalized Sobel operator +CV_EXPORTS_W void getDerivKernels( OutputArray kx, OutputArray ky, + int dx, int dy, int ksize, + bool normalize=false, int ktype=CV_32F ); +//! returns filter engine for the generalized Sobel operator +CV_EXPORTS Ptr createDerivFilter( int srcType, int dstType, + int dx, int dy, int ksize, + int borderType=BORDER_DEFAULT ); +//! returns horizontal 1D box filter +CV_EXPORTS Ptr getRowSumFilter(int srcType, int sumType, + int ksize, int anchor=-1); +//! returns vertical 1D box filter +CV_EXPORTS Ptr getColumnSumFilter( int sumType, int dstType, + int ksize, int anchor=-1, + double scale=1); +//! returns box filter engine +CV_EXPORTS Ptr createBoxFilter( int srcType, int dstType, Size ksize, + Point anchor=Point(-1,-1), + bool normalize=true, + int borderType=BORDER_DEFAULT); + +//! returns the Gabor kernel with the specified parameters +CV_EXPORTS_W Mat getGaborKernel( Size ksize, double sigma, double theta, double lambd, + double gamma, double psi=CV_PI*0.5, int ktype=CV_64F ); + +//! type of morphological operation +enum { MORPH_ERODE=CV_MOP_ERODE, MORPH_DILATE=CV_MOP_DILATE, + MORPH_OPEN=CV_MOP_OPEN, MORPH_CLOSE=CV_MOP_CLOSE, + MORPH_GRADIENT=CV_MOP_GRADIENT, MORPH_TOPHAT=CV_MOP_TOPHAT, + MORPH_BLACKHAT=CV_MOP_BLACKHAT, MORPH_HITMISS }; + +//! returns horizontal 1D morphological filter +CV_EXPORTS Ptr getMorphologyRowFilter(int op, int type, int ksize, int anchor=-1); +//! returns vertical 1D morphological filter +CV_EXPORTS Ptr getMorphologyColumnFilter(int op, int type, int ksize, int anchor=-1); +//! returns 2D morphological filter +CV_EXPORTS Ptr getMorphologyFilter(int op, int type, InputArray kernel, + Point anchor=Point(-1,-1)); + +//! returns "magic" border value for erosion and dilation. It is automatically transformed to Scalar::all(-DBL_MAX) for dilation. +static inline Scalar morphologyDefaultBorderValue() { return Scalar::all(DBL_MAX); } + +//! returns morphological filter engine. Only MORPH_ERODE and MORPH_DILATE are supported. +CV_EXPORTS Ptr createMorphologyFilter(int op, int type, InputArray kernel, + Point anchor=Point(-1,-1), int rowBorderType=BORDER_CONSTANT, + int columnBorderType=-1, + const Scalar& borderValue=morphologyDefaultBorderValue()); + +//! shape of the structuring element +enum { MORPH_RECT=0, MORPH_CROSS=1, MORPH_ELLIPSE=2 }; +//! returns structuring element of the specified shape and size +CV_EXPORTS_W Mat getStructuringElement(int shape, Size ksize, Point anchor=Point(-1,-1)); + +template<> CV_EXPORTS void Ptr::delete_obj(); + +//! copies 2D array to a larger destination array with extrapolation of the outer part of src using the specified border mode +CV_EXPORTS_W void copyMakeBorder( InputArray src, OutputArray dst, + int top, int bottom, int left, int right, + int borderType, const Scalar& value=Scalar() ); + +//! smooths the image using median filter. +CV_EXPORTS_W void medianBlur( InputArray src, OutputArray dst, int ksize ); +//! smooths the image using Gaussian filter. +CV_EXPORTS_W void GaussianBlur( InputArray src, + OutputArray dst, Size ksize, + double sigmaX, double sigmaY=0, + int borderType=BORDER_DEFAULT ); +//! 
smooths the image using bilateral filter +CV_EXPORTS_W void bilateralFilter( InputArray src, OutputArray dst, int d, + double sigmaColor, double sigmaSpace, + int borderType=BORDER_DEFAULT ); +//! smooths the image using adaptive bilateral filter +CV_EXPORTS_W void adaptiveBilateralFilter( InputArray src, OutputArray dst, Size ksize, + double sigmaSpace, double maxSigmaColor = 20.0, Point anchor=Point(-1, -1), + int borderType=BORDER_DEFAULT ); +//! smooths the image using the box filter. Each pixel is processed in O(1) time +CV_EXPORTS_W void boxFilter( InputArray src, OutputArray dst, int ddepth, + Size ksize, Point anchor=Point(-1,-1), + bool normalize=true, + int borderType=BORDER_DEFAULT ); +//! a synonym for normalized box filter +CV_EXPORTS_W void blur( InputArray src, OutputArray dst, + Size ksize, Point anchor=Point(-1,-1), + int borderType=BORDER_DEFAULT ); + +//! applies non-separable 2D linear filter to the image +CV_EXPORTS_W void filter2D( InputArray src, OutputArray dst, int ddepth, + InputArray kernel, Point anchor=Point(-1,-1), + double delta=0, int borderType=BORDER_DEFAULT ); + +//! applies separable 2D linear filter to the image +CV_EXPORTS_W void sepFilter2D( InputArray src, OutputArray dst, int ddepth, + InputArray kernelX, InputArray kernelY, + Point anchor=Point(-1,-1), + double delta=0, int borderType=BORDER_DEFAULT ); + +//! applies generalized Sobel operator to the image +CV_EXPORTS_W void Sobel( InputArray src, OutputArray dst, int ddepth, + int dx, int dy, int ksize=3, + double scale=1, double delta=0, + int borderType=BORDER_DEFAULT ); + +//! applies the vertical or horizontal Scharr operator to the image +CV_EXPORTS_W void Scharr( InputArray src, OutputArray dst, int ddepth, + int dx, int dy, double scale=1, double delta=0, + int borderType=BORDER_DEFAULT ); + +//! applies Laplacian operator to the image +CV_EXPORTS_W void Laplacian( InputArray src, OutputArray dst, int ddepth, + int ksize=1, double scale=1, double delta=0, + int borderType=BORDER_DEFAULT ); + +//! applies Canny edge detector and produces the edge map. +CV_EXPORTS_W void Canny( InputArray image, OutputArray edges, + double threshold1, double threshold2, + int apertureSize=3, bool L2gradient=false ); + +//! computes minimum eigen value of 2x2 derivative covariation matrix at each pixel - the cornerness criteria +CV_EXPORTS_W void cornerMinEigenVal( InputArray src, OutputArray dst, + int blockSize, int ksize=3, + int borderType=BORDER_DEFAULT ); + +//! computes Harris cornerness criteria at each image pixel +CV_EXPORTS_W void cornerHarris( InputArray src, OutputArray dst, int blockSize, + int ksize, double k, + int borderType=BORDER_DEFAULT ); + +// low-level function for computing eigenvalues and eigenvectors of 2x2 matrices +CV_EXPORTS void eigen2x2( const float* a, float* e, int n ); + +//! computes both eigenvalues and the eigenvectors of 2x2 derivative covariation matrix at each pixel. The output is stored as 6-channel matrix. +CV_EXPORTS_W void cornerEigenValsAndVecs( InputArray src, OutputArray dst, + int blockSize, int ksize, + int borderType=BORDER_DEFAULT ); + +//! computes another complex cornerness criteria at each pixel +CV_EXPORTS_W void preCornerDetect( InputArray src, OutputArray dst, int ksize, + int borderType=BORDER_DEFAULT ); + +//! adjusts the corner locations with sub-pixel accuracy to maximize the certain cornerness criteria +CV_EXPORTS_W void cornerSubPix( InputArray image, InputOutputArray corners, + Size winSize, Size zeroZone, + TermCriteria criteria ); + +//! 
finds the strong enough corners where the cornerMinEigenVal() or cornerHarris() report the local maxima +CV_EXPORTS_W void goodFeaturesToTrack( InputArray image, OutputArray corners, + int maxCorners, double qualityLevel, double minDistance, + InputArray mask=noArray(), int blockSize=3, + bool useHarrisDetector=false, double k=0.04 ); + +//! finds lines in the black-n-white image using the standard or pyramid Hough transform +CV_EXPORTS_W void HoughLines( InputArray image, OutputArray lines, + double rho, double theta, int threshold, + double srn=0, double stn=0 ); + +//! finds line segments in the black-n-white image using probabilistic Hough transform +CV_EXPORTS_W void HoughLinesP( InputArray image, OutputArray lines, + double rho, double theta, int threshold, + double minLineLength=0, double maxLineGap=0 ); + +//! finds circles in the grayscale image using 2+1 gradient Hough transform +CV_EXPORTS_W void HoughCircles( InputArray image, OutputArray circles, + int method, double dp, double minDist, + double param1=100, double param2=100, + int minRadius=0, int maxRadius=0 ); + +enum +{ + GHT_POSITION = 0, + GHT_SCALE = 1, + GHT_ROTATION = 2 +}; + +//! finds arbitrary template in the grayscale image using Generalized Hough Transform +//! Ballard, D.H. (1981). Generalizing the Hough transform to detect arbitrary shapes. Pattern Recognition 13 (2): 111-122. +//! Guil, N., González-Linares, J.M. and Zapata, E.L. (1999). Bidimensional shape detection using an invariant approach. Pattern Recognition 32 (6): 1025-1038. +class CV_EXPORTS GeneralizedHough : public Algorithm +{ +public: + static Ptr create(int method); + + virtual ~GeneralizedHough(); + + //! set template to search + void setTemplate(InputArray templ, int cannyThreshold = 100, Point templCenter = Point(-1, -1)); + void setTemplate(InputArray edges, InputArray dx, InputArray dy, Point templCenter = Point(-1, -1)); + + //! find template on image + void detect(InputArray image, OutputArray positions, OutputArray votes = cv::noArray(), int cannyThreshold = 100); + void detect(InputArray edges, InputArray dx, InputArray dy, OutputArray positions, OutputArray votes = cv::noArray()); + + void release(); + +protected: + virtual void setTemplateImpl(const Mat& edges, const Mat& dx, const Mat& dy, Point templCenter) = 0; + virtual void detectImpl(const Mat& edges, const Mat& dx, const Mat& dy, OutputArray positions, OutputArray votes) = 0; + virtual void releaseImpl() = 0; + +private: + Mat edges_, dx_, dy_; +}; + +//! erodes the image (applies the local minimum operator) +CV_EXPORTS_W void erode( InputArray src, OutputArray dst, InputArray kernel, + Point anchor=Point(-1,-1), int iterations=1, + int borderType=BORDER_CONSTANT, + const Scalar& borderValue=morphologyDefaultBorderValue() ); + +//! dilates the image (applies the local maximum operator) +CV_EXPORTS_W void dilate( InputArray src, OutputArray dst, InputArray kernel, + Point anchor=Point(-1,-1), int iterations=1, + int borderType=BORDER_CONSTANT, + const Scalar& borderValue=morphologyDefaultBorderValue() ); + +//! applies an advanced morphological operation to the image +CV_EXPORTS_W void morphologyEx( InputArray src, OutputArray dst, + int op, InputArray kernel, + Point anchor=Point(-1,-1), int iterations=1, + int borderType=BORDER_CONSTANT, + const Scalar& borderValue=morphologyDefaultBorderValue() ); + +//! 
interpolation algorithm +enum +{ + INTER_NEAREST=CV_INTER_NN, //!< nearest neighbor interpolation + INTER_LINEAR=CV_INTER_LINEAR, //!< bilinear interpolation + INTER_CUBIC=CV_INTER_CUBIC, //!< bicubic interpolation + INTER_AREA=CV_INTER_AREA, //!< area-based (or super) interpolation + INTER_LANCZOS4=CV_INTER_LANCZOS4, //!< Lanczos interpolation over 8x8 neighborhood + INTER_MAX=7, + WARP_INVERSE_MAP=CV_WARP_INVERSE_MAP +}; + +//! resizes the image +CV_EXPORTS_W void resize( InputArray src, OutputArray dst, + Size dsize, double fx=0, double fy=0, + int interpolation=INTER_LINEAR ); + +//! warps the image using affine transformation +CV_EXPORTS_W void warpAffine( InputArray src, OutputArray dst, + InputArray M, Size dsize, + int flags=INTER_LINEAR, + int borderMode=BORDER_CONSTANT, + const Scalar& borderValue=Scalar()); + +//! warps the image using perspective transformation +CV_EXPORTS_W void warpPerspective( InputArray src, OutputArray dst, + InputArray M, Size dsize, + int flags=INTER_LINEAR, + int borderMode=BORDER_CONSTANT, + const Scalar& borderValue=Scalar()); + +enum +{ + INTER_BITS=5, INTER_BITS2=INTER_BITS*2, + INTER_TAB_SIZE=(1< CV_EXPORTS void Ptr::delete_obj(); + +//! computes the joint dense histogram for a set of images. +CV_EXPORTS void calcHist( const Mat* images, int nimages, + const int* channels, InputArray mask, + OutputArray hist, int dims, const int* histSize, + const float** ranges, bool uniform=true, bool accumulate=false ); + +//! computes the joint sparse histogram for a set of images. +CV_EXPORTS void calcHist( const Mat* images, int nimages, + const int* channels, InputArray mask, + SparseMat& hist, int dims, + const int* histSize, const float** ranges, + bool uniform=true, bool accumulate=false ); + +CV_EXPORTS_W void calcHist( InputArrayOfArrays images, + const vector& channels, + InputArray mask, OutputArray hist, + const vector& histSize, + const vector& ranges, + bool accumulate=false ); + +//! computes back projection for the set of images +CV_EXPORTS void calcBackProject( const Mat* images, int nimages, + const int* channels, InputArray hist, + OutputArray backProject, const float** ranges, + double scale=1, bool uniform=true ); + +//! computes back projection for the set of images +CV_EXPORTS void calcBackProject( const Mat* images, int nimages, + const int* channels, const SparseMat& hist, + OutputArray backProject, const float** ranges, + double scale=1, bool uniform=true ); + +CV_EXPORTS_W void calcBackProject( InputArrayOfArrays images, const vector& channels, + InputArray hist, OutputArray dst, + const vector& ranges, + double scale ); + +/*CV_EXPORTS void calcBackProjectPatch( const Mat* images, int nimages, const int* channels, + InputArray hist, OutputArray dst, Size patchSize, + int method, double factor=1 ); + +CV_EXPORTS_W void calcBackProjectPatch( InputArrayOfArrays images, const vector& channels, + InputArray hist, OutputArray dst, Size patchSize, + int method, double factor=1 );*/ + +//! compares two histograms stored in dense arrays +CV_EXPORTS_W double compareHist( InputArray H1, InputArray H2, int method ); + +//! compares two histograms stored in sparse arrays +CV_EXPORTS double compareHist( const SparseMat& H1, const SparseMat& H2, int method ); + +//! 
normalizes the grayscale image brightness and contrast by normalizing its histogram +CV_EXPORTS_W void equalizeHist( InputArray src, OutputArray dst ); + +class CV_EXPORTS_W CLAHE : public Algorithm +{ +public: + CV_WRAP virtual void apply(InputArray src, OutputArray dst) = 0; + + CV_WRAP virtual void setClipLimit(double clipLimit) = 0; + CV_WRAP virtual double getClipLimit() const = 0; + + CV_WRAP virtual void setTilesGridSize(Size tileGridSize) = 0; + CV_WRAP virtual Size getTilesGridSize() const = 0; + + CV_WRAP virtual void collectGarbage() = 0; +}; +CV_EXPORTS_W Ptr createCLAHE(double clipLimit = 40.0, Size tileGridSize = Size(8, 8)); + +CV_EXPORTS float EMD( InputArray signature1, InputArray signature2, + int distType, InputArray cost=noArray(), + float* lowerBound=0, OutputArray flow=noArray() ); + +//! segments the image using watershed algorithm +CV_EXPORTS_W void watershed( InputArray image, InputOutputArray markers ); + +//! filters image using meanshift algorithm +CV_EXPORTS_W void pyrMeanShiftFiltering( InputArray src, OutputArray dst, + double sp, double sr, int maxLevel=1, + TermCriteria termcrit=TermCriteria( + TermCriteria::MAX_ITER+TermCriteria::EPS,5,1) ); + +//! class of the pixel in GrabCut algorithm +enum +{ + GC_BGD = 0, //!< background + GC_FGD = 1, //!< foreground + GC_PR_BGD = 2, //!< most probably background + GC_PR_FGD = 3 //!< most probably foreground +}; + +//! GrabCut algorithm flags +enum +{ + GC_INIT_WITH_RECT = 0, + GC_INIT_WITH_MASK = 1, + GC_EVAL = 2 +}; + +//! segments the image using GrabCut algorithm +CV_EXPORTS_W void grabCut( InputArray img, InputOutputArray mask, Rect rect, + InputOutputArray bgdModel, InputOutputArray fgdModel, + int iterCount, int mode = GC_EVAL ); + +enum +{ + DIST_LABEL_CCOMP = 0, + DIST_LABEL_PIXEL = 1 +}; + +//! builds the discrete Voronoi diagram +CV_EXPORTS_AS(distanceTransformWithLabels) void distanceTransform( InputArray src, OutputArray dst, + OutputArray labels, int distanceType, int maskSize, + int labelType=DIST_LABEL_CCOMP ); + +//! computes the distance transform map +CV_EXPORTS_W void distanceTransform( InputArray src, OutputArray dst, + int distanceType, int maskSize ); + +enum { FLOODFILL_FIXED_RANGE = 1 << 16, FLOODFILL_MASK_ONLY = 1 << 17 }; + +//! fills the semi-uniform image region starting from the specified seed point +CV_EXPORTS int floodFill( InputOutputArray image, + Point seedPoint, Scalar newVal, CV_OUT Rect* rect=0, + Scalar loDiff=Scalar(), Scalar upDiff=Scalar(), + int flags=4 ); + +//! 
fills the semi-uniform image region and/or the mask starting from the specified seed point +CV_EXPORTS_W int floodFill( InputOutputArray image, InputOutputArray mask, + Point seedPoint, Scalar newVal, CV_OUT Rect* rect=0, + Scalar loDiff=Scalar(), Scalar upDiff=Scalar(), + int flags=4 ); + + +enum +{ + COLOR_BGR2BGRA =0, + COLOR_RGB2RGBA =COLOR_BGR2BGRA, + + COLOR_BGRA2BGR =1, + COLOR_RGBA2RGB =COLOR_BGRA2BGR, + + COLOR_BGR2RGBA =2, + COLOR_RGB2BGRA =COLOR_BGR2RGBA, + + COLOR_RGBA2BGR =3, + COLOR_BGRA2RGB =COLOR_RGBA2BGR, + + COLOR_BGR2RGB =4, + COLOR_RGB2BGR =COLOR_BGR2RGB, + + COLOR_BGRA2RGBA =5, + COLOR_RGBA2BGRA =COLOR_BGRA2RGBA, + + COLOR_BGR2GRAY =6, + COLOR_RGB2GRAY =7, + COLOR_GRAY2BGR =8, + COLOR_GRAY2RGB =COLOR_GRAY2BGR, + COLOR_GRAY2BGRA =9, + COLOR_GRAY2RGBA =COLOR_GRAY2BGRA, + COLOR_BGRA2GRAY =10, + COLOR_RGBA2GRAY =11, + + COLOR_BGR2BGR565 =12, + COLOR_RGB2BGR565 =13, + COLOR_BGR5652BGR =14, + COLOR_BGR5652RGB =15, + COLOR_BGRA2BGR565 =16, + COLOR_RGBA2BGR565 =17, + COLOR_BGR5652BGRA =18, + COLOR_BGR5652RGBA =19, + + COLOR_GRAY2BGR565 =20, + COLOR_BGR5652GRAY =21, + + COLOR_BGR2BGR555 =22, + COLOR_RGB2BGR555 =23, + COLOR_BGR5552BGR =24, + COLOR_BGR5552RGB =25, + COLOR_BGRA2BGR555 =26, + COLOR_RGBA2BGR555 =27, + COLOR_BGR5552BGRA =28, + COLOR_BGR5552RGBA =29, + + COLOR_GRAY2BGR555 =30, + COLOR_BGR5552GRAY =31, + + COLOR_BGR2XYZ =32, + COLOR_RGB2XYZ =33, + COLOR_XYZ2BGR =34, + COLOR_XYZ2RGB =35, + + COLOR_BGR2YCrCb =36, + COLOR_RGB2YCrCb =37, + COLOR_YCrCb2BGR =38, + COLOR_YCrCb2RGB =39, + + COLOR_BGR2HSV =40, + COLOR_RGB2HSV =41, + + COLOR_BGR2Lab =44, + COLOR_RGB2Lab =45, + + COLOR_BayerBG2BGR =46, + COLOR_BayerGB2BGR =47, + COLOR_BayerRG2BGR =48, + COLOR_BayerGR2BGR =49, + + COLOR_BayerBG2RGB =COLOR_BayerRG2BGR, + COLOR_BayerGB2RGB =COLOR_BayerGR2BGR, + COLOR_BayerRG2RGB =COLOR_BayerBG2BGR, + COLOR_BayerGR2RGB =COLOR_BayerGB2BGR, + + COLOR_BGR2Luv =50, + COLOR_RGB2Luv =51, + COLOR_BGR2HLS =52, + COLOR_RGB2HLS =53, + + COLOR_HSV2BGR =54, + COLOR_HSV2RGB =55, + + COLOR_Lab2BGR =56, + COLOR_Lab2RGB =57, + COLOR_Luv2BGR =58, + COLOR_Luv2RGB =59, + COLOR_HLS2BGR =60, + COLOR_HLS2RGB =61, + + COLOR_BayerBG2BGR_VNG =62, + COLOR_BayerGB2BGR_VNG =63, + COLOR_BayerRG2BGR_VNG =64, + COLOR_BayerGR2BGR_VNG =65, + + COLOR_BayerBG2RGB_VNG =COLOR_BayerRG2BGR_VNG, + COLOR_BayerGB2RGB_VNG =COLOR_BayerGR2BGR_VNG, + COLOR_BayerRG2RGB_VNG =COLOR_BayerBG2BGR_VNG, + COLOR_BayerGR2RGB_VNG =COLOR_BayerGB2BGR_VNG, + + COLOR_BGR2HSV_FULL = 66, + COLOR_RGB2HSV_FULL = 67, + COLOR_BGR2HLS_FULL = 68, + COLOR_RGB2HLS_FULL = 69, + + COLOR_HSV2BGR_FULL = 70, + COLOR_HSV2RGB_FULL = 71, + COLOR_HLS2BGR_FULL = 72, + COLOR_HLS2RGB_FULL = 73, + + COLOR_LBGR2Lab = 74, + COLOR_LRGB2Lab = 75, + COLOR_LBGR2Luv = 76, + COLOR_LRGB2Luv = 77, + + COLOR_Lab2LBGR = 78, + COLOR_Lab2LRGB = 79, + COLOR_Luv2LBGR = 80, + COLOR_Luv2LRGB = 81, + + COLOR_BGR2YUV = 82, + COLOR_RGB2YUV = 83, + COLOR_YUV2BGR = 84, + COLOR_YUV2RGB = 85, + + COLOR_BayerBG2GRAY = 86, + COLOR_BayerGB2GRAY = 87, + COLOR_BayerRG2GRAY = 88, + COLOR_BayerGR2GRAY = 89, + + //YUV 4:2:0 formats family + COLOR_YUV2RGB_NV12 = 90, + COLOR_YUV2BGR_NV12 = 91, + COLOR_YUV2RGB_NV21 = 92, + COLOR_YUV2BGR_NV21 = 93, + COLOR_YUV420sp2RGB = COLOR_YUV2RGB_NV21, + COLOR_YUV420sp2BGR = COLOR_YUV2BGR_NV21, + + COLOR_YUV2RGBA_NV12 = 94, + COLOR_YUV2BGRA_NV12 = 95, + COLOR_YUV2RGBA_NV21 = 96, + COLOR_YUV2BGRA_NV21 = 97, + COLOR_YUV420sp2RGBA = COLOR_YUV2RGBA_NV21, + COLOR_YUV420sp2BGRA = COLOR_YUV2BGRA_NV21, + + COLOR_YUV2RGB_YV12 = 98, + COLOR_YUV2BGR_YV12 = 99, + 
COLOR_YUV2RGB_IYUV = 100, + COLOR_YUV2BGR_IYUV = 101, + COLOR_YUV2RGB_I420 = COLOR_YUV2RGB_IYUV, + COLOR_YUV2BGR_I420 = COLOR_YUV2BGR_IYUV, + COLOR_YUV420p2RGB = COLOR_YUV2RGB_YV12, + COLOR_YUV420p2BGR = COLOR_YUV2BGR_YV12, + + COLOR_YUV2RGBA_YV12 = 102, + COLOR_YUV2BGRA_YV12 = 103, + COLOR_YUV2RGBA_IYUV = 104, + COLOR_YUV2BGRA_IYUV = 105, + COLOR_YUV2RGBA_I420 = COLOR_YUV2RGBA_IYUV, + COLOR_YUV2BGRA_I420 = COLOR_YUV2BGRA_IYUV, + COLOR_YUV420p2RGBA = COLOR_YUV2RGBA_YV12, + COLOR_YUV420p2BGRA = COLOR_YUV2BGRA_YV12, + + COLOR_YUV2GRAY_420 = 106, + COLOR_YUV2GRAY_NV21 = COLOR_YUV2GRAY_420, + COLOR_YUV2GRAY_NV12 = COLOR_YUV2GRAY_420, + COLOR_YUV2GRAY_YV12 = COLOR_YUV2GRAY_420, + COLOR_YUV2GRAY_IYUV = COLOR_YUV2GRAY_420, + COLOR_YUV2GRAY_I420 = COLOR_YUV2GRAY_420, + COLOR_YUV420sp2GRAY = COLOR_YUV2GRAY_420, + COLOR_YUV420p2GRAY = COLOR_YUV2GRAY_420, + + //YUV 4:2:2 formats family + COLOR_YUV2RGB_UYVY = 107, + COLOR_YUV2BGR_UYVY = 108, + //COLOR_YUV2RGB_VYUY = 109, + //COLOR_YUV2BGR_VYUY = 110, + COLOR_YUV2RGB_Y422 = COLOR_YUV2RGB_UYVY, + COLOR_YUV2BGR_Y422 = COLOR_YUV2BGR_UYVY, + COLOR_YUV2RGB_UYNV = COLOR_YUV2RGB_UYVY, + COLOR_YUV2BGR_UYNV = COLOR_YUV2BGR_UYVY, + + COLOR_YUV2RGBA_UYVY = 111, + COLOR_YUV2BGRA_UYVY = 112, + //COLOR_YUV2RGBA_VYUY = 113, + //COLOR_YUV2BGRA_VYUY = 114, + COLOR_YUV2RGBA_Y422 = COLOR_YUV2RGBA_UYVY, + COLOR_YUV2BGRA_Y422 = COLOR_YUV2BGRA_UYVY, + COLOR_YUV2RGBA_UYNV = COLOR_YUV2RGBA_UYVY, + COLOR_YUV2BGRA_UYNV = COLOR_YUV2BGRA_UYVY, + + COLOR_YUV2RGB_YUY2 = 115, + COLOR_YUV2BGR_YUY2 = 116, + COLOR_YUV2RGB_YVYU = 117, + COLOR_YUV2BGR_YVYU = 118, + COLOR_YUV2RGB_YUYV = COLOR_YUV2RGB_YUY2, + COLOR_YUV2BGR_YUYV = COLOR_YUV2BGR_YUY2, + COLOR_YUV2RGB_YUNV = COLOR_YUV2RGB_YUY2, + COLOR_YUV2BGR_YUNV = COLOR_YUV2BGR_YUY2, + + COLOR_YUV2RGBA_YUY2 = 119, + COLOR_YUV2BGRA_YUY2 = 120, + COLOR_YUV2RGBA_YVYU = 121, + COLOR_YUV2BGRA_YVYU = 122, + COLOR_YUV2RGBA_YUYV = COLOR_YUV2RGBA_YUY2, + COLOR_YUV2BGRA_YUYV = COLOR_YUV2BGRA_YUY2, + COLOR_YUV2RGBA_YUNV = COLOR_YUV2RGBA_YUY2, + COLOR_YUV2BGRA_YUNV = COLOR_YUV2BGRA_YUY2, + + COLOR_YUV2GRAY_UYVY = 123, + COLOR_YUV2GRAY_YUY2 = 124, + //COLOR_YUV2GRAY_VYUY = COLOR_YUV2GRAY_UYVY, + COLOR_YUV2GRAY_Y422 = COLOR_YUV2GRAY_UYVY, + COLOR_YUV2GRAY_UYNV = COLOR_YUV2GRAY_UYVY, + COLOR_YUV2GRAY_YVYU = COLOR_YUV2GRAY_YUY2, + COLOR_YUV2GRAY_YUYV = COLOR_YUV2GRAY_YUY2, + COLOR_YUV2GRAY_YUNV = COLOR_YUV2GRAY_YUY2, + + // alpha premultiplication + COLOR_RGBA2mRGBA = 125, + COLOR_mRGBA2RGBA = 126, + + COLOR_RGB2YUV_I420 = 127, + COLOR_BGR2YUV_I420 = 128, + COLOR_RGB2YUV_IYUV = COLOR_RGB2YUV_I420, + COLOR_BGR2YUV_IYUV = COLOR_BGR2YUV_I420, + + COLOR_RGBA2YUV_I420 = 129, + COLOR_BGRA2YUV_I420 = 130, + COLOR_RGBA2YUV_IYUV = COLOR_RGBA2YUV_I420, + COLOR_BGRA2YUV_IYUV = COLOR_BGRA2YUV_I420, + COLOR_RGB2YUV_YV12 = 131, + COLOR_BGR2YUV_YV12 = 132, + COLOR_RGBA2YUV_YV12 = 133, + COLOR_BGRA2YUV_YV12 = 134, + + COLOR_COLORCVT_MAX = 135 +}; + + +//! converts image from one color space to another +CV_EXPORTS_W void cvtColor( InputArray src, OutputArray dst, int code, int dstCn=0 ); + +//! raster image moments +class CV_EXPORTS_W_MAP Moments +{ +public: + //! the default constructor + Moments(); + //! the full constructor + Moments(double m00, double m10, double m01, double m20, double m11, + double m02, double m30, double m21, double m12, double m03 ); + //! the conversion from CvMoments + Moments( const CvMoments& moments ); + //! the conversion to CvMoments + operator CvMoments() const; + + //! 
spatial moments + CV_PROP_RW double m00, m10, m01, m20, m11, m02, m30, m21, m12, m03; + //! central moments + CV_PROP_RW double mu20, mu11, mu02, mu30, mu21, mu12, mu03; + //! central normalized moments + CV_PROP_RW double nu20, nu11, nu02, nu30, nu21, nu12, nu03; +}; + +//! computes moments of the rasterized shape or a vector of points +CV_EXPORTS_W Moments moments( InputArray array, bool binaryImage=false ); + +//! computes 7 Hu invariants from the moments +CV_EXPORTS void HuMoments( const Moments& moments, double hu[7] ); +CV_EXPORTS_W void HuMoments( const Moments& m, CV_OUT OutputArray hu ); + +//! type of the template matching operation +enum { TM_SQDIFF=0, TM_SQDIFF_NORMED=1, TM_CCORR=2, TM_CCORR_NORMED=3, TM_CCOEFF=4, TM_CCOEFF_NORMED=5 }; + +//! computes the proximity map for the raster template and the image where the template is searched for +CV_EXPORTS_W void matchTemplate( InputArray image, InputArray templ, + OutputArray result, int method ); + +//! mode of the contour retrieval algorithm +enum +{ + RETR_EXTERNAL=CV_RETR_EXTERNAL, //!< retrieve only the most external (top-level) contours + RETR_LIST=CV_RETR_LIST, //!< retrieve all the contours without any hierarchical information + RETR_CCOMP=CV_RETR_CCOMP, //!< retrieve the connected components (that can possibly be nested) + RETR_TREE=CV_RETR_TREE, //!< retrieve all the contours and the whole hierarchy + RETR_FLOODFILL=CV_RETR_FLOODFILL +}; + +//! the contour approximation algorithm +enum +{ + CHAIN_APPROX_NONE=CV_CHAIN_APPROX_NONE, + CHAIN_APPROX_SIMPLE=CV_CHAIN_APPROX_SIMPLE, + CHAIN_APPROX_TC89_L1=CV_CHAIN_APPROX_TC89_L1, + CHAIN_APPROX_TC89_KCOS=CV_CHAIN_APPROX_TC89_KCOS +}; + +//! retrieves contours and the hierarchical information from black-n-white image. +CV_EXPORTS_W void findContours( InputOutputArray image, OutputArrayOfArrays contours, + OutputArray hierarchy, int mode, + int method, Point offset=Point()); + +//! retrieves contours from black-n-white image. +CV_EXPORTS void findContours( InputOutputArray image, OutputArrayOfArrays contours, + int mode, int method, Point offset=Point()); + +//! draws contours in the image +CV_EXPORTS_W void drawContours( InputOutputArray image, InputArrayOfArrays contours, + int contourIdx, const Scalar& color, + int thickness=1, int lineType=8, + InputArray hierarchy=noArray(), + int maxLevel=INT_MAX, Point offset=Point() ); + +//! approximates contour or a curve using Douglas-Peucker algorithm +CV_EXPORTS_W void approxPolyDP( InputArray curve, + OutputArray approxCurve, + double epsilon, bool closed ); + +//! computes the contour perimeter (closed=true) or a curve length +CV_EXPORTS_W double arcLength( InputArray curve, bool closed ); +//! computes the bounding rectangle for a contour +CV_EXPORTS_W Rect boundingRect( InputArray points ); +//! computes the contour area +CV_EXPORTS_W double contourArea( InputArray contour, bool oriented=false ); +//! computes the minimal rotated rectangle for a set of points +CV_EXPORTS_W RotatedRect minAreaRect( InputArray points ); +//! computes the minimal enclosing circle for a set of points +CV_EXPORTS_W void minEnclosingCircle( InputArray points, + CV_OUT Point2f& center, CV_OUT float& radius ); +//! matches two contours using one of the available algorithms +CV_EXPORTS_W double matchShapes( InputArray contour1, InputArray contour2, + int method, double parameter ); +//! computes convex hull for a set of 2D points. +CV_EXPORTS_W void convexHull( InputArray points, OutputArray hull, + bool clockwise=false, bool returnPoints=true ); +//! 
computes the contour convexity defects +CV_EXPORTS_W void convexityDefects( InputArray contour, InputArray convexhull, OutputArray convexityDefects ); + +//! returns true if the contour is convex. Does not support contours with self-intersection +CV_EXPORTS_W bool isContourConvex( InputArray contour ); + +//! finds intersection of two convex polygons +CV_EXPORTS_W float intersectConvexConvex( InputArray _p1, InputArray _p2, + OutputArray _p12, bool handleNested=true ); + +//! fits ellipse to the set of 2D points +CV_EXPORTS_W RotatedRect fitEllipse( InputArray points ); + +//! fits line to the set of 2D points using M-estimator algorithm +CV_EXPORTS_W void fitLine( InputArray points, OutputArray line, int distType, + double param, double reps, double aeps ); +//! checks if the point is inside the contour. Optionally computes the signed distance from the point to the contour boundary +CV_EXPORTS_W double pointPolygonTest( InputArray contour, Point2f pt, bool measureDist ); + + +class CV_EXPORTS_W Subdiv2D +{ +public: + enum + { + PTLOC_ERROR = -2, + PTLOC_OUTSIDE_RECT = -1, + PTLOC_INSIDE = 0, + PTLOC_VERTEX = 1, + PTLOC_ON_EDGE = 2 + }; + + enum + { + NEXT_AROUND_ORG = 0x00, + NEXT_AROUND_DST = 0x22, + PREV_AROUND_ORG = 0x11, + PREV_AROUND_DST = 0x33, + NEXT_AROUND_LEFT = 0x13, + NEXT_AROUND_RIGHT = 0x31, + PREV_AROUND_LEFT = 0x20, + PREV_AROUND_RIGHT = 0x02 + }; + + CV_WRAP Subdiv2D(); + CV_WRAP Subdiv2D(Rect rect); + CV_WRAP void initDelaunay(Rect rect); + + CV_WRAP int insert(Point2f pt); + CV_WRAP void insert(const vector& ptvec); + CV_WRAP int locate(Point2f pt, CV_OUT int& edge, CV_OUT int& vertex); + + CV_WRAP int findNearest(Point2f pt, CV_OUT Point2f* nearestPt=0); + CV_WRAP void getEdgeList(CV_OUT vector& edgeList) const; + CV_WRAP void getTriangleList(CV_OUT vector& triangleList) const; + CV_WRAP void getVoronoiFacetList(const vector& idx, CV_OUT vector >& facetList, + CV_OUT vector& facetCenters); + + CV_WRAP Point2f getVertex(int vertex, CV_OUT int* firstEdge=0) const; + + CV_WRAP int getEdge( int edge, int nextEdgeType ) const; + CV_WRAP int nextEdge(int edge) const; + CV_WRAP int rotateEdge(int edge, int rotate) const; + CV_WRAP int symEdge(int edge) const; + CV_WRAP int edgeOrg(int edge, CV_OUT Point2f* orgpt=0) const; + CV_WRAP int edgeDst(int edge, CV_OUT Point2f* dstpt=0) const; + +protected: + int newEdge(); + void deleteEdge(int edge); + int newPoint(Point2f pt, bool isvirtual, int firstEdge=0); + void deletePoint(int vtx); + void setEdgePoints( int edge, int orgPt, int dstPt ); + void splice( int edgeA, int edgeB ); + int connectEdges( int edgeA, int edgeB ); + void swapEdges( int edge ); + int isRightOf(Point2f pt, int edge) const; + void calcVoronoi(); + void clearVoronoi(); + void checkSubdiv() const; + + struct CV_EXPORTS Vertex + { + Vertex(); + Vertex(Point2f pt, bool _isvirtual, int _firstEdge=0); + bool isvirtual() const; + bool isfree() const; + int firstEdge; + int type; + Point2f pt; + }; + struct CV_EXPORTS QuadEdge + { + QuadEdge(); + QuadEdge(int edgeidx); + bool isfree() const; + int next[4]; + int pt[4]; + }; + + vector vtx; + vector qedges; + int freeQEdge; + int freePoint; + bool validGeometry; + + int recentEdge; + Point2f topLeft; + Point2f bottomRight; +}; + +} + +#endif /* __cplusplus */ + +#endif + +/* End of file. 
*/ diff --git a/src/c/hardware/rasberrypi/libraries/opencv/opencv2/imgproc/imgproc_c.h b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/imgproc/imgproc_c.h new file mode 100644 index 0000000..46d9f01 --- /dev/null +++ b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/imgproc/imgproc_c.h @@ -0,0 +1,623 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. 
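A short sketch of the contour-analysis functions declared above in imgproc.hpp (Canny, findContours, approxPolyDP, contourArea, boundingRect); illustrative only, with cv::rectangle and cv::Scalar coming from core.hpp, which imgproc.hpp already includes:

#include "opencv2/imgproc/imgproc.hpp"
#include <vector>

// Outline the sufficiently large objects found in a grayscale image.
static void outline_objects(const cv::Mat& gray, cv::Mat& color_out)
{
    cv::Mat edges;
    cv::Canny(gray, edges, 80, 160);                         // 8-bit binary edge map

    std::vector<std::vector<cv::Point> > contours;
    cv::findContours(edges, contours, cv::RETR_EXTERNAL, cv::CHAIN_APPROX_SIMPLE);

    cv::cvtColor(gray, color_out, cv::COLOR_GRAY2BGR);
    for (size_t i = 0; i < contours.size(); i++)
    {
        if (cv::contourArea(contours[i]) < 100.0)            // skip tiny specks
            continue;
        std::vector<cv::Point> poly;
        cv::approxPolyDP(contours[i], poly, 3.0, true);      // simplified closed outline
        cv::rectangle(color_out, cv::boundingRect(poly), cv::Scalar(0, 255, 0), 2);
    }
}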
+// +//M*/ + +#ifndef __OPENCV_IMGPROC_IMGPROC_C_H__ +#define __OPENCV_IMGPROC_IMGPROC_C_H__ + +#include "opencv2/core/core_c.h" +#include "opencv2/imgproc/types_c.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/*********************** Background statistics accumulation *****************************/ + +/* Adds image to accumulator */ +CVAPI(void) cvAcc( const CvArr* image, CvArr* sum, + const CvArr* mask CV_DEFAULT(NULL) ); + +/* Adds squared image to accumulator */ +CVAPI(void) cvSquareAcc( const CvArr* image, CvArr* sqsum, + const CvArr* mask CV_DEFAULT(NULL) ); + +/* Adds a product of two images to accumulator */ +CVAPI(void) cvMultiplyAcc( const CvArr* image1, const CvArr* image2, CvArr* acc, + const CvArr* mask CV_DEFAULT(NULL) ); + +/* Adds image to accumulator with weights: acc = acc*(1-alpha) + image*alpha */ +CVAPI(void) cvRunningAvg( const CvArr* image, CvArr* acc, double alpha, + const CvArr* mask CV_DEFAULT(NULL) ); + +/****************************************************************************************\ +* Image Processing * +\****************************************************************************************/ + +/* Copies source 2D array inside of the larger destination array and + makes a border of the specified type (IPL_BORDER_*) around the copied area. */ +CVAPI(void) cvCopyMakeBorder( const CvArr* src, CvArr* dst, CvPoint offset, + int bordertype, CvScalar value CV_DEFAULT(cvScalarAll(0))); + +/* Smoothes array (removes noise) */ +CVAPI(void) cvSmooth( const CvArr* src, CvArr* dst, + int smoothtype CV_DEFAULT(CV_GAUSSIAN), + int size1 CV_DEFAULT(3), + int size2 CV_DEFAULT(0), + double sigma1 CV_DEFAULT(0), + double sigma2 CV_DEFAULT(0)); + +/* Convolves the image with the kernel */ +CVAPI(void) cvFilter2D( const CvArr* src, CvArr* dst, const CvMat* kernel, + CvPoint anchor CV_DEFAULT(cvPoint(-1,-1))); + +/* Finds integral image: SUM(X,Y) = sum(x. + After that sum of histogram bins is equal to */ +CVAPI(void) cvNormalizeHist( CvHistogram* hist, double factor ); + + +/* Clear all histogram bins that are below the threshold */ +CVAPI(void) cvThreshHist( CvHistogram* hist, double threshold ); + + +/* Compares two histogram */ +CVAPI(double) cvCompareHist( const CvHistogram* hist1, + const CvHistogram* hist2, + int method); + +/* Copies one histogram to another. 
Destination histogram is created if + the destination pointer is NULL */ +CVAPI(void) cvCopyHist( const CvHistogram* src, CvHistogram** dst ); + + +/* Calculates bayesian probabilistic histograms + (each or src and dst is an array of histograms */ +CVAPI(void) cvCalcBayesianProb( CvHistogram** src, int number, + CvHistogram** dst); + +/* Calculates array histogram */ +CVAPI(void) cvCalcArrHist( CvArr** arr, CvHistogram* hist, + int accumulate CV_DEFAULT(0), + const CvArr* mask CV_DEFAULT(NULL) ); + +CV_INLINE void cvCalcHist( IplImage** image, CvHistogram* hist, + int accumulate CV_DEFAULT(0), + const CvArr* mask CV_DEFAULT(NULL) ) +{ + cvCalcArrHist( (CvArr**)image, hist, accumulate, mask ); +} + +/* Calculates back project */ +CVAPI(void) cvCalcArrBackProject( CvArr** image, CvArr* dst, + const CvHistogram* hist ); +#define cvCalcBackProject(image, dst, hist) cvCalcArrBackProject((CvArr**)image, dst, hist) + + +/* Does some sort of template matching but compares histograms of + template and each window location */ +CVAPI(void) cvCalcArrBackProjectPatch( CvArr** image, CvArr* dst, CvSize range, + CvHistogram* hist, int method, + double factor ); +#define cvCalcBackProjectPatch( image, dst, range, hist, method, factor ) \ + cvCalcArrBackProjectPatch( (CvArr**)image, dst, range, hist, method, factor ) + + +/* calculates probabilistic density (divides one histogram by another) */ +CVAPI(void) cvCalcProbDensity( const CvHistogram* hist1, const CvHistogram* hist2, + CvHistogram* dst_hist, double scale CV_DEFAULT(255) ); + +/* equalizes histogram of 8-bit single-channel image */ +CVAPI(void) cvEqualizeHist( const CvArr* src, CvArr* dst ); + + +/* Applies distance transform to binary image */ +CVAPI(void) cvDistTransform( const CvArr* src, CvArr* dst, + int distance_type CV_DEFAULT(CV_DIST_L2), + int mask_size CV_DEFAULT(3), + const float* mask CV_DEFAULT(NULL), + CvArr* labels CV_DEFAULT(NULL), + int labelType CV_DEFAULT(CV_DIST_LABEL_CCOMP)); + + +/* Applies fixed-level threshold to grayscale image. + This is a basic operation applied before retrieving contours */ +CVAPI(double) cvThreshold( const CvArr* src, CvArr* dst, + double threshold, double max_value, + int threshold_type ); + +/* Applies adaptive threshold to grayscale image. + The two parameters for methods CV_ADAPTIVE_THRESH_MEAN_C and + CV_ADAPTIVE_THRESH_GAUSSIAN_C are: + neighborhood size (3, 5, 7 etc.), + and a constant subtracted from mean (...,-3,-2,-1,0,1,2,3,...) 
*/ +CVAPI(void) cvAdaptiveThreshold( const CvArr* src, CvArr* dst, double max_value, + int adaptive_method CV_DEFAULT(CV_ADAPTIVE_THRESH_MEAN_C), + int threshold_type CV_DEFAULT(CV_THRESH_BINARY), + int block_size CV_DEFAULT(3), + double param1 CV_DEFAULT(5)); + +/* Fills the connected component until the color difference gets large enough */ +CVAPI(void) cvFloodFill( CvArr* image, CvPoint seed_point, + CvScalar new_val, CvScalar lo_diff CV_DEFAULT(cvScalarAll(0)), + CvScalar up_diff CV_DEFAULT(cvScalarAll(0)), + CvConnectedComp* comp CV_DEFAULT(NULL), + int flags CV_DEFAULT(4), + CvArr* mask CV_DEFAULT(NULL)); + +/****************************************************************************************\ +* Feature detection * +\****************************************************************************************/ + +/* Runs canny edge detector */ +CVAPI(void) cvCanny( const CvArr* image, CvArr* edges, double threshold1, + double threshold2, int aperture_size CV_DEFAULT(3) ); + +/* Calculates constraint image for corner detection + Dx^2 * Dyy + Dxx * Dy^2 - 2 * Dx * Dy * Dxy. + Applying threshold to the result gives coordinates of corners */ +CVAPI(void) cvPreCornerDetect( const CvArr* image, CvArr* corners, + int aperture_size CV_DEFAULT(3) ); + +/* Calculates eigen values and vectors of 2x2 + gradient covariation matrix at every image pixel */ +CVAPI(void) cvCornerEigenValsAndVecs( const CvArr* image, CvArr* eigenvv, + int block_size, int aperture_size CV_DEFAULT(3) ); + +/* Calculates minimal eigenvalue for 2x2 gradient covariation matrix at + every image pixel */ +CVAPI(void) cvCornerMinEigenVal( const CvArr* image, CvArr* eigenval, + int block_size, int aperture_size CV_DEFAULT(3) ); + +/* Harris corner detector: + Calculates det(M) - k*(trace(M)^2), where M is 2x2 gradient covariation matrix for each pixel */ +CVAPI(void) cvCornerHarris( const CvArr* image, CvArr* harris_response, + int block_size, int aperture_size CV_DEFAULT(3), + double k CV_DEFAULT(0.04) ); + +/* Adjust corner position using some sort of gradient search */ +CVAPI(void) cvFindCornerSubPix( const CvArr* image, CvPoint2D32f* corners, + int count, CvSize win, CvSize zero_zone, + CvTermCriteria criteria ); + +/* Finds a sparse set of points within the selected region + that seem to be easy to track */ +CVAPI(void) cvGoodFeaturesToTrack( const CvArr* image, CvArr* eig_image, + CvArr* temp_image, CvPoint2D32f* corners, + int* corner_count, double quality_level, + double min_distance, + const CvArr* mask CV_DEFAULT(NULL), + int block_size CV_DEFAULT(3), + int use_harris CV_DEFAULT(0), + double k CV_DEFAULT(0.04) ); + +/* Finds lines on binary image using one of several methods. + line_storage is either memory storage or 1 x CvMat, its + number of columns is changed by the function. 
+ method is one of CV_HOUGH_*; + rho, theta and threshold are used for each of those methods; + param1 ~ line length, param2 ~ line gap - for probabilistic, + param1 ~ srn, param2 ~ stn - for multi-scale */ +CVAPI(CvSeq*) cvHoughLines2( CvArr* image, void* line_storage, int method, + double rho, double theta, int threshold, + double param1 CV_DEFAULT(0), double param2 CV_DEFAULT(0)); + +/* Finds circles in the image */ +CVAPI(CvSeq*) cvHoughCircles( CvArr* image, void* circle_storage, + int method, double dp, double min_dist, + double param1 CV_DEFAULT(100), + double param2 CV_DEFAULT(100), + int min_radius CV_DEFAULT(0), + int max_radius CV_DEFAULT(0)); + +/* Fits a line into set of 2d or 3d points in a robust way (M-estimator technique) */ +CVAPI(void) cvFitLine( const CvArr* points, int dist_type, double param, + double reps, double aeps, float* line ); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/src/c/hardware/rasberrypi/libraries/opencv/opencv2/imgproc/types_c.h b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/imgproc/types_c.h new file mode 100644 index 0000000..4aba0a8 --- /dev/null +++ b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/imgproc/types_c.h @@ -0,0 +1,640 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. 
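A short sketch of the C feature-detection functions declared above in imgproc_c.h (cvCanny followed by probabilistic cvHoughLines2); illustrative only, with CV_HOUGH_PROBABILISTIC coming from the types_c.h header added below and the allocation/drawing helpers (cvCreateImage, cvCreateMemStorage, cvGetSeqElem, cvLine) from core_c.h:

#include "opencv2/imgproc/imgproc_c.h"

/* Detect line segments in an 8-bit single-channel image and draw them into a BGR image. */
static void draw_line_segments(IplImage* gray, IplImage* color_out)
{
    IplImage* edges = cvCreateImage(cvGetSize(gray), IPL_DEPTH_8U, 1);
    cvCanny(gray, edges, 80, 160, 3);

    CvMemStorage* storage = cvCreateMemStorage(0);
    /* probabilistic Hough: param1 ~ minimum segment length, param2 ~ maximum gap */
    CvSeq* lines = cvHoughLines2(edges, storage, CV_HOUGH_PROBABILISTIC,
                                 1, CV_PI/180, 50, 30, 10);

    for (int i = 0; i < lines->total; i++)
    {
        CvPoint* pts = (CvPoint*)cvGetSeqElem(lines, i);     /* two endpoints per segment */
        cvLine(color_out, pts[0], pts[1], cvScalar(0, 0, 255, 0), 2, 8, 0);
    }

    cvReleaseMemStorage(&storage);
    cvReleaseImage(&edges);
}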
+// +//M*/ + +#ifndef __OPENCV_IMGPROC_TYPES_C_H__ +#define __OPENCV_IMGPROC_TYPES_C_H__ + +#include "opencv2/core/core_c.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* Connected component structure */ +typedef struct CvConnectedComp +{ + double area; /* area of the connected component */ + CvScalar value; /* average color of the connected component */ + CvRect rect; /* ROI of the component */ + CvSeq* contour; /* optional component boundary + (the contour might have child contours corresponding to the holes)*/ +} +CvConnectedComp; + +/* Image smooth methods */ +enum +{ + CV_BLUR_NO_SCALE =0, + CV_BLUR =1, + CV_GAUSSIAN =2, + CV_MEDIAN =3, + CV_BILATERAL =4 +}; + +/* Filters used in pyramid decomposition */ +enum +{ + CV_GAUSSIAN_5x5 = 7 +}; + +/* Special filters */ +enum +{ + CV_SCHARR =-1, + CV_MAX_SOBEL_KSIZE =7 +}; + +/* Constants for color conversion */ +enum +{ + CV_BGR2BGRA =0, + CV_RGB2RGBA =CV_BGR2BGRA, + + CV_BGRA2BGR =1, + CV_RGBA2RGB =CV_BGRA2BGR, + + CV_BGR2RGBA =2, + CV_RGB2BGRA =CV_BGR2RGBA, + + CV_RGBA2BGR =3, + CV_BGRA2RGB =CV_RGBA2BGR, + + CV_BGR2RGB =4, + CV_RGB2BGR =CV_BGR2RGB, + + CV_BGRA2RGBA =5, + CV_RGBA2BGRA =CV_BGRA2RGBA, + + CV_BGR2GRAY =6, + CV_RGB2GRAY =7, + CV_GRAY2BGR =8, + CV_GRAY2RGB =CV_GRAY2BGR, + CV_GRAY2BGRA =9, + CV_GRAY2RGBA =CV_GRAY2BGRA, + CV_BGRA2GRAY =10, + CV_RGBA2GRAY =11, + + CV_BGR2BGR565 =12, + CV_RGB2BGR565 =13, + CV_BGR5652BGR =14, + CV_BGR5652RGB =15, + CV_BGRA2BGR565 =16, + CV_RGBA2BGR565 =17, + CV_BGR5652BGRA =18, + CV_BGR5652RGBA =19, + + CV_GRAY2BGR565 =20, + CV_BGR5652GRAY =21, + + CV_BGR2BGR555 =22, + CV_RGB2BGR555 =23, + CV_BGR5552BGR =24, + CV_BGR5552RGB =25, + CV_BGRA2BGR555 =26, + CV_RGBA2BGR555 =27, + CV_BGR5552BGRA =28, + CV_BGR5552RGBA =29, + + CV_GRAY2BGR555 =30, + CV_BGR5552GRAY =31, + + CV_BGR2XYZ =32, + CV_RGB2XYZ =33, + CV_XYZ2BGR =34, + CV_XYZ2RGB =35, + + CV_BGR2YCrCb =36, + CV_RGB2YCrCb =37, + CV_YCrCb2BGR =38, + CV_YCrCb2RGB =39, + + CV_BGR2HSV =40, + CV_RGB2HSV =41, + + CV_BGR2Lab =44, + CV_RGB2Lab =45, + + CV_BayerBG2BGR =46, + CV_BayerGB2BGR =47, + CV_BayerRG2BGR =48, + CV_BayerGR2BGR =49, + + CV_BayerBG2RGB =CV_BayerRG2BGR, + CV_BayerGB2RGB =CV_BayerGR2BGR, + CV_BayerRG2RGB =CV_BayerBG2BGR, + CV_BayerGR2RGB =CV_BayerGB2BGR, + + CV_BGR2Luv =50, + CV_RGB2Luv =51, + CV_BGR2HLS =52, + CV_RGB2HLS =53, + + CV_HSV2BGR =54, + CV_HSV2RGB =55, + + CV_Lab2BGR =56, + CV_Lab2RGB =57, + CV_Luv2BGR =58, + CV_Luv2RGB =59, + CV_HLS2BGR =60, + CV_HLS2RGB =61, + + CV_BayerBG2BGR_VNG =62, + CV_BayerGB2BGR_VNG =63, + CV_BayerRG2BGR_VNG =64, + CV_BayerGR2BGR_VNG =65, + + CV_BayerBG2RGB_VNG =CV_BayerRG2BGR_VNG, + CV_BayerGB2RGB_VNG =CV_BayerGR2BGR_VNG, + CV_BayerRG2RGB_VNG =CV_BayerBG2BGR_VNG, + CV_BayerGR2RGB_VNG =CV_BayerGB2BGR_VNG, + + CV_BGR2HSV_FULL = 66, + CV_RGB2HSV_FULL = 67, + CV_BGR2HLS_FULL = 68, + CV_RGB2HLS_FULL = 69, + + CV_HSV2BGR_FULL = 70, + CV_HSV2RGB_FULL = 71, + CV_HLS2BGR_FULL = 72, + CV_HLS2RGB_FULL = 73, + + CV_LBGR2Lab = 74, + CV_LRGB2Lab = 75, + CV_LBGR2Luv = 76, + CV_LRGB2Luv = 77, + + CV_Lab2LBGR = 78, + CV_Lab2LRGB = 79, + CV_Luv2LBGR = 80, + CV_Luv2LRGB = 81, + + CV_BGR2YUV = 82, + CV_RGB2YUV = 83, + CV_YUV2BGR = 84, + CV_YUV2RGB = 85, + + CV_BayerBG2GRAY = 86, + CV_BayerGB2GRAY = 87, + CV_BayerRG2GRAY = 88, + CV_BayerGR2GRAY = 89, + + //YUV 4:2:0 formats family + CV_YUV2RGB_NV12 = 90, + CV_YUV2BGR_NV12 = 91, + CV_YUV2RGB_NV21 = 92, + CV_YUV2BGR_NV21 = 93, + CV_YUV420sp2RGB = CV_YUV2RGB_NV21, + CV_YUV420sp2BGR = CV_YUV2BGR_NV21, + + CV_YUV2RGBA_NV12 = 94, + CV_YUV2BGRA_NV12 = 95, + CV_YUV2RGBA_NV21 = 
96, + CV_YUV2BGRA_NV21 = 97, + CV_YUV420sp2RGBA = CV_YUV2RGBA_NV21, + CV_YUV420sp2BGRA = CV_YUV2BGRA_NV21, + + CV_YUV2RGB_YV12 = 98, + CV_YUV2BGR_YV12 = 99, + CV_YUV2RGB_IYUV = 100, + CV_YUV2BGR_IYUV = 101, + CV_YUV2RGB_I420 = CV_YUV2RGB_IYUV, + CV_YUV2BGR_I420 = CV_YUV2BGR_IYUV, + CV_YUV420p2RGB = CV_YUV2RGB_YV12, + CV_YUV420p2BGR = CV_YUV2BGR_YV12, + + CV_YUV2RGBA_YV12 = 102, + CV_YUV2BGRA_YV12 = 103, + CV_YUV2RGBA_IYUV = 104, + CV_YUV2BGRA_IYUV = 105, + CV_YUV2RGBA_I420 = CV_YUV2RGBA_IYUV, + CV_YUV2BGRA_I420 = CV_YUV2BGRA_IYUV, + CV_YUV420p2RGBA = CV_YUV2RGBA_YV12, + CV_YUV420p2BGRA = CV_YUV2BGRA_YV12, + + CV_YUV2GRAY_420 = 106, + CV_YUV2GRAY_NV21 = CV_YUV2GRAY_420, + CV_YUV2GRAY_NV12 = CV_YUV2GRAY_420, + CV_YUV2GRAY_YV12 = CV_YUV2GRAY_420, + CV_YUV2GRAY_IYUV = CV_YUV2GRAY_420, + CV_YUV2GRAY_I420 = CV_YUV2GRAY_420, + CV_YUV420sp2GRAY = CV_YUV2GRAY_420, + CV_YUV420p2GRAY = CV_YUV2GRAY_420, + + //YUV 4:2:2 formats family + CV_YUV2RGB_UYVY = 107, + CV_YUV2BGR_UYVY = 108, + //CV_YUV2RGB_VYUY = 109, + //CV_YUV2BGR_VYUY = 110, + CV_YUV2RGB_Y422 = CV_YUV2RGB_UYVY, + CV_YUV2BGR_Y422 = CV_YUV2BGR_UYVY, + CV_YUV2RGB_UYNV = CV_YUV2RGB_UYVY, + CV_YUV2BGR_UYNV = CV_YUV2BGR_UYVY, + + CV_YUV2RGBA_UYVY = 111, + CV_YUV2BGRA_UYVY = 112, + //CV_YUV2RGBA_VYUY = 113, + //CV_YUV2BGRA_VYUY = 114, + CV_YUV2RGBA_Y422 = CV_YUV2RGBA_UYVY, + CV_YUV2BGRA_Y422 = CV_YUV2BGRA_UYVY, + CV_YUV2RGBA_UYNV = CV_YUV2RGBA_UYVY, + CV_YUV2BGRA_UYNV = CV_YUV2BGRA_UYVY, + + CV_YUV2RGB_YUY2 = 115, + CV_YUV2BGR_YUY2 = 116, + CV_YUV2RGB_YVYU = 117, + CV_YUV2BGR_YVYU = 118, + CV_YUV2RGB_YUYV = CV_YUV2RGB_YUY2, + CV_YUV2BGR_YUYV = CV_YUV2BGR_YUY2, + CV_YUV2RGB_YUNV = CV_YUV2RGB_YUY2, + CV_YUV2BGR_YUNV = CV_YUV2BGR_YUY2, + + CV_YUV2RGBA_YUY2 = 119, + CV_YUV2BGRA_YUY2 = 120, + CV_YUV2RGBA_YVYU = 121, + CV_YUV2BGRA_YVYU = 122, + CV_YUV2RGBA_YUYV = CV_YUV2RGBA_YUY2, + CV_YUV2BGRA_YUYV = CV_YUV2BGRA_YUY2, + CV_YUV2RGBA_YUNV = CV_YUV2RGBA_YUY2, + CV_YUV2BGRA_YUNV = CV_YUV2BGRA_YUY2, + + CV_YUV2GRAY_UYVY = 123, + CV_YUV2GRAY_YUY2 = 124, + //CV_YUV2GRAY_VYUY = CV_YUV2GRAY_UYVY, + CV_YUV2GRAY_Y422 = CV_YUV2GRAY_UYVY, + CV_YUV2GRAY_UYNV = CV_YUV2GRAY_UYVY, + CV_YUV2GRAY_YVYU = CV_YUV2GRAY_YUY2, + CV_YUV2GRAY_YUYV = CV_YUV2GRAY_YUY2, + CV_YUV2GRAY_YUNV = CV_YUV2GRAY_YUY2, + + // alpha premultiplication + CV_RGBA2mRGBA = 125, + CV_mRGBA2RGBA = 126, + + CV_RGB2YUV_I420 = 127, + CV_BGR2YUV_I420 = 128, + CV_RGB2YUV_IYUV = CV_RGB2YUV_I420, + CV_BGR2YUV_IYUV = CV_BGR2YUV_I420, + + CV_RGBA2YUV_I420 = 129, + CV_BGRA2YUV_I420 = 130, + CV_RGBA2YUV_IYUV = CV_RGBA2YUV_I420, + CV_BGRA2YUV_IYUV = CV_BGRA2YUV_I420, + CV_RGB2YUV_YV12 = 131, + CV_BGR2YUV_YV12 = 132, + CV_RGBA2YUV_YV12 = 133, + CV_BGRA2YUV_YV12 = 134, + + CV_COLORCVT_MAX = 135 +}; + + +/* Sub-pixel interpolation methods */ +enum +{ + CV_INTER_NN =0, + CV_INTER_LINEAR =1, + CV_INTER_CUBIC =2, + CV_INTER_AREA =3, + CV_INTER_LANCZOS4 =4 +}; + +/* ... 
and other image warping flags */ +enum +{ + CV_WARP_FILL_OUTLIERS =8, + CV_WARP_INVERSE_MAP =16 +}; + +/* Shapes of a structuring element for morphological operations */ +enum +{ + CV_SHAPE_RECT =0, + CV_SHAPE_CROSS =1, + CV_SHAPE_ELLIPSE =2, + CV_SHAPE_CUSTOM =100 +}; + +/* Morphological operations */ +enum +{ + CV_MOP_ERODE =0, + CV_MOP_DILATE =1, + CV_MOP_OPEN =2, + CV_MOP_CLOSE =3, + CV_MOP_GRADIENT =4, + CV_MOP_TOPHAT =5, + CV_MOP_BLACKHAT =6 +}; + +/* Spatial and central moments */ +typedef struct CvMoments +{ + double m00, m10, m01, m20, m11, m02, m30, m21, m12, m03; /* spatial moments */ + double mu20, mu11, mu02, mu30, mu21, mu12, mu03; /* central moments */ + double inv_sqrt_m00; /* m00 != 0 ? 1/sqrt(m00) : 0 */ +} +CvMoments; + +/* Hu invariants */ +typedef struct CvHuMoments +{ + double hu1, hu2, hu3, hu4, hu5, hu6, hu7; /* Hu invariants */ +} +CvHuMoments; + +/* Template matching methods */ +enum +{ + CV_TM_SQDIFF =0, + CV_TM_SQDIFF_NORMED =1, + CV_TM_CCORR =2, + CV_TM_CCORR_NORMED =3, + CV_TM_CCOEFF =4, + CV_TM_CCOEFF_NORMED =5 +}; + +typedef float (CV_CDECL * CvDistanceFunction)( const float* a, const float* b, void* user_param ); + +/* Contour retrieval modes */ +enum +{ + CV_RETR_EXTERNAL=0, + CV_RETR_LIST=1, + CV_RETR_CCOMP=2, + CV_RETR_TREE=3, + CV_RETR_FLOODFILL=4 +}; + +/* Contour approximation methods */ +enum +{ + CV_CHAIN_CODE=0, + CV_CHAIN_APPROX_NONE=1, + CV_CHAIN_APPROX_SIMPLE=2, + CV_CHAIN_APPROX_TC89_L1=3, + CV_CHAIN_APPROX_TC89_KCOS=4, + CV_LINK_RUNS=5 +}; + +/* +Internal structure that is used for sequental retrieving contours from the image. +It supports both hierarchical and plane variants of Suzuki algorithm. +*/ +typedef struct _CvContourScanner* CvContourScanner; + +/* Freeman chain reader state */ +typedef struct CvChainPtReader +{ + CV_SEQ_READER_FIELDS() + char code; + CvPoint pt; + schar deltas[8][2]; +} +CvChainPtReader; + +/* initializes 8-element array for fast access to 3x3 neighborhood of a pixel */ +#define CV_INIT_3X3_DELTAS( deltas, step, nch ) \ + ((deltas)[0] = (nch), (deltas)[1] = -(step) + (nch), \ + (deltas)[2] = -(step), (deltas)[3] = -(step) - (nch), \ + (deltas)[4] = -(nch), (deltas)[5] = (step) - (nch), \ + (deltas)[6] = (step), (deltas)[7] = (step) + (nch)) + + +/****************************************************************************************\ +* Planar subdivisions * +\****************************************************************************************/ + +typedef size_t CvSubdiv2DEdge; + +#define CV_QUADEDGE2D_FIELDS() \ + int flags; \ + struct CvSubdiv2DPoint* pt[4]; \ + CvSubdiv2DEdge next[4]; + +#define CV_SUBDIV2D_POINT_FIELDS()\ + int flags; \ + CvSubdiv2DEdge first; \ + CvPoint2D32f pt; \ + int id; + +#define CV_SUBDIV2D_VIRTUAL_POINT_FLAG (1 << 30) + +typedef struct CvQuadEdge2D +{ + CV_QUADEDGE2D_FIELDS() +} +CvQuadEdge2D; + +typedef struct CvSubdiv2DPoint +{ + CV_SUBDIV2D_POINT_FIELDS() +} +CvSubdiv2DPoint; + +#define CV_SUBDIV2D_FIELDS() \ + CV_GRAPH_FIELDS() \ + int quad_edges; \ + int is_geometry_valid; \ + CvSubdiv2DEdge recent_edge; \ + CvPoint2D32f topleft; \ + CvPoint2D32f bottomright; + +typedef struct CvSubdiv2D +{ + CV_SUBDIV2D_FIELDS() +} +CvSubdiv2D; + + +typedef enum CvSubdiv2DPointLocation +{ + CV_PTLOC_ERROR = -2, + CV_PTLOC_OUTSIDE_RECT = -1, + CV_PTLOC_INSIDE = 0, + CV_PTLOC_VERTEX = 1, + CV_PTLOC_ON_EDGE = 2 +} +CvSubdiv2DPointLocation; + +typedef enum CvNextEdgeType +{ + CV_NEXT_AROUND_ORG = 0x00, + CV_NEXT_AROUND_DST = 0x22, + CV_PREV_AROUND_ORG = 0x11, + CV_PREV_AROUND_DST = 0x33, + 
CV_NEXT_AROUND_LEFT = 0x13, + CV_NEXT_AROUND_RIGHT = 0x31, + CV_PREV_AROUND_LEFT = 0x20, + CV_PREV_AROUND_RIGHT = 0x02 +} +CvNextEdgeType; + +/* get the next edge with the same origin point (counterwise) */ +#define CV_SUBDIV2D_NEXT_EDGE( edge ) (((CvQuadEdge2D*)((edge) & ~3))->next[(edge)&3]) + + +/* Contour approximation algorithms */ +enum +{ + CV_POLY_APPROX_DP = 0 +}; + +/* Shape matching methods */ +enum +{ + CV_CONTOURS_MATCH_I1 =1, + CV_CONTOURS_MATCH_I2 =2, + CV_CONTOURS_MATCH_I3 =3 +}; + +/* Shape orientation */ +enum +{ + CV_CLOCKWISE =1, + CV_COUNTER_CLOCKWISE =2 +}; + + +/* Convexity defect */ +typedef struct CvConvexityDefect +{ + CvPoint* start; /* point of the contour where the defect begins */ + CvPoint* end; /* point of the contour where the defect ends */ + CvPoint* depth_point; /* the farthest from the convex hull point within the defect */ + float depth; /* distance between the farthest point and the convex hull */ +} CvConvexityDefect; + + +/* Histogram comparison methods */ +enum +{ + CV_COMP_CORREL =0, + CV_COMP_CHISQR =1, + CV_COMP_INTERSECT =2, + CV_COMP_BHATTACHARYYA =3, + CV_COMP_HELLINGER =CV_COMP_BHATTACHARYYA +}; + +/* Mask size for distance transform */ +enum +{ + CV_DIST_MASK_3 =3, + CV_DIST_MASK_5 =5, + CV_DIST_MASK_PRECISE =0 +}; + +/* Content of output label array: connected components or pixels */ +enum +{ + CV_DIST_LABEL_CCOMP = 0, + CV_DIST_LABEL_PIXEL = 1 +}; + +/* Distance types for Distance Transform and M-estimators */ +enum +{ + CV_DIST_USER =-1, /* User defined distance */ + CV_DIST_L1 =1, /* distance = |x1-x2| + |y1-y2| */ + CV_DIST_L2 =2, /* the simple euclidean distance */ + CV_DIST_C =3, /* distance = max(|x1-x2|,|y1-y2|) */ + CV_DIST_L12 =4, /* L1-L2 metric: distance = 2(sqrt(1+x*x/2) - 1)) */ + CV_DIST_FAIR =5, /* distance = c^2(|x|/c-log(1+|x|/c)), c = 1.3998 */ + CV_DIST_WELSCH =6, /* distance = c^2/2(1-exp(-(x/c)^2)), c = 2.9846 */ + CV_DIST_HUBER =7 /* distance = |x|<c ? x^2/2 : c(|x|-c/2), c = 1.345 */ +}; + + +/* Threshold types */ +enum +{ + CV_THRESH_BINARY =0, /* value = value > threshold ? max_value : 0 */ + CV_THRESH_BINARY_INV =1, /* value = value > threshold ? 0 : max_value */ + CV_THRESH_TRUNC =2, /* value = value > threshold ? threshold : value */ + CV_THRESH_TOZERO =3, /* value = value > threshold ? value : 0 */ + CV_THRESH_TOZERO_INV =4, /* value = value > threshold ? 0 : value */ + CV_THRESH_MASK =7, + CV_THRESH_OTSU =8 /* use Otsu algorithm to choose the optimal threshold value; + combine the flag with one of the above CV_THRESH_* values */ +}; + +/* Adaptive threshold methods */ +enum +{ + CV_ADAPTIVE_THRESH_MEAN_C =0, + CV_ADAPTIVE_THRESH_GAUSSIAN_C =1 +}; + +/* FloodFill flags */ +enum +{ + CV_FLOODFILL_FIXED_RANGE =(1 << 16), + CV_FLOODFILL_MASK_ONLY =(1 << 17) +}; + + +/* Canny edge detector flags */ +enum +{ + CV_CANNY_L2_GRADIENT =(1 << 31) +}; + +/* Variants of a Hough transform */ +enum +{ + CV_HOUGH_STANDARD =0, + CV_HOUGH_PROBABILISTIC =1, + CV_HOUGH_MULTI_SCALE =2, + CV_HOUGH_GRADIENT =3 +}; + + +/* Fast search data structures */ +struct CvFeatureTree; +struct CvLSH; +struct CvLSHOperations; + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/src/c/hardware/rasberrypi/libraries/opencv/opencv2/legacy/blobtrack.hpp b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/legacy/blobtrack.hpp new file mode 100644 index 0000000..496b8be --- /dev/null +++ b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/legacy/blobtrack.hpp @@ -0,0 +1,948 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// Intel License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000, Intel Corporation, all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of Intel Corporation may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. 
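The constants declared in the preceding imgproc header (the CV_BGR2*/CV_RGB2* conversion codes, CV_THRESH_* and CV_ADAPTIVE_THRESH_*) are the flag values consumed by cvCvtColor, cvThreshold and cvAdaptiveThreshold from imgproc_c.h. A short sketch of the usual grayscale-plus-threshold flow; make_mask is a hypothetical helper and assumes an 8-bit, 3-channel BGR input:

#include <opencv2/core/core_c.h>
#include <opencv2/imgproc/imgproc_c.h>

/* Turn a BGR frame into a binary mask (hypothetical helper; caller releases the result). */
static IplImage* make_mask(const IplImage* frame)
{
    IplImage* gray = cvCreateImage(cvGetSize(frame), IPL_DEPTH_8U, 1);
    IplImage* mask = cvCreateImage(cvGetSize(frame), IPL_DEPTH_8U, 1);

    cvCvtColor(frame, gray, CV_BGR2GRAY);            /* color-conversion code from the enum above */
    cvThreshold(gray, mask, 0, 255,
                CV_THRESH_BINARY | CV_THRESH_OTSU);  /* Otsu flag ORed with a CV_THRESH_* type */

    cvReleaseImage(&gray);
    return mask;
}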
+// +//M*/ + + +#ifndef __OPENCV_VIDEOSURVEILLANCE_H__ +#define __OPENCV_VIDEOSURVEILLANCE_H__ + +/* Turn off the functionality until cvaux/src/Makefile.am gets updated: */ +//#if _MSC_VER >= 1200 + +#include "opencv2/core/core_c.h" +#include + +#if (defined _MSC_VER && _MSC_VER >= 1200) || defined __BORLANDC__ +#define cv_stricmp stricmp +#define cv_strnicmp strnicmp +#if defined WINCE +#define strdup _strdup +#define stricmp _stricmp +#endif +#elif defined __GNUC__ || defined __sun +#define cv_stricmp strcasecmp +#define cv_strnicmp strncasecmp +#else +#error Do not know how to make case-insensitive string comparison on this platform +#endif + +//struct DefParam; +struct CvDefParam +{ + struct CvDefParam* next; + char* pName; + char* pComment; + double* pDouble; + double Double; + float* pFloat; + float Float; + int* pInt; + int Int; + char** pStr; + char* Str; +}; + +class CV_EXPORTS CvVSModule +{ +private: /* Internal data: */ + CvDefParam* m_pParamList; + char* m_pModuleTypeName; + char* m_pModuleName; + char* m_pNickName; +protected: + int m_Wnd; +public: /* Constructor and destructor: */ + CvVSModule(); + virtual ~CvVSModule(); +private: /* Internal functions: */ + void FreeParam(CvDefParam** pp); + CvDefParam* NewParam(const char* name); + CvDefParam* GetParamPtr(int index); + CvDefParam* GetParamPtr(const char* name); +protected: /* INTERNAL INTERFACE */ + int IsParam(const char* name); + void AddParam(const char* name, double* pAddr); + void AddParam(const char* name, float* pAddr); + void AddParam(const char* name, int* pAddr); + void AddParam(const char* name, const char** pAddr); + void AddParam(const char* name); + void CommentParam(const char* name, const char* pComment); + void SetTypeName(const char* name); + void SetModuleName(const char* name); + void DelParam(const char* name); + +public: /* EXTERNAL INTERFACE */ + const char* GetParamName(int index); + const char* GetParamComment(const char* name); + double GetParam(const char* name); + const char* GetParamStr(const char* name); + void SetParam(const char* name, double val); + void SetParamStr(const char* name, const char* str); + void TransferParamsFromChild(CvVSModule* pM, const char* prefix = NULL); + void TransferParamsToChild(CvVSModule* pM, char* prefix = NULL); + virtual void ParamUpdate(); + const char* GetTypeName(); + int IsModuleTypeName(const char* name); + char* GetModuleName(); + int IsModuleName(const char* name); + void SetNickName(const char* pStr); + const char* GetNickName(); + virtual void SaveState(CvFileStorage*); + virtual void LoadState(CvFileStorage*, CvFileNode*); + + virtual void Release() = 0; +};/* CvVMModule */ + +CV_EXPORTS void cvWriteStruct(CvFileStorage* fs, const char* name, void* addr, const char* desc, int num=1); +CV_EXPORTS void cvReadStructByName(CvFileStorage* fs, CvFileNode* node, const char* name, void* addr, const char* desc); + +/* FOREGROUND DETECTOR INTERFACE */ +class CV_EXPORTS CvFGDetector : public CvVSModule +{ +public: + CvFGDetector(); + virtual IplImage* GetMask() = 0; + /* Process current image: */ + virtual void Process(IplImage* pImg) = 0; + /* Release foreground detector: */ + virtual void Release() = 0; +}; + +CV_EXPORTS void cvReleaseFGDetector(CvFGDetector** ppT ); +CV_EXPORTS CvFGDetector* cvCreateFGDetectorBase(int type, void *param); + + +/* BLOB STRUCTURE*/ +struct CvBlob +{ + float x,y; /* blob position */ + float w,h; /* blob sizes */ + int ID; /* blob ID */ +}; + +inline CvBlob cvBlob(float x,float y, float w, float h) +{ + CvBlob B = {x,y,w,h,0}; + 
return B; +} +#define CV_BLOB_MINW 5 +#define CV_BLOB_MINH 5 +#define CV_BLOB_ID(pB) (((CvBlob*)(pB))->ID) +#define CV_BLOB_CENTER(pB) cvPoint2D32f(((CvBlob*)(pB))->x,((CvBlob*)(pB))->y) +#define CV_BLOB_X(pB) (((CvBlob*)(pB))->x) +#define CV_BLOB_Y(pB) (((CvBlob*)(pB))->y) +#define CV_BLOB_WX(pB) (((CvBlob*)(pB))->w) +#define CV_BLOB_WY(pB) (((CvBlob*)(pB))->h) +#define CV_BLOB_RX(pB) (0.5f*CV_BLOB_WX(pB)) +#define CV_BLOB_RY(pB) (0.5f*CV_BLOB_WY(pB)) +#define CV_BLOB_RECT(pB) cvRect(cvRound(((CvBlob*)(pB))->x-CV_BLOB_RX(pB)),cvRound(((CvBlob*)(pB))->y-CV_BLOB_RY(pB)),cvRound(CV_BLOB_WX(pB)),cvRound(CV_BLOB_WY(pB))) +/* END BLOB STRUCTURE*/ + + +/* simple BLOBLIST */ +class CV_EXPORTS CvBlobSeq +{ +public: + CvBlobSeq(int BlobSize = sizeof(CvBlob)) + { + m_pMem = cvCreateMemStorage(); + m_pSeq = cvCreateSeq(0,sizeof(CvSeq),BlobSize,m_pMem); + strcpy(m_pElemFormat,"ffffi"); + } + virtual ~CvBlobSeq() + { + cvReleaseMemStorage(&m_pMem); + }; + virtual CvBlob* GetBlob(int BlobIndex) + { + return (CvBlob*)cvGetSeqElem(m_pSeq,BlobIndex); + }; + virtual CvBlob* GetBlobByID(int BlobID) + { + int i; + for(i=0; i<m_pSeq->total; ++i) + if(BlobID == CV_BLOB_ID(GetBlob(i))) + return GetBlob(i); + return NULL; + }; + virtual void DelBlob(int BlobIndex) + { + cvSeqRemove(m_pSeq,BlobIndex); + }; + virtual void DelBlobByID(int BlobID) + { + int i; + for(i=0; i<m_pSeq->total; ++i) + { + if(BlobID == CV_BLOB_ID(GetBlob(i))) + { + DelBlob(i); + return; + } + } + }; + virtual void Clear() + { + cvClearSeq(m_pSeq); + }; + virtual void AddBlob(CvBlob* pB) + { + cvSeqPush(m_pSeq,pB); + }; + virtual int GetBlobNum() + { + return m_pSeq->total; + }; + virtual void Write(CvFileStorage* fs, const char* name) + { + const char* attr[] = {"dt",m_pElemFormat,NULL}; + if(fs) + { + cvWrite(fs,name,m_pSeq,cvAttrList(attr,NULL)); + } + } + virtual void Load(CvFileStorage* fs, CvFileNode* node) + { + if(fs==NULL) return; + CvSeq* pSeq = (CvSeq*)cvRead(fs, node); + if(pSeq) + { + int i; + cvClearSeq(m_pSeq); + for(i=0; i<pSeq->total; ++i) + { + void* pB = cvGetSeqElem( pSeq, i ); + cvSeqPush( m_pSeq, pB ); + } + } + } + void AddFormat(const char* str){strcat(m_pElemFormat,str);} +protected: + CvMemStorage* m_pMem; + CvSeq* m_pSeq; + char m_pElemFormat[1024]; +}; +/* simple BLOBLIST */ + + +/* simple TRACKLIST */ +struct CvBlobTrack +{ + int TrackID; + int StartFrame; + CvBlobSeq* pBlobSeq; +}; + +class CV_EXPORTS CvBlobTrackSeq +{ +public: + CvBlobTrackSeq(int TrackSize = sizeof(CvBlobTrack)); + virtual ~CvBlobTrackSeq(); + virtual CvBlobTrack* GetBlobTrack(int TrackIndex); + virtual CvBlobTrack* GetBlobTrackByID(int TrackID); + virtual void DelBlobTrack(int TrackIndex); + virtual void DelBlobTrackByID(int TrackID); + virtual void Clear(); + virtual void AddBlobTrack(int TrackID, int StartFrame = 0); + virtual int GetBlobTrackNum(); +protected: + CvMemStorage* m_pMem; + CvSeq* m_pSeq; +}; + +/* simple TRACKLIST */ + + +/* BLOB DETECTOR INTERFACE */ +class CV_EXPORTS CvBlobDetector: public CvVSModule +{ +public: + CvBlobDetector(){SetTypeName("BlobDetector");}; + /* Try to detect new blob entrance based on foreground mask.
*/ + /* pFGMask - image of foreground mask */ + /* pNewBlob - pointer to CvBlob structure which will be filled if new blob entrance detected */ + /* pOldBlobList - pointer to blob list which already exist on image */ + virtual int DetectNewBlob(IplImage* pImg, IplImage* pImgFG, CvBlobSeq* pNewBlobList, CvBlobSeq* pOldBlobList) = 0; + /* release blob detector */ + virtual void Release()=0; +}; + +/* Release any blob detector: */ +CV_EXPORTS void cvReleaseBlobDetector(CvBlobDetector** ppBD); + +/* Declarations of constructors of implemented modules: */ +CV_EXPORTS CvBlobDetector* cvCreateBlobDetectorSimple(); +CV_EXPORTS CvBlobDetector* cvCreateBlobDetectorCC(); + +struct CV_EXPORTS CvDetectedBlob : public CvBlob +{ + float response; +}; + +CV_INLINE CvDetectedBlob cvDetectedBlob( float x, float y, float w, float h, int ID = 0, float response = 0.0F ) +{ + CvDetectedBlob b; + b.x = x; b.y = y; b.w = w; b.h = h; b.ID = ID; b.response = response; + return b; +} + + +class CV_EXPORTS CvObjectDetector +{ +public: + CvObjectDetector( const char* /*detector_file_name*/ = 0 ); + ~CvObjectDetector(); + + /* + * Release the current detector and load new detector from file + * (if detector_file_name is not 0) + * Return true on success: + */ + bool Load( const char* /*detector_file_name*/ = 0 ); + + /* Return min detector window size: */ + CvSize GetMinWindowSize() const; + + /* Return max border: */ + int GetMaxBorderSize() const; + + /* + * Detect the object on the image and push the detected + * blobs into which must be the sequence of s + */ + void Detect( const CvArr* /*img*/, /* out */ CvBlobSeq* /*detected_blob_seq*/ = 0 ); + +protected: + class CvObjectDetectorImpl* impl; +}; + + +CV_INLINE CvRect cvRectIntersection( const CvRect r1, const CvRect r2 ) +{ + CvRect r = cvRect( MAX(r1.x, r2.x), MAX(r1.y, r2.y), 0, 0 ); + + r.width = MIN(r1.x + r1.width, r2.x + r2.width) - r.x; + r.height = MIN(r1.y + r1.height, r2.y + r2.height) - r.y; + + return r; +} + + +/* + * CvImageDrawer + * + * Draw on an image the specified ROIs from the source image and + * given blobs as ellipses or rectangles: + */ + +struct CvDrawShape +{ + enum {RECT, ELLIPSE} shape; + CvScalar color; +}; + +/*extern const CvDrawShape icv_shape[] = +{ + { CvDrawShape::ELLIPSE, CV_RGB(255,0,0) }, + { CvDrawShape::ELLIPSE, CV_RGB(0,255,0) }, + { CvDrawShape::ELLIPSE, CV_RGB(0,0,255) }, + { CvDrawShape::ELLIPSE, CV_RGB(255,255,0) }, + { CvDrawShape::ELLIPSE, CV_RGB(0,255,255) }, + { CvDrawShape::ELLIPSE, CV_RGB(255,0,255) } +};*/ + +class CV_EXPORTS CvImageDrawer +{ +public: + CvImageDrawer() : m_image(0) {} + ~CvImageDrawer() { cvReleaseImage( &m_image ); } + void SetShapes( const CvDrawShape* shapes, int num ); + /* must be the sequence of s */ + IplImage* Draw( const CvArr* src, CvBlobSeq* blob_seq = 0, const CvSeq* roi_seq = 0 ); + IplImage* GetImage() { return m_image; } +protected: + //static const int MAX_SHAPES = sizeof(icv_shape) / sizeof(icv_shape[0]);; + + IplImage* m_image; + CvDrawShape m_shape[16]; +}; + + + +/* Trajectory generation module: */ +class CV_EXPORTS CvBlobTrackGen: public CvVSModule +{ +public: + CvBlobTrackGen(){SetTypeName("BlobTrackGen");}; + virtual void SetFileName(char* pFileName) = 0; + virtual void AddBlob(CvBlob* pBlob) = 0; + virtual void Process(IplImage* pImg = NULL, IplImage* pFG = NULL) = 0; + virtual void Release() = 0; +}; + +inline void cvReleaseBlobTrackGen(CvBlobTrackGen** pBTGen) +{ + if(*pBTGen)(*pBTGen)->Release(); + *pBTGen = 0; +} + +/* Declarations of constructors of implemented 
modules: */ +CV_EXPORTS CvBlobTrackGen* cvCreateModuleBlobTrackGen1(); +CV_EXPORTS CvBlobTrackGen* cvCreateModuleBlobTrackGenYML(); + + + +/* BLOB TRACKER INTERFACE */ +class CV_EXPORTS CvBlobTracker: public CvVSModule +{ +public: + CvBlobTracker(); + + /* Add new blob to track it and assign to this blob personal ID */ + /* pBlob - pointer to structure with blob parameters (ID is ignored)*/ + /* pImg - current image */ + /* pImgFG - current foreground mask */ + /* Return pointer to new added blob: */ + virtual CvBlob* AddBlob(CvBlob* pBlob, IplImage* pImg, IplImage* pImgFG = NULL ) = 0; + + /* Return number of currently tracked blobs: */ + virtual int GetBlobNum() = 0; + + /* Return pointer to specified by index blob: */ + virtual CvBlob* GetBlob(int BlobIndex) = 0; + + /* Delete blob by its index: */ + virtual void DelBlob(int BlobIndex) = 0; + + /* Process current image and track all existed blobs: */ + virtual void Process(IplImage* pImg, IplImage* pImgFG = NULL) = 0; + + /* Release blob tracker: */ + virtual void Release() = 0; + + + /* Process one blob (for multi hypothesis tracing): */ + virtual void ProcessBlob(int BlobIndex, CvBlob* pBlob, IplImage* /*pImg*/, IplImage* /*pImgFG*/ = NULL); + + /* Get confidence/wieght/probability (0-1) for blob: */ + virtual double GetConfidence(int /*BlobIndex*/, CvBlob* /*pBlob*/, IplImage* /*pImg*/, IplImage* /*pImgFG*/ = NULL); + + virtual double GetConfidenceList(CvBlobSeq* pBlobList, IplImage* pImg, IplImage* pImgFG = NULL); + + virtual void UpdateBlob(int /*BlobIndex*/, CvBlob* /*pBlob*/, IplImage* /*pImg*/, IplImage* /*pImgFG*/ = NULL); + + /* Update all blob models: */ + virtual void Update(IplImage* pImg, IplImage* pImgFG = NULL); + + /* Return pointer to blob by its unique ID: */ + virtual int GetBlobIndexByID(int BlobID); + + /* Return pointer to blob by its unique ID: */ + virtual CvBlob* GetBlobByID(int BlobID); + + /* Delete blob by its ID: */ + virtual void DelBlobByID(int BlobID); + + /* Set new parameters for specified (by index) blob: */ + virtual void SetBlob(int /*BlobIndex*/, CvBlob* /*pBlob*/); + + /* Set new parameters for specified (by ID) blob: */ + virtual void SetBlobByID(int BlobID, CvBlob* pBlob); + + /* =============== MULTI HYPOTHESIS INTERFACE ================== */ + + /* Return number of position hyposetis of currently tracked blob: */ + virtual int GetBlobHypNum(int /*BlobIdx*/); + + /* Return pointer to specified blob hypothesis by index blob: */ + virtual CvBlob* GetBlobHyp(int BlobIndex, int /*hypothesis*/); + + /* Set new parameters for specified (by index) blob hyp + * (can be called several times for each hyp ): + */ + virtual void SetBlobHyp(int /*BlobIndex*/, CvBlob* /*pBlob*/); +}; + +CV_EXPORTS void cvReleaseBlobTracker(CvBlobTracker**ppT ); +/* BLOB TRACKER INTERFACE */ + +/*BLOB TRACKER ONE INTERFACE */ +class CV_EXPORTS CvBlobTrackerOne : public CvVSModule +{ +public: + virtual void Init(CvBlob* pBlobInit, IplImage* pImg, IplImage* pImgFG = NULL) = 0; + virtual CvBlob* Process(CvBlob* pBlobPrev, IplImage* pImg, IplImage* pImgFG = NULL) = 0; + virtual void Release() = 0; + + /* Non-required methods: */ + virtual void SkipProcess(CvBlob* /*pBlobPrev*/, IplImage* /*pImg*/, IplImage* /*pImgFG*/ = NULL){}; + virtual void Update(CvBlob* /*pBlob*/, IplImage* /*pImg*/, IplImage* /*pImgFG*/ = NULL){}; + virtual void SetCollision(int /*CollisionFlag*/){}; /* call in case of blob collision situation*/ + virtual double GetConfidence(CvBlob* /*pBlob*/, IplImage* /*pImg*/, + IplImage* /*pImgFG*/ = NULL, IplImage* 
/*pImgUnusedReg*/ = NULL) + { + return 1; + }; +}; +inline void cvReleaseBlobTrackerOne(CvBlobTrackerOne **ppT ) +{ + ppT[0]->Release(); + ppT[0] = 0; +} +CV_EXPORTS CvBlobTracker* cvCreateBlobTrackerList(CvBlobTrackerOne* (*create)()); +/*BLOB TRACKER ONE INTERFACE */ + +/* Declarations of constructors of implemented modules: */ + +/* Some declarations for specific MeanShift tracker: */ +#define PROFILE_EPANECHNIKOV 0 +#define PROFILE_DOG 1 +struct CvBlobTrackerParamMS +{ + int noOfSigBits; + int appearance_profile; + int meanshift_profile; + float sigma; +}; + +CV_EXPORTS CvBlobTracker* cvCreateBlobTrackerMS1(CvBlobTrackerParamMS* param); +CV_EXPORTS CvBlobTracker* cvCreateBlobTrackerMS2(CvBlobTrackerParamMS* param); +CV_EXPORTS CvBlobTracker* cvCreateBlobTrackerMS1ByList(); + +/* Some declarations for specific Likelihood tracker: */ +struct CvBlobTrackerParamLH +{ + int HistType; /* see Prob.h */ + int ScaleAfter; +}; + +/* Without scale optimization: */ +CV_EXPORTS CvBlobTracker* cvCreateBlobTrackerLHR(CvBlobTrackerParamLH* /*param*/ = NULL); + +/* With scale optimization: */ +CV_EXPORTS CvBlobTracker* cvCreateBlobTrackerLHRS(CvBlobTrackerParamLH* /*param*/ = NULL); + +/* Simple blob tracker based on connected component tracking: */ +CV_EXPORTS CvBlobTracker* cvCreateBlobTrackerCC(); + +/* Connected component tracking and mean-shift particle filter collion-resolver: */ +CV_EXPORTS CvBlobTracker* cvCreateBlobTrackerCCMSPF(); + +/* Blob tracker that integrates meanshift and connected components: */ +CV_EXPORTS CvBlobTracker* cvCreateBlobTrackerMSFG(); +CV_EXPORTS CvBlobTracker* cvCreateBlobTrackerMSFGS(); + +/* Meanshift without connected-components */ +CV_EXPORTS CvBlobTracker* cvCreateBlobTrackerMS(); + +/* Particle filtering via Bhattacharya coefficient, which */ +/* is roughly the dot-product of two probability densities. 
*/ +/* See: Real-Time Tracking of Non-Rigid Objects using Mean Shift */ +/* Comanicius, Ramesh, Meer, 2000, 8p */ +/* http://citeseer.ist.psu.edu/321441.html */ +CV_EXPORTS CvBlobTracker* cvCreateBlobTrackerMSPF(); + +/* =========== tracker integrators trackers =============*/ + +/* Integrator based on Particle Filtering method: */ +//CV_EXPORTS CvBlobTracker* cvCreateBlobTrackerIPF(); + +/* Rule based integrator: */ +//CV_EXPORTS CvBlobTracker* cvCreateBlobTrackerIRB(); + +/* Integrator based on data fusion using particle filtering: */ +//CV_EXPORTS CvBlobTracker* cvCreateBlobTrackerIPFDF(); + + + + +/* Trajectory postprocessing module: */ +class CV_EXPORTS CvBlobTrackPostProc: public CvVSModule +{ +public: + CvBlobTrackPostProc(){SetTypeName("BlobTrackPostProc");}; + virtual void AddBlob(CvBlob* pBlob) = 0; + virtual void Process() = 0; + virtual int GetBlobNum() = 0; + virtual CvBlob* GetBlob(int index) = 0; + virtual void Release() = 0; + + /* Additional functionality: */ + virtual CvBlob* GetBlobByID(int BlobID) + { + int i; + for(i=GetBlobNum();i>0;i--) + { + CvBlob* pB=GetBlob(i-1); + if(pB->ID==BlobID) return pB; + } + return NULL; + }; +}; + +inline void cvReleaseBlobTrackPostProc(CvBlobTrackPostProc** pBTPP) +{ + if(pBTPP == NULL) return; + if(*pBTPP)(*pBTPP)->Release(); + *pBTPP = 0; +} + +/* Trajectory generation module: */ +class CV_EXPORTS CvBlobTrackPostProcOne: public CvVSModule +{ +public: + CvBlobTrackPostProcOne(){SetTypeName("BlobTrackPostOne");}; + virtual CvBlob* Process(CvBlob* pBlob) = 0; + virtual void Release() = 0; +}; + +/* Create blob tracking post processing module based on simle module: */ +CV_EXPORTS CvBlobTrackPostProc* cvCreateBlobTrackPostProcList(CvBlobTrackPostProcOne* (*create)()); + + +/* Declarations of constructors of implemented modules: */ +CV_EXPORTS CvBlobTrackPostProc* cvCreateModuleBlobTrackPostProcKalman(); +CV_EXPORTS CvBlobTrackPostProc* cvCreateModuleBlobTrackPostProcTimeAverRect(); +CV_EXPORTS CvBlobTrackPostProc* cvCreateModuleBlobTrackPostProcTimeAverExp(); + + +/* PREDICTORS */ +/* blob PREDICTOR */ +class CvBlobTrackPredictor: public CvVSModule +{ +public: + CvBlobTrackPredictor(){SetTypeName("BlobTrackPredictor");}; + virtual CvBlob* Predict() = 0; + virtual void Update(CvBlob* pBlob) = 0; + virtual void Release() = 0; +}; +CV_EXPORTS CvBlobTrackPredictor* cvCreateModuleBlobTrackPredictKalman(); + + + +/* Trajectory analyser module: */ +class CV_EXPORTS CvBlobTrackAnalysis: public CvVSModule +{ +public: + CvBlobTrackAnalysis(){SetTypeName("BlobTrackAnalysis");}; + virtual void AddBlob(CvBlob* pBlob) = 0; + virtual void Process(IplImage* pImg, IplImage* pFG) = 0; + virtual float GetState(int BlobID) = 0; + /* return 0 if trajectory is normal + return >0 if trajectory abnormal */ + virtual const char* GetStateDesc(int /*BlobID*/){return NULL;}; + virtual void SetFileName(char* /*DataBaseName*/){}; + virtual void Release() = 0; +}; + + +inline void cvReleaseBlobTrackAnalysis(CvBlobTrackAnalysis** pBTPP) +{ + if(pBTPP == NULL) return; + if(*pBTPP)(*pBTPP)->Release(); + *pBTPP = 0; +} + +/* Feature-vector generation module: */ +class CV_EXPORTS CvBlobTrackFVGen : public CvVSModule +{ +public: + CvBlobTrackFVGen(){SetTypeName("BlobTrackFVGen");}; + virtual void AddBlob(CvBlob* pBlob) = 0; + virtual void Process(IplImage* pImg, IplImage* pFG) = 0; + virtual void Release() = 0; + virtual int GetFVSize() = 0; + virtual int GetFVNum() = 0; + virtual float* GetFV(int index, int* pFVID) = 0; /* Returns pointer to FV, if return 0 then FV not 
created */ + virtual float* GetFVVar(){return NULL;}; /* Returns pointer to array of variation of values of FV, if returns 0 then FVVar does not exist. */ + virtual float* GetFVMin() = 0; /* Returns pointer to array of minimal values of FV, if returns 0 then FVrange does not exist */ + virtual float* GetFVMax() = 0; /* Returns pointer to array of maximal values of FV, if returns 0 then FVrange does not exist */ +}; + + +/* Trajectory Analyser module: */ +class CV_EXPORTS CvBlobTrackAnalysisOne +{ +public: + virtual ~CvBlobTrackAnalysisOne() {}; + virtual int Process(CvBlob* pBlob, IplImage* pImg, IplImage* pFG) = 0; + /* return 0 if trajectory is normal + return >0 if trajectory abnormal */ + virtual void Release() = 0; +}; + +/* Create blob tracking post processing module based on simle module: */ +CV_EXPORTS CvBlobTrackAnalysis* cvCreateBlobTrackAnalysisList(CvBlobTrackAnalysisOne* (*create)()); + +/* Declarations of constructors of implemented modules: */ + +/* Based on histogram analysis of 2D FV (x,y): */ +CV_EXPORTS CvBlobTrackAnalysis* cvCreateModuleBlobTrackAnalysisHistP(); + +/* Based on histogram analysis of 4D FV (x,y,vx,vy): */ +CV_EXPORTS CvBlobTrackAnalysis* cvCreateModuleBlobTrackAnalysisHistPV(); + +/* Based on histogram analysis of 5D FV (x,y,vx,vy,state): */ +CV_EXPORTS CvBlobTrackAnalysis* cvCreateModuleBlobTrackAnalysisHistPVS(); + +/* Based on histogram analysis of 4D FV (startpos,stoppos): */ +CV_EXPORTS CvBlobTrackAnalysis* cvCreateModuleBlobTrackAnalysisHistSS(); + + + +/* Based on SVM classifier analysis of 2D FV (x,y): */ +//CV_EXPORTS CvBlobTrackAnalysis* cvCreateModuleBlobTrackAnalysisSVMP(); + +/* Based on SVM classifier analysis of 4D FV (x,y,vx,vy): */ +//CV_EXPORTS CvBlobTrackAnalysis* cvCreateModuleBlobTrackAnalysisSVMPV(); + +/* Based on SVM classifier analysis of 5D FV (x,y,vx,vy,state): */ +//CV_EXPORTS CvBlobTrackAnalysis* cvCreateModuleBlobTrackAnalysisSVMPVS(); + +/* Based on SVM classifier analysis of 4D FV (startpos,stoppos): */ +//CV_EXPORTS CvBlobTrackAnalysis* cvCreateModuleBlobTrackAnalysisSVMSS(); + +/* Track analysis based on distance between tracks: */ +CV_EXPORTS CvBlobTrackAnalysis* cvCreateModuleBlobTrackAnalysisTrackDist(); + +/* Analyzer based on reation Road and height map: */ +//CV_EXPORTS CvBlobTrackAnalysis* cvCreateModuleBlobTrackAnalysis3DRoadMap(); + +/* Analyzer that makes OR decision using set of analyzers: */ +CV_EXPORTS CvBlobTrackAnalysis* cvCreateModuleBlobTrackAnalysisIOR(); + +/* Estimator of human height: */ +class CV_EXPORTS CvBlobTrackAnalysisHeight: public CvBlobTrackAnalysis +{ +public: + virtual double GetHeight(CvBlob* pB) = 0; +}; +//CV_EXPORTS CvBlobTrackAnalysisHeight* cvCreateModuleBlobTrackAnalysisHeightScale(); + + + +/* AUTO BLOB TRACKER INTERFACE -- pipeline of 3 modules: */ +class CV_EXPORTS CvBlobTrackerAuto: public CvVSModule +{ +public: + CvBlobTrackerAuto(){SetTypeName("BlobTrackerAuto");}; + virtual void Process(IplImage* pImg, IplImage* pMask = NULL) = 0; + virtual CvBlob* GetBlob(int index) = 0; + virtual CvBlob* GetBlobByID(int ID) = 0; + virtual int GetBlobNum() = 0; + virtual IplImage* GetFGMask(){return NULL;}; + virtual float GetState(int BlobID) = 0; + virtual const char* GetStateDesc(int BlobID) = 0; + /* return 0 if trajectory is normal; + * return >0 if trajectory abnormal. 
*/ + virtual void Release() = 0; +}; +inline void cvReleaseBlobTrackerAuto(CvBlobTrackerAuto** ppT) +{ + ppT[0]->Release(); + ppT[0] = 0; +} +/* END AUTO BLOB TRACKER INTERFACE */ + + +/* Constructor functions and data for specific BlobTRackerAuto modules: */ + +/* Parameters of blobtracker auto ver1: */ +struct CvBlobTrackerAutoParam1 +{ + int FGTrainFrames; /* Number of frames needed for FG (foreground) detector to train. */ + + CvFGDetector* pFG; /* FGDetector module. If this field is NULL the Process FG mask is used. */ + + CvBlobDetector* pBD; /* Selected blob detector module. */ + /* If this field is NULL default blobdetector module will be created. */ + + CvBlobTracker* pBT; /* Selected blob tracking module. */ + /* If this field is NULL default blobtracker module will be created. */ + + CvBlobTrackGen* pBTGen; /* Selected blob trajectory generator. */ + /* If this field is NULL no generator is used. */ + + CvBlobTrackPostProc* pBTPP; /* Selected blob trajectory postprocessing module. */ + /* If this field is NULL no postprocessing is done. */ + + int UsePPData; + + CvBlobTrackAnalysis* pBTA; /* Selected blob trajectory analysis module. */ + /* If this field is NULL no track analysis is done. */ +}; + +/* Create blob tracker auto ver1: */ +CV_EXPORTS CvBlobTrackerAuto* cvCreateBlobTrackerAuto1(CvBlobTrackerAutoParam1* param = NULL); + +/* Simple loader for many auto trackers by its type : */ +inline CvBlobTrackerAuto* cvCreateBlobTrackerAuto(int type, void* param) +{ + if(type == 0) return cvCreateBlobTrackerAuto1((CvBlobTrackerAutoParam1*)param); + return 0; +} + + + +struct CvTracksTimePos +{ + int len1,len2; + int beg1,beg2; + int end1,end2; + int comLen; //common length for two tracks + int shift1,shift2; +}; + +/*CV_EXPORTS int cvCompareTracks( CvBlobTrackSeq *groundTruth, + CvBlobTrackSeq *result, + FILE *file);*/ + + +/* Constructor functions: */ + +CV_EXPORTS void cvCreateTracks_One(CvBlobTrackSeq *TS); +CV_EXPORTS void cvCreateTracks_Same(CvBlobTrackSeq *TS1, CvBlobTrackSeq *TS2); +CV_EXPORTS void cvCreateTracks_AreaErr(CvBlobTrackSeq *TS1, CvBlobTrackSeq *TS2, int addW, int addH); + + +/* HIST API */ +class CV_EXPORTS CvProb +{ +public: + virtual ~CvProb() {}; + + /* Calculate probability value: */ + virtual double Value(int* /*comp*/, int /*x*/ = 0, int /*y*/ = 0){return -1;}; + + /* Update histograpp Pnew = (1-W)*Pold + W*Padd*/ + /* W weight of new added prob */ + /* comps - matrix of new fetature vectors used to update prob */ + virtual void AddFeature(float W, int* comps, int x =0, int y = 0) = 0; + virtual void Scale(float factor = 0, int x = -1, int y = -1) = 0; + virtual void Release() = 0; +}; +inline void cvReleaseProb(CvProb** ppProb){ppProb[0]->Release();ppProb[0]=NULL;} +/* HIST API */ + +/* Some Prob: */ +CV_EXPORTS CvProb* cvCreateProbS(int dim, CvSize size, int sample_num); +CV_EXPORTS CvProb* cvCreateProbMG(int dim, CvSize size, int sample_num); +CV_EXPORTS CvProb* cvCreateProbMG2(int dim, CvSize size, int sample_num); +CV_EXPORTS CvProb* cvCreateProbHist(int dim, CvSize size); + +#define CV_BT_HIST_TYPE_S 0 +#define CV_BT_HIST_TYPE_MG 1 +#define CV_BT_HIST_TYPE_MG2 2 +#define CV_BT_HIST_TYPE_H 3 +inline CvProb* cvCreateProb(int type, int dim, CvSize size = cvSize(1,1), void* /*param*/ = NULL) +{ + if(type == CV_BT_HIST_TYPE_S) return cvCreateProbS(dim, size, -1); + if(type == CV_BT_HIST_TYPE_MG) return cvCreateProbMG(dim, size, -1); + if(type == CV_BT_HIST_TYPE_MG2) return cvCreateProbMG2(dim, size, -1); + if(type == CV_BT_HIST_TYPE_H) return 
cvCreateProbHist(dim, size); + return NULL; +} + + + +/* Noise type definitions: */ +#define CV_NOISE_NONE 0 +#define CV_NOISE_GAUSSIAN 1 +#define CV_NOISE_UNIFORM 2 +#define CV_NOISE_SPECKLE 3 +#define CV_NOISE_SALT_AND_PEPPER 4 + +/* Add some noise to image: */ +/* pImg - (input) image without noise */ +/* pImg - (output) image with noise */ +/* noise_type - type of added noise */ +/* CV_NOISE_GAUSSIAN - pImg += n , n - is gaussian noise with Ampl standart deviation */ +/* CV_NOISE_UNIFORM - pImg += n , n - is uniform noise with Ampl standart deviation */ +/* CV_NOISE_SPECKLE - pImg += n*pImg , n - is gaussian noise with Ampl standart deviation */ +/* CV_NOISE_SALT_AND_PAPPER - pImg = pImg with blacked and whited pixels, + Ampl is density of brocken pixels (0-there are not broken pixels, 1 - all pixels are broken)*/ +/* Ampl - "amplitude" of noise */ +//CV_EXPORTS void cvAddNoise(IplImage* pImg, int noise_type, double Ampl, CvRNG* rnd_state = NULL); + +/*================== GENERATOR OF TEST VIDEO SEQUENCE ===================== */ +typedef void CvTestSeq; + +/* pConfigfile - Name of file (yml or xml) with description of test sequence */ +/* videos - array of names of test videos described in "pConfigfile" file */ +/* numvideos - size of "videos" array */ +CV_EXPORTS CvTestSeq* cvCreateTestSeq(char* pConfigfile, char** videos, int numvideo, float Scale = 1, int noise_type = CV_NOISE_NONE, double noise_ampl = 0); +CV_EXPORTS void cvReleaseTestSeq(CvTestSeq** ppTestSeq); + +/* Generate next frame from test video seq and return pointer to it: */ +CV_EXPORTS IplImage* cvTestSeqQueryFrame(CvTestSeq* pTestSeq); + +/* Return pointer to current foreground mask: */ +CV_EXPORTS IplImage* cvTestSeqGetFGMask(CvTestSeq* pTestSeq); + +/* Return pointer to current image: */ +CV_EXPORTS IplImage* cvTestSeqGetImage(CvTestSeq* pTestSeq); + +/* Return frame size of result test video: */ +CV_EXPORTS CvSize cvTestSeqGetImageSize(CvTestSeq* pTestSeq); + +/* Return number of frames result test video: */ +CV_EXPORTS int cvTestSeqFrameNum(CvTestSeq* pTestSeq); + +/* Return number of existing objects. + * This is general number of any objects. + * For example number of trajectories may be equal or less than returned value: + */ +CV_EXPORTS int cvTestSeqGetObjectNum(CvTestSeq* pTestSeq); + +/* Return 0 if there is not position for current defined on current frame */ +/* Return 1 if there is object position and pPos was filled */ +CV_EXPORTS int cvTestSeqGetObjectPos(CvTestSeq* pTestSeq, int ObjIndex, CvPoint2D32f* pPos); +CV_EXPORTS int cvTestSeqGetObjectSize(CvTestSeq* pTestSeq, int ObjIndex, CvPoint2D32f* pSize); + +/* Add noise to final image: */ +CV_EXPORTS void cvTestSeqAddNoise(CvTestSeq* pTestSeq, int noise_type = CV_NOISE_NONE, double noise_ampl = 0); + +/* Add Intensity variation: */ +CV_EXPORTS void cvTestSeqAddIntensityVariation(CvTestSeq* pTestSeq, float DI_per_frame, float MinI, float MaxI); +CV_EXPORTS void cvTestSeqSetFrame(CvTestSeq* pTestSeq, int n); + +#endif + +/* End of file. */ diff --git a/src/c/hardware/rasberrypi/libraries/opencv/opencv2/legacy/compat.hpp b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/legacy/compat.hpp new file mode 100644 index 0000000..5b5495e --- /dev/null +++ b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/legacy/compat.hpp @@ -0,0 +1,740 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. 
+// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// Intel License Agreement +// For Open Source Computer Vision Library +// +// Copyright( C) 2000, Intel Corporation, all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of Intel Corporation may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +//(including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort(including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +/* + A few macros and definitions for backward compatibility + with the previous versions of OpenCV. They are obsolete and + are likely to be removed in future. To check whether your code + uses any of these, define CV_NO_BACKWARD_COMPATIBILITY before + including cv.h. 
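As the comment above suggests, the quickest way to audit a code base for these obsolete names is to define the guard before pulling in cv.h, so that any remaining use of them shows up as a compile error (illustrative only):

#define CV_NO_BACKWARD_COMPATIBILITY   /* hide the compatibility aliases declared in this header */
#include <opencv/cv.h>                 /* uses of e.g. cvCvtPixToPlane or cvMatArray now fail to compile */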
+*/ + +#ifndef __OPENCV_COMPAT_HPP__ +#define __OPENCV_COMPAT_HPP__ + +#include "opencv2/core/core_c.h" +#include "opencv2/imgproc/types_c.h" + +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +typedef int CvMatType; +typedef int CvDisMaskType; +typedef CvMat CvMatArray; + +typedef int CvThreshType; +typedef int CvAdaptiveThreshMethod; +typedef int CvCompareMethod; +typedef int CvFontFace; +typedef int CvPolyApproxMethod; +typedef int CvContoursMatchMethod; +typedef int CvContourTreesMatchMethod; +typedef int CvCoeffType; +typedef int CvRodriguesType; +typedef int CvElementShape; +typedef int CvMorphOp; +typedef int CvTemplMatchMethod; + +typedef CvPoint2D64f CvPoint2D64d; +typedef CvPoint3D64f CvPoint3D64d; + +enum +{ + CV_MAT32F = CV_32FC1, + CV_MAT3x1_32F = CV_32FC1, + CV_MAT4x1_32F = CV_32FC1, + CV_MAT3x3_32F = CV_32FC1, + CV_MAT4x4_32F = CV_32FC1, + + CV_MAT64D = CV_64FC1, + CV_MAT3x1_64D = CV_64FC1, + CV_MAT4x1_64D = CV_64FC1, + CV_MAT3x3_64D = CV_64FC1, + CV_MAT4x4_64D = CV_64FC1 +}; + +enum +{ + IPL_GAUSSIAN_5x5 = 7 +}; + +typedef CvBox2D CvBox2D32f; + +/* allocation/deallocation macros */ +#define cvCreateImageData cvCreateData +#define cvReleaseImageData cvReleaseData +#define cvSetImageData cvSetData +#define cvGetImageRawData cvGetRawData + +#define cvmAlloc cvCreateData +#define cvmFree cvReleaseData +#define cvmAllocArray cvCreateData +#define cvmFreeArray cvReleaseData + +#define cvIntegralImage cvIntegral +#define cvMatchContours cvMatchShapes + +CV_EXPORTS CvMat cvMatArray( int rows, int cols, int type, + int count, void* data CV_DEFAULT(0)); + +#define cvUpdateMHIByTime cvUpdateMotionHistory + +#define cvAccMask cvAcc +#define cvSquareAccMask cvSquareAcc +#define cvMultiplyAccMask cvMultiplyAcc +#define cvRunningAvgMask(imgY, imgU, mask, alpha) cvRunningAvg(imgY, imgU, alpha, mask) + +#define cvSetHistThresh cvSetHistBinRanges +#define cvCalcHistMask(img, mask, hist, doNotClear) cvCalcHist(img, hist, doNotClear, mask) + +CV_EXPORTS double cvMean( const CvArr* image, const CvArr* mask CV_DEFAULT(0)); +CV_EXPORTS double cvSumPixels( const CvArr* image ); +CV_EXPORTS void cvMean_StdDev( const CvArr* image, double* mean, double* sdv, + const CvArr* mask CV_DEFAULT(0)); + +CV_EXPORTS void cvmPerspectiveProject( const CvMat* mat, const CvArr* src, CvArr* dst ); +CV_EXPORTS void cvFillImage( CvArr* mat, double color ); + +#define cvCvtPixToPlane cvSplit +#define cvCvtPlaneToPix cvMerge + +typedef struct CvRandState +{ + CvRNG state; /* RNG state (the current seed and carry)*/ + int disttype; /* distribution type */ + CvScalar param[2]; /* parameters of RNG */ +} CvRandState; + +/* Changes RNG range while preserving RNG state */ +CV_EXPORTS void cvRandSetRange( CvRandState* state, double param1, + double param2, int index CV_DEFAULT(-1)); + +CV_EXPORTS void cvRandInit( CvRandState* state, double param1, + double param2, int seed, + int disttype CV_DEFAULT(CV_RAND_UNI)); + +/* Fills array with random numbers */ +CV_EXPORTS void cvRand( CvRandState* state, CvArr* arr ); + +#define cvRandNext( _state ) cvRandInt( &(_state)->state ) + +CV_EXPORTS void cvbRand( CvRandState* state, float* dst, int len ); + +CV_EXPORTS void cvbCartToPolar( const float* y, const float* x, + float* magnitude, float* angle, int len ); +CV_EXPORTS void cvbFastArctan( const float* y, const float* x, float* angle, int len ); +CV_EXPORTS void cvbSqrt( const float* x, float* y, int len ); +CV_EXPORTS void cvbInvSqrt( const float* x, float* y, int len ); +CV_EXPORTS void cvbReciprocal( const 
float* x, float* y, int len ); +CV_EXPORTS void cvbFastExp( const float* x, double* y, int len ); +CV_EXPORTS void cvbFastLog( const double* x, float* y, int len ); + +CV_EXPORTS CvRect cvContourBoundingRect( void* point_set, int update CV_DEFAULT(0)); + +CV_EXPORTS double cvPseudoInverse( const CvArr* src, CvArr* dst ); +#define cvPseudoInv cvPseudoInverse + +#define cvContourMoments( contour, moments ) cvMoments( contour, moments, 0 ) + +#define cvGetPtrAt cvPtr2D +#define cvGetAt cvGet2D +#define cvSetAt(arr,val,y,x) cvSet2D((arr),(y),(x),(val)) + +#define cvMeanMask cvMean +#define cvMean_StdDevMask(img,mask,mean,sdv) cvMean_StdDev(img,mean,sdv,mask) + +#define cvNormMask(imgA,imgB,mask,normType) cvNorm(imgA,imgB,normType,mask) + +#define cvMinMaxLocMask(img, mask, min_val, max_val, min_loc, max_loc) \ + cvMinMaxLoc(img, min_val, max_val, min_loc, max_loc, mask) + +#define cvRemoveMemoryManager cvSetMemoryManager + +#define cvmSetZero( mat ) cvSetZero( mat ) +#define cvmSetIdentity( mat ) cvSetIdentity( mat ) +#define cvmAdd( src1, src2, dst ) cvAdd( src1, src2, dst, 0 ) +#define cvmSub( src1, src2, dst ) cvSub( src1, src2, dst, 0 ) +#define cvmCopy( src, dst ) cvCopy( src, dst, 0 ) +#define cvmMul( src1, src2, dst ) cvMatMulAdd( src1, src2, 0, dst ) +#define cvmTranspose( src, dst ) cvT( src, dst ) +#define cvmInvert( src, dst ) cvInv( src, dst ) +#define cvmMahalanobis(vec1, vec2, mat) cvMahalanobis( vec1, vec2, mat ) +#define cvmDotProduct( vec1, vec2 ) cvDotProduct( vec1, vec2 ) +#define cvmCrossProduct(vec1, vec2,dst) cvCrossProduct( vec1, vec2, dst ) +#define cvmTrace( mat ) (cvTrace( mat )).val[0] +#define cvmMulTransposed( src, dst, order ) cvMulTransposed( src, dst, order ) +#define cvmEigenVV( mat, evec, eval, eps) cvEigenVV( mat, evec, eval, eps ) +#define cvmDet( mat ) cvDet( mat ) +#define cvmScale( src, dst, scale ) cvScale( src, dst, scale ) + +#define cvCopyImage( src, dst ) cvCopy( src, dst, 0 ) +#define cvReleaseMatHeader cvReleaseMat + +/* Calculates exact convex hull of 2d point set */ +CV_EXPORTS void cvConvexHull( CvPoint* points, int num_points, + CvRect* bound_rect, + int orientation, int* hull, int* hullsize ); + + +CV_EXPORTS void cvMinAreaRect( CvPoint* points, int n, + int left, int bottom, + int right, int top, + CvPoint2D32f* anchor, + CvPoint2D32f* vect1, + CvPoint2D32f* vect2 ); + +typedef int CvDisType; +typedef int CvChainApproxMethod; +typedef int CvContourRetrievalMode; + +CV_EXPORTS void cvFitLine3D( CvPoint3D32f* points, int count, int dist, + void *param, float reps, float aeps, float* line ); + +/* Fits a line into set of 2d points in a robust way (M-estimator technique) */ +CV_EXPORTS void cvFitLine2D( CvPoint2D32f* points, int count, int dist, + void *param, float reps, float aeps, float* line ); + +CV_EXPORTS void cvFitEllipse( const CvPoint2D32f* points, int count, CvBox2D* box ); + +/* Projects 2d points to one of standard coordinate planes + (i.e. removes one of coordinates) */ +CV_EXPORTS void cvProject3D( CvPoint3D32f* points3D, int count, + CvPoint2D32f* points2D, + int xIndx CV_DEFAULT(0), + int yIndx CV_DEFAULT(1)); + +/* Retrieves value of the particular bin + of x-dimensional (x=1,2,3,...) 
histogram */ +#define cvQueryHistValue_1D( hist, idx0 ) \ + ((float)cvGetReal1D( (hist)->bins, (idx0))) +#define cvQueryHistValue_2D( hist, idx0, idx1 ) \ + ((float)cvGetReal2D( (hist)->bins, (idx0), (idx1))) +#define cvQueryHistValue_3D( hist, idx0, idx1, idx2 ) \ + ((float)cvGetReal3D( (hist)->bins, (idx0), (idx1), (idx2))) +#define cvQueryHistValue_nD( hist, idx ) \ + ((float)cvGetRealND( (hist)->bins, (idx))) + +/* Returns pointer to the particular bin of x-dimesional histogram. + For sparse histogram the bin is created if it didn't exist before */ +#define cvGetHistValue_1D( hist, idx0 ) \ + ((float*)cvPtr1D( (hist)->bins, (idx0), 0)) +#define cvGetHistValue_2D( hist, idx0, idx1 ) \ + ((float*)cvPtr2D( (hist)->bins, (idx0), (idx1), 0)) +#define cvGetHistValue_3D( hist, idx0, idx1, idx2 ) \ + ((float*)cvPtr3D( (hist)->bins, (idx0), (idx1), (idx2), 0)) +#define cvGetHistValue_nD( hist, idx ) \ + ((float*)cvPtrND( (hist)->bins, (idx), 0)) + + +#define CV_IS_SET_ELEM_EXISTS CV_IS_SET_ELEM + + +CV_EXPORTS int cvHoughLines( CvArr* image, double rho, + double theta, int threshold, + float* lines, int linesNumber ); + +CV_EXPORTS int cvHoughLinesP( CvArr* image, double rho, + double theta, int threshold, + int lineLength, int lineGap, + int* lines, int linesNumber ); + + +CV_EXPORTS int cvHoughLinesSDiv( CvArr* image, double rho, int srn, + double theta, int stn, int threshold, + float* lines, int linesNumber ); + +CV_EXPORTS float cvCalcEMD( const float* signature1, int size1, + const float* signature2, int size2, + int dims, int dist_type CV_DEFAULT(CV_DIST_L2), + CvDistanceFunction dist_func CV_DEFAULT(0), + float* lower_bound CV_DEFAULT(0), + void* user_param CV_DEFAULT(0)); + +CV_EXPORTS void cvKMeans( int num_clusters, float** samples, + int num_samples, int vec_size, + CvTermCriteria termcrit, int* cluster_idx ); + +CV_EXPORTS void cvStartScanGraph( CvGraph* graph, CvGraphScanner* scanner, + CvGraphVtx* vtx CV_DEFAULT(NULL), + int mask CV_DEFAULT(CV_GRAPH_ALL_ITEMS)); + +CV_EXPORTS void cvEndScanGraph( CvGraphScanner* scanner ); + + +/* old drawing functions */ +CV_EXPORTS void cvLineAA( CvArr* img, CvPoint pt1, CvPoint pt2, + double color, int scale CV_DEFAULT(0)); + +CV_EXPORTS void cvCircleAA( CvArr* img, CvPoint center, int radius, + double color, int scale CV_DEFAULT(0) ); + +CV_EXPORTS void cvEllipseAA( CvArr* img, CvPoint center, CvSize axes, + double angle, double start_angle, + double end_angle, double color, + int scale CV_DEFAULT(0) ); + +CV_EXPORTS void cvPolyLineAA( CvArr* img, CvPoint** pts, int* npts, int contours, + int is_closed, double color, int scale CV_DEFAULT(0) ); + +/****************************************************************************************\ +* Pixel Access Macros * +\****************************************************************************************/ + +typedef struct _CvPixelPosition8u +{ + uchar* currline; /* pointer to the start of the current pixel line */ + uchar* topline; /* pointer to the start of the top pixel line */ + uchar* bottomline; /* pointer to the start of the first line */ + /* which is below the image */ + int x; /* current x coordinate ( in pixels ) */ + int width; /* width of the image ( in pixels ) */ + int height; /* height of the image ( in pixels ) */ + int step; /* distance between lines ( in elements of single */ + /* plane ) */ + int step_arr[3]; /* array: ( 0, -step, step ). 
It is used for */ + /* vertical moving */ +} CvPixelPosition8u; + +/* this structure differs from the above only in data type */ +typedef struct _CvPixelPosition8s +{ + schar* currline; + schar* topline; + schar* bottomline; + int x; + int width; + int height; + int step; + int step_arr[3]; +} CvPixelPosition8s; + +/* this structure differs from the CvPixelPosition8u only in data type */ +typedef struct _CvPixelPosition32f +{ + float* currline; + float* topline; + float* bottomline; + int x; + int width; + int height; + int step; + int step_arr[3]; +} CvPixelPosition32f; + + +/* Initialize one of the CvPixelPosition structures. */ +/* pos - initialized structure */ +/* origin - pointer to the left-top corner of the ROI */ +/* step - width of the whole image in bytes */ +/* roi - width & height of the ROI */ +/* x, y - initial position */ +#define CV_INIT_PIXEL_POS(pos, origin, _step, roi, _x, _y, orientation) \ + ( \ + (pos).step = (_step)/sizeof((pos).currline[0]) * (orientation ? -1 : 1), \ + (pos).width = (roi).width, \ + (pos).height = (roi).height, \ + (pos).bottomline = (origin) + (pos).step*(pos).height, \ + (pos).topline = (origin) - (pos).step, \ + (pos).step_arr[0] = 0, \ + (pos).step_arr[1] = -(pos).step, \ + (pos).step_arr[2] = (pos).step, \ + (pos).x = (_x), \ + (pos).currline = (origin) + (pos).step*(_y) ) + + +/* Move to specified point ( absolute shift ) */ +/* pos - position structure */ +/* x, y - coordinates of the new position */ +/* cs - number of the image channels */ +#define CV_MOVE_TO( pos, _x, _y, cs ) \ +((pos).currline = (_y) >= 0 && (_y) < (pos).height ? (pos).topline + ((_y)+1)*(pos).step : 0, \ + (pos).x = (_x) >= 0 && (_x) < (pos).width ? (_x) : 0, (pos).currline + (_x) * (cs) ) + +/* Get current coordinates */ +/* pos - position structure */ +/* x, y - coordinates of the new position */ +/* cs - number of the image channels */ +#define CV_GET_CURRENT( pos, cs ) ((pos).currline + (pos).x * (cs)) + +/* Move by one pixel relatively to current position */ +/* pos - position structure */ +/* cs - number of the image channels */ + +/* left */ +#define CV_MOVE_LEFT( pos, cs ) \ + ( --(pos).x >= 0 ? (pos).currline + (pos).x*(cs) : 0 ) + +/* right */ +#define CV_MOVE_RIGHT( pos, cs ) \ + ( ++(pos).x < (pos).width ? (pos).currline + (pos).x*(cs) : 0 ) + +/* up */ +#define CV_MOVE_UP( pos, cs ) \ + (((pos).currline -= (pos).step) != (pos).topline ? (pos).currline + (pos).x*(cs) : 0 ) + +/* down */ +#define CV_MOVE_DOWN( pos, cs ) \ + (((pos).currline += (pos).step) != (pos).bottomline ? (pos).currline + (pos).x*(cs) : 0 ) + +/* left up */ +#define CV_MOVE_LU( pos, cs ) ( CV_MOVE_LEFT(pos, cs), CV_MOVE_UP(pos, cs)) + +/* right up */ +#define CV_MOVE_RU( pos, cs ) ( CV_MOVE_RIGHT(pos, cs), CV_MOVE_UP(pos, cs)) + +/* left down */ +#define CV_MOVE_LD( pos, cs ) ( CV_MOVE_LEFT(pos, cs), CV_MOVE_DOWN(pos, cs)) + +/* right down */ +#define CV_MOVE_RD( pos, cs ) ( CV_MOVE_RIGHT(pos, cs), CV_MOVE_DOWN(pos, cs)) + + + +/* Move by one pixel relatively to current position with wrapping when the position */ +/* achieves image boundary */ +/* pos - position structure */ +/* cs - number of the image channels */ + +/* left */ +#define CV_MOVE_LEFT_WRAP( pos, cs ) \ + ((pos).currline + ( --(pos).x >= 0 ? (pos).x : ((pos).x = (pos).width-1))*(cs)) + +/* right */ +#define CV_MOVE_RIGHT_WRAP( pos, cs ) \ + ((pos).currline + ( ++(pos).x < (pos).width ? (pos).x : ((pos).x = 0))*(cs) ) + +/* up */ +#define CV_MOVE_UP_WRAP( pos, cs ) \ + ((((pos).currline -= (pos).step) != (pos).topline ? 
\ + (pos).currline : ((pos).currline = (pos).bottomline - (pos).step)) + (pos).x*(cs) ) + +/* down */ +#define CV_MOVE_DOWN_WRAP( pos, cs ) \ + ((((pos).currline += (pos).step) != (pos).bottomline ? \ + (pos).currline : ((pos).currline = (pos).topline + (pos).step)) + (pos).x*(cs) ) + +/* left up */ +#define CV_MOVE_LU_WRAP( pos, cs ) ( CV_MOVE_LEFT_WRAP(pos, cs), CV_MOVE_UP_WRAP(pos, cs)) +/* right up */ +#define CV_MOVE_RU_WRAP( pos, cs ) ( CV_MOVE_RIGHT_WRAP(pos, cs), CV_MOVE_UP_WRAP(pos, cs)) +/* left down */ +#define CV_MOVE_LD_WRAP( pos, cs ) ( CV_MOVE_LEFT_WRAP(pos, cs), CV_MOVE_DOWN_WRAP(pos, cs)) +/* right down */ +#define CV_MOVE_RD_WRAP( pos, cs ) ( CV_MOVE_RIGHT_WRAP(pos, cs), CV_MOVE_DOWN_WRAP(pos, cs)) + +/* Numeric constants which are used for moving in an arbitrary direction */ +enum +{ + CV_SHIFT_NONE = 2, + CV_SHIFT_LEFT = 1, + CV_SHIFT_RIGHT = 3, + CV_SHIFT_UP = 6, + CV_SHIFT_DOWN = 10, + CV_SHIFT_LU = 5, + CV_SHIFT_RU = 7, + CV_SHIFT_LD = 9, + CV_SHIFT_RD = 11 +}; + +/* Move by one pixel in specified direction */ +/* pos - position structure */ +/* shift - direction ( its value must be one of the CV_SHIFT_* constants ) */ +/* cs - number of the image channels */ +#define CV_MOVE_PARAM( pos, shift, cs ) \ + ( (pos).currline += (pos).step_arr[(shift)>>2], (pos).x += ((shift)&3)-2, \ + ((pos).currline != (pos).topline && (pos).currline != (pos).bottomline && \ + (pos).x >= 0 && (pos).x < (pos).width) ? (pos).currline + (pos).x*(cs) : 0 ) + +/* Move by one pixel in specified direction with wrapping when the */ +/* position achieves image boundary */ +/* pos - position structure */ +/* shift - direction ( its value must be one of the CV_SHIFT_* constants ) */ +/* cs - number of the image channels */ +#define CV_MOVE_PARAM_WRAP( pos, shift, cs ) \ + ( (pos).currline += (pos).step_arr[(shift)>>2], \ + (pos).currline = ((pos).currline == (pos).topline ? \ + (pos).bottomline - (pos).step : \ + (pos).currline == (pos).bottomline ? \ + (pos).topline + (pos).step : (pos).currline), \ + \ + (pos).x += ((shift)&3)-2, \ + (pos).x = ((pos).x < 0 ? (pos).width-1 : (pos).x >= (pos).width ?
0 : (pos).x), \ + \ + (pos).currline + (pos).x*(cs) ) + + +typedef float* CvVect32f; +typedef float* CvMatr32f; +typedef double* CvVect64d; +typedef double* CvMatr64d; + +CV_EXPORTS void cvUnDistortOnce( const CvArr* src, CvArr* dst, + const float* intrinsic_matrix, + const float* distortion_coeffs, + int interpolate ); + +/* the two functions below have quite hackerish implementations, use with care + (or, which is better, switch to cvUndistortInitMap and cvRemap instead */ +CV_EXPORTS void cvUnDistortInit( const CvArr* src, + CvArr* undistortion_map, + const float* A, const float* k, + int interpolate ); + +CV_EXPORTS void cvUnDistort( const CvArr* src, CvArr* dst, + const CvArr* undistortion_map, + int interpolate ); + +/* Find fundamental matrix */ +CV_EXPORTS void cvFindFundamentalMatrix( int* points1, int* points2, + int numpoints, int method, float* matrix ); + + +CV_EXPORTS int cvFindChessBoardCornerGuesses( const void* arr, void* thresharr, + CvMemStorage* storage, + CvSize pattern_size, CvPoint2D32f * corners, + int *corner_count ); + +/* Calibrates camera using multiple views of calibration pattern */ +CV_EXPORTS void cvCalibrateCamera( int image_count, int* _point_counts, + CvSize image_size, CvPoint2D32f* _image_points, CvPoint3D32f* _object_points, + float* _distortion_coeffs, float* _camera_matrix, float* _translation_vectors, + float* _rotation_matrices, int flags ); + + +CV_EXPORTS void cvCalibrateCamera_64d( int image_count, int* _point_counts, + CvSize image_size, CvPoint2D64f* _image_points, CvPoint3D64f* _object_points, + double* _distortion_coeffs, double* _camera_matrix, double* _translation_vectors, + double* _rotation_matrices, int flags ); + + +/* Find 3d position of object given intrinsic camera parameters, + 3d model of the object and projection of the object into view plane */ +CV_EXPORTS void cvFindExtrinsicCameraParams( int point_count, + CvSize image_size, CvPoint2D32f* _image_points, + CvPoint3D32f* _object_points, float* focal_length, + CvPoint2D32f principal_point, float* _distortion_coeffs, + float* _rotation_vector, float* _translation_vector ); + +/* Variant of the previous function that takes double-precision parameters */ +CV_EXPORTS void cvFindExtrinsicCameraParams_64d( int point_count, + CvSize image_size, CvPoint2D64f* _image_points, + CvPoint3D64f* _object_points, double* focal_length, + CvPoint2D64f principal_point, double* _distortion_coeffs, + double* _rotation_vector, double* _translation_vector ); + +/* Rodrigues transform */ +enum +{ + CV_RODRIGUES_M2V = 0, + CV_RODRIGUES_V2M = 1 +}; + +/* Converts rotation_matrix matrix to rotation_matrix vector or vice versa */ +CV_EXPORTS void cvRodrigues( CvMat* rotation_matrix, CvMat* rotation_vector, + CvMat* jacobian, int conv_type ); + +/* Does reprojection of 3d object points to the view plane */ +CV_EXPORTS void cvProjectPoints( int point_count, CvPoint3D64f* _object_points, + double* _rotation_vector, double* _translation_vector, + double* focal_length, CvPoint2D64f principal_point, + double* _distortion, CvPoint2D64f* _image_points, + double* _deriv_points_rotation_matrix, + double* _deriv_points_translation_vect, + double* _deriv_points_focal, + double* _deriv_points_principal_point, + double* _deriv_points_distortion_coeffs ); + + +/* Simpler version of the previous function */ +CV_EXPORTS void cvProjectPointsSimple( int point_count, CvPoint3D64f* _object_points, + double* _rotation_matrix, double* _translation_vector, + double* _camera_matrix, double* _distortion, CvPoint2D64f* _image_points 
); + + +#define cvMake2DPoints cvConvertPointsHomogeneous +#define cvMake3DPoints cvConvertPointsHomogeneous + +#define cvWarpPerspectiveQMatrix cvGetPerspectiveTransform + +#define cvConvertPointsHomogenious cvConvertPointsHomogeneous + + +//////////////////////////////////// feature extractors: obsolete API ////////////////////////////////// + +typedef struct CvSURFPoint +{ + CvPoint2D32f pt; + + int laplacian; + int size; + float dir; + float hessian; + +} CvSURFPoint; + +CV_INLINE CvSURFPoint cvSURFPoint( CvPoint2D32f pt, int laplacian, + int size, float dir CV_DEFAULT(0), + float hessian CV_DEFAULT(0)) +{ + CvSURFPoint kp; + + kp.pt = pt; + kp.laplacian = laplacian; + kp.size = size; + kp.dir = dir; + kp.hessian = hessian; + + return kp; +} + +typedef struct CvSURFParams +{ + int extended; + int upright; + double hessianThreshold; + + int nOctaves; + int nOctaveLayers; + +} CvSURFParams; + +CVAPI(CvSURFParams) cvSURFParams( double hessianThreshold, int extended CV_DEFAULT(0) ); + +// If useProvidedKeyPts!=0, keypoints are not detected, but descriptors are computed +// at the locations provided in keypoints (a CvSeq of CvSURFPoint). +CVAPI(void) cvExtractSURF( const CvArr* img, const CvArr* mask, + CvSeq** keypoints, CvSeq** descriptors, + CvMemStorage* storage, CvSURFParams params, + int useProvidedKeyPts CV_DEFAULT(0) ); + +/*! + Maximal Stable Regions Parameters + */ +typedef struct CvMSERParams +{ + //! delta, in the code, it compares (size_{i}-size_{i-delta})/size_{i-delta} + int delta; + //! prune the area which bigger than maxArea + int maxArea; + //! prune the area which smaller than minArea + int minArea; + //! prune the area have simliar size to its children + float maxVariation; + //! trace back to cut off mser with diversity < min_diversity + float minDiversity; + + /////// the next few params for MSER of color image + + //! for color image, the evolution steps + int maxEvolution; + //! the area threshold to cause re-initialize + double areaThreshold; + //! ignore too small margin + double minMargin; + //! 
the aperture size for edge blur + int edgeBlurSize; +} CvMSERParams; + +CVAPI(CvMSERParams) cvMSERParams( int delta CV_DEFAULT(5), int min_area CV_DEFAULT(60), + int max_area CV_DEFAULT(14400), float max_variation CV_DEFAULT(.25f), + float min_diversity CV_DEFAULT(.2f), int max_evolution CV_DEFAULT(200), + double area_threshold CV_DEFAULT(1.01), + double min_margin CV_DEFAULT(.003), + int edge_blur_size CV_DEFAULT(5) ); + +// Extracts the contours of Maximally Stable Extremal Regions +CVAPI(void) cvExtractMSER( CvArr* _img, CvArr* _mask, CvSeq** contours, CvMemStorage* storage, CvMSERParams params ); + + +typedef struct CvStarKeypoint +{ + CvPoint pt; + int size; + float response; +} CvStarKeypoint; + +CV_INLINE CvStarKeypoint cvStarKeypoint(CvPoint pt, int size, float response) +{ + CvStarKeypoint kpt; + kpt.pt = pt; + kpt.size = size; + kpt.response = response; + return kpt; +} + +typedef struct CvStarDetectorParams +{ + int maxSize; + int responseThreshold; + int lineThresholdProjected; + int lineThresholdBinarized; + int suppressNonmaxSize; +} CvStarDetectorParams; + +CV_INLINE CvStarDetectorParams cvStarDetectorParams( + int maxSize CV_DEFAULT(45), + int responseThreshold CV_DEFAULT(30), + int lineThresholdProjected CV_DEFAULT(10), + int lineThresholdBinarized CV_DEFAULT(8), + int suppressNonmaxSize CV_DEFAULT(5)) +{ + CvStarDetectorParams params; + params.maxSize = maxSize; + params.responseThreshold = responseThreshold; + params.lineThresholdProjected = lineThresholdProjected; + params.lineThresholdBinarized = lineThresholdBinarized; + params.suppressNonmaxSize = suppressNonmaxSize; + + return params; +} + +CVAPI(CvSeq*) cvGetStarKeypoints( const CvArr* img, CvMemStorage* storage, + CvStarDetectorParams params CV_DEFAULT(cvStarDetectorParams())); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/src/c/hardware/rasberrypi/libraries/opencv/opencv2/legacy/legacy.hpp b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/legacy/legacy.hpp new file mode 100644 index 0000000..96da25c --- /dev/null +++ b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/legacy/legacy.hpp @@ -0,0 +1,3436 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// Intel License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000, Intel Corporation, all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of Intel Corporation may not be used to endorse or promote products +// derived from this software without specific prior written permission. 
+// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef __OPENCV_LEGACY_HPP__ +#define __OPENCV_LEGACY_HPP__ + +#include "opencv2/imgproc/imgproc.hpp" +#include "opencv2/imgproc/imgproc_c.h" +#include "opencv2/features2d/features2d.hpp" +#include "opencv2/calib3d/calib3d.hpp" +#include "opencv2/ml/ml.hpp" + +#ifdef __cplusplus +extern "C" { +#endif + +CVAPI(CvSeq*) cvSegmentImage( const CvArr* srcarr, CvArr* dstarr, + double canny_threshold, + double ffill_threshold, + CvMemStorage* storage ); + +/****************************************************************************************\ +* Eigen objects * +\****************************************************************************************/ + +typedef int (CV_CDECL * CvCallback)(int index, void* buffer, void* user_data); +typedef union +{ + CvCallback callback; + void* data; +} +CvInput; + +#define CV_EIGOBJ_NO_CALLBACK 0 +#define CV_EIGOBJ_INPUT_CALLBACK 1 +#define CV_EIGOBJ_OUTPUT_CALLBACK 2 +#define CV_EIGOBJ_BOTH_CALLBACK 3 + +/* Calculates covariation matrix of a set of arrays */ +CVAPI(void) cvCalcCovarMatrixEx( int nObjects, void* input, int ioFlags, + int ioBufSize, uchar* buffer, void* userData, + IplImage* avg, float* covarMatrix ); + +/* Calculates eigen values and vectors of covariation matrix of a set of + arrays */ +CVAPI(void) cvCalcEigenObjects( int nObjects, void* input, void* output, + int ioFlags, int ioBufSize, void* userData, + CvTermCriteria* calcLimit, IplImage* avg, + float* eigVals ); + +/* Calculates dot product (obj - avg) * eigObj (i.e. 
projects image to eigen vector) */ +CVAPI(double) cvCalcDecompCoeff( IplImage* obj, IplImage* eigObj, IplImage* avg ); + +/* Projects image to eigen space (finds all decomposition coefficients) */ +CVAPI(void) cvEigenDecomposite( IplImage* obj, int nEigObjs, void* eigInput, + int ioFlags, void* userData, IplImage* avg, + float* coeffs ); + +/* Projects original objects used to calculate eigen space basis to that space */ +CVAPI(void) cvEigenProjection( void* eigInput, int nEigObjs, int ioFlags, + void* userData, float* coeffs, IplImage* avg, + IplImage* proj ); + +/****************************************************************************************\ +* 1D/2D HMM * +\****************************************************************************************/ + +typedef struct CvImgObsInfo +{ + int obs_x; + int obs_y; + int obs_size; + float* obs;//consecutive observations + + int* state;/* arr of pairs superstate/state to which observation belongs */ + int* mix; /* number of mixture to which observation belongs */ + +} CvImgObsInfo;/*struct for 1 image*/ + +typedef CvImgObsInfo Cv1DObsInfo; + +typedef struct CvEHMMState +{ + int num_mix; /*number of mixtures in this state*/ + float* mu; /*mean vectors corresponding to each mixture*/ + float* inv_var; /* square root of inverse variances corresp. to each mixture*/ + float* log_var_val; /* sum of 0.5 (LN2PI + ln(variance[i]) ) for i=1,n */ + float* weight; /*array of mixture weights. Sum of all weights in state is 1. */ + +} CvEHMMState; + +typedef struct CvEHMM +{ + int level; /* 0 - lowest (i.e. its states are real states), ..... */ + int num_states; /* number of HMM states */ + float* transP;/*transition probab. matrices for states */ + float** obsProb; /* if level == 0 - array of prob matrices corresponding to hmm + if level == 1 - matrix of matrices */ + union + { + CvEHMMState* state; /* if level == 0 points to real states array, + if not - points to embedded hmms */ + struct CvEHMM* ehmm; /* pointer to an embedded model or NULL, if it is a leaf */ + } u; + +} CvEHMM; + +/*CVAPI(int) icvCreate1DHMM( CvEHMM** this_hmm, + int state_number, int* num_mix, int obs_size ); + +CVAPI(int) icvRelease1DHMM( CvEHMM** phmm ); + +CVAPI(int) icvUniform1DSegm( Cv1DObsInfo* obs_info, CvEHMM* hmm ); + +CVAPI(int) icvInit1DMixSegm( Cv1DObsInfo** obs_info_array, int num_img, CvEHMM* hmm); + +CVAPI(int) icvEstimate1DHMMStateParams( CvImgObsInfo** obs_info_array, int num_img, CvEHMM* hmm); + +CVAPI(int) icvEstimate1DObsProb( CvImgObsInfo* obs_info, CvEHMM* hmm ); + +CVAPI(int) icvEstimate1DTransProb( Cv1DObsInfo** obs_info_array, + int num_seq, + CvEHMM* hmm ); + +CVAPI(float) icvViterbi( Cv1DObsInfo* obs_info, CvEHMM* hmm); + +CVAPI(int) icv1DMixSegmL2( CvImgObsInfo** obs_info_array, int num_img, CvEHMM* hmm );*/ + +/*********************************** Embedded HMMs *************************************/ + +/* Creates 2D HMM */ +CVAPI(CvEHMM*) cvCreate2DHMM( int* stateNumber, int* numMix, int obsSize ); + +/* Releases HMM */ +CVAPI(void) cvRelease2DHMM( CvEHMM** hmm ); + +#define CV_COUNT_OBS(roi, win, delta, numObs ) \ +{ \ + (numObs)->width =((roi)->width -(win)->width +(delta)->width)/(delta)->width; \ + (numObs)->height =((roi)->height -(win)->height +(delta)->height)/(delta)->height;\ +} + +/* Creates storage for observation vectors */ +CVAPI(CvImgObsInfo*) cvCreateObsInfo( CvSize numObs, int obsSize ); + +/* Releases storage for observation vectors */ +CVAPI(void) cvReleaseObsInfo( CvImgObsInfo** obs_info ); + + +/* The function takes an image on input
and and returns the sequnce of observations + to be used with an embedded HMM; Each observation is top-left block of DCT + coefficient matrix */ +CVAPI(void) cvImgToObs_DCT( const CvArr* arr, float* obs, CvSize dctSize, + CvSize obsSize, CvSize delta ); + + +/* Uniformly segments all observation vectors extracted from image */ +CVAPI(void) cvUniformImgSegm( CvImgObsInfo* obs_info, CvEHMM* ehmm ); + +/* Does mixture segmentation of the states of embedded HMM */ +CVAPI(void) cvInitMixSegm( CvImgObsInfo** obs_info_array, + int num_img, CvEHMM* hmm ); + +/* Function calculates means, variances, weights of every Gaussian mixture + of every low-level state of embedded HMM */ +CVAPI(void) cvEstimateHMMStateParams( CvImgObsInfo** obs_info_array, + int num_img, CvEHMM* hmm ); + +/* Function computes transition probability matrices of embedded HMM + given observations segmentation */ +CVAPI(void) cvEstimateTransProb( CvImgObsInfo** obs_info_array, + int num_img, CvEHMM* hmm ); + +/* Function computes probabilities of appearing observations at any state + (i.e. computes P(obs|state) for every pair(obs,state)) */ +CVAPI(void) cvEstimateObsProb( CvImgObsInfo* obs_info, + CvEHMM* hmm ); + +/* Runs Viterbi algorithm for embedded HMM */ +CVAPI(float) cvEViterbi( CvImgObsInfo* obs_info, CvEHMM* hmm ); + + +/* Function clusters observation vectors from several images + given observations segmentation. + Euclidean distance used for clustering vectors. + Centers of clusters are given means of every mixture */ +CVAPI(void) cvMixSegmL2( CvImgObsInfo** obs_info_array, + int num_img, CvEHMM* hmm ); + +/****************************************************************************************\ +* A few functions from old stereo gesture recognition demosions * +\****************************************************************************************/ + +/* Creates hand mask image given several points on the hand */ +CVAPI(void) cvCreateHandMask( CvSeq* hand_points, + IplImage *img_mask, CvRect *roi); + +/* Finds hand region in range image data */ +CVAPI(void) cvFindHandRegion (CvPoint3D32f* points, int count, + CvSeq* indexs, + float* line, CvSize2D32f size, int flag, + CvPoint3D32f* center, + CvMemStorage* storage, CvSeq **numbers); + +/* Finds hand region in range image data (advanced version) */ +CVAPI(void) cvFindHandRegionA( CvPoint3D32f* points, int count, + CvSeq* indexs, + float* line, CvSize2D32f size, int jc, + CvPoint3D32f* center, + CvMemStorage* storage, CvSeq **numbers); + +/* Calculates the cooficients of the homography matrix */ +CVAPI(void) cvCalcImageHomography( float* line, CvPoint3D32f* center, + float* intrinsic, float* homography ); + +/****************************************************************************************\ +* More operations on sequences * +\****************************************************************************************/ + +/*****************************************************************************************/ + +#define CV_CURRENT_INT( reader ) (*((int *)(reader).ptr)) +#define CV_PREV_INT( reader ) (*((int *)(reader).prev_elem)) + +#define CV_GRAPH_WEIGHTED_VERTEX_FIELDS() CV_GRAPH_VERTEX_FIELDS()\ + float weight; + +#define CV_GRAPH_WEIGHTED_EDGE_FIELDS() CV_GRAPH_EDGE_FIELDS() + +typedef struct CvGraphWeightedVtx +{ + CV_GRAPH_WEIGHTED_VERTEX_FIELDS() +} CvGraphWeightedVtx; + +typedef struct CvGraphWeightedEdge +{ + CV_GRAPH_WEIGHTED_EDGE_FIELDS() +} CvGraphWeightedEdge; + +typedef enum CvGraphWeightType +{ + CV_NOT_WEIGHTED, + CV_WEIGHTED_VTX, + 
CV_WEIGHTED_EDGE, + CV_WEIGHTED_ALL +} CvGraphWeightType; + + +/* Calculates histogram of a contour */ +CVAPI(void) cvCalcPGH( const CvSeq* contour, CvHistogram* hist ); + +#define CV_DOMINANT_IPAN 1 + +/* Finds high-curvature points of the contour */ +CVAPI(CvSeq*) cvFindDominantPoints( CvSeq* contour, CvMemStorage* storage, + int method CV_DEFAULT(CV_DOMINANT_IPAN), + double parameter1 CV_DEFAULT(0), + double parameter2 CV_DEFAULT(0), + double parameter3 CV_DEFAULT(0), + double parameter4 CV_DEFAULT(0)); + +/*****************************************************************************************/ + + +/*******************************Stereo correspondence*************************************/ + +typedef struct CvCliqueFinder +{ + CvGraph* graph; + int** adj_matr; + int N; //graph size + + // stacks, counters etc/ + int k; //stack size + int* current_comp; + int** All; + + int* ne; + int* ce; + int* fixp; //node with minimal disconnections + int* nod; + int* s; //for selected candidate + int status; + int best_score; + int weighted; + int weighted_edges; + float best_weight; + float* edge_weights; + float* vertex_weights; + float* cur_weight; + float* cand_weight; + +} CvCliqueFinder; + +#define CLIQUE_TIME_OFF 2 +#define CLIQUE_FOUND 1 +#define CLIQUE_END 0 + +/*CVAPI(void) cvStartFindCliques( CvGraph* graph, CvCliqueFinder* finder, int reverse, + int weighted CV_DEFAULT(0), int weighted_edges CV_DEFAULT(0)); +CVAPI(int) cvFindNextMaximalClique( CvCliqueFinder* finder, int* clock_rest CV_DEFAULT(0) ); +CVAPI(void) cvEndFindCliques( CvCliqueFinder* finder ); + +CVAPI(void) cvBronKerbosch( CvGraph* graph );*/ + + +/*F/////////////////////////////////////////////////////////////////////////////////////// +// +// Name: cvSubgraphWeight +// Purpose: finds weight of subgraph in a graph +// Context: +// Parameters: +// graph - input graph. +// subgraph - sequence of pairwise different ints. These are indices of vertices of subgraph. +// weight_type - describes the way we measure weight. +// one of the following: +// CV_NOT_WEIGHTED - weight of a clique is simply its size +// CV_WEIGHTED_VTX - weight of a clique is the sum of weights of its vertices +// CV_WEIGHTED_EDGE - the same but edges +// CV_WEIGHTED_ALL - the same but both edges and vertices +// weight_vtx - optional vector of floats, with size = graph->total. +// If weight_type is either CV_WEIGHTED_VTX or CV_WEIGHTED_ALL +// weights of vertices must be provided. If weight_vtx not zero +// these weights considered to be here, otherwise function assumes +// that vertices of graph are inherited from CvGraphWeightedVtx. +// weight_edge - optional matrix of floats, of width and height = graph->total. +// If weight_type is either CV_WEIGHTED_EDGE or CV_WEIGHTED_ALL +// weights of edges ought to be supplied. If weight_edge is not zero +// function finds them here, otherwise function expects +// edges of graph to be inherited from CvGraphWeightedEdge. +// If this parameter is not zero structure of the graph is determined from matrix +// rather than from CvGraphEdge's. In particular, elements corresponding to +// absent edges should be zero. +// Returns: +// weight of subgraph. 
+// Notes: +//F*/ +/*CVAPI(float) cvSubgraphWeight( CvGraph *graph, CvSeq *subgraph, + CvGraphWeightType weight_type CV_DEFAULT(CV_NOT_WEIGHTED), + CvVect32f weight_vtx CV_DEFAULT(0), + CvMatr32f weight_edge CV_DEFAULT(0) );*/ + + +/*F/////////////////////////////////////////////////////////////////////////////////////// +// +// Name: cvFindCliqueEx +// Purpose: tries to find clique with maximum possible weight in a graph +// Context: +// Parameters: +// graph - input graph. +// storage - memory storage to be used by the result. +// is_complementary - optional flag showing whether function should seek for clique +// in complementary graph. +// weight_type - describes our notion about weight. +// one of the following: +// CV_NOT_WEIGHTED - weight of a clique is simply its size +// CV_WEIGHTED_VTX - weight of a clique is the sum of weights of its vertices +// CV_WEIGHTED_EDGE - the same but edges +// CV_WEIGHTED_ALL - the same but both edges and vertices +// weight_vtx - optional vector of floats, with size = graph->total. +// If weight_type is either CV_WEIGHTED_VTX or CV_WEIGHTED_ALL +// weights of vertices must be provided. If weight_vtx not zero +// these weights considered to be here, otherwise function assumes +// that vertices of graph are inherited from CvGraphWeightedVtx. +// weight_edge - optional matrix of floats, of width and height = graph->total. +// If weight_type is either CV_WEIGHTED_EDGE or CV_WEIGHTED_ALL +// weights of edges ought to be supplied. If weight_edge is not zero +// function finds them here, otherwise function expects +// edges of graph to be inherited from CvGraphWeightedEdge. +// Note that in case of CV_WEIGHTED_EDGE or CV_WEIGHTED_ALL +// nonzero is_complementary implies nonzero weight_edge. +// start_clique - optional sequence of pairwise different ints. They are indices of +// vertices that shall be present in the output clique. +// subgraph_of_ban - optional sequence of (maybe equal) ints. They are indices of +// vertices that shall not be present in the output clique. +// clique_weight_ptr - optional output parameter. Weight of found clique stored here. +// num_generations - optional number of generations in evolutionary part of algorithm, +// zero forces to return first found clique. +// quality - optional parameter determining degree of required quality/speed tradeoff. +// Must be in the range from 0 to 9. +// 0 is fast and dirty, 9 is slow but hopefully yields good clique. +// Returns: +// sequence of pairwise different ints. +// These are indices of vertices that form found clique. +// Notes: +// in cases of CV_WEIGHTED_EDGE and CV_WEIGHTED_ALL weights should be nonnegative. +// start_clique has a priority over subgraph_of_ban. 
+//F*/ +/*CVAPI(CvSeq*) cvFindCliqueEx( CvGraph *graph, CvMemStorage *storage, + int is_complementary CV_DEFAULT(0), + CvGraphWeightType weight_type CV_DEFAULT(CV_NOT_WEIGHTED), + CvVect32f weight_vtx CV_DEFAULT(0), + CvMatr32f weight_edge CV_DEFAULT(0), + CvSeq *start_clique CV_DEFAULT(0), + CvSeq *subgraph_of_ban CV_DEFAULT(0), + float *clique_weight_ptr CV_DEFAULT(0), + int num_generations CV_DEFAULT(3), + int quality CV_DEFAULT(2) );*/ + + +#define CV_UNDEF_SC_PARAM 12345 //default value of parameters + +#define CV_IDP_BIRCHFIELD_PARAM1 25 +#define CV_IDP_BIRCHFIELD_PARAM2 5 +#define CV_IDP_BIRCHFIELD_PARAM3 12 +#define CV_IDP_BIRCHFIELD_PARAM4 15 +#define CV_IDP_BIRCHFIELD_PARAM5 25 + + +#define CV_DISPARITY_BIRCHFIELD 0 + + +/*F/////////////////////////////////////////////////////////////////////////// +// +// Name: cvFindStereoCorrespondence +// Purpose: find stereo correspondence on stereo-pair +// Context: +// Parameters: +// leftImage - left image of stereo-pair (format 8uC1). +// rightImage - right image of stereo-pair (format 8uC1). +// mode - mode of correspondence retrieval (now CV_DISPARITY_BIRCHFIELD only) +// dispImage - destination disparity image +// maxDisparity - maximal disparity +// param1, param2, param3, param4, param5 - parameters of algorithm +// Returns: +// Notes: +// Images must be rectified. +// All images must have format 8uC1. +//F*/ +CVAPI(void) +cvFindStereoCorrespondence( + const CvArr* leftImage, const CvArr* rightImage, + int mode, + CvArr* dispImage, + int maxDisparity, + double param1 CV_DEFAULT(CV_UNDEF_SC_PARAM), + double param2 CV_DEFAULT(CV_UNDEF_SC_PARAM), + double param3 CV_DEFAULT(CV_UNDEF_SC_PARAM), + double param4 CV_DEFAULT(CV_UNDEF_SC_PARAM), + double param5 CV_DEFAULT(CV_UNDEF_SC_PARAM) ); + +/*****************************************************************************************/ +/************ Epiline functions *******************/ + + + +typedef struct CvStereoLineCoeff +{ + double Xcoef; + double XcoefA; + double XcoefB; + double XcoefAB; + + double Ycoef; + double YcoefA; + double YcoefB; + double YcoefAB; + + double Zcoef; + double ZcoefA; + double ZcoefB; + double ZcoefAB; +}CvStereoLineCoeff; + + +typedef struct CvCamera +{ + float imgSize[2]; /* size of the camera view, used during calibration */ + float matrix[9]; /* intinsic camera parameters: [ fx 0 cx; 0 fy cy; 0 0 1 ] */ + float distortion[4]; /* distortion coefficients - two coefficients for radial distortion + and another two for tangential: [ k1 k2 p1 p2 ] */ + float rotMatr[9]; + float transVect[3]; /* rotation matrix and transition vector relatively + to some reference point in the space. 
*/ +} CvCamera; + +typedef struct CvStereoCamera +{ + CvCamera* camera[2]; /* two individual camera parameters */ + float fundMatr[9]; /* fundamental matrix */ + + /* New part for stereo */ + CvPoint3D32f epipole[2]; + CvPoint2D32f quad[2][4]; /* coordinates of destination quadrangle after + epipolar geometry rectification */ + double coeffs[2][3][3];/* coefficients for transformation */ + CvPoint2D32f border[2][4]; + CvSize warpSize; + CvStereoLineCoeff* lineCoeffs; + int needSwapCameras;/* flag set to 1 if need to swap cameras for good reconstruction */ + float rotMatrix[9]; + float transVector[3]; +} CvStereoCamera; + + +typedef struct CvContourOrientation +{ + float egvals[2]; + float egvects[4]; + + float max, min; // minimum and maximum projections + int imax, imin; +} CvContourOrientation; + +#define CV_CAMERA_TO_WARP 1 +#define CV_WARP_TO_CAMERA 2 + +CVAPI(int) icvConvertWarpCoordinates(double coeffs[3][3], + CvPoint2D32f* cameraPoint, + CvPoint2D32f* warpPoint, + int direction); + +CVAPI(int) icvGetSymPoint3D( CvPoint3D64f pointCorner, + CvPoint3D64f point1, + CvPoint3D64f point2, + CvPoint3D64f *pointSym2); + +CVAPI(void) icvGetPieceLength3D(CvPoint3D64f point1,CvPoint3D64f point2,double* dist); + +CVAPI(int) icvCompute3DPoint( double alpha,double betta, + CvStereoLineCoeff* coeffs, + CvPoint3D64f* point); + +CVAPI(int) icvCreateConvertMatrVect( double* rotMatr1, + double* transVect1, + double* rotMatr2, + double* transVect2, + double* convRotMatr, + double* convTransVect); + +CVAPI(int) icvConvertPointSystem(CvPoint3D64f M2, + CvPoint3D64f* M1, + double* rotMatr, + double* transVect + ); + +CVAPI(int) icvComputeCoeffForStereo( CvStereoCamera* stereoCamera); + +CVAPI(int) icvGetCrossPieceVector(CvPoint2D32f p1_start,CvPoint2D32f p1_end,CvPoint2D32f v2_start,CvPoint2D32f v2_end,CvPoint2D32f *cross); +CVAPI(int) icvGetCrossLineDirect(CvPoint2D32f p1,CvPoint2D32f p2,float a,float b,float c,CvPoint2D32f* cross); +CVAPI(float) icvDefinePointPosition(CvPoint2D32f point1,CvPoint2D32f point2,CvPoint2D32f point); +CVAPI(int) icvStereoCalibration( int numImages, + int* nums, + CvSize imageSize, + CvPoint2D32f* imagePoints1, + CvPoint2D32f* imagePoints2, + CvPoint3D32f* objectPoints, + CvStereoCamera* stereoparams + ); + + +CVAPI(int) icvComputeRestStereoParams(CvStereoCamera *stereoparams); + +CVAPI(void) cvComputePerspectiveMap( const double coeffs[3][3], CvArr* rectMapX, CvArr* rectMapY ); + +CVAPI(int) icvComCoeffForLine( CvPoint2D64f point1, + CvPoint2D64f point2, + CvPoint2D64f point3, + CvPoint2D64f point4, + double* camMatr1, + double* rotMatr1, + double* transVect1, + double* camMatr2, + double* rotMatr2, + double* transVect2, + CvStereoLineCoeff* coeffs, + int* needSwapCameras); + +CVAPI(int) icvGetDirectionForPoint( CvPoint2D64f point, + double* camMatr, + CvPoint3D64f* direct); + +CVAPI(int) icvGetCrossLines(CvPoint3D64f point11,CvPoint3D64f point12, + CvPoint3D64f point21,CvPoint3D64f point22, + CvPoint3D64f* midPoint); + +CVAPI(int) icvComputeStereoLineCoeffs( CvPoint3D64f pointA, + CvPoint3D64f pointB, + CvPoint3D64f pointCam1, + double gamma, + CvStereoLineCoeff* coeffs); + +/*CVAPI(int) icvComputeFundMatrEpipoles ( double* camMatr1, + double* rotMatr1, + double* transVect1, + double* camMatr2, + double* rotMatr2, + double* transVect2, + CvPoint2D64f* epipole1, + CvPoint2D64f* epipole2, + double* fundMatr);*/ + +CVAPI(int) icvGetAngleLine( CvPoint2D64f startPoint, CvSize imageSize,CvPoint2D64f *point1,CvPoint2D64f *point2); + +CVAPI(void) icvGetCoefForPiece( CvPoint2D64f 
p_start,CvPoint2D64f p_end, + double *a,double *b,double *c, + int* result); + +/*CVAPI(void) icvGetCommonArea( CvSize imageSize, + CvPoint2D64f epipole1,CvPoint2D64f epipole2, + double* fundMatr, + double* coeff11,double* coeff12, + double* coeff21,double* coeff22, + int* result);*/ + +CVAPI(void) icvComputeeInfiniteProject1(double* rotMatr, + double* camMatr1, + double* camMatr2, + CvPoint2D32f point1, + CvPoint2D32f *point2); + +CVAPI(void) icvComputeeInfiniteProject2(double* rotMatr, + double* camMatr1, + double* camMatr2, + CvPoint2D32f* point1, + CvPoint2D32f point2); + +CVAPI(void) icvGetCrossDirectDirect( double* direct1,double* direct2, + CvPoint2D64f *cross,int* result); + +CVAPI(void) icvGetCrossPieceDirect( CvPoint2D64f p_start,CvPoint2D64f p_end, + double a,double b,double c, + CvPoint2D64f *cross,int* result); + +CVAPI(void) icvGetCrossPiecePiece( CvPoint2D64f p1_start,CvPoint2D64f p1_end, + CvPoint2D64f p2_start,CvPoint2D64f p2_end, + CvPoint2D64f* cross, + int* result); + +CVAPI(void) icvGetPieceLength(CvPoint2D64f point1,CvPoint2D64f point2,double* dist); + +CVAPI(void) icvGetCrossRectDirect( CvSize imageSize, + double a,double b,double c, + CvPoint2D64f *start,CvPoint2D64f *end, + int* result); + +CVAPI(void) icvProjectPointToImage( CvPoint3D64f point, + double* camMatr,double* rotMatr,double* transVect, + CvPoint2D64f* projPoint); + +CVAPI(void) icvGetQuadsTransform( CvSize imageSize, + double* camMatr1, + double* rotMatr1, + double* transVect1, + double* camMatr2, + double* rotMatr2, + double* transVect2, + CvSize* warpSize, + double quad1[4][2], + double quad2[4][2], + double* fundMatr, + CvPoint3D64f* epipole1, + CvPoint3D64f* epipole2 + ); + +CVAPI(void) icvGetQuadsTransformStruct( CvStereoCamera* stereoCamera); + +CVAPI(void) icvComputeStereoParamsForCameras(CvStereoCamera* stereoCamera); + +CVAPI(void) icvGetCutPiece( double* areaLineCoef1,double* areaLineCoef2, + CvPoint2D64f epipole, + CvSize imageSize, + CvPoint2D64f* point11,CvPoint2D64f* point12, + CvPoint2D64f* point21,CvPoint2D64f* point22, + int* result); + +CVAPI(void) icvGetMiddleAnglePoint( CvPoint2D64f basePoint, + CvPoint2D64f point1,CvPoint2D64f point2, + CvPoint2D64f* midPoint); + +CVAPI(void) icvGetNormalDirect(double* direct,CvPoint2D64f point,double* normDirect); + +CVAPI(double) icvGetVect(CvPoint2D64f basePoint,CvPoint2D64f point1,CvPoint2D64f point2); + +CVAPI(void) icvProjectPointToDirect( CvPoint2D64f point,double* lineCoeff, + CvPoint2D64f* projectPoint); + +CVAPI(void) icvGetDistanceFromPointToDirect( CvPoint2D64f point,double* lineCoef,double*dist); + +CVAPI(IplImage*) icvCreateIsometricImage( IplImage* src, IplImage* dst, + int desired_depth, int desired_num_channels ); + +CVAPI(void) cvDeInterlace( const CvArr* frame, CvArr* fieldEven, CvArr* fieldOdd ); + +/*CVAPI(int) icvSelectBestRt( int numImages, + int* numPoints, + CvSize imageSize, + CvPoint2D32f* imagePoints1, + CvPoint2D32f* imagePoints2, + CvPoint3D32f* objectPoints, + + CvMatr32f cameraMatrix1, + CvVect32f distortion1, + CvMatr32f rotMatrs1, + CvVect32f transVects1, + + CvMatr32f cameraMatrix2, + CvVect32f distortion2, + CvMatr32f rotMatrs2, + CvVect32f transVects2, + + CvMatr32f bestRotMatr, + CvVect32f bestTransVect + );*/ + + +/****************************************************************************************\ +* Contour Tree * +\****************************************************************************************/ + +/* Contour tree header */ +typedef struct CvContourTree +{ + CV_SEQUENCE_FIELDS() + CvPoint p1; /* 
the first point of the binary tree root segment */ + CvPoint p2; /* the last point of the binary tree root segment */ +} CvContourTree; + +/* Builds hierarhical representation of a contour */ +CVAPI(CvContourTree*) cvCreateContourTree( const CvSeq* contour, + CvMemStorage* storage, + double threshold ); + +/* Reconstruct (completelly or partially) contour a from contour tree */ +CVAPI(CvSeq*) cvContourFromContourTree( const CvContourTree* tree, + CvMemStorage* storage, + CvTermCriteria criteria ); + +/* Compares two contour trees */ +enum { CV_CONTOUR_TREES_MATCH_I1 = 1 }; + +CVAPI(double) cvMatchContourTrees( const CvContourTree* tree1, + const CvContourTree* tree2, + int method, double threshold ); + +/****************************************************************************************\ +* Contour Morphing * +\****************************************************************************************/ + +/* finds correspondence between two contours */ +CvSeq* cvCalcContoursCorrespondence( const CvSeq* contour1, + const CvSeq* contour2, + CvMemStorage* storage); + +/* morphs contours using the pre-calculated correspondence: + alpha=0 ~ contour1, alpha=1 ~ contour2 */ +CvSeq* cvMorphContours( const CvSeq* contour1, const CvSeq* contour2, + CvSeq* corr, double alpha, + CvMemStorage* storage ); + + +/****************************************************************************************\ +* Active Contours * +\****************************************************************************************/ + +#define CV_VALUE 1 +#define CV_ARRAY 2 +/* Updates active contour in order to minimize its cummulative + (internal and external) energy. */ +CVAPI(void) cvSnakeImage( const IplImage* image, CvPoint* points, + int length, float* alpha, + float* beta, float* gamma, + int coeff_usage, CvSize win, + CvTermCriteria criteria, int calc_gradient CV_DEFAULT(1)); + +/****************************************************************************************\ +* Texture Descriptors * +\****************************************************************************************/ + +#define CV_GLCM_OPTIMIZATION_NONE -2 +#define CV_GLCM_OPTIMIZATION_LUT -1 +#define CV_GLCM_OPTIMIZATION_HISTOGRAM 0 + +#define CV_GLCMDESC_OPTIMIZATION_ALLOWDOUBLENEST 10 +#define CV_GLCMDESC_OPTIMIZATION_ALLOWTRIPLENEST 11 +#define CV_GLCMDESC_OPTIMIZATION_HISTOGRAM 4 + +#define CV_GLCMDESC_ENTROPY 0 +#define CV_GLCMDESC_ENERGY 1 +#define CV_GLCMDESC_HOMOGENITY 2 +#define CV_GLCMDESC_CONTRAST 3 +#define CV_GLCMDESC_CLUSTERTENDENCY 4 +#define CV_GLCMDESC_CLUSTERSHADE 5 +#define CV_GLCMDESC_CORRELATION 6 +#define CV_GLCMDESC_CORRELATIONINFO1 7 +#define CV_GLCMDESC_CORRELATIONINFO2 8 +#define CV_GLCMDESC_MAXIMUMPROBABILITY 9 + +#define CV_GLCM_ALL 0 +#define CV_GLCM_GLCM 1 +#define CV_GLCM_DESC 2 + +typedef struct CvGLCM CvGLCM; + +CVAPI(CvGLCM*) cvCreateGLCM( const IplImage* srcImage, + int stepMagnitude, + const int* stepDirections CV_DEFAULT(0), + int numStepDirections CV_DEFAULT(0), + int optimizationType CV_DEFAULT(CV_GLCM_OPTIMIZATION_NONE)); + +CVAPI(void) cvReleaseGLCM( CvGLCM** GLCM, int flag CV_DEFAULT(CV_GLCM_ALL)); + +CVAPI(void) cvCreateGLCMDescriptors( CvGLCM* destGLCM, + int descriptorOptimizationType + CV_DEFAULT(CV_GLCMDESC_OPTIMIZATION_ALLOWDOUBLENEST)); + +CVAPI(double) cvGetGLCMDescriptor( CvGLCM* GLCM, int step, int descriptor ); + +CVAPI(void) cvGetGLCMDescriptorStatistics( CvGLCM* GLCM, int descriptor, + double* average, double* standardDeviation ); + +CVAPI(IplImage*) cvCreateGLCMImage( CvGLCM* GLCM, int step ); 
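/* ---------------------------------------------------------------------------------------
   Illustrative usage sketch for the GLCM texture-descriptor API declared above; it is not
   part of the upstream OpenCV header. It shows one plausible call sequence; the image
   argument "gray", the 1-pixel step magnitude and the default step directions are
   assumptions made only for this example. */
static double example_glcm_entropy( const IplImage* gray )   /* assumed: 8-bit, 1 channel */
{
    /* build the co-occurrence matrices: step magnitude 1, default directions, no optimization */
    CvGLCM* glcm = cvCreateGLCM( gray, 1, 0, 0, CV_GLCM_OPTIMIZATION_NONE );

    /* compute the descriptor tables for every step direction */
    cvCreateGLCMDescriptors( glcm, CV_GLCMDESC_OPTIMIZATION_ALLOWDOUBLENEST );

    /* read one descriptor for the first step direction */
    double entropy = cvGetGLCMDescriptor( glcm, 0, CV_GLCMDESC_ENTROPY );

    cvReleaseGLCM( &glcm, CV_GLCM_ALL );   /* releases both the matrices and the descriptors */
    return entropy;
}
/* ------------------------------------------------------------------------------------ */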
+ +/****************************************************************************************\ +* Face eyes&mouth tracking * +\****************************************************************************************/ + + +typedef struct CvFaceTracker CvFaceTracker; + +#define CV_NUM_FACE_ELEMENTS 3 +enum CV_FACE_ELEMENTS +{ + CV_FACE_MOUTH = 0, + CV_FACE_LEFT_EYE = 1, + CV_FACE_RIGHT_EYE = 2 +}; + +CVAPI(CvFaceTracker*) cvInitFaceTracker(CvFaceTracker* pFaceTracking, const IplImage* imgGray, + CvRect* pRects, int nRects); +CVAPI(int) cvTrackFace( CvFaceTracker* pFaceTracker, IplImage* imgGray, + CvRect* pRects, int nRects, + CvPoint* ptRotate, double* dbAngleRotate); +CVAPI(void) cvReleaseFaceTracker(CvFaceTracker** ppFaceTracker); + + +typedef struct CvFace +{ + CvRect MouthRect; + CvRect LeftEyeRect; + CvRect RightEyeRect; +} CvFaceData; + +CvSeq * cvFindFace(IplImage * Image,CvMemStorage* storage); +CvSeq * cvPostBoostingFindFace(IplImage * Image,CvMemStorage* storage); + + +/****************************************************************************************\ +* 3D Tracker * +\****************************************************************************************/ + +typedef unsigned char CvBool; + +typedef struct Cv3dTracker2dTrackedObject +{ + int id; + CvPoint2D32f p; // pgruebele: So we do not loose precision, this needs to be float +} Cv3dTracker2dTrackedObject; + +CV_INLINE Cv3dTracker2dTrackedObject cv3dTracker2dTrackedObject(int id, CvPoint2D32f p) +{ + Cv3dTracker2dTrackedObject r; + r.id = id; + r.p = p; + return r; +} + +typedef struct Cv3dTrackerTrackedObject +{ + int id; + CvPoint3D32f p; // location of the tracked object +} Cv3dTrackerTrackedObject; + +CV_INLINE Cv3dTrackerTrackedObject cv3dTrackerTrackedObject(int id, CvPoint3D32f p) +{ + Cv3dTrackerTrackedObject r; + r.id = id; + r.p = p; + return r; +} + +typedef struct Cv3dTrackerCameraInfo +{ + CvBool valid; + float mat[4][4]; /* maps camera coordinates to world coordinates */ + CvPoint2D32f principal_point; /* copied from intrinsics so this structure */ + /* has all the info we need */ +} Cv3dTrackerCameraInfo; + +typedef struct Cv3dTrackerCameraIntrinsics +{ + CvPoint2D32f principal_point; + float focal_length[2]; + float distortion[4]; +} Cv3dTrackerCameraIntrinsics; + +CVAPI(CvBool) cv3dTrackerCalibrateCameras(int num_cameras, + const Cv3dTrackerCameraIntrinsics camera_intrinsics[], /* size is num_cameras */ + CvSize etalon_size, + float square_size, + IplImage *samples[], /* size is num_cameras */ + Cv3dTrackerCameraInfo camera_info[]); /* size is num_cameras */ + +CVAPI(int) cv3dTrackerLocateObjects(int num_cameras, int num_objects, + const Cv3dTrackerCameraInfo camera_info[], /* size is num_cameras */ + const Cv3dTracker2dTrackedObject tracking_info[], /* size is num_objects*num_cameras */ + Cv3dTrackerTrackedObject tracked_objects[]); /* size is num_objects */ +/**************************************************************************************** + tracking_info is a rectangular array; one row per camera, num_objects elements per row. + The id field of any unused slots must be -1. Ids need not be ordered or consecutive. On + completion, the return value is the number of objects located; i.e., the number of objects + visible by more than one camera. The id field of any unused slots in tracked objects is + set to -1. 
+****************************************************************************************/ + + +/****************************************************************************************\ +* Skeletons and Linear-Contour Models * +\****************************************************************************************/ + +typedef enum CvLeeParameters +{ + CV_LEE_INT = 0, + CV_LEE_FLOAT = 1, + CV_LEE_DOUBLE = 2, + CV_LEE_AUTO = -1, + CV_LEE_ERODE = 0, + CV_LEE_ZOOM = 1, + CV_LEE_NON = 2 +} CvLeeParameters; + +#define CV_NEXT_VORONOISITE2D( SITE ) ((SITE)->edge[0]->site[((SITE)->edge[0]->site[0] == (SITE))]) +#define CV_PREV_VORONOISITE2D( SITE ) ((SITE)->edge[1]->site[((SITE)->edge[1]->site[0] == (SITE))]) +#define CV_FIRST_VORONOIEDGE2D( SITE ) ((SITE)->edge[0]) +#define CV_LAST_VORONOIEDGE2D( SITE ) ((SITE)->edge[1]) +#define CV_NEXT_VORONOIEDGE2D( EDGE, SITE ) ((EDGE)->next[(EDGE)->site[0] != (SITE)]) +#define CV_PREV_VORONOIEDGE2D( EDGE, SITE ) ((EDGE)->next[2 + ((EDGE)->site[0] != (SITE))]) +#define CV_VORONOIEDGE2D_BEGINNODE( EDGE, SITE ) ((EDGE)->node[((EDGE)->site[0] != (SITE))]) +#define CV_VORONOIEDGE2D_ENDNODE( EDGE, SITE ) ((EDGE)->node[((EDGE)->site[0] == (SITE))]) +#define CV_TWIN_VORONOISITE2D( SITE, EDGE ) ( (EDGE)->site[((EDGE)->site[0] == (SITE))]) + +#define CV_VORONOISITE2D_FIELDS() \ + struct CvVoronoiNode2D *node[2]; \ + struct CvVoronoiEdge2D *edge[2]; + +typedef struct CvVoronoiSite2D +{ + CV_VORONOISITE2D_FIELDS() + struct CvVoronoiSite2D *next[2]; +} CvVoronoiSite2D; + +#define CV_VORONOIEDGE2D_FIELDS() \ + struct CvVoronoiNode2D *node[2]; \ + struct CvVoronoiSite2D *site[2]; \ + struct CvVoronoiEdge2D *next[4]; + +typedef struct CvVoronoiEdge2D +{ + CV_VORONOIEDGE2D_FIELDS() +} CvVoronoiEdge2D; + +#define CV_VORONOINODE2D_FIELDS() \ + CV_SET_ELEM_FIELDS(CvVoronoiNode2D) \ + CvPoint2D32f pt; \ + float radius; + +typedef struct CvVoronoiNode2D +{ + CV_VORONOINODE2D_FIELDS() +} CvVoronoiNode2D; + +#define CV_VORONOIDIAGRAM2D_FIELDS() \ + CV_GRAPH_FIELDS() \ + CvSet *sites; + +typedef struct CvVoronoiDiagram2D +{ + CV_VORONOIDIAGRAM2D_FIELDS() +} CvVoronoiDiagram2D; + +/* Computes Voronoi Diagram for given polygons with holes */ +CVAPI(int) cvVoronoiDiagramFromContour(CvSeq* ContourSeq, + CvVoronoiDiagram2D** VoronoiDiagram, + CvMemStorage* VoronoiStorage, + CvLeeParameters contour_type CV_DEFAULT(CV_LEE_INT), + int contour_orientation CV_DEFAULT(-1), + int attempt_number CV_DEFAULT(10)); + +/* Computes Voronoi Diagram for domains in given image */ +CVAPI(int) cvVoronoiDiagramFromImage(IplImage* pImage, + CvSeq** ContourSeq, + CvVoronoiDiagram2D** VoronoiDiagram, + CvMemStorage* VoronoiStorage, + CvLeeParameters regularization_method CV_DEFAULT(CV_LEE_NON), + float approx_precision CV_DEFAULT(CV_LEE_AUTO)); + +/* Deallocates the storage */ +CVAPI(void) cvReleaseVoronoiStorage(CvVoronoiDiagram2D* VoronoiDiagram, + CvMemStorage** pVoronoiStorage); + +/*********************** Linear-Contour Model ****************************/ + +struct CvLCMEdge; +struct CvLCMNode; + +typedef struct CvLCMEdge +{ + CV_GRAPH_EDGE_FIELDS() + CvSeq* chain; + float width; + int index1; + int index2; +} CvLCMEdge; + +typedef struct CvLCMNode +{ + CV_GRAPH_VERTEX_FIELDS() + CvContour* contour; +} CvLCMNode; + + +/* Computes hybrid model from Voronoi Diagram */ +CVAPI(CvGraph*) cvLinearContorModelFromVoronoiDiagram(CvVoronoiDiagram2D* VoronoiDiagram, + float maxWidth); + +/* Releases hybrid model storage */ +CVAPI(int) cvReleaseLinearContorModelStorage(CvGraph** Graph); + + +/* two stereo-related 
functions */ + +CVAPI(void) cvInitPerspectiveTransform( CvSize size, const CvPoint2D32f vertex[4], double matrix[3][3], + CvArr* rectMap ); + +/*CVAPI(void) cvInitStereoRectification( CvStereoCamera* params, + CvArr* rectMap1, CvArr* rectMap2, + int do_undistortion );*/ + +/*************************** View Morphing Functions ************************/ + +typedef struct CvMatrix3 +{ + float m[3][3]; +} CvMatrix3; + +/* The order of the function corresponds to the order they should appear in + the view morphing pipeline */ + +/* Finds ending points of scanlines on left and right images of stereo-pair */ +CVAPI(void) cvMakeScanlines( const CvMatrix3* matrix, CvSize img_size, + int* scanlines1, int* scanlines2, + int* lengths1, int* lengths2, + int* line_count ); + +/* Grab pixel values from scanlines and stores them sequentially + (some sort of perspective image transform) */ +CVAPI(void) cvPreWarpImage( int line_count, + IplImage* img, + uchar* dst, + int* dst_nums, + int* scanlines); + +/* Approximate each grabbed scanline by a sequence of runs + (lossy run-length compression) */ +CVAPI(void) cvFindRuns( int line_count, + uchar* prewarp1, + uchar* prewarp2, + int* line_lengths1, + int* line_lengths2, + int* runs1, + int* runs2, + int* num_runs1, + int* num_runs2); + +/* Compares two sets of compressed scanlines */ +CVAPI(void) cvDynamicCorrespondMulti( int line_count, + int* first, + int* first_runs, + int* second, + int* second_runs, + int* first_corr, + int* second_corr); + +/* Finds scanline ending coordinates for some intermediate "virtual" camera position */ +CVAPI(void) cvMakeAlphaScanlines( int* scanlines1, + int* scanlines2, + int* scanlinesA, + int* lengths, + int line_count, + float alpha); + +/* Blends data of the left and right image scanlines to get + pixel values of "virtual" image scanlines */ +CVAPI(void) cvMorphEpilinesMulti( int line_count, + uchar* first_pix, + int* first_num, + uchar* second_pix, + int* second_num, + uchar* dst_pix, + int* dst_num, + float alpha, + int* first, + int* first_runs, + int* second, + int* second_runs, + int* first_corr, + int* second_corr); + +/* Does reverse warping of the morphing result to make + it fill the destination image rectangle */ +CVAPI(void) cvPostWarpImage( int line_count, + uchar* src, + int* src_nums, + IplImage* img, + int* scanlines); + +/* Deletes Moire (missed pixels that appear due to discretization) */ +CVAPI(void) cvDeleteMoire( IplImage* img ); + + +typedef struct CvConDensation +{ + int MP; + int DP; + float* DynamMatr; /* Matrix of the linear Dynamics system */ + float* State; /* Vector of State */ + int SamplesNum; /* Number of the Samples */ + float** flSamples; /* arr of the Sample Vectors */ + float** flNewSamples; /* temporary array of the Sample Vectors */ + float* flConfidence; /* Confidence for each Sample */ + float* flCumulative; /* Cumulative confidence */ + float* Temp; /* Temporary vector */ + float* RandomSample; /* RandomVector to update sample set */ + struct CvRandState* RandS; /* Array of structures to generate random vectors */ +} CvConDensation; + +/* Creates ConDensation filter state */ +CVAPI(CvConDensation*) cvCreateConDensation( int dynam_params, + int measure_params, + int sample_count ); + +/* Releases ConDensation filter state */ +CVAPI(void) cvReleaseConDensation( CvConDensation** condens ); + +/* Updates ConDensation filter by time (predict future state of the system) */ +CVAPI(void) cvConDensUpdateByTime( CvConDensation* condens); + +/* Initializes ConDensation filter samples */ 
+CVAPI(void) cvConDensInitSampleSet( CvConDensation* condens, CvMat* lower_bound, CvMat* upper_bound ); + +CV_INLINE int iplWidth( const IplImage* img ) +{ + return !img ? 0 : !img->roi ? img->width : img->roi->width; +} + +CV_INLINE int iplHeight( const IplImage* img ) +{ + return !img ? 0 : !img->roi ? img->height : img->roi->height; +} + +#ifdef __cplusplus +} +#endif + +#ifdef __cplusplus + +/****************************************************************************************\ +* Calibration engine * +\****************************************************************************************/ + +typedef enum CvCalibEtalonType +{ + CV_CALIB_ETALON_USER = -1, + CV_CALIB_ETALON_CHESSBOARD = 0, + CV_CALIB_ETALON_CHECKERBOARD = CV_CALIB_ETALON_CHESSBOARD +} +CvCalibEtalonType; + +class CV_EXPORTS CvCalibFilter +{ +public: + /* Constructor & destructor */ + CvCalibFilter(); + virtual ~CvCalibFilter(); + + /* Sets etalon type - one for all cameras. + etalonParams is used in case of pre-defined etalons (such as chessboard). + Number of elements in etalonParams is determined by etalonType. + E.g., if etalon type is CV_ETALON_TYPE_CHESSBOARD then: + etalonParams[0] is number of squares per one side of etalon + etalonParams[1] is number of squares per another side of etalon + etalonParams[2] is linear size of squares in the board in arbitrary units. + pointCount & points are used in case of + CV_CALIB_ETALON_USER (user-defined) etalon. */ + virtual bool + SetEtalon( CvCalibEtalonType etalonType, double* etalonParams, + int pointCount = 0, CvPoint2D32f* points = 0 ); + + /* Retrieves etalon parameters/or and points */ + virtual CvCalibEtalonType + GetEtalon( int* paramCount = 0, const double** etalonParams = 0, + int* pointCount = 0, const CvPoint2D32f** etalonPoints = 0 ) const; + + /* Sets number of cameras calibrated simultaneously. It is equal to 1 initially */ + virtual void SetCameraCount( int cameraCount ); + + /* Retrieves number of cameras */ + int GetCameraCount() const { return cameraCount; } + + /* Starts cameras calibration */ + virtual bool SetFrames( int totalFrames ); + + /* Stops cameras calibration */ + virtual void Stop( bool calibrate = false ); + + /* Retrieves number of cameras */ + bool IsCalibrated() const { return isCalibrated; } + + /* Feeds another serie of snapshots (one per each camera) to filter. + Etalon points on these images are found automatically. + If the function can't locate points, it returns false */ + virtual bool FindEtalon( IplImage** imgs ); + + /* The same but takes matrices */ + virtual bool FindEtalon( CvMat** imgs ); + + /* Lower-level function for feeding filter with already found etalon points. + Array of point arrays for each camera is passed. */ + virtual bool Push( const CvPoint2D32f** points = 0 ); + + /* Returns total number of accepted frames and, optionally, + total number of frames to collect */ + virtual int GetFrameCount( int* framesTotal = 0 ) const; + + /* Retrieves camera parameters for specified camera. + If camera is not calibrated the function returns 0 */ + virtual const CvCamera* GetCameraParams( int idx = 0 ) const; + + virtual const CvStereoCamera* GetStereoParams() const; + + /* Sets camera parameters for all cameras */ + virtual bool SetCameraParams( CvCamera* params ); + + /* Saves all camera parameters to file */ + virtual bool SaveCameraParams( const char* filename ); + + /* Loads all camera parameters from file */ + virtual bool LoadCameraParams( const char* filename ); + + /* Undistorts images using camera parameters. 
Some of src pointers can be NULL. */ + virtual bool Undistort( IplImage** src, IplImage** dst ); + + /* Undistorts images using camera parameters. Some of src pointers can be NULL. */ + virtual bool Undistort( CvMat** src, CvMat** dst ); + + /* Returns array of etalon points detected/partally detected + on the latest frame for idx-th camera */ + virtual bool GetLatestPoints( int idx, CvPoint2D32f** pts, + int* count, bool* found ); + + /* Draw the latest detected/partially detected etalon */ + virtual void DrawPoints( IplImage** dst ); + + /* Draw the latest detected/partially detected etalon */ + virtual void DrawPoints( CvMat** dst ); + + virtual bool Rectify( IplImage** srcarr, IplImage** dstarr ); + virtual bool Rectify( CvMat** srcarr, CvMat** dstarr ); + +protected: + + enum { MAX_CAMERAS = 3 }; + + /* etalon data */ + CvCalibEtalonType etalonType; + int etalonParamCount; + double* etalonParams; + int etalonPointCount; + CvPoint2D32f* etalonPoints; + CvSize imgSize; + CvMat* grayImg; + CvMat* tempImg; + CvMemStorage* storage; + + /* camera data */ + int cameraCount; + CvCamera cameraParams[MAX_CAMERAS]; + CvStereoCamera stereo; + CvPoint2D32f* points[MAX_CAMERAS]; + CvMat* undistMap[MAX_CAMERAS][2]; + CvMat* undistImg; + int latestCounts[MAX_CAMERAS]; + CvPoint2D32f* latestPoints[MAX_CAMERAS]; + CvMat* rectMap[MAX_CAMERAS][2]; + + /* Added by Valery */ + //CvStereoCamera stereoParams; + + int maxPoints; + int framesTotal; + int framesAccepted; + bool isCalibrated; +}; + +#include +#include + +class CV_EXPORTS CvImage +{ +public: + CvImage() : image(0), refcount(0) {} + CvImage( CvSize _size, int _depth, int _channels ) + { + image = cvCreateImage( _size, _depth, _channels ); + refcount = image ? new int(1) : 0; + } + + CvImage( IplImage* img ) : image(img) + { + refcount = image ? new int(1) : 0; + } + + CvImage( const CvImage& img ) : image(img.image), refcount(img.refcount) + { + if( refcount ) ++(*refcount); + } + + CvImage( const char* filename, const char* imgname=0, int color=-1 ) : image(0), refcount(0) + { load( filename, imgname, color ); } + + CvImage( CvFileStorage* fs, const char* mapname, const char* imgname ) : image(0), refcount(0) + { read( fs, mapname, imgname ); } + + CvImage( CvFileStorage* fs, const char* seqname, int idx ) : image(0), refcount(0) + { read( fs, seqname, idx ); } + + ~CvImage() + { + if( refcount && !(--*refcount) ) + { + cvReleaseImage( &image ); + delete refcount; + } + } + + CvImage clone() { return CvImage(image ? cvCloneImage(image) : 0); } + + void create( CvSize _size, int _depth, int _channels ) + { + if( !image || !refcount || + image->width != _size.width || image->height != _size.height || + image->depth != _depth || image->nChannels != _channels ) + attach( cvCreateImage( _size, _depth, _channels )); + } + + void release() { detach(); } + void clear() { detach(); } + + void attach( IplImage* img, bool use_refcount=true ) + { + if( refcount && --*refcount == 0 ) + { + cvReleaseImage( &image ); + delete refcount; + } + image = img; + refcount = use_refcount && image ? 
new int(1) : 0; + } + + void detach() + { + if( refcount && --*refcount == 0 ) + { + cvReleaseImage( &image ); + delete refcount; + } + image = 0; + refcount = 0; + } + + bool load( const char* filename, const char* imgname=0, int color=-1 ); + bool read( CvFileStorage* fs, const char* mapname, const char* imgname ); + bool read( CvFileStorage* fs, const char* seqname, int idx ); + void save( const char* filename, const char* imgname, const int* params=0 ); + void write( CvFileStorage* fs, const char* imgname ); + + void show( const char* window_name ); + bool is_valid() { return image != 0; } + + int width() const { return image ? image->width : 0; } + int height() const { return image ? image->height : 0; } + + CvSize size() const { return image ? cvSize(image->width, image->height) : cvSize(0,0); } + + CvSize roi_size() const + { + return !image ? cvSize(0,0) : + !image->roi ? cvSize(image->width,image->height) : + cvSize(image->roi->width, image->roi->height); + } + + CvRect roi() const + { + return !image ? cvRect(0,0,0,0) : + !image->roi ? cvRect(0,0,image->width,image->height) : + cvRect(image->roi->xOffset,image->roi->yOffset, + image->roi->width,image->roi->height); + } + + int coi() const { return !image || !image->roi ? 0 : image->roi->coi; } + + void set_roi(CvRect _roi) { cvSetImageROI(image,_roi); } + void reset_roi() { cvResetImageROI(image); } + void set_coi(int _coi) { cvSetImageCOI(image,_coi); } + int depth() const { return image ? image->depth : 0; } + int channels() const { return image ? image->nChannels : 0; } + int pix_size() const { return image ? ((image->depth & 255)>>3)*image->nChannels : 0; } + + uchar* data() { return image ? (uchar*)image->imageData : 0; } + const uchar* data() const { return image ? (const uchar*)image->imageData : 0; } + int step() const { return image ? image->widthStep : 0; } + int origin() const { return image ? image->origin : 0; } + + uchar* roi_row(int y) + { + assert(0<=y); + assert(!image ? + 1 : image->roi ? + yroi->height : yheight); + + return !image ? 0 : + !image->roi ? + (uchar*)(image->imageData + y*image->widthStep) : + (uchar*)(image->imageData + (y+image->roi->yOffset)*image->widthStep + + image->roi->xOffset*((image->depth & 255)>>3)*image->nChannels); + } + + const uchar* roi_row(int y) const + { + assert(0<=y); + assert(!image ? + 1 : image->roi ? + yroi->height : yheight); + + return !image ? 0 : + !image->roi ? 
+ (const uchar*)(image->imageData + y*image->widthStep) : + (const uchar*)(image->imageData + (y+image->roi->yOffset)*image->widthStep + + image->roi->xOffset*((image->depth & 255)>>3)*image->nChannels); + } + + operator const IplImage* () const { return image; } + operator IplImage* () { return image; } + + CvImage& operator = (const CvImage& img) + { + if( img.refcount ) + ++*img.refcount; + if( refcount && !(--*refcount) ) + cvReleaseImage( &image ); + image=img.image; + refcount=img.refcount; + return *this; + } + +protected: + IplImage* image; + int* refcount; +}; + + +class CV_EXPORTS CvMatrix +{ +public: + CvMatrix() : matrix(0) {} + CvMatrix( int _rows, int _cols, int _type ) + { matrix = cvCreateMat( _rows, _cols, _type ); } + + CvMatrix( int _rows, int _cols, int _type, CvMat* hdr, + void* _data=0, int _step=CV_AUTOSTEP ) + { matrix = cvInitMatHeader( hdr, _rows, _cols, _type, _data, _step ); } + + CvMatrix( int rows, int cols, int type, CvMemStorage* storage, bool alloc_data=true ); + + CvMatrix( int _rows, int _cols, int _type, void* _data, int _step=CV_AUTOSTEP ) + { matrix = cvCreateMatHeader( _rows, _cols, _type ); + cvSetData( matrix, _data, _step ); } + + CvMatrix( CvMat* m ) + { matrix = m; } + + CvMatrix( const CvMatrix& m ) + { + matrix = m.matrix; + addref(); + } + + CvMatrix( const char* filename, const char* matname=0, int color=-1 ) : matrix(0) + { load( filename, matname, color ); } + + CvMatrix( CvFileStorage* fs, const char* mapname, const char* matname ) : matrix(0) + { read( fs, mapname, matname ); } + + CvMatrix( CvFileStorage* fs, const char* seqname, int idx ) : matrix(0) + { read( fs, seqname, idx ); } + + ~CvMatrix() + { + release(); + } + + CvMatrix clone() { return CvMatrix(matrix ? cvCloneMat(matrix) : 0); } + + void set( CvMat* m, bool add_ref ) + { + release(); + matrix = m; + if( add_ref ) + addref(); + } + + void create( int _rows, int _cols, int _type ) + { + if( !matrix || !matrix->refcount || + matrix->rows != _rows || matrix->cols != _cols || + CV_MAT_TYPE(matrix->type) != _type ) + set( cvCreateMat( _rows, _cols, _type ), false ); + } + + void addref() const + { + if( matrix ) + { + if( matrix->hdr_refcount ) + ++matrix->hdr_refcount; + else if( matrix->refcount ) + ++*matrix->refcount; + } + } + + void release() + { + if( matrix ) + { + if( matrix->hdr_refcount ) + { + if( --matrix->hdr_refcount == 0 ) + cvReleaseMat( &matrix ); + } + else if( matrix->refcount ) + { + if( --*matrix->refcount == 0 ) + cvFree( &matrix->refcount ); + } + matrix = 0; + } + } + + void clear() + { + release(); + } + + bool load( const char* filename, const char* matname=0, int color=-1 ); + bool read( CvFileStorage* fs, const char* mapname, const char* matname ); + bool read( CvFileStorage* fs, const char* seqname, int idx ); + void save( const char* filename, const char* matname, const int* params=0 ); + void write( CvFileStorage* fs, const char* matname ); + + void show( const char* window_name ); + + bool is_valid() { return matrix != 0; } + + int rows() const { return matrix ? matrix->rows : 0; } + int cols() const { return matrix ? matrix->cols : 0; } + + CvSize size() const + { + return !matrix ? cvSize(0,0) : cvSize(matrix->rows,matrix->cols); + } + + int type() const { return matrix ? CV_MAT_TYPE(matrix->type) : 0; } + int depth() const { return matrix ? CV_MAT_DEPTH(matrix->type) : 0; } + int channels() const { return matrix ? CV_MAT_CN(matrix->type) : 0; } + int pix_size() const { return matrix ? 
CV_ELEM_SIZE(matrix->type) : 0; } + + uchar* data() { return matrix ? matrix->data.ptr : 0; } + const uchar* data() const { return matrix ? matrix->data.ptr : 0; } + int step() const { return matrix ? matrix->step : 0; } + + void set_data( void* _data, int _step=CV_AUTOSTEP ) + { cvSetData( matrix, _data, _step ); } + + uchar* row(int i) { return !matrix ? 0 : matrix->data.ptr + i*matrix->step; } + const uchar* row(int i) const + { return !matrix ? 0 : matrix->data.ptr + i*matrix->step; } + + operator const CvMat* () const { return matrix; } + operator CvMat* () { return matrix; } + + CvMatrix& operator = (const CvMatrix& _m) + { + _m.addref(); + release(); + matrix = _m.matrix; + return *this; + } + +protected: + CvMat* matrix; +}; + +/****************************************************************************************\ + * CamShiftTracker * + \****************************************************************************************/ + +class CV_EXPORTS CvCamShiftTracker +{ +public: + + CvCamShiftTracker(); + virtual ~CvCamShiftTracker(); + + /**** Characteristics of the object that are calculated by track_object method *****/ + float get_orientation() const // orientation of the object in degrees + { return m_box.angle; } + float get_length() const // the larger linear size of the object + { return m_box.size.height; } + float get_width() const // the smaller linear size of the object + { return m_box.size.width; } + CvPoint2D32f get_center() const // center of the object + { return m_box.center; } + CvRect get_window() const // bounding rectangle for the object + { return m_comp.rect; } + + /*********************** Tracking parameters ************************/ + int get_threshold() const // thresholding value that applied to back project + { return m_threshold; } + + int get_hist_dims( int* dims = 0 ) const // returns number of histogram dimensions and sets + { return m_hist ? cvGetDims( m_hist->bins, dims ) : 0; } + + int get_min_ch_val( int channel ) const // get the minimum allowed value of the specified channel + { return m_min_ch_val[channel]; } + + int get_max_ch_val( int channel ) const // get the maximum allowed value of the specified channel + { return m_max_ch_val[channel]; } + + // set initial object rectangle (must be called before initial calculation of the histogram) + bool set_window( CvRect window) + { m_comp.rect = window; return true; } + + bool set_threshold( int threshold ) // threshold applied to the histogram bins + { m_threshold = threshold; return true; } + + bool set_hist_bin_range( int dim, int min_val, int max_val ); + + bool set_hist_dims( int c_dims, int* dims );// set the histogram parameters + + bool set_min_ch_val( int channel, int val ) // set the minimum allowed value of the specified channel + { m_min_ch_val[channel] = val; return true; } + bool set_max_ch_val( int channel, int val ) // set the maximum allowed value of the specified channel + { m_max_ch_val[channel] = val; return true; } + + /************************ The processing methods *********************************/ + // update object position + virtual bool track_object( const IplImage* cur_frame ); + + // update object histogram + virtual bool update_histogram( const IplImage* cur_frame ); + + // reset histogram + virtual void reset_histogram(); + + /************************ Retrieving internal data *******************************/ + // get back project image + virtual IplImage* get_back_project() + { return m_back_project; } + + float query( int* bin ) const + { return m_hist ? 
(float)cvGetRealND(m_hist->bins, bin) : 0.f; } + +protected: + + // internal method for color conversion: fills m_color_planes group + virtual void color_transform( const IplImage* img ); + + CvHistogram* m_hist; + + CvBox2D m_box; + CvConnectedComp m_comp; + + float m_hist_ranges_data[CV_MAX_DIM][2]; + float* m_hist_ranges[CV_MAX_DIM]; + + int m_min_ch_val[CV_MAX_DIM]; + int m_max_ch_val[CV_MAX_DIM]; + int m_threshold; + + IplImage* m_color_planes[CV_MAX_DIM]; + IplImage* m_back_project; + IplImage* m_temp; + IplImage* m_mask; +}; + +/****************************************************************************************\ +* Expectation - Maximization * +\****************************************************************************************/ +struct CV_EXPORTS_W_MAP CvEMParams +{ + CvEMParams(); + CvEMParams( int nclusters, int cov_mat_type=cv::EM::COV_MAT_DIAGONAL, + int start_step=cv::EM::START_AUTO_STEP, + CvTermCriteria term_crit=cvTermCriteria(CV_TERMCRIT_ITER+CV_TERMCRIT_EPS, 100, FLT_EPSILON), + const CvMat* probs=0, const CvMat* weights=0, const CvMat* means=0, const CvMat** covs=0 ); + + CV_PROP_RW int nclusters; + CV_PROP_RW int cov_mat_type; + CV_PROP_RW int start_step; + const CvMat* probs; + const CvMat* weights; + const CvMat* means; + const CvMat** covs; + CV_PROP_RW CvTermCriteria term_crit; +}; + + +class CV_EXPORTS_W CvEM : public CvStatModel +{ +public: + // Type of covariation matrices + enum { COV_MAT_SPHERICAL=cv::EM::COV_MAT_SPHERICAL, + COV_MAT_DIAGONAL =cv::EM::COV_MAT_DIAGONAL, + COV_MAT_GENERIC =cv::EM::COV_MAT_GENERIC }; + + // The initial step + enum { START_E_STEP=cv::EM::START_E_STEP, + START_M_STEP=cv::EM::START_M_STEP, + START_AUTO_STEP=cv::EM::START_AUTO_STEP }; + + CV_WRAP CvEM(); + CvEM( const CvMat* samples, const CvMat* sampleIdx=0, + CvEMParams params=CvEMParams(), CvMat* labels=0 ); + + virtual ~CvEM(); + + virtual bool train( const CvMat* samples, const CvMat* sampleIdx=0, + CvEMParams params=CvEMParams(), CvMat* labels=0 ); + + virtual float predict( const CvMat* sample, CV_OUT CvMat* probs ) const; + + CV_WRAP CvEM( const cv::Mat& samples, const cv::Mat& sampleIdx=cv::Mat(), + CvEMParams params=CvEMParams() ); + + CV_WRAP virtual bool train( const cv::Mat& samples, + const cv::Mat& sampleIdx=cv::Mat(), + CvEMParams params=CvEMParams(), + CV_OUT cv::Mat* labels=0 ); + + CV_WRAP virtual float predict( const cv::Mat& sample, CV_OUT cv::Mat* probs=0 ) const; + CV_WRAP virtual double calcLikelihood( const cv::Mat &sample ) const; + + CV_WRAP int getNClusters() const; + CV_WRAP cv::Mat getMeans() const; + CV_WRAP void getCovs(CV_OUT std::vector& covs) const; + CV_WRAP cv::Mat getWeights() const; + CV_WRAP cv::Mat getProbs() const; + + CV_WRAP inline double getLikelihood() const { return emObj.isTrained() ? logLikelihood : DBL_MAX; } + + CV_WRAP virtual void clear(); + + int get_nclusters() const; + const CvMat* get_means() const; + const CvMat** get_covs() const; + const CvMat* get_weights() const; + const CvMat* get_probs() const; + + inline double get_log_likelihood() const { return getLikelihood(); } + + virtual void read( CvFileStorage* fs, CvFileNode* node ); + virtual void write( CvFileStorage* fs, const char* name ) const; + +protected: + void set_mat_hdrs(); + + cv::EM emObj; + cv::Mat probs; + double logLikelihood; + + CvMat meansHdr; + std::vector covsHdrs; + std::vector covsPtrs; + CvMat weightsHdr; + CvMat probsHdr; +}; + +namespace cv +{ + +typedef CvEMParams EMParams; +typedef CvEM ExpectationMaximization; + +/*! 
+ The Patch Generator class + */ +class CV_EXPORTS PatchGenerator +{ +public: + PatchGenerator(); + PatchGenerator(double _backgroundMin, double _backgroundMax, + double _noiseRange, bool _randomBlur=true, + double _lambdaMin=0.6, double _lambdaMax=1.5, + double _thetaMin=-CV_PI, double _thetaMax=CV_PI, + double _phiMin=-CV_PI, double _phiMax=CV_PI ); + void operator()(const Mat& image, Point2f pt, Mat& patch, Size patchSize, RNG& rng) const; + void operator()(const Mat& image, const Mat& transform, Mat& patch, + Size patchSize, RNG& rng) const; + void warpWholeImage(const Mat& image, Mat& matT, Mat& buf, + CV_OUT Mat& warped, int border, RNG& rng) const; + void generateRandomTransform(Point2f srcCenter, Point2f dstCenter, + CV_OUT Mat& transform, RNG& rng, + bool inverse=false) const; + void setAffineParam(double lambda, double theta, double phi); + + double backgroundMin, backgroundMax; + double noiseRange; + bool randomBlur; + double lambdaMin, lambdaMax; + double thetaMin, thetaMax; + double phiMin, phiMax; +}; + + +class CV_EXPORTS LDetector +{ +public: + LDetector(); + LDetector(int _radius, int _threshold, int _nOctaves, + int _nViews, double _baseFeatureSize, double _clusteringDistance); + void operator()(const Mat& image, + CV_OUT vector& keypoints, + int maxCount=0, bool scaleCoords=true) const; + void operator()(const vector& pyr, + CV_OUT vector& keypoints, + int maxCount=0, bool scaleCoords=true) const; + void getMostStable2D(const Mat& image, CV_OUT vector& keypoints, + int maxCount, const PatchGenerator& patchGenerator) const; + void setVerbose(bool verbose); + + void read(const FileNode& node); + void write(FileStorage& fs, const String& name=String()) const; + + int radius; + int threshold; + int nOctaves; + int nViews; + bool verbose; + + double baseFeatureSize; + double clusteringDistance; +}; + +typedef LDetector YAPE; + +class CV_EXPORTS FernClassifier +{ +public: + FernClassifier(); + FernClassifier(const FileNode& node); + FernClassifier(const vector >& points, + const vector& refimgs, + const vector >& labels=vector >(), + int _nclasses=0, int _patchSize=PATCH_SIZE, + int _signatureSize=DEFAULT_SIGNATURE_SIZE, + int _nstructs=DEFAULT_STRUCTS, + int _structSize=DEFAULT_STRUCT_SIZE, + int _nviews=DEFAULT_VIEWS, + int _compressionMethod=COMPRESSION_NONE, + const PatchGenerator& patchGenerator=PatchGenerator()); + virtual ~FernClassifier(); + virtual void read(const FileNode& n); + virtual void write(FileStorage& fs, const String& name=String()) const; + virtual void trainFromSingleView(const Mat& image, + const vector& keypoints, + int _patchSize=PATCH_SIZE, + int _signatureSize=DEFAULT_SIGNATURE_SIZE, + int _nstructs=DEFAULT_STRUCTS, + int _structSize=DEFAULT_STRUCT_SIZE, + int _nviews=DEFAULT_VIEWS, + int _compressionMethod=COMPRESSION_NONE, + const PatchGenerator& patchGenerator=PatchGenerator()); + virtual void train(const vector >& points, + const vector& refimgs, + const vector >& labels=vector >(), + int _nclasses=0, int _patchSize=PATCH_SIZE, + int _signatureSize=DEFAULT_SIGNATURE_SIZE, + int _nstructs=DEFAULT_STRUCTS, + int _structSize=DEFAULT_STRUCT_SIZE, + int _nviews=DEFAULT_VIEWS, + int _compressionMethod=COMPRESSION_NONE, + const PatchGenerator& patchGenerator=PatchGenerator()); + virtual int operator()(const Mat& img, Point2f kpt, vector& signature) const; + virtual int operator()(const Mat& patch, vector& signature) const; + virtual void clear(); + virtual bool empty() const; + void setVerbose(bool verbose); + + int getClassCount() const; + int 
getStructCount() const; + int getStructSize() const; + int getSignatureSize() const; + int getCompressionMethod() const; + Size getPatchSize() const; + + struct Feature + { + uchar x1, y1, x2, y2; + Feature() : x1(0), y1(0), x2(0), y2(0) {} + Feature(int _x1, int _y1, int _x2, int _y2) + : x1((uchar)_x1), y1((uchar)_y1), x2((uchar)_x2), y2((uchar)_y2) + {} + template bool operator ()(const Mat_<_Tp>& patch) const + { return patch(y1,x1) > patch(y2, x2); } + }; + + enum + { + PATCH_SIZE = 31, + DEFAULT_STRUCTS = 50, + DEFAULT_STRUCT_SIZE = 9, + DEFAULT_VIEWS = 5000, + DEFAULT_SIGNATURE_SIZE = 176, + COMPRESSION_NONE = 0, + COMPRESSION_RANDOM_PROJ = 1, + COMPRESSION_PCA = 2, + DEFAULT_COMPRESSION_METHOD = COMPRESSION_NONE + }; + +protected: + virtual void prepare(int _nclasses, int _patchSize, int _signatureSize, + int _nstructs, int _structSize, + int _nviews, int _compressionMethod); + virtual void finalize(RNG& rng); + virtual int getLeaf(int fidx, const Mat& patch) const; + + bool verbose; + int nstructs; + int structSize; + int nclasses; + int signatureSize; + int compressionMethod; + int leavesPerStruct; + Size patchSize; + vector features; + vector classCounters; + vector posteriors; +}; + + +/****************************************************************************************\ + * Calonder Classifier * + \****************************************************************************************/ + +struct RTreeNode; + +struct CV_EXPORTS BaseKeypoint +{ + int x; + int y; + IplImage* image; + + BaseKeypoint() + : x(0), y(0), image(NULL) + {} + + BaseKeypoint(int _x, int _y, IplImage* _image) + : x(_x), y(_y), image(_image) + {} +}; + +class CV_EXPORTS RandomizedTree +{ +public: + friend class RTreeClassifier; + + static const uchar PATCH_SIZE = 32; + static const int DEFAULT_DEPTH = 9; + static const int DEFAULT_VIEWS = 5000; + static const size_t DEFAULT_REDUCED_NUM_DIM = 176; + static float GET_LOWER_QUANT_PERC() { return .03f; } + static float GET_UPPER_QUANT_PERC() { return .92f; } + + RandomizedTree(); + ~RandomizedTree(); + + void train(vector const& base_set, RNG &rng, + int depth, int views, size_t reduced_num_dim, int num_quant_bits); + void train(vector const& base_set, RNG &rng, + PatchGenerator &make_patch, int depth, int views, size_t reduced_num_dim, + int num_quant_bits); + + // following two funcs are EXPERIMENTAL (do not use unless you know exactly what you do) + static void quantizeVector(float *vec, int dim, int N, float bnds[2], int clamp_mode=0); + static void quantizeVector(float *src, int dim, int N, float bnds[2], uchar *dst); + + // patch_data must be a 32x32 array (no row padding) + float* getPosterior(uchar* patch_data); + const float* getPosterior(uchar* patch_data) const; + uchar* getPosterior2(uchar* patch_data); + const uchar* getPosterior2(uchar* patch_data) const; + + void read(const char* file_name, int num_quant_bits); + void read(std::istream &is, int num_quant_bits); + void write(const char* file_name) const; + void write(std::ostream &os) const; + + int classes() { return classes_; } + int depth() { return depth_; } + + //void setKeepFloatPosteriors(bool b) { keep_float_posteriors_ = b; } + void discardFloatPosteriors() { freePosteriors(1); } + + inline void applyQuantization(int num_quant_bits) { makePosteriors2(num_quant_bits); } + + // debug + void savePosteriors(std::string url, bool append=false); + void savePosteriors2(std::string url, bool append=false); + +private: + int classes_; + int depth_; + int num_leaves_; + vector nodes_; + float 
**posteriors_; // 16-byte aligned posteriors + uchar **posteriors2_; // 16-byte aligned posteriors + vector<int> leaf_counts_; + + void createNodes(int num_nodes, RNG &rng); + void allocPosteriorsAligned(int num_leaves, int num_classes); + void freePosteriors(int which); // which: 1=posteriors_, 2=posteriors2_, 3=both + void init(int classes, int depth, RNG &rng); + void addExample(int class_id, uchar* patch_data); + void finalize(size_t reduced_num_dim, int num_quant_bits); + int getIndex(uchar* patch_data) const; + inline float* getPosteriorByIndex(int index); + inline const float* getPosteriorByIndex(int index) const; + inline uchar* getPosteriorByIndex2(int index); + inline const uchar* getPosteriorByIndex2(int index) const; + //void makeRandomMeasMatrix(float *cs_phi, PHI_DISTR_TYPE dt, size_t reduced_num_dim); + void convertPosteriorsToChar(); + void makePosteriors2(int num_quant_bits); + void compressLeaves(size_t reduced_num_dim); + void estimateQuantPercForPosteriors(float perc[2]); +}; + + +inline uchar* getData(IplImage* image) +{ + return reinterpret_cast<uchar*>(image->imageData); +} + +inline float* RandomizedTree::getPosteriorByIndex(int index) +{ + return const_cast<float*>(const_cast<const RandomizedTree*>(this)->getPosteriorByIndex(index)); +} + +inline const float* RandomizedTree::getPosteriorByIndex(int index) const +{ + return posteriors_[index]; +} + +inline uchar* RandomizedTree::getPosteriorByIndex2(int index) +{ + return const_cast<uchar*>(const_cast<const RandomizedTree*>(this)->getPosteriorByIndex2(index)); +} + +inline const uchar* RandomizedTree::getPosteriorByIndex2(int index) const +{ + return posteriors2_[index]; +} + +struct CV_EXPORTS RTreeNode +{ + short offset1, offset2; + + RTreeNode() {} + RTreeNode(uchar x1, uchar y1, uchar x2, uchar y2) + : offset1(y1*RandomizedTree::PATCH_SIZE + x1), + offset2(y2*RandomizedTree::PATCH_SIZE + x2) + {} + + //!
Left child on 0, right child on 1 + inline bool operator() (uchar* patch_data) const + { + return patch_data[offset1] > patch_data[offset2]; + } +}; + +class CV_EXPORTS RTreeClassifier +{ +public: + static const int DEFAULT_TREES = 48; + static const size_t DEFAULT_NUM_QUANT_BITS = 4; + + RTreeClassifier(); + void train(vector const& base_set, + RNG &rng, + int num_trees = RTreeClassifier::DEFAULT_TREES, + int depth = RandomizedTree::DEFAULT_DEPTH, + int views = RandomizedTree::DEFAULT_VIEWS, + size_t reduced_num_dim = RandomizedTree::DEFAULT_REDUCED_NUM_DIM, + int num_quant_bits = DEFAULT_NUM_QUANT_BITS); + void train(vector const& base_set, + RNG &rng, + PatchGenerator &make_patch, + int num_trees = RTreeClassifier::DEFAULT_TREES, + int depth = RandomizedTree::DEFAULT_DEPTH, + int views = RandomizedTree::DEFAULT_VIEWS, + size_t reduced_num_dim = RandomizedTree::DEFAULT_REDUCED_NUM_DIM, + int num_quant_bits = DEFAULT_NUM_QUANT_BITS); + + // sig must point to a memory block of at least classes()*sizeof(float|uchar) bytes + void getSignature(IplImage *patch, uchar *sig) const; + void getSignature(IplImage *patch, float *sig) const; + void getSparseSignature(IplImage *patch, float *sig, float thresh) const; + // TODO: deprecated in favor of getSignature overload, remove + void getFloatSignature(IplImage *patch, float *sig) const { getSignature(patch, sig); } + + static int countNonZeroElements(float *vec, int n, double tol=1e-10); + static inline void safeSignatureAlloc(uchar **sig, int num_sig=1, int sig_len=176); + static inline uchar* safeSignatureAlloc(int num_sig=1, int sig_len=176); + + inline int classes() const { return classes_; } + inline int original_num_classes() const { return original_num_classes_; } + + void setQuantization(int num_quant_bits); + void discardFloatPosteriors(); + + void read(const char* file_name); + void read(std::istream &is); + void write(const char* file_name) const; + void write(std::ostream &os) const; + + // experimental and debug + void saveAllFloatPosteriors(std::string file_url); + void saveAllBytePosteriors(std::string file_url); + void setFloatPosteriorsFromTextfile_176(std::string url); + float countZeroElements(); + + vector trees_; + +private: + int classes_; + int num_quant_bits_; + mutable uchar **posteriors_; + mutable unsigned short *ptemp_; + int original_num_classes_; + bool keep_floats_; +}; + +/****************************************************************************************\ +* One-Way Descriptor * +\****************************************************************************************/ + +// CvAffinePose: defines a parameterized affine transformation of an image patch. +// An image patch is rotated on angle phi (in degrees), then scaled lambda1 times +// along horizontal and lambda2 times along vertical direction, and then rotated again +// on angle (theta - phi). +class CV_EXPORTS CvAffinePose +{ +public: + float phi; + float theta; + float lambda1; + float lambda2; +}; + +class CV_EXPORTS OneWayDescriptor +{ +public: + OneWayDescriptor(); + ~OneWayDescriptor(); + + // allocates memory for given descriptor parameters + void Allocate(int pose_count, CvSize size, int nChannels); + + // GenerateSamples: generates affine transformed patches with averaging them over small transformation variations. 
+ // If external poses and transforms were specified, uses them instead of generating random ones + // - pose_count: the number of poses to be generated + // - frontal: the input patch (can be a roi in a larger image) + // - norm: if nonzero, normalizes the output patch so that the sum of pixel intensities is 1 + void GenerateSamples(int pose_count, IplImage* frontal, int norm = 0); + + // GenerateSamplesFast: generates affine transformed patches with averaging them over small transformation variations. + // Uses precalculated transformed pca components. + // - frontal: the input patch (can be a roi in a larger image) + // - pca_hr_avg: pca average vector + // - pca_hr_eigenvectors: pca eigenvectors + // - pca_descriptors: an array of precomputed descriptors of pca components containing their affine transformations + // pca_descriptors[0] corresponds to the average, pca_descriptors[1]-pca_descriptors[pca_dim] correspond to eigenvectors + void GenerateSamplesFast(IplImage* frontal, CvMat* pca_hr_avg, + CvMat* pca_hr_eigenvectors, OneWayDescriptor* pca_descriptors); + + // sets the poses and corresponding transforms + void SetTransforms(CvAffinePose* poses, CvMat** transforms); + + // Initialize: builds a descriptor. + // - pose_count: the number of poses to build. If poses were set externally, uses them rather than generating random ones + // - frontal: input patch. Can be a roi in a larger image + // - feature_name: the feature name to be associated with the descriptor + // - norm: if 1, the affine transformed patches are normalized so that their sum is 1 + void Initialize(int pose_count, IplImage* frontal, const char* feature_name = 0, int norm = 0); + + // InitializeFast: builds a descriptor using precomputed descriptors of pca components + // - pose_count: the number of poses to build + // - frontal: input patch. Can be a roi in a larger image + // - feature_name: the feature name to be associated with the descriptor + // - pca_hr_avg: average vector for PCA + // - pca_hr_eigenvectors: PCA eigenvectors (one vector per row) + // - pca_descriptors: precomputed descriptors of PCA components, the first descriptor for the average vector + // followed by the descriptors for eigenvectors + void InitializeFast(int pose_count, IplImage* frontal, const char* feature_name, + CvMat* pca_hr_avg, CvMat* pca_hr_eigenvectors, OneWayDescriptor* pca_descriptors); + + // ProjectPCASample: unwarps an image patch into a vector and projects it into PCA space + // - patch: input image patch + // - avg: PCA average vector + // - eigenvectors: PCA eigenvectors, one per row + // - pca_coeffs: output PCA coefficients + void ProjectPCASample(IplImage* patch, CvMat* avg, CvMat* eigenvectors, CvMat* pca_coeffs) const; + + // InitializePCACoeffs: projects all warped patches into PCA space + // - avg: PCA average vector + // - eigenvectors: PCA eigenvectors, one per row + void InitializePCACoeffs(CvMat* avg, CvMat* eigenvectors); + + // EstimatePose: finds the closest match between an input patch and a set of patches with different poses + // - patch: input image patch + // - pose_idx: the output index of the closest pose + // - distance: the distance to the closest pose (L2 distance) + void EstimatePose(IplImage* patch, int& pose_idx, float& distance) const; + + // EstimatePosePCA: finds the closest match between an input patch and a set of patches with different poses. 
+ // The distance between patches is computed in PCA space + // - patch: input image patch + // - pose_idx: the output index of the closest pose + // - distance: distance to the closest pose (L2 distance in PCA space) + // - avg: PCA average vector. If 0, matching without PCA is used + // - eigenvectors: PCA eigenvectors, one per row + void EstimatePosePCA(CvArr* patch, int& pose_idx, float& distance, CvMat* avg, CvMat* eigenvalues) const; + + // GetPatchSize: returns the size of each image patch after warping (2 times smaller than the input patch) + CvSize GetPatchSize() const + { + return m_patch_size; + } + + // GetInputPatchSize: returns the required size of the patch that the descriptor is built from + // (2 time larger than the patch after warping) + CvSize GetInputPatchSize() const + { + return cvSize(m_patch_size.width*2, m_patch_size.height*2); + } + + // GetPatch: returns a patch corresponding to specified pose index + // - index: pose index + // - return value: the patch corresponding to specified pose index + IplImage* GetPatch(int index); + + // GetPose: returns a pose corresponding to specified pose index + // - index: pose index + // - return value: the pose corresponding to specified pose index + CvAffinePose GetPose(int index) const; + + // Save: saves all patches with different poses to a specified path + void Save(const char* path); + + // ReadByName: reads a descriptor from a file storage + // - fs: file storage + // - parent: parent node + // - name: node name + // - return value: 1 if succeeded, 0 otherwise + int ReadByName(CvFileStorage* fs, CvFileNode* parent, const char* name); + + // ReadByName: reads a descriptor from a file node + // - parent: parent node + // - name: node name + // - return value: 1 if succeeded, 0 otherwise + int ReadByName(const FileNode &parent, const char* name); + + // Write: writes a descriptor into a file storage + // - fs: file storage + // - name: node name + void Write(CvFileStorage* fs, const char* name); + + // GetFeatureName: returns a name corresponding to a feature + const char* GetFeatureName() const; + + // GetCenter: returns the center of the feature + CvPoint GetCenter() const; + + void SetPCADimHigh(int pca_dim_high) {m_pca_dim_high = pca_dim_high;}; + void SetPCADimLow(int pca_dim_low) {m_pca_dim_low = pca_dim_low;}; + + int GetPCADimLow() const; + int GetPCADimHigh() const; + + CvMat** GetPCACoeffs() const {return m_pca_coeffs;} + +protected: + int m_pose_count; // the number of poses + CvSize m_patch_size; // size of each image + IplImage** m_samples; // an array of length m_pose_count containing the patch in different poses + IplImage* m_input_patch; + IplImage* m_train_patch; + CvMat** m_pca_coeffs; // an array of length m_pose_count containing pca decomposition of the patch in different poses + CvAffinePose* m_affine_poses; // an array of poses + CvMat** m_transforms; // an array of affine transforms corresponding to poses + + string m_feature_name; // the name of the feature associated with the descriptor + CvPoint m_center; // the coordinates of the feature (the center of the input image ROI) + + int m_pca_dim_high; // the number of descriptor pca components to use for generating affine poses + int m_pca_dim_low; // the number of pca components to use for comparison +}; + + +// OneWayDescriptorBase: encapsulates functionality for training/loading a set of one way descriptors +// and finding the nearest closest descriptor to an input feature +class CV_EXPORTS OneWayDescriptorBase +{ +public: + + // creates an instance 
of OneWayDescriptor from a set of training files + // - patch_size: size of the input (large) patch + // - pose_count: the number of poses to generate for each descriptor + // - train_path: path to training files + // - pca_config: the name of the file that contains PCA for small patches (2 times smaller + // than patch_size each dimension + // - pca_hr_config: the name of the file that contains PCA for large patches (of patch_size size) + // - pca_desc_config: the name of the file that contains descriptors of PCA components + OneWayDescriptorBase(CvSize patch_size, int pose_count, const char* train_path = 0, const char* pca_config = 0, + const char* pca_hr_config = 0, const char* pca_desc_config = 0, int pyr_levels = 1, + int pca_dim_high = 100, int pca_dim_low = 100); + + OneWayDescriptorBase(CvSize patch_size, int pose_count, const string &pca_filename, const string &train_path = string(), const string &images_list = string(), + float _scale_min = 0.7f, float _scale_max=1.5f, float _scale_step=1.2f, int pyr_levels = 1, + int pca_dim_high = 100, int pca_dim_low = 100); + + + virtual ~OneWayDescriptorBase(); + void clear (); + + + // Allocate: allocates memory for a given number of descriptors + void Allocate(int train_feature_count); + + // AllocatePCADescriptors: allocates memory for pca descriptors + void AllocatePCADescriptors(); + + // returns patch size + CvSize GetPatchSize() const {return m_patch_size;}; + // returns the number of poses for each descriptor + int GetPoseCount() const {return m_pose_count;}; + + // returns the number of pyramid levels + int GetPyrLevels() const {return m_pyr_levels;}; + + // returns the number of descriptors + int GetDescriptorCount() const {return m_train_feature_count;}; + + // CreateDescriptorsFromImage: creates descriptors for each of the input features + // - src: input image + // - features: input features + // - pyr_levels: the number of pyramid levels + void CreateDescriptorsFromImage(IplImage* src, const vector& features); + + // CreatePCADescriptors: generates descriptors for PCA components, needed for fast generation of feature descriptors + void CreatePCADescriptors(); + + // returns a feature descriptor by feature index + const OneWayDescriptor* GetDescriptor(int desc_idx) const {return &m_descriptors[desc_idx];}; + + // FindDescriptor: finds the closest descriptor + // - patch: input image patch + // - desc_idx: output index of the closest descriptor to the input patch + // - pose_idx: output index of the closest pose of the closest descriptor to the input patch + // - distance: distance from the input patch to the closest feature pose + // - _scales: scales of the input patch for each descriptor + // - scale_ranges: input scales variation (float[2]) + void FindDescriptor(IplImage* patch, int& desc_idx, int& pose_idx, float& distance, float* _scale = 0, float* scale_ranges = 0) const; + + // - patch: input image patch + // - n: number of the closest indexes + // - desc_idxs: output indexes of the closest descriptor to the input patch (n) + // - pose_idx: output indexes of the closest pose of the closest descriptor to the input patch (n) + // - distances: distance from the input patch to the closest feature pose (n) + // - _scales: scales of the input patch + // - scale_ranges: input scales variation (float[2]) + void FindDescriptor(IplImage* patch, int n, vector& desc_idxs, vector& pose_idxs, + vector& distances, vector& _scales, float* scale_ranges = 0) const; + + // FindDescriptor: finds the closest descriptor + // - src: input image 
+ // - pt: center of the feature + // - desc_idx: output index of the closest descriptor to the input patch + // - pose_idx: output index of the closest pose of the closest descriptor to the input patch + // - distance: distance from the input patch to the closest feature pose + void FindDescriptor(IplImage* src, cv::Point2f pt, int& desc_idx, int& pose_idx, float& distance) const; + + // InitializePoses: generates random poses + void InitializePoses(); + + // InitializeTransformsFromPoses: generates 2x3 affine matrices from poses (initializes m_transforms) + void InitializeTransformsFromPoses(); + + // InitializePoseTransforms: subsequently calls InitializePoses and InitializeTransformsFromPoses + void InitializePoseTransforms(); + + // InitializeDescriptor: initializes a descriptor + // - desc_idx: descriptor index + // - train_image: image patch (ROI is supported) + // - feature_label: feature textual label + void InitializeDescriptor(int desc_idx, IplImage* train_image, const char* feature_label); + + void InitializeDescriptor(int desc_idx, IplImage* train_image, const KeyPoint& keypoint, const char* feature_label); + + // InitializeDescriptors: load features from an image and create descriptors for each of them + void InitializeDescriptors(IplImage* train_image, const vector& features, + const char* feature_label = "", int desc_start_idx = 0); + + // Write: writes this object to a file storage + // - fs: output filestorage + void Write (FileStorage &fs) const; + + // Read: reads OneWayDescriptorBase object from a file node + // - fn: input file node + void Read (const FileNode &fn); + + // LoadPCADescriptors: loads PCA descriptors from a file + // - filename: input filename + int LoadPCADescriptors(const char* filename); + + // LoadPCADescriptors: loads PCA descriptors from a file node + // - fn: input file node + int LoadPCADescriptors(const FileNode &fn); + + // SavePCADescriptors: saves PCA descriptors to a file + // - filename: output filename + void SavePCADescriptors(const char* filename); + + // SavePCADescriptors: saves PCA descriptors to a file storage + // - fs: output file storage + void SavePCADescriptors(CvFileStorage* fs) const; + + // GeneratePCA: calculate and save PCA components and descriptors + // - img_path: path to training PCA images directory + // - images_list: filename with filenames of training PCA images + void GeneratePCA(const char* img_path, const char* images_list, int pose_count=500); + + // SetPCAHigh: sets the high resolution pca matrices (copied to internal structures) + void SetPCAHigh(CvMat* avg, CvMat* eigenvectors); + + // SetPCALow: sets the low resolution pca matrices (copied to internal structures) + void SetPCALow(CvMat* avg, CvMat* eigenvectors); + + int GetLowPCA(CvMat** avg, CvMat** eigenvectors) + { + *avg = m_pca_avg; + *eigenvectors = m_pca_eigenvectors; + return m_pca_dim_low; + }; + + int GetPCADimLow() const {return m_pca_dim_low;}; + int GetPCADimHigh() const {return m_pca_dim_high;}; + + void ConvertDescriptorsArrayToTree(); // Converting pca_descriptors array to KD tree + + // GetPCAFilename: get default PCA filename + static string GetPCAFilename () { return "pca.yml"; } + + virtual bool empty() const { return m_train_feature_count <= 0 ? 
true : false; } + +protected: + CvSize m_patch_size; // patch size + int m_pose_count; // the number of poses for each descriptor + int m_train_feature_count; // the number of the training features + OneWayDescriptor* m_descriptors; // array of train feature descriptors + CvMat* m_pca_avg; // PCA average Vector for small patches + CvMat* m_pca_eigenvectors; // PCA eigenvectors for small patches + CvMat* m_pca_hr_avg; // PCA average Vector for large patches + CvMat* m_pca_hr_eigenvectors; // PCA eigenvectors for large patches + OneWayDescriptor* m_pca_descriptors; // an array of PCA descriptors + + cv::flann::Index* m_pca_descriptors_tree; + CvMat* m_pca_descriptors_matrix; + + CvAffinePose* m_poses; // array of poses + CvMat** m_transforms; // array of affine transformations corresponding to poses + + int m_pca_dim_high; + int m_pca_dim_low; + + int m_pyr_levels; + float scale_min; + float scale_max; + float scale_step; + + // SavePCAall: saves PCA components and descriptors to a file storage + // - fs: output file storage + void SavePCAall (FileStorage &fs) const; + + // LoadPCAall: loads PCA components and descriptors from a file node + // - fn: input file node + void LoadPCAall (const FileNode &fn); +}; + +class CV_EXPORTS OneWayDescriptorObject : public OneWayDescriptorBase +{ +public: + // creates an instance of OneWayDescriptorObject from a set of training files + // - patch_size: size of the input (large) patch + // - pose_count: the number of poses to generate for each descriptor + // - train_path: path to training files + // - pca_config: the name of the file that contains PCA for small patches (2 times smaller + // than patch_size each dimension + // - pca_hr_config: the name of the file that contains PCA for large patches (of patch_size size) + // - pca_desc_config: the name of the file that contains descriptors of PCA components + OneWayDescriptorObject(CvSize patch_size, int pose_count, const char* train_path, const char* pca_config, + const char* pca_hr_config = 0, const char* pca_desc_config = 0, int pyr_levels = 1); + + OneWayDescriptorObject(CvSize patch_size, int pose_count, const string &pca_filename, + const string &train_path = string (), const string &images_list = string (), + float _scale_min = 0.7f, float _scale_max=1.5f, float _scale_step=1.2f, int pyr_levels = 1); + + + virtual ~OneWayDescriptorObject(); + + // Allocate: allocates memory for a given number of features + // - train_feature_count: the total number of features + // - object_feature_count: the number of features extracted from the object + void Allocate(int train_feature_count, int object_feature_count); + + + void SetLabeledFeatures(const vector& features) {m_train_features = features;}; + vector& GetLabeledFeatures() {return m_train_features;}; + const vector& GetLabeledFeatures() const {return m_train_features;}; + vector _GetLabeledFeatures() const; + + // IsDescriptorObject: returns 1 if descriptor with specified index is positive, otherwise 0 + int IsDescriptorObject(int desc_idx) const; + + // MatchPointToPart: returns the part number of a feature if it matches one of the object parts, otherwise -1 + int MatchPointToPart(CvPoint pt) const; + + // GetDescriptorPart: returns the part number of the feature corresponding to a specified descriptor + // - desc_idx: descriptor index + int GetDescriptorPart(int desc_idx) const; + + + void InitializeObjectDescriptors(IplImage* train_image, const vector& features, + const char* feature_label, int desc_start_idx = 0, float scale = 1.0f, + int 
is_background = 0); + + // GetObjectFeatureCount: returns the number of object features + int GetObjectFeatureCount() const {return m_object_feature_count;}; + +protected: + int* m_part_id; // contains part id for each of object descriptors + vector m_train_features; // train features + int m_object_feature_count; // the number of the positive features + +}; + + +/* + * OneWayDescriptorMatcher + */ +class OneWayDescriptorMatcher; +typedef OneWayDescriptorMatcher OneWayDescriptorMatch; + +class CV_EXPORTS OneWayDescriptorMatcher : public GenericDescriptorMatcher +{ +public: + class CV_EXPORTS Params + { + public: + static const int POSE_COUNT = 500; + static const int PATCH_WIDTH = 24; + static const int PATCH_HEIGHT = 24; + static float GET_MIN_SCALE() { return 0.7f; } + static float GET_MAX_SCALE() { return 1.5f; } + static float GET_STEP_SCALE() { return 1.2f; } + + Params( int poseCount = POSE_COUNT, + Size patchSize = Size(PATCH_WIDTH, PATCH_HEIGHT), + string pcaFilename = string(), + string trainPath = string(), string trainImagesList = string(), + float minScale = GET_MIN_SCALE(), float maxScale = GET_MAX_SCALE(), + float stepScale = GET_STEP_SCALE() ); + + int poseCount; + Size patchSize; + string pcaFilename; + string trainPath; + string trainImagesList; + + float minScale, maxScale, stepScale; + }; + + OneWayDescriptorMatcher( const Params& params=Params() ); + virtual ~OneWayDescriptorMatcher(); + + void initialize( const Params& params, const Ptr& base=Ptr() ); + + // Clears keypoints storing in collection and OneWayDescriptorBase + virtual void clear(); + + virtual void train(); + + virtual bool isMaskSupported(); + + virtual void read( const FileNode &fn ); + virtual void write( FileStorage& fs ) const; + + virtual bool empty() const; + + virtual Ptr clone( bool emptyTrainData=false ) const; + +protected: + // Matches a set of keypoints from a single image of the training set. A rectangle with a center in a keypoint + // and size (patch_width/2*scale, patch_height/2*scale) is cropped from the source image for each + // keypoint. scale is iterated from DescriptorOneWayParams::min_scale to DescriptorOneWayParams::max_scale. + // The minimum distance to each training patch with all its affine poses is found over all scales. + // The class ID of a match is returned for each keypoint. The distance is calculated over PCA components + // loaded with DescriptorOneWay::Initialize, kd tree is used for finding minimum distances. 
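/* A minimal matching sketch through the GenericDescriptorMatcher interface
   (illustrative only; the images, detected keypoints and the "pca.yml" file
   are assumed to exist, and the default pose count and patch size are used):

     Mat trainImg, queryImg;                  // loaded elsewhere
     vector<KeyPoint> trainKp, queryKp;       // filled by any keypoint detector
     OneWayDescriptorMatcher::Params p( OneWayDescriptorMatcher::Params::POSE_COUNT,
                                        Size(24, 24), "pca.yml" );
     OneWayDescriptorMatcher matcher( p );
     vector<DMatch> matches;
     matcher.match( queryImg, queryKp, trainImg, trainKp, matches );
*/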
+ virtual void knnMatchImpl( const Mat& queryImage, vector& queryKeypoints, + vector >& matches, int k, + const vector& masks, bool compactResult ); + virtual void radiusMatchImpl( const Mat& queryImage, vector& queryKeypoints, + vector >& matches, float maxDistance, + const vector& masks, bool compactResult ); + + Ptr base; + Params params; + int prevTrainCount; +}; + +/* + * FernDescriptorMatcher + */ +class FernDescriptorMatcher; +typedef FernDescriptorMatcher FernDescriptorMatch; + +class CV_EXPORTS FernDescriptorMatcher : public GenericDescriptorMatcher +{ +public: + class CV_EXPORTS Params + { + public: + Params( int nclasses=0, + int patchSize=FernClassifier::PATCH_SIZE, + int signatureSize=FernClassifier::DEFAULT_SIGNATURE_SIZE, + int nstructs=FernClassifier::DEFAULT_STRUCTS, + int structSize=FernClassifier::DEFAULT_STRUCT_SIZE, + int nviews=FernClassifier::DEFAULT_VIEWS, + int compressionMethod=FernClassifier::COMPRESSION_NONE, + const PatchGenerator& patchGenerator=PatchGenerator() ); + + Params( const string& filename ); + + int nclasses; + int patchSize; + int signatureSize; + int nstructs; + int structSize; + int nviews; + int compressionMethod; + PatchGenerator patchGenerator; + + string filename; + }; + + FernDescriptorMatcher( const Params& params=Params() ); + virtual ~FernDescriptorMatcher(); + + virtual void clear(); + + virtual void train(); + + virtual bool isMaskSupported(); + + virtual void read( const FileNode &fn ); + virtual void write( FileStorage& fs ) const; + virtual bool empty() const; + + virtual Ptr clone( bool emptyTrainData=false ) const; + +protected: + virtual void knnMatchImpl( const Mat& queryImage, vector& queryKeypoints, + vector >& matches, int k, + const vector& masks, bool compactResult ); + virtual void radiusMatchImpl( const Mat& queryImage, vector& queryKeypoints, + vector >& matches, float maxDistance, + const vector& masks, bool compactResult ); + + void trainFernClassifier(); + void calcBestProbAndMatchIdx( const Mat& image, const Point2f& pt, + float& bestProb, int& bestMatchIdx, vector& signature ); + Ptr classifier; + Params params; + int prevTrainCount; +}; + + +/* + * CalonderDescriptorExtractor + */ +template +class CV_EXPORTS CalonderDescriptorExtractor : public DescriptorExtractor +{ +public: + CalonderDescriptorExtractor( const string& classifierFile ); + + virtual void read( const FileNode &fn ); + virtual void write( FileStorage &fs ) const; + + virtual int descriptorSize() const { return classifier_.classes(); } + virtual int descriptorType() const { return DataType::type; } + + virtual bool empty() const; + +protected: + virtual void computeImpl( const Mat& image, vector& keypoints, Mat& descriptors ) const; + + RTreeClassifier classifier_; + static const int BORDER_SIZE = 16; +}; + +template +CalonderDescriptorExtractor::CalonderDescriptorExtractor(const std::string& classifier_file) +{ + classifier_.read( classifier_file.c_str() ); +} + +template +void CalonderDescriptorExtractor::computeImpl( const Mat& image, + vector& keypoints, + Mat& descriptors) const +{ + // Cannot compute descriptors for keypoints on the image border. 
+ KeyPointsFilter::runByImageBorder(keypoints, image.size(), BORDER_SIZE); + + /// @todo Check 16-byte aligned + descriptors.create((int)keypoints.size(), classifier_.classes(), cv::DataType::type); + + int patchSize = RandomizedTree::PATCH_SIZE; + int offset = patchSize / 2; + for (size_t i = 0; i < keypoints.size(); ++i) + { + cv::Point2f pt = keypoints[i].pt; + IplImage ipl = image( Rect((int)(pt.x - offset), (int)(pt.y - offset), patchSize, patchSize) ); + classifier_.getSignature( &ipl, descriptors.ptr((int)i)); + } +} + +template +void CalonderDescriptorExtractor::read( const FileNode& ) +{} + +template +void CalonderDescriptorExtractor::write( FileStorage& ) const +{} + +template +bool CalonderDescriptorExtractor::empty() const +{ + return classifier_.trees_.empty(); +} + + +////////////////////// Brute Force Matcher ////////////////////////// + +template +class CV_EXPORTS BruteForceMatcher : public BFMatcher +{ +public: + BruteForceMatcher( Distance d = Distance() ) : BFMatcher(Distance::normType, false) {(void)d;} + virtual ~BruteForceMatcher() {} +}; + + +/****************************************************************************************\ +* Planar Object Detection * +\****************************************************************************************/ + +class CV_EXPORTS PlanarObjectDetector +{ +public: + PlanarObjectDetector(); + PlanarObjectDetector(const FileNode& node); + PlanarObjectDetector(const vector& pyr, int _npoints=300, + int _patchSize=FernClassifier::PATCH_SIZE, + int _nstructs=FernClassifier::DEFAULT_STRUCTS, + int _structSize=FernClassifier::DEFAULT_STRUCT_SIZE, + int _nviews=FernClassifier::DEFAULT_VIEWS, + const LDetector& detector=LDetector(), + const PatchGenerator& patchGenerator=PatchGenerator()); + virtual ~PlanarObjectDetector(); + virtual void train(const vector& pyr, int _npoints=300, + int _patchSize=FernClassifier::PATCH_SIZE, + int _nstructs=FernClassifier::DEFAULT_STRUCTS, + int _structSize=FernClassifier::DEFAULT_STRUCT_SIZE, + int _nviews=FernClassifier::DEFAULT_VIEWS, + const LDetector& detector=LDetector(), + const PatchGenerator& patchGenerator=PatchGenerator()); + virtual void train(const vector& pyr, const vector& keypoints, + int _patchSize=FernClassifier::PATCH_SIZE, + int _nstructs=FernClassifier::DEFAULT_STRUCTS, + int _structSize=FernClassifier::DEFAULT_STRUCT_SIZE, + int _nviews=FernClassifier::DEFAULT_VIEWS, + const LDetector& detector=LDetector(), + const PatchGenerator& patchGenerator=PatchGenerator()); + Rect getModelROI() const; + vector getModelPoints() const; + const LDetector& getDetector() const; + const FernClassifier& getClassifier() const; + void setVerbose(bool verbose); + + void read(const FileNode& node); + void write(FileStorage& fs, const String& name=String()) const; + bool operator()(const Mat& image, CV_OUT Mat& H, CV_OUT vector& corners) const; + bool operator()(const vector& pyr, const vector& keypoints, + CV_OUT Mat& H, CV_OUT vector& corners, + CV_OUT vector* pairs=0) const; + +protected: + bool verbose; + Rect modelROI; + vector modelPoints; + LDetector ldetector; + FernClassifier fernClassifier; +}; + +} + +// 2009-01-12, Xavier Delacour + +struct lsh_hash { + int h1, h2; +}; + +struct CvLSHOperations +{ + virtual ~CvLSHOperations() {} + + virtual int vector_add(const void* data) = 0; + virtual void vector_remove(int i) = 0; + virtual const void* vector_lookup(int i) = 0; + virtual void vector_reserve(int n) = 0; + virtual unsigned int vector_count() = 0; + + virtual void hash_insert(lsh_hash h, int 
l, int i) = 0; + virtual void hash_remove(lsh_hash h, int l, int i) = 0; + virtual int hash_lookup(lsh_hash h, int l, int* ret_i, int ret_i_max) = 0; +}; + +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +/* Splits a color or grayscale image into multiple connected components + of nearly the same color/brightness using a modification of Burt's algorithm. + comp will contain a pointer to a sequence (CvSeq) + of connected components (CvConnectedComp) */ +CVAPI(void) cvPyrSegmentation( IplImage* src, IplImage* dst, + CvMemStorage* storage, CvSeq** comp, + int level, double threshold1, + double threshold2 ); + +/****************************************************************************************\ +* Planar subdivisions * +\****************************************************************************************/ + +/* Initializes Delaunay triangulation */ +CVAPI(void) cvInitSubdivDelaunay2D( CvSubdiv2D* subdiv, CvRect rect ); + +/* Creates a new subdivision */ +CVAPI(CvSubdiv2D*) cvCreateSubdiv2D( int subdiv_type, int header_size, + int vtx_size, int quadedge_size, + CvMemStorage* storage ); + +/************************* high-level subdivision functions ***************************/ + +/* Simplified Delaunay diagram creation */ +CV_INLINE CvSubdiv2D* cvCreateSubdivDelaunay2D( CvRect rect, CvMemStorage* storage ) +{ + CvSubdiv2D* subdiv = cvCreateSubdiv2D( CV_SEQ_KIND_SUBDIV2D, sizeof(*subdiv), + sizeof(CvSubdiv2DPoint), sizeof(CvQuadEdge2D), storage ); + + cvInitSubdivDelaunay2D( subdiv, rect ); + return subdiv; +} + + +/* Inserts a new point into the Delaunay triangulation */ +CVAPI(CvSubdiv2DPoint*) cvSubdivDelaunay2DInsert( CvSubdiv2D* subdiv, CvPoint2D32f pt); + +/* Locates a point within the Delaunay triangulation (finds the edge + the point is left to or belongs to, or the triangulation point the given + point coincides with) */ +CVAPI(CvSubdiv2DPointLocation) cvSubdiv2DLocate( + CvSubdiv2D* subdiv, CvPoint2D32f pt, + CvSubdiv2DEdge* edge, + CvSubdiv2DPoint** vertex CV_DEFAULT(NULL) ); + +/* Calculates the Voronoi tessellation (i.e. coordinates of Voronoi points) */ +CVAPI(void) cvCalcSubdivVoronoi2D( CvSubdiv2D* subdiv ); + + +/* Removes all Voronoi points from the tessellation */ +CVAPI(void) cvClearSubdivVoronoi2D( CvSubdiv2D* subdiv ); + + +/* Finds the subdivision vertex nearest to the given point.
*/ +CVAPI(CvSubdiv2DPoint*) cvFindNearestPoint2D( CvSubdiv2D* subdiv, CvPoint2D32f pt ); + + +/************ Basic quad-edge navigation and operations ************/ + +CV_INLINE CvSubdiv2DEdge cvSubdiv2DNextEdge( CvSubdiv2DEdge edge ) +{ + return CV_SUBDIV2D_NEXT_EDGE(edge); +} + + +CV_INLINE CvSubdiv2DEdge cvSubdiv2DRotateEdge( CvSubdiv2DEdge edge, int rotate ) +{ + return (edge & ~3) + ((edge + rotate) & 3); +} + +CV_INLINE CvSubdiv2DEdge cvSubdiv2DSymEdge( CvSubdiv2DEdge edge ) +{ + return edge ^ 2; +} + +CV_INLINE CvSubdiv2DEdge cvSubdiv2DGetEdge( CvSubdiv2DEdge edge, CvNextEdgeType type ) +{ + CvQuadEdge2D* e = (CvQuadEdge2D*)(edge & ~3); + edge = e->next[(edge + (int)type) & 3]; + return (edge & ~3) + ((edge + ((int)type >> 4)) & 3); +} + + +CV_INLINE CvSubdiv2DPoint* cvSubdiv2DEdgeOrg( CvSubdiv2DEdge edge ) +{ + CvQuadEdge2D* e = (CvQuadEdge2D*)(edge & ~3); + return (CvSubdiv2DPoint*)e->pt[edge & 3]; +} + + +CV_INLINE CvSubdiv2DPoint* cvSubdiv2DEdgeDst( CvSubdiv2DEdge edge ) +{ + CvQuadEdge2D* e = (CvQuadEdge2D*)(edge & ~3); + return (CvSubdiv2DPoint*)e->pt[(edge + 2) & 3]; +} + +/****************************************************************************************\ +* Additional operations on Subdivisions * +\****************************************************************************************/ + +// paints voronoi diagram: just demo function +CVAPI(void) icvDrawMosaic( CvSubdiv2D* subdiv, IplImage* src, IplImage* dst ); + +// checks planar subdivision for correctness. It is not an absolute check, +// but it verifies some relations between quad-edges +CVAPI(int) icvSubdiv2DCheck( CvSubdiv2D* subdiv ); + +// returns squared distance between two 2D points with floating-point coordinates. +CV_INLINE double icvSqDist2D32f( CvPoint2D32f pt1, CvPoint2D32f pt2 ) +{ + double dx = pt1.x - pt2.x; + double dy = pt1.y - pt2.y; + + return dx*dx + dy*dy; +} + + + + +CV_INLINE double cvTriangleArea( CvPoint2D32f a, CvPoint2D32f b, CvPoint2D32f c ) +{ + return ((double)b.x - a.x) * ((double)c.y - a.y) - ((double)b.y - a.y) * ((double)c.x - a.x); +} + + +/* Constructs kd-tree from set of feature descriptors */ +CVAPI(struct CvFeatureTree*) cvCreateKDTree(CvMat* desc); + +/* Constructs spill-tree from set of feature descriptors */ +CVAPI(struct CvFeatureTree*) cvCreateSpillTree( const CvMat* raw_data, + const int naive CV_DEFAULT(50), + const double rho CV_DEFAULT(.7), + const double tau CV_DEFAULT(.1) ); + +/* Release feature tree */ +CVAPI(void) cvReleaseFeatureTree(struct CvFeatureTree* tr); + +/* Searches feature tree for k nearest neighbors of given reference points, + searching (in case of kd-tree/bbf) at most emax leaves. */ +CVAPI(void) cvFindFeatures(struct CvFeatureTree* tr, const CvMat* query_points, + CvMat* indices, CvMat* dist, int k, int emax CV_DEFAULT(20)); + +/* Search feature tree for all points that are inlier to given rect region. + Only implemented for kd trees */ +CVAPI(int) cvFindFeaturesBoxed(struct CvFeatureTree* tr, + CvMat* bounds_min, CvMat* bounds_max, + CvMat* out_indices); + + +/* Construct a Locality Sensitive Hash (LSH) table, for indexing d-dimensional vectors of + given type. Vectors will be hashed L times with k-dimensional p-stable (p=2) functions. */ +CVAPI(struct CvLSH*) cvCreateLSH(struct CvLSHOperations* ops, int d, + int L CV_DEFAULT(10), int k CV_DEFAULT(10), + int type CV_DEFAULT(CV_64FC1), double r CV_DEFAULT(4), + int64 seed CV_DEFAULT(-1)); + +/* Construct in-memory LSH table, with n bins. 
*/ +CVAPI(struct CvLSH*) cvCreateMemoryLSH(int d, int n, int L CV_DEFAULT(10), int k CV_DEFAULT(10), + int type CV_DEFAULT(CV_64FC1), double r CV_DEFAULT(4), + int64 seed CV_DEFAULT(-1)); + +/* Free the given LSH structure. */ +CVAPI(void) cvReleaseLSH(struct CvLSH** lsh); + +/* Return the number of vectors in the LSH. */ +CVAPI(unsigned int) LSHSize(struct CvLSH* lsh); + +/* Add vectors to the LSH structure, optionally returning indices. */ +CVAPI(void) cvLSHAdd(struct CvLSH* lsh, const CvMat* data, CvMat* indices CV_DEFAULT(0)); + +/* Remove vectors from LSH, as addressed by given indices. */ +CVAPI(void) cvLSHRemove(struct CvLSH* lsh, const CvMat* indices); + +/* Query the LSH n times for at most k nearest points; data is n x d, + indices and dist are n x k. At most emax stored points will be accessed. */ +CVAPI(void) cvLSHQuery(struct CvLSH* lsh, const CvMat* query_points, + CvMat* indices, CvMat* dist, int k, int emax); + +/* Kolmogorov-Zabin stereo-correspondence algorithm (a.k.a. KZ1) */ +#define CV_STEREO_GC_OCCLUDED SHRT_MAX + +typedef struct CvStereoGCState +{ + int Ithreshold; + int interactionRadius; + float K, lambda, lambda1, lambda2; + int occlusionCost; + int minDisparity; + int numberOfDisparities; + int maxIters; + + CvMat* left; + CvMat* right; + CvMat* dispLeft; + CvMat* dispRight; + CvMat* ptrLeft; + CvMat* ptrRight; + CvMat* vtxBuf; + CvMat* edgeBuf; +} CvStereoGCState; + +CVAPI(CvStereoGCState*) cvCreateStereoGCState( int numberOfDisparities, int maxIters ); +CVAPI(void) cvReleaseStereoGCState( CvStereoGCState** state ); + +CVAPI(void) cvFindStereoCorrespondenceGC( const CvArr* left, const CvArr* right, + CvArr* disparityLeft, CvArr* disparityRight, + CvStereoGCState* state, + int useDisparityGuess CV_DEFAULT(0) ); + +/* Calculates optical flow for 2 images using classical Lucas & Kanade algorithm */ +CVAPI(void) cvCalcOpticalFlowLK( const CvArr* prev, const CvArr* curr, + CvSize win_size, CvArr* velx, CvArr* vely ); + +/* Calculates optical flow for 2 images using block matching algorithm */ +CVAPI(void) cvCalcOpticalFlowBM( const CvArr* prev, const CvArr* curr, + CvSize block_size, CvSize shift_size, + CvSize max_range, int use_previous, + CvArr* velx, CvArr* vely ); + +/* Calculates Optical flow for 2 images using Horn & Schunck algorithm */ +CVAPI(void) cvCalcOpticalFlowHS( const CvArr* prev, const CvArr* curr, + int use_previous, CvArr* velx, CvArr* vely, + double lambda, CvTermCriteria criteria ); + + +/****************************************************************************************\ +* Background/foreground segmentation * +\****************************************************************************************/ + +/* We discriminate between foreground and background pixels + * by building and maintaining a model of the background. + * Any pixel which does not fit this model is then deemed + * to be foreground. + * + * At present we support two core background models, + * one of which has two variations: + * + * o CV_BG_MODEL_FGD: latest and greatest algorithm, described in + * + * Foreground Object Detection from Videos Containing Complex Background. + * Liyuan Li, Weimin Huang, Irene Y.H. Gu, and Qi Tian. + * ACM MM2003 9p + * + * o CV_BG_MODEL_FGD_SIMPLE: + * A code comment describes this as a simplified version of the above, + * but the code is in fact currently identical + * + * o CV_BG_MODEL_MOG: "Mixture of Gaussians", older algorithm, described in + * + * Moving target classification and tracking from real-time video. 
+ * A Lipton, H Fujijoshi, R Patil + * Proceedings IEEE Workshop on Application of Computer Vision pp 8-14 1998 + * + * Learning patterns of activity using real-time tracking + * C Stauffer and W Grimson August 2000 + * IEEE Transactions on Pattern Analysis and Machine Intelligence 22(8):747-757 + */ + + +#define CV_BG_MODEL_FGD 0 +#define CV_BG_MODEL_MOG 1 /* "Mixture of Gaussians". */ +#define CV_BG_MODEL_FGD_SIMPLE 2 + +struct CvBGStatModel; + +typedef void (CV_CDECL * CvReleaseBGStatModel)( struct CvBGStatModel** bg_model ); +typedef int (CV_CDECL * CvUpdateBGStatModel)( IplImage* curr_frame, struct CvBGStatModel* bg_model, + double learningRate ); + +#define CV_BG_STAT_MODEL_FIELDS() \ +int type; /*type of BG model*/ \ +CvReleaseBGStatModel release; \ +CvUpdateBGStatModel update; \ +IplImage* background; /*8UC3 reference background image*/ \ +IplImage* foreground; /*8UC1 foreground image*/ \ +IplImage** layers; /*8UC3 reference background image, can be null */ \ +int layer_count; /* can be zero */ \ +CvMemStorage* storage; /*storage for foreground_regions*/ \ +CvSeq* foreground_regions /*foreground object contours*/ + +typedef struct CvBGStatModel +{ + CV_BG_STAT_MODEL_FIELDS(); +} CvBGStatModel; + +// + +// Releases memory used by BGStatModel +CVAPI(void) cvReleaseBGStatModel( CvBGStatModel** bg_model ); + +// Updates statistical model and returns number of found foreground regions +CVAPI(int) cvUpdateBGStatModel( IplImage* current_frame, CvBGStatModel* bg_model, + double learningRate CV_DEFAULT(-1)); + +// Performs FG post-processing using segmentation +// (all pixels of a region will be classified as foreground if majority of pixels of the region are FG). +// parameters: +// segments - pointer to result of segmentation (for example MeanShiftSegmentation) +// bg_model - pointer to CvBGStatModel structure +CVAPI(void) cvRefineForegroundMaskBySegm( CvSeq* segments, CvBGStatModel* bg_model ); + +/* Common use change detection function */ +CVAPI(int) cvChangeDetection( IplImage* prev_frame, + IplImage* curr_frame, + IplImage* change_mask ); + +/* + Interface of ACM MM2003 algorithm + */ + +/* Default parameters of foreground detection algorithm: */ +#define CV_BGFG_FGD_LC 128 +#define CV_BGFG_FGD_N1C 15 +#define CV_BGFG_FGD_N2C 25 + +#define CV_BGFG_FGD_LCC 64 +#define CV_BGFG_FGD_N1CC 25 +#define CV_BGFG_FGD_N2CC 40 + +/* Background reference image update parameter: */ +#define CV_BGFG_FGD_ALPHA_1 0.1f + +/* stat model update parameter + * 0.002f ~ 1K frame(~45sec), 0.005 ~ 18sec (if 25fps and absolutely static BG) + */ +#define CV_BGFG_FGD_ALPHA_2 0.005f + +/* start value for alpha parameter (to fast initiate statistic model) */ +#define CV_BGFG_FGD_ALPHA_3 0.1f + +#define CV_BGFG_FGD_DELTA 2 + +#define CV_BGFG_FGD_T 0.9f + +#define CV_BGFG_FGD_MINAREA 15.f + +#define CV_BGFG_FGD_BG_UPDATE_TRESH 0.5f + +/* See the above-referenced Li/Huang/Gu/Tian paper + * for a full description of these background-model + * tuning parameters. + * + * Nomenclature: 'c' == "color", a three-component red/green/blue vector. + * We use histograms of these to model the range of + * colors we've seen at a given background pixel. + * + * 'cc' == "color co-occurrence", a six-component vector giving + * RGB color for both this frame and preceding frame. + * We use histograms of these to model the range of + * color CHANGES we've seen at a given background pixel. + */ +typedef struct CvFGDStatModelParams +{ + int Lc; /* Quantized levels per 'color' component. Power of two, typically 32, 64 or 128. 
*/ + int N1c; /* Number of color vectors used to model normal background color variation at a given pixel. */ + int N2c; /* Number of color vectors retained at given pixel. Must be > N1c, typically ~ 5/3 of N1c. */ + /* Used to allow the first N1c vectors to adapt over time to changing background. */ + + int Lcc; /* Quantized levels per 'color co-occurrence' component. Power of two, typically 16, 32 or 64. */ + int N1cc; /* Number of color co-occurrence vectors used to model normal background color variation at a given pixel. */ + int N2cc; /* Number of color co-occurrence vectors retained at given pixel. Must be > N1cc, typically ~ 5/3 of N1cc. */ + /* Used to allow the first N1cc vectors to adapt over time to changing background. */ + + int is_obj_without_holes;/* If TRUE we ignore holes within foreground blobs. Defaults to TRUE. */ + int perform_morphing; /* Number of erode-dilate-erode foreground-blob cleanup iterations. */ + /* These erase one-pixel junk blobs and merge almost-touching blobs. Default value is 1. */ + + float alpha1; /* How quickly we forget old background pixel values seen. Typically set to 0.1 */ + float alpha2; /* "Controls speed of feature learning". Depends on T. Typical value circa 0.005. */ + float alpha3; /* Alternate to alpha2, used (e.g.) for quicker initial convergence. Typical value 0.1. */ + + float delta; /* Affects color and color co-occurrence quantization, typically set to 2. */ + float T; /* "A percentage value which determines when new features can be recognized as new background." (Typically 0.9).*/ + float minArea; /* Discard foreground blobs whose bounding box is smaller than this threshold. */ +} CvFGDStatModelParams; + +typedef struct CvBGPixelCStatTable +{ + float Pv, Pvb; + uchar v[3]; +} CvBGPixelCStatTable; + +typedef struct CvBGPixelCCStatTable +{ + float Pv, Pvb; + uchar v[6]; +} CvBGPixelCCStatTable; + +typedef struct CvBGPixelStat +{ + float Pbc; + float Pbcc; + CvBGPixelCStatTable* ctable; + CvBGPixelCCStatTable* cctable; + uchar is_trained_st_model; + uchar is_trained_dyn_model; +} CvBGPixelStat; + + +typedef struct CvFGDStatModel +{ + CV_BG_STAT_MODEL_FIELDS(); + CvBGPixelStat* pixel_stat; + IplImage* Ftd; + IplImage* Fbd; + IplImage* prev_frame; + CvFGDStatModelParams params; +} CvFGDStatModel; + +/* Creates FGD model */ +CVAPI(CvBGStatModel*) cvCreateFGDStatModel( IplImage* first_frame, + CvFGDStatModelParams* parameters CV_DEFAULT(NULL)); + +/* + Interface of Gaussian mixture algorithm + + "An improved adaptive background mixture model for real-time tracking with shadow detection" + P. KadewTraKuPong and R. Bowden, + Proc. 2nd European Workshp on Advanced Video-Based Surveillance Systems, 2001." 
+ http://personal.ee.surrey.ac.uk/Personal/R.Bowden/publications/avbs01/avbs01.pdf + */ + +/* Note: "MOG" == "Mixture Of Gaussians": */ + +#define CV_BGFG_MOG_MAX_NGAUSSIANS 500 + +/* default parameters of gaussian background detection algorithm */ +#define CV_BGFG_MOG_BACKGROUND_THRESHOLD 0.7 /* threshold sum of weights for background test */ +#define CV_BGFG_MOG_STD_THRESHOLD 2.5 /* lambda=2.5 is 99% */ +#define CV_BGFG_MOG_WINDOW_SIZE 200 /* Learning rate; alpha = 1/CV_GBG_WINDOW_SIZE */ +#define CV_BGFG_MOG_NGAUSSIANS 5 /* = K = number of Gaussians in mixture */ +#define CV_BGFG_MOG_WEIGHT_INIT 0.05 +#define CV_BGFG_MOG_SIGMA_INIT 30 +#define CV_BGFG_MOG_MINAREA 15.f + + +#define CV_BGFG_MOG_NCOLORS 3 + +typedef struct CvGaussBGStatModelParams +{ + int win_size; /* = 1/alpha */ + int n_gauss; + double bg_threshold, std_threshold, minArea; + double weight_init, variance_init; +}CvGaussBGStatModelParams; + +typedef struct CvGaussBGValues +{ + int match_sum; + double weight; + double variance[CV_BGFG_MOG_NCOLORS]; + double mean[CV_BGFG_MOG_NCOLORS]; +} CvGaussBGValues; + +typedef struct CvGaussBGPoint +{ + CvGaussBGValues* g_values; +} CvGaussBGPoint; + + +typedef struct CvGaussBGModel +{ + CV_BG_STAT_MODEL_FIELDS(); + CvGaussBGStatModelParams params; + CvGaussBGPoint* g_point; + int countFrames; + void* mog; +} CvGaussBGModel; + + +/* Creates Gaussian mixture background model */ +CVAPI(CvBGStatModel*) cvCreateGaussianBGModel( IplImage* first_frame, + CvGaussBGStatModelParams* parameters CV_DEFAULT(NULL)); + + +typedef struct CvBGCodeBookElem +{ + struct CvBGCodeBookElem* next; + int tLastUpdate; + int stale; + uchar boxMin[3]; + uchar boxMax[3]; + uchar learnMin[3]; + uchar learnMax[3]; +} CvBGCodeBookElem; + +typedef struct CvBGCodeBookModel +{ + CvSize size; + int t; + uchar cbBounds[3]; + uchar modMin[3]; + uchar modMax[3]; + CvBGCodeBookElem** cbmap; + CvMemStorage* storage; + CvBGCodeBookElem* freeList; +} CvBGCodeBookModel; + +CVAPI(CvBGCodeBookModel*) cvCreateBGCodeBookModel( void ); +CVAPI(void) cvReleaseBGCodeBookModel( CvBGCodeBookModel** model ); + +CVAPI(void) cvBGCodeBookUpdate( CvBGCodeBookModel* model, const CvArr* image, + CvRect roi CV_DEFAULT(cvRect(0,0,0,0)), + const CvArr* mask CV_DEFAULT(0) ); + +CVAPI(int) cvBGCodeBookDiff( const CvBGCodeBookModel* model, const CvArr* image, + CvArr* fgmask, CvRect roi CV_DEFAULT(cvRect(0,0,0,0)) ); + +CVAPI(void) cvBGCodeBookClearStale( CvBGCodeBookModel* model, int staleThresh, + CvRect roi CV_DEFAULT(cvRect(0,0,0,0)), + const CvArr* mask CV_DEFAULT(0) ); + +CVAPI(CvSeq*) cvSegmentFGMask( CvArr *fgmask, int poly1Hull0 CV_DEFAULT(1), + float perimScale CV_DEFAULT(4.f), + CvMemStorage* storage CV_DEFAULT(0), + CvPoint offset CV_DEFAULT(cvPoint(0,0))); + +#ifdef __cplusplus +} +#endif + +#endif + +/* End of file. */ diff --git a/src/c/hardware/rasberrypi/libraries/opencv/opencv2/legacy/streams.hpp b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/legacy/streams.hpp new file mode 100644 index 0000000..e164bf4 --- /dev/null +++ b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/legacy/streams.hpp @@ -0,0 +1,92 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. 
+// +// +// Intel License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000, Intel Corporation, all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of Intel Corporation may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef __OPENCV_CVSTREAMS_H__ +#define __OPENCV_CVSTREAMS_H__ + +#ifdef WIN32 +#include /* !!! IF YOU'VE GOT AN ERROR HERE, PLEASE READ BELOW !!! */ +/***************** How to get Visual Studio understand streams.h ****************\ + +You need DirectShow SDK that is now a part of Platform SDK +(Windows Server 2003 SP1 SDK or later), +and DirectX SDK (2006 April or later). + +1. Download the Platform SDK from + http://www.microsoft.com/msdownload/platformsdk/sdkupdate/ + and DirectX SDK from msdn.microsoft.com/directx/ + (They are huge, but you can download it by parts). + If it doesn't work for you, consider HighGUI that can capture video via VFW or MIL + +2. Install Platform SDK together with DirectShow SDK. + Install DirectX (with or without sample code). + +3. Build baseclasses. + See \samples\multimedia\directshow\readme.txt. + +4. Copy the built libraries (called strmbase.lib and strmbasd.lib + in Release and Debug versions, respectively) to + \lib. + +5. In Developer Studio add the following paths: + \include + \include + \samples\multimedia\directshow\baseclasses + to the includes' search path + (at Tools->Options->Directories->Include files in case of Visual Studio 6.0, + at Tools->Options->Projects and Solutions->VC++ Directories->Include files in case + of Visual Studio 2005) + Add + \lib + \lib + to the libraries' search path (in the same dialog, ...->"Library files" page) + + NOTE: PUT THE ADDED LINES ON THE VERY TOP OF THE LISTS, OTHERWISE YOU MAY STILL GET + COMPILER OR LINKER ERRORS. This is necessary, because Visual Studio + may include older versions of the same headers and libraries. + +6. Now you can build OpenCV DirectShow filters. 
+ +\***********************************************************************************/ + +#endif + +#endif diff --git a/src/c/hardware/rasberrypi/libraries/opencv/opencv2/ml.hpp b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/ml.hpp new file mode 100644 index 0000000..dc62dcb --- /dev/null +++ b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/ml.hpp @@ -0,0 +1,41 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// Intel License Agreement +// +// Copyright (C) 2000, Intel Corporation, all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of Intel Corporation may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#include "opencv2/ml/ml.hpp" diff --git a/src/c/hardware/rasberrypi/libraries/opencv/opencv2/ml/ml.hpp b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/ml/ml.hpp new file mode 100644 index 0000000..d86ecde --- /dev/null +++ b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/ml/ml.hpp @@ -0,0 +1,2147 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// Intel License Agreement +// +// Copyright (C) 2000, Intel Corporation, all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. 
+// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of Intel Corporation may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef __OPENCV_ML_HPP__ +#define __OPENCV_ML_HPP__ + +#include "opencv2/core/core.hpp" +#include + +#ifdef __cplusplus + +#include +#include +#include + +// Apple defines a check() macro somewhere in the debug headers +// that interferes with a method definiton in this header +#undef check + +/****************************************************************************************\ +* Main struct definitions * +\****************************************************************************************/ + +/* log(2*PI) */ +#define CV_LOG2PI (1.8378770664093454835606594728112) + +/* columns of matrix are training samples */ +#define CV_COL_SAMPLE 0 + +/* rows of matrix are training samples */ +#define CV_ROW_SAMPLE 1 + +#define CV_IS_ROW_SAMPLE(flags) ((flags) & CV_ROW_SAMPLE) + +struct CvVectors +{ + int type; + int dims, count; + CvVectors* next; + union + { + uchar** ptr; + float** fl; + double** db; + } data; +}; + +#if 0 +/* A structure, representing the lattice range of statmodel parameters. + It is used for optimizing statmodel parameters by cross-validation method. + The lattice is logarithmic, so must be greater then 1. */ +typedef struct CvParamLattice +{ + double min_val; + double max_val; + double step; +} +CvParamLattice; + +CV_INLINE CvParamLattice cvParamLattice( double min_val, double max_val, + double log_step ) +{ + CvParamLattice pl; + pl.min_val = MIN( min_val, max_val ); + pl.max_val = MAX( min_val, max_val ); + pl.step = MAX( log_step, 1. 
); + return pl; +} + +CV_INLINE CvParamLattice cvDefaultParamLattice( void ) +{ + CvParamLattice pl = {0,0,0}; + return pl; +} +#endif + +/* Variable type */ +#define CV_VAR_NUMERICAL 0 +#define CV_VAR_ORDERED 0 +#define CV_VAR_CATEGORICAL 1 + +#define CV_TYPE_NAME_ML_SVM "opencv-ml-svm" +#define CV_TYPE_NAME_ML_KNN "opencv-ml-knn" +#define CV_TYPE_NAME_ML_NBAYES "opencv-ml-bayesian" +#define CV_TYPE_NAME_ML_EM "opencv-ml-em" +#define CV_TYPE_NAME_ML_BOOSTING "opencv-ml-boost-tree" +#define CV_TYPE_NAME_ML_TREE "opencv-ml-tree" +#define CV_TYPE_NAME_ML_ANN_MLP "opencv-ml-ann-mlp" +#define CV_TYPE_NAME_ML_CNN "opencv-ml-cnn" +#define CV_TYPE_NAME_ML_RTREES "opencv-ml-random-trees" +#define CV_TYPE_NAME_ML_ERTREES "opencv-ml-extremely-randomized-trees" +#define CV_TYPE_NAME_ML_GBT "opencv-ml-gradient-boosting-trees" + +#define CV_TRAIN_ERROR 0 +#define CV_TEST_ERROR 1 + +class CV_EXPORTS_W CvStatModel +{ +public: + CvStatModel(); + virtual ~CvStatModel(); + + virtual void clear(); + + CV_WRAP virtual void save( const char* filename, const char* name=0 ) const; + CV_WRAP virtual void load( const char* filename, const char* name=0 ); + + virtual void write( CvFileStorage* storage, const char* name ) const; + virtual void read( CvFileStorage* storage, CvFileNode* node ); + +protected: + const char* default_model_name; +}; + +/****************************************************************************************\ +* Normal Bayes Classifier * +\****************************************************************************************/ + +/* The structure, representing the grid range of statmodel parameters. + It is used for optimizing statmodel accuracy by varying model parameters, + the accuracy estimate being computed by cross-validation. + The grid is logarithmic, so must be greater then 1. 
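   For example (an illustrative note, not part of the original comment): a grid
   constructed as CvParamGrid(0.1, 100, 10) would typically make a method such as
   train_auto try the candidate values 0.1, 1, 10, ... for that parameter, each
   candidate being the previous value multiplied by the logarithmic step, up to max_val.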
*/ + +class CvMLData; + +struct CV_EXPORTS_W_MAP CvParamGrid +{ + // SVM params type + enum { SVM_C=0, SVM_GAMMA=1, SVM_P=2, SVM_NU=3, SVM_COEF=4, SVM_DEGREE=5 }; + + CvParamGrid() + { + min_val = max_val = step = 0; + } + + CvParamGrid( double min_val, double max_val, double log_step ); + //CvParamGrid( int param_id ); + bool check() const; + + CV_PROP_RW double min_val; + CV_PROP_RW double max_val; + CV_PROP_RW double step; +}; + +inline CvParamGrid::CvParamGrid( double _min_val, double _max_val, double _log_step ) +{ + min_val = _min_val; + max_val = _max_val; + step = _log_step; +} + +class CV_EXPORTS_W CvNormalBayesClassifier : public CvStatModel +{ +public: + CV_WRAP CvNormalBayesClassifier(); + virtual ~CvNormalBayesClassifier(); + + CvNormalBayesClassifier( const CvMat* trainData, const CvMat* responses, + const CvMat* varIdx=0, const CvMat* sampleIdx=0 ); + + virtual bool train( const CvMat* trainData, const CvMat* responses, + const CvMat* varIdx = 0, const CvMat* sampleIdx=0, bool update=false ); + + virtual float predict( const CvMat* samples, CV_OUT CvMat* results=0 ) const; + CV_WRAP virtual void clear(); + + CV_WRAP CvNormalBayesClassifier( const cv::Mat& trainData, const cv::Mat& responses, + const cv::Mat& varIdx=cv::Mat(), const cv::Mat& sampleIdx=cv::Mat() ); + CV_WRAP virtual bool train( const cv::Mat& trainData, const cv::Mat& responses, + const cv::Mat& varIdx = cv::Mat(), const cv::Mat& sampleIdx=cv::Mat(), + bool update=false ); + CV_WRAP virtual float predict( const cv::Mat& samples, CV_OUT cv::Mat* results=0 ) const; + + virtual void write( CvFileStorage* storage, const char* name ) const; + virtual void read( CvFileStorage* storage, CvFileNode* node ); + +protected: + int var_count, var_all; + CvMat* var_idx; + CvMat* cls_labels; + CvMat** count; + CvMat** sum; + CvMat** productsum; + CvMat** avg; + CvMat** inv_eigen_values; + CvMat** cov_rotate_mats; + CvMat* c; +}; + + +/****************************************************************************************\ +* K-Nearest Neighbour Classifier * +\****************************************************************************************/ + +// k Nearest Neighbors +class CV_EXPORTS_W CvKNearest : public CvStatModel +{ +public: + + CV_WRAP CvKNearest(); + virtual ~CvKNearest(); + + CvKNearest( const CvMat* trainData, const CvMat* responses, + const CvMat* sampleIdx=0, bool isRegression=false, int max_k=32 ); + + virtual bool train( const CvMat* trainData, const CvMat* responses, + const CvMat* sampleIdx=0, bool is_regression=false, + int maxK=32, bool updateBase=false ); + + virtual float find_nearest( const CvMat* samples, int k, CV_OUT CvMat* results=0, + const float** neighbors=0, CV_OUT CvMat* neighborResponses=0, CV_OUT CvMat* dist=0 ) const; + + CV_WRAP CvKNearest( const cv::Mat& trainData, const cv::Mat& responses, + const cv::Mat& sampleIdx=cv::Mat(), bool isRegression=false, int max_k=32 ); + + CV_WRAP virtual bool train( const cv::Mat& trainData, const cv::Mat& responses, + const cv::Mat& sampleIdx=cv::Mat(), bool isRegression=false, + int maxK=32, bool updateBase=false ); + + virtual float find_nearest( const cv::Mat& samples, int k, cv::Mat* results=0, + const float** neighbors=0, cv::Mat* neighborResponses=0, + cv::Mat* dist=0 ) const; + CV_WRAP virtual float find_nearest( const cv::Mat& samples, int k, CV_OUT cv::Mat& results, + CV_OUT cv::Mat& neighborResponses, CV_OUT cv::Mat& dists) const; + + virtual void clear(); + int get_max_k() const; + int get_var_count() const; + int get_sample_count() const; 
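    // A minimal usage sketch (illustrative only, not part of the original header;
    // trainData, responses and querySamples are hypothetical CV_32FC1 matrices
    // with one sample per row), based on the declarations above:
    //
    //   CvKNearest knn;
    //   knn.train( trainData, responses, cv::Mat(), false /* classification */, 32 );
    //   cv::Mat results, neighborResponses, dists;
    //   knn.find_nearest( querySamples, 5, results, neighborResponses, dists );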
+ bool is_regression() const; + + virtual float write_results( int k, int k1, int start, int end, + const float* neighbor_responses, const float* dist, CvMat* _results, + CvMat* _neighbor_responses, CvMat* _dist, Cv32suf* sort_buf ) const; + + virtual void find_neighbors_direct( const CvMat* _samples, int k, int start, int end, + float* neighbor_responses, const float** neighbors, float* dist ) const; + +protected: + + int max_k, var_count; + int total; + bool regression; + CvVectors* samples; +}; + +/****************************************************************************************\ +* Support Vector Machines * +\****************************************************************************************/ + +// SVM training parameters +struct CV_EXPORTS_W_MAP CvSVMParams +{ + CvSVMParams(); + CvSVMParams( int svm_type, int kernel_type, + double degree, double gamma, double coef0, + double Cvalue, double nu, double p, + CvMat* class_weights, CvTermCriteria term_crit ); + + CV_PROP_RW int svm_type; + CV_PROP_RW int kernel_type; + CV_PROP_RW double degree; // for poly + CV_PROP_RW double gamma; // for poly/rbf/sigmoid + CV_PROP_RW double coef0; // for poly/sigmoid + + CV_PROP_RW double C; // for CV_SVM_C_SVC, CV_SVM_EPS_SVR and CV_SVM_NU_SVR + CV_PROP_RW double nu; // for CV_SVM_NU_SVC, CV_SVM_ONE_CLASS, and CV_SVM_NU_SVR + CV_PROP_RW double p; // for CV_SVM_EPS_SVR + CvMat* class_weights; // for CV_SVM_C_SVC + CV_PROP_RW CvTermCriteria term_crit; // termination criteria +}; + + +struct CV_EXPORTS CvSVMKernel +{ + typedef void (CvSVMKernel::*Calc)( int vec_count, int vec_size, const float** vecs, + const float* another, float* results ); + CvSVMKernel(); + CvSVMKernel( const CvSVMParams* params, Calc _calc_func ); + virtual bool create( const CvSVMParams* params, Calc _calc_func ); + virtual ~CvSVMKernel(); + + virtual void clear(); + virtual void calc( int vcount, int n, const float** vecs, const float* another, float* results ); + + const CvSVMParams* params; + Calc calc_func; + + virtual void calc_non_rbf_base( int vec_count, int vec_size, const float** vecs, + const float* another, float* results, + double alpha, double beta ); + + virtual void calc_linear( int vec_count, int vec_size, const float** vecs, + const float* another, float* results ); + virtual void calc_rbf( int vec_count, int vec_size, const float** vecs, + const float* another, float* results ); + virtual void calc_poly( int vec_count, int vec_size, const float** vecs, + const float* another, float* results ); + virtual void calc_sigmoid( int vec_count, int vec_size, const float** vecs, + const float* another, float* results ); +}; + + +struct CvSVMKernelRow +{ + CvSVMKernelRow* prev; + CvSVMKernelRow* next; + float* data; +}; + + +struct CvSVMSolutionInfo +{ + double obj; + double rho; + double upper_bound_p; + double upper_bound_n; + double r; // for Solver_NU +}; + +class CV_EXPORTS CvSVMSolver +{ +public: + typedef bool (CvSVMSolver::*SelectWorkingSet)( int& i, int& j ); + typedef float* (CvSVMSolver::*GetRow)( int i, float* row, float* dst, bool existed ); + typedef void (CvSVMSolver::*CalcRho)( double& rho, double& r ); + + CvSVMSolver(); + + CvSVMSolver( int count, int var_count, const float** samples, schar* y, + int alpha_count, double* alpha, double Cp, double Cn, + CvMemStorage* storage, CvSVMKernel* kernel, GetRow get_row, + SelectWorkingSet select_working_set, CalcRho calc_rho ); + virtual bool create( int count, int var_count, const float** samples, schar* y, + int alpha_count, double* alpha, double Cp, 
double Cn, + CvMemStorage* storage, CvSVMKernel* kernel, GetRow get_row, + SelectWorkingSet select_working_set, CalcRho calc_rho ); + virtual ~CvSVMSolver(); + + virtual void clear(); + virtual bool solve_generic( CvSVMSolutionInfo& si ); + + virtual bool solve_c_svc( int count, int var_count, const float** samples, schar* y, + double Cp, double Cn, CvMemStorage* storage, + CvSVMKernel* kernel, double* alpha, CvSVMSolutionInfo& si ); + virtual bool solve_nu_svc( int count, int var_count, const float** samples, schar* y, + CvMemStorage* storage, CvSVMKernel* kernel, + double* alpha, CvSVMSolutionInfo& si ); + virtual bool solve_one_class( int count, int var_count, const float** samples, + CvMemStorage* storage, CvSVMKernel* kernel, + double* alpha, CvSVMSolutionInfo& si ); + + virtual bool solve_eps_svr( int count, int var_count, const float** samples, const float* y, + CvMemStorage* storage, CvSVMKernel* kernel, + double* alpha, CvSVMSolutionInfo& si ); + + virtual bool solve_nu_svr( int count, int var_count, const float** samples, const float* y, + CvMemStorage* storage, CvSVMKernel* kernel, + double* alpha, CvSVMSolutionInfo& si ); + + virtual float* get_row_base( int i, bool* _existed ); + virtual float* get_row( int i, float* dst ); + + int sample_count; + int var_count; + int cache_size; + int cache_line_size; + const float** samples; + const CvSVMParams* params; + CvMemStorage* storage; + CvSVMKernelRow lru_list; + CvSVMKernelRow* rows; + + int alpha_count; + + double* G; + double* alpha; + + // -1 - lower bound, 0 - free, 1 - upper bound + schar* alpha_status; + + schar* y; + double* b; + float* buf[2]; + double eps; + int max_iter; + double C[2]; // C[0] == Cn, C[1] == Cp + CvSVMKernel* kernel; + + SelectWorkingSet select_working_set_func; + CalcRho calc_rho_func; + GetRow get_row_func; + + virtual bool select_working_set( int& i, int& j ); + virtual bool select_working_set_nu_svm( int& i, int& j ); + virtual void calc_rho( double& rho, double& r ); + virtual void calc_rho_nu_svm( double& rho, double& r ); + + virtual float* get_row_svc( int i, float* row, float* dst, bool existed ); + virtual float* get_row_one_class( int i, float* row, float* dst, bool existed ); + virtual float* get_row_svr( int i, float* row, float* dst, bool existed ); +}; + + +struct CvSVMDecisionFunc +{ + double rho; + int sv_count; + double* alpha; + int* sv_index; +}; + + +// SVM model +class CV_EXPORTS_W CvSVM : public CvStatModel +{ +public: + // SVM type + enum { C_SVC=100, NU_SVC=101, ONE_CLASS=102, EPS_SVR=103, NU_SVR=104 }; + + // SVM kernel type + enum { LINEAR=0, POLY=1, RBF=2, SIGMOID=3 }; + + // SVM params type + enum { C=0, GAMMA=1, P=2, NU=3, COEF=4, DEGREE=5 }; + + CV_WRAP CvSVM(); + virtual ~CvSVM(); + + CvSVM( const CvMat* trainData, const CvMat* responses, + const CvMat* varIdx=0, const CvMat* sampleIdx=0, + CvSVMParams params=CvSVMParams() ); + + virtual bool train( const CvMat* trainData, const CvMat* responses, + const CvMat* varIdx=0, const CvMat* sampleIdx=0, + CvSVMParams params=CvSVMParams() ); + + virtual bool train_auto( const CvMat* trainData, const CvMat* responses, + const CvMat* varIdx, const CvMat* sampleIdx, CvSVMParams params, + int kfold = 10, + CvParamGrid Cgrid = get_default_grid(CvSVM::C), + CvParamGrid gammaGrid = get_default_grid(CvSVM::GAMMA), + CvParamGrid pGrid = get_default_grid(CvSVM::P), + CvParamGrid nuGrid = get_default_grid(CvSVM::NU), + CvParamGrid coeffGrid = get_default_grid(CvSVM::COEF), + CvParamGrid degreeGrid = get_default_grid(CvSVM::DEGREE), + bool 
balanced=false ); + + virtual float predict( const CvMat* sample, bool returnDFVal=false ) const; + virtual float predict( const CvMat* samples, CV_OUT CvMat* results ) const; + + CV_WRAP CvSVM( const cv::Mat& trainData, const cv::Mat& responses, + const cv::Mat& varIdx=cv::Mat(), const cv::Mat& sampleIdx=cv::Mat(), + CvSVMParams params=CvSVMParams() ); + + CV_WRAP virtual bool train( const cv::Mat& trainData, const cv::Mat& responses, + const cv::Mat& varIdx=cv::Mat(), const cv::Mat& sampleIdx=cv::Mat(), + CvSVMParams params=CvSVMParams() ); + + CV_WRAP virtual bool train_auto( const cv::Mat& trainData, const cv::Mat& responses, + const cv::Mat& varIdx, const cv::Mat& sampleIdx, CvSVMParams params, + int k_fold = 10, + CvParamGrid Cgrid = CvSVM::get_default_grid(CvSVM::C), + CvParamGrid gammaGrid = CvSVM::get_default_grid(CvSVM::GAMMA), + CvParamGrid pGrid = CvSVM::get_default_grid(CvSVM::P), + CvParamGrid nuGrid = CvSVM::get_default_grid(CvSVM::NU), + CvParamGrid coeffGrid = CvSVM::get_default_grid(CvSVM::COEF), + CvParamGrid degreeGrid = CvSVM::get_default_grid(CvSVM::DEGREE), + bool balanced=false); + CV_WRAP virtual float predict( const cv::Mat& sample, bool returnDFVal=false ) const; + CV_WRAP_AS(predict_all) void predict( cv::InputArray samples, cv::OutputArray results ) const; + + CV_WRAP virtual int get_support_vector_count() const; + virtual const float* get_support_vector(int i) const; + virtual CvSVMParams get_params() const { return params; }; + CV_WRAP virtual void clear(); + + static CvParamGrid get_default_grid( int param_id ); + + virtual void write( CvFileStorage* storage, const char* name ) const; + virtual void read( CvFileStorage* storage, CvFileNode* node ); + CV_WRAP int get_var_count() const { return var_idx ? var_idx->cols : var_all; } + +protected: + + virtual bool set_params( const CvSVMParams& params ); + virtual bool train1( int sample_count, int var_count, const float** samples, + const void* responses, double Cp, double Cn, + CvMemStorage* _storage, double* alpha, double& rho ); + virtual bool do_train( int svm_type, int sample_count, int var_count, const float** samples, + const CvMat* responses, CvMemStorage* _storage, double* alpha ); + virtual void create_kernel(); + virtual void create_solver(); + + virtual float predict( const float* row_sample, int row_len, bool returnDFVal=false ) const; + + virtual void write_params( CvFileStorage* fs ) const; + virtual void read_params( CvFileStorage* fs, CvFileNode* node ); + + void optimize_linear_svm(); + + CvSVMParams params; + CvMat* class_labels; + int var_all; + float** sv; + int sv_total; + CvMat* var_idx; + CvMat* class_weights; + CvSVMDecisionFunc* decision_func; + CvMemStorage* storage; + + CvSVMSolver* solver; + CvSVMKernel* kernel; + +private: + CvSVM(const CvSVM&); + CvSVM& operator = (const CvSVM&); +}; + +/****************************************************************************************\ +* Expectation - Maximization * +\****************************************************************************************/ +namespace cv +{ +class CV_EXPORTS_W EM : public Algorithm +{ +public: + // Type of covariation matrices + enum {COV_MAT_SPHERICAL=0, COV_MAT_DIAGONAL=1, COV_MAT_GENERIC=2, COV_MAT_DEFAULT=COV_MAT_DIAGONAL}; + + // Default parameters + enum {DEFAULT_NCLUSTERS=5, DEFAULT_MAX_ITERS=100}; + + // The initial step + enum {START_E_STEP=1, START_M_STEP=2, START_AUTO_STEP=0}; + + CV_WRAP EM(int nclusters=EM::DEFAULT_NCLUSTERS, int covMatType=EM::COV_MAT_DIAGONAL, + const TermCriteria& 
termCrit=TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, + EM::DEFAULT_MAX_ITERS, FLT_EPSILON)); + + virtual ~EM(); + CV_WRAP virtual void clear(); + + CV_WRAP virtual bool train(InputArray samples, + OutputArray logLikelihoods=noArray(), + OutputArray labels=noArray(), + OutputArray probs=noArray()); + + CV_WRAP virtual bool trainE(InputArray samples, + InputArray means0, + InputArray covs0=noArray(), + InputArray weights0=noArray(), + OutputArray logLikelihoods=noArray(), + OutputArray labels=noArray(), + OutputArray probs=noArray()); + + CV_WRAP virtual bool trainM(InputArray samples, + InputArray probs0, + OutputArray logLikelihoods=noArray(), + OutputArray labels=noArray(), + OutputArray probs=noArray()); + + CV_WRAP Vec2d predict(InputArray sample, + OutputArray probs=noArray()) const; + + CV_WRAP bool isTrained() const; + + AlgorithmInfo* info() const; + virtual void read(const FileNode& fn); + +protected: + + virtual void setTrainData(int startStep, const Mat& samples, + const Mat* probs0, + const Mat* means0, + const vector* covs0, + const Mat* weights0); + + bool doTrain(int startStep, + OutputArray logLikelihoods, + OutputArray labels, + OutputArray probs); + virtual void eStep(); + virtual void mStep(); + + void clusterTrainSamples(); + void decomposeCovs(); + void computeLogWeightDivDet(); + + Vec2d computeProbabilities(const Mat& sample, Mat* probs) const; + + // all inner matrices have type CV_64FC1 + CV_PROP_RW int nclusters; + CV_PROP_RW int covMatType; + CV_PROP_RW int maxIters; + CV_PROP_RW double epsilon; + + Mat trainSamples; + Mat trainProbs; + Mat trainLogLikelihoods; + Mat trainLabels; + + CV_PROP Mat weights; + CV_PROP Mat means; + CV_PROP vector covs; + + vector covsEigenValues; + vector covsRotateMats; + vector invCovsEigenValues; + Mat logWeightDivDet; +}; +} // namespace cv + +/****************************************************************************************\ +* Decision Tree * +\****************************************************************************************/\ +struct CvPair16u32s +{ + unsigned short* u; + int* i; +}; + + +#define CV_DTREE_CAT_DIR(idx,subset) \ + (2*((subset[(idx)>>5]&(1 << ((idx) & 31)))==0)-1) + +struct CvDTreeSplit +{ + int var_idx; + int condensed_idx; + int inversed; + float quality; + CvDTreeSplit* next; + union + { + int subset[2]; + struct + { + float c; + int split_point; + } + ord; + }; +}; + +struct CvDTreeNode +{ + int class_idx; + int Tn; + double value; + + CvDTreeNode* parent; + CvDTreeNode* left; + CvDTreeNode* right; + + CvDTreeSplit* split; + + int sample_count; + int depth; + int* num_valid; + int offset; + int buf_idx; + double maxlr; + + // global pruning data + int complexity; + double alpha; + double node_risk, tree_risk, tree_error; + + // cross-validation pruning data + int* cv_Tn; + double* cv_node_risk; + double* cv_node_error; + + int get_num_valid(int vi) { return num_valid ? 
num_valid[vi] : sample_count; } + void set_num_valid(int vi, int n) { if( num_valid ) num_valid[vi] = n; } +}; + + +struct CV_EXPORTS_W_MAP CvDTreeParams +{ + CV_PROP_RW int max_categories; + CV_PROP_RW int max_depth; + CV_PROP_RW int min_sample_count; + CV_PROP_RW int cv_folds; + CV_PROP_RW bool use_surrogates; + CV_PROP_RW bool use_1se_rule; + CV_PROP_RW bool truncate_pruned_tree; + CV_PROP_RW float regression_accuracy; + const float* priors; + + CvDTreeParams(); + CvDTreeParams( int max_depth, int min_sample_count, + float regression_accuracy, bool use_surrogates, + int max_categories, int cv_folds, + bool use_1se_rule, bool truncate_pruned_tree, + const float* priors ); +}; + + +struct CV_EXPORTS CvDTreeTrainData +{ + CvDTreeTrainData(); + CvDTreeTrainData( const CvMat* trainData, int tflag, + const CvMat* responses, const CvMat* varIdx=0, + const CvMat* sampleIdx=0, const CvMat* varType=0, + const CvMat* missingDataMask=0, + const CvDTreeParams& params=CvDTreeParams(), + bool _shared=false, bool _add_labels=false ); + virtual ~CvDTreeTrainData(); + + virtual void set_data( const CvMat* trainData, int tflag, + const CvMat* responses, const CvMat* varIdx=0, + const CvMat* sampleIdx=0, const CvMat* varType=0, + const CvMat* missingDataMask=0, + const CvDTreeParams& params=CvDTreeParams(), + bool _shared=false, bool _add_labels=false, + bool _update_data=false ); + virtual void do_responses_copy(); + + virtual void get_vectors( const CvMat* _subsample_idx, + float* values, uchar* missing, float* responses, bool get_class_idx=false ); + + virtual CvDTreeNode* subsample_data( const CvMat* _subsample_idx ); + + virtual void write_params( CvFileStorage* fs ) const; + virtual void read_params( CvFileStorage* fs, CvFileNode* node ); + + // release all the data + virtual void clear(); + + int get_num_classes() const; + int get_var_type(int vi) const; + int get_work_var_count() const {return work_var_count;} + + virtual const float* get_ord_responses( CvDTreeNode* n, float* values_buf, int* sample_indices_buf ); + virtual const int* get_class_labels( CvDTreeNode* n, int* labels_buf ); + virtual const int* get_cv_labels( CvDTreeNode* n, int* labels_buf ); + virtual const int* get_sample_indices( CvDTreeNode* n, int* indices_buf ); + virtual const int* get_cat_var_data( CvDTreeNode* n, int vi, int* cat_values_buf ); + virtual void get_ord_var_data( CvDTreeNode* n, int vi, float* ord_values_buf, int* sorted_indices_buf, + const float** ord_values, const int** sorted_indices, int* sample_indices_buf ); + virtual int get_child_buf_idx( CvDTreeNode* n ); + + //////////////////////////////////// + + virtual bool set_params( const CvDTreeParams& params ); + virtual CvDTreeNode* new_node( CvDTreeNode* parent, int count, + int storage_idx, int offset ); + + virtual CvDTreeSplit* new_split_ord( int vi, float cmp_val, + int split_point, int inversed, float quality ); + virtual CvDTreeSplit* new_split_cat( int vi, float quality ); + virtual void free_node_data( CvDTreeNode* node ); + virtual void free_train_data(); + virtual void free_node( CvDTreeNode* node ); + + int sample_count, var_all, var_count, max_c_count; + int ord_var_count, cat_var_count, work_var_count; + bool have_labels, have_priors; + bool is_classifier; + int tflag; + + const CvMat* train_data; + const CvMat* responses; + CvMat* responses_copy; // used in Boosting + + int buf_count, buf_size; // buf_size is obsolete, please do not use it, use expression ((int64)buf->rows * (int64)buf->cols / buf_count) instead + bool shared; + int is_buf_16u; 
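    // Illustrative sketch only (not part of the original header; trainData and
    // responses are hypothetical CvMat pointers): a shared CvDTreeTrainData is
    // typically built once and then reused when training several trees, e.g.
    //
    //   CvDTreeTrainData td( trainData, CV_ROW_SAMPLE, responses,
    //                        0, 0, 0, 0, CvDTreeParams(), true /* _shared */ );
    //   CvDTree tree;
    //   tree.train( &td, 0 );   // null subsample index: train on all samples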
+ + CvMat* cat_count; + CvMat* cat_ofs; + CvMat* cat_map; + + CvMat* counts; + CvMat* buf; + inline size_t get_length_subbuf() const + { + size_t res = (size_t)(work_var_count + 1) * (size_t)sample_count; + return res; + } + + CvMat* direction; + CvMat* split_buf; + + CvMat* var_idx; + CvMat* var_type; // i-th element = + // k<0 - ordered + // k>=0 - categorical, see k-th element of cat_* arrays + CvMat* priors; + CvMat* priors_mult; + + CvDTreeParams params; + + CvMemStorage* tree_storage; + CvMemStorage* temp_storage; + + CvDTreeNode* data_root; + + CvSet* node_heap; + CvSet* split_heap; + CvSet* cv_heap; + CvSet* nv_heap; + + cv::RNG* rng; +}; + +class CvDTree; +class CvForestTree; + +namespace cv +{ + struct DTreeBestSplitFinder; + struct ForestTreeBestSplitFinder; +} + +class CV_EXPORTS_W CvDTree : public CvStatModel +{ +public: + CV_WRAP CvDTree(); + virtual ~CvDTree(); + + virtual bool train( const CvMat* trainData, int tflag, + const CvMat* responses, const CvMat* varIdx=0, + const CvMat* sampleIdx=0, const CvMat* varType=0, + const CvMat* missingDataMask=0, + CvDTreeParams params=CvDTreeParams() ); + + virtual bool train( CvMLData* trainData, CvDTreeParams params=CvDTreeParams() ); + + // type in {CV_TRAIN_ERROR, CV_TEST_ERROR} + virtual float calc_error( CvMLData* trainData, int type, std::vector *resp = 0 ); + + virtual bool train( CvDTreeTrainData* trainData, const CvMat* subsampleIdx ); + + virtual CvDTreeNode* predict( const CvMat* sample, const CvMat* missingDataMask=0, + bool preprocessedInput=false ) const; + + CV_WRAP virtual bool train( const cv::Mat& trainData, int tflag, + const cv::Mat& responses, const cv::Mat& varIdx=cv::Mat(), + const cv::Mat& sampleIdx=cv::Mat(), const cv::Mat& varType=cv::Mat(), + const cv::Mat& missingDataMask=cv::Mat(), + CvDTreeParams params=CvDTreeParams() ); + + CV_WRAP virtual CvDTreeNode* predict( const cv::Mat& sample, const cv::Mat& missingDataMask=cv::Mat(), + bool preprocessedInput=false ) const; + CV_WRAP virtual cv::Mat getVarImportance(); + + virtual const CvMat* get_var_importance(); + CV_WRAP virtual void clear(); + + virtual void read( CvFileStorage* fs, CvFileNode* node ); + virtual void write( CvFileStorage* fs, const char* name ) const; + + // special read & write methods for trees in the tree ensembles + virtual void read( CvFileStorage* fs, CvFileNode* node, + CvDTreeTrainData* data ); + virtual void write( CvFileStorage* fs ) const; + + const CvDTreeNode* get_root() const; + int get_pruned_tree_idx() const; + CvDTreeTrainData* get_data(); + +protected: + friend struct cv::DTreeBestSplitFinder; + + virtual bool do_train( const CvMat* _subsample_idx ); + + virtual void try_split_node( CvDTreeNode* n ); + virtual void split_node_data( CvDTreeNode* n ); + virtual CvDTreeSplit* find_best_split( CvDTreeNode* n ); + virtual CvDTreeSplit* find_split_ord_class( CvDTreeNode* n, int vi, + float init_quality = 0, CvDTreeSplit* _split = 0, uchar* ext_buf = 0 ); + virtual CvDTreeSplit* find_split_cat_class( CvDTreeNode* n, int vi, + float init_quality = 0, CvDTreeSplit* _split = 0, uchar* ext_buf = 0 ); + virtual CvDTreeSplit* find_split_ord_reg( CvDTreeNode* n, int vi, + float init_quality = 0, CvDTreeSplit* _split = 0, uchar* ext_buf = 0 ); + virtual CvDTreeSplit* find_split_cat_reg( CvDTreeNode* n, int vi, + float init_quality = 0, CvDTreeSplit* _split = 0, uchar* ext_buf = 0 ); + virtual CvDTreeSplit* find_surrogate_split_ord( CvDTreeNode* n, int vi, uchar* ext_buf = 0 ); + virtual CvDTreeSplit* find_surrogate_split_cat( CvDTreeNode* 
n, int vi, uchar* ext_buf = 0 ); + virtual double calc_node_dir( CvDTreeNode* node ); + virtual void complete_node_dir( CvDTreeNode* node ); + virtual void cluster_categories( const int* vectors, int vector_count, + int var_count, int* sums, int k, int* cluster_labels ); + + virtual void calc_node_value( CvDTreeNode* node ); + + virtual void prune_cv(); + virtual double update_tree_rnc( int T, int fold ); + virtual int cut_tree( int T, int fold, double min_alpha ); + virtual void free_prune_data(bool cut_tree); + virtual void free_tree(); + + virtual void write_node( CvFileStorage* fs, CvDTreeNode* node ) const; + virtual void write_split( CvFileStorage* fs, CvDTreeSplit* split ) const; + virtual CvDTreeNode* read_node( CvFileStorage* fs, CvFileNode* node, CvDTreeNode* parent ); + virtual CvDTreeSplit* read_split( CvFileStorage* fs, CvFileNode* node ); + virtual void write_tree_nodes( CvFileStorage* fs ) const; + virtual void read_tree_nodes( CvFileStorage* fs, CvFileNode* node ); + + CvDTreeNode* root; + CvMat* var_importance; + CvDTreeTrainData* data; + +public: + int pruned_tree_idx; +}; + + +/****************************************************************************************\ +* Random Trees Classifier * +\****************************************************************************************/ + +class CvRTrees; + +class CV_EXPORTS CvForestTree: public CvDTree +{ +public: + CvForestTree(); + virtual ~CvForestTree(); + + virtual bool train( CvDTreeTrainData* trainData, const CvMat* _subsample_idx, CvRTrees* forest ); + + virtual int get_var_count() const {return data ? data->var_count : 0;} + virtual void read( CvFileStorage* fs, CvFileNode* node, CvRTrees* forest, CvDTreeTrainData* _data ); + + /* dummy methods to avoid warnings: BEGIN */ + virtual bool train( const CvMat* trainData, int tflag, + const CvMat* responses, const CvMat* varIdx=0, + const CvMat* sampleIdx=0, const CvMat* varType=0, + const CvMat* missingDataMask=0, + CvDTreeParams params=CvDTreeParams() ); + + virtual bool train( CvDTreeTrainData* trainData, const CvMat* _subsample_idx ); + virtual void read( CvFileStorage* fs, CvFileNode* node ); + virtual void read( CvFileStorage* fs, CvFileNode* node, + CvDTreeTrainData* data ); + /* dummy methods to avoid warnings: END */ + +protected: + friend struct cv::ForestTreeBestSplitFinder; + + virtual CvDTreeSplit* find_best_split( CvDTreeNode* n ); + CvRTrees* forest; +}; + + +struct CV_EXPORTS_W_MAP CvRTParams : public CvDTreeParams +{ + //Parameters for the forest + CV_PROP_RW bool calc_var_importance; // true <=> RF processes variable importance + CV_PROP_RW int nactive_vars; + CV_PROP_RW CvTermCriteria term_crit; + + CvRTParams(); + CvRTParams( int max_depth, int min_sample_count, + float regression_accuracy, bool use_surrogates, + int max_categories, const float* priors, bool calc_var_importance, + int nactive_vars, int max_num_of_trees_in_the_forest, + float forest_accuracy, int termcrit_type ); +}; + + +class CV_EXPORTS_W CvRTrees : public CvStatModel +{ +public: + CV_WRAP CvRTrees(); + virtual ~CvRTrees(); + virtual bool train( const CvMat* trainData, int tflag, + const CvMat* responses, const CvMat* varIdx=0, + const CvMat* sampleIdx=0, const CvMat* varType=0, + const CvMat* missingDataMask=0, + CvRTParams params=CvRTParams() ); + + virtual bool train( CvMLData* data, CvRTParams params=CvRTParams() ); + virtual float predict( const CvMat* sample, const CvMat* missing = 0 ) const; + virtual float predict_prob( const CvMat* sample, const CvMat* missing = 0 ) 
const; + + CV_WRAP virtual bool train( const cv::Mat& trainData, int tflag, + const cv::Mat& responses, const cv::Mat& varIdx=cv::Mat(), + const cv::Mat& sampleIdx=cv::Mat(), const cv::Mat& varType=cv::Mat(), + const cv::Mat& missingDataMask=cv::Mat(), + CvRTParams params=CvRTParams() ); + CV_WRAP virtual float predict( const cv::Mat& sample, const cv::Mat& missing = cv::Mat() ) const; + CV_WRAP virtual float predict_prob( const cv::Mat& sample, const cv::Mat& missing = cv::Mat() ) const; + CV_WRAP virtual cv::Mat getVarImportance(); + + CV_WRAP virtual void clear(); + + virtual const CvMat* get_var_importance(); + virtual float get_proximity( const CvMat* sample1, const CvMat* sample2, + const CvMat* missing1 = 0, const CvMat* missing2 = 0 ) const; + + virtual float calc_error( CvMLData* data, int type , std::vector* resp = 0 ); // type in {CV_TRAIN_ERROR, CV_TEST_ERROR} + + virtual float get_train_error(); + + virtual void read( CvFileStorage* fs, CvFileNode* node ); + virtual void write( CvFileStorage* fs, const char* name ) const; + + CvMat* get_active_var_mask(); + CvRNG* get_rng(); + + int get_tree_count() const; + CvForestTree* get_tree(int i) const; + +protected: + virtual std::string getName() const; + + virtual bool grow_forest( const CvTermCriteria term_crit ); + + // array of the trees of the forest + CvForestTree** trees; + CvDTreeTrainData* data; + int ntrees; + int nclasses; + double oob_error; + CvMat* var_importance; + int nsamples; + + cv::RNG* rng; + CvMat* active_var_mask; +}; + +/****************************************************************************************\ +* Extremely randomized trees Classifier * +\****************************************************************************************/ +struct CV_EXPORTS CvERTreeTrainData : public CvDTreeTrainData +{ + virtual void set_data( const CvMat* trainData, int tflag, + const CvMat* responses, const CvMat* varIdx=0, + const CvMat* sampleIdx=0, const CvMat* varType=0, + const CvMat* missingDataMask=0, + const CvDTreeParams& params=CvDTreeParams(), + bool _shared=false, bool _add_labels=false, + bool _update_data=false ); + virtual void get_ord_var_data( CvDTreeNode* n, int vi, float* ord_values_buf, int* missing_buf, + const float** ord_values, const int** missing, int* sample_buf = 0 ); + virtual const int* get_sample_indices( CvDTreeNode* n, int* indices_buf ); + virtual const int* get_cv_labels( CvDTreeNode* n, int* labels_buf ); + virtual const int* get_cat_var_data( CvDTreeNode* n, int vi, int* cat_values_buf ); + virtual void get_vectors( const CvMat* _subsample_idx, float* values, uchar* missing, + float* responses, bool get_class_idx=false ); + virtual CvDTreeNode* subsample_data( const CvMat* _subsample_idx ); + const CvMat* missing_mask; +}; + +class CV_EXPORTS CvForestERTree : public CvForestTree +{ +protected: + virtual double calc_node_dir( CvDTreeNode* node ); + virtual CvDTreeSplit* find_split_ord_class( CvDTreeNode* n, int vi, + float init_quality = 0, CvDTreeSplit* _split = 0, uchar* ext_buf = 0 ); + virtual CvDTreeSplit* find_split_cat_class( CvDTreeNode* n, int vi, + float init_quality = 0, CvDTreeSplit* _split = 0, uchar* ext_buf = 0 ); + virtual CvDTreeSplit* find_split_ord_reg( CvDTreeNode* n, int vi, + float init_quality = 0, CvDTreeSplit* _split = 0, uchar* ext_buf = 0 ); + virtual CvDTreeSplit* find_split_cat_reg( CvDTreeNode* n, int vi, + float init_quality = 0, CvDTreeSplit* _split = 0, uchar* ext_buf = 0 ); + virtual void split_node_data( CvDTreeNode* n ); +}; + +class CV_EXPORTS_W 
CvERTrees : public CvRTrees +{ +public: + CV_WRAP CvERTrees(); + virtual ~CvERTrees(); + virtual bool train( const CvMat* trainData, int tflag, + const CvMat* responses, const CvMat* varIdx=0, + const CvMat* sampleIdx=0, const CvMat* varType=0, + const CvMat* missingDataMask=0, + CvRTParams params=CvRTParams()); + CV_WRAP virtual bool train( const cv::Mat& trainData, int tflag, + const cv::Mat& responses, const cv::Mat& varIdx=cv::Mat(), + const cv::Mat& sampleIdx=cv::Mat(), const cv::Mat& varType=cv::Mat(), + const cv::Mat& missingDataMask=cv::Mat(), + CvRTParams params=CvRTParams()); + virtual bool train( CvMLData* data, CvRTParams params=CvRTParams() ); +protected: + virtual std::string getName() const; + virtual bool grow_forest( const CvTermCriteria term_crit ); +}; + + +/****************************************************************************************\ +* Boosted tree classifier * +\****************************************************************************************/ + +struct CV_EXPORTS_W_MAP CvBoostParams : public CvDTreeParams +{ + CV_PROP_RW int boost_type; + CV_PROP_RW int weak_count; + CV_PROP_RW int split_criteria; + CV_PROP_RW double weight_trim_rate; + + CvBoostParams(); + CvBoostParams( int boost_type, int weak_count, double weight_trim_rate, + int max_depth, bool use_surrogates, const float* priors ); +}; + + +class CvBoost; + +class CV_EXPORTS CvBoostTree: public CvDTree +{ +public: + CvBoostTree(); + virtual ~CvBoostTree(); + + virtual bool train( CvDTreeTrainData* trainData, + const CvMat* subsample_idx, CvBoost* ensemble ); + + virtual void scale( double s ); + virtual void read( CvFileStorage* fs, CvFileNode* node, + CvBoost* ensemble, CvDTreeTrainData* _data ); + virtual void clear(); + + /* dummy methods to avoid warnings: BEGIN */ + virtual bool train( const CvMat* trainData, int tflag, + const CvMat* responses, const CvMat* varIdx=0, + const CvMat* sampleIdx=0, const CvMat* varType=0, + const CvMat* missingDataMask=0, + CvDTreeParams params=CvDTreeParams() ); + virtual bool train( CvDTreeTrainData* trainData, const CvMat* _subsample_idx ); + + virtual void read( CvFileStorage* fs, CvFileNode* node ); + virtual void read( CvFileStorage* fs, CvFileNode* node, + CvDTreeTrainData* data ); + /* dummy methods to avoid warnings: END */ + +protected: + + virtual void try_split_node( CvDTreeNode* n ); + virtual CvDTreeSplit* find_surrogate_split_ord( CvDTreeNode* n, int vi, uchar* ext_buf = 0 ); + virtual CvDTreeSplit* find_surrogate_split_cat( CvDTreeNode* n, int vi, uchar* ext_buf = 0 ); + virtual CvDTreeSplit* find_split_ord_class( CvDTreeNode* n, int vi, + float init_quality = 0, CvDTreeSplit* _split = 0, uchar* ext_buf = 0 ); + virtual CvDTreeSplit* find_split_cat_class( CvDTreeNode* n, int vi, + float init_quality = 0, CvDTreeSplit* _split = 0, uchar* ext_buf = 0 ); + virtual CvDTreeSplit* find_split_ord_reg( CvDTreeNode* n, int vi, + float init_quality = 0, CvDTreeSplit* _split = 0, uchar* ext_buf = 0 ); + virtual CvDTreeSplit* find_split_cat_reg( CvDTreeNode* n, int vi, + float init_quality = 0, CvDTreeSplit* _split = 0, uchar* ext_buf = 0 ); + virtual void calc_node_value( CvDTreeNode* n ); + virtual double calc_node_dir( CvDTreeNode* n ); + + CvBoost* ensemble; +}; + + +class CV_EXPORTS_W CvBoost : public CvStatModel +{ +public: + // Boosting type + enum { DISCRETE=0, REAL=1, LOGIT=2, GENTLE=3 }; + + // Splitting criteria + enum { DEFAULT=0, GINI=1, MISCLASS=3, SQERR=4 }; + + CV_WRAP CvBoost(); + virtual ~CvBoost(); + + CvBoost( const CvMat* trainData, 
int tflag, + const CvMat* responses, const CvMat* varIdx=0, + const CvMat* sampleIdx=0, const CvMat* varType=0, + const CvMat* missingDataMask=0, + CvBoostParams params=CvBoostParams() ); + + virtual bool train( const CvMat* trainData, int tflag, + const CvMat* responses, const CvMat* varIdx=0, + const CvMat* sampleIdx=0, const CvMat* varType=0, + const CvMat* missingDataMask=0, + CvBoostParams params=CvBoostParams(), + bool update=false ); + + virtual bool train( CvMLData* data, + CvBoostParams params=CvBoostParams(), + bool update=false ); + + virtual float predict( const CvMat* sample, const CvMat* missing=0, + CvMat* weak_responses=0, CvSlice slice=CV_WHOLE_SEQ, + bool raw_mode=false, bool return_sum=false ) const; + + CV_WRAP CvBoost( const cv::Mat& trainData, int tflag, + const cv::Mat& responses, const cv::Mat& varIdx=cv::Mat(), + const cv::Mat& sampleIdx=cv::Mat(), const cv::Mat& varType=cv::Mat(), + const cv::Mat& missingDataMask=cv::Mat(), + CvBoostParams params=CvBoostParams() ); + + CV_WRAP virtual bool train( const cv::Mat& trainData, int tflag, + const cv::Mat& responses, const cv::Mat& varIdx=cv::Mat(), + const cv::Mat& sampleIdx=cv::Mat(), const cv::Mat& varType=cv::Mat(), + const cv::Mat& missingDataMask=cv::Mat(), + CvBoostParams params=CvBoostParams(), + bool update=false ); + + CV_WRAP virtual float predict( const cv::Mat& sample, const cv::Mat& missing=cv::Mat(), + const cv::Range& slice=cv::Range::all(), bool rawMode=false, + bool returnSum=false ) const; + + virtual float calc_error( CvMLData* _data, int type , std::vector *resp = 0 ); // type in {CV_TRAIN_ERROR, CV_TEST_ERROR} + + CV_WRAP virtual void prune( CvSlice slice ); + + CV_WRAP virtual void clear(); + + virtual void write( CvFileStorage* storage, const char* name ) const; + virtual void read( CvFileStorage* storage, CvFileNode* node ); + virtual const CvMat* get_active_vars(bool absolute_idx=true); + + CvSeq* get_weak_predictors(); + + CvMat* get_weights(); + CvMat* get_subtree_weights(); + CvMat* get_weak_response(); + const CvBoostParams& get_params() const; + const CvDTreeTrainData* get_data() const; + +protected: + + void update_weights_impl( CvBoostTree* tree, double initial_weights[2] ); + + virtual bool set_params( const CvBoostParams& params ); + virtual void update_weights( CvBoostTree* tree ); + virtual void trim_weights(); + virtual void write_params( CvFileStorage* fs ) const; + virtual void read_params( CvFileStorage* fs, CvFileNode* node ); + + CvDTreeTrainData* data; + CvBoostParams params; + CvSeq* weak; + + CvMat* active_vars; + CvMat* active_vars_abs; + bool have_active_cat_vars; + + CvMat* orig_response; + CvMat* sum_response; + CvMat* weak_eval; + CvMat* subsample_mask; + CvMat* weights; + CvMat* subtree_weights; + bool have_subsample; +}; + + +/****************************************************************************************\ +* Gradient Boosted Trees * +\****************************************************************************************/ + +// DataType: STRUCT CvGBTreesParams +// Parameters of GBT (Gradient Boosted trees model), including single +// tree settings and ensemble parameters. +// +// weak_count - count of trees in the ensemble +// loss_function_type - loss function used for ensemble training +// subsample_portion - portion of whole training set used for +// every single tree training. +// subsample_portion value is in (0.0, 1.0]. +// subsample_portion == 1.0 when whole dataset is +// used on each step. 
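+// A minimal usage sketch for the CvBoost classifier declared above (boosting in
+// this module is a two-class classifier; trainData, responses and sample are
+// placeholder cv::Mat variables, with responses holding the two class labels):
+//
+//     CvBoostParams boostParams( CvBoost::REAL, // boost_type
+//                                100,           // weak_count
+//                                0.95,          // weight_trim_rate
+//                                5,             // max_depth of each weak tree
+//                                false,         // use_surrogates
+//                                0 );           // priors
+//     CvBoost booster;
+//     booster.train( trainData, CV_ROW_SAMPLE, responses, cv::Mat(), cv::Mat(),
+//                    cv::Mat(), cv::Mat(), boostParams );
+//     float label = booster.predict( sample );
+//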
Count of sample used on each +// step is computed as +// int(total_samples_count * subsample_portion). +// shrinkage - regularization parameter. +// Each tree prediction is multiplied on shrinkage value. + + +struct CV_EXPORTS_W_MAP CvGBTreesParams : public CvDTreeParams +{ + CV_PROP_RW int weak_count; + CV_PROP_RW int loss_function_type; + CV_PROP_RW float subsample_portion; + CV_PROP_RW float shrinkage; + + CvGBTreesParams(); + CvGBTreesParams( int loss_function_type, int weak_count, float shrinkage, + float subsample_portion, int max_depth, bool use_surrogates ); +}; + +// DataType: CLASS CvGBTrees +// Gradient Boosting Trees (GBT) algorithm implementation. +// +// data - training dataset +// params - parameters of the CvGBTrees +// weak - array[0..(class_count-1)] of CvSeq +// for storing tree ensembles +// orig_response - original responses of the training set samples +// sum_response - predicitons of the current model on the training dataset. +// this matrix is updated on every iteration. +// sum_response_tmp - predicitons of the model on the training set on the next +// step. On every iteration values of sum_responses_tmp are +// computed via sum_responses values. When the current +// step is complete sum_response values become equal to +// sum_responses_tmp. +// sampleIdx - indices of samples used for training the ensemble. +// CvGBTrees training procedure takes a set of samples +// (train_data) and a set of responses (responses). +// Only pairs (train_data[i], responses[i]), where i is +// in sample_idx are used for training the ensemble. +// subsample_train - indices of samples used for training a single decision +// tree on the current step. This indices are countered +// relatively to the sample_idx, so that pairs +// (train_data[sample_idx[i]], responses[sample_idx[i]]) +// are used for training a decision tree. +// Training set is randomly splited +// in two parts (subsample_train and subsample_test) +// on every iteration accordingly to the portion parameter. +// subsample_test - relative indices of samples from the training set, +// which are not used for training a tree on the current +// step. +// missing - mask of the missing values in the training set. This +// matrix has the same size as train_data. 1 - missing +// value, 0 - not a missing value. +// class_labels - output class labels map. +// rng - random number generator. Used for spliting the +// training set. +// class_count - count of output classes. +// class_count == 1 in the case of regression, +// and > 1 in the case of classification. +// delta - Huber loss function parameter. +// base_value - start point of the gradient descent procedure. +// model prediction is +// f(x) = f_0 + sum_{i=1..weak_count-1}(f_i(x)), where +// f_0 is the base value. + + + +class CV_EXPORTS_W CvGBTrees : public CvStatModel +{ +public: + + /* + // DataType: ENUM + // Loss functions implemented in CvGBTrees. + // + // SQUARED_LOSS + // problem: regression + // loss = (x - x')^2 + // + // ABSOLUTE_LOSS + // problem: regression + // loss = abs(x - x') + // + // HUBER_LOSS + // problem: regression + // loss = delta*( abs(x - x') - delta/2), if abs(x - x') > delta + // 1/2*(x - x')^2, if abs(x - x') <= delta, + // where delta is the alpha-quantile of pseudo responses from + // the training set. + // + // DEVIANCE_LOSS + // problem: classification + // + */ + enum {SQUARED_LOSS=0, ABSOLUTE_LOSS, HUBER_LOSS=3, DEVIANCE_LOSS}; + + + /* + // Default constructor. Creates a model only (without training). 
+ // Should be followed by one form of the train(...) function. + // + // API + // CvGBTrees(); + + // INPUT + // OUTPUT + // RESULT + */ + CV_WRAP CvGBTrees(); + + + /* + // Full form constructor. Creates a gradient boosting model and does the + // train. + // + // API + // CvGBTrees( const CvMat* trainData, int tflag, + const CvMat* responses, const CvMat* varIdx=0, + const CvMat* sampleIdx=0, const CvMat* varType=0, + const CvMat* missingDataMask=0, + CvGBTreesParams params=CvGBTreesParams() ); + + // INPUT + // trainData - a set of input feature vectors. + // size of matrix is + // x + // or x + // depending on the tflag parameter. + // matrix values are float. + // tflag - a flag showing how do samples stored in the + // trainData matrix row by row (tflag=CV_ROW_SAMPLE) + // or column by column (tflag=CV_COL_SAMPLE). + // responses - a vector of responses corresponding to the samples + // in trainData. + // varIdx - indices of used variables. zero value means that all + // variables are active. + // sampleIdx - indices of used samples. zero value means that all + // samples from trainData are in the training set. + // varType - vector of length. gives every + // variable type CV_VAR_CATEGORICAL or CV_VAR_ORDERED. + // varType = 0 means all variables are numerical. + // missingDataMask - a mask of misiing values in trainData. + // missingDataMask = 0 means that there are no missing + // values. + // params - parameters of GTB algorithm. + // OUTPUT + // RESULT + */ + CvGBTrees( const CvMat* trainData, int tflag, + const CvMat* responses, const CvMat* varIdx=0, + const CvMat* sampleIdx=0, const CvMat* varType=0, + const CvMat* missingDataMask=0, + CvGBTreesParams params=CvGBTreesParams() ); + + + /* + // Destructor. + */ + virtual ~CvGBTrees(); + + + /* + // Gradient tree boosting model training + // + // API + // virtual bool train( const CvMat* trainData, int tflag, + const CvMat* responses, const CvMat* varIdx=0, + const CvMat* sampleIdx=0, const CvMat* varType=0, + const CvMat* missingDataMask=0, + CvGBTreesParams params=CvGBTreesParams(), + bool update=false ); + + // INPUT + // trainData - a set of input feature vectors. + // size of matrix is + // x + // or x + // depending on the tflag parameter. + // matrix values are float. + // tflag - a flag showing how do samples stored in the + // trainData matrix row by row (tflag=CV_ROW_SAMPLE) + // or column by column (tflag=CV_COL_SAMPLE). + // responses - a vector of responses corresponding to the samples + // in trainData. + // varIdx - indices of used variables. zero value means that all + // variables are active. + // sampleIdx - indices of used samples. zero value means that all + // samples from trainData are in the training set. + // varType - vector of length. gives every + // variable type CV_VAR_CATEGORICAL or CV_VAR_ORDERED. + // varType = 0 means all variables are numerical. + // missingDataMask - a mask of misiing values in trainData. + // missingDataMask = 0 means that there are no missing + // values. + // params - parameters of GTB algorithm. + // update - is not supported now. (!) + // OUTPUT + // RESULT + // Error state. 
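+    // A minimal usage sketch (trainData, responses and sample are placeholder
+    // matrices), here using the cv::Mat overloads declared further below:
+    //
+    //     CvGBTreesParams gbtParams( CvGBTrees::SQUARED_LOSS, // loss_function_type
+    //                                200,     // weak_count
+    //                                0.05f,   // shrinkage
+    //                                0.8f,    // subsample_portion
+    //                                3,       // max_depth
+    //                                false ); // use_surrogates
+    //     CvGBTrees gbt;
+    //     gbt.train( trainData, CV_ROW_SAMPLE, responses, cv::Mat(), cv::Mat(),
+    //                cv::Mat(), cv::Mat(), gbtParams );
+    //     float y = gbt.predict( sample );
+    //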
+ */ + virtual bool train( const CvMat* trainData, int tflag, + const CvMat* responses, const CvMat* varIdx=0, + const CvMat* sampleIdx=0, const CvMat* varType=0, + const CvMat* missingDataMask=0, + CvGBTreesParams params=CvGBTreesParams(), + bool update=false ); + + + /* + // Gradient tree boosting model training + // + // API + // virtual bool train( CvMLData* data, + CvGBTreesParams params=CvGBTreesParams(), + bool update=false ) {return false;}; + + // INPUT + // data - training set. + // params - parameters of GTB algorithm. + // update - is not supported now. (!) + // OUTPUT + // RESULT + // Error state. + */ + virtual bool train( CvMLData* data, + CvGBTreesParams params=CvGBTreesParams(), + bool update=false ); + + + /* + // Response value prediction + // + // API + // virtual float predict_serial( const CvMat* sample, const CvMat* missing=0, + CvMat* weak_responses=0, CvSlice slice = CV_WHOLE_SEQ, + int k=-1 ) const; + + // INPUT + // sample - input sample of the same type as in the training set. + // missing - missing values mask. missing=0 if there are no + // missing values in sample vector. + // weak_responses - predictions of all of the trees. + // not implemented (!) + // slice - part of the ensemble used for prediction. + // slice = CV_WHOLE_SEQ when all trees are used. + // k - number of ensemble used. + // k is in {-1,0,1,..,}. + // in the case of classification problem + // ensembles are built. + // If k = -1 ordinary prediction is the result, + // otherwise function gives the prediction of the + // k-th ensemble only. + // OUTPUT + // RESULT + // Predicted value. + */ + virtual float predict_serial( const CvMat* sample, const CvMat* missing=0, + CvMat* weakResponses=0, CvSlice slice = CV_WHOLE_SEQ, + int k=-1 ) const; + + /* + // Response value prediction. + // Parallel version (in the case of TBB existence) + // + // API + // virtual float predict( const CvMat* sample, const CvMat* missing=0, + CvMat* weak_responses=0, CvSlice slice = CV_WHOLE_SEQ, + int k=-1 ) const; + + // INPUT + // sample - input sample of the same type as in the training set. + // missing - missing values mask. missing=0 if there are no + // missing values in sample vector. + // weak_responses - predictions of all of the trees. + // not implemented (!) + // slice - part of the ensemble used for prediction. + // slice = CV_WHOLE_SEQ when all trees are used. + // k - number of ensemble used. + // k is in {-1,0,1,..,}. + // in the case of classification problem + // ensembles are built. + // If k = -1 ordinary prediction is the result, + // otherwise function gives the prediction of the + // k-th ensemble only. + // OUTPUT + // RESULT + // Predicted value. + */ + virtual float predict( const CvMat* sample, const CvMat* missing=0, + CvMat* weakResponses=0, CvSlice slice = CV_WHOLE_SEQ, + int k=-1 ) const; + + /* + // Deletes all the data. + // + // API + // virtual void clear(); + + // INPUT + // OUTPUT + // delete data, weak, orig_response, sum_response, + // weak_eval, subsample_train, subsample_test, + // sample_idx, missing, lass_labels + // delta = 0.0 + // RESULT + */ + CV_WRAP virtual void clear(); + + /* + // Compute error on the train/test set. + // + // API + // virtual float calc_error( CvMLData* _data, int type, + // std::vector *resp = 0 ); + // + // INPUT + // data - dataset + // type - defines which error is to compute: train (CV_TRAIN_ERROR) or + // test (CV_TEST_ERROR). + // OUTPUT + // resp - vector of predicitons + // RESULT + // Error value. 
+ */ + virtual float calc_error( CvMLData* _data, int type, + std::vector *resp = 0 ); + + /* + // + // Write parameters of the gtb model and data. Write learned model. + // + // API + // virtual void write( CvFileStorage* fs, const char* name ) const; + // + // INPUT + // fs - file storage to read parameters from. + // name - model name. + // OUTPUT + // RESULT + */ + virtual void write( CvFileStorage* fs, const char* name ) const; + + + /* + // + // Read parameters of the gtb model and data. Read learned model. + // + // API + // virtual void read( CvFileStorage* fs, CvFileNode* node ); + // + // INPUT + // fs - file storage to read parameters from. + // node - file node. + // OUTPUT + // RESULT + */ + virtual void read( CvFileStorage* fs, CvFileNode* node ); + + + // new-style C++ interface + CV_WRAP CvGBTrees( const cv::Mat& trainData, int tflag, + const cv::Mat& responses, const cv::Mat& varIdx=cv::Mat(), + const cv::Mat& sampleIdx=cv::Mat(), const cv::Mat& varType=cv::Mat(), + const cv::Mat& missingDataMask=cv::Mat(), + CvGBTreesParams params=CvGBTreesParams() ); + + CV_WRAP virtual bool train( const cv::Mat& trainData, int tflag, + const cv::Mat& responses, const cv::Mat& varIdx=cv::Mat(), + const cv::Mat& sampleIdx=cv::Mat(), const cv::Mat& varType=cv::Mat(), + const cv::Mat& missingDataMask=cv::Mat(), + CvGBTreesParams params=CvGBTreesParams(), + bool update=false ); + + CV_WRAP virtual float predict( const cv::Mat& sample, const cv::Mat& missing=cv::Mat(), + const cv::Range& slice = cv::Range::all(), + int k=-1 ) const; + +protected: + + /* + // Compute the gradient vector components. + // + // API + // virtual void find_gradient( const int k = 0); + + // INPUT + // k - used for classification problem, determining current + // tree ensemble. + // OUTPUT + // changes components of data->responses + // which correspond to samples used for training + // on the current step. + // RESULT + */ + virtual void find_gradient( const int k = 0); + + + /* + // + // Change values in tree leaves according to the used loss function. + // + // API + // virtual void change_values(CvDTree* tree, const int k = 0); + // + // INPUT + // tree - decision tree to change. + // k - used for classification problem, determining current + // tree ensemble. + // OUTPUT + // changes 'value' fields of the trees' leaves. + // changes sum_response_tmp. + // RESULT + */ + virtual void change_values(CvDTree* tree, const int k = 0); + + + /* + // + // Find optimal constant prediction value according to the used loss + // function. + // The goal is to find a constant which gives the minimal summary loss + // on the _Idx samples. + // + // API + // virtual float find_optimal_value( const CvMat* _Idx ); + // + // INPUT + // _Idx - indices of the samples from the training set. + // OUTPUT + // RESULT + // optimal constant value. + */ + virtual float find_optimal_value( const CvMat* _Idx ); + + + /* + // + // Randomly split the whole training set in two parts according + // to params.portion. + // + // API + // virtual void do_subsample(); + // + // INPUT + // OUTPUT + // subsample_train - indices of samples used for training + // subsample_test - indices of samples used for test + // RESULT + */ + virtual void do_subsample(); + + + /* + // + // Internal recursive function giving an array of subtree tree leaves. + // + // API + // void leaves_get( CvDTreeNode** leaves, int& count, CvDTreeNode* node ); + // + // INPUT + // node - current leaf. + // OUTPUT + // count - count of leaves in the subtree. 
+ // leaves - array of pointers to leaves. + // RESULT + */ + void leaves_get( CvDTreeNode** leaves, int& count, CvDTreeNode* node ); + + + /* + // + // Get leaves of the tree. + // + // API + // CvDTreeNode** GetLeaves( const CvDTree* dtree, int& len ); + // + // INPUT + // dtree - decision tree. + // OUTPUT + // len - count of the leaves. + // RESULT + // CvDTreeNode** - array of pointers to leaves. + */ + CvDTreeNode** GetLeaves( const CvDTree* dtree, int& len ); + + + /* + // + // Is it a regression or a classification. + // + // API + // bool problem_type(); + // + // INPUT + // OUTPUT + // RESULT + // false if it is a classification problem, + // true - if regression. + */ + virtual bool problem_type() const; + + + /* + // + // Write parameters of the gtb model. + // + // API + // virtual void write_params( CvFileStorage* fs ) const; + // + // INPUT + // fs - file storage to write parameters to. + // OUTPUT + // RESULT + */ + virtual void write_params( CvFileStorage* fs ) const; + + + /* + // + // Read parameters of the gtb model and data. + // + // API + // virtual void read_params( CvFileStorage* fs ); + // + // INPUT + // fs - file storage to read parameters from. + // OUTPUT + // params - parameters of the gtb model. + // data - contains information about the structure + // of the data set (count of variables, + // their types, etc.). + // class_labels - output class labels map. + // RESULT + */ + virtual void read_params( CvFileStorage* fs, CvFileNode* fnode ); + int get_len(const CvMat* mat) const; + + + CvDTreeTrainData* data; + CvGBTreesParams params; + + CvSeq** weak; + CvMat* orig_response; + CvMat* sum_response; + CvMat* sum_response_tmp; + CvMat* sample_idx; + CvMat* subsample_train; + CvMat* subsample_test; + CvMat* missing; + CvMat* class_labels; + + cv::RNG* rng; + + int class_count; + float delta; + float base_value; + +}; + + + +/****************************************************************************************\ +* Artificial Neural Networks (ANN) * +\****************************************************************************************/ + +/////////////////////////////////// Multi-Layer Perceptrons ////////////////////////////// + +struct CV_EXPORTS_W_MAP CvANN_MLP_TrainParams +{ + CvANN_MLP_TrainParams(); + CvANN_MLP_TrainParams( CvTermCriteria term_crit, int train_method, + double param1, double param2=0 ); + ~CvANN_MLP_TrainParams(); + + enum { BACKPROP=0, RPROP=1 }; + + CV_PROP_RW CvTermCriteria term_crit; + CV_PROP_RW int train_method; + + // backpropagation parameters + CV_PROP_RW double bp_dw_scale, bp_moment_scale; + + // rprop parameters + CV_PROP_RW double rp_dw0, rp_dw_plus, rp_dw_minus, rp_dw_min, rp_dw_max; +}; + + +class CV_EXPORTS_W CvANN_MLP : public CvStatModel +{ +public: + CV_WRAP CvANN_MLP(); + CvANN_MLP( const CvMat* layerSizes, + int activateFunc=CvANN_MLP::SIGMOID_SYM, + double fparam1=0, double fparam2=0 ); + + virtual ~CvANN_MLP(); + + virtual void create( const CvMat* layerSizes, + int activateFunc=CvANN_MLP::SIGMOID_SYM, + double fparam1=0, double fparam2=0 ); + + virtual int train( const CvMat* inputs, const CvMat* outputs, + const CvMat* sampleWeights, const CvMat* sampleIdx=0, + CvANN_MLP_TrainParams params = CvANN_MLP_TrainParams(), + int flags=0 ); + virtual float predict( const CvMat* inputs, CV_OUT CvMat* outputs ) const; + + CV_WRAP CvANN_MLP( const cv::Mat& layerSizes, + int activateFunc=CvANN_MLP::SIGMOID_SYM, + double fparam1=0, double fparam2=0 ); + + CV_WRAP virtual void create( const cv::Mat& layerSizes, + int 
activateFunc=CvANN_MLP::SIGMOID_SYM, + double fparam1=0, double fparam2=0 ); + + CV_WRAP virtual int train( const cv::Mat& inputs, const cv::Mat& outputs, + const cv::Mat& sampleWeights, const cv::Mat& sampleIdx=cv::Mat(), + CvANN_MLP_TrainParams params = CvANN_MLP_TrainParams(), + int flags=0 ); + + CV_WRAP virtual float predict( const cv::Mat& inputs, CV_OUT cv::Mat& outputs ) const; + + CV_WRAP virtual void clear(); + + // possible activation functions + enum { IDENTITY = 0, SIGMOID_SYM = 1, GAUSSIAN = 2 }; + + // available training flags + enum { UPDATE_WEIGHTS = 1, NO_INPUT_SCALE = 2, NO_OUTPUT_SCALE = 4 }; + + virtual void read( CvFileStorage* fs, CvFileNode* node ); + virtual void write( CvFileStorage* storage, const char* name ) const; + + int get_layer_count() { return layer_sizes ? layer_sizes->cols : 0; } + const CvMat* get_layer_sizes() { return layer_sizes; } + double* get_weights(int layer) + { + return layer_sizes && weights && + (unsigned)layer <= (unsigned)layer_sizes->cols ? weights[layer] : 0; + } + + virtual void calc_activ_func_deriv( CvMat* xf, CvMat* deriv, const double* bias ) const; + +protected: + + virtual bool prepare_to_train( const CvMat* _inputs, const CvMat* _outputs, + const CvMat* _sample_weights, const CvMat* sampleIdx, + CvVectors* _ivecs, CvVectors* _ovecs, double** _sw, int _flags ); + + // sequential random backpropagation + virtual int train_backprop( CvVectors _ivecs, CvVectors _ovecs, const double* _sw ); + + // RPROP algorithm + virtual int train_rprop( CvVectors _ivecs, CvVectors _ovecs, const double* _sw ); + + virtual void calc_activ_func( CvMat* xf, const double* bias ) const; + virtual void set_activ_func( int _activ_func=SIGMOID_SYM, + double _f_param1=0, double _f_param2=0 ); + virtual void init_weights(); + virtual void scale_input( const CvMat* _src, CvMat* _dst ) const; + virtual void scale_output( const CvMat* _src, CvMat* _dst ) const; + virtual void calc_input_scale( const CvVectors* vecs, int flags ); + virtual void calc_output_scale( const CvVectors* vecs, int flags ); + + virtual void write_params( CvFileStorage* fs ) const; + virtual void read_params( CvFileStorage* fs, CvFileNode* node ); + + CvMat* layer_sizes; + CvMat* wbuf; + CvMat* sample_weights; + double** weights; + double f_param1, f_param2; + double min_val, max_val, min_val1, max_val1; + int activ_func; + int max_count, max_buf_sz; + CvANN_MLP_TrainParams params; + cv::RNG* rng; +}; + +/****************************************************************************************\ +* Auxilary functions declarations * +\****************************************************************************************/ + +/* Generates from multivariate normal distribution, where - is an + average row vector, - symmetric covariation matrix */ +CVAPI(void) cvRandMVNormal( CvMat* mean, CvMat* cov, CvMat* sample, + CvRNG* rng CV_DEFAULT(0) ); + +/* Generates sample from gaussian mixture distribution */ +CVAPI(void) cvRandGaussMixture( CvMat* means[], + CvMat* covs[], + float weights[], + int clsnum, + CvMat* sample, + CvMat* sampClasses CV_DEFAULT(0) ); + +#define CV_TS_CONCENTRIC_SPHERES 0 + +/* creates test set */ +CVAPI(void) cvCreateTestSet( int type, CvMat** samples, + int num_samples, + int num_features, + CvMat** responses, + int num_classes, ... 
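+// A minimal usage sketch for the CvANN_MLP multi-layer perceptron declared above
+// (inputs, targets and sample are placeholder CV_32F matrices, one training
+// sample per row, with targets holding the desired network outputs):
+//
+//     cv::Mat layerSizes = (cv::Mat_<int>(1, 3) << 2, 5, 1); // 2 inputs, 5 hidden, 1 output
+//     CvANN_MLP mlp( layerSizes, CvANN_MLP::SIGMOID_SYM, 1, 1 );
+//     CvANN_MLP_TrainParams mlpParams(
+//         cvTermCriteria( CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 1000, 1e-6 ),
+//         CvANN_MLP_TrainParams::BACKPROP, 0.1, 0.1 );
+//     mlp.train( inputs, targets, cv::Mat(), cv::Mat(), mlpParams );
+//     cv::Mat response;
+//     mlp.predict( sample, response );
+//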
); + +/****************************************************************************************\ +* Data * +\****************************************************************************************/ + +#define CV_COUNT 0 +#define CV_PORTION 1 + +struct CV_EXPORTS CvTrainTestSplit +{ + CvTrainTestSplit(); + CvTrainTestSplit( int train_sample_count, bool mix = true); + CvTrainTestSplit( float train_sample_portion, bool mix = true); + + union + { + int count; + float portion; + } train_sample_part; + int train_sample_part_mode; + + bool mix; +}; + +class CV_EXPORTS CvMLData +{ +public: + CvMLData(); + virtual ~CvMLData(); + + // returns: + // 0 - OK + // -1 - file can not be opened or is not correct + int read_csv( const char* filename ); + + const CvMat* get_values() const; + const CvMat* get_responses(); + const CvMat* get_missing() const; + + void set_response_idx( int idx ); // old response become predictors, new response_idx = idx + // if idx < 0 there will be no response + int get_response_idx() const; + + void set_train_test_split( const CvTrainTestSplit * spl ); + const CvMat* get_train_sample_idx() const; + const CvMat* get_test_sample_idx() const; + void mix_train_and_test_idx(); + + const CvMat* get_var_idx(); + void chahge_var_idx( int vi, bool state ); // misspelled (saved for back compitability), + // use change_var_idx + void change_var_idx( int vi, bool state ); // state == true to set vi-variable as predictor + + const CvMat* get_var_types(); + int get_var_type( int var_idx ) const; + // following 2 methods enable to change vars type + // use these methods to assign CV_VAR_CATEGORICAL type for categorical variable + // with numerical labels; in the other cases var types are correctly determined automatically + void set_var_types( const char* str ); // str examples: + // "ord[0-17],cat[18]", "ord[0,2,4,10-12], cat[1,3,5-9,13,14]", + // "cat", "ord" (all vars are categorical/ordered) + void change_var_type( int var_idx, int type); // type in { CV_VAR_ORDERED, CV_VAR_CATEGORICAL } + + void set_delimiter( char ch ); + char get_delimiter() const; + + void set_miss_ch( char ch ); + char get_miss_ch() const; + + const std::map& get_class_labels_map() const; + +protected: + virtual void clear(); + + void str_to_flt_elem( const char* token, float& flt_elem, int& type); + void free_train_test_idx(); + + char delimiter; + char miss_ch; + //char flt_separator; + + CvMat* values; + CvMat* missing; + CvMat* var_types; + CvMat* var_idx_mask; + + CvMat* response_out; // header + CvMat* var_idx_out; // mat + CvMat* var_types_out; // mat + + int response_idx; + + int train_sample_count; + bool mix; + + int total_class_count; + std::map class_map; + + CvMat* train_sample_idx; + CvMat* test_sample_idx; + int* sample_idx; // data of train_sample_idx and test_sample_idx + + cv::RNG* rng; +}; + + +namespace cv +{ + +typedef CvStatModel StatModel; +typedef CvParamGrid ParamGrid; +typedef CvNormalBayesClassifier NormalBayesClassifier; +typedef CvKNearest KNearest; +typedef CvSVMParams SVMParams; +typedef CvSVMKernel SVMKernel; +typedef CvSVMSolver SVMSolver; +typedef CvSVM SVM; +typedef CvDTreeParams DTreeParams; +typedef CvMLData TrainData; +typedef CvDTree DecisionTree; +typedef CvForestTree ForestTree; +typedef CvRTParams RandomTreeParams; +typedef CvRTrees RandomTrees; +typedef CvERTreeTrainData ERTreeTRainData; +typedef CvForestERTree ERTree; +typedef CvERTrees ERTrees; +typedef CvBoostParams BoostParams; +typedef CvBoostTree BoostTree; +typedef CvBoost Boost; +typedef CvANN_MLP_TrainParams 
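+// A minimal usage sketch for CvMLData declared above ("dataset.csv" is a
+// placeholder file name; the first CSV column is assumed to hold the class label):
+//
+//     CvMLData csv;
+//     if( csv.read_csv( "dataset.csv" ) == 0 )           // 0 means the file was parsed
+//     {
+//         csv.set_response_idx( 0 );                     // column 0 is the response
+//         csv.change_var_type( 0, CV_VAR_CATEGORICAL );  // treat it as a class label
+//         CvTrainTestSplit split( 0.8f, true );          // 80% train, shuffled
+//         csv.set_train_test_split( &split );
+//         CvRTrees forest;
+//         forest.train( &csv, CvRTParams() );
+//         float testError = forest.calc_error( &csv, CV_TEST_ERROR );
+//     }
+//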
ANN_MLP_TrainParams; +typedef CvANN_MLP NeuralNet_MLP; +typedef CvGBTreesParams GradientBoostingTreeParams; +typedef CvGBTrees GradientBoostingTrees; + +template<> CV_EXPORTS void Ptr::delete_obj(); + +CV_EXPORTS bool initModule_ml(void); + +} + +#endif // __cplusplus +#endif // __OPENCV_ML_HPP__ + +/* End of file. */ diff --git a/src/c/hardware/rasberrypi/libraries/opencv/opencv2/nonfree/features2d.hpp b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/nonfree/features2d.hpp new file mode 100644 index 0000000..f23bec8 --- /dev/null +++ b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/nonfree/features2d.hpp @@ -0,0 +1,155 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef __OPENCV_NONFREE_FEATURES_2D_HPP__ +#define __OPENCV_NONFREE_FEATURES_2D_HPP__ + +#include "opencv2/features2d/features2d.hpp" + +#ifdef __cplusplus + +namespace cv +{ + +/*! + SIFT implementation. + + The class implements SIFT algorithm by D. Lowe. +*/ +class CV_EXPORTS_W SIFT : public Feature2D +{ +public: + CV_WRAP explicit SIFT( int nfeatures=0, int nOctaveLayers=3, + double contrastThreshold=0.04, double edgeThreshold=10, + double sigma=1.6); + + //! returns the descriptor size in floats (128) + CV_WRAP int descriptorSize() const; + + //! returns the descriptor type + CV_WRAP int descriptorType() const; + + //! finds the keypoints using SIFT algorithm + void operator()(InputArray img, InputArray mask, + vector& keypoints) const; + //! 
finds the keypoints and computes descriptors for them using SIFT algorithm. + //! Optionally it can compute descriptors for the user-provided keypoints + void operator()(InputArray img, InputArray mask, + vector& keypoints, + OutputArray descriptors, + bool useProvidedKeypoints=false) const; + + AlgorithmInfo* info() const; + + void buildGaussianPyramid( const Mat& base, vector& pyr, int nOctaves ) const; + void buildDoGPyramid( const vector& pyr, vector& dogpyr ) const; + void findScaleSpaceExtrema( const vector& gauss_pyr, const vector& dog_pyr, + vector& keypoints ) const; + +protected: + void detectImpl( const Mat& image, vector& keypoints, const Mat& mask=Mat() ) const; + void computeImpl( const Mat& image, vector& keypoints, Mat& descriptors ) const; + + CV_PROP_RW int nfeatures; + CV_PROP_RW int nOctaveLayers; + CV_PROP_RW double contrastThreshold; + CV_PROP_RW double edgeThreshold; + CV_PROP_RW double sigma; +}; + +typedef SIFT SiftFeatureDetector; +typedef SIFT SiftDescriptorExtractor; + +/*! + SURF implementation. + + The class implements SURF algorithm by H. Bay et al. + */ +class CV_EXPORTS_W SURF : public Feature2D +{ +public: + //! the default constructor + CV_WRAP SURF(); + //! the full constructor taking all the necessary parameters + explicit CV_WRAP SURF(double hessianThreshold, + int nOctaves=4, int nOctaveLayers=2, + bool extended=true, bool upright=false); + + //! returns the descriptor size in float's (64 or 128) + CV_WRAP int descriptorSize() const; + + //! returns the descriptor type + CV_WRAP int descriptorType() const; + + //! finds the keypoints using fast hessian detector used in SURF + void operator()(InputArray img, InputArray mask, + CV_OUT vector& keypoints) const; + //! finds the keypoints and computes their descriptors. Optionally it can compute descriptors for the user-provided keypoints + void operator()(InputArray img, InputArray mask, + CV_OUT vector& keypoints, + OutputArray descriptors, + bool useProvidedKeypoints=false) const; + + AlgorithmInfo* info() const; + + CV_PROP_RW double hessianThreshold; + CV_PROP_RW int nOctaves; + CV_PROP_RW int nOctaveLayers; + CV_PROP_RW bool extended; + CV_PROP_RW bool upright; + +protected: + + void detectImpl( const Mat& image, vector& keypoints, const Mat& mask=Mat() ) const; + void computeImpl( const Mat& image, vector& keypoints, Mat& descriptors ) const; +}; + +typedef SURF SurfFeatureDetector; +typedef SURF SurfDescriptorExtractor; + +} /* namespace cv */ + +#endif /* __cplusplus */ + +#endif + +/* End of file. */ diff --git a/src/c/hardware/rasberrypi/libraries/opencv/opencv2/nonfree/gpu.hpp b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/nonfree/gpu.hpp new file mode 100644 index 0000000..722ef26 --- /dev/null +++ b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/nonfree/gpu.hpp @@ -0,0 +1,128 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. 
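+// A minimal usage sketch for the SIFT and SURF detectors declared in
+// opencv2/nonfree/features2d.hpp above ("scene.png" is a placeholder image name;
+// the nonfree module must be linked, and cv::initModule_nonfree() registers the
+// algorithms for Algorithm-based creation):
+//
+//     #include "opencv2/highgui/highgui.hpp"
+//     #include "opencv2/nonfree/nonfree.hpp"
+//
+//     cv::initModule_nonfree();
+//     cv::Mat img = cv::imread( "scene.png", CV_LOAD_IMAGE_GRAYSCALE );
+//     std::vector<cv::KeyPoint> keypoints;
+//     cv::Mat descriptors;
+//     cv::SIFT sift;                               // default parameters
+//     sift( img, cv::Mat(), keypoints, descriptors );
+//     cv::SURF surf( 400 );                        // hessianThreshold = 400
+//     surf( img, cv::Mat(), keypoints, descriptors );
+//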
+// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef __OPENCV_NONFREE_GPU_HPP__ +#define __OPENCV_NONFREE_GPU_HPP__ + +#include "opencv2/core/gpumat.hpp" + +namespace cv { namespace gpu { + +class CV_EXPORTS SURF_GPU +{ +public: + enum KeypointLayout + { + X_ROW = 0, + Y_ROW, + LAPLACIAN_ROW, + OCTAVE_ROW, + SIZE_ROW, + ANGLE_ROW, + HESSIAN_ROW, + ROWS_COUNT + }; + + //! the default constructor + SURF_GPU(); + //! the full constructor taking all the necessary parameters + explicit SURF_GPU(double _hessianThreshold, int _nOctaves=4, + int _nOctaveLayers=2, bool _extended=false, float _keypointsRatio=0.01f, bool _upright = false); + + //! returns the descriptor size in float's (64 or 128) + int descriptorSize() const; + + //! upload host keypoints to device memory + void uploadKeypoints(const std::vector& keypoints, GpuMat& keypointsGPU); + //! download keypoints from device to host memory + void downloadKeypoints(const GpuMat& keypointsGPU, std::vector& keypoints); + + //! download descriptors from device to host memory + void downloadDescriptors(const GpuMat& descriptorsGPU, std::vector& descriptors); + + //! finds the keypoints using fast hessian detector used in SURF + //! supports CV_8UC1 images + //! keypoints will have nFeature cols and 6 rows + //! keypoints.ptr(X_ROW)[i] will contain x coordinate of i'th feature + //! keypoints.ptr(Y_ROW)[i] will contain y coordinate of i'th feature + //! keypoints.ptr(LAPLACIAN_ROW)[i] will contain laplacian sign of i'th feature + //! keypoints.ptr(OCTAVE_ROW)[i] will contain octave of i'th feature + //! keypoints.ptr(SIZE_ROW)[i] will contain size of i'th feature + //! keypoints.ptr(ANGLE_ROW)[i] will contain orientation of i'th feature + //! keypoints.ptr(HESSIAN_ROW)[i] will contain response of i'th feature + void operator()(const GpuMat& img, const GpuMat& mask, GpuMat& keypoints); + //! finds the keypoints and computes their descriptors. + //! 
Optionally it can compute descriptors for the user-provided keypoints and recompute keypoints direction + void operator()(const GpuMat& img, const GpuMat& mask, GpuMat& keypoints, GpuMat& descriptors, + bool useProvidedKeypoints = false); + + void operator()(const GpuMat& img, const GpuMat& mask, std::vector& keypoints); + void operator()(const GpuMat& img, const GpuMat& mask, std::vector& keypoints, GpuMat& descriptors, + bool useProvidedKeypoints = false); + + void operator()(const GpuMat& img, const GpuMat& mask, std::vector& keypoints, std::vector& descriptors, + bool useProvidedKeypoints = false); + + void releaseMemory(); + + // SURF parameters + double hessianThreshold; + int nOctaves; + int nOctaveLayers; + bool extended; + bool upright; + + //! max keypoints = min(keypointsRatio * img.size().area(), 65535) + float keypointsRatio; + + GpuMat sum, mask1, maskSum, intBuffer; + + GpuMat det, trace; + + GpuMat maxPosBuffer; +}; + +} // namespace gpu + +} // namespace cv + +#endif // __OPENCV_NONFREE_GPU_HPP__ diff --git a/src/c/hardware/rasberrypi/libraries/opencv/opencv2/nonfree/nonfree.hpp b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/nonfree/nonfree.hpp new file mode 100644 index 0000000..c64c566 --- /dev/null +++ b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/nonfree/nonfree.hpp @@ -0,0 +1,57 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009-2012, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. 
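+// A minimal usage sketch for cv::gpu::SURF_GPU declared in
+// opencv2/nonfree/gpu.hpp above (requires an OpenCV build with CUDA support;
+// img is a placeholder CV_8UC1 cv::Mat):
+//
+//     #include "opencv2/nonfree/gpu.hpp"
+//
+//     cv::gpu::GpuMat imgGpu( img ), keypointsGpu, descriptorsGpu;
+//     cv::gpu::SURF_GPU surf( 400.0 );
+//     surf( imgGpu, cv::gpu::GpuMat(), keypointsGpu, descriptorsGpu );
+//     std::vector<cv::KeyPoint> keypoints;
+//     std::vector<float> descriptors;
+//     surf.downloadKeypoints( keypointsGpu, keypoints );
+//     surf.downloadDescriptors( descriptorsGpu, descriptors );
+//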
+// +//M*/ + +#ifndef __OPENCV_NONFREE_HPP__ +#define __OPENCV_NONFREE_HPP__ + +#include "opencv2/nonfree/features2d.hpp" + +namespace cv +{ + +CV_EXPORTS_W bool initModule_nonfree(); + +} + +#endif + +/* End of file. */ diff --git a/src/c/hardware/rasberrypi/libraries/opencv/opencv2/nonfree/ocl.hpp b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/nonfree/ocl.hpp new file mode 100644 index 0000000..ba84d24 --- /dev/null +++ b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/nonfree/ocl.hpp @@ -0,0 +1,140 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Copyright (C) 2013, OpenCV Foundation, all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef __OPENCV_NONFREE_OCL_HPP__ +#define __OPENCV_NONFREE_OCL_HPP__ + +#include "opencv2/ocl/ocl.hpp" + +namespace cv +{ + namespace ocl + { + //! Speeded up robust features, port from GPU module. + ////////////////////////////////// SURF ////////////////////////////////////////// + + class CV_EXPORTS SURF_OCL : public cv::Feature2D + { + public: + enum KeypointLayout + { + X_ROW = 0, + Y_ROW, + LAPLACIAN_ROW, + OCTAVE_ROW, + SIZE_ROW, + ANGLE_ROW, + HESSIAN_ROW, + ROWS_COUNT + }; + + //! the default constructor + SURF_OCL(); + //! the full constructor taking all the necessary parameters + explicit SURF_OCL(double _hessianThreshold, int _nOctaves = 4, + int _nOctaveLayers = 2, bool _extended = true, float _keypointsRatio = 0.01f, bool _upright = false); + + //! 
returns the descriptor size in float's (64 or 128) + int descriptorSize() const; + + int descriptorType() const; + + //! upload host keypoints to device memory + void uploadKeypoints(const vector &keypoints, oclMat &keypointsocl); + //! download keypoints from device to host memory + void downloadKeypoints(const oclMat &keypointsocl, vector &keypoints); + //! download descriptors from device to host memory + void downloadDescriptors(const oclMat &descriptorsocl, vector &descriptors); + //! finds the keypoints using fast hessian detector used in SURF + //! supports CV_8UC1 images + //! keypoints will have nFeature cols and 6 rows + //! keypoints.ptr(X_ROW)[i] will contain x coordinate of i'th feature + //! keypoints.ptr(Y_ROW)[i] will contain y coordinate of i'th feature + //! keypoints.ptr(LAPLACIAN_ROW)[i] will contain laplacian sign of i'th feature + //! keypoints.ptr(OCTAVE_ROW)[i] will contain octave of i'th feature + //! keypoints.ptr(SIZE_ROW)[i] will contain size of i'th feature + //! keypoints.ptr(ANGLE_ROW)[i] will contain orientation of i'th feature + //! keypoints.ptr(HESSIAN_ROW)[i] will contain response of i'th feature + void operator()(const oclMat &img, const oclMat &mask, oclMat &keypoints); + //! finds the keypoints and computes their descriptors. + //! Optionally it can compute descriptors for the user-provided keypoints and recompute keypoints direction + void operator()(const oclMat &img, const oclMat &mask, oclMat &keypoints, oclMat &descriptors, + bool useProvidedKeypoints = false); + void operator()(const oclMat &img, const oclMat &mask, std::vector &keypoints); + void operator()(const oclMat &img, const oclMat &mask, std::vector &keypoints, oclMat &descriptors, + bool useProvidedKeypoints = false); + void operator()(const oclMat &img, const oclMat &mask, std::vector &keypoints, std::vector &descriptors, + bool useProvidedKeypoints = false); + + //! finds the keypoints using fast hessian detector used in SURF + void operator()(InputArray img, InputArray mask, + CV_OUT vector& keypoints) const; + //! finds the keypoints and computes their descriptors. Optionally it can compute descriptors for the user-provided keypoints + void operator()(InputArray img, InputArray mask, + CV_OUT vector& keypoints, + OutputArray descriptors, + bool useProvidedKeypoints=false) const; + + AlgorithmInfo* info() const; + + void releaseMemory(); + + // SURF parameters + float hessianThreshold; + int nOctaves; + int nOctaveLayers; + bool extended; + bool upright; + //! max keypoints = min(keypointsRatio * img.size().area(), 65535) + float keypointsRatio; + oclMat sum, mask1, maskSum, intBuffer; + oclMat det, trace; + oclMat maxPosBuffer; + protected: + void detectImpl( const Mat& image, vector& keypoints, const Mat& mask) const; + void computeImpl( const Mat& image, vector& keypoints, Mat& descriptors) const; + }; + } +} + +#endif //__OPENCV_NONFREE_OCL_HPP__ diff --git a/src/c/hardware/rasberrypi/libraries/opencv/opencv2/objdetect.hpp b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/objdetect.hpp new file mode 100644 index 0000000..71f201c --- /dev/null +++ b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/objdetect.hpp @@ -0,0 +1,43 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. 
+// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#include "opencv2/objdetect/objdetect.hpp" diff --git a/src/c/hardware/rasberrypi/libraries/opencv/opencv2/objdetect/objdetect.hpp b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/objdetect/objdetect.hpp new file mode 100644 index 0000000..d5d6f0b --- /dev/null +++ b/src/c/hardware/rasberrypi/libraries/opencv/opencv2/objdetect/objdetect.hpp @@ -0,0 +1,1073 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. 
+// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef __OPENCV_OBJDETECT_HPP__ +#define __OPENCV_OBJDETECT_HPP__ + +#include "opencv2/core/core.hpp" + +#ifdef __cplusplus +#include +#include + +extern "C" { +#endif + +/****************************************************************************************\ +* Haar-like Object Detection functions * +\****************************************************************************************/ + +#define CV_HAAR_MAGIC_VAL 0x42500000 +#define CV_TYPE_NAME_HAAR "opencv-haar-classifier" + +#define CV_IS_HAAR_CLASSIFIER( haar ) \ + ((haar) != NULL && \ + (((const CvHaarClassifierCascade*)(haar))->flags & CV_MAGIC_MASK)==CV_HAAR_MAGIC_VAL) + +#define CV_HAAR_FEATURE_MAX 3 + +typedef struct CvHaarFeature +{ + int tilted; + struct + { + CvRect r; + float weight; + } rect[CV_HAAR_FEATURE_MAX]; +} CvHaarFeature; + +typedef struct CvHaarClassifier +{ + int count; + CvHaarFeature* haar_feature; + float* threshold; + int* left; + int* right; + float* alpha; +} CvHaarClassifier; + +typedef struct CvHaarStageClassifier +{ + int count; + float threshold; + CvHaarClassifier* classifier; + + int next; + int child; + int parent; +} CvHaarStageClassifier; + +typedef struct CvHidHaarClassifierCascade CvHidHaarClassifierCascade; + +typedef struct CvHaarClassifierCascade +{ + int flags; + int count; + CvSize orig_window_size; + CvSize real_window_size; + double scale; + CvHaarStageClassifier* stage_classifier; + CvHidHaarClassifierCascade* hid_cascade; +} CvHaarClassifierCascade; + +typedef struct CvAvgComp +{ + CvRect rect; + int neighbors; +} CvAvgComp; + +/* Loads haar classifier cascade from a directory. 
+ It is obsolete: convert your cascade to xml and use cvLoad instead */ +CVAPI(CvHaarClassifierCascade*) cvLoadHaarClassifierCascade( + const char* directory, CvSize orig_window_size); + +CVAPI(void) cvReleaseHaarClassifierCascade( CvHaarClassifierCascade** cascade ); + +#define CV_HAAR_DO_CANNY_PRUNING 1 +#define CV_HAAR_SCALE_IMAGE 2 +#define CV_HAAR_FIND_BIGGEST_OBJECT 4 +#define CV_HAAR_DO_ROUGH_SEARCH 8 + +//CVAPI(CvSeq*) cvHaarDetectObjectsForROC( const CvArr* image, +// CvHaarClassifierCascade* cascade, CvMemStorage* storage, +// CvSeq** rejectLevels, CvSeq** levelWeightds, +// double scale_factor CV_DEFAULT(1.1), +// int min_neighbors CV_DEFAULT(3), int flags CV_DEFAULT(0), +// CvSize min_size CV_DEFAULT(cvSize(0,0)), CvSize max_size CV_DEFAULT(cvSize(0,0)), +// bool outputRejectLevels = false ); + + +CVAPI(CvSeq*) cvHaarDetectObjects( const CvArr* image, + CvHaarClassifierCascade* cascade, CvMemStorage* storage, + double scale_factor CV_DEFAULT(1.1), + int min_neighbors CV_DEFAULT(3), int flags CV_DEFAULT(0), + CvSize min_size CV_DEFAULT(cvSize(0,0)), CvSize max_size CV_DEFAULT(cvSize(0,0))); + +/* sets images for haar classifier cascade */ +CVAPI(void) cvSetImagesForHaarClassifierCascade( CvHaarClassifierCascade* cascade, + const CvArr* sum, const CvArr* sqsum, + const CvArr* tilted_sum, double scale ); + +/* runs the cascade on the specified window */ +CVAPI(int) cvRunHaarClassifierCascade( const CvHaarClassifierCascade* cascade, + CvPoint pt, int start_stage CV_DEFAULT(0)); + + +/****************************************************************************************\ +* Latent SVM Object Detection functions * +\****************************************************************************************/ + +// DataType: STRUCT position +// Structure describes the position of the filter in the feature pyramid +// l - level in the feature pyramid +// (x, y) - coordinate in level l +typedef struct CvLSVMFilterPosition +{ + int x; + int y; + int l; +} CvLSVMFilterPosition; + +// DataType: STRUCT filterObject +// Description of the filter, which corresponds to the part of the object +// V - ideal (penalty = 0) position of the partial filter +// from the root filter position (V_i in the paper) +// penaltyFunction - vector describes penalty function (d_i in the paper) +// pf[0] * x + pf[1] * y + pf[2] * x^2 + pf[3] * y^2 +// FILTER DESCRIPTION +// Rectangular map (sizeX x sizeY), +// every cell stores feature vector (dimension = p) +// H - matrix of feature vectors +// to set and get feature vectors (i,j) +// used formula H[(j * sizeX + i) * p + k], where +// k - component of feature vector in cell (i, j) +// END OF FILTER DESCRIPTION +typedef struct CvLSVMFilterObject{ + CvLSVMFilterPosition V; + float fineFunction[4]; + int sizeX; + int sizeY; + int numFeatures; + float *H; +} CvLSVMFilterObject; + +// data type: STRUCT CvLatentSvmDetector +// structure contains internal representation of trained Latent SVM detector +// num_filters - total number of filters (root plus part) in model +// num_components - number of components in model +// num_part_filters - array containing number of part filters for each component +// filters - root and part filters for all model components +// b - biases for all model components +// score_threshold - confidence level threshold +typedef struct CvLatentSvmDetector +{ + int num_filters; + int num_components; + int* num_part_filters; + CvLSVMFilterObject** filters; + float* b; + float score_threshold; +} +CvLatentSvmDetector; + +// data type: STRUCT 
CvObjectDetection
+// structure contains the bounding box and confidence level for detected object
+// rect               - bounding box for a detected object
+// score              - confidence level
+typedef struct CvObjectDetection
+{
+    CvRect rect;
+    float score;
+} CvObjectDetection;
+
+//////////////// Object Detection using Latent SVM //////////////
+
+
+/*
+// load trained detector from a file
+//
+// API
+// CvLatentSvmDetector* cvLoadLatentSvmDetector(const char* filename);
+// INPUT
+// filename           - path to the file containing the parameters of
+                      - trained Latent SVM detector
+// OUTPUT
+// trained Latent SVM detector in internal representation
+*/
+CVAPI(CvLatentSvmDetector*) cvLoadLatentSvmDetector(const char* filename);
+
+/*
+// release memory allocated for CvLatentSvmDetector structure
+//
+// API
+// void cvReleaseLatentSvmDetector(CvLatentSvmDetector** detector);
+// INPUT
+// detector           - CvLatentSvmDetector structure to be released
+// OUTPUT
+*/
+CVAPI(void) cvReleaseLatentSvmDetector(CvLatentSvmDetector** detector);
+
+/*
+// find rectangular regions in the given image that are likely
+// to contain objects and corresponding confidence levels
+//
+// API
+// CvSeq* cvLatentSvmDetectObjects(const IplImage* image,
+//                                 CvLatentSvmDetector* detector,
+//                                 CvMemStorage* storage,
+//                                 float overlap_threshold = 0.5f,
+//                                 int numThreads = -1);
+// INPUT
+// image              - image to detect objects in
+// detector           - Latent SVM detector in internal representation
+// storage            - memory storage to store the resultant sequence
+//                      of the object candidate rectangles
+// overlap_threshold  - threshold for the non-maximum suppression algorithm
+                        = 0.5f [here will be the reference to original paper]
+// OUTPUT
+// sequence of detected objects (bounding boxes and confidence levels stored in CvObjectDetection structures)
+*/
+CVAPI(CvSeq*) cvLatentSvmDetectObjects(IplImage* image,
+                                       CvLatentSvmDetector* detector,
+                                       CvMemStorage* storage,
+                                       float overlap_threshold CV_DEFAULT(0.5f),
+                                       int numThreads CV_DEFAULT(-1));
+
+#ifdef __cplusplus
+}
+
+CV_EXPORTS CvSeq* cvHaarDetectObjectsForROC( const CvArr* image,
+                     CvHaarClassifierCascade* cascade, CvMemStorage* storage,
+                     std::vector<int>& rejectLevels, std::vector<double>& levelWeightds,
+                     double scale_factor CV_DEFAULT(1.1),
+                     int min_neighbors CV_DEFAULT(3), int flags CV_DEFAULT(0),
+                     CvSize min_size CV_DEFAULT(cvSize(0,0)), CvSize max_size CV_DEFAULT(cvSize(0,0)),
+                     bool outputRejectLevels = false );
+
+namespace cv
+{
+
+///////////////////////////// Object Detection ////////////////////////////
+
+/*
+ * This is a class wrapping up the structure CvLatentSvmDetector and functions working with it.
+ * The class goals are:
+ * 1) provide c++ interface;
+ * 2) make it possible to load and detect more than one class (model) unlike CvLatentSvmDetector.
+ */
+class CV_EXPORTS LatentSvmDetector
+{
+public:
+    struct CV_EXPORTS ObjectDetection
+    {
+        ObjectDetection();
+        ObjectDetection( const Rect& rect, float score, int classID=-1 );
+        Rect rect;
+        float score;
+        int classID;
+    };
+
+    LatentSvmDetector();
+    LatentSvmDetector( const vector<string>& filenames, const vector<string>& classNames=vector<string>() );
+    virtual ~LatentSvmDetector();
+
+    virtual void clear();
+    virtual bool empty() const;
+    bool load( const vector<string>& filenames, const vector<string>& classNames=vector<string>() );
+
+    virtual void detect( const Mat& image,
+                         vector<ObjectDetection>& objectDetections,
+                         float overlapThreshold=0.5f,
+                         int numThreads=-1 );
+
+    const vector<string>& getClassNames() const;
+    size_t getClassCount() const;
+
+private:
+    vector<CvLatentSvmDetector*> detectors;
+    vector<string> classNames;
+};
+
+// class for grouping object candidates, detected by Cascade Classifier, HOG etc.
+// instance of the class is to be passed to cv::partition (see cxoperations.hpp)
+class CV_EXPORTS SimilarRects
+{
+public:
+    SimilarRects(double _eps) : eps(_eps) {}
+    inline bool operator()(const Rect& r1, const Rect& r2) const
+    {
+        double delta = eps*(std::min(r1.width, r2.width) + std::min(r1.height, r2.height))*0.5;
+        return std::abs(r1.x - r2.x) <= delta &&
+               std::abs(r1.y - r2.y) <= delta &&
+               std::abs(r1.x + r1.width - r2.x - r2.width) <= delta &&
+               std::abs(r1.y + r1.height - r2.y - r2.height) <= delta;
+    }
+    double eps;
+};
+
+CV_EXPORTS void groupRectangles(CV_OUT CV_IN_OUT vector<Rect>& rectList, int groupThreshold, double eps=0.2);
+CV_EXPORTS_W void groupRectangles(CV_OUT CV_IN_OUT vector<Rect>& rectList, CV_OUT vector<int>& weights, int groupThreshold, double eps=0.2);
+CV_EXPORTS void groupRectangles( vector<Rect>& rectList, int groupThreshold, double eps, vector<int>* weights, vector<double>* levelWeights );
+CV_EXPORTS void groupRectangles(vector<Rect>& rectList, vector<int>& rejectLevels,
+                                vector<double>& levelWeights, int groupThreshold, double eps=0.2);
+CV_EXPORTS void groupRectangles_meanshift(vector<Rect>& rectList, vector<double>& foundWeights, vector<double>& foundScales,
+                                          double detectThreshold = 0.0, Size winDetSize = Size(64, 128));
+
+
+class CV_EXPORTS FeatureEvaluator
+{
+public:
+    enum { HAAR = 0, LBP = 1, HOG = 2 };
+    virtual ~FeatureEvaluator();
+
+    virtual bool read(const FileNode& node);
+    virtual Ptr<FeatureEvaluator> clone() const;
+    virtual int getFeatureType() const;
+
+    virtual bool setImage(const Mat& img, Size origWinSize);
+    virtual bool setWindow(Point p);
+
+    virtual double calcOrd(int featureIdx) const;
+    virtual int calcCat(int featureIdx) const;
+
+    static Ptr<FeatureEvaluator> create(int type);
+};
+
+template<> CV_EXPORTS void Ptr<CvHaarClassifierCascade>::delete_obj();
+
+enum
+{
+    CASCADE_DO_CANNY_PRUNING=1,
+    CASCADE_SCALE_IMAGE=2,
+    CASCADE_FIND_BIGGEST_OBJECT=4,
+    CASCADE_DO_ROUGH_SEARCH=8
+};
+
+class CV_EXPORTS_W CascadeClassifier
+{
+public:
+    CV_WRAP CascadeClassifier();
+    CV_WRAP CascadeClassifier( const string& filename );
+    virtual ~CascadeClassifier();
+
+    CV_WRAP virtual bool empty() const;
+    CV_WRAP bool load( const string& filename );
+    virtual bool read( const FileNode& node );
+    CV_WRAP virtual void detectMultiScale( const Mat& image,
+                                   CV_OUT vector<Rect>& objects,
+                                   double scaleFactor=1.1,
+                                   int minNeighbors=3, int flags=0,
+                                   Size minSize=Size(),
+                                   Size maxSize=Size() );
+
+    CV_WRAP virtual void detectMultiScale( const Mat& image,
+                                   CV_OUT vector<Rect>& objects,
+                                   vector<int>& rejectLevels,
+                                   vector<double>& levelWeights,
+                                   double scaleFactor=1.1,
+                                   int minNeighbors=3, int flags=0,
+                                   Size minSize=Size(),
+                                   Size maxSize=Size(),
+                                   bool outputRejectLevels=false );
+
+
+    bool isOldFormatCascade() const;
+    virtual Size getOriginalWindowSize() const;
+    int getFeatureType() const;
+    bool setImage( const Mat& );
+
+protected:
+    //virtual bool detectSingleScale( const Mat& image, int stripCount, Size processingRectSize,
+    //                                int stripSize, int yStep, double factor, vector<Rect>& candidates );
+
+    virtual bool detectSingleScale( const Mat& image, int stripCount, Size processingRectSize,
+                                    int stripSize, int yStep, double factor, vector<Rect>& candidates,
+                                    vector<int>& rejectLevels, vector<double>& levelWeights, bool outputRejectLevels=false);
+
+protected:
+    enum { BOOST = 0 };
+    enum { DO_CANNY_PRUNING = 1, SCALE_IMAGE = 2,
+           FIND_BIGGEST_OBJECT = 4, DO_ROUGH_SEARCH = 8 };
+
+    friend class CascadeClassifierInvoker;
+
+    template<class FEval>
+    friend int predictOrdered( CascadeClassifier& cascade, Ptr<FeatureEvaluator> &featureEvaluator, double& weight);
+
+    template<class FEval>
+    friend int predictCategorical( CascadeClassifier& cascade, Ptr<FeatureEvaluator> &featureEvaluator, double& weight);
+
+    template<class FEval>
+    friend int predictOrderedStump( CascadeClassifier& cascade, Ptr<FeatureEvaluator> &featureEvaluator, double& weight);
+
+    template<class FEval>
+    friend int predictCategoricalStump( CascadeClassifier& cascade, Ptr<FeatureEvaluator> &featureEvaluator, double& weight);
+
+    bool setImage( Ptr<FeatureEvaluator>& feval, const Mat& image);
+    virtual int runAt( Ptr<FeatureEvaluator>& feval, Point pt, double& weight );
+
+    class Data
+    {
+    public:
+        struct CV_EXPORTS DTreeNode
+        {
+            int featureIdx;
+            float threshold; // for ordered features only
+            int left;
+            int right;
+        };
+
+        struct CV_EXPORTS DTree
+        {
+            int nodeCount;
+        };
+
+        struct CV_EXPORTS Stage
+        {
+            int first;
+            int ntrees;
+            float threshold;
+        };
+
+        bool read(const FileNode &node);
+
+        bool isStumpBased;
+
+        int stageType;
+        int featureType;
+        int ncategories;
+        Size origWinSize;
+
+        vector<Stage> stages;
+        vector<DTree> classifiers;
+        vector<DTreeNode> nodes;
+        vector<float> leaves;
+        vector<int> subsets;
+    };
+
+    Data data;
+    Ptr<FeatureEvaluator> featureEvaluator;
+    Ptr<CvHaarClassifierCascade> oldCascade;
+
+public:
+    class CV_EXPORTS MaskGenerator
+    {
+    public:
+        virtual ~MaskGenerator() {}
+        virtual cv::Mat generateMask(const cv::Mat& src)=0;
+        virtual void initializeMask(const cv::Mat& /*src*/) {};
+    };
+    void setMaskGenerator(Ptr<MaskGenerator> maskGenerator);
+    Ptr<MaskGenerator> getMaskGenerator();
+
+    void setFaceDetectionMaskGenerator();
+
+protected:
+    Ptr<MaskGenerator> maskGenerator;
+};
+
+
+//////////////// HOG (Histogram-of-Oriented-Gradients) Descriptor and Object Detector //////////////
+
+// struct for detection region of interest (ROI)
+struct DetectionROI
+{
+   // scale(size) of the bounding box
+   double scale;
+   // set of requested locations to be evaluated
+   vector<cv::Point> locations;
+   // vector that will contain confidence values for each location
+   vector<double> confidences;
+};
+
+struct CV_EXPORTS_W HOGDescriptor
+{
+public:
+    enum { L2Hys=0 };
+    enum { DEFAULT_NLEVELS=64 };
+
+    CV_WRAP HOGDescriptor() : winSize(64,128), blockSize(16,16), blockStride(8,8),
+        cellSize(8,8), nbins(9), derivAperture(1), winSigma(-1),
+        histogramNormType(HOGDescriptor::L2Hys), L2HysThreshold(0.2), gammaCorrection(true),
+        nlevels(HOGDescriptor::DEFAULT_NLEVELS)
+    {}
+
+    CV_WRAP HOGDescriptor(Size _winSize, Size _blockSize, Size _blockStride,
+                  Size _cellSize, int _nbins, int _derivAperture=1, double _winSigma=-1,
+                  int _histogramNormType=HOGDescriptor::L2Hys,
+                  double _L2HysThreshold=0.2, bool _gammaCorrection=false,
+                  int _nlevels=HOGDescriptor::DEFAULT_NLEVELS)
+    : winSize(_winSize), blockSize(_blockSize), blockStride(_blockStride), cellSize(_cellSize),
+    nbins(_nbins), derivAperture(_derivAperture), winSigma(_winSigma),
+    histogramNormType(_histogramNormType), L2HysThreshold(_L2HysThreshold),
+    gammaCorrection(_gammaCorrection), nlevels(_nlevels)
+    {}
+
+    CV_WRAP HOGDescriptor(const String& filename)
+    {
+        load(filename);
+    }
+
+    HOGDescriptor(const HOGDescriptor& d)
+    {
+        d.copyTo(*this);
+    }
+
+    virtual ~HOGDescriptor() {}
+
+    CV_WRAP size_t getDescriptorSize() const;
+    CV_WRAP bool checkDetectorSize() const;
+    CV_WRAP double getWinSigma() const;
+
+    CV_WRAP virtual void setSVMDetector(InputArray _svmdetector);
+
+    virtual bool read(FileNode& fn);
+    virtual void write(FileStorage& fs, const String& objname) const;
+
+    CV_WRAP virtual bool load(const String& filename, const String& objname=String());
+    CV_WRAP virtual void save(const String& filename, const String& objname=String()) const;
+    virtual void copyTo(HOGDescriptor& c) const;
+
+    CV_WRAP virtual void compute(const Mat& img,
+                         CV_OUT vector<float>& descriptors,
+                         Size winStride=Size(), Size padding=Size(),
+                         const vector<Point>& locations=vector<Point>()) const;
+    //with found weights output
+    CV_WRAP virtual void detect(const Mat& img, CV_OUT vector<Point>& foundLocations,
+                        CV_OUT vector<double>& weights,
+                        double hitThreshold=0, Size winStride=Size(),
+                        Size padding=Size(),
+                        const vector<Point>& searchLocations=vector<Point>()) const;
+    //without found weights output
+    virtual void detect(const Mat& img, CV_OUT vector<Point>& foundLocations,
+                        double hitThreshold=0, Size winStride=Size(),
+                        Size padding=Size(),
+                        const vector<Point>& searchLocations=vector<Point>()) const;
+    //with result weights output
+    CV_WRAP virtual void detectMultiScale(const Mat& img, CV_OUT vector<Rect>& foundLocations,
+                                  CV_OUT vector<double>& foundWeights, double hitThreshold=0,
+                                  Size winStride=Size(), Size padding=Size(), double scale=1.05,
+                                  double finalThreshold=2.0,bool useMeanshiftGrouping = false) const;
+    //without found weights output
+    virtual void detectMultiScale(const Mat& img, CV_OUT vector<Rect>& foundLocations,
+                                  double hitThreshold=0, Size winStride=Size(),
+                                  Size padding=Size(), double scale=1.05,
+                                  double finalThreshold=2.0, bool useMeanshiftGrouping = false) const;
+
+    CV_WRAP virtual void computeGradient(const Mat& img, CV_OUT Mat& grad, CV_OUT Mat& angleOfs,
+                                 Size paddingTL=Size(), Size paddingBR=Size()) const;
+
+    CV_WRAP static vector<float> getDefaultPeopleDetector();
+    CV_WRAP static vector<float> getDaimlerPeopleDetector();
+
+    CV_PROP Size winSize;
+    CV_PROP Size blockSize;
+    CV_PROP Size blockStride;
+    CV_PROP Size cellSize;
+    CV_PROP int nbins;
+    CV_PROP int derivAperture;
+    CV_PROP double winSigma;
+    CV_PROP int histogramNormType;
+    CV_PROP double L2HysThreshold;
+    CV_PROP bool gammaCorrection;
+    CV_PROP vector<float> svmDetector;
+    CV_PROP int nlevels;
+
+
+   // evaluate specified ROI and return confidence value for each location
+   void detectROI(const cv::Mat& img, const vector<cv::Point> &locations,
+                  CV_OUT std::vector<cv::Point>& foundLocations, CV_OUT std::vector<double>& confidences,
+                  double hitThreshold = 0, cv::Size winStride = Size(),
+                  cv::Size padding = Size()) const;
+
+   // evaluate specified ROI and return confidence value for each location in multiple scales
+   void detectMultiScaleROI(const cv::Mat& img,
+                            CV_OUT std::vector<cv::Rect>& foundLocations,
+                            std::vector<DetectionROI>& locations,
+                            double hitThreshold = 0,
+                            int groupThreshold = 0) const;
+
+   // read/parse Dalal's alt model file
+   void readALTModel(std::string modelfile);
+   void groupRectangles(vector<cv::Rect>& rectList, vector<double>& weights, int groupThreshold, double eps) const;
+};
+
+
+CV_EXPORTS_W void findDataMatrix(InputArray image,
+                                 CV_OUT vector<string>& codes,
+                                 OutputArray corners=noArray(),
+                                 OutputArrayOfArrays dmtx=noArray());
+CV_EXPORTS_W void drawDataMatrixCodes(InputOutputArray image,
+                                      const vector<string>& codes,
+                                      InputArray corners);
+}
+
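For reference, here is a minimal usage sketch of the CascadeClassifier API declared above; it is illustrative only and not part of the vendored header or this patch. It loads a Haar cascade, runs detectMultiScale on an equalized grayscale frame, and draws the results. The cascade and image file names are placeholders, and the sketch assumes the highgui and imgproc modules bundled alongside this library are available for imread, cvtColor, and imwrite.

// face_detect_example.cpp -- illustrative sketch, assumed file names
#include "opencv2/objdetect/objdetect.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"

#include <vector>

int main()
{
    // Load a Haar cascade shipped with OpenCV (path is a placeholder).
    cv::CascadeClassifier cascade;
    if (!cascade.load("haarcascade_frontalface_alt.xml"))
        return 1;

    // Read a test image and prepare an equalized grayscale frame.
    cv::Mat frame = cv::imread("input.jpg");
    if (frame.empty())
        return 1;
    cv::Mat gray;
    cv::cvtColor(frame, gray, CV_BGR2GRAY);
    cv::equalizeHist(gray, gray);

    // Detect objects; CV_HAAR_SCALE_IMAGE is defined earlier in this header.
    std::vector<cv::Rect> faces;
    cascade.detectMultiScale(gray, faces, 1.1, 3, CV_HAAR_SCALE_IMAGE, cv::Size(30, 30));

    // Draw the detections and save the result.
    for (size_t i = 0; i < faces.size(); i++)
        cv::rectangle(frame, faces[i], cv::Scalar(0, 255, 0), 2);
    cv::imwrite("output.jpg", frame);
    return 0;
}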
+/****************************************************************************************\
+*                                      Datamatrix                                       *
+\****************************************************************************************/
+
+struct CV_EXPORTS CvDataMatrixCode {
+  char msg[4];
+  CvMat *original;
+  CvMat *corners;
+};
+
+CV_EXPORTS std::deque<CvDataMatrixCode> cvFindDataMatrix(CvMat *im);
+
+/****************************************************************************************\
+*                                       LINE-MOD                                        *
+\****************************************************************************************/
+
+namespace cv {
+namespace linemod {
+
+using cv::FileNode;
+using cv::FileStorage;
+using cv::Mat;
+using cv::noArray;
+using cv::OutputArrayOfArrays;
+using cv::Point;
+using cv::Ptr;
+using cv::Rect;
+using cv::Size;
+
+/// @todo Convert doxy comments to rst
+
+/**
+ * \brief Discriminant feature described by its location and label.
+ */
+struct CV_EXPORTS Feature
+{
+  int x; ///< x offset
+  int y; ///< y offset
+  int label; ///< Quantization
+
+  Feature() : x(0), y(0), label(0) {}
+  Feature(int x, int y, int label);
+
+  void read(const FileNode& fn);
+  void write(FileStorage& fs) const;
+};
+
+inline Feature::Feature(int _x, int _y, int _label) : x(_x), y(_y), label(_label) {}
+
+struct CV_EXPORTS Template
+{
+  int width;
+  int height;
+  int pyramid_level;
+  std::vector<Feature> features;
+
+  void read(const FileNode& fn);
+  void write(FileStorage& fs) const;
+};
+
+/**
+ * \brief Represents a modality operating over an image pyramid.
+ */
+class QuantizedPyramid
+{
+public:
+  // Virtual destructor
+  virtual ~QuantizedPyramid() {}
+
+  /**
+   * \brief Compute quantized image at current pyramid level for online detection.
+   *
+   * \param[out] dst The destination 8-bit image. For each pixel at most one bit is set,
+   *                 representing its classification.
+   */
+  virtual void quantize(Mat& dst) const =0;
+
+  /**
+   * \brief Extract most discriminant features at current pyramid level to form a new template.
+   *
+   * \param[out] templ The new template.
+   */
+  virtual bool extractTemplate(Template& templ) const =0;
+
+  /**
+   * \brief Go to the next pyramid level.
+   *
+   * \todo Allow pyramid scale factor other than 2
+   */
+  virtual void pyrDown() =0;
+
+protected:
+  /// Candidate feature with a score
+  struct Candidate
+  {
+    Candidate(int x, int y, int label, float score);
+
+    /// Sort candidates with high score to the front
+    bool operator<(const Candidate& rhs) const
+    {
+      return score > rhs.score;
+    }
+
+    Feature f;
+    float score;
+  };
+
+  /**
+   * \brief Choose candidate features so that they are not bunched together.
+   *
+   * \param[in]  candidates   Candidate features sorted by score.
+   * \param[out] features     Destination vector of selected features.
+   * \param[in]  num_features Number of candidates to select.
+   * \param[in]  distance     Hint for desired distance between features.
+   */
+  static void selectScatteredFeatures(const std::vector<Candidate>& candidates,
+                                      std::vector<Feature>& features,
+                                      size_t num_features, float distance);
+};
+
+inline QuantizedPyramid::Candidate::Candidate(int x, int y, int label, float _score) : f(x, y, label), score(_score) {}
+
+/**
+ * \brief Interface for modalities that plug into the LINE template matching representation.
+ *
+ * \todo Max response, to allow optimization of summing (255/MAX) features as uint8
+ */
+class CV_EXPORTS Modality
+{
+public:
+  // Virtual destructor
+  virtual ~Modality() {}
+
+  /**
+   * \brief Form a quantized image pyramid from a source image.
+   *
+   * \param[in] src  The source image. Type depends on the modality.
+   * \param[in] mask Optional mask. If not empty, unmasked pixels are set to zero
+   *                 in quantized image and cannot be extracted as features.
+   */
+  Ptr<QuantizedPyramid> process(const Mat& src,
+                                const Mat& mask = Mat()) const
+  {
+    return processImpl(src, mask);
+  }
+
+  virtual std::string name() const =0;
+
+  virtual void read(const FileNode& fn) =0;
+  virtual void write(FileStorage& fs) const =0;
+
+  /**
+   * \brief Create modality by name.
+   *
+   * The following modality types are supported:
+   * - "ColorGradient"
+   * - "DepthNormal"
+   */
+  static Ptr<Modality> create(const std::string& modality_type);
+
+  /**
+   * \brief Load a modality from file.
+   */
+  static Ptr<Modality> create(const FileNode& fn);
+
+protected:
+  // Indirection is because process() has a default parameter.
+  virtual Ptr<QuantizedPyramid> processImpl(const Mat& src,
+                                            const Mat& mask) const =0;
+};
+
+/**
+ * \brief Modality that computes quantized gradient orientations from a color image.
+ */
+class CV_EXPORTS ColorGradient : public Modality
+{
+public:
+  /**
+   * \brief Default constructor. Uses reasonable default parameter values.
+   */
+  ColorGradient();
+
+  /**
+   * \brief Constructor.
+   *
+   * \param weak_threshold   When quantizing, discard gradients with magnitude less than this.
+   * \param num_features     How many features a template must contain.
+   * \param strong_threshold Consider as candidate features only gradients whose norms are
+   *                         larger than this.
+   */
+  ColorGradient(float weak_threshold, size_t num_features, float strong_threshold);
+
+  virtual std::string name() const;
+
+  virtual void read(const FileNode& fn);
+  virtual void write(FileStorage& fs) const;
+
+  float weak_threshold;
+  size_t num_features;
+  float strong_threshold;
+
+protected:
+  virtual Ptr<QuantizedPyramid> processImpl(const Mat& src,
+                                            const Mat& mask) const;
+};
+
+/**
+ * \brief Modality that computes quantized surface normals from a dense depth map.
+ */
+class CV_EXPORTS DepthNormal : public Modality
+{
+public:
+  /**
+   * \brief Default constructor. Uses reasonable default parameter values.
+   */
+  DepthNormal();
+
+  /**
+   * \brief Constructor.
+   *
+   * \param distance_threshold   Ignore pixels beyond this distance.
+   * \param difference_threshold When computing normals, ignore contributions of pixels whose
+   *                             depth difference with the central pixel is above this threshold.
+   * \param num_features         How many features a template must contain.
+   * \param extract_threshold    Consider as candidate feature only if there are no differing
+   *                             orientations within a distance of extract_threshold.
+   */
+  DepthNormal(int distance_threshold, int difference_threshold, size_t num_features,
+              int extract_threshold);
+
+  virtual std::string name() const;
+
+  virtual void read(const FileNode& fn);
+  virtual void write(FileStorage& fs) const;
+
+  int distance_threshold;
+  int difference_threshold;
+  size_t num_features;
+  int extract_threshold;
+
+protected:
+  virtual Ptr<QuantizedPyramid> processImpl(const Mat& src,
+                                            const Mat& mask) const;
+};
+
+/**
+ * \brief Debug function to colormap a quantized image for viewing.
+ */
+void colormap(const Mat& quantized, Mat& dst);
+
+/**
+ * \brief Represents a successful template match.
+ */
+struct CV_EXPORTS Match
+{
+  Match()
+  {
+  }
+
+  Match(int x, int y, float similarity, const std::string& class_id, int template_id);
+
+  /// Sort matches with high similarity to the front
+  bool operator<(const Match& rhs) const
+  {
+    // Secondarily sort on template_id for the sake of duplicate removal
+    if (similarity != rhs.similarity)
+      return similarity > rhs.similarity;
+    else
+      return template_id < rhs.template_id;
+  }
+
+  bool operator==(const Match& rhs) const
+  {
+    return x == rhs.x && y == rhs.y && similarity == rhs.similarity && class_id == rhs.class_id;
+  }
+
+  int x;
+  int y;
+  float similarity;
+  std::string class_id;
+  int template_id;
+};
+
+inline Match::Match(int _x, int _y, float _similarity, const std::string& _class_id, int _template_id)
+    : x(_x), y(_y), similarity(_similarity), class_id(_class_id), template_id(_template_id)
+  {
+  }
+
+/**
+ * \brief Object detector using the LINE template matching algorithm with any set of
+ * modalities.
+ */
+class CV_EXPORTS Detector
+{
+public:
+  /**
+   * \brief Empty constructor, initialize with read().
+   */
+  Detector();
+
+  /**
+   * \brief Constructor.
+   *
+   * \param modalities       Modalities to use (color gradients, depth normals, ...).
+   * \param T_pyramid        Value of the sampling step T at each pyramid level. The
+   *                         number of pyramid levels is T_pyramid.size().
+   */
+  Detector(const std::vector< Ptr<Modality> >& modalities, const std::vector<int>& T_pyramid);
+
+  /**
+   * \brief Detect objects by template matching.
+   *
+   * Matches globally at the lowest pyramid level, then refines locally stepping up the pyramid.
+   *
+   * \param      sources   Source images, one for each modality.
+   * \param      threshold Similarity threshold, a percentage between 0 and 100.
+   * \param[out] matches   Template matches, sorted by similarity score.
+   * \param      class_ids If non-empty, only search for the desired object classes.
+   * \param[out] quantized_images Optionally return vector of quantized images.
+   * \param      masks     The masks for consideration during matching. The masks should be CV_8UC1
+   *                       where 255 represents a valid pixel. If non-empty, the vector must be
+   *                       the same size as sources. Each element must be
+   *                       empty or the same size as its corresponding source.
+   */
+  void match(const std::vector<Mat>& sources, float threshold, std::vector<Match>& matches,
+             const std::vector<std::string>& class_ids = std::vector<std::string>(),
+             OutputArrayOfArrays quantized_images = noArray(),
+             const std::vector<Mat>& masks = std::vector<Mat>()) const;
+
+  /**
+   * \brief Add new object template.
+   *
+   * \param      sources      Source images, one for each modality.
+   * \param      class_id     Object class ID.
+   * \param      object_mask  Mask separating object from background.
+   * \param[out] bounding_box Optionally return bounding box of the extracted features.
+   *
+   * \return Template ID, or -1 if failed to extract a valid template.
+   */
+  int addTemplate(const std::vector<Mat>& sources, const std::string& class_id,
+                  const Mat& object_mask, Rect* bounding_box = NULL);
+
+  /**
+   * \brief Add a new object template computed by external means.
+   */
+  int addSyntheticTemplate(const std::vector